diff --git a/BUILD.gn b/BUILD.gn index fee6ad88af7026b60e5ae697a343153985905969..51e758f443f695266e35256ea9144e5719099677 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -376,8 +376,7 @@ config("ark_jsruntime_common_config") { } } - if (!is_mac && target_os != "ios" && !use_libfuzzer && - !(ark_standalone_build && !enable_lto)) { + if (!is_mac && target_os != "ios" && !use_libfuzzer && !enable_lto_O0) { cflags_cc += [ "-flto=thin" ] ldflags += [ "-flto=thin" ] } @@ -454,7 +453,7 @@ config("ark_jsruntime_common_config") { } } - if (enable_leak_check) { + if (enable_leak_check || is_asan) { defines += [ "ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK" ] } @@ -466,6 +465,11 @@ config("ark_jsruntime_common_config") { defines += [ "HOOK_ENABLE" ] } } + + # is_asan: skynet config; run_with_asan: est_runtime enable asan config + if (is_asan) { + defines += [ "ECMASCRIPT_ENABLE_DFX_CONFIG" ] + } } # ecmascript unit testcase config @@ -480,7 +484,7 @@ config("ecma_test_config") { "$js_root:ark_jsruntime_common_config", ] - if (enable_leak_check) { + if (enable_leak_check || is_asan) { defines = [ "ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK" ] } @@ -510,9 +514,12 @@ ecma_source = [ "ecmascript/base/atomic_helper.cpp", "ecmascript/base/builtins_base.cpp", "ecmascript/base/error_helper.cpp", + "ecmascript/base/fast_json_stringifier.cpp", + "ecmascript/base/json_helper.cpp", "ecmascript/base/json_parser.cpp", "ecmascript/base/json_stringifier.cpp", "ecmascript/base/number_helper.cpp", + "ecmascript/base/path_helper.cpp", "ecmascript/base/string_helper.cpp", "ecmascript/base/typed_array_helper.cpp", "ecmascript/base/utf_helper.cpp", @@ -598,6 +605,7 @@ ecma_source = [ "ecmascript/ecma_string.cpp", "ecmascript/ecma_string_table.cpp", "ecmascript/ecma_vm.cpp", + "ecmascript/elements.cpp", "ecmascript/frames.cpp", "ecmascript/free_object.cpp", "ecmascript/generator_helper.cpp", @@ -725,17 +733,24 @@ ecma_source = [ "ecmascript/module/js_module_namespace.cpp", "ecmascript/module/js_module_record.cpp", "ecmascript/module/js_module_source_text.cpp", + "ecmascript/module/js_module_deregister.cpp", "ecmascript/module/module_data_extractor.cpp", + "ecmascript/module/module_path_helper.cpp", "ecmascript/napi/jsnapi.cpp", "ecmascript/object_factory.cpp", "ecmascript/object_operator.cpp", "ecmascript/patch/patch_loader.cpp", "ecmascript/patch/quick_fix_manager.cpp", + "ecmascript/pgo_profiler/ap_file/pgo_file_info.cpp", "ecmascript/pgo_profiler/pgo_profiler.cpp", "ecmascript/pgo_profiler/pgo_profiler_decoder.cpp", "ecmascript/pgo_profiler/pgo_profiler_encoder.cpp", "ecmascript/pgo_profiler/pgo_profiler_info.cpp", "ecmascript/pgo_profiler/pgo_profiler_layout.cpp", + "ecmascript/pgo_profiler/pgo_profiler_manager.cpp", + "ecmascript/pgo_profiler/pgo_utils.cpp", + "ecmascript/pgo_profiler/ap_file/pgo_method_type_set.cpp", + "ecmascript/pgo_profiler/types/pgo_profile_type.cpp", "ecmascript/stackmap/ark_stackmap_builder.cpp", "ecmascript/stackmap/ark_stackmap_parser.cpp", "ecmascript/stackmap/llvm_stackmap_parser.cpp", @@ -784,6 +799,7 @@ if (is_ohos && is_standard_system && enable_hitrace) { ecma_debugger_source = [ "ecmascript/debugger/debugger_api.cpp", "ecmascript/debugger/js_debugger.cpp", + "ecmascript/debugger/dropframe_manager.cpp", "ecmascript/debugger/hot_reload_manager.cpp", ] @@ -1038,7 +1054,7 @@ ohos_source_set("libark_jsruntime_test_set") { deps += [ "$js_root/ecmascript/compiler:libark_mock_stub_set" ] } - if (enable_leak_check) { + if (enable_leak_check || is_asan) { defines += [ "ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK" ] } diff 
--git a/docs/README_zh.md b/docs/README_zh.md index 037e4173ea5c8db25e61fbfcb653668e65dd96ed..a7bf762b8c4d55f22ac459954167ac5bb74339d6 100644 --- a/docs/README_zh.md +++ b/docs/README_zh.md @@ -106,17 +106,9 @@ print("Hello World!!!"); 1. 通过方舟前端生成hello-world.abc文件,编译命令: ``` - node --expose-gc /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build/src/index.js hello-world.js + /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/es2abc hello-world.js ``` - **注意**:使用node编译abc过程遇到ENOENT错误,运行如下命令进行修复 - - ``` - npm cache clean --force - cd /your_code_path/arkcompiler/ets_frontend/ts2panda - npm install - cd /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build - npm install - ``` + 2. 执行hello-world.abc文件: 1. 设置搜索路径: @@ -400,7 +392,7 @@ print('Hello World!!!') 1. 通过方舟前端生成hello-world.abc文件,编译命令: ``` - node --expose-gc /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build/src/index.js -m --merge-abc test1/test.ts + /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/es2abc --module --merge-abc test1/test.ts ``` 2. 执行hello-world.abc文件: @@ -439,136 +431,151 @@ print('Hello World!!!') 构建编译: ``` -$ ./build.sh --product-name rk3568 --build-target ark_ts2abc_build +$ ./build.sh --product-name rk3568 --build-target ets_frontend_build ``` -安装 `node`和 `npm`后, 使用前端工具: - ``` -$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/build -$ npm install -$ node --expose-gc src/index.js [选项] file.js +$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/ +$ ./es2abc [options] file.js ``` - -

-<table>
-  <tr><th>选项</th><th>缩写</th><th>描述</th><th>取值范围</th><th>默认值</th></tr>
-  <tr><td>--modules</td><td>-m</td><td>按照Module方式编译</td><td>-</td><td>-</td></tr>
-  <tr><td>--debug-log</td><td>-l</td><td>使能log信息</td><td>-</td><td>-</td></tr>
-  <tr><td>--dump-assembly</td><td>-a</td><td>输出为可读文本格式的字节码文件</td><td>-</td><td>-</td></tr>
-  <tr><td>--debug</td><td>-d</td><td>携带debug信息</td><td>-</td><td>-</td></tr>
-  <tr><td>--show-statistics</td><td>-s</td><td>显示字节码相关的统计信息</td><td>-</td><td>-</td></tr>
-  <tr><td>--output</td><td>-o</td><td>输出文件路径</td><td>-</td><td>-</td></tr>
-  <tr><td>--timeout</td><td>-t</td><td>超时门限</td><td>-</td><td>-</td></tr>
-  <tr><td>--help</td><td>-h</td><td>帮助提示</td><td>-</td><td>-</td></tr>
-  <tr><td>--bc-version</td><td>-v</td><td>输出当前字节码版本</td><td>-</td><td>-</td></tr>
-  <tr><td>--bc-min-version</td><td>&nbsp;</td><td>输出支持的最低字节码版本</td><td>-</td><td>-</td></tr>
-</table>
+<table>
+  <tr><th>选项</th><th>描述</th><th>取值范围</th><th>默认值</th></tr>
+  <tr><td>--debug-info</td><td>携带debug信息</td><td>-</td><td>-</td></tr>
+  <tr><td>--debugger-evaluate-expression</td><td>debugger下对输入的base64形式的表达式求值</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-assembly</td><td>输出为汇编文件</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-ast</td><td>打印解析得到的ast(抽象语法树)</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-debug-info</td><td>打印debug信息</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-literal-buffer</td><td>打印literal buffer内容</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-size-stat</td><td>显示字节码相关的统计信息</td><td>-</td><td>-</td></tr>
+  <tr><td>--extension</td><td>指定输入类型</td><td>['js', 'ts', 'as']</td><td>-</td></tr>
+  <tr><td>--help</td><td>帮助提示</td><td>-</td><td>-</td></tr>
+  <tr><td>--module</td><td>按照ESM模式编译</td><td>-</td><td>-</td></tr>
+  <tr><td>--opt-level</td><td>指定编译优化等级</td><td>['0', '1', '2']</td><td>0</td></tr>
+  <tr><td>--output</td><td>输出文件路径</td><td>-</td><td>-</td></tr>
+  <tr><td>--parse-only</td><td>只对输入文件做解析动作</td><td>-</td><td>-</td></tr>
+  <tr><td>--thread</td><td>指定生成字节码时所用的线程数目</td><td>0-机器支持的线程数目</td><td>0</td></tr>
+</table>
diff --git a/docs/development-example.md b/docs/development-example.md
index 4fc6e27681fcaf0f7d541c05706645bcde2ff848..c8ee078d03e02cc03cb720233b94e7ae1a988f6c 100644
--- a/docs/development-example.md
+++ b/docs/development-example.md
@@ -48,7 +48,7 @@ Run the **hello-world.js** file.
 1. Use the ARK frontend to create the **hello-world.abc** file.
    ```
-   node --expose-gc /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build/src/index.js hello-world.js
+   /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/es2abc hello-world.js
    ```
 2. Run the **hello-world.abc** file.
diff --git a/docs/using-the-toolchain.md b/docs/using-the-toolchain.md
index d487322f813bd9890ac8a9b75f25af36e467eef1..2b0f65c3dfe5d80e4eeb96bc105aef73c121747b 100644
--- a/docs/using-the-toolchain.md
+++ b/docs/using-the-toolchain.md
@@ -9,137 +9,153 @@ Front-end tools, converting JS source code into ARK bytecode, can be built by sp
 Build tools:

 ```
-$ $ ./build.sh --product-name rk3568 --build-target ark_ts2abc_build
+$ ./build.sh --product-name rk3568 --build-target ets_frontend_build
 ```

-Install `node` and `npm`, then use tools:
-
 ```
-$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/build
-$ npm install
-$ node --expose-gc src/index.js [option] file.js
+$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/
+$ ./es2abc [options] file.js
 ```

-<table>
-  <tr><th>Option</th><th>Abbreviation</th><th>Description</th><th>Value range</th><th>Default value</th></tr>
-  <tr><td>--modules</td><td>-m</td><td>Compile as a module</td><td>-</td><td>-</td></tr>
-  <tr><td>--debug-log</td><td>-l</td><td>Enable log information</td><td>-</td><td>-</td></tr>
-  <tr><td>--dump-assembly</td><td>-a</td><td>Output bytecode in readable text format</td><td>-</td><td>-</td></tr>
-  <tr><td>--debug</td><td>-d</td><td>Carry debug information</td><td>-</td><td>-</td></tr>
-  <tr><td>--show-statistics</td><td>-s</td><td>Show bytecode statistics</td><td>-</td><td>-</td></tr>
-  <tr><td>--output</td><td>-o</td><td>Output file path</td><td>-</td><td>-</td></tr>
-  <tr><td>--timeout</td><td>-t</td><td>Timeout threshold</td><td>-</td><td>-</td></tr>
-  <tr><td>--help</td><td>-h</td><td>Help</td><td>-</td><td>-</td></tr>
-  <tr><td>--bc-version</td><td>-v</td><td>Output the current bytecode version</td><td>-</td><td>-</td></tr>
-  <tr><td>--bc-min-version</td><td>&nbsp;</td><td>Output the minimum supported bytecode version</td><td>-</td><td>-</td></tr>
-</table>
+<table>
+  <tr><th>Option</th><th>Description</th><th>Value range</th><th>Default value</th></tr>
+  <tr><td>--debug-info</td><td>Carry debug information</td><td>-</td><td>-</td></tr>
+  <tr><td>--debugger-evaluate-expression</td><td>Evaluate a base64-encoded expression under the debugger</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-assembly</td><td>Output an assembly file</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-ast</td><td>Print the parsed AST (abstract syntax tree)</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-debug-info</td><td>Print debug information</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-literal-buffer</td><td>Print the contents of the literal buffer</td><td>-</td><td>-</td></tr>
+  <tr><td>--dump-size-stat</td><td>Show bytecode statistics</td><td>-</td><td>-</td></tr>
+  <tr><td>--extension</td><td>Specify the input type</td><td>['js', 'ts', 'as']</td><td>-</td></tr>
+  <tr><td>--help</td><td>Help</td><td>-</td><td>-</td></tr>
+  <tr><td>--module</td><td>Compile in ESM mode</td><td>-</td><td>-</td></tr>
+  <tr><td>--opt-level</td><td>Specify the compilation optimization level</td><td>['0', '1', '2']</td><td>0</td></tr>
+  <tr><td>--output</td><td>Output file path</td><td>-</td><td>-</td></tr>
+  <tr><td>--parse-only</td><td>Only parse the input file</td><td>-</td><td>-</td></tr>
+  <tr><td>--thread</td><td>Number of threads used to generate bytecode</td><td>0 to the number of threads supported by the machine</td><td>0</td></tr>
+</table>
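For example, compiling a module at optimization level 2 into a named output file (an illustrative invocation: the option spellings come from the table above, while the `=`/space value syntax and the output file name are assumptions):

```
$ ./es2abc --module --opt-level=2 --output hello-world.abc hello-world.js
```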

+If no parameter is specified for **\[options\]**, an ARK binary file is generated by default.
diff --git a/ecmascript/base/array_helper.cpp b/ecmascript/base/array_helper.cpp
index 68fe58c692b56d0886fa3f07851d500696037065..8d85084f274d0a7da10c7790f4ffed93c0395d43 100644
--- a/ecmascript/base/array_helper.cpp
+++ b/ecmascript/base/array_helper.cpp
@@ -24,8 +24,104 @@
 #include "ecmascript/js_hclass.h"
 #include "ecmascript/js_tagged_number.h"
 #include "ecmascript/js_tagged_value-inl.h"
+#include "ecmascript/object_fast_operator-inl.h"

 namespace panda::ecmascript::base {
+int64_t ArrayHelper::GetStartIndex(JSThread *thread, const JSHandle<JSTaggedValue> &startIndexHandle,
+                                   int64_t length)
+{
+    // Common procedure to clamp fromIndexValue to the range [0, length].
+    // For the integer case, conditional selection instructions (csel on ARM, cmov on x86, etc.)
+    // may be utilized by the compiler to minimize branching.
+    auto doClamp = [length](auto fromIndexValue) -> int64_t {
+        if (LIKELY(fromIndexValue >= 0)) {
+            // Including the case where fromIndexValue == Infinity
+            return (fromIndexValue >= length) ? length : static_cast<int64_t>(fromIndexValue);
+        }
+        auto plusLength = fromIndexValue + length;
+        if (plusLength >= 0) {
+            return static_cast<int64_t>(plusLength);
+        }
+        return 0; // Including the case where fromIndexValue == -Infinity
+    };
+    if (LIKELY(startIndexHandle->IsInt())) {
+        // Fast path: startIndexHandle is tagged int32.
+        return doClamp(startIndexHandle->GetInt());
+    }
+    // Slow path: startIndexHandle is tagged double, or type conversion is involved.
+    JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, startIndexHandle);
+    if (UNLIKELY(thread->HasPendingException())) {
+        return length;
+    }
+    double fromIndexValue = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); // NaN -> 0
+    return doClamp(fromIndexValue);
+}
+
+int64_t ArrayHelper::GetStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv,
+                                           uint32_t argIndex, int64_t length)
+{
+    uint32_t argc = argv->GetArgsNumber();
+    if (argc <= argIndex) {
+        return 0;
+    }
+    JSHandle<JSTaggedValue> arg = base::BuiltinsBase::GetCallArg(argv, argIndex);
+    return GetStartIndex(thread, arg, length);
+}
+
+int64_t ArrayHelper::GetLastStartIndex(JSThread *thread, const JSHandle<JSTaggedValue> &startIndexHandle,
+                                       int64_t length)
+{
+    // Common procedure to clamp fromIndexValue to the range [-1, length-1].
+    auto doClamp = [length](auto fromIndexValue) -> int64_t {
+        if (LIKELY(fromIndexValue >= 0)) {
+            // Including the case where fromIndexValue == Infinity
+            return (length - 1 < fromIndexValue) ? (length - 1) : static_cast<int64_t>(fromIndexValue);
+        }
+        auto plusLength = fromIndexValue + length;
+        if (plusLength >= 0) {
+            return static_cast<int64_t>(plusLength);
+        }
+        return -1; // Including the case where fromIndexValue == -Infinity
+    };
+    if (LIKELY(startIndexHandle->IsInt())) {
+        // Fast path: startIndexHandle is tagged int32.
+        return doClamp(startIndexHandle->GetInt());
+    }
+    // Slow path: startIndexHandle is tagged double, or type conversion is involved.
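The clamp above implements the ECMAScript relative-index rule shared by `Array.prototype.indexOf`, `Array.prototype.at`, `Array.prototype.slice`, and similar builtins: non-negative start indices are capped at `length`, negative ones count back from the end of the array, and `NaN` truncates to 0. A self-contained sketch of the same arithmetic with the engine's tagged-value types stripped out (all names are illustrative, not part of the patch):

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

// Standalone model of doClamp in ArrayHelper::GetStartIndex: a relative
// start index is clamped to [0, length]; negative values count from the end.
int64_t ClampStartIndex(double fromIndex, int64_t length)
{
    if (std::isnan(fromIndex)) {
        fromIndex = 0;  // mirrors TruncateDouble: NaN -> 0
    }
    if (fromIndex >= 0) {
        return fromIndex >= static_cast<double>(length) ? length  // also covers +Infinity
                                                        : static_cast<int64_t>(fromIndex);
    }
    double plusLength = fromIndex + static_cast<double>(length);
    return plusLength >= 0 ? static_cast<int64_t>(plusLength) : 0;  // also covers -Infinity
}

int main()
{
    assert(ClampStartIndex(2, 5) == 2);    // in range: used as-is
    assert(ClampStartIndex(9, 5) == 5);    // past the end: clamped to length
    assert(ClampStartIndex(-2, 5) == 3);   // negative: counts back from the end
    assert(ClampStartIndex(-9, 5) == 0);   // below -length: clamped to 0
    assert(ClampStartIndex(std::nan(""), 5) == 0);
    return 0;
}
```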
+ JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, startIndexHandle); + if (UNLIKELY(thread->HasPendingException())) { + return -1; + } + double fromIndexValue = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); // NaN -> 0 + return doClamp(fromIndexValue); +} + +int64_t ArrayHelper::GetLastStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length) +{ + uint32_t argc = argv->GetArgsNumber(); + if (argc <= argIndex) { + return length - 1; + } + JSHandle arg = base::BuiltinsBase::GetCallArg(argv, argIndex); + return GetLastStartIndex(thread, arg, length); +} + +bool ArrayHelper::ElementIsStrictEqualTo(JSThread *thread, const JSHandle &thisObjVal, + const JSHandle &keyHandle, + const JSHandle &target) +{ + bool exists = thisObjVal->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, keyHandle); + if (thread->HasPendingException() || !exists) { + return false; + } + JSHandle valueHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, keyHandle); + if (thread->HasPendingException()) { + return false; + } + return JSTaggedValue::StrictEqual(thread, target, valueHandle); +} + bool ArrayHelper::IsConcatSpreadable(JSThread *thread, const JSHandle &obj) { // 1. If Type(O) is not Object, return false. @@ -37,19 +133,22 @@ bool ArrayHelper::IsConcatSpreadable(JSThread *thread, const JSHandleGetEcmaVM(); JSHandle env = ecmaVm->GetGlobalEnv(); JSHandle isConcatsprKey = env->GetIsConcatSpreadableSymbol(); - JSHandle spreadable = JSTaggedValue::GetProperty(thread, obj, isConcatsprKey).GetValue(); + JSTaggedValue spreadable = ObjectFastOperator::FastGetPropertyByValue(thread, obj.GetTaggedValue(), + isConcatsprKey.GetTaggedValue()); // 3. ReturnIfAbrupt(spreadable). RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); // 4. If spreadable is not undefined, return ToBoolean(spreadable). - if (!spreadable->IsUndefined()) { - return spreadable->ToBoolean(); + if (!spreadable.IsUndefined()) { + return spreadable.ToBoolean(); } // 5. Return IsArray(O). return obj->IsArray(thread); } +// must use 'double' as return type, for sort result may double. +// let arr = [1,2,3,4,5,6]; arr.sort(() => Math.random() - 0.5); double ArrayHelper::SortCompare(JSThread *thread, const JSHandle &callbackfnHandle, const JSHandle &valueX, const JSHandle &valueY) { @@ -86,10 +185,10 @@ double ArrayHelper::SortCompare(JSThread *thread, const JSHandle RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); info->SetCallArg(valueX.GetTaggedValue(), valueY.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); if (callResult.IsInt()) { return callResult.GetInt(); } - RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); JSHandle testResult(thread, callResult); JSTaggedNumber v = JSTaggedValue::ToNumber(thread, testResult); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); @@ -111,7 +210,33 @@ double ArrayHelper::SortCompare(JSThread *thread, const JSHandle JSHandle yValueHandle(JSTaggedValue::ToString(thread, valueY)); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); ComparisonResult compareResult = JSTaggedValue::Compare(thread, xValueHandle, yValueHandle); - return compareResult == ComparisonResult::GREAT ? 
1 : 0; + if (compareResult == ComparisonResult::GREAT) { + return 1; + } + if (compareResult == ComparisonResult::LESS) { + return -1; + } + return 0; +} + +double ArrayHelper::StringSortCompare(JSThread *thread, const JSHandle &valueX, + const JSHandle &valueY) +{ + ASSERT(valueX->IsString()); + ASSERT(valueY->IsString()); + // 9. If xString < yString, return -1. + // 10. If xString > yString, return 1. + // 11. Return +0. + auto xHandle = JSHandle(valueX); + auto yHandle = JSHandle(valueY); + int result = EcmaStringAccessor::Compare(thread->GetEcmaVM(), xHandle, yHandle); + if (result < 0) { + return -1; + } + if (result > 0) { + return 1; + } + return 0; } int64_t ArrayHelper::GetLength(JSThread *thread, const JSHandle &thisHandle) @@ -187,6 +312,7 @@ JSTaggedValue ArrayHelper::FlattenIntoArray(JSThread *thread, const JSHandle &thisObj, + int64_t len, const JSHandle &callbackFnHandle, + HolesType holes) +{ + // 1. Let items be a new empty List. + JSHandle items(thread->GetEcmaVM()->GetFactory()->NewTaggedArray(len)); + // 2. Let k be 0. + int64_t k = 0; + // 3. Repeat, while k < len, + // a. Let Pk be ! ToString(𝔽(k)). + // b. If holes is skip-holes, then + // i. Let kRead be ? HasProperty(obj, Pk). + // c. Else, + // i. Assert: holes is read-through-holes. + // ii. Let kRead be true. + // d. If kRead is true, then + // i. Let kValue be ? Get(obj, Pk). + // ii. Append kValue to items. + // e. Set k to k + 1. + bool kRead = false; + JSHandle thisObjVal(thisObj); + JSMutableHandle pk(thread, JSTaggedValue::Undefined()); + + while (k < len) { + if (holes == HolesType::SKIP_HOLES) { + pk.Update(JSTaggedValue(k)); + kRead = JSTaggedValue::HasProperty(thread, thisObjVal, pk); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } else { + ASSERT(holes == HolesType::READ_THROUGH_HOLES); + kRead = true; + } + if (kRead) { + JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + items->Set(thread, k, kValue.GetTaggedValue()); + } + ++k; + } + JSHandle array(JSArray::CreateArrayFromList(thread, items)); + JSHandle arrayObj = JSHandle::Cast(array); + // 4. Sort items using an implementation-defined sequence of calls to SortCompare. + // If any such call returns an abrupt completion, + // stop before performing any further calls to SortCompare and return that Completion Record. + JSArray::Sort(thread, arrayObj, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 5. Return items. + return arrayObj.GetTaggedValue(); +} } // namespace panda::ecmascript::base diff --git a/ecmascript/base/array_helper.h b/ecmascript/base/array_helper.h index 1461c246cd9d96ff6829d1cdd799e26fe4402377..cf12a07a821a831178ab8c728dd8fb2961e388e6 100644 --- a/ecmascript/base/array_helper.h +++ b/ecmascript/base/array_helper.h @@ -19,6 +19,8 @@ #include #include "ecmascript/base/builtins_base.h" +#include "ecmascript/ecma_runtime_call_info.h" +#include "ecmascript/js_tagged_value.h" namespace panda::ecmascript::base { struct FlattenArgs { @@ -26,17 +28,51 @@ struct FlattenArgs { int64_t start = 0; double depth = 0; }; + +enum class HolesType { + SKIP_HOLES, + READ_THROUGH_HOLES, +}; class ArrayHelper { public: + // Common subprocedure for Array.prototype.at, Array.prototype.indexOf, Array.prototype.slice, etc. + // Gets start index that falls in range [0, length]. + // length is returned on pending exception. 
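+    // e.g. with length == 5: fromIndexValue 2 -> 2, 9 -> 5, -2 -> 3, -9 -> 0, NaN -> 0.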
+ static int64_t GetStartIndex(JSThread *thread, const JSHandle &startIndexHandle, + int64_t length); + // If argIndex is out of range [0, argc), then start index = 0 by default. + // Otherwise, let startIndexHandle = GetCallArg(argv, argIndex) and call GetStartIndex. + static int64_t GetStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length); + // Common subprocedure for Array.prototype.lastIndexOf, etc. + // Gets last start index that falls in range [-1, length - 1]. + // -1 is returned on pending exception. + static int64_t GetLastStartIndex(JSThread *thread, const JSHandle &startIndexHandle, + int64_t length); + // If argIndex is out of range [0, argc), then start index = length - 1 by default. + // Otherwise, let startIndexHandle = GetCallArg(argv, argIndex) and call GetLastStartIndex. + static int64_t GetLastStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length); + // Let thisHandle be the array object. Checks whether array[key] (if exists) is strictly equal to target. + // Returns false on pending exception. + static bool ElementIsStrictEqualTo(JSThread *thread, const JSHandle &thisHandle, + const JSHandle &keyHandle, + const JSHandle &target); + static bool IsConcatSpreadable(JSThread *thread, const JSHandle &obj); static double SortCompare(JSThread *thread, const JSHandle &callbackfnHandle, const JSHandle &valueX, const JSHandle &valueY); + static double StringSortCompare(JSThread *thread, const JSHandle &valueX, + const JSHandle &valueY); static int64_t GetLength(JSThread *thread, const JSHandle &thisHandle); static int64_t GetArrayLength(JSThread *thread, const JSHandle &thisHandle); static JSTaggedValue FlattenIntoArray(JSThread *thread, const JSHandle &newArrayHandle, const JSHandle &thisObjVal, const FlattenArgs &args, const JSHandle &mapperFunctionHandle, const JSHandle &thisArg); + static JSTaggedValue SortIndexedProperties(JSThread *thread, const JSHandle &thisObj, + int64_t len, const JSHandle &callbackFnHandle, + HolesType holes); }; } // namespace panda::ecmascript::base diff --git a/ecmascript/base/atomic_helper.h b/ecmascript/base/atomic_helper.h index bc9ef981f7f3f9877ae7539033370eb24266fe6a..c180bc7e3bdfeb98b94e01cd9c6064e725578d3c 100644 --- a/ecmascript/base/atomic_helper.h +++ b/ecmascript/base/atomic_helper.h @@ -19,7 +19,7 @@ #include "ecmascript/js_dataview.h" namespace panda::ecmascript::base { -enum class BytesSize : int32_t {ONEBYTES = 1, TWOBYTES = 2, FOURBYTES = 4, EIGHTBYTES = 8}; +enum class BytesSize : uint32_t {ONEBYTES = 1, TWOBYTES = 2, FOURBYTES = 4, EIGHTBYTES = 8}; class AtomicHelper final { public: @@ -101,4 +101,4 @@ public: }; } // namespace panda::ecmascript::base -#endif // ECMASCRIPT_BASE_ATOMIC_HELPER_H \ No newline at end of file +#endif // ECMASCRIPT_BASE_ATOMIC_HELPER_H diff --git a/ecmascript/base/bit_helper.h b/ecmascript/base/bit_helper.h index 1bc058b0981b8e4297937afcd481feb810033884..fe9ffbeb10865a14984f7ec88ad96fe478c0e33e 100644 --- a/ecmascript/base/bit_helper.h +++ b/ecmascript/base/bit_helper.h @@ -22,6 +22,7 @@ #include namespace panda::ecmascript::base { +constexpr uint64_t pureNaN = 0x7FF8ULL << 48U; // Be sure return the NaN that is safe. 
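Here `pureNaN` (0x7FF8000000000000) is the canonical quiet-NaN encoding: sign bit clear, all eleven exponent bits set, and the most significant mantissa bit set. Canonicalizing arbitrary NaN payloads to this one pattern is what makes the value "safe" for the runtime's tagged-value encoding. A minimal standalone check of the bit pattern (not engine code):

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main()
{
    // Sign 0, exponent bits all 1, top mantissa bit 1: a quiet NaN.
    constexpr uint64_t pureNaN = 0x7FF8ULL << 48U;  // 0x7FF8000000000000
    double d;
    std::memcpy(&d, &pureNaN, sizeof(d));
    assert(std::isnan(d));
    return 0;
}
```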
template union Data { S src; diff --git a/ecmascript/base/builtins_base.h b/ecmascript/base/builtins_base.h index be50bb83cc00234aa705531d52f3a8d29a487dd1..632fed2213f73d55a1035c32e03eedfa15942993 100644 --- a/ecmascript/base/builtins_base.h +++ b/ecmascript/base/builtins_base.h @@ -63,6 +63,11 @@ public: return JSTaggedValue(value); } + static inline JSTaggedValue GetTaggedInt64(int64_t value) + { + return JSTaggedValue(value); + } + static inline JSTaggedValue GetTaggedDouble(double value) { return JSTaggedValue(value); diff --git a/ecmascript/base/config.h b/ecmascript/base/config.h index 0dc57ae708d531f90a8d068366a75f103fc10793..0f9baeb51b3489697bd87cb10744b3777a505ae6 100644 --- a/ecmascript/base/config.h +++ b/ecmascript/base/config.h @@ -64,6 +64,15 @@ namespace panda::ecmascript { #define ECMASCRIPT_ENABLE_HEAP_VERIFY 1 #define ECMASCRIPT_ENABLE_BARRIER_CHECK 1 #define ECMASCRIPT_ENABLE_NAPI_SPECIAL_CHECK 1 +#elif defined(ECMASCRIPT_ENABLE_DFX_CONFIG) + #define ECMASCRIPT_ENABLE_IC 1 + #define ECMASCRIPT_ENABLE_ZAP_MEM 0 + #define ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC 0 + #define ECMASCRIPT_ENABLE_CAST_CHECK 0 + #define ECMASCRIPT_ENABLE_NEW_HANDLE_CHECK 0 + #define ECMASCRIPT_ENABLE_HEAP_VERIFY 1 + #define ECMASCRIPT_ENABLE_BARRIER_CHECK 0 + #define ECMASCRIPT_ENABLE_NAPI_SPECIAL_CHECK 1 #else #define ECMASCRIPT_ENABLE_IC 1 #define ECMASCRIPT_ENABLE_ZAP_MEM 0 diff --git a/ecmascript/base/error_helper.cpp b/ecmascript/base/error_helper.cpp index 78a0422f16d797efe39bff410f97d9565b904aa3..489997efc09ccf23448cc1109e252e5b45d20243 100644 --- a/ecmascript/base/error_helper.cpp +++ b/ecmascript/base/error_helper.cpp @@ -128,7 +128,7 @@ JSHandle ErrorHelper::GetErrorName(JSThread *thread, const JSHand } JSTaggedValue ErrorHelper::ErrorCommonConstructor(EcmaRuntimeCallInfo *argv, - [[maybe_unused]] const ErrorType &errorType) + const ErrorType &errorType) { JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); @@ -166,7 +166,23 @@ JSTaggedValue ErrorHelper::ErrorCommonConstructor(EcmaRuntimeCallInfo *argv, [[maybe_unused]] bool status = JSObject::DefineOwnProperty(thread, nativeInstanceObj, msgKey, msgDesc); ASSERT_PRINT(status == true, "return result exception!"); } - + // InstallErrorCause + JSHandle options = BuiltinsBase::GetCallArg(argv, 1); + // If options is an Object and ? HasProperty(options, "cause") is true, then + // a. Let cause be ? Get(options, "cause"). + // b. Perform CreateNonEnumerableDataPropertyOrThrow(O, "cause", cause). 
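+    // e.g. in JS: throw new Error("read failed", { cause: ioError });
+    // the original error is then reachable as err.cause. The descriptor built
+    // below (writable: true, enumerable: false, configurable: true) matches
+    // CreateNonEnumerableDataPropertyOrThrow.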
+ if (options->IsECMAObject()) { + JSHandle causeKey = globalConst->GetHandledCauseString(); + bool causePresent = JSTaggedValue::HasProperty(thread, options, causeKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (causePresent) { + JSHandle cause = JSObject::GetProperty(thread, options, causeKey).GetValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + PropertyDescriptor causeDesc(thread, cause, true, false, true); + [[maybe_unused]] bool status = JSObject::DefineOwnProperty(thread, nativeInstanceObj, causeKey, causeDesc); + ASSERT_PRINT(status == true, "return result exception!"); + } + } JSHandle errorFunc = GetErrorJSFunction(thread); if (!errorFunc->IsUndefined()) { JSHandle errorFunckey = globalConst->GetHandledErrorFuncString(); @@ -176,7 +192,8 @@ JSTaggedValue ErrorHelper::ErrorCommonConstructor(EcmaRuntimeCallInfo *argv, ASSERT_PRINT(status == true, "return result exception!"); } - JSHandle handleStack = BuildEcmaStackTrace(thread); + bool isOOMError = errorType == ErrorType::OOM_ERROR; + JSHandle handleStack = BuildEcmaStackTrace(thread, isOOMError); JSHandle stackkey = globalConst->GetHandledStackString(); PropertyDescriptor stackDesc(thread, JSHandle::Cast(handleStack), true, false, true); [[maybe_unused]] bool status = JSObject::DefineOwnProperty(thread, nativeInstanceObj, stackkey, stackDesc); @@ -205,9 +222,12 @@ JSHandle ErrorHelper::GetErrorJSFunction(JSThread *thread) return thread->GlobalConstants()->GetHandledUndefined(); } -JSHandle ErrorHelper::BuildEcmaStackTrace(JSThread *thread) +JSHandle ErrorHelper::BuildEcmaStackTrace(JSThread *thread, bool isOOMError) { std::string data = JsStackInfo::BuildJsStackTrace(thread, false); + if (isOOMError) { + data = data.substr(0, MAX_ERROR_SIZE); + } ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); LOG_ECMA(DEBUG) << data; return factory->NewFromStdString(data); diff --git a/ecmascript/base/error_helper.h b/ecmascript/base/error_helper.h index 5a1deeab3ade84bede2f86ff0bcbb16ea4c7d55b..aedf378e641f6844fa52921971da955da3aa5f7f 100644 --- a/ecmascript/base/error_helper.h +++ b/ecmascript/base/error_helper.h @@ -35,10 +35,11 @@ public: private: static JSHandle GetErrorJSFunction(JSThread *thread); - static JSHandle BuildEcmaStackTrace(JSThread *thread); + static JSHandle BuildEcmaStackTrace(JSThread *thread, bool isOOMError); static JSHandle GetErrorName(JSThread *thread, const JSHandle &name, const ErrorType &errorType); + static constexpr uint32_t MAX_ERROR_SIZE = 128_KB; }; } // namespace panda::ecmascript::base diff --git a/ecmascript/base/fast_json_stringifier.cpp b/ecmascript/base/fast_json_stringifier.cpp new file mode 100644 index 0000000000000000000000000000000000000000..54677c54fdbabf04948d2f9315e2370e52e3d237 --- /dev/null +++ b/ecmascript/base/fast_json_stringifier.cpp @@ -0,0 +1,938 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ecmascript/base/fast_json_stringifier.h" + +#include "ecmascript/base/builtins_base.h" +#include "ecmascript/base/json_helper.h" +#include "ecmascript/base/number_helper.h" +#include "ecmascript/builtins/builtins_errors.h" +#include "ecmascript/ecma_runtime_call_info.h" +#include "ecmascript/ecma_string-inl.h" +#include "ecmascript/ecma_vm.h" +#include "ecmascript/global_dictionary-inl.h" +#include "ecmascript/js_array.h" +#include "ecmascript/js_function.h" +#include "ecmascript/js_handle.h" +#include "ecmascript/js_object-inl.h" +#include "ecmascript/js_primitive_ref.h" +#include "ecmascript/js_tagged_value-inl.h" +#include "ecmascript/js_tagged_value.h" +#include "ecmascript/object_fast_operator-inl.h" + +namespace panda::ecmascript::base { +JSHandle FastJsonStringifier::Stringify(const JSHandle &value) +{ + factory_ = thread_->GetEcmaVM()->GetFactory(); + JSHandle jsonCache = thread_->GetEcmaVM()->GetGlobalEnv()->GetJsonObjectHclassCache(); + if (jsonCache->IsHole()) { + hclassCache_ = factory_->NewTaggedArray(JSON_CACHE_SIZE); + } else { + hclassCache_ = JSHandle::Cast(jsonCache); + } + JSTaggedValue tagValue = value.GetTaggedValue(); + handleValue_ = JSMutableHandle(thread_, tagValue); + handleKey_ = JSMutableHandle(thread_, factory_->GetEmptyString()); + + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread_); + handleValue_.Update(serializeValue); + } + + JSTaggedValue result = SerializeJSONProperty(handleValue_); + + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread_); + if (!result.IsUndefined()) { + return JSHandle( + factory_->NewFromUtf8Literal(reinterpret_cast(result_.c_str()), result_.size())); + } + return thread_->GlobalConstants()->GetHandledUndefined(); +} + +JSTaggedValue FastJsonStringifier::GetSerializeValue(const JSHandle &key, + const JSHandle &value) +{ + JSTaggedValue tagValue = value.GetTaggedValue(); + JSHandle undefined = thread_->GlobalConstants()->GetHandledUndefined(); + // a. Let toJSON be Get(value, "toJSON"). + JSHandle toJson = thread_->GlobalConstants()->GetHandledToJsonString(); + JSHandle toJsonFun( + thread_, ObjectFastOperator::FastGetPropertyByValue(thread_, tagValue, toJson.GetTaggedValue())); + // b. ReturnIfAbrupt(toJSON). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + tagValue = value.GetTaggedValue(); + // c. If IsCallable(toJSON) is true + if (UNLIKELY(toJsonFun->IsCallable())) { + // Let value be Call(toJSON, value, «key»). + EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread_, toJsonFun, value, undefined, 1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + info->SetCallArg(key.GetTaggedValue()); + tagValue = JSFunction::Call(info); + // ii. ReturnIfAbrupt(value). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + } + return tagValue; +} + +JSTaggedValue FastJsonStringifier::SerializeJSONProperty(const JSHandle &value) +{ + JSTaggedValue tagValue = value.GetTaggedValue(); + if (!tagValue.IsHeapObject()) { + JSTaggedType type = tagValue.GetRawData(); + switch (type) { + // If value is false, return "false". + case JSTaggedValue::VALUE_FALSE: + result_ += "false"; + return tagValue; + // If value is true, return "true". + case JSTaggedValue::VALUE_TRUE: + result_ += "true"; + return tagValue; + // If value is null, return "null". 
+ case JSTaggedValue::VALUE_NULL: + result_ += "null"; + return tagValue; + default: + // If Type(value) is Number, then + if (tagValue.IsNumber()) { + // a. If value is finite, return ToString(value). + if (std::isfinite(tagValue.GetNumber())) { + result_ += ConvertToString(*base::NumberHelper::NumberToString(thread_, tagValue)); + } else { + // b. Else, return "null". + result_ += "null"; + } + return tagValue; + } + } + } else { + JSType jsType = tagValue.GetTaggedObject()->GetClass()->GetObjectType(); + JSHandle valHandle(thread_, tagValue); + switch (jsType) { + case JSType::JS_ARRAY: { + SerializeJSArray(valHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + return tagValue; + } + // If Type(value) is String, return QuoteJSONString(value). + case JSType::LINE_STRING: + case JSType::CONSTANT_STRING: + case JSType::TREE_STRING: { + JSHandle strHandle = JSHandle(valHandle); + auto string = JSHandle(thread_, + EcmaStringAccessor::Flatten(thread_->GetEcmaVM(), strHandle)); + CString str = ConvertToString(*string, StringConvertedUsage::LOGICOPERATION); + str = JsonHelper::ValueToQuotedString(str); + result_ += str; + return tagValue; + } + case JSType::JS_PRIMITIVE_REF: { + SerializePrimitiveRef(valHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, JSTaggedValue::Exception()); + return tagValue; + } + case JSType::SYMBOL: + return JSTaggedValue::Undefined(); + case JSType::BIGINT: { + THROW_TYPE_ERROR_AND_RETURN(thread_, "cannot serialize a BigInt", JSTaggedValue::Exception()); + } + default: { + if (!tagValue.IsCallable()) { + JSHClass *jsHclass = tagValue.GetTaggedObject()->GetClass(); + if (UNLIKELY(jsHclass->IsJSProxy() && + JSProxy::Cast(tagValue.GetTaggedObject())->IsArray(thread_))) { + SerializeJSProxy(valHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + } else { + SerializeJSONObject(valHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + } + return tagValue; + } + } + } + } + return JSTaggedValue::Undefined(); +} + +CString FastJsonStringifier::SerializeObjectKey(const JSHandle &key, bool hasContent) +{ + if (hasContent) { + result_ += ","; + } + + CString str; + if (key->IsString()) { + str = ConvertToString(EcmaString::Cast(key->GetTaggedObject()), StringConvertedUsage::LOGICOPERATION); + } else if (key->IsInt()) { + str = NumberHelper::IntToString(static_cast(key->GetInt())); + } else { + str = ConvertToString(*JSTaggedValue::ToString(thread_, key), StringConvertedUsage::LOGICOPERATION); + } + str = JsonHelper::ValueToQuotedString(str); + result_ += str; + result_ += ":"; + + return str; +} + +bool FastJsonStringifier::PushValue(const JSHandle &value) +{ + uint32_t thisLen = stack_.size(); + + for (uint32_t i = 0; i < thisLen; i++) { + bool equal = JSTaggedValue::SameValue(stack_[i].GetTaggedValue(), value.GetTaggedValue()); + if (equal) { + return true; + } + } + + stack_.emplace_back(value); + return false; +} + +void FastJsonStringifier::PopValue() +{ + stack_.pop_back(); +} + +bool FastJsonStringifier::SerializeJSONObject(const JSHandle &value) +{ + bool isContain = PushValue(value); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "{"; + bool hasContent = false; + + ASSERT(!value->IsAccessor()); + JSHandle obj(value); + if (UNLIKELY(value->IsJSProxy() || value->IsTypedArray())) { // serialize proxy and typedArray + JSHandle propertyArray = JSObject::EnumerableOwnNames(thread_, obj); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + uint32_t arrLength = 
propertyArray->GetLength(); + for (uint32_t i = 0; i < arrLength; i++) { + handleKey_.Update(propertyArray->Get(i)); + JSHandle valueHandle = JSTaggedValue::GetProperty(thread_, value, handleKey_).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(valueHandle->IsECMAObject() || valueHandle->IsBigInt())) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, valueHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + continue; + } + handleValue_.Update(serializeValue); + } else { + handleValue_.Update(valueHandle); + } + SerializeObjectKey(handleKey_, hasContent); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (!res.IsUndefined()) { + hasContent = true; + } + } + } else { + uint32_t numOfKeys = obj->GetNumberOfKeys(); + uint32_t numOfElements = obj->GetNumberOfElements(); + if (numOfKeys + numOfElements < CACHE_MINIMUN_SIZIE || !cacheable_) { + if (numOfElements > 0) { + hasContent = DefaultSerializeElements(obj, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = DefaultSerializeKeys(obj, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } else { + JSHClass *jsHclass = value->GetTaggedObject()->GetClass(); + int32_t index = FindCache(jsHclass, numOfKeys + numOfElements); + if (index != INVALID_INDEX) { + auto strCache = thread_->GetCurrentEcmaContext()->GetJsonStringifyCache(index); + uint32_t cacheIndex = 0; + if (numOfElements > 0) { + hasContent = SerializeElementsWithCache(obj, hasContent, strCache, cacheIndex, numOfElements); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = SerializeKeysWithCache(obj, hasContent, strCache, cacheIndex); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } else { + CVector> strCache; + if (numOfElements > 0) { + hasContent = TryCacheSerializeElements(obj, hasContent, strCache); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = TryCacheSerializeKeys(obj, hasContent, strCache); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (cacheable_) { + SetCache(value->GetTaggedObject()->GetClass(), numOfElements + numOfKeys, strCache); + } + } + } + } + + result_ += "}"; + PopValue(); + return true; +} + +bool FastJsonStringifier::SerializeJSProxy(const JSHandle &object) +{ + bool isContain = PushValue(object); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "["; + JSHandle proxy(object); + JSHandle lengthKey = thread_->GlobalConstants()->GetHandledLengthString(); + JSHandle lenghHandle = JSProxy::GetProperty(thread_, proxy, lengthKey).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + JSTaggedNumber lenNumber = JSTaggedValue::ToLength(thread_, lenghHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + uint32_t length = lenNumber.ToUint32(); + for (uint32_t i = 0; i < length; i++) { + handleKey_.Update(JSTaggedValue(i)); + JSHandle valHandle = JSProxy::GetProperty(thread_, proxy, handleKey_).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (i > 0) { + result_ += ","; + } + if (UNLIKELY(valHandle->IsECMAObject() || valHandle->IsBigInt())) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, valHandle); + 
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + handleValue_.Update(serializeValue); + } else { + handleValue_.Update(valHandle); + } + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (res.IsUndefined()) { + result_ += "null"; + } + } + + result_ += "]"; + PopValue(); + return true; +} + +bool FastJsonStringifier::SerializeJSArray(const JSHandle &value) +{ + // If state.[[Stack]] contains value, throw a TypeError exception because the structure is cyclical. + bool isContain = PushValue(value); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "["; + JSHandle jsArr(value); + uint32_t len = jsArr->GetArrayLength(); + if (len > 0) { + for (uint32_t i = 0; i < len; i++) { + JSTaggedValue tagVal = ObjectFastOperator::FastGetPropertyByIndex(thread_, value.GetTaggedValue(), i); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(tagVal.IsAccessor())) { + tagVal = JSObject::CallGetter(thread_, AccessorData::Cast(tagVal.GetTaggedObject()), value); + } + handleKey_.Update(JSTaggedValue(i)); + handleValue_.Update(tagVal); + + if (i > 0) { + result_ += ","; + } + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + handleValue_.Update(serializeValue); + } + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (res.IsUndefined()) { + result_ += "null"; + } + } + } + + result_ += "]"; + PopValue(); + return true; +} + +void FastJsonStringifier::SerializePrimitiveRef(const JSHandle &primitiveRef) +{ + JSTaggedValue primitive = JSPrimitiveRef::Cast(primitiveRef.GetTaggedValue().GetTaggedObject())->GetValue(); + if (primitive.IsString()) { + auto priStr = JSTaggedValue::ToString(thread_, primitiveRef); + RETURN_IF_ABRUPT_COMPLETION(thread_); + CString str = ConvertToString(*priStr, StringConvertedUsage::LOGICOPERATION); + str = JsonHelper::ValueToQuotedString(str); + result_ += str; + } else if (primitive.IsNumber()) { + auto priNum = JSTaggedValue::ToNumber(thread_, primitiveRef); + RETURN_IF_ABRUPT_COMPLETION(thread_); + if (std::isfinite(priNum.GetNumber())) { + result_ += ConvertToString(*base::NumberHelper::NumberToString(thread_, priNum)); + } else { + result_ += "null"; + } + } else if (primitive.IsBoolean()) { + result_ += primitive.IsTrue() ? 
"true" : "false"; + } else if (primitive.IsBigInt()) { + THROW_TYPE_ERROR(thread_, "cannot serialize a BigInt"); + } +} + +bool FastJsonStringifier::TryCacheSerializeElements(const JSHandle &obj, bool hasContent, + CVector> &strCache) +{ + JSHandle elementsArr(thread_, obj->GetElements()); + if (!elementsArr->IsDictionaryMode()) { + uint32_t elementsLen = elementsArr->GetLength(); + for (uint32_t i = 0; i < elementsLen; ++i) { + if (!elementsArr->Get(i).IsHole()) { + handleKey_.Update(JSTaggedValue(i)); + handleValue_.Update(elementsArr->Get(i)); + hasContent = AppendJsonString(hasContent, strCache, i); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + } else { + JSHandle numberDic(elementsArr); + CVector> sortArr; + int size = numberDic->Size(); + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = numberDic->GetKey(hashIndex); + if (!key.IsUndefined() && !key.IsHole()) { + PropertyAttributes attr = numberDic->GetAttributes(hashIndex); + if (attr.IsEnumerable()) { + JSTaggedValue numberKey = JSTaggedValue(static_cast(key.GetInt())); + sortArr.emplace_back(JSHandle(thread_, numberKey)); + } + } + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareNumber); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = numberDic->FindEntry(entryKey); + JSTaggedValue value = numberDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; +} + +bool FastJsonStringifier::SerializeElementsWithCache(const JSHandle &obj, bool hasContent, + CVector> &strCache, uint32_t &cacheIndex, uint32_t elementSize) +{ + JSHandle elementsArr(thread_, obj->GetElements()); + if (!elementsArr->IsDictionaryMode()) { + uint32_t elementsLen = elementsArr->GetLength(); + for (uint32_t i = 0; i < elementsLen; ++i) { + if (!elementsArr->Get(i).IsHole()) { + CString key = strCache[cacheIndex++].first; + handleValue_.Update(elementsArr->Get(i)); + hasContent = FastAppendJsonString(hasContent, key); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + } else { + JSHandle numberDic(elementsArr); + for (; cacheIndex < elementSize; cacheIndex++) { + CString key = strCache[cacheIndex].first; + int index = strCache[cacheIndex].second; + JSTaggedValue value = numberDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, key); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; +} + +bool FastJsonStringifier::TryCacheSerializeKeys(const JSHandle &obj, bool hasContent, + CVector> &strCache) +{ + JSHandle propertiesArr(thread_, obj->GetProperties()); + if (!propertiesArr->IsDictionaryMode()) { + JSHandle jsHclass(thread_, obj->GetJSHClass()); + JSTaggedValue enumCache = jsHclass->GetEnumCache(); + if (!enumCache.IsNull()) { + JSHandle cache(thread_, enumCache); + uint32_t length = cache->GetLength(); + for (uint32_t i = 0; i < length; i++) { + JSTaggedValue key = cache->Get(i); + if (!key.IsString()) { + continue; + } + handleKey_.Update(key); + JSTaggedValue value; + LayoutInfo *layoutInfo = 
LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + int end = static_cast(jsHclass->NumberOfProps()); + if (end <= 0) { + return hasContent; + } + for (int i = 0; i < end; i++) { + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + JSTaggedValue key = layoutInfo->GetKey(i); + if (key.IsString() && layoutInfo->GetAttr(i).IsEnumerable()) { + handleKey_.Update(key); + JSTaggedValue value; + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; + } + if (obj->IsJSGlobalObject()) { + JSHandle globalDic(propertiesArr); + int size = globalDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = globalDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = globalDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = globalDic->FindEntry(entryKey); + JSTaggedValue value = globalDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + JSHandle nameDic(propertiesArr); + int size = nameDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = nameDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = nameDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), 
JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = nameDic->FindEntry(entryKey); + JSTaggedValue value = nameDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; +} + +bool FastJsonStringifier::SerializeKeysWithCache(const JSHandle &obj, bool hasContent, + CVector> &strCache, uint32_t &cacheIndex) +{ + JSHandle jsHclass(thread_, obj->GetJSHClass()); + JSHandle propertiesArr(thread_, obj->GetProperties()); + if (!propertiesArr->IsDictionaryMode()) { + for (; cacheIndex < strCache.size(); cacheIndex++) { + auto cacheValue = strCache[cacheIndex]; + CString str = cacheValue.first; + int index = cacheValue.second; + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + JSTaggedValue value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, str); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + if (obj->IsJSGlobalObject()) { + JSHandle globalDic(propertiesArr); + for (; cacheIndex < strCache.size(); cacheIndex++) { + auto cacheValue = strCache[cacheIndex]; + CString str = cacheValue.first; + int index = cacheValue.second; + JSTaggedValue value = globalDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, str); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + JSHandle nameDic(propertiesArr); + for (; cacheIndex < strCache.size(); cacheIndex++) { + auto cacheValue = strCache[cacheIndex]; + CString str = cacheValue.first; + int index = cacheValue.second; + JSTaggedValue value = nameDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, str); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; +} + +bool FastJsonStringifier::AppendJsonString(bool hasContent, CVector> &strCache, int index) +{ + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + return hasContent; + } + handleValue_.Update(serializeValue); + } + CString keyStr = SerializeObjectKey(handleKey_, hasContent); + strCache.emplace_back(std::pair(keyStr, index)); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if 
(!res.IsUndefined()) { + return true; + } + EraseKeyString(keyStr, hasContent); + return hasContent; +} + +bool FastJsonStringifier::FastAppendJsonString(bool hasContent, CString &key) +{ + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + return hasContent; + } + handleValue_.Update(serializeValue); + } + FastSerializeObjectKey(key, hasContent); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (!res.IsUndefined()) { + return true; + } + EraseKeyString(key, hasContent); + return hasContent; +} + +bool FastJsonStringifier::DefaultSerializeElements(const JSHandle &obj, bool hasContent) +{ + JSHandle elementsArr(thread_, obj->GetElements()); + if (!elementsArr->IsDictionaryMode()) { + uint32_t elementsLen = elementsArr->GetLength(); + for (uint32_t i = 0; i < elementsLen; ++i) { + if (!elementsArr->Get(i).IsHole()) { + handleKey_.Update(JSTaggedValue(i)); + handleValue_.Update(elementsArr->Get(i)); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + } else { + JSHandle numberDic(elementsArr); + CVector> sortArr; + int size = numberDic->Size(); + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = numberDic->GetKey(hashIndex); + if (!key.IsUndefined() && !key.IsHole()) { + PropertyAttributes attr = numberDic->GetAttributes(hashIndex); + if (attr.IsEnumerable()) { + JSTaggedValue numberKey = JSTaggedValue(static_cast(key.GetInt())); + sortArr.emplace_back(JSHandle(thread_, numberKey)); + } + } + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareNumber); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = numberDic->FindEntry(entryKey); + JSTaggedValue value = numberDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; +} + +bool FastJsonStringifier::DefaultSerializeKeys(const JSHandle &obj, bool hasContent) +{ + JSHandle propertiesArr(thread_, obj->GetProperties()); + if (!propertiesArr->IsDictionaryMode()) { + JSHandle jsHclass(thread_, obj->GetJSHClass()); + JSTaggedValue enumCache = jsHclass->GetEnumCache(); + if (!enumCache.IsNull()) { + JSHandle cache(thread_, enumCache); + uint32_t length = cache->GetLength(); + for (uint32_t i = 0; i < length; i++) { + JSTaggedValue key = cache->Get(i); + if (!key.IsString()) { + continue; + } + handleKey_.Update(key); + JSTaggedValue value; + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? 
obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + int end = static_cast(jsHclass->NumberOfProps()); + if (end <= 0) { + return hasContent; + } + for (int i = 0; i < end; i++) { + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + JSTaggedValue key = layoutInfo->GetKey(i); + if (key.IsString() && layoutInfo->GetAttr(i).IsEnumerable()) { + handleKey_.Update(key); + JSTaggedValue value; + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; + } + if (obj->IsJSGlobalObject()) { + JSHandle globalDic(propertiesArr); + int size = globalDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = globalDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = globalDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = globalDic->FindEntry(entryKey); + JSTaggedValue value = globalDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + JSHandle nameDic(propertiesArr); + int size = nameDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = nameDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = nameDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = nameDic->FindEntry(entryKey); + JSTaggedValue value = nameDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, 
+
+bool FastJsonStringifier::AppendJsonString(bool hasContent)
+{
+    if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) {
+        JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_);
+        RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false);
+        if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() ||
+            (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) {
+            return hasContent;
+        }
+        handleValue_.Update(serializeValue);
+    }
+    CString keyStr = SerializeObjectKey(handleKey_, hasContent);
+    JSTaggedValue res = SerializeJSONProperty(handleValue_);
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false);
+    if (!res.IsUndefined()) {
+        return true;
+    }
+    EraseKeyString(keyStr, hasContent);
+    return hasContent;
+}
+
+bool FastJsonStringifier::DefaultSerializeObject(const JSTaggedValue &object, uint32_t numOfKeys,
+                                                 uint32_t numOfElements)
+{
+    JSHandle<JSTaggedValue> value(thread_, object);
+    bool isContain = PushValue(value);
+    if (isContain) {
+        THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true);
+    }
+
+    result_ += "{";
+    bool hasContent = false;
+
+    JSHandle<JSObject> obj(value);
+    if (numOfElements > 0) {
+        hasContent = DefaultSerializeElements(obj, hasContent);
+        RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false);
+    }
+    if (numOfKeys > 0) {
+        hasContent = DefaultSerializeKeys(obj, hasContent);
+        RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false);
+    }
+
+    result_ += "}";
+    PopValue();
+    return true;
+}
+} // namespace panda::ecmascript::base
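
Editor's note: `DefaultSerializeObject` guards against cyclic object graphs. `PushValue` reports whether the value is already on the open-recursion stack, and a hit raises the same TypeError `JSON.stringify` throws for circular structures; `PopValue` unwinds on the way out. A self-contained sketch of that stack-based cycle check, with a simplified node type instead of the runtime's handles:

```cpp
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct Node {
    std::string name;
    std::vector<Node *> children;
};

// Open-recursion stack, playing the role of FastJsonStringifier::stack_.
std::vector<const Node *> stack;

std::string Serialize(const Node *node)
{
    // PushValue equivalent: a value already on the stack means a cycle.
    if (std::find(stack.begin(), stack.end(), node) != stack.end()) {
        throw std::runtime_error("stack contains value");  // circular structure
    }
    stack.push_back(node);
    std::string result = "{\"name\":\"" + node->name + "\",\"children\":[";
    for (size_t i = 0; i < node->children.size(); ++i) {
        if (i > 0) {
            result += ",";
        }
        result += Serialize(node->children[i]);
    }
    result += "]}";
    stack.pop_back();  // PopValue equivalent
    return result;
}

int main()
{
    Node a {"a", {}};
    Node b {"b", {&a}};
    a.children.push_back(&b);  // introduce a cycle: a -> b -> a
    try {
        Serialize(&a);
    } catch (const std::exception &e) {
        std::cout << "TypeError: " << e.what() << std::endl;
    }
    return 0;
}
```
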
diff --git a/ecmascript/base/fast_json_stringifier.h b/ecmascript/base/fast_json_stringifier.h
new file mode 100644
index 0000000000000000000000000000000000000000..32528ed402fc4d81e486f9917e96e7417196514d
--- /dev/null
+++ b/ecmascript/base/fast_json_stringifier.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_BASE_FAST_JSON_STRINGIFY_H
+#define ECMASCRIPT_BASE_FAST_JSON_STRINGIFY_H
+
+#include "ecmascript/js_tagged_value.h"
+#include "ecmascript/js_handle.h"
+#include "ecmascript/object_factory.h"
+#include "ecmascript/global_env.h"
+#include "ecmascript/mem/c_containers.h"
+
+namespace panda::ecmascript::base {
+class FastJsonStringifier {
+public:
+    static constexpr int32_t INVALID_INDEX = -1;
+    static constexpr int32_t JSON_CACHE_MASK = 62;
+    static constexpr int32_t JSON_CACHE_SIZE = 64;
+    static constexpr int32_t CACHE_MINIMUN_SIZIE = 5;
+    FastJsonStringifier() = default;
+
+    explicit FastJsonStringifier(JSThread *thread) : thread_(thread) {}
+
+    ~FastJsonStringifier() = default;
+    NO_COPY_SEMANTIC(FastJsonStringifier);
+    NO_MOVE_SEMANTIC(FastJsonStringifier);
+
+    JSHandle<JSTaggedValue> Stringify(const JSHandle<JSTaggedValue> &value);
+
+private:
+    JSTaggedValue SerializeJSONProperty(const JSHandle<JSTaggedValue> &value);
+    JSTaggedValue GetSerializeValue(const JSHandle<JSTaggedValue> &key, const JSHandle<JSTaggedValue> &value);
+    CString SerializeObjectKey(const JSHandle<JSTaggedValue> &key, bool hasContent);
+
+    bool SerializeJSONObject(const JSHandle<JSTaggedValue> &value);
+
+    bool SerializeJSArray(const JSHandle<JSTaggedValue> &value);
+    bool SerializeJSProxy(const JSHandle<JSTaggedValue> &object);
+
+    void SerializePrimitiveRef(const JSHandle<JSTaggedValue> &primitiveRef);
+
+    bool PushValue(const JSHandle<JSTaggedValue> &value);
+
+    void PopValue();
+
+    bool AppendJsonString(bool hasContent, CVector<std::pair<CString, int32_t>> &strCache, int index);
+    bool FastAppendJsonString(bool hasContent, CString &key);
+    bool TryCacheSerializeElements(const JSHandle<JSObject> &obj, bool hasContent,
+                                   CVector<std::pair<CString, int32_t>> &strCache);
+    bool SerializeElementsWithCache(const JSHandle<JSObject> &obj, bool hasContent,
+                                    CVector<std::pair<CString, int32_t>> &strCache, uint32_t &cacheIndex,
+                                    uint32_t elementSize);
+    bool TryCacheSerializeKeys(const JSHandle<JSObject> &obj, bool hasContent,
+                               CVector<std::pair<CString, int32_t>> &strCache);
+    bool SerializeKeysWithCache(const JSHandle<JSObject> &obj, bool hasContent,
+                                CVector<std::pair<CString, int32_t>> &strCache, uint32_t &cacheIndex);
+    bool AppendJsonString(bool hasContent);
+    bool DefaultSerializeKeys(const JSHandle<JSObject> &obj, bool hasContent);
+    bool DefaultSerializeElements(const JSHandle<JSObject> &obj, bool hasContent);
+    bool DefaultSerializeObject(const JSTaggedValue &object, uint32_t numOfKeys, uint32_t numOfElements);
+
+    inline void EraseKeyString(CString &keyStr, bool hasContent)
+    {
+        size_t keyLength = keyStr.length() + (hasContent ? 1 : 0) + 1;
+        result_.erase(result_.end() - keyLength, result_.end());
+    }
+
+    inline void FastSerializeObjectKey(CString &key, bool hasContent)
+    {
+        if (hasContent) {
+            result_ += ",";
+        }
+
+        result_ += key;
+        result_ += ":";
+    }
+
+    inline int32_t FindCache(JSHClass *hclass, size_t numOfKeys)
+    {
+        size_t index = GetHash(hclass, numOfKeys);
+        JSTaggedValue cacheHclass = hclassCache_->Get(index);
+        if (cacheHclass != JSTaggedValue::Hole()) {
+            if (JSHClass::Cast(cacheHclass.GetTaggedObject()) == hclass) {
+                return index;
+            } else {
+                cacheHclass = hclassCache_->Get(++index);
+                if (JSHClass::Cast(cacheHclass.GetTaggedObject()) == hclass) {
+                    return index;
+                } else {
+                    return INVALID_INDEX;
+                }
+            }
+        }
+        return INVALID_INDEX;
+    }
+
+    inline void SetCache(JSHClass *hclass, size_t numOfKeys, CVector<std::pair<CString, int32_t>> &value)
+    {
+        size_t index = GetHash(hclass, numOfKeys);
+        JSTaggedValue cacheHclass = hclassCache_->Get(index);
+        if (cacheHclass != JSTaggedValue::Hole()) {
+            cacheHclass = hclassCache_->Get(++index);
+            if (cacheHclass != JSTaggedValue::Hole()) {
+                --index;
+            }
+        }
+        hclassCache_->Set(thread_, index, JSTaggedValue(hclass));
+        thread_->GetCurrentEcmaContext()->SetJsonStringifyCache(index, value);
+    }
+
+    inline size_t GetHash(JSHClass *hclass, size_t numOfKeys)
+    {
+        uintptr_t ptr = reinterpret_cast<uintptr_t>(hclass);
+        size_t hash = (ptr + numOfKeys) & JSON_CACHE_MASK;
+        return hash;
+    }
+
+    CString result_;
+    JSThread *thread_ {nullptr};
+    ObjectFactory *factory_ {nullptr};
+    CVector<JSHandle<JSTaggedValue>> stack_;
+    JSMutableHandle<JSTaggedValue> handleKey_ {};
+    JSMutableHandle<JSTaggedValue> handleValue_ {};
+    bool cacheable_ {true};
+    JSHandle<TaggedArray> hclassCache_ {};
+};
+} // namespace panda::ecmascript::base
+#endif // ECMASCRIPT_BASE_FAST_JSON_STRINGIFY_H
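
Editor's note: the constants above describe a small direct-mapped cache keyed by hidden class. `GetHash` masks `(hclass pointer + key count)` with `JSON_CACHE_MASK` (62, i.e. 0b111110), so every base slot is even, and `FindCache`/`SetCache` probe the adjacent odd slot as a second way before giving up or evicting. A standalone sketch of that two-way probing scheme, with plain pointers and strings in place of `JSHClass` and the cached key vectors (names are illustrative):

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

constexpr int32_t INVALID_INDEX = -1;
constexpr size_t JSON_CACHE_MASK = 62;  // 0b111110: base slots are always even
constexpr size_t JSON_CACHE_SIZE = 64;  // base slot + neighbour = a 2-way set

struct Entry {
    const void *key = nullptr;  // stands in for the JSHClass pointer
    std::string payload;        // stands in for the cached serialized keys
};

std::array<Entry, JSON_CACHE_SIZE> cache;

size_t GetHash(const void *hclass, size_t numOfKeys)
{
    return (reinterpret_cast<uintptr_t>(hclass) + numOfKeys) & JSON_CACHE_MASK;
}

int32_t FindCache(const void *hclass, size_t numOfKeys)
{
    size_t index = GetHash(hclass, numOfKeys);
    if (cache[index].key == hclass) {
        return static_cast<int32_t>(index);      // hit in the base slot
    }
    if (cache[index + 1].key == hclass) {
        return static_cast<int32_t>(index + 1);  // hit in the second way
    }
    return INVALID_INDEX;
}

void SetCache(const void *hclass, size_t numOfKeys, const std::string &payload)
{
    size_t index = GetHash(hclass, numOfKeys);
    if (cache[index].key != nullptr && cache[index + 1].key == nullptr) {
        ++index;  // base slot taken, neighbour free: use the second way
    }
    cache[index] = {hclass, payload};  // both full: evict the base slot
}

int main()
{
    int dummyA = 0;
    int dummyB = 0;
    SetCache(&dummyA, 3, "cached keys for A");
    SetCache(&dummyB, 3, "cached keys for B");
    std::cout << FindCache(&dummyA, 3) << " " << FindCache(&dummyB, 3) << std::endl;
    return 0;
}
```
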
diff --git a/ecmascript/base/file_header.h b/ecmascript/base/file_header.h
index a34ce20ead1f27bbd9926656135e228b97d2961e..1d70e938419f522e5834eed87c6d05d13ca1100d 100644
--- a/ecmascript/base/file_header.h
+++ b/ecmascript/base/file_header.h
@@ -19,15 +19,18 @@
 #include "ecmascript/base/string_helper.h"
 #include "ecmascript/log_wrapper.h"
 #include "utils/bit_utils.h"
+#include "zlib.h"
+
 #include <array>
 #include <cstdint>
 #include <string>

 namespace panda::ecmascript::base {
-class FileHeader {
+class FileHeaderBase {
 public:
     static constexpr size_t MAGIC_SIZE = 8;
     static constexpr size_t VERSION_SIZE = 4;
+    static constexpr uint32_t CHECKSUM_END_OFFSET = MAGIC_SIZE + VERSION_SIZE + sizeof(uint32_t);
     static constexpr std::array<uint8_t, MAGIC_SIZE> MAGIC = {'P', 'A', 'N', 'D', 'A', '\0', '\0', '\0'};
     using VersionType = std::array<uint8_t, VERSION_SIZE>;
@@ -59,23 +62,7 @@ public:
         return ret;
     }

-protected:
-    explicit FileHeader(const VersionType &lastVersion) : magic_(MAGIC), version_(lastVersion) {}
-
-    static bool VerifyVersion(const char *fileDesc, const VersionType &currVersion, const VersionType &lastVersion,
-                              bool strictMatch)
-    {
-        bool matched = strictMatch ? currVersion == lastVersion : currVersion <= lastVersion;
-        if (!matched) {
-            LOG_HOST_TOOL_ERROR << fileDesc << " version error, expected version should be "
-                                << (strictMatch ?
"equal to " : "less or equal than ") << ConvToStr(lastVersion) - << ", but got " << ConvToStr(currVersion); - return false; - } - return true; - } - - bool InternalVerify(const char *fileDesc, const VersionType &lastVersion, bool strictMatch) const + bool VerifyVersion(const char *fileDesc, const VersionType &lastVersion, bool strictMatch) const { if (magic_ != MAGIC) { LOG_HOST_TOOL_ERROR << "Magic mismatch, please make sure " << fileDesc @@ -91,11 +78,37 @@ protected: return true; } - bool InternalVerifyVersion(const VersionType &expectVersion) const + bool CompatibleVerify(const VersionType &expectVersion) const { return version_ >= expectVersion; } + VersionType GetVersion() const + { + return version_; + } + + void SetVersion(VersionType version) + { + version_ = version; + } + +protected: + explicit FileHeaderBase(const VersionType &lastVersion) : magic_(MAGIC), version_(lastVersion) {} + + static bool VerifyVersion(const char *fileDesc, const VersionType &currVersion, const VersionType &lastVersion, + bool strictMatch) + { + bool matched = strictMatch ? (currVersion == lastVersion) : (currVersion <= lastVersion); + if (!matched) { + LOG_HOST_TOOL_ERROR << fileDesc << " version error, expected version should be " + << (strictMatch ? "equal to " : "less or equal than ") << ConvToStr(lastVersion) + << ", but got " << ConvToStr(currVersion); + return false; + } + return true; + } + std::string InternalGetVersion() const { return ConvToStr(version_); @@ -124,5 +137,52 @@ private: VersionType version_; }; +class FileHeaderElastic : public FileHeaderBase { +public: + static constexpr uint32_t ENDIAN_VALUE = 0x12345678; + void SetChecksum(uint32_t checksum) + { + checksum_ = checksum; + } + + uint32_t GetChecksum() const + { + return checksum_; + } + + void SetHeaderSize(uint32_t size) + { + headerSize_ = size; + } + + uint32_t GetHeaderSize() const + { + return headerSize_; + } + + void SetFileSize(uint32_t size) + { + fileSize_ = size; + } + + uint32_t GetFileSize() const + { + return fileSize_; + } + + uint32_t GetEndianTag() const + { + return endianTag_; + } + +protected: + explicit FileHeaderElastic(const VersionType &lastVersion) : FileHeaderBase(lastVersion) {} + +private: + uint32_t checksum_ {0}; + uint32_t fileSize_ {0}; + uint32_t headerSize_ {0}; + uint32_t endianTag_ {ENDIAN_VALUE}; +}; } // namespace panda::ecmascript::base #endif // ECMASCRIPT_BASE_FILE_HEADER_H diff --git a/ecmascript/base/json_helper.cpp b/ecmascript/base/json_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c0fb8de8bd83dc65c3d352c95bfd24d0ced7449d --- /dev/null +++ b/ecmascript/base/json_helper.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ecmascript/base/json_helper.h"
+
+#include <cstring>
+#include <iomanip>
+#include <sstream>
+
+namespace panda::ecmascript::base {
+constexpr unsigned char CODE_SPACE = 0x20;
+constexpr int FOUR_HEX = 4;
+constexpr char ZERO_FIRST = static_cast<char>(0xc0); // \u0000 => c0 80
+
+bool JsonHelper::IsFastValueToQuotedString(const char *value)
+{
+    if (strpbrk(value, "\"\\\b\f\n\r\t") != nullptr) {
+        return false;
+    }
+    while (*value != '\0') {
+        if ((*value > 0 && *value < CODE_SPACE) || *value == ZERO_FIRST) {
+            return false;
+        }
+        value++;
+    }
+    return true;
+}
+
+CString JsonHelper::ValueToQuotedString(CString str)
+{
+    CString product;
+    const char *value = str.c_str();
+    // fast mode
+    bool isFast = IsFastValueToQuotedString(value);
+    if (isFast) {
+        product += "\"";
+        product += str;
+        product += "\"";
+        return product;
+    }
+    // 1. Let product be code unit 0x0022 (QUOTATION MARK).
+    product += "\"";
+    // 2. For each code unit C in value
+    for (const char *c = value; *c != 0; ++c) {
+        switch (*c) {
+            /*
+             * a. If C is 0x0022 (QUOTATION MARK) or 0x005C (REVERSE SOLIDUS), then
+             *    i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS).
+             *    ii. Let product be the concatenation of product and C.
+             */
+            case '\"':
+                product += "\\\"";
+                break;
+            case '\\':
+                product += "\\\\";
+                break;
+            /*
+             * b. Else if C is 0x0008 (BACKSPACE), 0x000C (FORM FEED), 0x000A (LINE FEED), 0x000D (CARRIAGE RETURN),
+             *    or 0x000B (LINE TABULATION), then
+             *    i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS).
+             *    ii. Let abbrev be the String value corresponding to the value of C as follows:
+             *        BACKSPACE "b"
+             *        FORM FEED (FF) "f"
+             *        LINE FEED (LF) "n"
+             *        CARRIAGE RETURN (CR) "r"
+             *        LINE TABULATION "t"
+             *    iii. Let product be the concatenation of product and abbrev.
+             */
+            case '\b':
+                product += "\\b";
+                break;
+            case '\f':
+                product += "\\f";
+                break;
+            case '\n':
+                product += "\\n";
+                break;
+            case '\r':
+                product += "\\r";
+                break;
+            case '\t':
+                product += "\\t";
+                break;
+            case ZERO_FIRST:
+                product += "\\u0000";
+                ++c;
+                break;
+            default:
+                // c. Else if C has a code unit value less than 0x0020 (SPACE), then
+                if (*c > 0 && *c < CODE_SPACE) {
+                    /*
+                     * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS).
+                     * ii. Let product be the concatenation of product and "u".
+                     * iii. Let hex be the string result of converting the numeric code unit value of C to a String of
+                     *      four hexadecimal digits. Alphabetic hexadecimal digits are presented as lowercase Latin letters.
+                     * iv. Let product be the concatenation of product and hex.
+                     */
+                    std::ostringstream oss;
+                    oss << "\\u" << std::hex << std::setfill('0') << std::setw(FOUR_HEX) << static_cast<uint32_t>(*c);
+                    product += oss.str();
+                } else {
+                    // Else,
+                    // i. Let product be the concatenation of product and C.
+                    product += *c;
+                }
+        }
+    }
+    // 3. Let product be the concatenation of product and code unit 0x0022 (QUOTATION MARK).
+    product += "\"";
+    // Return product.
+    return product;
+}
+} // namespace panda::ecmascript::base
\ No newline at end of file
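
Editor's note: the escaping above follows the spec's QuoteJSONString steps, with a `strpbrk`-based fast path that quotes and copies the whole string when no escapable character is present, plus a special case for 0xC0: the runtime's CString carries an embedded NUL as the two-byte sequence C0 80, which must round-trip back to `\u0000`. A quick standalone driver showing how such a fast-path test classifies inputs (the `IsFastQuotable` below is an illustrative reimplementation, not the runtime's `JsonHelper`):

```cpp
#include <cstdio>
#include <cstring>

// Illustrative reimplementation of the fast-path test: any character that
// would need escaping disables the quote-and-copy shortcut.
bool IsFastQuotable(const char *value)
{
    if (strpbrk(value, "\"\\\b\f\n\r\t") != nullptr) {
        return false;
    }
    for (; *value != '\0'; ++value) {
        if ((*value > 0 && *value < 0x20) || *value == static_cast<char>(0xc0)) {
            return false;  // control character or encoded NUL: slow path
        }
    }
    return true;
}

int main()
{
    const char *samples[] = {
        "plain text",     // fast path: emitted as "plain text" unchanged
        "line\nbreak",    // slow path: \n must become \\n
        "quote\"inside",  // slow path: embedded quotation mark
    };
    for (const char *s : samples) {
        std::printf("fast=%d\n", IsFastQuotable(s));
    }
    return 0;
}
```
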
diff --git a/ecmascript/base/json_helper.h b/ecmascript/base/json_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cd093f9c5c82a50df600704058b09af1e2e5e34
--- /dev/null
+++ b/ecmascript/base/json_helper.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_BASE_JSON_HELPER_H
+#define ECMASCRIPT_BASE_JSON_HELPER_H
+
+#include "ecmascript/js_handle.h"
+#include "ecmascript/mem/c_string.h"
+#include "ecmascript/property_attributes.h"
+
+namespace panda::ecmascript::base {
+
+class JsonHelper {
+public:
+    static CString ValueToQuotedString(CString str);
+
+    static bool IsFastValueToQuotedString(const char *value);
+
+    static inline bool CompareKey(const std::pair<JSHandle<JSTaggedValue>, PropertyAttributes> &a,
+                                  const std::pair<JSHandle<JSTaggedValue>, PropertyAttributes> &b)
+    {
+        return a.second.GetDictionaryOrder() < b.second.GetDictionaryOrder();
+    }
+
+    static inline bool CompareNumber(const JSHandle<JSTaggedValue> &a, const JSHandle<JSTaggedValue> &b)
+    {
+        return a->GetNumber() < b->GetNumber();
+    }
+};
+
+} // namespace panda::ecmascript::base
+
+#endif // ECMASCRIPT_BASE_JSON_HELPER_H
\ No newline at end of file
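
Editor's note: the json_parser hunks that follow swap `std::stod` for `std::strtod`: `stod` throws `std::out_of_range` on overflow, while `strtod` reports it through `errno`, letting the parser clamp out-of-range literals to ±Infinity without relying on C++ exceptions. A minimal sketch of that error-handling pattern, mirroring the patched ParseNumber paths:

```cpp
#include <cerrno>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <string>

// Parse a JSON number literal, mapping ERANGE to +/-Infinity the way the
// patched code does instead of letting std::stod throw.
// (As in the patch, underflow is not distinguished from overflow here.)
double ParseNumber(const std::string &literal)
{
    errno = 0;
    double v = std::strtod(literal.c_str(), nullptr);
    if (errno == ERANGE) {
        errno = 0;  // reset so later parses see a clean state
        return v > 0 ? std::numeric_limits<double>::infinity()
                     : -std::numeric_limits<double>::infinity();
    }
    return v;
}

int main()
{
    std::cout << ParseNumber("1e309") << "\n";   // inf: exponent overflows a double
    std::cout << ParseNumber("-1e309") << "\n";  // -inf
    std::cout << ParseNumber("2.5") << "\n";     // 2.5: normal path
    return 0;
}
```
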
diff --git a/ecmascript/base/json_parser.cpp b/ecmascript/base/json_parser.cpp
index 69dbf3405e6f4a5a46f76f64072f10b80e5a2d5a..6cd134cbfa696edc3e58138cf0c2e15c92727d78 100644
--- a/ecmascript/base/json_parser.cpp
+++ b/ecmascript/base/json_parser.cpp
@@ -22,9 +22,11 @@ JSHandle<JSTaggedValue> Internalize::InternalizeJsonProperty(JSThread *thread, c
 {
     JSHandle<JSObject> objHandle(holder);
     JSHandle<JSTaggedValue> val = JSTaggedValue::GetProperty(thread, objHandle, name).GetValue();
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     JSHandle<JSTaggedValue> lengthKey = thread->GlobalConstants()->GetHandledLengthString();
     if (val->IsECMAObject()) {
         JSHandle<JSObject> obj = JSTaggedValue::ToObject(thread, val);
+        RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
         bool isArray = val->IsArray(thread);
         if (isArray) {
             JSHandle<JSTaggedValue> lenResult = JSTaggedValue::GetProperty(thread, val, lengthKey).GetValue();
@@ -38,6 +40,7 @@
                 // Let prop be ! ToString((I)).
                 keyUnknow.Update(JSTaggedValue(i));
                 keyName.Update(JSTaggedValue::ToString(thread, keyUnknow).GetTaggedValue());
+                RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
                 RecurseAndApply(thread, obj, keyName, receiver);
             }
         } else {
@@ -54,12 +57,13 @@
     }
     // Return ? Call(receiver, holder, « name, val »).
-    const int32_t argsLength = 2; // 2: « name, val »
+    const uint32_t argsLength = 2; // 2: « name, val »
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, receiver, objHandle, undefined, argsLength);
     RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     info->SetCallArg(name.GetTaggedValue(), val.GetTaggedValue());
     JSTaggedValue result = JSFunction::Call(info);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     return JSHandle<JSTaggedValue>(thread, result);
 }
diff --git a/ecmascript/base/json_parser.h b/ecmascript/base/json_parser.h
index ef23776a6ca2c736e94bd1fe8f9d47bce1ed24fc..8552decb7710ba9b22abf251bc34041f885bf247 100644
--- a/ecmascript/base/json_parser.h
+++ b/ecmascript/base/json_parser.h
@@ -16,6 +16,8 @@
 #ifndef ECMASCRIPT_BASE_JSON_PARSE_INL_H
 #define ECMASCRIPT_BASE_JSON_PARSE_INL_H

+#include <cerrno>
+
 #include "ecmascript/base/json_parser.h"
 #include "ecmascript/base/builtins_base.h"
 #include "ecmascript/base/number_helper.h"
@@ -134,7 +136,11 @@ private:
         if (isFast) {
             std::string strNum(current_, end_ + 1);
             current_ = end_;
-            double v = std::stod(strNum);
+            double v = std::strtod(strNum.c_str(), nullptr);
+            if (errno == ERANGE) {
+                errno = 0;
+                return v > 0 ? JSTaggedValue(base::POSITIVE_INFINITY) : JSTaggedValue(-base::POSITIVE_INFINITY);
+            }
             return JSTaggedValue::TryCastDoubleToInt32(v);
         }
     }
@@ -160,7 +166,11 @@
         std::string strNum(current, end_ + 1);
         current_ = end_;
-        double v = std::stod(strNum);
+        double v = std::strtod(strNum.c_str(), nullptr);
+        if (errno == ERANGE) {
+            errno = 0;
+            return v > 0 ? JSTaggedValue(base::POSITIVE_INFINITY) : JSTaggedValue(-base::POSITIVE_INFINITY);
+        }
         return JSTaggedValue::TryCastDoubleToInt32(v);
     }
@@ -241,9 +251,9 @@
             }
             current_++;
         } else if (UNLIKELY(*current_ > ASCII_END)) {
-            if (UNLIKELY(*current_ > utf_helper::DECODE_LEAD_LOW && *current_ < utf_helper::DECODE_LEAD_HIGH &&
-                *(current_ + 1) > utf_helper::DECODE_TRAIL_LOW &&
-                *(current_ + 1) < utf_helper::DECODE_TRAIL_HIGH)) {
+            if (UNLIKELY(*current_ >= utf_helper::DECODE_LEAD_LOW && *current_ <= utf_helper::DECODE_LEAD_HIGH &&
+                *(current_ + 1) >= utf_helper::DECODE_TRAIL_LOW &&
+                *(current_ + 1) <= utf_helper::DECODE_TRAIL_HIGH)) {
                 std::u16string str(current_, current_ + 2); // 2 means twice as many bytes as normal u16string
                 res += ConvertToString(StringHelper::U16stringToString(str));
                 current_ += 2; // 2 means twice as many bytes as normal u16string
@@ -365,6 +375,7 @@
         SkipStartWhiteSpace();
         if (*current_ == '"') {
             keyHandle.Update(ParseString());
+            RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_);
         } else {
             if (*current_ == '}' && (inObjorArr || current_ == range_)) {
                 return result.GetTaggedValue();
@@ -378,6 +389,7 @@
                 THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Object in JSON", JSTaggedValue::Exception());
             }
             value = ParseJSONText();
+            RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_);
             // fast path
             JSTaggedValue res = ObjectFastOperator::SetPropertyByValue(thread_, result.GetTaggedValue(),
                                                                        keyHandle.GetTaggedValue(), value);
@@ -385,6 +397,7 @@
             // slow path
             JSTaggedValue::SetProperty(thread_, JSHandle<JSTaggedValue>(result), keyHandle,
                                        JSHandle<JSTaggedValue>(thread_, value), true);
+            RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_);
         }
         GetNextNonSpaceChar();
         if (*current_ == ',') {
diff --git a/ecmascript/base/json_stringifier.cpp b/ecmascript/base/json_stringifier.cpp
index 133f91feaa8b9258f5ec7fcd66445138f642424e..82658b5ea16d8bceb6de879ca4e0a0018b844040 100644
---
a/ecmascript/base/json_stringifier.cpp +++ b/ecmascript/base/json_stringifier.cpp @@ -15,11 +15,8 @@ #include "ecmascript/base/json_stringifier.h" -#include -#include -#include - #include "ecmascript/base/builtins_base.h" +#include "ecmascript/base/json_helper.h" #include "ecmascript/base/number_helper.h" #include "ecmascript/builtins/builtins_errors.h" #include "ecmascript/ecma_runtime_call_info.h" @@ -36,109 +33,7 @@ #include "ecmascript/object_fast_operator-inl.h" namespace panda::ecmascript::base { -constexpr unsigned char CODE_SPACE = 0x20; constexpr int GAP_MAX_LEN = 10; -constexpr int FOUR_HEX = 4; -constexpr char ZERO_FIRST = static_cast(0xc0); // \u0000 => c0 80 - -bool JsonStringifier::IsFastValueToQuotedString(const char *value) -{ - if (strpbrk(value, "\"\\\b\f\n\r\t") != nullptr) { - return false; - } - while (*value != '\0') { - if ((*value > 0 && *value < CODE_SPACE) || *value == ZERO_FIRST) { - return false; - } - value++; - } - return true; -} - -CString JsonStringifier::ValueToQuotedString(CString str) -{ - CString product; - const char *value = str.c_str(); - // fast mode - bool isFast = IsFastValueToQuotedString(value); - if (isFast) { - product += "\""; - product += str; - product += "\""; - return product; - } - // 1. Let product be code unit 0x0022 (QUOTATION MARK). - product += "\""; - // 2. For each code unit C in value - for (const char *c = value; *c != 0; ++c) { - switch (*c) { - /* - * a. If C is 0x0022 (QUOTATION MARK) or 0x005C (REVERSE SOLIDUS), then - * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS). - * ii. Let product be the concatenation of product and C. - */ - case '\"': - product += "\\\""; - break; - case '\\': - product += "\\\\"; - break; - /* - * b. Else if C is 0x0008 (BACKSPACE), 0x000C (FORM FEED), 0x000A (LINE FEED), 0x000D (CARRIAGE RETURN), - * or 0x000B (LINE TABULATION), then - * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS). - * ii. Let abbrev be the String value corresponding to the value of C as follows: - * BACKSPACE "b" - * FORM FEED (FF) "f" - * LINE FEED (LF) "n" - * CARRIAGE RETURN (CR) "r" - * LINE TABULATION "t" - * iii. Let product be the concatenation of product and abbrev. - */ - case '\b': - product += "\\b"; - break; - case '\f': - product += "\\f"; - break; - case '\n': - product += "\\n"; - break; - case '\r': - product += "\\r"; - break; - case '\t': - product += "\\t"; - break; - case ZERO_FIRST: - product += "\\u0000"; - ++c; - break; - default: - // c. Else if C has a code unit value less than 0x0020 (SPACE), then - if (*c > 0 && *c < CODE_SPACE) { - /* - * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS). - * ii. Let product be the concatenation of product and "u". - * iii. Let hex be the string result of converting the numeric code unit value of C to a String of - * four hexadecimal digits. Alphabetic hexadecimal digits are presented as lowercase Latin letters. - * iv. Let product be the concatenation of product and hex. - */ - std::ostringstream oss; - oss << "\\u" << std::hex << std::setfill('0') << std::setw(FOUR_HEX) << static_cast(*c); - product += oss.str(); - } else { - // Else, - // i. Let product be the concatenation of product and C. - product += *c; - } - } - } - // 3. Let product be the concatenation of product and code unit 0x0022 (QUOTATION MARK). - product += "\""; - // Return product. 
- return product; -} JSHandle JsonStringifier::Stringify(const JSHandle &value, const JSHandle &replacer, @@ -313,7 +208,7 @@ JSTaggedValue JsonStringifier::GetSerializeValue(const JSHandle & if (UNLIKELY(replacer->IsCallable())) { handleValue_.Update(tagValue); // a. Let value be Call(ReplacerFunction, holder, «key, value»). - const int32_t argsLength = 2; // 2: «key, value» + const uint32_t argsLength = 2; // 2: «key, value» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread_, replacer, object, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); @@ -369,12 +264,13 @@ JSTaggedValue JsonStringifier::SerializeJSONProperty(const JSHandle strHandle = JSHandle(valHandle); auto string = JSHandle(thread_, EcmaStringAccessor::Flatten(thread_->GetEcmaVM(), strHandle)); CString str = ConvertToString(*string, StringConvertedUsage::LOGICOPERATION); - str = ValueToQuotedString(str); + str = JsonHelper::ValueToQuotedString(str); result_ += str; return tagValue; } @@ -428,7 +324,7 @@ void JsonStringifier::SerializeObjectKey(const JSHandle &key, boo str = ConvertToString(*JSTaggedValue::ToString(thread_, key), StringConvertedUsage::LOGICOPERATION); } result_ += stepBegin; - str = ValueToQuotedString(str); + str = JsonHelper::ValueToQuotedString(str); result_ += str; result_ += ":"; result_ += stepEnd; @@ -477,6 +373,7 @@ bool JsonStringifier::SerializeJSONObject(const JSHandle &value, for (uint32_t i = 0; i < arrLength; i++) { handleKey_.Update(propertyArray->Get(i)); JSHandle valueHandle = JSTaggedValue::GetProperty(thread_, value, handleKey_).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); JSTaggedValue serializeValue = GetSerializeValue(value, handleKey_, valueHandle, replacer); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || @@ -608,6 +505,7 @@ bool JsonStringifier::SerializeJSArray(const JSHandle &value, con if (len > 0) { for (uint32_t i = 0; i < len; i++) { JSTaggedValue tagVal = ObjectFastOperator::FastGetPropertyByIndex(thread_, value.GetTaggedValue(), i); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); if (UNLIKELY(tagVal.IsAccessor())) { tagVal = JSObject::CallGetter(thread_, AccessorData::Cast(tagVal.GetTaggedObject()), value); } @@ -647,7 +545,7 @@ void JsonStringifier::SerializePrimitiveRef(const JSHandle &primi auto priStr = JSTaggedValue::ToString(thread_, primitiveRef); RETURN_IF_ABRUPT_COMPLETION(thread_); CString str = ConvertToString(*priStr, StringConvertedUsage::LOGICOPERATION); - str = ValueToQuotedString(str); + str = JsonHelper::ValueToQuotedString(str); result_ += str; } else if (primitive.IsNumber()) { auto priNum = JSTaggedValue::ToNumber(thread_, primitiveRef); @@ -692,7 +590,7 @@ bool JsonStringifier::SerializeElements(const JSHandle &obj, const JSH } } } - std::sort(sortArr.begin(), sortArr.end(), CompareNumber); + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareNumber); for (const auto &entry : sortArr) { JSTaggedValue entryKey = entry.GetTaggedValue(); handleKey_.Update(entryKey); @@ -732,7 +630,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl PropertyAttributes attr(layoutInfo->GetAttr(index)); ASSERT(static_cast(attr.GetOffset()) == index); value = attr.IsInlinedProps() - ? obj->GetPropertyInlinedProps(static_cast(index)) + ? 
obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); if (attr.IsInlinedProps() && value.IsHole()) { continue; @@ -761,7 +659,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl PropertyAttributes attr(layoutInfo->GetAttr(index)); ASSERT(static_cast(attr.GetOffset()) == index); value = attr.IsInlinedProps() - ? obj->GetPropertyInlinedProps(static_cast(index)) + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); if (attr.IsInlinedProps() && value.IsHole()) { continue; @@ -793,7 +691,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); sortArr.emplace_back(pair); } - std::sort(sortArr.begin(), sortArr.end(), CompareKey); + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); for (const auto &entry : sortArr) { JSTaggedValue entryKey = entry.first.GetTaggedValue(); handleKey_.Update(entryKey); @@ -824,7 +722,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); sortArr.emplace_back(pair); } - std::sort(sortArr.begin(), sortArr.end(), CompareKey); + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); for (const auto &entry : sortArr) { JSTaggedValue entryKey = entry.first.GetTaggedValue(); handleKey_.Update(entryKey); diff --git a/ecmascript/base/json_stringifier.h b/ecmascript/base/json_stringifier.h index eba164e0b519820d23542532ebaff29728d3840e..d90189f4d7aa171549dc1b044cb003df7774cbd9 100644 --- a/ecmascript/base/json_stringifier.h +++ b/ecmascript/base/json_stringifier.h @@ -66,17 +66,6 @@ private: bool SerializeElements(const JSHandle &obj, const JSHandle &replacer, bool hasContent); bool SerializeKeys(const JSHandle &obj, const JSHandle &replacer, bool hasContent); - static inline bool CompareKey(const std::pair, PropertyAttributes> &a, - const std::pair, PropertyAttributes> &b) - { - return a.second.GetDictionaryOrder() < b.second.GetDictionaryOrder(); - } - - static inline bool CompareNumber(const JSHandle &a, const JSHandle &b) - { - return a->GetNumber() < b->GetNumber(); - } - CString gap_; CString result_; CString indent_; diff --git a/ecmascript/base/number_helper.cpp b/ecmascript/base/number_helper.cpp index e3c3a11034d866885cb0922401b6f7042070105e..5fca63db7364f25c9f685b7309539848aab1253a 100644 --- a/ecmascript/base/number_helper.cpp +++ b/ecmascript/base/number_helper.cpp @@ -314,6 +314,12 @@ CString NumberHelper::IntToString(int number) return ToCString(number); } +JSHandle NumberHelper::IntToEcmaString(const JSThread *thread, int number) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + return factory->NewFromASCII(ToCString(number)); +} + // 7.1.12.1 ToString Applied to the Number Type JSHandle NumberHelper::NumberToString(const JSThread *thread, JSTaggedValue number) { diff --git a/ecmascript/base/number_helper.h b/ecmascript/base/number_helper.h index 2552368a71024b2fa8279a73f916bc81911f917f..a6a1905312c4d0c86ef9417422c5f98aa27ad988 100644 --- a/ecmascript/base/number_helper.h +++ b/ecmascript/base/number_helper.h @@ -46,6 +46,7 @@ static constexpr double MAX_VALUE = std::numeric_limits::max(); static constexpr double MIN_VALUE = std::numeric_limits::min(); static constexpr double POSITIVE_INFINITY = std::numeric_limits::infinity(); static constexpr 
double NAN_VALUE = std::numeric_limits::quiet_NaN(); +static constexpr uint64_t MAX_UINT64_VALUE = std::numeric_limits::max(); // Helper defines for double static constexpr int DOUBLE_MAX_PRECISION = 17; @@ -96,6 +97,7 @@ public: } static JSTaggedValue DoubleToString(JSThread *thread, double number, int radix); static bool IsEmptyString(const uint8_t *start, const uint8_t *end); + static JSHandle IntToEcmaString(const JSThread *thread, int number); static JSHandle NumberToString(const JSThread *thread, JSTaggedValue number); static double TruncateDouble(double d); static int64_t DoubleToInt64(double d); diff --git a/ecmascript/base/path_helper.cpp b/ecmascript/base/path_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2bc6d00639a870878078f2df7e590d229fbadfc7 --- /dev/null +++ b/ecmascript/base/path_helper.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ecmascript/base/path_helper.h" + +namespace panda::ecmascript::base { +/* + * Before: ./xxx/../xxx/xxx/ + * After: xxx/xxx + */ +CString PathHelper::NormalizePath(const CString &fileName) +{ + if (fileName.find(DOUBLE_SLASH_TAG) == CString::npos && + fileName.find(CURRENT_DIREATORY_TAG) == CString::npos && + fileName[fileName.size() - 1] != SLASH_TAG) { + return fileName; + } + CString res = ""; + size_t prev = 0; + size_t curr = fileName.find(SLASH_TAG); + CVector elems; + // eliminate parent directory path + while (curr != CString::npos) { + if (curr > prev) { + CString elem = fileName.substr(prev, curr - prev); + if (elem == DOUBLE_POINT_TAG && !elems.empty()) { // looking for xxx/../ + elems.pop_back(); + } else if (elem != POINT_STRING_TAG && elem != DOUBLE_POINT_TAG) { // remove ./ ../ + elems.push_back(elem); + } + } + prev = curr + 1; + curr = fileName.find(SLASH_TAG, prev); + } + if (prev != fileName.size()) { + elems.push_back(fileName.substr(prev)); + } + for (auto e : elems) { + if (res.size() == 0 && fileName.at(0) != SLASH_TAG) { + res.append(e); + continue; + } + res.append(1, SLASH_TAG).append(e); + } + return res; +} + +/* + * Before: xxx/xxx + * After: xxx/ + */ +JSHandle PathHelper::ResolveDirPath(JSThread *thread, CString fileName) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + // find last '/', '\\' + int foundPos = static_cast(fileName.find_last_of("/\\")); + if (foundPos == -1) { + return factory->NewFromUtf8(""); + } + CString dirPathStr = fileName.substr(0, foundPos + 1); + return factory->NewFromUtf8(dirPathStr); +} +} // namespace panda::ecmascript::base \ No newline at end of file diff --git a/ecmascript/base/path_helper.h b/ecmascript/base/path_helper.h index b0d3ecba732c3fd198dd2133ef73f0ae3f4d30ff..5ff0fb9bb965c8d78261b986a76a889c3f4eb049 100644 --- a/ecmascript/base/path_helper.h +++ b/ecmascript/base/path_helper.h @@ -27,157 +27,23 @@ namespace panda::ecmascript::base { class PathHelper { public: - static constexpr char EXT_NAME_ABC[] = ".abc"; - static constexpr char 
EXT_NAME_ETS[] = ".ets"; - static constexpr char EXT_NAME_TS[] = ".ts"; - static constexpr char EXT_NAME_JS[] = ".js"; - static constexpr char EXT_NAME_JSON[] = ".json"; - static constexpr char PREFIX_BUNDLE[] = "@bundle:"; - static constexpr char PREFIX_MODULE[] = "@module:"; - static constexpr char PREFIX_PACKAGE[] = "@package:"; - static constexpr char REQUIRE_NAITVE_MODULE_PREFIX[] = "@native:"; - static constexpr char REQUIRE_NAPI_OHOS_PREFIX[] = "@ohos:"; - static constexpr char REQUIRE_NAPI_APP_PREFIX[] = "@app:"; - static constexpr char NPM_PATH_SEGMENT[] = "node_modules"; - static constexpr char PACKAGE_PATH_SEGMENT[] = "pkg_modules"; - static constexpr char PACKAGE_ENTRY_FILE[] = "/index"; - static constexpr char BUNDLE_INSTALL_PATH[] = "/data/storage/el1/bundle/"; - static constexpr char MERGE_ABC_ETS_MODULES[] = "/ets/modules.abc"; - static constexpr char MODULE_DEFAULE_ETS[] = "/ets/"; - static constexpr char BUNDLE_SUB_INSTALL_PATH[] = "/data/storage/el1/"; - static constexpr char PREVIEW_OF_ACROSS_HAP_FLAG[] = "[preview]"; - static constexpr char NAME_SPACE_TAG[] = "@"; - static constexpr char PREVIER_TEST_DIR[] = ".test"; - - static constexpr size_t MAX_PACKAGE_LEVEL = 1; - static constexpr size_t SEGMENTS_LIMIT_TWO = 2; - static constexpr size_t EXT_NAME_ABC_LEN = 4; - static constexpr size_t EXT_NAME_ETS_LEN = 4; - static constexpr size_t EXT_NAME_TS_LEN = 3; - static constexpr size_t EXT_NAME_JS_LEN = 3; - static constexpr size_t EXT_NAME_JSON_LEN = 5; - static constexpr size_t PREFIX_BUNDLE_LEN = 8; - static constexpr size_t PREFIX_MODULE_LEN = 8; - static constexpr size_t PREFIX_PACKAGE_LEN = 9; - static constexpr size_t NATIVE_PREFIX_SIZE = 8; - static constexpr size_t OHOS_PREFIX_SIZE = 6; - static constexpr size_t APP_PREFIX_SIZE = 5; - - static void ResolveCurrentPath(JSThread *thread, - JSMutableHandle &dirPath, - JSMutableHandle &fileName, - const JSPandaFile *jsPandaFile) - { - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - CString fullName = jsPandaFile->GetJSPandaFileDesc(); - // find last '/' - int foundPos = static_cast(fullName.find_last_of("/\\")); - if (foundPos == -1) { - RETURN_IF_ABRUPT_COMPLETION(thread); - } - CString dirPathStr = fullName.substr(0, foundPos + 1); - JSHandle dirPathName = factory->NewFromUtf8(dirPathStr); - dirPath.Update(dirPathName.GetTaggedValue()); - - // Get filename from JSPandaFile - JSHandle cbFileName = factory->NewFromUtf8(fullName); - fileName.Update(cbFileName.GetTaggedValue()); - } - - static JSHandle ResolveDirPath(JSThread *thread, - JSHandle fileName) - { - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - CString fullName = ConvertToString(fileName.GetTaggedValue()); - // find last '/' - int foundPos = static_cast(fullName.find_last_of("/\\")); - if (foundPos == -1) { - RETURN_HANDLE_IF_ABRUPT_COMPLETION(EcmaString, thread); - } - CString dirPathStr = fullName.substr(0, foundPos + 1); - return factory->NewFromUtf8(dirPathStr); - } - - static CString NormalizePath(const CString &fileName) - { - if (fileName.find("//") == CString::npos && fileName.find("./") == CString::npos && - fileName[fileName.size() - 1] != '/') { - return fileName; - } - const char delim = '/'; - CString res = ""; - size_t prev = 0; - size_t curr = fileName.find(delim); - CVector elems; - while (curr != CString::npos) { - if (curr > prev) { - CString elem = fileName.substr(prev, curr - prev); - if (elem == ".." && !elems.empty()) { - elems.pop_back(); - } else if (elem != "." 
&& elem != "..") { - elems.push_back(elem); - } - } - prev = curr + 1; - curr = fileName.find(delim, prev); - } - if (prev != fileName.size()) { - elems.push_back(fileName.substr(prev)); - } - for (auto e : elems) { - if (res.size() == 0 && fileName.at(0) != delim) { - res.append(e); - continue; - } - res.append(1, delim).append(e); - } - return res; - } - - static CString ParseOhmUrl(EcmaVM *vm, const CString &inputFileName, CString &outFileName) - { - CString bundleInstallName(BUNDLE_INSTALL_PATH); - size_t startStrLen = bundleInstallName.length(); - size_t pos = CString::npos; - - if (inputFileName.length() > startStrLen && inputFileName.compare(0, startStrLen, bundleInstallName) == 0) { - pos = startStrLen; - } - CString entryPoint; - if (pos != CString::npos) { - pos = inputFileName.find('/', startStrLen); - if (pos == CString::npos) { - LOG_FULL(FATAL) << "Invalid Ohm url, please check."; - } - CString moduleName = inputFileName.substr(startStrLen, pos - startStrLen); - outFileName = BUNDLE_INSTALL_PATH + moduleName + MERGE_ABC_ETS_MODULES; - entryPoint = vm->GetBundleName() + "/" + inputFileName.substr(startStrLen); - } else { - // Temporarily handle the relative path sent by arkui - if (StringHelper::StringStartWith(inputFileName, PREFIX_BUNDLE)) { - entryPoint = inputFileName.substr(PREFIX_BUNDLE_LEN); - outFileName = ParseUrl(vm, entryPoint); - } else { -#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) - entryPoint = vm->GetBundleName() + "/" + inputFileName; -#else - // if the inputFileName starts with '.test', the preview test page is started. - // in this case, the path ets does not need to be combined. - if (StringHelper::StringStartWith(inputFileName, PREVIER_TEST_DIR)) { - entryPoint = vm->GetBundleName() + "/" + vm->GetModuleName() + "/" + inputFileName; - } else { - entryPoint = vm->GetBundleName() + "/" + vm->GetModuleName() + MODULE_DEFAULE_ETS + inputFileName; - } -#endif - } - } - if (StringHelper::StringEndWith(entryPoint, EXT_NAME_ABC)) { - entryPoint.erase(entryPoint.length() - EXT_NAME_ABC_LEN, EXT_NAME_ABC_LEN); - } - return entryPoint; - } - - static void CropNamespaceIfAbsent(CString &moduleName) + static constexpr char COLON_TAG = ':'; + static constexpr char CURRENT_DIREATORY_TAG[] = "./"; + static constexpr char DOUBLE_POINT_TAG[] = ".."; + static constexpr char DOUBLE_SLASH_TAG[] = "//"; + static constexpr char NAME_SPACE_TAG = '@'; + static constexpr char POINT_STRING_TAG[] = "."; + static constexpr char POINT_TAG = '.'; + static constexpr char SLASH_TAG = '/'; + + static CString NormalizePath(const CString &fileName); + static JSHandle ResolveDirPath(JSThread *thread, CString fileName); + + /* + * Before: moduleName@nameSpace + * After: moduleName + */ + inline static void DeleteNamespace(CString &moduleName) { size_t pos = moduleName.find(NAME_SPACE_TAG); if (pos == CString::npos) { @@ -186,403 +52,41 @@ public: moduleName.erase(pos, moduleName.size() - pos); } - // current ohmUrl format : @bundle:bundlename/modulename@namespace/entry/src/index - static CString ParseUrl(EcmaVM *vm, const CString &entryPoint) + /* + * Before: bundleName/moduleName@namespace/moduleName/xxx/xxx + * After: moduleName/xxx/xxx + */ + inline static void AdaptOldIsaRecord(CString &recordName) { - CVector vec; - StringHelper::SplitString(entryPoint, vec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(DEBUG) << "ParseUrl SplitString filed, please check Url" << entryPoint; - return CString(); - } - CString bundleName = vec[0]; - 
CString moduleName = vec[1]; - CropNamespaceIfAbsent(moduleName); - - CString baseFileName; - if (bundleName != vm->GetBundleName()) { - // Cross-application - baseFileName = - BUNDLE_INSTALL_PATH + bundleName + "/" + moduleName + "/" + moduleName + MERGE_ABC_ETS_MODULES; - } else { - // Intra-application cross hap - baseFileName = BUNDLE_INSTALL_PATH + moduleName + MERGE_ABC_ETS_MODULES; - } - return baseFileName; - } - - static std::string ParseHapPath(const CString &fileName) - { - CString bundleSubInstallName(BUNDLE_SUB_INSTALL_PATH); - size_t startStrLen = bundleSubInstallName.length(); - if (fileName.length() > startStrLen && fileName.compare(0, startStrLen, bundleSubInstallName) == 0) { - CString hapPath = fileName.substr(startStrLen); - size_t pos = hapPath.find(MERGE_ABC_ETS_MODULES); - if (pos != CString::npos) { - return hapPath.substr(0, pos).c_str(); - } - } - return std::string(); - } - - static void CroppingRecord(CString &recordName) - { - size_t pos = recordName.find('/'); + size_t pos = recordName.find(SLASH_TAG); if (pos != CString::npos) { - pos = recordName.find('/', pos + 1); + pos = recordName.find(SLASH_TAG, pos + 1); if (pos != CString::npos) { recordName = recordName.substr(pos + 1); } } } - static CString ParsePrefixBundle(JSThread *thread, const JSPandaFile *jsPandaFile, - [[maybe_unused]] CString &baseFileName, CString moduleRequestName, [[maybe_unused]] CString recordName) - { - EcmaVM *vm = thread->GetEcmaVM(); - moduleRequestName = moduleRequestName.substr(PREFIX_BUNDLE_LEN); - CString entryPoint = moduleRequestName; - if (jsPandaFile->IsRecordWithBundleName()) { - CVector vec; - StringHelper::SplitString(moduleRequestName, vec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(INFO) << "SplitString filed, please check moduleRequestName"; - return CString(); - } - CString bundleName = vec[0]; - CString moduleName = vec[1]; - CropNamespaceIfAbsent(moduleName); - -#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) - if (bundleName != vm->GetBundleName()) { - baseFileName = - BUNDLE_INSTALL_PATH + bundleName + '/' + moduleName + '/' + moduleName + MERGE_ABC_ETS_MODULES; - } else if (moduleName != vm->GetModuleName()) { - baseFileName = BUNDLE_INSTALL_PATH + moduleName + MERGE_ABC_ETS_MODULES; - } else { - // Support multi-module card service - baseFileName = vm->GetAssetPath(); - } -#else - CVector currentVec; - StringHelper::SplitString(recordName, currentVec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(INFO) << "SplitString filed, please check moduleRequestName"; - return CString(); - } - CString currentModuleName = currentVec[1]; - CropNamespaceIfAbsent(currentModuleName); - if (bundleName != vm->GetBundleName() || moduleName != currentModuleName) { - entryPoint = PREVIEW_OF_ACROSS_HAP_FLAG; - if (vm->EnableReportModuleResolvingFailure()) { - LOG_NO_TAG(ERROR) << "[ArkRuntime Log] Importing shared package is not supported in the Previewer."; - } - } -#endif - } else { - CroppingRecord(entryPoint); - } - return entryPoint; - } - - static CString MakeNewRecord(const JSPandaFile *jsPandaFile, CString &baseFileName, const CString &recordName, - const CString &requestName) - { - CString entryPoint; - CString moduleRequestName = RemoveSuffix(requestName); - size_t pos = moduleRequestName.find("./"); - if (pos == 0) { - moduleRequestName = moduleRequestName.substr(2); // 2 means jump "./" - } - pos = recordName.rfind('/'); - if (pos != CString::npos) { - entryPoint = recordName.substr(0, pos 
+ 1) + moduleRequestName; - } else { - entryPoint = moduleRequestName; - } - entryPoint = NormalizePath(entryPoint); - entryPoint = ConfirmLoadingIndexOrNot(jsPandaFile, entryPoint); - if (!entryPoint.empty()) { - return entryPoint; - } - // the package name may have a '.js' suffix, try to parseThirdPartyPackage - entryPoint = ParseThirdPartyPackage(jsPandaFile, recordName, requestName); - if (!entryPoint.empty()) { - return entryPoint; - } - // Execute abc locally - pos = baseFileName.rfind('/'); - if (pos != CString::npos) { - baseFileName = baseFileName.substr(0, pos + 1) + moduleRequestName + EXT_NAME_ABC; - } else { - baseFileName = moduleRequestName + EXT_NAME_ABC; - } - pos = moduleRequestName.rfind('/'); - if (pos != CString::npos) { - entryPoint = moduleRequestName.substr(pos + 1); - } else { - entryPoint = moduleRequestName; - } - return entryPoint; - } - - static CString ConfirmLoadingIndexOrNot(const JSPandaFile *jsPandaFile, const CString &packageEntryPoint) - { - CString entryPoint = packageEntryPoint; - if (jsPandaFile->HasRecord(entryPoint)) { - return entryPoint; - } - // Possible import directory - entryPoint += PACKAGE_ENTRY_FILE; - if (jsPandaFile->HasRecord(entryPoint)) { - return entryPoint; - } - return CString(); - } - - static CString FindNpmEntryPoint(const JSPandaFile *jsPandaFile, const CString &packageEntryPoint) - { - // if we are currently importing a specific file or directory, we will get the entryPoint here - CString entryPoint = ConfirmLoadingIndexOrNot(jsPandaFile, packageEntryPoint); - if (!entryPoint.empty()) { - return entryPoint; - } - // When you come here, must import a packageName - return jsPandaFile->GetEntryPoint(packageEntryPoint); - } - - static CString FindPackageInTopLevel(const JSPandaFile *jsPandaFile, const CString& requestName, - const CString &packagePath) - { - // we find node_modules/0/xxx or node_modules/1/xxx - CString entryPoint; - for (size_t level = 0; level <= MAX_PACKAGE_LEVEL; ++level) { - CString levelStr = std::to_string(level).c_str(); - CString key = packagePath + "/" + levelStr + '/' + requestName; - entryPoint = FindNpmEntryPoint(jsPandaFile, key); - if (!entryPoint.empty()) { - return entryPoint; - } - } - return CString(); - } - - static CString FindOhpmEntryPoint(const JSPandaFile *jsPandaFile, const CString& ohpmPath, - const CString& requestName) - { - CVector vec; - StringHelper::SplitString(requestName, vec, 0); - size_t maxIndex = vec.size() - 1; - CString ohpmKey; - size_t index = 0; - // first we find the ohpmKey by splicing the requestName - while (index <= maxIndex) { - CString maybeKey = ohpmPath + "/" + StringHelper::JoinString(vec, 0, index); - ohpmKey = jsPandaFile->GetNpmEntries(maybeKey); - if (!ohpmKey.empty()) { - break; - } - ++index; - } - if (ohpmKey.empty()) { - return CString(); - } - // second If the ohpmKey is not empty, we will use it to obtain the real entrypoint - CString entryPoint; - if (index == maxIndex) { - // requestName is a packageName - entryPoint = jsPandaFile->GetEntryPoint(ohpmKey); - } else { - // import a specific file or directory - ohpmKey = ohpmKey + "/" + StringHelper::JoinString(vec, index + 1, maxIndex); - entryPoint = ConfirmLoadingIndexOrNot(jsPandaFile, ohpmKey); - } - return entryPoint; - } - - static CString FindPackageInTopLevelWithNamespace(const JSPandaFile *jsPandaFile, const CString& requestName, - const CString &recordName) - { - // find in current module @[moduleName|namespace]/ - CString entryPoint; - CString ohpmPath; - if 
(StringHelper::StringStartWith(recordName, PACKAGE_PATH_SEGMENT)) { - size_t pos = recordName.find('/'); - if (pos == CString::npos) { - LOG_ECMA(DEBUG) << "wrong recordname : " << recordName; - return CString(); - } - ohpmPath = recordName.substr(0, pos); - entryPoint = FindOhpmEntryPoint(jsPandaFile, recordName.substr(0, pos), requestName); - } else { - CVector vec; - StringHelper::SplitString(recordName, vec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(DEBUG) << "SplitString filed, please check moduleRequestName"; - return CString(); - } - CString moduleName = vec[1]; - // If namespace exists, use namespace as moduleName - size_t pos = moduleName.find(NAME_SPACE_TAG); - if (pos != CString::npos) { - moduleName = moduleName.substr(pos + 1); - } - ohpmPath = CString(PACKAGE_PATH_SEGMENT) + NAME_SPACE_TAG + moduleName; - entryPoint = FindOhpmEntryPoint(jsPandaFile, ohpmPath, requestName); - } - if (!entryPoint.empty()) { - return entryPoint; - } - // find in project directory / - return FindOhpmEntryPoint(jsPandaFile, PACKAGE_PATH_SEGMENT, requestName); - } - - static CString ParseOhpmPackage(const JSPandaFile *jsPandaFile, const CString &recordName, - const CString &requestName) - { - CString entryPoint; - if (StringHelper::StringStartWith(recordName, PACKAGE_PATH_SEGMENT)) { - //this way is thirdPartyPackage import ThirdPartyPackage - auto info = const_cast(jsPandaFile)->FindRecordInfo(recordName); - CString packageName = info.npmPackageName; - size_t pos = packageName.rfind(PACKAGE_PATH_SEGMENT); - if (pos != CString::npos) { - packageName.erase(pos, packageName.size() - pos); - CString ohpmPath = packageName + PACKAGE_PATH_SEGMENT; - entryPoint = FindOhpmEntryPoint(jsPandaFile, ohpmPath, requestName); - if (!entryPoint.empty()) { - return entryPoint; - } - } - } - // Import packages under the current module or project directory - return FindPackageInTopLevelWithNamespace(jsPandaFile, requestName, recordName); - } - - static CString ParseThirdPartyPackage(const JSPandaFile *jsPandaFile, const CString &recordName, - const CString &requestName, const CString &packagePath) - { - CString entryPoint; - if (StringHelper::StringStartWith(recordName, packagePath)) { - auto info = const_cast(jsPandaFile)->FindRecordInfo(recordName); - CString packageName = info.npmPackageName; - size_t pos = 0; - while (true) { - CString key = packageName + '/' + packagePath + "/" + requestName; - entryPoint = FindNpmEntryPoint(jsPandaFile, key); - if (!entryPoint.empty()) { - return entryPoint; - } - pos = packageName.rfind(packagePath) - 1; - if (pos == CString::npos || pos < 0) { - break; - } - packageName.erase(pos, packageName.size() - pos); - } - } - return FindPackageInTopLevel(jsPandaFile, requestName, packagePath); - } - - static CString ParseThirdPartyPackage(const JSPandaFile *jsPandaFile, const CString &recordName, - const CString &requestName) - { - static CVector packagePaths = {CString(PACKAGE_PATH_SEGMENT), CString(NPM_PATH_SEGMENT)}; - // We need to deal with scenarios like this 'json5/' -> 'json5' - CString normalizeRequestName = NormalizePath(requestName); - CString entryPoint = ParseOhpmPackage(jsPandaFile, recordName, normalizeRequestName); - if (!entryPoint.empty()) { - return entryPoint; - } - // Package compatible with old soft link format - for (size_t i = 0; i < packagePaths.size(); ++i) { - entryPoint = ParseThirdPartyPackage(jsPandaFile, recordName, normalizeRequestName, packagePaths[i]); - if (!entryPoint.empty()) { - return entryPoint; - } - } - 
return CString(); - } - - static bool IsImportFile(const CString &moduleRequestName) - { - if (moduleRequestName[0] == '.') { - return true; - } - size_t pos = moduleRequestName.rfind('.'); - if (pos != CString::npos) { - CString suffix = moduleRequestName.substr(pos); - if (suffix == EXT_NAME_JS || suffix == EXT_NAME_TS || suffix == EXT_NAME_ETS || suffix == EXT_NAME_JSON) { - return true; - } - } - return false; - } - - static CString RemoveSuffix(const CString &requestName) - { - CString res = requestName; - size_t pos = res.rfind('.'); - if (pos != CString::npos) { - CString suffix = res.substr(pos); - if (suffix == EXT_NAME_JS || suffix == EXT_NAME_TS || suffix == EXT_NAME_ETS || suffix == EXT_NAME_JSON) { - res.erase(pos, suffix.length()); - } - } - return res; - } - - inline static bool IsNativeModuleRequest(const CString &requestName) - { - if (requestName[0] != '@') { - return false; - } - if (StringHelper::StringStartWith(requestName, PathHelper::REQUIRE_NAPI_OHOS_PREFIX) || - StringHelper::StringStartWith(requestName, PathHelper::REQUIRE_NAPI_APP_PREFIX) || - StringHelper::StringStartWith(requestName, PathHelper::REQUIRE_NAITVE_MODULE_PREFIX)) { - return true; - } - return false; - } - - static CString ConcatFileNameWithMerge(JSThread *thread, const JSPandaFile *jsPandaFile, CString &baseFileName, - CString recordName, CString requestName) - { - CString entryPoint; - if (StringHelper::StringStartWith(requestName, PREFIX_BUNDLE)) { - entryPoint = ParsePrefixBundle(thread, jsPandaFile, baseFileName, requestName, recordName); - } else if (StringHelper::StringStartWith(requestName, PREFIX_PACKAGE)) { - entryPoint = requestName.substr(PREFIX_PACKAGE_LEN); - } else if (IsImportFile(requestName)) { // load a relative pathName. - entryPoint = MakeNewRecord(jsPandaFile, baseFileName, recordName, requestName); - } else { - entryPoint = ParseThirdPartyPackage(jsPandaFile, recordName, requestName); - } - if (entryPoint.empty() && thread->GetEcmaVM()->EnableReportModuleResolvingFailure()) { - LOG_ECMA(ERROR) << "Failed to resolve the requested entryPoint. baseFileName : '" << baseFileName << - "'. RecordName : '" << recordName << "'. RequestName : '" << requestName << "'."; - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - CString msg = "failed to load module'" + requestName + "' which imported by '" + - recordName + "'. 
Please check the target path."; - JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); - THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, entryPoint); - } - return entryPoint; - } - - static CString GetStrippedModuleName(const CString &moduleRequestName) + /* + * Before: @***:xxxx + * After: xxxx + */ + inline static CString GetStrippedModuleName(const CString &moduleRequestName) { - // @xxx:**** -> **** - size_t pos = moduleRequestName.find(':'); + size_t pos = moduleRequestName.find(COLON_TAG); if (pos == CString::npos) { LOG_FULL(FATAL) << "Unknown format " << moduleRequestName; } return moduleRequestName.substr(pos + 1); } - static CString GetInternalModulePrefix(const CString &moduleRequestName) + /* + * Before: @xxx:**** + * After: xxx + */ + inline static CString GetInternalModulePrefix(const CString &moduleRequestName) { - // @xxx:* -> xxx - size_t pos = moduleRequestName.find(':'); + size_t pos = moduleRequestName.find(COLON_TAG); if (pos == CString::npos) { LOG_FULL(FATAL) << "Unknown format " << moduleRequestName; } diff --git a/ecmascript/base/string_helper.h b/ecmascript/base/string_helper.h index e15b3d2c2edb543ff7ff56ed08aef2b82217db26..43c049c54f1786015bdc58aa8193be229a0788f2 100644 --- a/ecmascript/base/string_helper.h +++ b/ecmascript/base/string_helper.h @@ -229,6 +229,11 @@ public: return c; } + static inline void InplaceAppend(std::u16string &str1, const std::u16string &str2) + { + str1.append(str2); + } + static inline std::u16string Append(const std::u16string &str1, const std::u16string &str2) { std::u16string tmpStr = str1; diff --git a/ecmascript/base/tests/array_helper_test.cpp b/ecmascript/base/tests/array_helper_test.cpp index 720d8064fcaf6c4a536c285c8cf8490c72435c08..fb0ae0cc237c01d3965b44627c0545c34f5feff3 100644 --- a/ecmascript/base/tests/array_helper_test.cpp +++ b/ecmascript/base/tests/array_helper_test.cpp @@ -104,7 +104,7 @@ HWTEST_F_L0(ArrayHelperTest, SortCompare) EXPECT_EQ(resultValue3, -1); // Y is Undefined EXPECT_EQ(resultValue4, 1); // X > Y EXPECT_EQ(resultValue5, 0); // X = Y - EXPECT_EQ(resultValue6, 0); // X < Y + EXPECT_EQ(resultValue6, -1); // X < Y } /** diff --git a/ecmascript/base/tests/error_helper_test.cpp b/ecmascript/base/tests/error_helper_test.cpp index 3cad7ef139e6cbee694e73a53c8624c3462112b0..c006c0dbc28c104affa821ca9b047034e40812e1 100644 --- a/ecmascript/base/tests/error_helper_test.cpp +++ b/ecmascript/base/tests/error_helper_test.cpp @@ -324,4 +324,56 @@ HWTEST_F_L0(ErrorHelperTest, ErrorCommonConstructor_003) EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(aggregateNameValue)).ToCString().c_str(), "AggregateError"); } + +HWTEST_F_L0(ErrorHelperTest, ErrorCommonConstructor_004) +{ + auto factory = instance->GetFactory(); + auto env = instance->GetGlobalEnv(); + JSHandle msgKey = thread->GlobalConstants()->GetHandledMessageString(); + JSHandle nameKey = thread->GlobalConstants()->GetHandledNameString(); + JSHandle causeKey = thread->GlobalConstants()->GetHandledCauseString(); + + JSHandle error(env->GetErrorFunction()); + JSHandle typeError(env->GetTypeErrorFunction()); + JSHandle objFun = env->GetObjectFunction(); + JSHandle optionsObj = factory->NewJSObjectByConstructor(JSHandle(objFun), objFun); + JSHandle causeValue(factory->NewFromASCII("error cause")); // test error cause + JSObject::SetProperty(thread, optionsObj, causeKey, causeValue); + + JSHandle errorMsg(factory->NewFromASCII("You have an Error!")); + EcmaRuntimeCallInfo *argv1 = + 
TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue(*error), 8); // 8 means 2 call args + argv1->SetFunction(error.GetTaggedValue()); + argv1->SetThis(JSTaggedValue(*error)); + argv1->SetCallArg(0, errorMsg.GetTaggedValue()); + argv1->SetCallArg(1, optionsObj.GetTaggedValue()); + auto prev1 = TestHelper::SetupFrame(thread, argv1); + JSHandle errorResult(thread, ErrorHelper::ErrorCommonConstructor(argv1, ErrorType::ERROR)); + TestHelper::TearDownFrame(thread, prev1); + JSHandle errorMsgValue(JSObject::GetProperty(thread, errorResult, msgKey).GetValue()); + JSHandle errorNameValue(JSObject::GetProperty(thread, errorResult, nameKey).GetValue()); + JSHandle errorCauseValue(JSObject::GetProperty(thread, errorResult, causeKey).GetValue()); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(errorMsgValue)).ToCString().c_str(), + "You have an Error!"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(errorNameValue)).ToCString().c_str(), "Error"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(errorCauseValue)).ToCString().c_str(), "error cause"); + + JSHandle typeErrorMsg(factory->NewFromASCII("You have a type error!")); + EcmaRuntimeCallInfo *argv2 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue(*typeError), 8); // 8 means 2 call args + argv2->SetFunction(typeError.GetTaggedValue()); + argv2->SetThis(JSTaggedValue(*typeError)); + argv2->SetCallArg(0, typeErrorMsg.GetTaggedValue()); + argv2->SetCallArg(1, optionsObj.GetTaggedValue()); + auto prev2 = TestHelper::SetupFrame(thread, argv2); + JSHandle typeErrorResult(thread, ErrorHelper::ErrorCommonConstructor(argv2, ErrorType::TYPE_ERROR)); + TestHelper::TearDownFrame(thread, prev2); + JSHandle typeMsgValue(JSObject::GetProperty(thread, typeErrorResult, msgKey).GetValue()); + JSHandle typeNameValue(JSObject::GetProperty(thread, typeErrorResult, nameKey).GetValue()); + JSHandle typeCauseValue(JSObject::GetProperty(thread, typeErrorResult, causeKey).GetValue()); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(typeMsgValue)).ToCString().c_str(), + "You have a type error!"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(typeNameValue)).ToCString().c_str(), "TypeError"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(typeCauseValue)).ToCString().c_str(), "error cause"); +} } // namespace panda::test diff --git a/ecmascript/base/tests/typed_array_helper_test.cpp b/ecmascript/base/tests/typed_array_helper_test.cpp index c502f608d8778df1fd52d8e948dad6e05818de24..dce9b45f2b30c58f3777b48bb7ce1e29b49144ea 100755 --- a/ecmascript/base/tests/typed_array_helper_test.cpp +++ b/ecmascript/base/tests/typed_array_helper_test.cpp @@ -146,7 +146,7 @@ HWTEST_F_L0(TypedArrayHelperTest, AllocateTypedArray_001) auto prev = TestHelper::SetupFrame(thread, argv); JSHandle newTarget = BuiltinsBase::GetNewTarget(argv); JSHandle arrayObj = - TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, DataViewType::UINT8); + TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, DataViewType::UINT8); TestHelper::TearDownFrame(thread, prev); JSTypedArray *jsTypedArray = JSTypedArray::Cast(*arrayObj); EXPECT_EQ(jsTypedArray->GetContentType(), ContentType::Number); @@ -167,7 +167,7 @@ HWTEST_F_L0(TypedArrayHelperTest, AllocateTypedArray_002) auto prev = TestHelper::SetupFrame(thread, argv); JSHandle newTarget = BuiltinsBase::GetNewTarget(argv); JSHandle arrayObj = - TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, length, DataViewType::UINT8); + 
TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, length, DataViewType::UINT8); TestHelper::TearDownFrame(thread, prev); JSTypedArray *jsTypedArray = JSTypedArray::Cast(*arrayObj); EXPECT_EQ(jsTypedArray->GetContentType(), ContentType::Number); diff --git a/ecmascript/base/typed_array_helper-inl.h b/ecmascript/base/typed_array_helper-inl.h index e348f809f73eed7eebe650dd8d75daaa40d37df7..eecb5cc730ee8aae430aa61157f2beda5725b6ff 100644 --- a/ecmascript/base/typed_array_helper-inl.h +++ b/ecmascript/base/typed_array_helper-inl.h @@ -126,50 +126,52 @@ JSHandle TypedArrayHelper::GetConstructorFromType(JSThread *thread, { JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); switch (arrayType) { - case DataViewType::INT8: - return JSHandle(env->GetInt8ArrayFunction()); - case DataViewType::UINT8: - return JSHandle(env->GetUint8ArrayFunction()); - case DataViewType::UINT8_CLAMPED: - return JSHandle(env->GetUint8ClampedArrayFunction()); - case DataViewType::INT16: - return JSHandle(env->GetInt16ArrayFunction()); - case DataViewType::UINT16: - return JSHandle(env->GetUint16ArrayFunction()); - case DataViewType::INT32: - return JSHandle(env->GetInt32ArrayFunction()); - case DataViewType::UINT32: - return JSHandle(env->GetUint32ArrayFunction()); - case DataViewType::FLOAT32: - return JSHandle(env->GetFloat32ArrayFunction()); - case DataViewType::FLOAT64: - return JSHandle(env->GetFloat64ArrayFunction()); - case DataViewType::BIGINT64: - return JSHandle(env->GetBigInt64ArrayFunction()); - default: - break; + case DataViewType::INT8: + return JSHandle(env->GetInt8ArrayFunction()); + case DataViewType::UINT8: + return JSHandle(env->GetUint8ArrayFunction()); + case DataViewType::UINT8_CLAMPED: + return JSHandle(env->GetUint8ClampedArrayFunction()); + case DataViewType::INT16: + return JSHandle(env->GetInt16ArrayFunction()); + case DataViewType::UINT16: + return JSHandle(env->GetUint16ArrayFunction()); + case DataViewType::INT32: + return JSHandle(env->GetInt32ArrayFunction()); + case DataViewType::UINT32: + return JSHandle(env->GetUint32ArrayFunction()); + case DataViewType::FLOAT32: + return JSHandle(env->GetFloat32ArrayFunction()); + case DataViewType::FLOAT64: + return JSHandle(env->GetFloat64ArrayFunction()); + case DataViewType::BIGINT64: + return JSHandle(env->GetBigInt64ArrayFunction()); + default: + break; } return JSHandle(env->GetBigUint64ArrayFunction()); } uint32_t TypedArrayHelper::GetSizeFromType(const DataViewType arrayType) { - uint32_t elementSize; if (arrayType == DataViewType::INT8 || arrayType == DataViewType::UINT8 || arrayType == DataViewType::UINT8_CLAMPED) { - elementSize = ElementSize::ONE; - } else if (arrayType == DataViewType::INT16 || - arrayType == DataViewType::UINT16) { - elementSize = ElementSize::TWO; - } else if (arrayType == DataViewType::FLOAT32 || - arrayType == DataViewType::UINT32 || - arrayType == DataViewType::INT32) { - elementSize = ElementSize::FOUR; - } else { - elementSize = ElementSize::EIGHT; + return ElementSize::ONE; + } + + if (arrayType == DataViewType::INT16 || + arrayType == DataViewType::UINT16) { + return ElementSize::TWO; } - return elementSize; + + if (arrayType == DataViewType::FLOAT32 || + arrayType == DataViewType::UINT32 || + arrayType == DataViewType::INT32) { + return ElementSize::FOUR; + } + + return ElementSize::EIGHT; } } // namespace panda::ecmascript::base #endif // ECMASCRIPT_BASE_TYPED_ARRAY_HELPER_INL_H diff --git a/ecmascript/base/typed_array_helper.cpp b/ecmascript/base/typed_array_helper.cpp index 
2cfe6b939340a642171b1f541a3d80f9555e11d6..c5153346369f0182e73098b8d008eca6d750b36d 100644 --- a/ecmascript/base/typed_array_helper.cpp +++ b/ecmascript/base/typed_array_helper.cpp @@ -45,7 +45,6 @@ JSTaggedValue TypedArrayHelper::TypedArrayConstructor(EcmaRuntimeCallInfo *argv, ASSERT(argv); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - EcmaVM *ecmaVm = thread->GetEcmaVM(); JSHandle newTarget = BuiltinsBase::GetNewTarget(argv); // 2. If NewTarget is undefined, throw a TypeError exception. if (newTarget->IsUndefined()) { @@ -54,7 +53,6 @@ JSTaggedValue TypedArrayHelper::TypedArrayConstructor(EcmaRuntimeCallInfo *argv, // 3. Let constructorName be the String value of the Constructor Name value specified in Table 61 for this // TypedArray constructor. // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget, "%TypedArray.prototype%"). - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle firstArg = BuiltinsBase::GetCallArg(argv, 0); if (!firstArg->IsECMAObject()) { // es11 22.2.4.1 TypedArray ( ) @@ -65,13 +63,13 @@ JSTaggedValue TypedArrayHelper::TypedArrayConstructor(EcmaRuntimeCallInfo *argv, RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); elementLength = static_cast(index.GetNumber()); } - JSHandle obj = TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, + JSHandle obj = TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, elementLength, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return obj.GetTaggedValue(); } - JSHandle obj = - TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, arrayType); + + JSHandle obj = TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (firstArg->IsTypedArray()) { return TypedArrayHelper::CreateFromTypedArray(argv, obj, arrayType); @@ -98,11 +96,13 @@ JSTaggedValue TypedArrayHelper::FastCopyElementFromArray(EcmaRuntimeCallInfo *ar if (elements->GetLength() < len) { TypedArrayHelper::CreateFromOrdinaryObject(argv, obj, arrayType); } - EcmaVM *ecmaVm = thread->GetEcmaVM(); - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, len, arrayType); + + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, len, arrayType); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle targetObj = JSHandle::Cast(obj); - + JSStableArray::FastCopyFromArrayToTypedArray(thread, targetObj, arrayType, 0, len, elements); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSHandle::Cast(targetObj).GetTaggedValue(); } @@ -141,7 +141,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar } } uint32_t len = static_cast(vec.size()); - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, len, arrayType); + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, len, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // d. Let k be 0. // e. 
Repeat, while k < len @@ -154,6 +154,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar while (k < len) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle kValue = vec[k]; JSTaggedValue::SetProperty(thread, JSHandle::Cast(obj), kKey, kValue, true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -173,7 +174,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint64_t rawLen = lenTemp.GetNumber(); // 10. Perform ? AllocateTypedArrayBuffer(O, len). - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, rawLen, arrayType); + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, rawLen, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Let k be 0. // 12. Repeat, while k < len @@ -187,6 +188,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar while (k < len) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle kValue = JSObject::GetProperty(thread, objectArg, kKey).GetValue(); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSTaggedValue::SetProperty(thread, JSHandle::Cast(obj), kKey, kValue, true); @@ -230,7 +232,9 @@ JSTaggedValue TypedArrayHelper::CreateFromTypedArray(EcmaRuntimeCallInfo *argv, // 15. Let byteLength be elementSize × elementLength. uint32_t srcByteOffset = srcObj->GetByteOffset(); uint32_t elementSize = TypedArrayHelper::GetSizeFromType(arrayType); - uint32_t byteLength = elementSize * elementLength; + // If elementLength is a large number, the multiplication of elementSize and elementLength may exceed + // the maximum value of uint32, resulting in data overflow. Therefore, the type of byteLength is uint64_t. + uint64_t byteLength = elementSize * static_cast(elementLength); // 16. If IsSharedArrayBuffer(srcData) is false, then // a. Let bufferConstructor be ? SpeciesConstructor(srcData, %ArrayBuffer%). @@ -307,7 +311,7 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, [[maybe_unused]] EcmaHandleScope handleScope(thread); // 5. Let elementSize be the Element Size value specified in Table 61 for constructorName. // 6. Let offset be ? ToIndex(byteOffset). - uint32_t elementSize = static_cast(TypedArrayHelper::GetSizeFromType(arrayType)); + uint32_t elementSize = TypedArrayHelper::GetSizeFromType(arrayType); JSHandle byteOffset = BuiltinsBase::GetCallArg(argv, 1); JSTaggedNumber index = JSTaggedValue::ToIndex(thread, byteOffset); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -320,11 +324,11 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // 8. If length is not undefined, then // a. Let newLength be ? ToIndex(length). JSHandle length = BuiltinsBase::GetCallArg(argv, BuiltinsBase::ArgsPosition::THIRD); - int32_t newLength = 0; + uint64_t newLength = 0; if (!length->IsUndefined()) { index = JSTaggedValue::ToIndex(thread, length); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - newLength = static_cast(index.GetNumber()); + newLength = static_cast(index.GetNumber()); } // 9. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. JSHandle buffer = BuiltinsBase::GetCallArg(argv, 0); @@ -337,7 +341,7 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // a. 
If bufferByteLength modulo elementSize ≠ 0, throw a RangeError exception. // b. Let newByteLength be bufferByteLength - offset. // c. If newByteLength < 0, throw a RangeError exception. - uint32_t newByteLength = 0; + uint64_t newByteLength = 0; if (length->IsUndefined()) { if (bufferByteLength % elementSize != 0) { THROW_RANGE_ERROR_AND_RETURN(thread, "The bufferByteLength must be an integral multiple of elementSize.", @@ -351,9 +355,7 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // 12. Else, // a. Let newByteLength be newLength × elementSize. // b. If offset + newByteLength > bufferByteLength, throw a RangeError exception. - ASSERT((static_cast(newLength) * static_cast(elementSize)) <= - static_cast(INT32_MAX)); - newByteLength = static_cast(newLength) * elementSize; + newByteLength = newLength * elementSize; if (offset + newByteLength > bufferByteLength) { THROW_RANGE_ERROR_AND_RETURN(thread, "The newByteLength is out of range.", JSTaggedValue::Exception()); } @@ -364,23 +366,22 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // 16. Set O.[[ArrayLength]] to newByteLength / elementSize. JSTypedArray *jsTypedArray = JSTypedArray::Cast(*obj); jsTypedArray->SetViewedArrayBufferOrByteArray(thread, buffer); - jsTypedArray->SetByteLength(static_cast(newByteLength)); + jsTypedArray->SetByteLength(newByteLength); jsTypedArray->SetByteOffset(offset); - jsTypedArray->SetArrayLength(static_cast(newByteLength / elementSize)); + jsTypedArray->SetArrayLength(newByteLength / elementSize); // 17. Return O. return obj.GetTaggedValue(); } // es11 22.2.4.2.1 Runtime Semantics: AllocateTypedArray ( constructorName, newTarget, defaultProto ) -JSHandle TypedArrayHelper::AllocateTypedArray(ObjectFactory *factory, EcmaVM *ecmaVm, +JSHandle TypedArrayHelper::AllocateTypedArray(JSThread *thread, const JSHandle &constructorName, const JSHandle &newTarget, const DataViewType arrayType) { - JSThread *thread = ecmaVm->GetJSThread(); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); // 1. Let proto be ? GetPrototypeFromConstructor(newTarget, defaultProto). // 2. Let obj be ! IntegerIndexedObjectCreate(proto).
- RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); JSHandle typedArrayFunc = TypedArrayHelper::GetConstructorFromType(thread, arrayType); JSHandle obj = factory->NewJSObjectByConstructor(typedArrayFunc, newTarget); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); @@ -428,14 +428,14 @@ JSHandle TypedArrayHelper::AllocateTypedArray(ObjectFactory *factory, // 7. If length is not present, then // 8. Else, // a. Perform ? AllocateTypedArrayBuffer(obj, length). - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, length, arrayType); + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, length, arrayType); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); // 9. Return obj. return obj; } // es11 22.2.4.2.2 Runtime Semantics: AllocateTypedArrayBuffer ( O, length ) -JSHandle TypedArrayHelper::AllocateTypedArrayBuffer(JSThread *thread, EcmaVM *ecmaVm, +JSHandle TypedArrayHelper::AllocateTypedArrayBuffer(JSThread *thread, const JSHandle &obj, uint64_t length, const DataViewType arrayType) { @@ -456,7 +456,7 @@ JSHandle TypedArrayHelper::AllocateTypedArrayBuffer(JSThread *thread, // 7. Let data be ? AllocateArrayBuffer(%ArrayBuffer%, byteLength). JSTaggedValue data; if (byteLength > JSTypedArray::MAX_ONHEAP_LENGTH) { - JSHandle constructor = ecmaVm->GetGlobalEnv()->GetArrayBufferFunction(); + JSHandle constructor = thread->GetEcmaVM()->GetGlobalEnv()->GetArrayBufferFunction(); data = BuiltinsArrayBuffer::AllocateArrayBuffer(thread, constructor, byteLength); JSTypedArray::Cast(*obj)->SetIsOnHeap(false); } else { @@ -544,6 +544,27 @@ JSHandle TypedArrayHelper::TypedArrayCreate(JSThread *thread, const JS return newTypedArray; } +// TypedArrayCreateSameType ( exemplar, argumentList ) +JSHandle TypedArrayHelper::TypedArrayCreateSameType(JSThread *thread, const JSHandle &obj, + uint32_t argc, JSTaggedType argv[]) +{ + // 1. Let constructor be the intrinsic object associated with the constructor name exemplar.[[TypedArrayName]] + // in Table 70. + JSHandle buffHandle(thread, JSTaggedValue(argv[0])); + JSHandle constructor = + TypedArrayHelper::GetConstructor(thread, JSHandle(obj)); + argv[0] = buffHandle.GetTaggedType(); + // 2. Let result be ? TypedArrayCreate(constructor, argumentList). + JSHandle result = TypedArrayHelper::TypedArrayCreate(thread, constructor, argc, argv); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); + // 3. Assert: result has [[TypedArrayName]] and [[ContentType]] internal slots. + // 4. Assert: result.[[ContentType]] is exemplar.[[ContentType]]. 
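A note on the byteLength hunks above: for a Float64Array, elementSize is 8, so elementSize * elementLength can exceed UINT32_MAX and would silently wrap in 32-bit arithmetic, which is why byteLength and newByteLength are widened to uint64_t. A minimal JS-level sketch of the wrap (illustrative only; the engine-side fix is the uint64_t widening):

```
// 8 bytes per Float64 element times 2^29 elements is exactly 2^32 bytes.
const byteLength = 8 * 2 ** 29;
print(byteLength);       // 4294967296
print(byteLength >>> 0); // 0, the value a 32-bit unsigned variable would hold
```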
+ [[maybe_unused]] ContentType objContentType = obj->GetContentType(); + [[maybe_unused]] ContentType resultContentType = JSHandle::Cast(result)->GetContentType(); + ASSERT(objContentType == resultContentType); + return result; +} + // es11 22.2.3.5.1 Runtime Semantics: ValidateTypedArray ( O ) JSTaggedValue TypedArrayHelper::ValidateTypedArray(JSThread *thread, const JSHandle &value) { @@ -577,7 +598,7 @@ int32_t TypedArrayHelper::SortCompare(JSThread *thread, const JSHandleIsUndefined()) { - const int32_t argsLength = 2; + const uint32_t argsLength = 2; JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, callbackfnHandle, undefined, undefined, argsLength); @@ -623,7 +644,9 @@ int32_t TypedArrayHelper::SortCompare(JSThread *thread, const JSHandle &constructorName, const DataViewType arrayType); - static JSHandle AllocateTypedArray(ObjectFactory *factory, EcmaVM *ecmaVm, + static JSHandle AllocateTypedArray(JSThread *thread, const JSHandle &constructorName, const JSHandle &newTarget, const DataViewType arrayType); - static JSHandle AllocateTypedArray(ObjectFactory *factory, EcmaVM *ecmaVm, + static JSHandle AllocateTypedArray(JSThread *thread, const JSHandle &constructorName, const JSHandle &newTarget, int32_t length, const DataViewType arrayType); @@ -41,6 +41,8 @@ public: uint32_t argc, JSTaggedType argv[]); static JSHandle TypedArrayCreate(JSThread *thread, const JSHandle &constructor, uint32_t argc, const JSTaggedType argv[]); + static JSHandle TypedArrayCreateSameType(JSThread *thread, const JSHandle &obj, + uint32_t argc, JSTaggedType argv[]); static JSTaggedValue ValidateTypedArray(JSThread *thread, const JSHandle &value); inline static DataViewType GetType(const JSHandle &obj); inline static DataViewType GetType(JSType type); @@ -61,7 +63,7 @@ private: const DataViewType arrayType); static JSTaggedValue CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, const JSHandle &obj, const DataViewType arrayType); - static JSHandle AllocateTypedArrayBuffer(JSThread *thread, EcmaVM *ecmaVm, const JSHandle &obj, + static JSHandle AllocateTypedArrayBuffer(JSThread *thread, const JSHandle &obj, uint64_t length, const DataViewType arrayType); static JSTaggedValue FastCopyElementFromArray(EcmaRuntimeCallInfo *argv, const JSHandle &obj, const DataViewType arrayType); diff --git a/ecmascript/builtins/builtins.cpp b/ecmascript/builtins/builtins.cpp index 9d75686f2c6e7d45b6d903b2569c3c5d0a061d88..7d82aa040975ef5f9e6282678feb4a792e844be9 100644 --- a/ecmascript/builtins/builtins.cpp +++ b/ecmascript/builtins/builtins.cpp @@ -425,6 +425,7 @@ void Builtins::InitializeGlobalObject(const JSHandle &env, const JSHa // Global object test SetFunction(env, globalObject, "print", Global::PrintEntrypoint, 0); + SetFunction(env, globalObject, "markModuleCollectable", Global::MarkModuleCollectable, 0); #if ECMASCRIPT_ENABLE_RUNTIME_STAT SetFunction(env, globalObject, "startRuntimeStat", Global::StartRuntimeStat, 0); SetFunction(env, globalObject, "stopRuntimeStat", Global::StopRuntimeStat, 0); @@ -455,6 +456,8 @@ void Builtins::InitializeGlobalObject(const JSHandle &env, const JSHa SetFunction(env, globalObject, "isNaN", Global::IsNaN, FunctionLength::ONE); SetFunction(env, globalObject, "decodeURI", Global::DecodeURI, FunctionLength::ONE); SetFunction(env, globalObject, "encodeURI", Global::EncodeURI, FunctionLength::ONE); + SetFunction(env, globalObject, "escape", Global::Escape, FunctionLength::ONE); + SetFunction(env, globalObject, 
"unescape", Global::Unescape, FunctionLength::ONE); SetFunction(env, globalObject, "decodeURIComponent", Global::DecodeURIComponent, FunctionLength::ONE); SetFunction(env, globalObject, "encodeURIComponent", Global::EncodeURIComponent, FunctionLength::ONE); @@ -515,7 +518,8 @@ void Builtins::InitializeFunction(const JSHandle &env, const JSHandle // Function.prototype method // 19.2.3.1 Function.prototype.apply ( thisArg, argArray ) - SetFunction(env, funcFuncPrototypeObj, "apply", Function::FunctionPrototypeApply, FunctionLength::TWO); + SetFunction(env, funcFuncPrototypeObj, "apply", Function::FunctionPrototypeApply, FunctionLength::TWO, + BUILTINS_STUB_ID(FunctionPrototypeApply)); // 19.2.3.2 Function.prototype.bind ( thisArg , ...args) SetFunction(env, funcFuncPrototypeObj, "bind", Function::FunctionPrototypeBind, FunctionLength::ONE); // 19.2.3.3 Function.prototype.call (thisArg , ...args) @@ -1315,16 +1319,19 @@ void Builtins::InitializeSet(const JSHandle &env, const JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(setFuncPrototype), constructorKey, setFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // set.prototype.add() - SetFunction(env, setFuncPrototype, "add", BuiltinsSet::Add, FunctionLength::ONE); + SetFunction(env, setFuncPrototype, "add", BuiltinsSet::Add, FunctionLength::ONE, BUILTINS_STUB_ID(SetAdd)); // set.prototype.clear() SetFunction(env, setFuncPrototype, "clear", BuiltinsSet::Clear, FunctionLength::ZERO); // set.prototype.delete() - SetFunction(env, setFuncPrototype, "delete", BuiltinsSet::Delete, FunctionLength::ONE); + SetFunction(env, setFuncPrototype, "delete", BuiltinsSet::Delete, FunctionLength::ONE, + BUILTINS_STUB_ID(SetDelete)); // set.prototype.has() SetFunction(env, setFuncPrototype, "has", BuiltinsSet::Has, FunctionLength::ONE); // set.prototype.forEach() - SetFunction(env, setFuncPrototype, "forEach", BuiltinsSet::ForEach, FunctionLength::ONE); + SetFunction(env, setFuncPrototype, "forEach", BuiltinsSet::ForEach, FunctionLength::ONE, + BUILTINS_STUB_ID(SetForEach)); // set.prototype.entries() SetFunction(env, setFuncPrototype, "entries", BuiltinsSet::Entries, FunctionLength::ZERO); // set.prototype.keys() @@ -1334,6 +1341,7 @@ void Builtins::InitializeSet(const JSHandle &env, const JSHandle values(factory_->NewFromASCII("values")); JSHandle valuesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(setFuncPrototype), values); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor descriptor(thread_, valuesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, setFuncPrototype, keys, descriptor); @@ -1388,19 +1396,23 @@ void Builtins::InitializeMap(const JSHandle &env, const JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(mapFuncPrototype), constructorKey, mapFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // map.prototype.set() - SetFunction(env, mapFuncPrototype, globalConst->GetHandledSetString(), BuiltinsMap::Set, FunctionLength::TWO); + SetFunction(env, mapFuncPrototype, globalConst->GetHandledSetString(), BuiltinsMap::Set, FunctionLength::TWO, + BUILTINS_STUB_ID(MapSet)); // map.prototype.clear() SetFunction(env, mapFuncPrototype, "clear", BuiltinsMap::Clear, FunctionLength::ZERO); // map.prototype.delete() - SetFunction(env, mapFuncPrototype, "delete", BuiltinsMap::Delete, FunctionLength::ONE); + SetFunction(env, mapFuncPrototype, "delete", BuiltinsMap::Delete, FunctionLength::ONE, + BUILTINS_STUB_ID(MapDelete)); 
// map.prototype.has() SetFunction(env, mapFuncPrototype, "has", BuiltinsMap::Has, FunctionLength::ONE); // map.prototype.get() SetFunction(env, mapFuncPrototype, thread_->GlobalConstants()->GetHandledGetString(), BuiltinsMap::Get, FunctionLength::ONE); // map.prototype.forEach() - SetFunction(env, mapFuncPrototype, "forEach", BuiltinsMap::ForEach, FunctionLength::ONE); + SetFunction(env, mapFuncPrototype, "forEach", BuiltinsMap::ForEach, FunctionLength::ONE, + BUILTINS_STUB_ID(MapForEach)); // map.prototype.keys() SetFunction(env, mapFuncPrototype, "keys", BuiltinsMap::Keys, FunctionLength::ZERO); // map.prototype.values() @@ -1426,6 +1438,7 @@ void Builtins::InitializeMap(const JSHandle &env, const JSHandle entries(factory_->NewFromASCII("entries")); JSHandle entriesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(mapFuncPrototype), entries); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor descriptor(thread_, entriesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, mapFuncPrototype, iteratorSymbol, descriptor); @@ -1464,6 +1477,7 @@ void Builtins::InitializeWeakMap(const JSHandle &env, const JSHandle< // "constructor" property on the prototype JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(weakMapFuncPrototype), constructorKey, weakMapFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // weakmap.prototype.set() SetFunction(env, weakMapFuncPrototype, globalConst->GetHandledSetString(), BuiltinsWeakMap::Set, FunctionLength::TWO); @@ -1508,6 +1522,7 @@ void Builtins::InitializeWeakSet(const JSHandle &env, const JSHandle< // "constructor" property on the prototype JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(weakSetFuncPrototype), constructorKey, weakSetFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // set.prototype.add() SetFunction(env, weakSetFuncPrototype, "add", BuiltinsWeakSet::Add, FunctionLength::ONE); // set.prototype.delete() @@ -1577,6 +1592,7 @@ void Builtins::InitializeWeakRef(const JSHandle &env, const JSHandle< // "constructor" property on the prototype JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(weakRefFuncPrototype), constructorKey, weakRefFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // WeakRef.prototype.deref() SetFunction(env, weakRefFuncPrototype, "deref", BuiltinsWeakRef::Deref, FunctionLength::ZERO); @@ -1619,6 +1635,7 @@ void Builtins::InitializeFinalizationRegistry(const JSHandle &env, JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(finalizationRegistryFuncPrototype), constructorKey, finalizationRegistryFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // FinalizationRegistry.prototype.deref() SetFunction(env, finalizationRegistryFuncPrototype, "register", BuiltinsFinalizationRegistry::Register, FunctionLength::TWO); @@ -1708,7 +1725,7 @@ void Builtins::InitializeJson(const JSHandle &env, const JSHandle jsonObject = factory_->NewJSObjectWithInit(jsonHClass); SetFunction(env, jsonObject, "parse", Json::Parse, FunctionLength::TWO); - SetFunction(env, jsonObject, "stringify", Json::Stringify, FunctionLength::THREE); + SetFunction(env, jsonObject, "stringify", Json::Stringify, FunctionLength::THREE, BUILTINS_STUB_ID(STRINGIFY)); PropertyDescriptor jsonDesc(thread_, JSHandle::Cast(jsonObject), true, false, true); JSHandle jsonString(factory_->NewFromASCII("JSON")); @@ -1749,7 +1766,8 
@@ void Builtins::InitializeString(const JSHandle &env, const JSHandle &env, const JSHandleSetStringFunction(thread_, stringFunction); + env->SetStringPrototype(thread_, stringFuncPrototype); } void Builtins::InitializeStringIterator(const JSHandle &env, @@ -2005,6 +2024,11 @@ void Builtins::InitializeRegExp(const JSHandle &env) JSHandle globalKey(globalConstants->GetHandledGlobalString()); SetGetter(regPrototype, globalKey, globalGetter); + JSHandle hasIndicesGetter = + CreateGetter(env, RegExp::GetHasIndices, "hasIndices", FunctionLength::ZERO); + JSHandle hasIndicesKey(factory_->NewFromASCII("hasIndices")); + SetGetter(regPrototype, hasIndicesKey, hasIndicesGetter); + JSHandle ignoreCaseGetter = CreateGetter(env, RegExp::GetIgnoreCase, "ignoreCase", FunctionLength::ZERO); JSHandle ignoreCaseKey(factory_->NewFromASCII("ignoreCase")); @@ -2060,13 +2084,15 @@ void Builtins::InitializeArray(const JSHandle &env, const JSHandle arrFuncPrototype = factory_->NewJSObjectWithInit(arrBaseFuncInstanceHClass); - JSHandle::Cast(arrFuncPrototype)->SetLength(thread_, JSTaggedValue(FunctionLength::ZERO)); + JSHandle::Cast(arrFuncPrototype)->SetLength(FunctionLength::ZERO); auto accessor = thread_->GlobalConstants()->GetArrayLengthAccessor(); JSArray::Cast(*arrFuncPrototype)->SetPropertyInlinedProps(thread_, JSArray::LENGTH_INLINE_PROPERTY_INDEX, accessor); JSHandle arrFuncPrototypeValue(arrFuncPrototype); // Array.prototype_or_hclass JSHandle arrFuncInstanceHClass = factory_->CreateJSArrayInstanceClass(arrFuncPrototypeValue); + auto globalConstant = const_cast(thread_->GlobalConstants()); + globalConstant->InitElementKindHClass(thread_, arrFuncInstanceHClass); // Array = new Function() JSHandle arrayFunction( @@ -2082,29 +2108,38 @@ void Builtins::InitializeArray(const JSHandle &env, const JSHandleSetFunctionPrototype(thread_, arrFuncInstanceHClass.GetTaggedValue()); // Array.prototype method - SetFunction(env, arrFuncPrototype, "concat", BuiltinsArray::Concat, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "concat", BuiltinsArray::Concat, FunctionLength::ONE, + BUILTINS_STUB_ID(ArrayConcat)); SetFunction(env, arrFuncPrototype, "copyWithin", BuiltinsArray::CopyWithin, FunctionLength::TWO); SetFunction(env, arrFuncPrototype, "entries", BuiltinsArray::Entries, FunctionLength::ZERO); SetFunction(env, arrFuncPrototype, "every", BuiltinsArray::Every, FunctionLength::ONE); SetFunction(env, arrFuncPrototype, "fill", BuiltinsArray::Fill, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "filter", BuiltinsArray::Filter, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "filter", BuiltinsArray::Filter, FunctionLength::ONE, + BUILTINS_STUB_ID(ArrayFilter)); SetFunction(env, arrFuncPrototype, "find", BuiltinsArray::Find, FunctionLength::ONE); SetFunction(env, arrFuncPrototype, "findIndex", BuiltinsArray::FindIndex, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "forEach", BuiltinsArray::ForEach, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "indexOf", BuiltinsArray::IndexOf, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "findLast", BuiltinsArray::FindLast, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "findLastIndex", BuiltinsArray::FindLastIndex, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "forEach", BuiltinsArray::ForEach, FunctionLength::ONE, + BUILTINS_STUB_ID(ArrayForEach)); + SetFunction(env, arrFuncPrototype, "indexOf", BuiltinsArray::IndexOf, FunctionLength::ONE, + BUILTINS_STUB_ID(ArrayIndexOf)); 
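Among the Array.prototype registrations above, findLast and findLastIndex are newly wired up; they scan from the end of the array:

```
const nums = [1, 2, 3, 4];
print(nums.findLast(x => x % 2 === 0));      // 4, the last element matching the predicate
print(nums.findLastIndex(x => x % 2 === 0)); // 3, its index
```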
SetFunction(env, arrFuncPrototype, "join", BuiltinsArray::Join, FunctionLength::ONE); SetFunction(env, arrFuncPrototype, "keys", BuiltinsArray::Keys, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "lastIndexOf", BuiltinsArray::LastIndexOf, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "lastIndexOf", BuiltinsArray::LastIndexOf, FunctionLength::ONE, + BUILTINS_STUB_ID(ArrayLastIndexOf)); SetFunction(env, arrFuncPrototype, "map", BuiltinsArray::Map, FunctionLength::ONE); SetFunction(env, arrFuncPrototype, "pop", BuiltinsArray::Pop, FunctionLength::ZERO); SetFunction(env, arrFuncPrototype, "push", BuiltinsArray::Push, FunctionLength::ONE); SetFunction(env, arrFuncPrototype, "reduce", BuiltinsArray::Reduce, FunctionLength::ONE); SetFunction(env, arrFuncPrototype, "reduceRight", BuiltinsArray::ReduceRight, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "reverse", BuiltinsArray::Reverse, FunctionLength::ZERO); + SetFunction(env, arrFuncPrototype, "reverse", BuiltinsArray::Reverse, FunctionLength::ZERO, + BUILTINS_STUB_ID(ArrayReverse)); SetFunction(env, arrFuncPrototype, "shift", BuiltinsArray::Shift, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "slice", BuiltinsArray::Slice, FunctionLength::TWO); + SetFunction(env, arrFuncPrototype, "slice", BuiltinsArray::Slice, FunctionLength::TWO, + BUILTINS_STUB_ID(ArraySlice)); SetFunction(env, arrFuncPrototype, "some", BuiltinsArray::Some, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "sort", BuiltinsArray::Sort, FunctionLength::ONE); + SetFunction(env, arrFuncPrototype, "sort", BuiltinsArray::Sort, FunctionLength::ONE, BUILTINS_STUB_ID(SORT)); SetFunction(env, arrFuncPrototype, "splice", BuiltinsArray::Splice, FunctionLength::TWO); SetFunction(env, arrFuncPrototype, thread_->GlobalConstants()->GetHandledToLocaleStringString(), BuiltinsArray::ToLocaleString, FunctionLength::ZERO); @@ -2116,12 +2151,17 @@ void Builtins::InitializeArray(const JSHandle &env, const JSHandle values(factory_->NewFromASCII("values")); JSHandle iteratorSymbol = env->GetIteratorSymbol(); JSHandle valuesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(arrFuncPrototype), values); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor iteartorDesc(thread_, valuesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, arrFuncPrototype, iteratorSymbol, iteartorDesc); @@ -2184,6 +2224,8 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand SetFunction(env, typedArrFuncPrototype, "filter", BuiltinsTypedArray::Filter, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "find", BuiltinsTypedArray::Find, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "findIndex", BuiltinsTypedArray::FindIndex, FunctionLength::ONE); + SetFunction(env, typedArrFuncPrototype, "findLast", BuiltinsTypedArray::FindLast, FunctionLength::ONE); + SetFunction(env, typedArrFuncPrototype, "findLastIndex", BuiltinsTypedArray::FindLastIndex, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "forEach", BuiltinsTypedArray::ForEach, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "indexOf", BuiltinsTypedArray::IndexOf, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "join", BuiltinsTypedArray::Join, FunctionLength::ONE); @@ -2197,11 +2239,14 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand SetFunction(env, typedArrFuncPrototype, "slice", BuiltinsTypedArray::Slice, FunctionLength::TWO); SetFunction(env, typedArrFuncPrototype, "some", 
BuiltinsTypedArray::Some, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "sort", BuiltinsTypedArray::Sort, FunctionLength::ONE); + SetFunction(env, typedArrFuncPrototype, "toSorted", BuiltinsTypedArray::ToSorted, FunctionLength::ONE); SetFunction(env, typedArrFuncPrototype, "subarray", BuiltinsTypedArray::Subarray, FunctionLength::TWO); SetFunction(env, typedArrFuncPrototype, thread_->GlobalConstants()->GetHandledToLocaleStringString(), BuiltinsTypedArray::ToLocaleString, FunctionLength::ZERO); SetFunction(env, typedArrFuncPrototype, "values", BuiltinsTypedArray::Values, FunctionLength::ZERO); + SetFunction(env, typedArrFuncPrototype, "with", BuiltinsTypedArray::With, FunctionLength::TWO); SetFunction(env, typedArrFuncPrototype, "includes", BuiltinsTypedArray::Includes, FunctionLength::ONE); + SetFunction(env, typedArrFuncPrototype, "toReversed", BuiltinsTypedArray::ToReversed, FunctionLength::ZERO); JSHandle bufferGetter = CreateGetter(env, BuiltinsTypedArray::GetBuffer, "buffer", FunctionLength::ZERO); @@ -2227,6 +2272,7 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand JSHandle arrFuncPrototype = env->GetArrayPrototype(); JSHandle toStringFunc = JSObject::GetMethod(thread_, arrFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString()); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor toStringDesc(thread_, toStringFunc, true, false, true); JSObject::DefineOwnProperty(thread_, typedArrFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), toStringDesc); @@ -2236,6 +2282,7 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand JSHandle iteratorSymbol = env->GetIteratorSymbol(); JSHandle valuesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(typedArrFuncPrototype), values); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor iteartorDesc(thread_, valuesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, typedArrFuncPrototype, iteratorSymbol, iteartorDesc); diff --git a/ecmascript/builtins/builtins_ark_tools.cpp b/ecmascript/builtins/builtins_ark_tools.cpp index e442c22befdb9ca059cd9cdc1c954c1ec8b04116..ec61e459b50979db37bd6419410b9d9845df6b85 100644 --- a/ecmascript/builtins/builtins_ark_tools.cpp +++ b/ecmascript/builtins/builtins_ark_tools.cpp @@ -38,6 +38,7 @@ JSTaggedValue BuiltinsArkTools::ObjectDump(EcmaRuntimeCallInfo *info) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle str = JSTaggedValue::ToString(thread, GetCallArg(info, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // The default log level of ace_engine and js_runtime is error LOG_ECMA(ERROR) << ": " << EcmaStringAccessor(str).ToStdString(); @@ -209,6 +210,7 @@ JSTaggedValue BuiltinsArkTools::StartCpuProfiler(EcmaRuntimeCallInfo *info) std::string fileName = ""; if (fileNameValue->IsString()) { JSHandle str = JSTaggedValue::ToString(thread, fileNameValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); fileName = EcmaStringAccessor(str).ToStdString() + ".cpuprofile"; } else { fileName = GetProfileName(); diff --git a/ecmascript/builtins/builtins_array.cpp b/ecmascript/builtins/builtins_array.cpp index ebc838c025cd998b720097d84bb290de1ce43c14..e6922fa252b6b564bf15072e6d6cd582a7247858 100644 --- a/ecmascript/builtins/builtins_array.cpp +++ b/ecmascript/builtins/builtins_array.cpp @@ -21,6 +21,7 @@ #include "ecmascript/base/number_helper.h" #include "ecmascript/base/typed_array_helper-inl.h" #include "ecmascript/base/typed_array_helper.h" +#include "ecmascript/ecma_macros.h" #include 
"ecmascript/ecma_runtime_call_info.h" #include "ecmascript/ecma_string.h" #include "ecmascript/global_env.h" @@ -29,8 +30,10 @@ #include "ecmascript/js_array_iterator.h" #include "ecmascript/js_function.h" #include "ecmascript/js_handle.h" +#include "ecmascript/js_map_iterator.h" #include "ecmascript/js_stable_array.h" #include "ecmascript/js_tagged_number.h" +#include "ecmascript/js_tagged_value.h" #include "ecmascript/object_factory.h" #include "ecmascript/object_fast_operator-inl.h" #include "ecmascript/tagged_array-inl.h" @@ -65,14 +68,14 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) // 22.1.1.1 Array ( ) if (argc == 0) { // 6. Return ArrayCreate(0, proto). - return JSTaggedValue(JSArray::ArrayCreate(thread, JSTaggedNumber(0), newTarget).GetObject()); + return JSArray::ArrayCreate(thread, JSTaggedNumber(0), newTarget).GetTaggedValue(); } // 22.1.1.2 Array(len) if (argc == 1) { // 6. Let array be ArrayCreate(0, proto). - uint32_t newLen = 0; - JSHandle newArrayHandle(JSArray::ArrayCreate(thread, JSTaggedNumber(newLen), newTarget)); + JSHandle newArrayHandle(JSArray::ArrayCreate(thread, JSTaggedNumber(0), newTarget)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle len = GetCallArg(argv, 0); // 7. If Type(len) is not Number, then // a. Let defineStatus be CreateDataProperty(array, "0", len). @@ -83,6 +86,7 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) // b. If intLen ≠ len, throw a RangeError exception. // 9. Let setStatus be Set(array, "length", intLen, true). // 10. Assert: setStatus is not an abrupt completion. + uint32_t newLen = 0; if (!len->IsNumber()) { JSHandle key0 = thread->GlobalConstants()->GetHandledZeroString(); JSObject::CreateDataProperty(thread, newArrayHandle, key0, len); @@ -91,10 +95,10 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) newLen = JSTaggedValue::ToUint32(thread, len); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (JSTaggedNumber(len.GetTaggedValue()).GetNumber() != newLen) { - THROW_RANGE_ERROR_AND_RETURN(thread, "The length is out of range.", JSTaggedValue::Exception()); + THROW_RANGE_ERROR_AND_RETURN(thread, "Invalid array length", JSTaggedValue::Exception()); } } - JSArray::SetCapacity(thread, newArrayHandle, 0, newLen); + JSArray::SetCapacity(thread, newArrayHandle, 0, newLen, true); // 11. Return array. return newArrayHandle.GetTaggedValue(); @@ -107,7 +111,6 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) THROW_TYPE_ERROR_AND_RETURN(thread, "Failed to create array.", JSTaggedValue::Exception()); } JSHandle newArrayHandle(thread, newArray); - // 8. Let k be 0. // 9. Let items be a zero-origined List containing the argument items in order. // 10. Repeat, while k < numberOfArgs @@ -167,6 +170,11 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) // 6. If usingIterator is not undefined, then JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); if (!usingIterator->IsUndefined()) { + // Fast path for MapIterator + if (!mapping && items->IsJSMapIterator()) { + return JSMapIterator::MapIteratorToList(thread, items, usingIterator); + } + // a. If IsConstructor(C) is true, then // i. Let A be Construct(C). // b. 
Else, @@ -209,7 +217,7 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) if (next->IsFalse()) { JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), lengthKey, key, true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - return JSTaggedValue(newArrayHandle.GetTaggedValue()); + return newArrayHandle.GetTaggedValue(); } // v. Let nextValue be IteratorValue(next). JSHandle nextValue = JSIterator::IteratorValue(thread, next); @@ -221,15 +229,15 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) // 3. Let mappedValue be mappedValue.[[value]]. // viii. Else, let mappedValue be nextValue. if (mapping) { - const int32_t argsLength = 2; // 2: «nextValue, k» + const uint32_t argsLength = 2; // 2: «nextValue, k» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, mapfn, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(nextValue.GetTaggedValue(), key.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); - mapValue.Update(callResult); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, - JSIterator::IteratorClose(thread, iterator, mapValue).GetTaggedValue()); + JSIterator::IteratorClose(thread, iterator, mapValue).GetTaggedValue()); + mapValue.Update(callResult); } else { mapValue.Update(nextValue.GetTaggedValue()); } @@ -239,7 +247,7 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) JSHandle defineStatus( thread, JSTaggedValue(JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, key, mapValue))); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, - JSIterator::IteratorClose(thread, iterator, defineStatus).GetTaggedValue()); + JSIterator::IteratorClose(thread, iterator, defineStatus).GetTaggedValue()); k++; } } @@ -290,7 +298,7 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (mapping) { key.Update(JSTaggedValue(k)); - const int32_t argsLength = 2; // 2: «kValue, k» + const uint32_t argsLength = 2; // 2: «kValue, k» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, mapfn, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -298,7 +306,6 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) JSTaggedValue callResult = JSFunction::Call(info); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); mapValue.Update(callResult); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } else { mapValue.Update(kValue.GetTaggedValue()); } @@ -404,132 +411,107 @@ JSTaggedValue BuiltinsArray::Concat(EcmaRuntimeCallInfo *argv) BUILTINS_API_TRACE(argv->GetThread(), Array, Concat); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - uint32_t argc = argv->GetArgsNumber(); + int argc = static_cast(argv->GetArgsNumber()); // 1. Let O be ToObject(this value). JSHandle thisHandle = GetThis(argv); JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); - // 2. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObjVal(thisObjHandle); - // 3. Let A be ArraySpeciesCreate(O, 0). + // 2. Let A be ArraySpeciesCreate(O, 0). uint32_t arrayLen = 0; JSTaggedValue newArray = JSArray::ArraySpeciesCreate(thread, thisObjHandle, JSTaggedNumber(arrayLen)); - // 4. ReturnIfAbrupt(A). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle newArrayHandle(thread, newArray); - // 5. Let n be 0. 
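The Array.from hunk above adds a fast path that calls JSMapIterator::MapIteratorToList when the source is a Map iterator and no mapping function is supplied; the observable result is unchanged:

```
const m = new Map([["a", 1], ["b", 2]]);
print(Array.from(m.keys()));                // a,b   (no mapfn, eligible for the fast path)
print(Array.from(m.values(), v => v * 10)); // 10,20 (a mapfn forces the generic path)
```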
+ JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + // Fast path + int64_t arrLen = ArrayHelper::GetArrayLength(thread, thisObjVal); + if (arrLen == 0 && argc == 1) { + JSHandle argHandle = GetCallArg(argv, 0); + int64_t argLen = ArrayHelper::GetArrayLength(thread, argHandle); + if (argLen == 0 && argHandle->IsJSArray()) { + JSHandle lenHandle(thread, JSTaggedValue(arrLen)); + JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), + lengthKey, lenHandle, true); + return newArrayHandle.GetTaggedValue(); + } + } + + // 3. Let n be 0. int64_t n = 0; + JSMutableHandle ele(thread, JSTaggedValue::Undefined()); JSMutableHandle fromKey(thread, JSTaggedValue::Undefined()); JSMutableHandle toKey(thread, JSTaggedValue::Undefined()); - bool isSpreadable = ArrayHelper::IsConcatSpreadable(thread, thisHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (isSpreadable) { - int64_t thisLen = ArrayHelper::GetArrayLength(thread, thisObjVal); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (n + thisLen > base::MAX_SAFE_INTEGER) { - THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); - } - int64_t k = 0; - if (thisObjVal->IsStableJSArray(thread)) { - JSStableArray::Concat(thread, newArrayHandle, thisObjHandle, k, n); - } - while (k < thisLen) { - fromKey.Update(JSTaggedValue(k)); - toKey.Update(JSTaggedValue(n)); - bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, fromKey); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (exists) { - JSHandle fromValHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, fromKey); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - } - n++; - k++; - } - } else { - if (n >= base::MAX_SAFE_INTEGER) { - THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); + // 4. Prepend O to items. + // 5. For each element E of items, do + for (int i = -1; i < argc; i++) { + if (i < 0) { + ele.Update(thisObjHandle.GetTaggedValue()); + } else { + ele.Update(GetCallArg(argv, i)); } - JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, n, thisObjVal); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - n++; - } - // 7. Repeat, while items is not empty - for (uint32_t i = 0; i < argc; i++) { - // a. Remove the first element from items and let E be the value of the element - JSHandle addHandle = GetCallArg(argv, i); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSHandle addObjHandle(addHandle); - - // b. Let spreadable be IsConcatSpreadable(E). - isSpreadable = ArrayHelper::IsConcatSpreadable(thread, addHandle); - // c. ReturnIfAbrupt(spreadable). + // a. Let spreadable be ? IsConcatSpreadable(E). + bool isSpreadable = ArrayHelper::IsConcatSpreadable(thread, ele); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // d. If spreadable is true, then + // b. If spreadable is true, then if (isSpreadable) { - // ii. Let len be ToLength(Get(E, "length")). - int64_t len = ArrayHelper::GetArrayLength(thread, JSHandle::Cast(addObjHandle)); - // iii. ReturnIfAbrupt(len). + // i. Let k be 0. + // ii. Let len be ? LengthOfArrayLike(E). + // iii. If n + len > 2^53 - 1, throw a TypeError exception. + int64_t len = ArrayHelper::GetArrayLength(thread, ele); + int64_t k = 0; RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // iv. If n + len > 2^53 - 1, throw a TypeError exception.
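The rewritten Concat loop treats the receiver as the first item and asks ArrayHelper::IsConcatSpreadable whether each item is flattened or appended whole. At the JS level the decision is driven by Symbol.isConcatSpreadable and IsArray:

```
const arrayLike = { length: 2, 0: "a", 1: "b", [Symbol.isConcatSpreadable]: true };
print([1, 2].concat(arrayLike)); // 1,2,a,b  (spread because the well-known symbol is set)
print([1, 2].concat("xy"));      // 1,2,xy   (non-spreadable values are appended as one element)
```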
if (n + len > base::MAX_SAFE_INTEGER) { THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); } - int64_t k = 0; - JSHandle addObjVal(addObjHandle); - if (addObjVal->IsStableJSArray(thread)) { - JSStableArray::Concat(thread, newArrayHandle, addObjHandle, k, n); + + if (ele->IsStableJSArray(thread)) { + JSStableArray::Concat(thread, newArrayHandle, JSHandle::Cast(ele), k, n); } - // v. Repeat, while k < len + // iv. Repeat, while k < len, while (k < len) { - fromKey.Update(JSTaggedValue(k)); - toKey.Update(JSTaggedValue(n)); // 1. Let P be ToString(k). // 2. Let exists be HasProperty(E, P). - // 4. If exists is true, then + // 3. If exists is true, then + fromKey.Update(JSTaggedValue(k)); + toKey.Update(JSTaggedValue(n)); + bool exists = JSTaggedValue::HasProperty(thread, ele, fromKey); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (exists) { // a. Let subElement be Get(E, P). JSHandle fromValHandle = - JSArray::FastGetPropertyByValue(thread, addHandle, fromKey); - // b. ReturnIfAbrupt(subElement). + JSArray::FastGetPropertyByValue(thread, ele, fromKey); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // b. Perform ? CreateDataPropertyOrThrow(A, ! ToString(𝔽(n)), subElement). JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValHandle); - // d. ReturnIfAbrupt(status). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } - // 5. Increase n by 1. - // 6. Increase k by 1. + // 4. Set n to n + 1. + // 5. Set k to k + 1. n++; k++; } - } else { // e. Else E is added as a single item rather than spread, - // i. If n ≥ 2^53 - 1, throw a TypeError exception. + // c. Else + } else { + // ii. If n ≥ 2^53 - 1, throw a TypeError exception. if (n >= base::MAX_SAFE_INTEGER) { THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); } - // ii. Let status be CreateDataPropertyOrThrow (A, ToString(n), E). - JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, n, addHandle); - // iii. ReturnIfAbrupt(status). + // iii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(𝔽(n)), E). + // iv. Set n to n + 1. + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, n, ele); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); n++; } } - // 8. Let setStatus be Set(A, "length", n, true). - JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + // 6. Perform ? Set(A, "length", 𝔽(n), true). JSHandle lenHandle(thread, JSTaggedValue(n)); JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), lengthKey, lenHandle, true); - // 9. ReturnIfAbrupt(setStatus). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 10. Return A. + // 7. Return A.
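base::MAX_SAFE_INTEGER in the bounds checks above corresponds to the spec limit of 2^53 - 1 on the result length:

```
print(Number.MAX_SAFE_INTEGER === 2 ** 53 - 1); // true
```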
return newArrayHandle.GetTaggedValue(); } @@ -644,6 +626,7 @@ JSTaggedValue BuiltinsArray::CopyWithin(EcmaRuntimeCallInfo *argv) } else { if (thisObjVal->IsJSProxy()) { toKey.Update(JSTaggedValue::ToString(thread, toKey).GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } JSTaggedValue::DeletePropertyOrThrow(thread, thisObjVal, toKey); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -726,7 +709,7 @@ JSTaggedValue BuiltinsArray::Every(EcmaRuntimeCallInfo *argv) } } JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» while (k < len) { bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -839,6 +822,13 @@ JSTaggedValue BuiltinsArray::Fill(EcmaRuntimeCallInfo *argv) return thisObjHandle.GetTaggedValue(); } } + if (thisHandle->IsTypedArray()) { + bool result = JSTypedArray::FastTypedArrayFill(thread, thisHandle, value, start, end); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (result) { + return thisObjHandle.GetTaggedValue(); + } + } int64_t k = start; while (k < end) { key.Update(JSTaggedValue(k)); @@ -911,7 +901,7 @@ JSTaggedValue BuiltinsArray::Filter(EcmaRuntimeCallInfo *argv) JSStableArray::Filter(newArrayHandle, thisObjHandle, argv, k, toIndex); } JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSTaggedValue callResult = GetTaggedBoolean(true); while (k < len) { bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k); @@ -925,6 +915,7 @@ JSTaggedValue BuiltinsArray::Filter(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (callResult.ToBoolean()) { toIndexHandle.Update(JSTaggedValue(toIndex)); JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toIndexHandle, kValue); @@ -983,16 +974,15 @@ JSTaggedValue BuiltinsArray::Find(EcmaRuntimeCallInfo *argv) JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); key.Update(JSTaggedValue(k)); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); - bool boolResult = callResult.ToBoolean(); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (boolResult) { + if (callResult.ToBoolean()) { return kValue.GetTaggedValue(); } k++; @@ -1044,13 +1034,14 @@ JSTaggedValue BuiltinsArray::FindIndex(EcmaRuntimeCallInfo *argv) JSTaggedValue callResult = GetTaggedBoolean(true); if (thisObjVal->IsStableJSArray(thread)) { callResult = JSStableArray::HandleFindIndexOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (callResult.ToBoolean()) { return GetTaggedDouble(k); } } JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSHandle 
undefined = thread->GlobalConstants()->GetHandledUndefined(); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» while (k < len) { JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1060,6 +1051,7 @@ JSTaggedValue BuiltinsArray::FindIndex(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (callResult.ToBoolean()) { return GetTaggedDouble(k); } @@ -1113,9 +1105,10 @@ JSTaggedValue BuiltinsArray::ForEach(EcmaRuntimeCallInfo *argv) JSMutableHandle key(thread, JSTaggedValue::Undefined()); uint32_t k = 0; if (thisObjVal->IsStableJSArray(thread)) { - JSStableArray::HandleforEachOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, k); + JSStableArray::HandleforEachOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, len, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); while (k < len) { bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k); @@ -1138,89 +1131,91 @@ JSTaggedValue BuiltinsArray::ForEach(EcmaRuntimeCallInfo *argv) return JSTaggedValue::Undefined(); } -// 22.1.3.11 Array.prototype.indexOf ( searchElement [ , fromIndex ] ) -JSTaggedValue BuiltinsArray::IndexOf(EcmaRuntimeCallInfo *argv) +JSTaggedValue BuiltinsArray::IndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) { - ASSERT(argv); - BUILTINS_API_TRACE(argv->GetThread(), Array, IndexOf); - JSThread *thread = argv->GetThread(); - [[maybe_unused]] EcmaHandleScope handleScope(thread); - + int64_t length = JSHandle::Cast(thisHandle)->GetArrayLength(); + if (length == 0) { + return JSTaggedValue(-1); + } + int64_t fromIndex = 0; uint32_t argc = argv->GetArgsNumber(); + // 2: [target, fromIndex]. Note that fromIndex is missing in most usage cases. + if (UNLIKELY(argc >= 2)) { + JSHandle fromIndexHandle = argv->GetCallArg(1); + fromIndex = ArrayHelper::GetStartIndex(thread, fromIndexHandle, length); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Slow path when fromIndex is obtained from an ECMAObject + // due to potential side effects in its 'toString' and 'valueOf' methods which modify the array object. + if (UNLIKELY(fromIndexHandle->IsECMAObject())) { + return IndexOfSlowPath(argv, thread, thisHandle, length, fromIndex); + } + } + if (fromIndex >= length) { + return JSTaggedValue(-1); + } + JSHandle target = GetCallArg(argv, 0); + return JSStableArray::IndexOf( + thread, thisHandle, target, static_cast(fromIndex), static_cast(length)); +} +JSTaggedValue BuiltinsArray::IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) +{ // 1. Let O be ToObject(this value). - JSHandle thisHandle = GetThis(argv); JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // 2. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObjVal(thisObjHandle); - - JSHandle searchElement = GetCallArg(argv, 0); - // 3. Let len be ToLength(Get(O, "length")). - int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + int64_t length = ArrayHelper::GetLength(thread, thisObjVal); // 4. 
ReturnIfAbrupt(len). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 5. If len is 0, return −1. - if (len == 0) { - return GetTaggedInt(-1); + if (length == 0) { + return JSTaggedValue(-1); } - // 6. If argument fromIndex was passed let n be ToInteger(fromIndex); else let n be 0. - double fromIndex = 0; - if (argc > 1) { - JSHandle msg1 = GetCallArg(argv, 1); - JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, msg1); - // 7. ReturnIfAbrupt(n). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - fromIndex = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); - } + int64_t fromIndex = ArrayHelper::GetStartIndexFromArgs(thread, argv, 1, length); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return IndexOfSlowPath(argv, thread, thisObjVal, length, fromIndex); +} - // 8. If n ≥ len, return −1. - if (fromIndex >= len) { - return GetTaggedInt(-1); +JSTaggedValue BuiltinsArray::IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, + int64_t length, int64_t fromIndex) +{ + if (fromIndex >= length) { + return JSTaggedValue(-1); } - - // 9. If n ≥ 0, then - // a. Let k be n. - // 10. Else n<0, - // a. Let k be len - abs(n). - // b. If k < 0, let k be 0. - int64_t from = (fromIndex >= 0) ? fromIndex : ((len + fromIndex) >= 0 ? len + fromIndex : 0); - - // if it is stable array, we can go to fast path - if (thisObjVal->IsStableJSArray(thread)) { - return JSStableArray::IndexOf(thread, thisObjVal, searchElement, static_cast(from), - static_cast(len)); - } - - // 11. Repeat, while k key(thread, JSTaggedValue::Undefined()); - while (from < len) { - key.Update(JSTaggedValue(from)); - bool exists = (thisHandle->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, key)); + JSMutableHandle keyHandle(thread, JSTaggedValue::Undefined()); + JSHandle target = GetCallArg(argv, 0); + // 11. Repeat, while k < len + for (int64_t curIndex = fromIndex; curIndex < length; ++curIndex) { + keyHandle.Update(JSTaggedValue(curIndex)); + bool found = ArrayHelper::ElementIsStrictEqualTo(thread, thisObjVal, keyHandle, target); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (exists) { - JSHandle kValueHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, key); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (JSTaggedValue::StrictEqual(thread, searchElement, kValueHandle)) { - return GetTaggedDouble(from); - } + if (UNLIKELY(found)) { + return JSTaggedValue(curIndex); } - from++; } - // 12. Return -1. 
- return GetTaggedInt(-1); + return JSTaggedValue(-1); +} + +// 22.1.3.11 Array.prototype.indexOf ( searchElement [ , fromIndex ] ) +JSTaggedValue BuiltinsArray::IndexOf(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, IndexOf); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + JSHandle thisHandle = GetThis(argv); + if (thisHandle->IsStableJSArray(thread)) { + return IndexOfStable(argv, thread, thisHandle); + } + return IndexOfSlowPath(argv, thread, thisHandle); } // 22.1.3.12 Array.prototype.join (separator) @@ -1231,21 +1226,26 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisHandle = GetThis(argv); + auto factory = thread->GetEcmaVM()->GetFactory(); + auto context = thread->GetCurrentEcmaContext(); + bool noCircular = context->JoinStackPushFastPath(thisHandle); + if (!noCircular) { + return factory->GetEmptyString().GetTaggedValue(); + } if (thisHandle->IsStableJSArray(thread)) { return JSStableArray::Join(JSHandle::Cast(thisHandle), argv); } - auto factory = thread->GetEcmaVM()->GetFactory(); // 1. Let O be ToObject(this value). JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // 2. ReturnIfAbrupt(O). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); JSHandle thisObjVal(thisObjHandle); // 3. Let len be ToLength(Get(O, "length")). int64_t len = ArrayHelper::GetLength(thread, thisObjVal); // 4. ReturnIfAbrupt(len). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); // 5. If separator is undefined, let separator be the single-element String ",". // 6. Let sep be ToString(separator). @@ -1258,7 +1258,7 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) JSHandle sepStringHandle = JSTaggedValue::ToString(thread, sepHandle); // 7. ReturnIfAbrupt(sep). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); std::u16string sepStr = EcmaStringAccessor(sepStringHandle).ToU16String(); // 8. If len is zero, return the empty String. @@ -1278,22 +1278,19 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) // e. Let R be a String value produced by concatenating S and next. // f. Increase k by 1. std::u16string concatStr; - std::u16string concatStrNew; for (int64_t k = 0; k < len; k++) { std::u16string nextStr; JSHandle element = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); if (!element->IsUndefined() && !element->IsNull()) { JSHandle nextStringHandle = JSTaggedValue::ToString(thread, element); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); nextStr = EcmaStringAccessor(nextStringHandle).ToU16String(); } if (k > 0) { - concatStrNew = base::StringHelper::Append(concatStr, sepStr); - concatStr = base::StringHelper::Append(concatStrNew, nextStr); - continue; + concatStr.append(sepStr); } - concatStr = base::StringHelper::Append(concatStr, nextStr); + concatStr.append(nextStr); } // 14. Return R. 
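The Join changes above guard against self-referential arrays: `JoinStackPushFastPath` registers `thisHandle` before any user-observable work, the empty string is returned if this array is already being joined, and the matching pop (added in the next hunk, plus `RETURN_EXCEPTION_AND_POP_JOINSTACK` on every error path) keeps the stack balanced. A minimal standalone sketch of that guard together with the switch to in-place `append`; the toy `Array`, `Value`, and `JoinStackGuard` types below are illustrative stand-ins, not ets_runtime API:

```
#include <algorithm>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

struct Array;  // a toy JS-like array: elements are ints or nested arrays
using Value = std::variant<int, Array*>;
struct Array { std::vector<Value> elems; };

// Thread-local "join stack": arrays currently being joined.
thread_local std::vector<const Array*> g_joinStack;

// RAII guard mirroring the push/pop pairing in the patch: the pop happens on
// every exit path, including early returns.
struct JoinStackGuard {
    bool pushed = false;
    explicit JoinStackGuard(const Array* a)
    {
        if (std::find(g_joinStack.begin(), g_joinStack.end(), a) == g_joinStack.end()) {
            g_joinStack.push_back(a);
            pushed = true;
        }
    }
    ~JoinStackGuard() { if (pushed) g_joinStack.pop_back(); }
};

std::string Join(const Array& a, const std::string& sep)
{
    JoinStackGuard guard(&a);
    if (!guard.pushed) {
        return "";  // circular: same behavior as the fast-path bailout above
    }
    std::string out;
    for (size_t k = 0; k < a.elems.size(); ++k) {
        if (k > 0) out.append(sep);  // in-place append, not copy-and-concat
        if (auto* nested = std::get_if<Array*>(&a.elems[k])) {
            out.append(Join(**nested, sep));
        } else {
            out.append(std::to_string(std::get<int>(a.elems[k])));
        }
    }
    return out;
}

int main()
{
    Array a;
    a.elems = {1, 2};
    a.elems.push_back(&a);  // a = [1, 2, a] is cyclic
    std::cout << Join(a, ",") << std::endl;  // prints "1,2," instead of recursing
}
```

An RAII guard is the simplest way to guarantee the pop on every exit path, which is what the scattered `RETURN_EXCEPTION_AND_POP_JOINSTACK` macros accomplish by hand in the engine code.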
@@ -1301,6 +1298,7 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) auto *char16tData = const_cast(constChar16tData); auto *uint16tData = reinterpret_cast(char16tData); uint32_t u16strSize = concatStr.size(); + context->JoinStackPopFastPath(thisHandle); return factory->NewFromUtf16Literal(uint16tData, u16strSize).GetTaggedValue(); } @@ -1321,82 +1319,90 @@ JSTaggedValue BuiltinsArray::Keys(EcmaRuntimeCallInfo *argv) return iter.GetTaggedValue(); } -// 22.1.3.14 Array.prototype.lastIndexOf ( searchElement [ , fromIndex ] ) -JSTaggedValue BuiltinsArray::LastIndexOf(EcmaRuntimeCallInfo *argv) +JSTaggedValue BuiltinsArray::LastIndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) { - ASSERT(argv); - BUILTINS_API_TRACE(argv->GetThread(), Array, LastIndexOf); - JSThread *thread = argv->GetThread(); - [[maybe_unused]] EcmaHandleScope handleScope(thread); - + int64_t length = JSHandle::Cast(thisHandle)->GetArrayLength(); + if (length == 0) { + return JSTaggedValue(-1); + } + int64_t fromIndex = length - 1; uint32_t argc = argv->GetArgsNumber(); + // 2: [target, fromIndex]. Note that fromIndex is missing in most usage cases. + if (UNLIKELY(argc >= 2)) { + JSHandle fromIndexHandle = argv->GetCallArg(1); + fromIndex = ArrayHelper::GetLastStartIndex(thread, fromIndexHandle, length); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Slow path when fromIndex is obtained from an ECMAObject + // due to potential side effects in its 'toString' and 'valueOf' methods which modify the array object. + if (UNLIKELY(fromIndexHandle->IsECMAObject())) { + return LastIndexOfSlowPath(argv, thread, thisHandle, fromIndex); + } + } + if (fromIndex < 0) { + return JSTaggedValue(-1); + } + JSHandle target = GetCallArg(argv, 0); + return JSStableArray::LastIndexOf( + thread, thisHandle, target, static_cast(fromIndex), static_cast(length)); +} +JSTaggedValue BuiltinsArray::LastIndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) +{ // 1. Let O be ToObject(this value). - JSHandle thisHandle = GetThis(argv); JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // 2. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObjVal(thisObjHandle); - - JSHandle searchElement = GetCallArg(argv, 0); - // 3. Let len be ToLength(Get(O, "length")). - int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + int64_t length = ArrayHelper::GetLength(thread, thisObjVal); // 4. ReturnIfAbrupt(len). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 5. If len is 0, return −1. - if (len == 0) { - return GetTaggedInt(-1); + if (length == 0) { + return JSTaggedValue(-1); } + // 6. If argument fromIndex was passed let n be ToInteger(fromIndex); else let n be 0. + int64_t fromIndex = ArrayHelper::GetLastStartIndexFromArgs(thread, argv, 1, length); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return LastIndexOfSlowPath(argv, thread, thisObjVal, fromIndex); +} - // 6. If argument fromIndex was passed let n be ToInteger(fromIndex); else let n be len-1. - double fromIndex = len - 1; - if (argc > 1) { - JSHandle msg1 = GetCallArg(argv, 1); - JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, msg1); - // 7. ReturnIfAbrupt(n). 
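Both `IndexOfStable` and `LastIndexOfStable` abandon the fast path as soon as `fromIndex` is an ECMAObject: converting it to an integer can run user-defined `valueOf`/`toString`, and that user code may mutate the array out from under anything the fast scan relies on. A small self-contained illustration of the hazard, with a mutating callback standing in for user code (none of this is engine API):

```
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

// "ToInteger(fromIndex)" on an object can run arbitrary user code; we model
// that as a callback that may mutate the elements while producing the index.
using FromIndexFn = std::function<int(std::vector<int>&)>;

int LastIndexOf(std::vector<int>& elems, int target, const FromIndexFn& fromIndex)
{
    // Fast-path temptation: cache raw storage before converting fromIndex.
    // int* raw = elems.data();     // would dangle if user code resizes below
    int start = fromIndex(elems);   // user code may reallocate/shrink here
    // Slow path: go back through the container, bounds-checked, instead of
    // touching anything cached before the side effect.
    int len = static_cast<int>(elems.size());
    for (int k = std::min(start, len - 1); k >= 0; --k) {
        if (elems[k] == target) {
            return k;
        }
    }
    return -1;
}

int main()
{
    std::vector<int> elems = {10, 20, 30, 40};
    FromIndexFn evil = [](std::vector<int>& e) {
        e.assign(1, 10);  // side effect: array becomes {10}
        return 3;         // ...while asking the scan to start at index 3
    };
    std::cout << LastIndexOf(elems, 40, evil) << std::endl;  // -1, no stale read
}
```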
-        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-        fromIndex = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber());
+JSTaggedValue BuiltinsArray::LastIndexOfSlowPath(
+    EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle<JSTaggedValue> &thisObjVal, int64_t fromIndex)
+{
+    if (fromIndex < 0) {
+        return JSTaggedValue(-1);
     }
-
-    // 8. If n ≥ 0, let k be min(n, len – 1).
-    // 9. Else n < 0,
-    //   a. Let k be len - abs(n).
-    int64_t from = 0;
-    if (fromIndex >= 0) {
-        from = (len - 1) < fromIndex ? len - 1 : fromIndex;
-    } else {
-        double tempFrom = len + fromIndex;
-        from = tempFrom >= 0 ? tempFrom : -1;
-    }
-
-    // 10. Repeat, while k≥ 0
-    //   a. Let kPresent be HasProperty(O, ToString(k)).
-    //   b. ReturnIfAbrupt(kPresent).
-    //   c. If kPresent is true, then
-    //     i. Let elementK be Get(O, ToString(k)).
-    //     ii. ReturnIfAbrupt(elementK).
-    //     iii. Let same be the result of performing Strict Equality Comparison searchElement === elementK.
-    //     iv. If same is true, return k.
-    //   d. Decrease k by 1.
-    JSMutableHandle<JSTaggedValue> key(thread, JSTaggedValue::Undefined());
-    while (from >= 0) {
-        key.Update(JSTaggedValue(from));
-        bool exists = (thisHandle->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, key));
+    JSMutableHandle<JSTaggedValue> keyHandle(thread, JSTaggedValue::Undefined());
+    JSHandle<JSTaggedValue> target = base::BuiltinsBase::GetCallArg(argv, 0);
+    // 10. Repeat, while k ≥ 0
+    for (int64_t curIndex = fromIndex; curIndex >= 0; --curIndex) {
+        keyHandle.Update(JSTaggedValue(curIndex));
+        bool found = ArrayHelper::ElementIsStrictEqualTo(thread, thisObjVal, keyHandle, target);
         RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-        if (exists) {
-            JSHandle<JSTaggedValue> kValueHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, key);
-            RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-            if (JSTaggedValue::StrictEqual(thread, searchElement, kValueHandle)) {
-                return GetTaggedDouble(from);
-            }
+        if (UNLIKELY(found)) {
+            return JSTaggedValue(curIndex);
         }
-        from--;
     }
+    // 11. Return -1.
+    return JSTaggedValue(-1);
+}
 
-    // 11. Return -1.
-    return GetTaggedInt(-1);
+// 22.1.3.14 Array.prototype.lastIndexOf ( searchElement [ , fromIndex ] )
+JSTaggedValue BuiltinsArray::LastIndexOf(EcmaRuntimeCallInfo *argv)
+{
+    ASSERT(argv);
+    BUILTINS_API_TRACE(argv->GetThread(), Array, LastIndexOf);
+    JSThread *thread = argv->GetThread();
+    [[maybe_unused]] EcmaHandleScope handleScope(thread);
+
+    JSHandle<JSTaggedValue> thisHandle = GetThis(argv);
+    if (thisHandle->IsStableJSArray(thread)) {
+        return LastIndexOfStable(argv, thread, thisHandle);
+    }
+    return LastIndexOfSlowPath(argv, thread, thisHandle);
 }
 
 // 22.1.3.15 Array.prototype.map ( callbackfn [ , thisArg ] )
@@ -1459,7 +1465,7 @@ JSTaggedValue BuiltinsArray::Map(EcmaRuntimeCallInfo *argv)
     JSMutableHandle<JSTaggedValue> key(thread, JSTaggedValue::Undefined());
     JSMutableHandle<JSTaggedValue> mapResultHandle(thread, JSTaggedValue::Undefined());
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
-    const int32_t argsLength = 3; // 3: «kValue, k, O»
+    const uint32_t argsLength = 3; // 3: «kValue, k, O»
     while (k < len) {
         bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k);
         RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
@@ -1663,6 +1669,10 @@ JSTaggedValue BuiltinsArray::Reduce(EcmaRuntimeCallInfo *argv)
         }
     }
 
+    if (thisObjVal->IsStableJSArray(thread)) {
+        JSStableArray::Reduce(thread, thisObjHandle, callbackFnHandle, accumulator, k, len);
+    }
+
     // 10. Repeat, while k < len
     //     a. Let Pk be ToString(k).
     //     b. Let kPresent be HasProperty(O, Pk).
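For reference, the start-index clamping that `GetLastStartIndex`/`GetLastStartIndexFromArgs` perform for the `lastIndexOf` rewrite above is just the spec rule "if n ≥ 0, k = min(n, len - 1); else k = len + n". A hedged sketch of that arithmetic (the real ArrayHelper signatures and edge handling may differ):

```
#include <cassert>
#include <cstdint>

// Spec clamping for Array.prototype.lastIndexOf's fromIndex (ES 22.1.3.14):
// n >= 0  -> k = min(n, len - 1)
// n <  0  -> k = len + n  (may go negative, meaning "nothing to scan")
int64_t GetLastStartIndex(double n, int64_t len)
{
    if (n >= 0) {
        return n < static_cast<double>(len - 1) ? static_cast<int64_t>(n) : len - 1;
    }
    double k = static_cast<double>(len) + n;
    return k >= 0 ? static_cast<int64_t>(k) : -1;  // -1: caller returns not-found
}

int main()
{
    assert(GetLastStartIndex(2, 5) == 2);    // within range
    assert(GetLastStartIndex(99, 5) == 4);   // clamped to len - 1
    assert(GetLastStartIndex(-2, 5) == 3);   // len + n
    assert(GetLastStartIndex(-99, 5) == -1); // before the start: scan nothing
    return 0;
}
```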
@@ -1826,6 +1836,10 @@ JSTaggedValue BuiltinsArray::Reverse(EcmaRuntimeCallInfo *argv) int64_t len = ArrayHelper::GetLength(thread, thisObjVal); // 4. ReturnIfAbrupt(len). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Fast path for stable array. Returns thisValue. + if (thisObjVal->IsStableJSArray(thread)) { + return JSStableArray::Reverse(thread, thisObjHandle, len); + } // 5. Let middle be floor(len/2). int64_t middle = std::floor(len / 2); @@ -1869,9 +1883,6 @@ JSTaggedValue BuiltinsArray::Reverse(EcmaRuntimeCallInfo *argv) JSMutableHandle upperP(thread, JSTaggedValue::Undefined()); JSHandle lowerValueHandle(thread, JSTaggedValue::Undefined()); JSHandle upperValueHandle(thread, JSTaggedValue::Undefined()); - if (thisObjVal->IsStableJSArray(thread)) { - JSStableArray::Reverse(thread, thisObjHandle, thisHandle, lower, len); - } while (lower != middle) { int64_t upper = len - lower - 1; lowerP.Update(JSTaggedValue(lower)); @@ -2068,25 +2079,7 @@ JSTaggedValue BuiltinsArray::Slice(EcmaRuntimeCallInfo *argv) int64_t count = final > k ? (final - k) : 0; if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JSHandle destElements = factory->NewTaggedArray(count); - JSHandle newArrayHandle = factory->NewJSStableArrayWithElements(destElements); - TaggedArray *srcElements = TaggedArray::Cast(thisObjHandle->GetElements().GetTaggedObject()); - - uint32_t length = srcElements->GetLength(); - if (length > k + count) { - for (uint32_t idx = 0; idx < count; idx++) { - destElements->Set(thread, idx, srcElements->Get(k + idx)); - } - } else { - for (uint32_t idx = 0; idx < count; idx++) { - uint32_t index = static_cast(k) + idx; - JSTaggedValue value = length > index ? srcElements->Get(index) : JSTaggedValue::Hole(); - destElements->Set(thread, idx, value); - } - } - - return newArrayHandle.GetTaggedValue(); + return JSStableArray::Slice(thread, thisObjHandle, k, count); } // 12. Let A be ArraySpeciesCreate(O, count). @@ -2190,16 +2183,15 @@ JSTaggedValue BuiltinsArray::Some(EcmaRuntimeCallInfo *argv) key.Update(JSTaggedValue(k)); JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, key); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); - bool boolResult = callResult.ToBoolean(); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (boolResult) { + if (callResult.ToBoolean()) { return GetTaggedBoolean(true); } } @@ -2369,6 +2361,7 @@ JSTaggedValue BuiltinsArray::Splice(EcmaRuntimeCallInfo *argv) toKey.Update(JSTaggedValue(k)); if (newArrayHandle->IsJSProxy()) { toKey.Update(JSTaggedValue::ToString(thread, toKey).GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValue); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -2502,17 +2495,6 @@ JSTaggedValue BuiltinsArray::ToLocaleString(EcmaRuntimeCallInfo *argv) // 4. ReturnIfAbrupt(len). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 5. 
Let separator be the String value for the list-separator String appropriate for the host environment’s - // current locale (this is derived in an implementation-defined way). - JSHandle sepHandle; - if ((GetCallArg(argv, 0)->IsUndefined())) { - sepHandle = JSHandle::Cast(ecmaVm->GetFactory()->NewFromASCII(",")); - } else { - sepHandle = GetCallArg(argv, 0); - } - JSHandle sepStringHandle = JSTaggedValue::ToString(thread, sepHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - CString sepString = ConvertToString(*sepStringHandle); // 6. If len is zero, return the empty String. if (len == 0) { return GetTaggedString(thread, ""); @@ -2564,7 +2546,7 @@ JSTaggedValue BuiltinsArray::ToLocaleString(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString nextString = ConvertToString(*nextStringHandle); if (k > 0) { - concatStr += sepString; + concatStr += STRING_SEPERATOR; concatStr += nextString; continue; } @@ -2914,6 +2896,7 @@ JSTaggedValue BuiltinsArray::Includes(EcmaRuntimeCallInfo *argv) while (from < len) { JSHandle handledFrom(thread, JSTaggedValue(from)); fromStr = JSTaggedValue::ToString(thread, handledFrom); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); key.Update(fromStr.GetTaggedValue()); kValueHandle.Update(JSArray::FastGetPropertyByValue(thread, thisObjVal, key).GetTaggedValue()); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -2936,6 +2919,9 @@ JSTaggedValue BuiltinsArray::At(EcmaRuntimeCallInfo *argv) // 1. Let O be ToObject(this value). JSHandle thisHandle = GetThis(argv); + if (thisHandle->IsStableJSArray(thread)) { + return JSStableArray::At(JSHandle::Cast(thisHandle), argv); + } JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -2973,4 +2959,441 @@ JSTaggedValue BuiltinsArray::At(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return element.GetTaggedValue(); } + +// 23.1.3.39 Array.prototype.with ( index, value ) +// NOLINTNEXTLINE(readability-function-size) +JSTaggedValue BuiltinsArray::With(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, With); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + // 2. Let len be ? LengthOfArrayLike(O). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 3. Let relativeIndex be ? ToIntegerOrInfinity(relativeIndex). + JSTaggedNumber index = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 0)); + // ReturnIfAbrupt(index). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t relativeIndex = index.GetNumber(); + int64_t actualIndex = 0; + JSHandle value = GetCallArg(argv, 1); + // 4. If relativeIndex ≥ 0, let actualIndex be relativeIndex. + // 5. Else, let actualIndex be len + relativeIndex. + // 6. If actualIndex ≥ len or actualIndex < 0, throw a RangeError exception. + if (relativeIndex >= 0) { + actualIndex = relativeIndex; + } else { + actualIndex = len + relativeIndex; + } + if (actualIndex >= len || actualIndex < 0) { + THROW_RANGE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); + } + // 7. Let A be ? ArrayCreate(len). 
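The index normalization in steps 4 to 6 of `With` above recurs in the other change-by-copy methods. Sketched standalone, with a plain C++ exception standing in for the engine's RangeError:

```
#include <cstdint>
#include <iostream>
#include <stdexcept>

// Steps 4-6 of Array.prototype.with: map a possibly negative relativeIndex
// onto [0, len) or throw a RangeError.
int64_t ActualIndex(int64_t relativeIndex, int64_t len)
{
    int64_t actualIndex = relativeIndex >= 0 ? relativeIndex : len + relativeIndex;
    if (actualIndex >= len || actualIndex < 0) {
        throw std::out_of_range("out of range.");  // engine throws RangeError here
    }
    return actualIndex;
}

int main()
{
    std::cout << ActualIndex(1, 4) << '\n';   // 1
    std::cout << ActualIndex(-1, 4) << '\n';  // 3 (len + relativeIndex)
    try {
        ActualIndex(4, 4);                    // == len: out of range
    } catch (const std::out_of_range&) {
        std::cout << "RangeError\n";
    }
}
```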
+ JSTaggedValue newArray = + JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(len))).GetTaggedValue(); + // ReturnIfAbrupt(A). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { + return JSStableArray::With(thread, JSHandle::Cast(thisHandle), len, actualIndex, value); + } + // 8. Let k be 0. + int64_t k = 0; + // 9. Repeat, while k < len, + // a. Let Pk be ! ToString(𝔽(k)). + // b. If k is actualIndex, let fromValue be value. + // c. Else, let fromValue be ? Get(O, Pk). + // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue). + // e. Set k to k + 1. + JSMutableHandle fromKey(thread, JSTaggedValue::Undefined()); + JSHandle fromValue; + while (k < len) { + fromKey.Update(JSTaggedValue(k)); + if (k == actualIndex) { + fromValue = value; + } else { + fromValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, fromKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, fromKey, fromValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++k; + } + // 10. Return A. + return newArrayHandle.GetTaggedValue(); +} + +// 23.1.3.34 Array.prototype.toSorted ( comparefn ) +JSTaggedValue BuiltinsArray::ToSorted(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, ToSorted); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, throw a TypeError exception. + JSHandle callbackFnHandle = GetCallArg(argv, 0); + if (!callbackFnHandle->IsUndefined() && !callbackFnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "Callable is false", JSTaggedValue::Exception()); + } + + // 2. Let obj be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 3. Let len be ToLength(Get(obj, "length")). + int64_t len = ArrayHelper::GetArrayLength(thread, JSHandle(thisObjHandle)); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 4. Let A be ? ArrayCreate(len). + JSTaggedValue newArray = JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(len))).GetTaggedValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + // 5. Let SortCompare be a new Abstract Closure with parameters (x, y) that captures comparefn and performs + // the following steps when called: + // a. Return ? CompareArrayElements(x, y, comparefn). + // 6. Let sortedList be ? SortIndexedProperties(O, len, SortCompare, read-through-holes). + JSTaggedValue sortedList = ArrayHelper::SortIndexedProperties(thread, thisObjHandle, len, callbackFnHandle, + base::HolesType::READ_THROUGH_HOLES); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle sortedArray(thread, sortedList); + + //7. Let j be 0. + int64_t j = 0; + // 8. Repeat, while j < len, + // a. Perform ! CreateDataPropertyOrThrow(A, ! ToString(𝔽(j)), sortedList[j]). + // b. Set j to j + 1. + while (j < len) { + JSHandle item = JSArray::FastGetPropertyByValue(thread, sortedArray, j); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, j, item); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++j; + } + // 9. Return A. 
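`ToSorted` is `Sort` on a copy: `SortIndexedProperties` materializes the elements (reading through holes), and the loop that follows writes the sorted list into the fresh array `A`, leaving the receiver untouched. The same copy-then-sort shape on a plain vector, with `std::stable_sort` standing in for the engine's comparator machinery:

```
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

// toSorted: sort a copy, leave the receiver untouched. The engine's
// SortIndexedProperties additionally reads elements through holes; a plain
// vector has no holes, so that part is elided here.
std::vector<int> ToSorted(const std::vector<int>& original,
                          const std::function<bool(int, int)>& lessThan)
{
    std::vector<int> sorted(original);              // step 6: sortedList
    std::stable_sort(sorted.begin(), sorted.end(), lessThan);
    return sorted;                                  // steps 7-9: copy out as A
}

int main()
{
    std::vector<int> a = {3, 1, 2};
    std::vector<int> b = ToSorted(a, [](int x, int y) { return x < y; });
    for (int v : b) std::cout << v << ' ';          // 1 2 3
    std::cout << "| original: ";
    for (int v : a) std::cout << v << ' ';          // 3 1 2 (unchanged)
    std::cout << std::endl;
}
```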
+ return newArrayHandle.GetTaggedValue(); +} + +// 23.1.3.35 Array.prototype.toSpliced ( start, skipCount, ...items ) +JSTaggedValue BuiltinsArray::ToSpliced(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, ToSpliced); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + uint32_t argc = argv->GetArgsNumber(); + // 1. Let O be ? ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + // 2. Let len be ? LengthOfArrayLike(O). + int64_t len = ArrayHelper::GetArrayLength(thread, thisObjVal); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t actualStart = 0; + int64_t actualSkipCount = 0; + int64_t newLen = 0; + int64_t insertCount = 0; + // 3. Let relativeStart be ? ToIntegerOrInfinity(start). + if (argc > 0) { + JSTaggedNumber argStart = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 0)); + // ReturnIfAbrupt(relativeStart). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + double relativeStart = argStart.GetNumber(); + // 4. If relativeStart = -∞, let k be 0. + // 5. Else if relativeStart < 0, let k be max(len + relativeStart, 0). + // 6. Else, let k be min(relativeStart, len). + if (relativeStart < 0) { + double tempStart = relativeStart + len; + actualStart = tempStart > 0 ? tempStart : 0; + } else { + actualStart = relativeStart < len ? relativeStart : len; + } + actualSkipCount = len - actualStart; + } + // 7. Let insertCount be the number of elements in items. + // 8. If start is not present, then + // a. Let actualSkipCount be 0. + // 9. Else if skipCount is not present, then + // a. Let actualSkipCount be len - actualStart. + // 10. Else, + // a. Let sc be ? ToIntegerOrInfinity(skipCount). + // b. Let actualSkipCount be the result of clamping sc between 0 and len - actualStart. + if (argc > 1) { + insertCount = argc - 2; // 2:2 means there two arguments before the insert items. + JSTaggedNumber argSkipCount = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 1)); + // ReturnIfAbrupt(argSkipCount). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + double skipCount = argSkipCount.GetNumber(); + skipCount = skipCount > 0 ? skipCount : 0; + actualSkipCount = skipCount < (len - actualStart) ? skipCount : len - actualStart; + } + // 11. Let newLen be len + insertCount - actualSkipCount. + newLen = len + insertCount - actualSkipCount; + // 12. If newLen > 2^53 - 1, throw a TypeError exception. + if (newLen > base::MAX_SAFE_INTEGER) { + THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); + } + if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { + return JSStableArray::ToSpliced(JSHandle::Cast(thisHandle), argv, argc, actualStart, + actualSkipCount, newLen); + } + // 13. Let A be ? ArrayCreate(newLen). + JSHandle newJsTaggedArray = + JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(newLen))); + // ReturnIfAbrupt(newArray). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newJsTaggedArray.GetTaggedValue()); + // 14. Let i be 0. + int64_t i = 0; + // 15. Let r be actualStart + actualSkipCount. + int64_t r = actualStart + actualSkipCount; + // 16. Repeat, while i < actualStart, + // a. Let Pi be ! ToString(𝔽(i)). + // b. Let iValue be ? Get(O, Pi). + // c. Perform ! 
CreateDataPropertyOrThrow(A, Pi, iValue). + // d. Set i to i + 1. + while (i < actualStart) { + JSHandle iValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, i); + // ReturnIfAbrupt(iValue). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, i, iValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++i; + } + // 17. For each element E of items, do + // a. Let Pi be ! ToString(𝔽(i)). + // b. Perform ! CreateDataPropertyOrThrow(A, Pi, E). + // c. Set i to i + 1. + JSMutableHandle pi(thread, JSTaggedValue::Undefined()); + for (int64_t pos = 2; pos < argc; ++pos) { // 2:2 means there two arguments before the insert items. + pi.Update(JSTaggedValue(i)); + JSHandle element = GetCallArg(argv, pos); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, pi, element); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++i; + } + // 18. Repeat, while i < newLen, + // a. Let Pi be ! ToString(𝔽(i)). + // b. Let from be ! ToString(𝔽(r)). + // c. Let fromValue be ? Get(O, from). + // d. Perform ! CreateDataPropertyOrThrow(A, Pi, fromValue). + // e. Set i to i + 1. + // f. Set r to r + 1. + JSMutableHandle from(thread, JSTaggedValue::Undefined()); + while (i < newLen) { + pi.Update(JSTaggedValue(i)); + from.Update(JSTaggedValue(r)); + JSHandle fromValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, from); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, pi, fromValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++i; + ++r; + } + JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + JSHandle newLenHandle(thread, JSTaggedValue(newLen)); + JSTaggedValue::SetProperty(thread, newJsTaggedArray, lengthKey, newLenHandle, true); + // ReturnIfAbrupt(setStatus). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 19. Return A. + return newArrayHandle.GetTaggedValue(); +} + +// 23.1.3.11 Array.prototype.findLast ( predicate [ , thisArg ] ) +JSTaggedValue BuiltinsArray::FindLast(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, FindLast); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // 2. ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + + // 3. Let len be ToLength(Get(O, "length")). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // 4. ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 5. If IsCallable(predicate) is false, throw a TypeError exception. + JSHandle callbackFnHandle = GetCallArg(argv, 0); + if (!callbackFnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "the predicate is not callable.", JSTaggedValue::Exception()); + } + + // 6. If thisArg was supplied, let T be thisArg; else let T be undefined. + JSHandle thisArgHandle = GetCallArg(argv, 1); + + // 7. Let k be (len - 1). + // 8. Repeat, while k >= 0 + // a. Let Pk be ToString(k). + // b. Let kValue be Get(O, Pk). + // c. ReturnIfAbrupt(kValue). + // d. Let testResult be ToBoolean(Call(predicate, T, «kValue, k, O»)). + // e. ReturnIfAbrupt(testResult). + // f. If testResult is true, return kValue. + // g. Decrease k by 1. 
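Stepping back to `ToSpliced` above: once `actualStart` and `actualSkipCount` are clamped, the result is assembled in three phases, indices `[0, actualStart)` from the source, then the inserted items, then the source tail starting at `r = actualStart + actualSkipCount`. The same assembly on a plain vector, as a sketch:

```
#include <cstdint>
#include <iostream>
#include <vector>

// toSpliced assembly: prefix [0, actualStart), then items, then the source
// tail starting at r = actualStart + actualSkipCount.
std::vector<int> ToSpliced(const std::vector<int>& src, int64_t actualStart,
                           int64_t actualSkipCount, const std::vector<int>& items)
{
    std::vector<int> out;
    out.reserve(src.size() + items.size() - static_cast<size_t>(actualSkipCount));
    out.insert(out.end(), src.begin(), src.begin() + actualStart);      // phase 1
    out.insert(out.end(), items.begin(), items.end());                  // phase 2
    out.insert(out.end(), src.begin() + actualStart + actualSkipCount,  // phase 3
               src.end());
    return out;
}

int main()
{
    // [0,1,2,3,4].toSpliced(1, 2, 9, 9) -> [0, 9, 9, 3, 4]
    std::vector<int> out = ToSpliced({0, 1, 2, 3, 4}, 1, 2, {9, 9});
    for (int v : out) std::cout << v << ' ';
    std::cout << std::endl;
}
```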
+ JSMutableHandle key(thread, JSTaggedValue::Undefined()); + int64_t k = len - 1; + JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); + const uint32_t argsLength = 3; // 3: «kValue, k, O» + while (k >= 0) { + JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + key.Update(JSTaggedValue(k)); + EcmaRuntimeCallInfo *info = + EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); + JSTaggedValue callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (callResult.ToBoolean()) { + return kValue.GetTaggedValue(); + } + k--; + } + + // 9. Return undefined. + return JSTaggedValue::Undefined(); +} + +// 23.1.3.12 Array.prototype.findLastIndex ( predicate [ , thisArg ] ) +JSTaggedValue BuiltinsArray::FindLastIndex(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, FindLastIndex); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // 2. ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + + // 3. Let len be ToLength(Get(O, "length")). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // 4. ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 5. If IsCallable(predicate) is false, throw a TypeError exception. + JSHandle callbackFnHandle = GetCallArg(argv, 0); + if (!callbackFnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "the predicate is not callable.", JSTaggedValue::Exception()); + } + + // 6. If thisArg was supplied, let T be thisArg; else let T be undefined. + JSHandle thisArgHandle = GetCallArg(argv, 1); + + // 7. Let k be (len - 1). + // 8. Repeat, while k >=0 + // a. Let Pk be ToString(k). + // b. Let kValue be Get(O, Pk). + // c. ReturnIfAbrupt(kValue). + // d. Let testResult be ToBoolean(Call(predicate, T, «kValue, k, O»)). + // e. ReturnIfAbrupt(testResult). + // f. If testResult is true, return k. + // g. Decrease k by 1. + int64_t k = len - 1; + JSTaggedValue callResult = GetTaggedBoolean(true); + if (thisObjVal->IsStableJSArray(thread)) { + callResult = + JSStableArray::HandleFindLastIndexOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, k); + if (callResult.ToBoolean()) { + return GetTaggedDouble(k); + } + } + JSMutableHandle key(thread, JSTaggedValue::Undefined()); + JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); + const uint32_t argsLength = 3; // 3: «kValue, k, O» + while (k >= 0) { + JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + key.Update(JSTaggedValue(k)); + EcmaRuntimeCallInfo *info = + EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); + callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (callResult.ToBoolean()) { + return GetTaggedDouble(k); + } + k--; + } + + // 9. Return -1. 
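`FindLast`/`FindLastIndex` mirror `Find`/`FindIndex` with the scan reversed; the stable-array fast path (`HandleFindLastIndexOfStable`) reports the index where it stopped so the generic loop can take over from there. The reverse scan itself, in miniature:

```
#include <functional>
#include <iostream>
#include <vector>

// findLastIndex: scan from len - 1 down to 0, return the first index whose
// element satisfies the predicate, else -1.
int FindLastIndex(const std::vector<int>& elems,
                  const std::function<bool(int, int)>& predicate)
{
    for (int k = static_cast<int>(elems.size()) - 1; k >= 0; --k) {
        if (predicate(elems[k], k)) {
            return k;
        }
    }
    return -1;
}

int main()
{
    std::vector<int> elems = {1, 4, 3, 4, 2};
    // last index whose value is 4 -> 3
    std::cout << FindLastIndex(elems, [](int v, int) { return v == 4; }) << std::endl;
}
```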
+ return GetTaggedDouble(-1); +} + +// 23.1.3.33 Array.prototype.toReversed ( ) +JSTaggedValue BuiltinsArray::ToReversed(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, ToReversed); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + + // 2. Let len be ? LengthOfArrayLike(O). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { + return JSStableArray::ToReversed(thread, JSHandle::Cast(thisHandle), len); + } + // 3. Let A be ? ArrayCreate(len). + JSTaggedValue newArray = JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(len))).GetTaggedValue(); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + // 4. Let k be 0. + // 5. Repeat, while k < len, + // a. Let from be ! ToString(𝔽(len - k - 1)). + // b. Let Pk be ! ToString(𝔽(k)). + // c. Let fromValue be ? Get(O, from). + // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue). + // e. Set k to k + 1. + JSMutableHandle fromKey(thread, JSTaggedValue::Undefined()); + JSMutableHandle toKey(thread, JSTaggedValue::Undefined()); + int64_t k = 0; + while (k < len) { + int64_t from = len - k - 1; + fromKey.Update(JSTaggedValue(from)); + toKey.Update(JSTaggedValue(k)); + JSHandle fromValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, fromKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + k++; + } + // 6. Return A. 
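`ToReversed` never touches the receiver: each destination index `k` is filled from `from = len - k - 1`, the same index mapping `Reverse` applies in place. As a standalone sketch:

```
#include <cstddef>
#include <iostream>
#include <vector>

// toReversed: build a fresh array where out[k] = src[len - k - 1].
std::vector<int> ToReversed(const std::vector<int>& src)
{
    size_t len = src.size();
    std::vector<int> out(len);
    for (size_t k = 0; k < len; ++k) {
        out[k] = src[len - k - 1];
    }
    return out;
}

int main()
{
    std::vector<int> out = ToReversed({1, 2, 3});
    for (int v : out) std::cout << v << ' ';  // 3 2 1
    std::cout << std::endl;
}
```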
+ return newArrayHandle.GetTaggedValue(); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_array.h b/ecmascript/builtins/builtins_array.h index e63cd3e969579bdda31fcb2fe7acc097c16d5a02..38813fb4b6d8d26341d9da8aae15ef3f29afea05 100644 --- a/ecmascript/builtins/builtins_array.h +++ b/ecmascript/builtins/builtins_array.h @@ -21,6 +21,7 @@ namespace panda::ecmascript::builtins { static constexpr uint8_t INDEX_TWO = 2; static constexpr uint8_t INDEX_THREE = 3; +static const CString STRING_SEPERATOR = ","; class BuiltinsArray : public base::BuiltinsBase { public: // 22.1.1 @@ -102,7 +103,35 @@ public: static JSTaggedValue FlatMap(EcmaRuntimeCallInfo *argv); // 23.1.3.1 Array.prototype.at ( index ) static JSTaggedValue At(EcmaRuntimeCallInfo *argv); + // 23.1.3.33 Array.prototype.toReversed ( ) + static JSTaggedValue ToReversed(EcmaRuntimeCallInfo *argv); + // 23.1.3.39 Array.prototype.with ( index, value ) + static JSTaggedValue With(EcmaRuntimeCallInfo *argv); + // 23.1.3.34 Array.prototype.toSorted ( comparefn ) + static JSTaggedValue ToSorted(EcmaRuntimeCallInfo *argv); + // 23.1.3.11 + static JSTaggedValue FindLast(EcmaRuntimeCallInfo *argv); + // 23.1.3.12 + static JSTaggedValue FindLastIndex(EcmaRuntimeCallInfo *argv); + // 23.1.3.35 Array.prototype.toSpliced ( start, skipCount, ...items ) + static JSTaggedValue ToSpliced(EcmaRuntimeCallInfo *argv); + +private: + static JSTaggedValue IndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, + int64_t length, int64_t fromIndex); + + static JSTaggedValue LastIndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue LastIndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue LastIndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, int64_t fromIndex); }; } // namespace panda::ecmascript::builtins -#endif // ECMASCRIPT_BUILTINS_BUILTINS_ARRAY_H \ No newline at end of file +#endif // ECMASCRIPT_BUILTINS_BUILTINS_ARRAY_H diff --git a/ecmascript/builtins/builtins_arraybuffer.cpp b/ecmascript/builtins/builtins_arraybuffer.cpp index d01e5644aa8ee3b9fe66a4fc353c412bf30bba93..aadf3188f884c9def765c03a9deac75cf17d4224 100644 --- a/ecmascript/builtins/builtins_arraybuffer.cpp +++ b/ecmascript/builtins/builtins_arraybuffer.cpp @@ -87,7 +87,7 @@ JSTaggedValue BuiltinsArrayBuffer::GetByteLength(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); BUILTINS_API_TRACE(thread, ArrayBuffer, GetByteLength); [[maybe_unused]] EcmaHandleScope handleScope(thread); - + // 1. Let O be the this value. JSHandle thisHandle = GetThis(argv); // 2. If Type(O) is not Object, throw a TypeError exception. @@ -180,6 +180,7 @@ JSTaggedValue BuiltinsArrayBuffer::Slice(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(JSTaggedValue(newLen)); JSTaggedValue taggedNewArrBuf = JSFunction::Construct(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle newArrBuf(thread, taggedNewArrBuf); // 16. ReturnIfAbrupt(new). 
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -461,9 +462,9 @@ T BuiltinsArrayBuffer::LittleEndianToBigEndian64Bit(T liValue) template JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForInteger(uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_integral_v, "T must be integral"); - static_assert(sizeof(T) == size, "Invalid number size"); - static_assert(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); + ASSERT_PRINT(std::is_integral_v, "T must be integral"); + ASSERT_PRINT(sizeof(T) == size, "Invalid number size"); + ASSERT_PRINT(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); ASSERT(size >= NumberSize::UINT16 || size <= NumberSize::FLOAT64); T res = *reinterpret_cast(block + byteIndex); @@ -485,15 +486,19 @@ JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForInteger(uint8_t *block, template JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForFloat(uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be correct type"); - static_assert(sizeof(T) == size, "Invalid number size"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be correct type"); + ASSERT_PRINT(sizeof(T) == size, "Invalid number size"); UnionType unionValue = {0}; // NOLINTNEXTLINE(readability-braces-around-statements) if constexpr (std::is_same_v) { unionValue.uValue = *reinterpret_cast(block + byteIndex); if (std::isnan(unionValue.value)) { - return GetTaggedDouble(unionValue.value); + if (!JSTaggedValue::IsImpureNaN(unionValue.value)) { + return GetTaggedDouble(unionValue.value); + } else { + return GetTaggedDouble(base::NAN_VALUE); + } } if (!littleEndian) { uint32_t res = LittleEndianToBigEndian(unionValue.uValue); @@ -506,7 +511,15 @@ JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForFloat(uint8_t *block, ui } if (!littleEndian) { uint64_t res = LittleEndianToBigEndian64Bit(unionValue.uValue); - return GetTaggedDouble(base::bit_cast(res)); + T d = base::bit_cast(res); + if (JSTaggedValue::IsImpureNaN(d)) { + return GetTaggedDouble(base::bit_cast(base::pureNaN)); + } + return GetTaggedDouble(d); + } else { + if (JSTaggedValue::IsImpureNaN(unionValue.value)) { + return GetTaggedDouble(base::NAN_VALUE); + } } } @@ -516,7 +529,7 @@ template JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForBigInt(JSThread *thread, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be uint64_t/int64_t"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be uint64_t/int64_t"); auto pTmp = *reinterpret_cast(block + byteIndex); if (!littleEndian) { pTmp = LittleEndianToBigEndian64Bit(pTmp); @@ -531,7 +544,7 @@ JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForBigInt(JSThread *thread, template void BuiltinsArrayBuffer::SetValueInBufferForByte(double val, uint8_t *block, uint32_t byteIndex) { - static_assert(std::is_same_v || std::is_same_v, "T must be int8/uint8"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be int8/uint8"); T res; if (std::isnan(val) || std::isinf(val)) { res = 0; @@ -562,8 +575,8 @@ void BuiltinsArrayBuffer::SetValueInBufferForUint8Clamped(double val, uint8_t *b template void BuiltinsArrayBuffer::SetValueInBufferForInteger(double val, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_integral_v, "T must be integral"); - static_assert(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); + 
ASSERT_PRINT(std::is_integral_v, "T must be integral"); + ASSERT_PRINT(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); T res; if (std::isnan(val) || std::isinf(val)) { res = 0; @@ -590,7 +603,7 @@ void BuiltinsArrayBuffer::SetValueInBufferForInteger(double val, uint8_t *block, template void BuiltinsArrayBuffer::SetValueInBufferForFloat(double val, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be float type"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be float type"); auto data = static_cast(val); if (std::isnan(val)) { SetTypeData(block, data, byteIndex); @@ -614,7 +627,7 @@ void BuiltinsArrayBuffer::SetValueInBufferForBigInt(JSThread *thread, JSHandle &arrBuf, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be int64_t/uint64_t"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be int64_t/uint64_t"); T value = 0; bool lossless = true; if constexpr(std::is_same_v) { @@ -636,7 +649,7 @@ void BuiltinsArrayBuffer::SetValueInBufferForBigInt(JSThread *thread, double val, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be int64_t/uint64_t"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be int64_t/uint64_t"); T value = 0; bool lossless = true; @@ -711,14 +724,15 @@ void *BuiltinsArrayBuffer::GetDataPointFromBuffer(JSTaggedValue arrBuf, uint32_t { if (arrBuf.IsByteArray()) { return reinterpret_cast(ToUintPtr(ByteArray::Cast(arrBuf.GetTaggedObject())->GetData()) + byteOffset); - } else { - JSArrayBuffer *arrayBuffer = JSArrayBuffer::Cast(arrBuf.GetTaggedObject()); - if (arrayBuffer->GetArrayBufferByteLength() == 0) { - return nullptr; - } - JSTaggedValue data = arrayBuffer->GetArrayBufferData(); - return reinterpret_cast(ToUintPtr(JSNativePointer::Cast(data.GetTaggedObject()) - ->GetExternalPointer()) + byteOffset); } + + JSArrayBuffer *arrayBuffer = JSArrayBuffer::Cast(arrBuf.GetTaggedObject()); + if (arrayBuffer->GetArrayBufferByteLength() == 0) { + return nullptr; + } + + JSTaggedValue data = arrayBuffer->GetArrayBufferData(); + return reinterpret_cast(ToUintPtr(JSNativePointer::Cast(data.GetTaggedObject()) + ->GetExternalPointer()) + byteOffset); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_async_from_sync_iterator.cpp b/ecmascript/builtins/builtins_async_from_sync_iterator.cpp index 8ff19fd15f3e67a38094385597a8f4b984faa179..ae63f7c4b821b67d60a91dd5b80b999cb010e749 100644 --- a/ecmascript/builtins/builtins_async_from_sync_iterator.cpp +++ b/ecmascript/builtins/builtins_async_from_sync_iterator.cpp @@ -44,6 +44,7 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Next(EcmaRuntimeCallInfo *argv) // 3.Let promiseCapability be ! NewPromiseCapability(%Promise%). JSHandle pcap = JSPromise::NewPromiseCapability(thread, JSHandle::Cast(env->GetPromiseFunction())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 4.Let syncIteratorRecord be O.[[SyncIteratorRecord]]. JSHandle asyncIterator(thisValue); JSHandle syncIteratorRecord(thread, asyncIterator->GetSyncIteratorRecord()); @@ -79,6 +80,7 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Throw(EcmaRuntimeCallInfo *argv) JSHandle env = vm->GetGlobalEnv(); JSHandle pcap = JSPromise::NewPromiseCapability(thread, JSHandle::Cast(env->GetPromiseFunction())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 4.Let syncIterator be O.[[SyncIteratorRecord]].[[Iterator]]. 
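Returning to the `GetValueFromBufferForFloat` hunk above: a double read out of an ArrayBuffer can carry an arbitrary NaN payload, and in a NaN-boxing VM such "impure" bit patterns could be mistaken for tagged values, so the patch canonicalizes them to the engine's pure NaN (`base::NAN_VALUE`/`base::pureNaN`). A standalone sketch of the detection and canonicalization; `kPureNaNBits` and the masks here are illustrative, not the engine's constants:

```
#include <bit>
#include <cstdint>
#include <cstring>
#include <iostream>

// Canonical quiet NaN: exponent all ones, quiet bit set, zero payload.
constexpr uint64_t kPureNaNBits = 0x7FF8000000000000ULL;

bool IsImpureNaN(double d)
{
    uint64_t bits = std::bit_cast<uint64_t>(d);
    bool isNaN = (bits & 0x7FF0000000000000ULL) == 0x7FF0000000000000ULL &&
                 (bits & 0x000FFFFFFFFFFFFFULL) != 0;
    return isNaN && bits != kPureNaNBits;
}

double ReadDoubleFromBuffer(const uint8_t* block, size_t byteIndex)
{
    uint64_t raw;
    std::memcpy(&raw, block + byteIndex, sizeof(raw));  // unaligned-safe read
    double d = std::bit_cast<double>(raw);
    // Any NaN with a nonzero payload is replaced by the canonical NaN so its
    // payload bits cannot masquerade as a tagged value later on.
    return IsImpureNaN(d) ? std::bit_cast<double>(kPureNaNBits) : d;
}

int main()
{
    uint8_t buf[8];
    uint64_t impure = 0x7FF0000000000001ULL;  // signaling NaN with payload 1
    std::memcpy(buf, &impure, sizeof(buf));
    double d = ReadDoubleFromBuffer(buf, 0);
    std::cout << std::hex << std::bit_cast<uint64_t>(d) << std::endl;  // 7ff8...
}
```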
JSHandle syncIteratorRecord(thread, asyncIterator->GetSyncIteratorRecord()); JSHandle syncIterator(thread, syncIteratorRecord->GetIterator()); @@ -91,9 +93,9 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Throw(EcmaRuntimeCallInfo *argv) // 7.If throw is undefined, then if (throwResult->IsUndefined()) { JSHandle iterResult = JSIterator::CreateIterResultObject(thread, value, true); - JSHandle resolve(thread, pcap->GetResolve()); + JSHandle reject(thread, pcap->GetReject()); EcmaRuntimeCallInfo *info = - EcmaInterpreter::NewRuntimeCallInfo(thread, resolve, undefinedValue, undefinedValue, 1); + EcmaInterpreter::NewRuntimeCallInfo(thread, reject, undefinedValue, undefinedValue, 1); info->SetCallArg(iterResult.GetTaggedValue()); return pcap->GetPromise(); } @@ -102,10 +104,12 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Throw(EcmaRuntimeCallInfo *argv) if (value->IsNull()) { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, throwResult, syncIterator, undefinedValue, 0); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, throwResult, pcap); ret = JSFunction::Call(callInfo); } else { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, throwResult, syncIterator, undefinedValue, 1); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, throwResult, pcap); callInfo->SetCallArg(value.GetTaggedValue()); ret = JSFunction::Call(callInfo); } @@ -148,6 +152,7 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Return(EcmaRuntimeCallInfo *argv) JSHandle env = vm->GetGlobalEnv(); JSHandle pcap = JSPromise::NewPromiseCapability(thread, JSHandle::Cast(env->GetPromiseFunction())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 4.Let syncIterator be O.[[SyncIteratorRecord]].[[Iterator]]. JSHandle asyncIterator(thisValue); JSHandle syncIteratorRecord(thread, asyncIterator->GetSyncIteratorRecord()); @@ -174,10 +179,12 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Return(EcmaRuntimeCallInfo *argv) if (value->IsNull()) { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, returnResult, syncIterator, undefinedValue, 0); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, returnResult, pcap); ret = JSFunction::Call(callInfo); } else { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, returnResult, syncIterator, undefinedValue, 1); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, returnResult, pcap); callInfo->SetCallArg(value.GetTaggedValue()); ret = JSFunction::Call(callInfo); } diff --git a/ecmascript/builtins/builtins_async_iterator.cpp b/ecmascript/builtins/builtins_async_iterator.cpp index a71b187534ef30494a7fedd02de5c743e7cef679..f4f31c4e1728779113489392464fceee83402f74 100644 --- a/ecmascript/builtins/builtins_async_iterator.cpp +++ b/ecmascript/builtins/builtins_async_iterator.cpp @@ -48,13 +48,16 @@ JSTaggedValue BuiltinsAsyncIterator::Return(EcmaRuntimeCallInfo *argv) JSHandle promiseFunc = env->GetPromiseFunction(); JSHandle value = GetCallArg(argv, 0); JSHandle pcap = JSPromise::NewPromiseCapability(thread, promiseFunc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle iterResult = JSIterator::CreateIterResultObject(thread, value, true); JSHandle iterResultVal(iterResult); JSHandle resolve(thread, pcap->GetResolve()); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo* info = EcmaInterpreter::NewRuntimeCallInfo(thread, resolve, undefined, undefined, 1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(iterResultVal.GetTaggedValue()); JSFunction::Call(info); + 
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return pcap->GetPromise(); } diff --git a/ecmascript/builtins/builtins_atomics.cpp b/ecmascript/builtins/builtins_atomics.cpp index b0e43f65f806cc0d27a3b62fd2650c2e826b8559..a95940f65d93c3b420941ca6330309c2b4c2912b 100644 --- a/ecmascript/builtins/builtins_atomics.cpp +++ b/ecmascript/builtins/builtins_atomics.cpp @@ -469,6 +469,7 @@ JSTaggedValue BuiltinsAtomics::HandleWithBigInt64(JSThread *thread, uint32_t siz int64_t val = 0; bool lossless = true; BigInt::BigIntToInt64(thread, value, &val, &lossless); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (size == 3) { // the number of parameters is 3 auto result = op(reinterpret_cast(block + indexedPosition), &val); return BigInt::Int64ToBigInt(thread, result).GetTaggedValue(); @@ -476,6 +477,7 @@ JSTaggedValue BuiltinsAtomics::HandleWithBigInt64(JSThread *thread, uint32_t siz JSHandle newValue = BuiltinsBase::GetCallArg(argv, BuiltinsBase::ArgsPosition::FOURTH); int64_t newVal = 0; BigInt::BigIntToInt64(thread, newValue, &newVal, &lossless); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int64_t arg[ARGS_NUMBER] = {0}; arg[0] = val; arg[1] = newVal; diff --git a/ecmascript/builtins/builtins_cjs_module.cpp b/ecmascript/builtins/builtins_cjs_module.cpp index 0cb9085ebb87a8203af45b6bbe7c98970178aad5..043bbb00a0c5e18214204dd345bdfee33cb43fde 100644 --- a/ecmascript/builtins/builtins_cjs_module.cpp +++ b/ecmascript/builtins/builtins_cjs_module.cpp @@ -16,14 +16,14 @@ #include "ecmascript/builtins/builtins_cjs_module.h" #include "ecmascript/base/builtins_base.h" -#include "ecmascript/base/path_helper.h" #include "ecmascript/interpreter/interpreter-inl.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/platform/file.h" #include "ecmascript/require/js_cjs_module.h" #include "ecmascript/require/js_require_manager.h" namespace panda::ecmascript::builtins { -using PathHelper = base::PathHelper; + JSTaggedValue BuiltinsCjsModule::CjsModuleConstructor(EcmaRuntimeCallInfo *argv) { JSThread *thread = argv->GetThread(); @@ -63,7 +63,7 @@ JSTaggedValue BuiltinsCjsModule::ResolveFilename(EcmaRuntimeCallInfo *argv) JSMutableHandle parent(thread, JSTaggedValue::Undefined()); JSMutableHandle dirname(thread, JSTaggedValue::Undefined()); const JSPandaFile *jsPandaFile = EcmaInterpreter::GetNativeCallPandafile(thread); - PathHelper::ResolveCurrentPath(thread, parent, dirname, jsPandaFile); + ModulePathHelper::ResolveCurrentPath(thread, parent, dirname, jsPandaFile); if (length != 1) { // strange arg's number LOG_ECMA(FATAL) << "BuiltinsCjsModule::Load : can only accept one argument"; diff --git a/ecmascript/builtins/builtins_cjs_require.cpp b/ecmascript/builtins/builtins_cjs_require.cpp index 9e109db9df3955e2f768063256b1537af2ea72bc..c9e002e982e338b27a7d4b9ff994b5a83fa9a703 100644 --- a/ecmascript/builtins/builtins_cjs_require.cpp +++ b/ecmascript/builtins/builtins_cjs_require.cpp @@ -43,6 +43,7 @@ JSTaggedValue BuiltinsCjsRequire::CjsRequireConstructor(EcmaRuntimeCallInfo *arg } JSHandle requestName = JSHandle::Cast(GetCallArg(argv, 0)); result = CjsModule::Load(thread, requestName); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return result.GetTaggedValue(); } diff --git a/ecmascript/builtins/builtins_dataview.cpp b/ecmascript/builtins/builtins_dataview.cpp index a99ba165982e12cb963c620d4e34aa2ef5735b23..eb356e66245730798c39984178466e40f470cee3 100644 --- a/ecmascript/builtins/builtins_dataview.cpp +++ b/ecmascript/builtins/builtins_dataview.cpp @@ -347,7 +347,8 @@ JSTaggedValue 
BuiltinsDataView::SetBigUint64(EcmaRuntimeCallInfo *argv) // 24.2.1.1 JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type) { BUILTINS_API_TRACE(thread, DataView, GetViewValue); @@ -371,10 +372,10 @@ JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandle(indexInt); // 7. Let isLittleEndian be ToBoolean(isLittleEndian). bool isLittleEndian = false; - if (littleEndian.IsUndefined()) { + if (littleEndian->IsUndefined()) { isLittleEndian = false; } else { - isLittleEndian = littleEndian.ToBoolean(); + isLittleEndian = littleEndian->ToBoolean(); } // 8. Let buffer be the value of view’s [[ViewedArrayBuffer]] internal slot. JSHandle dataView(view); @@ -401,7 +402,8 @@ JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type, const JSHandle &value) { // 1. If Type(view) is not Object, throw a TypeError exception. @@ -426,10 +428,10 @@ JSTaggedValue BuiltinsDataView::SetViewValue(JSThread *thread, const JSHandleIsUndefined()) { isLittleEndian = false; } else { - isLittleEndian = littleEndian.ToBoolean(); + isLittleEndian = littleEndian->ToBoolean(); } // 8. Let buffer be the value of view’s [[ViewedArrayBuffer]] internal slot. JSHandle dataView(view); @@ -461,11 +463,12 @@ JSTaggedValue BuiltinsDataView::GetTypedValue(EcmaRuntimeCallInfo *argv, DataVie [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisHandle = GetThis(argv); JSHandle offsetHandle = GetCallArg(argv, 0); + JSHandle trueHandle(thread, JSTaggedValue::True()); if (type == DataViewType::UINT8 || type == DataViewType::INT8) { - return GetViewValue(thread, thisHandle, offsetHandle, JSTaggedValue::True(), type); + return GetViewValue(thread, thisHandle, offsetHandle, trueHandle, type); } JSHandle littleEndianHandle = GetCallArg(argv, 1); - return GetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle.GetTaggedValue(), type); + return GetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle, type); } JSTaggedValue BuiltinsDataView::SetTypedValue(EcmaRuntimeCallInfo *argv, DataViewType type) @@ -476,10 +479,11 @@ JSTaggedValue BuiltinsDataView::SetTypedValue(EcmaRuntimeCallInfo *argv, DataVie JSHandle thisHandle = GetThis(argv); JSHandle offsetHandle = GetCallArg(argv, 0); JSHandle value = GetCallArg(argv, 1); + JSHandle trueHandle(thread, JSTaggedValue::True()); if (type == DataViewType::UINT8 || type == DataViewType::INT8) { - return SetViewValue(thread, thisHandle, offsetHandle, JSTaggedValue::True(), type, value); + return SetViewValue(thread, thisHandle, offsetHandle, trueHandle, type, value); } JSHandle littleEndianHandle = GetCallArg(argv, BuiltinsBase::ArgsPosition::THIRD); - return SetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle.GetTaggedValue(), type, value); + return SetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle, type, value); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_dataview.h b/ecmascript/builtins/builtins_dataview.h index 4337c2b0bf3bb21805b4f8377da6044a324041b8..afdcd12b2cfbc68096f80b9e9cb698d168d1d10a 100644 --- a/ecmascript/builtins/builtins_dataview.h +++ b/ecmascript/builtins/builtins_dataview.h @@ -75,10 +75,12 @@ public: private: // 24.2.1.1 
GetViewValue ( view, requestIndex, isLittleEndian, type ) static JSTaggedValue GetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type); static JSTaggedValue SetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type, const JSHandle &value); static JSTaggedValue GetTypedValue(EcmaRuntimeCallInfo *argv, DataViewType type); diff --git a/ecmascript/builtins/builtins_displaynames.cpp b/ecmascript/builtins/builtins_displaynames.cpp index 3cac9430bcc6520550635cc8f3428cf1447dbda4..02c853165e8814fafc4f5aa95b184d379e652bda 100644 --- a/ecmascript/builtins/builtins_displaynames.cpp +++ b/ecmascript/builtins/builtins_displaynames.cpp @@ -102,6 +102,7 @@ JSTaggedValue BuiltinsDisplayNames::Of(EcmaRuntimeCallInfo *argv) JSHandle displayNames = JSHandle::Cast(thisValue); TypednsOption typeOpt = displayNames->GetType(); JSHandle code = JSDisplayNames::CanonicalCodeForDisplayNames(thread, displayNames, typeOpt, codeTemp); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); std::string codeString = intl::LocaleHelper::ConvertToStdString(code); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (codeString.size()) { diff --git a/ecmascript/builtins/builtins_errors.cpp b/ecmascript/builtins/builtins_errors.cpp index 8af25081c31d7043bb8e9ff72cd39d595e6fd0eb..341e3f61ac9c8ec1a560f5c3f00a15c8e7aa98f4 100644 --- a/ecmascript/builtins/builtins_errors.cpp +++ b/ecmascript/builtins/builtins_errors.cpp @@ -159,6 +159,22 @@ JSTaggedValue BuiltinsAggregateError::AggregateErrorConstructor(EcmaRuntimeCallI PropertyDescriptor msgDesc(thread, JSHandle::Cast(handleStr), true, false, true); JSTaggedValue::DefinePropertyOrThrow(thread, taggedObj, msgKey, msgDesc); } + // InstallErrorCause + JSHandle options = BuiltinsBase::GetCallArg(argv, 2); // 2 : Third parameter + // If options is an Object and ? HasProperty(options, "cause") is true, then + // a. Let cause be ? Get(options, "cause"). + // b. Perform CreateNonEnumerableDataPropertyOrThrow(O, "cause", cause). + if (options->IsECMAObject()) { + JSHandle causeKey = globalConst->GetHandledCauseString(); + bool causePresent = JSTaggedValue::HasProperty(thread, options, causeKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (causePresent) { + JSHandle cause = JSObject::GetProperty(thread, options, causeKey).GetValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + PropertyDescriptor causeDesc(thread, cause, true, false, true); + JSTaggedValue::DefinePropertyOrThrow(thread, taggedObj, causeKey, causeDesc); + } + } // 4. Let errorsList be ? IterableToList(errors). JSHandle errorsList = JSObject::IterableToList(thread, errors); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -168,6 +184,7 @@ JSTaggedValue BuiltinsAggregateError::AggregateErrorConstructor(EcmaRuntimeCallI JSHandle errorsValues(JSArray::CreateArrayFromList(thread, errorsArray)); PropertyDescriptor msgDesc(thread, errorsValues, true, false, true); JSTaggedValue::DefinePropertyOrThrow(thread, taggedObj, errorsKey, msgDesc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 6. Return O. 
     return taggedObj.GetTaggedValue();
 }
diff --git a/ecmascript/builtins/builtins_finalization_registry.cpp b/ecmascript/builtins/builtins_finalization_registry.cpp
index 60c8cd9acd5af1d7bacbb42f5dcef8109e58a114..ed87c399477a9e2e2a307563b16f0d612424ac46 100644
--- a/ecmascript/builtins/builtins_finalization_registry.cpp
+++ b/ecmascript/builtins/builtins_finalization_registry.cpp
@@ -75,19 +75,19 @@ JSTaggedValue BuiltinsFinalizationRegistry::Register(EcmaRuntimeCallInfo *argv)
         THROW_TYPE_ERROR_AND_RETURN(thread, "thisValue is not object or does not have an internalSlot internal slot",
                                     JSTaggedValue::Exception());
     }
-    // 3. If Type(target) is not Object, throw a TypeError exception.
-    if (!target->IsECMAObject()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "target is not object", JSTaggedValue::Exception());
+    // 3. If CanBeHeldWeakly(target) is false, throw a TypeError exception.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, target)) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "target invalid", JSTaggedValue::Exception());
     }
     // 4. If SameValue(target, heldValue) is true, throw a TypeError exception.
     if (JSTaggedValue::SameValue(target, heldValue)) {
         THROW_TYPE_ERROR_AND_RETURN(thread, "target and heldValue should not be equal", JSTaggedValue::Exception());
     }
-    // 5. If Type(unregisterToken) is not Object, then
+    // 5. If CanBeHeldWeakly(unregisterToken) is false, then
     //    a. If unregisterToken is not undefined, throw a TypeError exception.
     //    b. Set unregisterToken to empty.
-    if (!unregisterToken->IsECMAObject() && !unregisterToken->IsUndefined()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken should be object", JSTaggedValue::Exception());
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, unregisterToken) && !unregisterToken->IsUndefined()) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken invalid", JSTaggedValue::Exception());
     }
     // 6. Let cell be the Record { [[WeakRefTarget]]: target,
     //    [[HeldValue]]: heldValue, [[UnregisterToken]]: unregisterToken }.
@@ -112,9 +112,9 @@ JSTaggedValue BuiltinsFinalizationRegistry::Unregister(EcmaRuntimeCallInfo *argv
         THROW_TYPE_ERROR_AND_RETURN(thread, "thisValue is not object or does not have an internalSlot internal slot",
                                     JSTaggedValue::Exception());
     }
-    // 3. If Type(unregisterToken) is not Object, throw a TypeError exception.
-    if (!unregisterToken->IsECMAObject()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken should be object", JSTaggedValue::Exception());
+    // 3. If CanBeHeldWeakly(unregisterToken) is false, throw a TypeError exception.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, unregisterToken)) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken invalid", JSTaggedValue::Exception());
     }
     // 4. Let removed be false.
     // 5.
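The switch from `IsECMAObject` to `CanBeHeldWeakly` tracks the ES2023 "Symbols as WeakMap keys" change: weak targets and unregister tokens may now also be symbols, provided they are not registered via `Symbol.for` (registered symbols live forever, so observing their collection would be meaningless). A spec-shaped sketch of the predicate over a deliberately minimal value model; this is an illustration, not the engine's implementation:

```
#include <optional>
#include <string>

// Hypothetical, simplified value model for illustration only.
struct Value {
    bool isObject = false;
    bool isSymbol = false;
    std::optional<std::string> symbolRegistryKey;  // set iff created by Symbol.for
};

// ECMA-262 CanBeHeldWeakly(v): objects, and symbols not in the global
// symbol registry, can be observed to be garbage collected.
bool CanBeHeldWeakly(const Value &v)
{
    if (v.isObject) {
        return true;
    }
    return v.isSymbol && !v.symbolRegistryKey.has_value();
}
```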
For each Record { [[WeakRefTarget]], [[HeldValue]], [[UnregisterToken]] } cell of diff --git a/ecmascript/builtins/builtins_function.cpp b/ecmascript/builtins/builtins_function.cpp index becda3bf9182733078023a2e202d5ab3c8d74555..f1f0e01c3809e3cc503e7de990c2e941ea36569b 100644 --- a/ecmascript/builtins/builtins_function.cpp +++ b/ecmascript/builtins/builtins_function.cpp @@ -121,18 +121,19 @@ JSTaggedValue BuiltinsFunction::FunctionPrototypeApply(EcmaRuntimeCallInfo *argv JSHandle arrayObj = GetCallArg(argv, 1); std::pair argumentsList = BuildArgumentsListFast(thread, arrayObj); if (!argumentsList.first) { - JSHandle argList = JSHandle::Cast( - JSObject::CreateListFromArrayLike(thread, arrayObj)); + JSHandle num = JSObject::CreateListFromArrayLike(thread, arrayObj); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle argList = JSHandle::Cast(num); // 4. ReturnIfAbrupt(argList). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - const int32_t argsLength = static_cast(argList->GetLength()); + const uint32_t argsLength = argList->GetLength(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, func, thisArg, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argsLength, argList); return JSFunction::Call(info); } // 6. Return Call(func, thisArg, argList). - const int32_t argsLength = static_cast(argumentsList.second); + const uint32_t argsLength = static_cast(argumentsList.second); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, func, thisArg, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argsLength, argumentsList.first); @@ -206,6 +207,7 @@ JSTaggedValue BuiltinsFunction::FunctionPrototypeBind(EcmaRuntimeCallInfo *argv) PropertyDescriptor desc(thread, JSHandle(thread, JSTaggedValue(lengthValue)), false, false, true); [[maybe_unused]] bool status = JSTaggedValue::DefinePropertyOrThrow(thread, JSHandle(boundFunction), lengthKey, desc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Assert: status is not an abrupt completion. ASSERT_PRINT(status, "DefinePropertyOrThrow failed"); diff --git a/ecmascript/builtins/builtins_generator.cpp b/ecmascript/builtins/builtins_generator.cpp index 30850bb61e9b04529e74b09924bd40b20985c8f6..4d2e1a152293c65d81e7c9101a65b717f6803450 100644 --- a/ecmascript/builtins/builtins_generator.cpp +++ b/ecmascript/builtins/builtins_generator.cpp @@ -40,6 +40,7 @@ JSTaggedValue BuiltinsGenerator::GeneratorPrototypeNext(EcmaRuntimeCallInfo *arg THROW_TYPE_ERROR_AND_RETURN(thread, "Not a generator object.", JSTaggedValue::Exception()); } JSHandle generator(thread, JSGeneratorObject::Cast(*JSTaggedValue::ToObject(thread, msg))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle value = GetCallArg(argv, 0); // 2.Return ? GeneratorResume(g, value). @@ -60,7 +61,7 @@ JSTaggedValue BuiltinsGenerator::GeneratorPrototypeReturn(EcmaRuntimeCallInfo *a THROW_TYPE_ERROR_AND_RETURN(thread, "Not a generator object.", JSTaggedValue::Exception()); } JSHandle generator(thread, JSGeneratorObject::Cast(*JSTaggedValue::ToObject(thread, msg))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 2.Let C be Completion { [[Type]]: return, [[Value]]: value, [[Target]]: empty }. 
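The bulk of this patch inserts `RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread)` after every call that can throw (`ToObject`, `ToNumber`, `CreateListFromArrayLike`, and so on); proceeding past a pending exception would treat the `JSTaggedValue::Exception()` placeholder as a real value. (In the `FunctionPrototypeApply` hunk this leaves a second, now-redundant check one line later; harmless.) A related cleanup switches `argsLength` locals from `int32_t` to `uint32_t`, matching `NewRuntimeCallInfo` and dropping the casts. The guard amounts to roughly this; a simplified sketch, the real definition lives in ecma_macros.h:

```
// Simplified sketch of the guard this patch adds after fallible calls.
#define RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread)     \
    do {                                                  \
        if ((thread)->HasPendingException()) {            \
            return JSTaggedValue::Exception();            \
        }                                                 \
    } while (false)
```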
JSHandle value = GetCallArg(argv, 0); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); @@ -85,7 +86,7 @@ JSTaggedValue BuiltinsGenerator::GeneratorPrototypeThrow(EcmaRuntimeCallInfo *ar THROW_TYPE_ERROR_AND_RETURN(thread, "Not a generator object.", JSTaggedValue::Exception()); } JSHandle generator(thread, JSGeneratorObject::Cast(*JSTaggedValue::ToObject(thread, msg))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 2.Let C be ThrowCompletion(exception). JSHandle exception = GetCallArg(argv, 0); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); diff --git a/ecmascript/builtins/builtins_global.cpp b/ecmascript/builtins/builtins_global.cpp index 2534ef4c4d0fb4e91bb234dcf4a79c8bb6abf817..918631474f6af660f3b2baf893b4b0520c3ed3f2 100644 --- a/ecmascript/builtins/builtins_global.cpp +++ b/ecmascript/builtins/builtins_global.cpp @@ -25,12 +25,18 @@ #include "ecmascript/ecma_macros.h" #include "ecmascript/js_function.h" #include "ecmascript/mem/c_containers.h" +#include "ecmascript/module/js_module_deregister.h" #include "ecmascript/stubs/runtime_stubs.h" #include "ecmascript/tagged_array-inl.h" namespace panda::ecmascript::builtins { using NumberHelper = base::NumberHelper; using StringHelper = base::StringHelper; +std::u16string g_asciiWordChars(u"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"); +std::u16string g_escapeWordChars(u"@*+-./"); +constexpr std::uint16_t CHAR16_PERCENT_SIGN = 0x0025; // u'%' +constexpr std::uint16_t CHAR16_LATIN_SMALL_LETTER_U = 0x0075; // u'u'; +constexpr std::uint16_t CHAR16_LETTER_NULL = u'\0'; // 18.2.1 JSTaggedValue BuiltinsGlobal::NotSupportEval(EcmaRuntimeCallInfo *msg) @@ -289,6 +295,126 @@ uint8_t BuiltinsGlobal::GetValueFromTwoHex(uint16_t front, uint16_t behind) return res; } +uint16_t BuiltinsGlobal::GetValueFromHexString(const JSHandle &string) +{ + uint32_t size = EcmaStringAccessor(string).GetLength(); + ASSERT(size > 0 && size <= 4); // NOLINT 4: means 4 hex digits + std::u16string hexString(u"0123456789ABCDEF"); + + uint16_t ret = 0; + for (uint32_t i = 0; i < size; ++i) { + uint16_t ch = EcmaStringAccessor(string).Get(i); + size_t idx = StringHelper::FindFromU16ToUpper(hexString, &ch); + ret = ((ret << 4U) | idx) & BIT_MASK_4F; // NOLINT 4: means shift left by 4 + } + return ret; +} + +// 22.1.3.17.2 StringPad ( S, maxLength, fillString, placement ) +EcmaString *BuiltinsGlobal::StringPad(JSThread *thread, const JSHandle &source, + uint32_t maxLength, const JSHandle &fillString, + Placement placement) +{ + // 1. Let stringLength be the length of S. + uint32_t stringLength = EcmaStringAccessor(source).GetLength(); + // 2. If maxLength ≤ stringLength, return S. + if (maxLength <= stringLength) { + return *source; + } + // 3. If fillString is the empty String, return S. + uint32_t targetStrLen = EcmaStringAccessor(fillString).GetLength(); + if (targetStrLen == 0) { + return *source; + } + // 4. Let fillLen be maxLength - stringLength. + uint32_t fillLen = maxLength - stringLength; + EcmaVM *vm = thread->GetEcmaVM(); + //5. Let truncatedStringFiller be the String value consisting of repeated concatenations + // of fillString truncated to length fillLen. 
+    uint32_t repeatTimes = static_cast<uint32_t>(std::ceil(static_cast<double>(fillLen) / targetStrLen));
+    EcmaString *p = nullptr;
+    JSHandle<EcmaString> stringFiller = vm->GetFactory()->NewFromStdString(std::string("\0"));
+    for (uint32_t k = 0; k < repeatTimes; ++k) {
+        p = EcmaStringAccessor::Concat(vm, stringFiller, fillString);
+        stringFiller = JSHandle<EcmaString>(thread, p);
+    }
+    JSHandle<EcmaString> truncatedStringFiller(thread,
+        EcmaStringAccessor::FastSubString(vm, stringFiller, 0, fillLen));
+    // 6. If placement is start, return the string-concatenation of truncatedStringFiller and S.
+    // 7. Else, return the string-concatenation of S and truncatedStringFiller.
+    if (placement == Placement::START) {
+        return EcmaStringAccessor::Concat(vm, truncatedStringFiller, source);
+    } else {
+        return EcmaStringAccessor::Concat(vm, source, truncatedStringFiller);
+    }
+}
+
+// Static Semantics: UTF16SurrogatePairToCodePoint ( lead, trail )
+uint16_t BuiltinsGlobal::UTF16SurrogatePairToCodePoint(uint16_t lead, uint16_t trail)
+{
+    // 1. Assert: lead is a leading surrogate and trail is a trailing surrogate.
+    ASSERT(IsUTF16HighSurrogate(lead) && IsUTF16LowSurrogate(trail));
+    // 2. Let cp be (lead - 0xD800) × 0x400 + (trail - 0xDC00) + 0x10000.
+    uint16_t cp = ((lead - 0xD800) << 10UL) + (trail - 0xDC00) + 0x10000;
+    // 3. Return the code point cp.
+    return cp;
+}
+
+// 11.1.5 Static Semantics: StringToCodePoints ( string )
+EcmaString *BuiltinsGlobal::StringToCodePoints(JSThread *thread, const JSHandle<EcmaString> &string)
+{
+    // 1. Let codePoints be a new empty List.
+    std::u16string codePoints;
+    // 2. Let size be the length of string.
+    uint32_t size = EcmaStringAccessor(string).GetLength();
+    // 3. Let position be 0.
+    uint32_t position = 0;
+    // 4. Repeat, while position < size,
+    //     a. Let cp be CodePointAt(string, position).
+    //     b. Append cp.[[CodePoint]] to codePoints.
+    //     c. Set position to position + cp.[[CodeUnitCount]].
+    while (position < size) {
+        // i. Let first be the code unit at index position within string.
+        uint16_t first = EcmaStringAccessor(string).Get(position);
+        uint16_t cp = first - CHAR16_LETTER_NULL;
+        uint8_t codeUnitCount = 0;
+        bool isUnpairedSurrogate = false;
+        // ii. If first is neither a leading surrogate nor a trailing surrogate, then
+        //     a. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 1, [[IsUnpairedSurrogate]]: false }.
+        if (!IsUTF16HighSurrogate(first) && !IsUTF16LowSurrogate(first)) {
+            codeUnitCount = 1;  // 1 means: code unit count
+            isUnpairedSurrogate = false;
+        } else if (IsUTF16LowSurrogate(first) || position + 1 == size) {
+            // iii. If first is a trailing surrogate or position + 1 = size, then
+            //     a. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 1, [[IsUnpairedSurrogate]]: true }.
+            codeUnitCount = 1;
+            isUnpairedSurrogate = true;
+        } else {
+            // iv. Let second be the code unit at index position + 1 within string.
+            uint16_t second = EcmaStringAccessor(string).Get(position + 1);
+            // v. If second is not a trailing surrogate, then
+            //     a. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 1, [[IsUnpairedSurrogate]]: true }.
+            if (!IsUTF16LowSurrogate(second)) {
+                codeUnitCount = 1;  // 1 means: code unit count
+                isUnpairedSurrogate = true;
+            } else {
+                // vi. Set cp to UTF16SurrogatePairToCodePoint(first, second).
+                // vii. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 2, [[IsUnpairedSurrogate]]: false }.
+ cp = UTF16SurrogatePairToCodePoint(first, second); + codeUnitCount = 2; // 2 means: code unit count + isUnpairedSurrogate = false; + } + } + codePoints.push_back(cp); + position = position + codeUnitCount; + } + // 5. Return codePoints. + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + uint16_t *ptr = reinterpret_cast(codePoints.data()); + JSHandle codePointsString = factory->NewFromUtf16Literal(ptr, codePoints.size()); + return *codePointsString; +} + // Runtime Semantics JSTaggedValue BuiltinsGlobal::Decode(JSThread *thread, const JSHandle &str, judgURIFunc IsInURISet) { @@ -501,6 +627,26 @@ JSTaggedValue BuiltinsGlobal::PrintEntrypoint(EcmaRuntimeCallInfo *msg) return JSTaggedValue::Undefined(); } +JSTaggedValue BuiltinsGlobal::MarkModuleCollectable(EcmaRuntimeCallInfo *msg) +{ + ASSERT(msg); + JSThread *thread = msg->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + uint32_t numArgs = msg->GetArgsNumber(); + if (numArgs != 1) { + LOG_FULL(ERROR) << "The number of parameters received by markModuleCollectable is incorrect."; + return JSTaggedValue::False(); + } + JSHandle module = GetCallArg(msg, 0); + if (!module->IsModuleNamespace()) { + return JSTaggedValue::False(); + } + + ModuleDeregister::ProcessModuleReference(thread, module); + return JSTaggedValue::True(); +} + JSTaggedValue BuiltinsGlobal::CallJsBoundFunction(EcmaRuntimeCallInfo *msg) { JSThread *thread = msg->GetThread(); @@ -574,4 +720,150 @@ JSTaggedValue BuiltinsGlobal::PrintFunctionCallStat(EcmaRuntimeCallInfo *msg) return JSTaggedValue::Undefined(); } #endif + +// B.2.1.1 escape ( string ) +JSTaggedValue BuiltinsGlobal::Escape(EcmaRuntimeCallInfo *msg) +{ + ASSERT(msg); + JSThread *thread = msg->GetThread(); + BUILTINS_API_TRACE(thread, Global, Escape); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + // 1. Set string to ? ToString(string). + JSHandle string = JSTaggedValue::ToString(thread, GetCallArg(msg, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + EcmaVM *vm = thread->GetEcmaVM(); + // 2. Let len be the length of string. + uint32_t len = EcmaStringAccessor(string).GetLength(); + // 3. Let R be the empty String. + std::u16string r; + // 4. Let unescapedSet be the string-concatenation of the ASCII word characters and "@*+-./". + std::u16string unescapedSet = g_asciiWordChars + g_escapeWordChars; + // 5. Let k be 0. + uint32_t k = 0; + // 6. Repeat, while k < len, + // a. Let C be the code unit at index k within string. + // b. If unescapedSet contains C, then + // i. Let S be C. + // c. Else, + // i. Let n be the numeric value of C. + // ii. If n < 256, then + // 1. Let hex be the String representation of n, formatted as an uppercase hexadecimal number. + // 2. Let S be the string-concatenation of "%" and StringPad(hex, 2, "0", start). + // iii. Else, + // 1. Let hex be the String representation of n, formatted as an uppercase hexadecimal number. + // 2. Let S be the string-concatenation of "%u" and StringPad(hex, 4, "0", start). + // d. Set R to the string-concatenation of R and S. + // e. Set k to k + 1. 
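One caveat in the UTF-16 helpers above, left as the author wrote them: `UTF16SurrogatePairToCodePoint` and the `cp` local in `StringToCodePoints` are `uint16_t`, so any astral code point (above 0xFFFF) is truncated and the `+ 0x10000` is lost. The hex-digit substrings `unescape` feeds through this path are plain ASCII, so the truncation never fires here, but the helper is not safe for general strings. A width-correct version for comparison:

```
#include <cassert>
#include <cstdint>

// Width-safe variant of the surrogate-pair decoding added above. The
// standard formula needs at least 21 bits; uint16_t silently truncates.
uint32_t Utf16SurrogatePairToCodePoint(uint16_t lead, uint16_t trail)
{
    assert(0xD800 <= lead && lead <= 0xDBFF);    // leading surrogate
    assert(0xDC00 <= trail && trail <= 0xDFFF);  // trailing surrogate
    return ((static_cast<uint32_t>(lead) - 0xD800) << 10U) +
           (trail - 0xDC00) + 0x10000;           // 0xD83D,0xDE00 -> 0x1F600
}
```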
+ while (k < len) { + uint16_t c = EcmaStringAccessor(string).Get(k); + if (unescapedSet.find(c) != std::u16string::npos) { + r.push_back(c); + } else { + uint16_t n = c - CHAR16_LETTER_NULL; + std::ostringstream oss; + oss << std::uppercase << std::hex << n; + JSHandle hex = factory->NewFromStdString(oss.str()); + JSHandle fillString = factory->NewFromStdString(std::string("0")); + EcmaString *temp = nullptr; + JSHandle hexStringHandle = factory->NewFromStdString(std::string("\0")); + if (n <= std::numeric_limits::max()) { + EcmaString *hexEcmaString = + StringPad(thread, hex, 2, fillString, Placement::START); // NOLINT 2: means max string length + hexStringHandle = JSHandle(thread, hexEcmaString); + temp = EcmaStringAccessor::Concat(vm, factory->NewFromStdString("%"), hexStringHandle); + } else { + EcmaString *hexEcmaString = + StringPad(thread, hex, 4, fillString, Placement::START); // NOLINT 4: means max string length + hexStringHandle = JSHandle(thread, hexEcmaString); + temp = EcmaStringAccessor::Concat(vm, factory->NewFromStdString("%u"), hexStringHandle); + } + JSHandle s = JSHandle(thread, temp); + r = r + EcmaStringAccessor(s).ToU16String(); + } + ++k; + } + // 7. Return R. + auto *returnData = reinterpret_cast(r.data()); + uint32_t retSize = r.size(); + return factory->NewFromUtf16Literal(returnData, retSize).GetTaggedValue(); +} + +// B.2.1.2 unescape ( string ) +JSTaggedValue BuiltinsGlobal::Unescape(EcmaRuntimeCallInfo *msg) +{ + ASSERT(msg); + JSThread *thread = msg->GetThread(); + BUILTINS_API_TRACE(thread, Global, Unescape); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + // 1. Set string to ? ToString(string). + JSHandle string = JSTaggedValue::ToString(thread, GetCallArg(msg, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 2. Let len be the length of string. + uint32_t len = EcmaStringAccessor(string).GetLength(); + // 3. Let R be the empty String. + EcmaVM *vm = thread->GetEcmaVM(); + ObjectFactory *factory = vm->GetFactory(); + std::u16string r; + // 4. Let k be 0. + uint32_t k = 0; + // 5. Repeat, while k < len, + // a. Let C be the code unit at index k within string. + // b. If C is the code unit 0x0025 (PERCENT SIGN), then + // i. Let hexDigits be the empty String. + // ii. Let optionalAdvance be 0. + // iii. If k + 5 < len and the code unit at index k + 1 within string is the code unit + // 0x0075 (LATIN SMALL LETTER U), then + // 1. Set hexDigits to the substring of string from k + 2 to k + 6. + // 2. Set optionalAdvance to 5. + // iv. Else if k + 3 ≤ len, then + // 1. Set hexDigits to the substring of string from k + 1 to k + 3. + // 2. Set optionalAdvance to 2. + // v. Let parseResult be ParseText(StringToCodePoints(hexDigits), HexDigits[~Sep]). + // vi. If parseResult is a Parse Node, then + // 1. Let n be the MV of parseResult. + // 2. Set C to the code unit whose numeric value is n. + // 3. Set k to k + optionalAdvance. + // c. Set R to the string-concatenation of R and C. + // d. Set k to k + 1. 
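For reference, the Annex B behaviour that the `Escape` implementation above and the `Unescape` body below are encoding (values follow from ECMA-262 B.2.1, not from engine test runs):

```
// escape(): ASCII word characters and "@*+-./" pass through unchanged;
// code units below 256 become %XX, everything else %uXXXX.
//   escape("a1_@*+-./") == "a1_@*+-./"
//   escape("?")         == "%3F"
//   escape("\u00E4")    == "%E4"      // ä
//   escape("\u20AC")    == "%u20AC"   // €
// unescape() reverses both encodings:
//   unescape("%E4")    == "\u00E4"
//   unescape("%u20AC") == "\u20AC"
```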
+ while (k < len) { + uint16_t c = EcmaStringAccessor(string).Get(k); + JSHandle hexDigitsString; + if (c == CHAR16_PERCENT_SIGN) { + EcmaString *hexDigits = nullptr; + uint16_t optionalAdvance = 0; + if (k + 5 < len && // NOLINT 5: means offset by 5 + EcmaStringAccessor(string).Get(k + 1) == CHAR16_LATIN_SMALL_LETTER_U) { // NOLINT 1: means offset by 1 + hexDigits = EcmaStringAccessor(string).FastSubString(vm, string, + k + 2, 4); // NOLINT 2: means offset 4: means len + optionalAdvance = optionalAdvance + 5; // NOLINT 5: means plus 5 + } else if (k + 3 <= len) { // NOLINT 3: means offset + hexDigits = EcmaStringAccessor(string).FastSubString(vm, string, k + 1, 2); // NOLINT 2:means len + optionalAdvance = optionalAdvance + 2; // NOLINT 2: means plus 2 + } + if (hexDigits != nullptr) { + hexDigitsString = JSHandle(thread, hexDigits); + EcmaString *codePoints = StringToCodePoints(thread, hexDigitsString); + JSHandle codePointString = JSHandle(thread, codePoints); + bool isHex = true; + for (uint32_t i = 0; i < EcmaStringAccessor(codePointString).GetLength(); ++i) { + if (!IsHexDigits(EcmaStringAccessor(codePointString).Get(i))) { + isHex = false; + } + } + if (isHex) { + uint16_t n = GetValueFromHexString(codePointString); + c = n; + k = k + optionalAdvance; + } + } + } + r.push_back(c); + ++k; + } + // 7. Return R. + auto *returnData = reinterpret_cast(r.data()); + uint32_t retSize = r.size(); + return factory->NewFromUtf16Literal(returnData, retSize).GetTaggedValue(); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_global.h b/ecmascript/builtins/builtins_global.h index 433219a931ed4c9deaf27e2adbd8600225cd55f1..8b14c5a44c6cff70013c9172274104b93f039406 100644 --- a/ecmascript/builtins/builtins_global.h +++ b/ecmascript/builtins/builtins_global.h @@ -22,11 +22,17 @@ namespace panda::ecmascript::builtins { static constexpr uint8_t BIT_MASK = 0x0F; static constexpr uint8_t BIT_MASK_FF = 0xFF; +static constexpr uint16_t BIT_MASK_4F = 0xFFFF; static constexpr uint16_t BIT16_MASK = 0x3FF; static constexpr uint8_t BIT_MASK_ONE = 0x80; static constexpr uint8_t BIT_MASK_TWO = 0xC0; using judgURIFunc = bool (*)(uint16_t); +enum class Placement { + START = 0, + END, +}; + class BuiltinsGlobal : public base::BuiltinsBase { public: // 18.2.1 @@ -42,6 +48,7 @@ public: static JSTaggedValue EncodeURIComponent(EcmaRuntimeCallInfo *msg); static JSTaggedValue PrintEntrypoint(EcmaRuntimeCallInfo *msg); + static JSTaggedValue MarkModuleCollectable(EcmaRuntimeCallInfo *msg); static JSTaggedValue CallJsBoundFunction(EcmaRuntimeCallInfo *msg); static JSTaggedValue CallJsProxy(EcmaRuntimeCallInfo *msg); #if ECMASCRIPT_ENABLE_RUNTIME_STAT @@ -56,6 +63,10 @@ public: #if ECMASCRIPT_ENABLE_FUNCTION_CALL_TIMER static JSTaggedValue PrintFunctionCallStat(EcmaRuntimeCallInfo *msg); #endif + // B.2.1.1 escape ( string ) + static JSTaggedValue Escape(EcmaRuntimeCallInfo *msg); + // B.2.1.2 unescape ( string ) + static JSTaggedValue Unescape(EcmaRuntimeCallInfo *msg); private: static void PrintString(JSThread *thread, EcmaString *string); @@ -69,6 +80,27 @@ private: static bool IsInMarkURISet(uint16_t ch); static bool IsHexDigits(uint16_t ch); static uint8_t GetValueFromTwoHex(uint16_t front, uint16_t behind); + static uint16_t GetValueFromHexString(const JSHandle &string); + // 22.1.3.17.2 StringPad ( S, maxLength, fillString, placement ) + static EcmaString *StringPad(JSThread *thread, + const JSHandle &string, + uint32_t maxLength, + const JSHandle &fillString, + Placement placement 
= Placement::START); + static bool IsUTF16HighSurrogate(uint16_t ch) + { + return base::utf_helper::DECODE_LEAD_LOW <= ch && ch <= base::utf_helper::DECODE_LEAD_HIGH; + } + + static bool IsUTF16LowSurrogate(uint16_t ch) + { + return base::utf_helper::DECODE_TRAIL_LOW <= ch && ch <= base::utf_helper::DECODE_TRAIL_HIGH; + } + + // 11.1.3 Static Semantics: UTF16SurrogatePairToCodePoint ( lead, trail ) + static uint16_t UTF16SurrogatePairToCodePoint(uint16_t lead, uint16_t trail); + // 11.1.5 Static Semantics: StringToCodePoints ( string ) + static EcmaString *StringToCodePoints(JSThread *thread, const JSHandle &string); }; } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_json.cpp b/ecmascript/builtins/builtins_json.cpp index 9f28884818f1b957e35899bdf1420bbb63e99a71..fbe8faea80583cc5cfa5b19634ad9628c594338d 100644 --- a/ecmascript/builtins/builtins_json.cpp +++ b/ecmascript/builtins/builtins_json.cpp @@ -15,6 +15,7 @@ #include "ecmascript/builtins/builtins_json.h" +#include "ecmascript/base/fast_json_stringifier.h" #include "ecmascript/base/json_parser.h" #include "ecmascript/base/json_stringifier.h" #include "ecmascript/base/number_helper.h" @@ -86,6 +87,12 @@ JSTaggedValue BuiltinsJson::Stringify(EcmaRuntimeCallInfo *argv) uint32_t argc = argv->GetArgsNumber(); JSTaggedValue value = GetCallArg(argv, 0).GetTaggedValue(); + if (argc == 1 && thread->GetCurrentEcmaContext()->IsAotEntry()) { + JSHandle handleValue(thread, value); + panda::ecmascript::base::FastJsonStringifier stringifier(thread); + JSHandle result = stringifier.Stringify(handleValue); + return result.GetTaggedValue(); + } JSTaggedValue replacer = JSTaggedValue::Undefined(); JSTaggedValue gap = JSTaggedValue::Undefined(); diff --git a/ecmascript/builtins/builtins_list_format.cpp b/ecmascript/builtins/builtins_list_format.cpp index 781aacfaf8fbcb660fb089090e15cad33816e820..b9608aae523db04b4c6b7d8d5dd5b13b27a6e620 100644 --- a/ecmascript/builtins/builtins_list_format.cpp +++ b/ecmascript/builtins/builtins_list_format.cpp @@ -12,7 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - + #include "ecmascript/builtins/builtins_list_format.h" #include "ecmascript/intl/locale_helper.h" @@ -130,6 +130,7 @@ JSTaggedValue BuiltinsListFormat::FormatToParts(EcmaRuntimeCallInfo *argv) JSHandle array = JSHandle::Cast(listArray); JSHandle result = JSListFormat::FormatListToParts(thread, listFormat, array); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return result.GetTaggedValue(); } diff --git a/ecmascript/builtins/builtins_locale.cpp b/ecmascript/builtins/builtins_locale.cpp index 7ef01c85bb87fe266ba951d35d828b9a30c9b9b8..ff0d044feaedcf12f5aa43276abc7171fe66e588 100644 --- a/ecmascript/builtins/builtins_locale.cpp +++ b/ecmascript/builtins/builtins_locale.cpp @@ -19,7 +19,7 @@ #include "ecmascript/ecma_vm.h" #include "ecmascript/global_env.h" #include "ecmascript/js_locale.h" -#include "ecmascript/object_factory.h" +#include "ecmascript/object_factory-inl.h" namespace panda::ecmascript::builtins { // 10.1.3 Intl.Locale( tag [, options] ) @@ -56,9 +56,11 @@ JSTaggedValue BuiltinsLocale::LocaleConstructor(EcmaRuntimeCallInfo *argv) JSHandle localeString = factory->GetEmptyString(); if (!tag->IsJSLocale()) { localeString = JSTaggedValue::ToString(thread, tag); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } else { icu::Locale *icuLocale = (JSHandle::Cast(tag))->GetIcuLocale(); localeString = intl::LocaleHelper::ToLanguageTag(thread, *icuLocale); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } // 10. If options is undefined, then // a.Let options be ! ObjectCreate(null). diff --git a/ecmascript/builtins/builtins_map.cpp b/ecmascript/builtins/builtins_map.cpp index 47b1cf9f1007a0c56320a3d933962d3814a49491..7700f093ee3bfb59692724a3d96572fdf9362d80 100644 --- a/ecmascript/builtins/builtins_map.cpp +++ b/ecmascript/builtins/builtins_map.cpp @@ -133,7 +133,7 @@ JSTaggedValue BuiltinsMap::Has(EcmaRuntimeCallInfo *argv) if (!self->IsJSMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception()); } - JSMap *jsMap = JSMap::Cast(*JSTaggedValue::ToObject(thread, self)); + JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle key = GetCallArg(argv, 0); bool flag = jsMap->Has(key.GetTaggedValue()); return GetTaggedBoolean(flag); @@ -150,7 +150,7 @@ JSTaggedValue BuiltinsMap::Get(EcmaRuntimeCallInfo *argv) if (!self->IsJSMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception()); } - JSMap *jsMap = JSMap::Cast(*JSTaggedValue::ToObject(thread, self)); + JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle key = GetCallArg(argv, 0); JSTaggedValue value = jsMap->Get(key.GetTaggedValue()); return value; @@ -178,7 +178,7 @@ JSTaggedValue BuiltinsMap::ForEach(EcmaRuntimeCallInfo *argv) JSHandle thisArg = GetCallArg(argv, 1); JSMutableHandle hashMap(thread, map->GetLinkedMap()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; int index = 0; int totalElements = hashMap->NumberOfElements() + hashMap->NumberOfDeletedElements(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); @@ -227,8 +227,8 @@ JSTaggedValue BuiltinsMap::GetSize(EcmaRuntimeCallInfo *argv) if (!self->IsJSMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception()); } - JSMap *jsMap = JSMap::Cast(*JSTaggedValue::ToObject(thread, self)); - int count = jsMap->GetSize(); + JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject()); + uint32_t count = jsMap->GetSize(); return JSTaggedValue(count); } @@ -239,6 +239,7 @@ JSTaggedValue 
BuiltinsMap::Entries(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSMapIterator::CreateMapIterator(thread, self, IterationKind::KEY_AND_VALUE); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -249,6 +250,7 @@ JSTaggedValue BuiltinsMap::Keys(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSMapIterator::CreateMapIterator(thread, self, IterationKind::KEY); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -259,6 +261,7 @@ JSTaggedValue BuiltinsMap::Values(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSMapIterator::CreateMapIterator(thread, self, IterationKind::VALUE); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -308,7 +311,7 @@ JSTaggedValue BuiltinsMap::AddEntriesFromIterable(JSThread *thread, const JSHand if (thread->HasPendingException()) { return JSIterator::IteratorCloseAndReturn(thread, iter); } - const int32_t argsLength = 2; // 2: key and value pair + const uint32_t argsLength = 2; // 2: key and value pair JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, adder, JSHandle(target), undefined, argsLength); diff --git a/ecmascript/builtins/builtins_math.cpp b/ecmascript/builtins/builtins_math.cpp index 2b16ef09429aebfb17429588cd39278cc9711113..7f1bc407bce62b8ae65690c1be4da6d67be3952d 100644 --- a/ecmascript/builtins/builtins_math.cpp +++ b/ecmascript/builtins/builtins_math.cpp @@ -34,6 +34,7 @@ JSTaggedValue BuiltinsMath::Abs(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (numberValue.IsDouble()) { // if number_value is double,NaN,Undefine, deal in this case // if number_value is a String ,which can change to double. 
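The Map hunks above replace `JSMap::Cast(*JSTaggedValue::ToObject(thread, self))` with a direct cast. After the `IsJSMap()` guard the value is already a heap object, so the `ToObject` detour, which can allocate a wrapper and throw for primitives, is pure overhead on these hot paths:

```
// Guard first, then cast directly; no ToObject call is needed once the
// dynamic type is known.
if (!self->IsJSMap()) {
    THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception());
}
JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject());
```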
e.g."100",deal in this case @@ -52,6 +53,7 @@ JSTaggedValue BuiltinsMath::Acos(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // value == -NaN , <-1 or > 1,result is NaN @@ -70,6 +72,7 @@ JSTaggedValue BuiltinsMath::Acosh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (value >= 1) { @@ -87,6 +90,7 @@ JSTaggedValue BuiltinsMath::Asin(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (value >= -1 && value <= 1) { @@ -104,6 +108,7 @@ JSTaggedValue BuiltinsMath::Asinh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // value == -NaN, NaN, result is NaN @@ -122,6 +127,7 @@ JSTaggedValue BuiltinsMath::Atan(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // value == -NaN, NaN, result is NaN @@ -159,7 +165,9 @@ JSTaggedValue BuiltinsMath::Atan2(EcmaRuntimeCallInfo *argv) JSHandle msgX = GetCallArg(argv, 1); double result = base::NAN_VALUE; JSTaggedNumber numberValueY = JSTaggedValue::ToNumber(thread, msgY); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSTaggedNumber numberValueX = JSTaggedValue::ToNumber(thread, msgX); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double valueY = numberValueY.GetNumber(); double valueX = numberValueX.GetNumber(); // y = +0 and x > +0, return +0 @@ -186,6 +194,7 @@ JSTaggedValue BuiltinsMath::Cbrt(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value == -NaN, NaN, result is NaN @@ -204,6 +213,7 @@ JSTaggedValue BuiltinsMath::Ceil(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite,return value @@ -228,6 +238,7 @@ JSTaggedValue BuiltinsMath::Clz32(EcmaRuntimeCallInfo *argv) constexpr int defaultValue = 32; JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + 
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); auto tmpValue = std::abs(value); auto result = numberValue.ToUint32(); @@ -247,6 +258,7 @@ JSTaggedValue BuiltinsMath::Cos(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite, result is NaN @@ -265,6 +277,7 @@ JSTaggedValue BuiltinsMath::Cosh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value is NaN or -NaN, result is NaN @@ -283,6 +296,7 @@ JSTaggedValue BuiltinsMath::Exp(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value is NaN or -NaN, result is NaN @@ -301,6 +315,7 @@ JSTaggedValue BuiltinsMath::Expm1(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value is NaN or -NaN, result is NaN @@ -319,6 +334,7 @@ JSTaggedValue BuiltinsMath::Floor(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite, +0, -0, return value @@ -345,6 +361,7 @@ JSTaggedValue BuiltinsMath::Fround(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result; if (std::isnan(std::abs(value))) { @@ -370,6 +387,7 @@ JSTaggedValue BuiltinsMath::Hypot(EcmaRuntimeCallInfo *argv) for (uint32_t i = 0; i < argLen; i++) { JSHandle msg = GetCallArg(argv, i); numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); value = numberValue.GetNumber(); result = std::hypot(result, value); } @@ -386,7 +404,9 @@ JSTaggedValue BuiltinsMath::Imul(EcmaRuntimeCallInfo *argv) JSHandle msg1 = GetCallArg(argv, 0); JSHandle msg2 = GetCallArg(argv, 1); JSTaggedNumber numberValue1 = JSTaggedValue::ToNumber(thread, msg1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSTaggedNumber numberValue2 = JSTaggedValue::ToNumber(thread, msg2); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); auto value1 = numberValue1.GetNumber(); auto value2 = numberValue2.GetNumber(); if (!std::isfinite(value1) || !std::isfinite(value2)) { @@ -409,6 +429,7 @@ JSTaggedValue BuiltinsMath::Log(EcmaRuntimeCallInfo *argv) [[maybe_unused]] 
EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < 0,result is NaN @@ -427,6 +448,7 @@ JSTaggedValue BuiltinsMath::Log1p(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < -1,result is NaN @@ -445,6 +467,7 @@ JSTaggedValue BuiltinsMath::Log10(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < 0,result is NaN @@ -463,6 +486,7 @@ JSTaggedValue BuiltinsMath::Log2(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < 0,result is NaN @@ -493,6 +517,7 @@ JSTaggedValue BuiltinsMath::Max(EcmaRuntimeCallInfo *argv) for (uint32_t i = 0; i < argLen; i++) { JSHandle msg = GetCallArg(argv, i); numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); value = numberValue.GetNumber(); if (std::isnan(std::abs(value))) { // If any value is NaN, or -NaN, the max result is NaN @@ -527,6 +552,7 @@ JSTaggedValue BuiltinsMath::Min(EcmaRuntimeCallInfo *argv) for (uint32_t i = 0; i < argLen; i++) { JSHandle msg = GetCallArg(argv, i); numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); value = numberValue.GetNumber(); if (std::isnan(std::abs(value))) { // If any value is NaN or -NaN, the min result is NaN @@ -600,6 +626,7 @@ JSTaggedValue BuiltinsMath::Round(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); auto result = base::NAN_VALUE; const double diff = 0.5; @@ -637,6 +664,7 @@ JSTaggedValue BuiltinsMath::Sign(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); if (std::isnan(std::abs(value))) { return GetTaggedDouble(std::abs(value)); @@ -659,6 +687,7 @@ JSTaggedValue BuiltinsMath::Sin(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, the result is NaN @@ -677,6 +706,7 @@ JSTaggedValue 
BuiltinsMath::Sinh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, the result is NaN @@ -695,6 +725,7 @@ JSTaggedValue BuiltinsMath::Sqrt(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is negative, include -NaN and -Infinity but not -0.0, the result is NaN @@ -717,6 +748,7 @@ JSTaggedValue BuiltinsMath::Tan(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite, result is NaN @@ -735,6 +767,7 @@ JSTaggedValue BuiltinsMath::Tanh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (!std::isnan(std::abs(value))) { @@ -752,6 +785,7 @@ JSTaggedValue BuiltinsMath::Trunc(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (!std::isfinite(value)) { diff --git a/ecmascript/builtins/builtins_object.cpp b/ecmascript/builtins/builtins_object.cpp index 8cb27595c3b58d02220693dd5a7bdf12016b1e26..4a0a6e05f4a6d64291bf5a6bda3d56690b452a5d 100644 --- a/ecmascript/builtins/builtins_object.cpp +++ b/ecmascript/builtins/builtins_object.cpp @@ -730,6 +730,7 @@ JSTaggedValue BuiltinsObject::IsPrototypeOf(EcmaRuntimeCallInfo *argv) return GetTaggedBoolean(true); } msgValueHandle.Update(JSTaggedValue::GetPrototype(thread, msgValueHandle)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } return GetTaggedBoolean(false); } @@ -874,6 +875,7 @@ JSTaggedValue BuiltinsObject::ToString(EcmaRuntimeCallInfo *argv) JSHandle newLeftStringHandle = factory->ConcatFromString(leftString, JSTaggedValue::ToString(thread, tag)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); auto result = factory->ConcatFromString(newLeftStringHandle, rightString); return result.GetTaggedValue(); } @@ -1035,6 +1037,7 @@ JSTaggedValue BuiltinsObject::CreateDataPropertyOnObjectFunctions(EcmaRuntimeCal // 5. Perform ! CreateDataPropertyOrThrow(O, propertyKey, value). JSObject::CreateDataPropertyOrThrow(thread, thisObjHandle, propertyKey, value); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 6. Return undefined. 
return JSTaggedValue::Undefined(); diff --git a/ecmascript/builtins/builtins_promise.cpp b/ecmascript/builtins/builtins_promise.cpp index b145ef32d8ebb0a788ae3fbbb76c0a45acce3517..58c97ca44d7f8addb5785e07a4cc69f15e2c022a 100644 --- a/ecmascript/builtins/builtins_promise.cpp +++ b/ecmascript/builtins/builtins_promise.cpp @@ -73,7 +73,7 @@ JSTaggedValue BuiltinsPromise::PromiseConstructor(EcmaRuntimeCallInfo *argv) auto resolveFunc = resolvingFunction->GetResolveFunction(); auto rejectFunc = resolvingFunction->GetRejectFunction(); JSHandle undefined = globalConst->GetHandledUndefined(); - const int32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» + const uint32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, executor, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(resolveFunc, rejectFunc); @@ -152,6 +152,7 @@ JSTaggedValue BuiltinsPromise::All(EcmaRuntimeCallInfo *argv) if (!itRecord->GetDone()) { JSHandle closeVal = JSIterator::IteratorClose(thread, itor, JSHandle::Cast(result)); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, capa); if (closeVal.GetTaggedValue().IsRecord()) { result = JSHandle::Cast(closeVal); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, capa); @@ -220,6 +221,7 @@ JSTaggedValue BuiltinsPromise::Race(EcmaRuntimeCallInfo *argv) if (!iteratorRecord->GetDone()) { JSHandle value = JSIterator::IteratorClose(thread, iterator, JSHandle::Cast(result)); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); if (value.GetTaggedValue().IsCompletionRecord()) { result = JSHandle(value); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); @@ -694,6 +696,7 @@ JSTaggedValue BuiltinsPromise::Any(EcmaRuntimeCallInfo *argv) if (!iteratorRecord->GetDone()) { JSHandle resultHandle = JSHandle::Cast(result); JSHandle closeVal = JSIterator::IteratorClose(thread, iterator, resultHandle); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); if (closeVal.GetTaggedValue().IsCompletionRecord()) { result = JSHandle(closeVal); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); @@ -759,6 +762,7 @@ JSHandle BuiltinsPromise::PerformPromiseAny(JSThread *thread, PropertyDescriptor msgDesc(thread, errorsValue, true, false, true); JSHandle errorTagged = JSHandle::Cast(error); JSTaggedValue::DefinePropertyOrThrow(thread, errorTagged, errorsKey, msgDesc); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(CompletionRecord, thread); // 3. Return ThrowCompletion(error). 
JSHandle errorCompletion( factory->NewCompletionRecord(CompletionRecordType::THROW, errorTagged)); @@ -876,6 +880,7 @@ JSTaggedValue BuiltinsPromise::AllSettled(EcmaRuntimeCallInfo *argv) if (!iteratorRecord->GetDone()) { JSHandle resultHandle = JSHandle::Cast(result); JSHandle closeVal = JSIterator::IteratorClose(thread, iterator, resultHandle); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); if (closeVal.GetTaggedValue().IsCompletionRecord()) { result = JSHandle(closeVal); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); diff --git a/ecmascript/builtins/builtins_promise_handler.cpp b/ecmascript/builtins/builtins_promise_handler.cpp index d9fcc4e0b5b4544bbdfc03e13cdc99c5ce055bbe..74a7c561390f566175ef9343cf692633a2c21b36 100644 --- a/ecmascript/builtins/builtins_promise_handler.cpp +++ b/ecmascript/builtins/builtins_promise_handler.cpp @@ -274,8 +274,8 @@ JSTaggedValue BuiltinsPromiseHandler::ThenFinally(EcmaRuntimeCallInfo *argv) EcmaRuntimeCallInfo *taggedInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, onFinally, undefined, undefined, 0); JSTaggedValue result = JSFunction::Call(taggedInfo); - JSHandle resultHandle(thread, result); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle resultHandle(thread, result); // 5. Let C be F.[[Constructor]]. // 6. Assert: IsConstructor(C) is true. JSHandle thenFinallyConstructor(thread, thenFinally->GetConstructor()); @@ -315,8 +315,8 @@ JSTaggedValue BuiltinsPromiseHandler::CatchFinally(EcmaRuntimeCallInfo *argv) EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, onFinally, undefined, undefined, 0); JSTaggedValue result = JSFunction::Call(info); - JSHandle resultHandle(thread, result); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle resultHandle(thread, result); // 5. Let C be F.[[Constructor]]. // 6. Assert: IsConstructor(C) is true. JSHandle catchFinallyConstructor(thread, catchFinally->GetConstructor()); @@ -361,6 +361,7 @@ JSHandle BuiltinsPromiseHandler::PromiseResolve(JSThread *thread, // 3. Let promiseCapability be ? NewPromiseCapability(C). // 4. ReturnIfAbrupt(promiseCapability) JSHandle promiseCapability = JSPromise::NewPromiseCapability(thread, constructor); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); JSHandle promiseCapaHandle = JSHandle::Cast(promiseCapability); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, promiseCapaHandle); // 6. Let resolveResult be Call(promiseCapability.[[Resolve]], undefined, «x»). @@ -415,10 +416,12 @@ JSTaggedValue BuiltinsPromiseHandler::AllSettledResolveElementFunction(EcmaRunti JSHandle statusKey = globalConst->GetHandledPromiseStatusString(); JSHandle fulfilledKey = globalConst->GetHandledPromiseFulfilledString(); JSObject::CreateDataPropertyOrThrow(thread, obj, statusKey, fulfilledKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Perform ! CreateDataPropertyOrThrow(obj, "value", x). JSHandle valueKey = globalConst->GetHandledValueString(); JSHandle xValue = GetCallArg(argv, 0); JSObject::CreateDataPropertyOrThrow(thread, obj, valueKey, xValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 12. Set values[index] to obj. 
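Two recurring fixes in the promise hunks above: first, `JSIterator::IteratorClose` can itself complete abruptly, so each caller now runs `RETURN_REJECT_PROMISE_IF_ABRUPT` (which, by its name, presumably rejects the capability's promise with the pending exception) before inspecting the close result; second, in `ThenFinally`/`CatchFinally` the handle construction is moved after the exception check, so a call result is never rooted before it is known to be a normal completion:

```
// Check the completion first, then root the value; wrapping an abrupt
// result in a handle invites use of a bogus value downstream.
JSTaggedValue result = JSFunction::Call(taggedInfo);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle<JSTaggedValue> resultHandle(thread, result);
```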
JSHandle arrayValues = JSHandle::Cast(JSHandle(thread, values->GetValue())); @@ -478,10 +481,12 @@ JSTaggedValue BuiltinsPromiseHandler::AllSettledRejectElementFunction(EcmaRuntim JSHandle statusKey = globalConst->GetHandledPromiseStatusString(); JSHandle rejectedKey = globalConst->GetHandledPromiseRejectedString(); JSObject::CreateDataPropertyOrThrow(thread, obj, statusKey, rejectedKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Perform ! CreateDataPropertyOrThrow(obj, "reason", x). JSHandle xReason = GetCallArg(argv, 0); JSHandle reasonKey = globalConst->GetHandledPromiseReasonString(); JSObject::CreateDataPropertyOrThrow(thread, obj, reasonKey, xReason); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 12. Set values[index] to obj. JSHandle arrayValues = JSHandle::Cast(JSHandle(thread, values->GetValue())); @@ -550,6 +555,7 @@ JSTaggedValue BuiltinsPromiseHandler::AnyRejectElementFunction(EcmaRuntimeCallIn PropertyDescriptor msgDesc(thread, errorsValue, true, false, true); JSHandle errorTagged = JSHandle::Cast(error); JSTaggedValue::DefinePropertyOrThrow(thread, errorTagged, errorsKey, msgDesc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »). JSHandle capaReject(thread, capa->GetReject()); JSHandle undefined(globalConst->GetHandledUndefined()); diff --git a/ecmascript/builtins/builtins_promise_job.cpp b/ecmascript/builtins/builtins_promise_job.cpp index 51ed2a87a4e8aa8fe797a80eee779d9ad4def9df..37bd65ae0058efb2d438f842e86d5e851fc6ece9 100644 --- a/ecmascript/builtins/builtins_promise_job.cpp +++ b/ecmascript/builtins/builtins_promise_job.cpp @@ -15,7 +15,6 @@ #include "ecmascript/builtins/builtins_promise_job.h" -#include "ecmascript/base/path_helper.h" #include "ecmascript/ecma_macros.h" #include "ecmascript/global_env.h" #include "ecmascript/interpreter/interpreter.h" @@ -26,13 +25,16 @@ #include "ecmascript/js_promise.h" #include "ecmascript/js_tagged_value.h" #include "ecmascript/module/js_dynamic_import.h" +#include "ecmascript/module/js_module_deregister.h" #include "ecmascript/module/js_module_manager.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/platform/file.h" #include "ecmascript/require/js_cjs_module.h" #include "libpandabase/macros.h" namespace panda::ecmascript::builtins { -using PathHelper = base::PathHelper; +using JSRecordInfo = ecmascript::JSPandaFile::JSRecordInfo; + JSTaggedValue BuiltinsPromiseJob::PromiseReactionJob(EcmaRuntimeCallInfo *argv) { ASSERT(argv); @@ -51,10 +53,11 @@ JSTaggedValue BuiltinsPromiseJob::PromiseReactionJob(EcmaRuntimeCallInfo *argv) // 3. Let handler be reaction.[[Handler]]. JSHandle handler(thread, reaction->GetHandler()); JSHandle call(thread, capability->GetResolve()); - const int32_t argsLength = 1; + const uint32_t argsLength = 1; JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *runtimeInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, call, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (handler->IsString()) { // 4. If handler is "Identity", let handlerResult be NormalCompletion(argument). // 5. Else if handler is "Thrower", let handlerResult be Completion{[[type]]: throw, [[value]]: argument, @@ -68,6 +71,7 @@ JSTaggedValue BuiltinsPromiseJob::PromiseReactionJob(EcmaRuntimeCallInfo *argv) // 6. Else, let handlerResult be Call(handler, undefined, «argument»). 
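`EcmaInterpreter::NewRuntimeCallInfo` reserves interpreter frame space and can fail, leaving a pending exception (a stack-overflow condition is my reading of the failure mode; the added check itself is the patch's). Hence the guard now sits between allocating the call info and dereferencing it, as in the `PromiseReactionJob` lines that follow:

```
EcmaRuntimeCallInfo *info =
    EcmaInterpreter::NewRuntimeCallInfo(thread, handler, undefined, undefined, argsLength);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);  // do not touch info on failure
info->SetCallArg(argument.GetTaggedValue());
```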
EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, handler, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argument.GetTaggedValue()); JSTaggedValue taggedValue = JSFunction::Call(info); // 7. If handlerResult is an abrupt completion, then @@ -101,7 +105,7 @@ JSTaggedValue BuiltinsPromiseJob::PromiseResolveThenableJob(EcmaRuntimeCallInfo JSHandle then = GetCallArg(argv, BuiltinsBase::ArgsPosition::THIRD); // 2. Let thenCallResult be Call(then, thenable, «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]»). - const int32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» + const uint32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, then, thenable, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -135,46 +139,45 @@ JSTaggedValue BuiltinsPromiseJob::DynamicImportJob(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle resolve(GetCallArg(argv, 0)); - JSHandle reject(GetCallArg(argv, 1)); // 1 : first argument - JSHandle dirPath(GetCallArg(argv, 2)); // 2 : second argument - JSHandle specifier(GetCallArg(argv, 3)); // 3 : third argument - JSHandle recordName(GetCallArg(argv, 4)); // 4 : fourth recordName + JSHandle reject(GetCallArg(argv, 1)); // 1 : reject method + JSHandle dirPath(GetCallArg(argv, 2)); // 2 : current file path(containing file name) + JSHandle specifier(GetCallArg(argv, 3)); // 3 : request module's path + JSHandle recordName(GetCallArg(argv, 4)); // 4 : js recordName or undefined // Let specifierString be Completion(ToString(specifier)) JSHandle specifierString = JSTaggedValue::ToString(thread, specifier); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - JSHandle moduleName; - CString entryPoint = JSPandaFile::ENTRY_MAIN_FUNCTION; - CString baseFilename = ConvertToString(dirPath.GetTaggedValue()); - CString fileNameStr = ""; CString requestPath = ConvertToString(specifierString.GetTaggedValue()); - + LOG_ECMA(DEBUG) << "Start importing dynamic module : " << requestPath; // resolve native module auto [isNative, moduleType] = SourceTextModule::CheckNativeModule(requestPath); + ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); if (isNative) { return DynamicImport::ExecuteNativeModule(thread, specifierString, moduleType, resolve, reject); } + + // Resolve request module's ohmurl + CString entryPoint = JSPandaFile::ENTRY_MAIN_FUNCTION; + CString fileNameStr = ConvertToString(dirPath.GetTaggedValue()); + JSMutableHandle moduleName(thread, thread->GlobalConstants()->GetUndefined()); if (recordName->IsUndefined()) { - moduleName = ResolveFilenameFromNative(thread, dirPath.GetTaggedValue(), - specifierString.GetTaggedValue()); + moduleName.Update(ResolveFilenameFromNative(thread, dirPath.GetTaggedValue(), + specifierString.GetTaggedValue()).GetTaggedValue()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); fileNameStr = ConvertToString(moduleName.GetTaggedValue()); } else { CString recordNameStr = ConvertToString(recordName.GetTaggedValue()); std::shared_ptr jsPandaFile = - JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, baseFilename, recordNameStr.c_str()); + JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, 
fileNameStr, recordNameStr.c_str()); if (jsPandaFile == nullptr) { - CString msg = "Load file with filename '" + baseFilename + "' failed, recordName '" + recordNameStr + "'"; - JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); - THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, CatchException(thread, reject)); + LOG_FULL(FATAL) << "Load current file's panda file failed. Current file is " << recordNameStr; } - entryPoint = - PathHelper::ConcatFileNameWithMerge(thread, jsPandaFile.get(), baseFilename, recordNameStr, requestPath); + ModulePathHelper::ConcatFileNameWithMerge(thread, jsPandaFile.get(), + fileNameStr, recordNameStr, requestPath); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - fileNameStr = baseFilename; - moduleName = vm->GetFactory()->NewFromUtf8(entryPoint); + moduleName.Update(factory->NewFromUtf8(entryPoint).GetTaggedValue()); } std::shared_ptr jsPandaFile = JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, fileNameStr, entryPoint); @@ -183,34 +186,45 @@ JSTaggedValue BuiltinsPromiseJob::DynamicImportJob(EcmaRuntimeCallInfo *argv) JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, CatchException(thread, reject)); } - bool isModule = jsPandaFile->IsModule(thread, entryPoint); - RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); - JSMutableHandle moduleNamespace(thread, JSTaggedValue::Undefined()); + + // Loading request module. if (!moduleManager->IsImportedModuleLoaded(moduleName.GetTaggedValue())) { if (!JSPandaFileExecutor::ExecuteFromFile(thread, fileNameStr.c_str(), entryPoint.c_str(), false, true)) { CString msg = "Cannot execute request dynamic-imported module : " + entryPoint; JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, CatchException(thread, reject)); } + } else { + ModuleDeregister::ReviseLoadedModuleCount(thread, moduleName.GetTaggedValue()); } RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - if (!isModule) { + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entryPoint, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << entryPoint <<"' in basefileName " << fileNameStr << "."; + CString msg = "cannot find record '" + entryPoint + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), CatchException(thread, reject)); + } + JSMutableHandle moduleNamespace(thread, JSTaggedValue::Undefined()); + // only support importing es module, or return a default object. + if (!jsPandaFile->IsModule(recordInfo)) { moduleNamespace.Update(vm->GetGlobalEnv()->GetExportOfScript()); } else { // b. Let moduleRecord be ! HostResolveImportedModule(referencingScriptOrModule, specifier). JSHandle moduleRecord = moduleManager->HostGetImportedModule(moduleName.GetTaggedValue()); - // d. Let namespace be ? GetModuleNamespace(moduleRecord). - moduleNamespace.Update(SourceTextModule::GetModuleNamespace(thread, moduleRecord)); + JSHandle nameSp = SourceTextModule::GetModuleNamespace(thread, moduleRecord); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); + // d. Let namespace be ? GetModuleNamespace(moduleRecord). 
+ moduleNamespace.Update(nameSp); } JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, JSHandle(resolve), undefined, undefined, 1); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); info->SetCallArg(moduleNamespace.GetTaggedValue()); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSFunction::Call(info); diff --git a/ecmascript/builtins/builtins_proxy.cpp b/ecmascript/builtins/builtins_proxy.cpp index fa7f52944d1807b2d822b64e11ed23f22e430d36..7753f81e827d458868fabac35faf6257053e6363 100644 --- a/ecmascript/builtins/builtins_proxy.cpp +++ b/ecmascript/builtins/builtins_proxy.cpp @@ -85,12 +85,8 @@ JSTaggedValue BuiltinsProxy::InvalidateProxyFunction(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - JSHandle revokeObj(GetThis(argv)); - JSHandle revokeKey = thread->GlobalConstants()->GetHandledRevokeString(); - - PropertyDescriptor desc(thread); - JSObject::GetOwnProperty(thread, revokeObj, revokeKey, desc); - JSProxyRevocFunction::ProxyRevocFunctions(thread, JSHandle(desc.GetValue())); + JSHandle proxy = GetConstructor(argv); + JSProxyRevocFunction::ProxyRevocFunctions(thread, JSHandle(proxy)); return JSTaggedValue::Undefined(); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_reflect.cpp b/ecmascript/builtins/builtins_reflect.cpp index 05f037fac98cceb5537885d3656ceec6c5b25007..0cd4459a717734a5d708fde7e3144b5cdcbc2016 100644 --- a/ecmascript/builtins/builtins_reflect.cpp +++ b/ecmascript/builtins/builtins_reflect.cpp @@ -40,7 +40,7 @@ JSTaggedValue BuiltinsReflect::ReflectApply(EcmaRuntimeCallInfo *argv) // 3. Perform PrepareForTailCall(). // 4. Return ? Call(target, thisArgument, args). - const int32_t argsLength = static_cast(args->GetLength()); + const uint32_t argsLength = args->GetLength(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, target, thisArgument, undefined, argsLength); @@ -75,7 +75,7 @@ JSTaggedValue BuiltinsReflect::ReflectConstruct(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle args = JSHandle::Cast(argOrAbrupt); // 5. Return ? Construct(target, args, newTarget). - const int32_t argsLength = static_cast(args->GetLength()); + const uint32_t argsLength = args->GetLength(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, target, undefined, newTarget, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); diff --git a/ecmascript/builtins/builtins_regexp.cpp b/ecmascript/builtins/builtins_regexp.cpp index 7df6eec0364ba4a28c6140becffc4e937d5b3d4e..60e197446de3536b4397ca2d8c7e1c72b310d9d7 100644 --- a/ecmascript/builtins/builtins_regexp.cpp +++ b/ecmascript/builtins/builtins_regexp.cpp @@ -95,6 +95,7 @@ JSTaggedValue BuiltinsRegExp::RegExpConstructor(EcmaRuntimeCallInfo *argv) // 5.c Else, let F be flags. flagsTemp = JSHandle(thread, *JSTaggedValue::ToString(thread, flags)); } + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 6. Else if patternIsRegExp is true } else if (patternIsRegExp) { JSHandle sourceString(globalConst->GetHandledSourceString()); @@ -114,6 +115,7 @@ JSTaggedValue BuiltinsRegExp::RegExpConstructor(EcmaRuntimeCallInfo *argv) } else { // 6.d Else, let F be flags. 
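On the `int32_t` → `uint32_t` changes for `argsLength` in the Reflect hunks above: `TaggedArray::GetLength()` returns an unsigned length, so taking the count as `uint32_t` drops the narrowing `static_cast` round-trip. A sketch under that assumption about the signatures:

```
// Sketch (assumed signatures, based on this diff): with NewRuntimeCallInfo
// taking an unsigned argument count, the cast to int32_t and back disappears.
const uint32_t argsLength = args->GetLength();   // already uint32_t
EcmaRuntimeCallInfo *info =
    EcmaInterpreter::NewRuntimeCallInfo(thread, target, thisArgument, undefined, argsLength);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
```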
flagsTemp = JSHandle(thread, *JSTaggedValue::ToString(thread, flags)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } } else { // 7.a Let P be pattern. @@ -123,6 +125,7 @@ JSTaggedValue BuiltinsRegExp::RegExpConstructor(EcmaRuntimeCallInfo *argv) flagsTemp = flags; } else { flagsTemp = JSHandle(thread, *JSTaggedValue::ToString(thread, flags)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } } // 8. Let O be RegExpAlloc(newTarget). @@ -268,6 +271,18 @@ JSTaggedValue BuiltinsRegExp::GetGlobal(EcmaRuntimeCallInfo *argv) return GetTaggedBoolean(result); } +// 22.2.6.6 +JSTaggedValue BuiltinsRegExp::GetHasIndices(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, RegExp, GetHasIndices); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle thisObj = GetThis(argv); + bool result = GetFlagsInternal(thread, thisObj, RegExpParser::FLAG_HASINDICES); + return GetTaggedBoolean(result); +} + // 20.2.5.5 JSTaggedValue BuiltinsRegExp::GetIgnoreCase(EcmaRuntimeCallInfo *argv) { @@ -377,6 +392,7 @@ JSTaggedValue BuiltinsRegExp::Match(EcmaRuntimeCallInfo *argv) // 3. Let S be ToString(string) JSHandle inputString = GetCallArg(argv, 0); JSHandle stringHandle = JSTaggedValue::ToString(thread, inputString); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool useCache = true; JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); if (cacheTable->GetLargeStrCount() == 0 || cacheTable->GetConflictCount() == 0) { @@ -545,8 +561,8 @@ JSTaggedValue BuiltinsRegExp::MatchAll(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); runtimeInfo->SetCallArg(thisObj.GetTaggedValue(), flagsStrHandle.GetTaggedValue()); JSTaggedValue taggedMatcher = JSFunction::Construct(runtimeInfo); - JSHandle matcherHandle(thread, taggedMatcher); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle matcherHandle(thread, taggedMatcher); // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). JSHandle lastIndexString(globalConstants->GetHandledLastIndexString()); @@ -866,6 +882,7 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) // 16. Repeat, for each result in results, for (int i = 0; i < resultsIndex; i++) { resultValues.Update(ObjectFastOperator::FastGetPropertyByIndex(thread, resultsList.GetTaggedValue(), i)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // a. Let nCaptures be ToLength(Get(result, "length")). JSHandle lengthHandle = globalConst->GetHandledLengthString(); ncapturesHandle.Update(ObjectFastOperator::FastGetPropertyByValue( @@ -956,13 +973,14 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) replacerArgs->Set(thread, index + 3, namedCaptures.GetTaggedValue()); // 3: position of groups } // iv. Let replValue be Call(replaceValue, undefined, replacerArgs). - const int32_t argsLength = static_cast(replacerArgs->GetLength()); + const uint32_t argsLength = replacerArgs->GetLength(); JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, inputReplaceValue, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argsLength, replacerArgs); JSTaggedValue replaceResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle replValue(thread, replaceResult); // v. Let replacement be ToString(replValue). 
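The `MatchAll` hunk above fixes an ordering bug rather than adding a new check: the result of `JSFunction::Construct` was boxed into a handle before the abrupt-completion test ran. Sketch of the corrected order (handles elided as elsewhere in this diff):

```
// Check for an abrupt completion *before* wrapping the raw result in a
// handle, so a pending exception is propagated instead of boxed and used.
JSTaggedValue taggedMatcher = JSFunction::Construct(runtimeInfo);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);   // check first ...
JSHandle matcherHandle(thread, taggedMatcher);   // ... then box the result
```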
JSHandle replacementString = JSTaggedValue::ToString(thread, replValue); @@ -1391,6 +1409,69 @@ bool BuiltinsRegExp::GetFlagsInternal(JSThread *thread, const JSHandle(regexpObj->GetOriginalFlags().GetInt()); return flags & mask; } + +// 22.2.7.8 +JSHandle BuiltinsRegExp::MakeMatchIndicesIndexPairArray(JSThread *thread, + const std::vector>& indices, + const std::vector>& groupNames, bool hasGroups) +{ + // 1. Let n be the number of elements in indices. + uint32_t n = indices.size(); + // Assert: groupNames has n - 1 elements. + ASSERT(groupNames.size() == n - 1); + // 5. Let A be ! ArrayCreate(n). + JSHandle results(JSArray::ArrayCreate(thread, JSTaggedNumber(n))); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + // 6. If hasGroups is true, then + // a. Let groups be OrdinaryObjectCreate(null). + // 7. Else, + // a. Let groups be undefined. + JSMutableHandle groups(thread, JSTaggedValue::Undefined()); + if (hasGroups) { + JSHandle nullHandle(thread, JSTaggedValue::Null()); + JSHandle nullObj = factory->OrdinaryNewJSObjectCreate(nullHandle); + groups.Update(nullObj.GetTaggedValue()); + } + // 8. Perform ! CreateDataPropertyOrThrow(A, "groups", groups). + const GlobalEnvConstants *globalConst = thread->GlobalConstants(); + JSHandle groupsKey = globalConst->GetHandledGroupsString(); + JSObject::CreateDataProperty(thread, results, groupsKey, groups); + // 9. For each integer i such that 0 ≤ i < n, in ascending order, do + // a. Let matchIndices be indices[i]. + // b. If matchIndices is not undefined, then + // i. Let matchIndexPair be GetMatchIndexPair(S, matchIndices). + // c. Else, + // i. Let matchIndexPair be undefined. + // d. Perform ! CreateDataPropertyOrThrow(A, ! ToString(𝔽(i)), matchIndexPair). + // e. If i > 0 and groupNames[i - 1] is not undefined, then + // i. Assert: groups is not undefined. + // ii. Perform ! CreateDataPropertyOrThrow(groups, groupNames[i - 1], matchIndexPair). + JSMutableHandle matchIndexPair(thread, JSTaggedValue::Undefined()); + for (uint32_t i = 0; i < n; i++) { + std::pair matchIndices = indices[i]; + if (!matchIndices.first.IsUndefined()) { + JSHandle match = factory->NewTaggedArray(2); // 2 means the length of array + match->Set(thread, 0, matchIndices.first); + match->Set(thread, 1, matchIndices.second); + JSHandle pair(JSArray::CreateArrayFromList(thread, JSHandle::Cast(match))); + matchIndexPair.Update(pair.GetTaggedValue()); + } else { + matchIndexPair.Update(JSTaggedValue::Undefined()); + } + JSObject::CreateDataProperty(thread, results, i, matchIndexPair); + if (i > 0) { + JSHandle groupName = groupNames[i - 1]; + if (!groupName->IsUndefined()) { + JSHandle groupObject = JSHandle::Cast(groups); + JSObject::CreateDataProperty(thread, groupObject, groupName, matchIndexPair); + } + } + } + // 10. Return A. 
+ return JSHandle::Cast(results); +} + // 21.2.5.2.2 JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle ®exp, const JSHandle &inputStr, bool useCache) @@ -1412,23 +1493,26 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle lastIndex = lastIndexNumber.GetNumber(); } - JSHandle globalHandle = globalConst->GetHandledGlobalString(); - bool global = ObjectFastOperator::FastGetPropertyByValue( - thread, regexp.GetTaggedValue(), globalHandle.GetTaggedValue()).ToBoolean(); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSHandle stickyHandle = globalConst->GetHandledStickyString(); - bool sticky = ObjectFastOperator::FastGetPropertyByValue( - thread, regexp.GetTaggedValue(), stickyHandle.GetTaggedValue()).ToBoolean(); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (!global && !sticky) { - lastIndex = 0; - } - JSHandle regexpObj(regexp); JSMutableHandle pattern(thread, regexpObj->GetOriginalSource()); JSMutableHandle flags(thread, regexpObj->GetOriginalFlags()); - JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); + + uint8_t flagsBits = static_cast(flags->GetInt()); + bool global = (flagsBits & RegExpParser::FLAG_GLOBAL) != 0; + bool sticky = (flagsBits & RegExpParser::FLAG_STICKY) != 0; + bool hasIndices = (flagsBits & RegExpParser::FLAG_HASINDICES) != 0; + if (!global && !sticky) { + if (useCache) { + JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flags, inputStr, + RegExpExecResultCache::EXEC_TYPE, regexp); + if (!cacheResult.IsUndefined()) { + return cacheResult; + } + } + lastIndex = 0; + } + uint32_t length = EcmaStringAccessor(inputStr->GetTaggedObject()).GetLength(); if (lastIndex > static_cast(length)) { ObjectFastOperator::FastSetPropertyByValue(thread, regexp.GetTaggedValue(), lastIndexHandle.GetTaggedValue(), @@ -1437,6 +1521,7 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle return JSTaggedValue::Null(); } JSHandle inputString = JSTaggedValue::ToString(thread, inputStr); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isUtf16 = EcmaStringAccessor(inputString).IsUtf16(); auto inputPtr = EcmaStringAccessor(inputString).ToOneByteDataForced(); const uint8_t *strBuffer = inputPtr.get(); @@ -1470,36 +1555,55 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle JSObject::CreateDataProperty(thread, results, indexKey, indexValue); // 25. Perform CreateDataProperty(A, "input", S). JSHandle inputKey = globalConst->GetHandledInputString(); - JSHandle inputValue(thread, static_cast(inputStr->GetTaggedObject())); JSObject::CreateDataProperty(thread, results, inputKey, inputValue); + // 27. Perform CreateDataProperty(A, "0", matched_substr). - JSHandle zeroValue(matchResult.captures_[0].second); + JSHandle zeroValue(matchResult.captures_[0].second.capturedValue); JSObject::CreateDataProperty(thread, results, 0, zeroValue); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + // Let indices be a new empty List. + // Let groupNames be a new empty List. + // Append match to indices. + std::vector> indices; + std::vector> groupNames; + indices.emplace_back(std::make_pair(JSTaggedValue(matchIndex), JSTaggedValue(endIndex))); + // If R contains any GroupName, then + // a. Let groups be OrdinaryObjectCreate(null). + // b. Let hasGroups be true. + // Else, + // a. Let groups be undefined. + // b. Let hasGroups be false. 
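`RegExpBuiltinExec` now decodes `global`, `sticky`, and the new `hasIndices` directly from the stored flag byte instead of doing property lookups on every exec. A standalone illustration of the decode (the bit positions below are assumptions for demonstration; the authoritative values are the `RegExpParser::FLAG_*` constants):

```
#include <cstdint>
#include <iostream>

// Illustrative flag bits; the engine's real values live in RegExpParser.
constexpr uint8_t FLAG_GLOBAL     = 1u << 0;
constexpr uint8_t FLAG_STICKY     = 1u << 5;
constexpr uint8_t FLAG_HASINDICES = 1u << 6;  // the new 'd' flag

int main()
{
    uint8_t flagsBits = FLAG_GLOBAL | FLAG_HASINDICES;  // e.g. /re/dg
    bool global     = (flagsBits & FLAG_GLOBAL) != 0;   // one AND per flag,
    bool sticky     = (flagsBits & FLAG_STICKY) != 0;   // no dynamic property
    bool hasIndices = (flagsBits & FLAG_HASINDICES) != 0;  // lookups
    std::cout << global << sticky << hasIndices << std::endl;  // prints 101
    return 0;
}
```

This is also why the `FlagsBitsToString` hunk further down relaxes its guard from `0xC0` to `0x80`: the 'd' flag now legitimately occupies a seventh bit.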
JSHandle groupName(thread, regexpObj->GetGroupName()); JSMutableHandle groups(thread, JSTaggedValue::Undefined()); + bool hasGroups = false; if (!groupName->IsUndefined()) { JSHandle nullHandle(thread, JSTaggedValue::Null()); JSHandle nullObj = factory->OrdinaryNewJSObjectCreate(nullHandle); groups.Update(nullObj.GetTaggedValue()); + hasGroups = true; } + // Perform ! CreateDataPropertyOrThrow(A, "groups", groups). JSHandle groupsKey = globalConst->GetHandledGroupsString(); JSObject::CreateDataProperty(thread, results, groupsKey, groups); // Create a new RegExp on global JSHandle globalRegExp = JSHandle(env->GetRegExpFunction()); JSMutableHandle keyString(thread, JSTaggedValue::Undefined()); uint32_t captureIndex = 1; + JSHandle undefined = globalConst->GetHandledUndefined(); // 28. For each integer i such that i > 0 and i <= n for (; captureIndex < capturesSize; captureIndex++) { // a. Let capture_i be ith element of r's captures List JSTaggedValue capturedValue; if (matchResult.captures_[captureIndex].first) { capturedValue = JSTaggedValue::Undefined(); + indices.emplace_back(std::make_pair(JSTaggedValue::Undefined(), JSTaggedValue::Undefined())); } else { - capturedValue = matchResult.captures_[captureIndex].second.GetTaggedValue(); + auto captureI = matchResult.captures_[captureIndex].second; + capturedValue = captureI.capturedValue.GetTaggedValue(); + indices.emplace_back(std::make_pair(JSTaggedValue(captureI.startIndex), JSTaggedValue(captureI.endIndex))); } JSHandle iValue(thread, capturedValue); // add to RegExp.$i and i must <= 9 @@ -1517,9 +1621,22 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle if (groupArray->GetLength() > captureIndex - 1) { JSHandle skey(thread, groupArray->Get(captureIndex - 1)); JSObject::CreateDataProperty(thread, groupObject, skey, iValue); + groupNames.emplace_back(skey); + } else { + groupNames.emplace_back(undefined); } + } else { + groupNames.emplace_back(undefined); } } + // If hasIndices is true, then + // a. Let indicesArray be MakeMatchIndicesIndexPairArray(S, indices, groupNames, hasGroups). + // b. Perform ! CreateDataPropertyOrThrow(A, "indices", indicesArray). + if (hasIndices) { + auto indicesArray = MakeMatchIndicesIndexPairArray(thread, indices, groupNames, hasGroups); + JSHandle indicesKey = globalConst->GetHandledIndicesString(); + JSObject::CreateDataProperty(thread, results, indicesKey, indicesArray); + } JSHandle emptyString = thread->GlobalConstants()->GetHandledEmptyString(); while (captureIndex <= REGEXP_GLOBAL_ARRAY_SIZE) { keyString.Update(GetDollarString(thread, static_cast(captureIndex))); @@ -1548,7 +1665,7 @@ JSTaggedValue BuiltinsRegExp::RegExpExec(JSThread *thread, const JSHandleIsString()); // 3. Let exec be Get(R, "exec"). 
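The capture loop above records one `indices` entry per capture group, either its start/end pair or a pair of undefineds when the group did not participate, so the later `MakeMatchIndicesIndexPairArray` walk stays aligned one-to-one with the captures (which is what its `groupNames.size() == n - 1` assertion relies on). Restated from the hunk (handles elided as elsewhere in this diff):

```
// One indices entry per capture, matched or not, keeps the pair array
// construction in MakeMatchIndicesIndexPairArray aligned with the captures.
if (matchResult.captures_[captureIndex].first) {   // group did not participate
    capturedValue = JSTaggedValue::Undefined();
    indices.emplace_back(std::make_pair(JSTaggedValue::Undefined(), JSTaggedValue::Undefined()));
} else {
    auto captureI = matchResult.captures_[captureIndex].second;
    capturedValue = captureI.capturedValue.GetTaggedValue();
    indices.emplace_back(std::make_pair(JSTaggedValue(captureI.startIndex),
                                        JSTaggedValue(captureI.endIndex)));
}
```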
JSHandle inputStr = JSTaggedValue::ToString(thread, inputString); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle execHandle = globalConst->GetHandledExecString(); JSTaggedValue execVal = ObjectFastOperator::FastGetPropertyByValue(thread, regexp.GetTaggedValue(), @@ -1622,6 +1739,9 @@ uint32_t BuiltinsRegExp::UpdateExpressionFlags(JSThread *thread, const CString & case 'y': flagsBitsTemp = RegExpParser::FLAG_STICKY; break; + case 'd': + flagsBitsTemp = RegExpParser::FLAG_HASINDICES; + break; default: { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle syntaxError = @@ -1669,10 +1789,14 @@ JSHandle BuiltinsRegExp::GetDollarString(JSThread *thread, RegExp JSTaggedValue BuiltinsRegExp::FlagsBitsToString(JSThread *thread, uint8_t flags) { - ASSERT((flags & 0xC0) == 0); // 0xC0: first 2 bits of flags must be 0 + ASSERT((flags & 0x80) == 0); // 0x80: first bit of flags must be 0 BUILTINS_API_TRACE(thread, RegExp, FlagsBitsToString); uint8_t *flagsStr = new uint8_t[7]; // 7: maximum 6 flags + '\0' size_t flagsLen = 0; + if (flags & RegExpParser::FLAG_HASINDICES) { + flagsStr[flagsLen] = 'd'; + flagsLen++; + } if (flags & RegExpParser::FLAG_GLOBAL) { flagsStr[flagsLen] = 'g'; flagsLen++; @@ -1733,7 +1857,7 @@ JSTaggedValue BuiltinsRegExp::RegExpInitialize(JSThread *thread, const JSHandle< // 6. ReturnIfAbrupt(F). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); /** - * 7. If F contains any code unit other than "g", "i", "m", "u", or "y" or if it contains the same code + * 7. If F contains any code unit other than "d", "g", "i", "m", "u", or "y" or if it contains the same code * unit more than once, throw a SyntaxError exception. **/ CString checkStr = ConvertToString(*flagsStrHandle, StringConvertedUsage::LOGICOPERATION); @@ -1869,6 +1993,11 @@ JSTaggedValue RegExpExecResultCache::FindCachedResult(JSThread *thread, const JS ASSERT((static_cast(CACHE_TABLE_HEADER_SIZE) + static_cast(entry) * static_cast(ENTRY_SIZE)) <= static_cast(UINT32_MAX)); uint32_t index = CACHE_TABLE_HEADER_SIZE + entry * ENTRY_SIZE; + // update cached value if input value is changed + JSTaggedValue cachedStr = Get(index + INPUT_STRING_INDEX); + if (!cachedStr.IsUndefined() && cachedStr != inputValue) { + Set(thread, index + INPUT_STRING_INDEX, inputValue); + } JSTaggedValue result; switch (type) { case REPLACE_TYPE: diff --git a/ecmascript/builtins/builtins_regexp.h b/ecmascript/builtins/builtins_regexp.h index 6830d9d070cc01d60d425f0799b85f0ce71f2e98..9f5dd543e3b72a0f7bcdf9c1580ab3a11e1506fe 100644 --- a/ecmascript/builtins/builtins_regexp.h +++ b/ecmascript/builtins/builtins_regexp.h @@ -86,6 +86,8 @@ public: // 21.2.5.2.3 AdvanceStringIndex ( S, index, unicode ) static uint32_t AdvanceStringIndex(const JSHandle &inputStr, uint32_t index, bool unicode); + // 22.2.6.6 get RegExp.prototype.hasIndices + static JSTaggedValue GetHasIndices(EcmaRuntimeCallInfo *argv); private: static constexpr uint32_t MIN_REPLACE_STRING_LENGTH = 1000; @@ -116,6 +118,10 @@ private: const JSHandle &flags); static JSTaggedValue RegExpReplaceFast(JSThread *thread, JSHandle ®exp, JSHandle inputString, uint32_t inputLength); + // 22.2.7.8 MakeMatchIndicesIndexPairArray ( S, indices, groupNames, hasGroups ) + static JSHandle MakeMatchIndicesIndexPairArray(JSThread* thread, + const std::vector>& indices, + const std::vector>& groupNames, bool hasGroups); }; class RegExpExecResultCache : public TaggedArray { diff --git a/ecmascript/builtins/builtins_set.cpp 
b/ecmascript/builtins/builtins_set.cpp index 9a3d709f757cb4eef1fa0e17f1d1689f7208b36f..2b8883d5ffd71cbe3faeb9352bde52bd4896068e 100644 --- a/ecmascript/builtins/builtins_set.cpp +++ b/ecmascript/builtins/builtins_set.cpp @@ -117,8 +117,7 @@ JSTaggedValue BuiltinsSet::Add(EcmaRuntimeCallInfo *argv) } JSHandle value(GetCallArg(argv, 0)); - JSHandle set(JSTaggedValue::ToObject(thread, self)); - + JSHandle set(self); JSSet::Add(thread, set, value); return set.GetTaggedValue(); } @@ -136,7 +135,7 @@ JSTaggedValue BuiltinsSet::Clear(EcmaRuntimeCallInfo *argv) if (!self->IsJSSet()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSHandle set(thread, JSSet::Cast(*JSTaggedValue::ToObject(thread, self))); + JSHandle set(self); JSSet::Clear(thread, set); return JSTaggedValue::Undefined(); } @@ -154,7 +153,7 @@ JSTaggedValue BuiltinsSet::Delete(EcmaRuntimeCallInfo *argv) THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSHandle set(thread, JSSet::Cast(*JSTaggedValue::ToObject(thread, self))); + JSHandle set(self); JSHandle value = GetCallArg(argv, 0); bool flag = JSSet::Delete(thread, set, value); return GetTaggedBoolean(flag); @@ -172,7 +171,7 @@ JSTaggedValue BuiltinsSet::Has(EcmaRuntimeCallInfo *argv) if (!self->IsJSSet()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSSet *jsSet = JSSet::Cast(*JSTaggedValue::ToObject(thread, self)); + JSSet* jsSet = JSSet::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle value = GetCallArg(argv, 0); bool flag = jsSet->Has(value.GetTaggedValue()); return GetTaggedBoolean(flag); @@ -202,7 +201,7 @@ JSTaggedValue BuiltinsSet::ForEach(EcmaRuntimeCallInfo *argv) // 6.Let entries be the List that is the value of S’s [[SetData]] internal slot. 
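The Set hunks above all apply the same simplification: once `self->IsJSSet()` has been verified, the `JSTaggedValue::ToObject` call is redundant, and the value can be cast directly, removing a call that could itself allocate or throw. The shape, restated (handles elided as elsewhere in this diff):

```
// After the explicit type check, ToObject() is unnecessary: the value is
// already a JSSet heap object, so the handle can be cast directly.
if (!self->IsJSSet()) {
    THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception());
}
JSHandle set(self);   // direct cast; no ToObject round-trip
```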
JSMutableHandle hashSet(thread, set->GetLinkedSet()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; int index = 0; int totalElements = hashSet->NumberOfElements() + hashSet->NumberOfDeletedElements(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); @@ -249,8 +248,8 @@ JSTaggedValue BuiltinsSet::GetSize(EcmaRuntimeCallInfo *argv) if (!self->IsJSSet()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSSet *jsSet = JSSet::Cast(*JSTaggedValue::ToObject(thread, self)); - int count = jsSet->GetSize(); + JSSet* jsSet = JSSet::Cast(self.GetTaggedValue().GetTaggedObject()); + uint32_t count = jsSet->GetSize(); return JSTaggedValue(count); } @@ -262,6 +261,7 @@ JSTaggedValue BuiltinsSet::Entries(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSSetIterator::CreateSetIterator(thread, self, IterationKind::KEY_AND_VALUE); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -273,6 +273,7 @@ JSTaggedValue BuiltinsSet::Values(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSSetIterator::CreateSetIterator(thread, self, IterationKind::VALUE); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_string.cpp b/ecmascript/builtins/builtins_string.cpp index eb4efd1a1597f0f175a5d948adf2a588698c67a4..216617edea6270c0f8e720c2eae8375db22a5a36 100644 --- a/ecmascript/builtins/builtins_string.cpp +++ b/ecmascript/builtins/builtins_string.cpp @@ -160,13 +160,13 @@ JSTaggedValue BuiltinsString::FromCodePoint(EcmaRuntimeCallInfo *argv) ((static_cast(cp) - ENCODE_SECOND_FACTOR) % ENCODE_FIRST_FACTOR) + ENCODE_TRAIL_LOW; std::u16string nextU16str1 = base::StringHelper::Utf16ToU16String(&cu1, 1); std::u16string nextU16str2 = base::StringHelper::Utf16ToU16String(&cu2, 1); - u16str = base::StringHelper::Append(u16str, nextU16str1); - u16str = base::StringHelper::Append(u16str, nextU16str2); + base::StringHelper::InplaceAppend(u16str, nextU16str1); + base::StringHelper::InplaceAppend(u16str, nextU16str2); u16strSize++; } else { auto u16tCp = static_cast(cp); std::u16string nextU16str = base::StringHelper::Utf16ToU16String(&u16tCp, 1); - u16str = base::StringHelper::Append(u16str, nextU16str); + base::StringHelper::InplaceAppend(u16str, nextU16str); } } const char16_t *constChar16tData = u16str.data(); @@ -247,6 +247,7 @@ JSTaggedValue BuiltinsString::CharAt(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisFlat(thread, EcmaStringAccessor::Flatten(thread->GetEcmaVM(), thisHandle)); @@ -277,6 +278,7 @@ JSTaggedValue BuiltinsString::CharCodeAt(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle 
thisFlat(thread, EcmaStringAccessor::Flatten(thread->GetEcmaVM(), thisHandle)); @@ -307,6 +309,7 @@ JSTaggedValue BuiltinsString::CodePointAt(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisFlat(thread, EcmaStringAccessor::Flatten(thread->GetEcmaVM(), thisHandle)); @@ -338,36 +341,24 @@ JSTaggedValue BuiltinsString::Concat(EcmaRuntimeCallInfo *argv) BUILTINS_API_TRACE(argv->GetThread(), String, Concat); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + auto ecmaVm = thread->GetEcmaVM(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint32_t argLength = argv->GetArgsNumber(); if (argLength == 0) { return thisHandle.GetTaggedValue(); } - std::u16string u16strThis; - std::u16string u16strNext; - bool canBeCompress = true; - u16strThis = EcmaStringAccessor(thisHandle).ToU16String(); - if (EcmaStringAccessor(thisHandle).IsUtf16()) { - canBeCompress = false; - } for (uint32_t i = 0; i < argLength; i++) { JSHandle nextTag = BuiltinsString::GetCallArg(argv, i); JSHandle nextHandle = JSTaggedValue::ToString(thread, nextTag); - u16strNext = EcmaStringAccessor(nextHandle).ToU16String(); - if (EcmaStringAccessor(nextHandle).IsUtf16()) { - canBeCompress = false; - } - u16strThis = base::StringHelper::Append(u16strThis, u16strNext); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + EcmaString *tempStr = EcmaStringAccessor::Concat(ecmaVm, thisHandle, nextHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + thisHandle = JSHandle(thread, tempStr); } - const char16_t *constChar16tData = u16strThis.data(); - auto *char16tData = const_cast(constChar16tData); - auto *uint16tData = reinterpret_cast(char16tData); - uint32_t u16strSize = u16strThis.size(); - return canBeCompress ? 
factory->NewFromUtf16LiteralCompress(uint16tData, u16strSize).GetTaggedValue() : - factory->NewFromUtf16LiteralNotCompress(uint16tData, u16strSize).GetTaggedValue(); + return thisHandle.GetTaggedValue(); } // 21.1.3.5 String.prototype.constructor @@ -379,6 +370,7 @@ JSTaggedValue BuiltinsString::EndsWith(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -421,6 +413,7 @@ JSTaggedValue BuiltinsString::Includes(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isRegexp = JSObject::IsRegExp(thread, searchTag); @@ -456,6 +449,7 @@ JSTaggedValue BuiltinsString::IndexOf(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint32_t thisLen = EcmaStringAccessor(thisHandle).GetLength(); @@ -489,6 +483,7 @@ JSTaggedValue BuiltinsString::LastIndexOf(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int32_t thisLen = static_cast(EcmaStringAccessor(thisHandle).GetLength()); @@ -525,6 +520,7 @@ JSTaggedValue BuiltinsString::LocaleCompare(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thatTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); [[maybe_unused]] JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); [[maybe_unused]] JSHandle thatHandle = JSTaggedValue::ToString(thread, thatTag); @@ -585,6 +581,7 @@ JSTaggedValue BuiltinsString::Match(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexp = BuiltinsString::GetCallArg(argv, 0); JSHandle matchTag = thread->GetEcmaVM()->GetGlobalEnv()->GetMatchSymbol(); JSHandle undefined = globalConst->GetHandledUndefined(); @@ -633,6 +630,7 @@ JSTaggedValue BuiltinsString::MatchAll(EcmaRuntimeCallInfo *argv) const GlobalEnvConstants *globalConst = thread->GlobalConstants(); // 1. Let O be ? RequireObjectCoercible(this value). 
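The `String.prototype.concat` rework above replaces the grow-a-`std::u16string` approach with per-argument `EcmaStringAccessor::Concat`, which avoids widening compressed one-byte strings to UTF-16 and re-copying the accumulated prefix on every append. The resulting loop shape, restated (handles elided as elsewhere in this diff):

```
// Each argument is converted with ToString and joined via Concat, with an
// abrupt-completion check after every call that can allocate or throw.
for (uint32_t i = 0; i < argLength; i++) {
    JSHandle nextTag = BuiltinsString::GetCallArg(argv, i);
    JSHandle nextHandle = JSTaggedValue::ToString(thread, nextTag);
    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
    EcmaString *tempStr = EcmaStringAccessor::Concat(ecmaVm, thisHandle, nextHandle);
    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
    thisHandle = JSHandle(thread, tempStr);
}
return thisHandle.GetTaggedValue();
```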
JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexp = BuiltinsString::GetCallArg(argv, 0); JSHandle matchAllTag = thread->GetEcmaVM()->GetGlobalEnv()->GetMatchAllSymbol(); JSHandle undefined = globalConst->GetHandledUndefined(); @@ -704,6 +702,7 @@ JSTaggedValue BuiltinsString::Normalize(EcmaRuntimeCallInfo *argv) auto vm = thread->GetEcmaVM(); ObjectFactory *factory = vm->GetFactory(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); JSHandle formValue; @@ -769,6 +768,7 @@ JSTaggedValue BuiltinsString::Repeat(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint32_t thisLen = EcmaStringAccessor(thisHandle).GetLength(); @@ -845,7 +845,7 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) // If replacer is not undefined, then if (!replaceMethod->IsUndefined()) { // Return Call(replacer, searchValue, «O, replaceValue»). - const int32_t argsLength = 2; + const uint32_t argsLength = 2; JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceMethod, searchTag, undefined, argsLength); @@ -883,12 +883,13 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) // If functionalReplace is true, then if (replaceTag->IsCallable()) { // Let replValue be Call(replaceValue, undefined,«matched, pos, and string»). - const int32_t argsLength = 3; // 3: «matched, pos, and string» + const uint32_t argsLength = 3; // 3: «matched, pos, and string» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceTag, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(searchString.GetTaggedValue(), JSTaggedValue(pos), thisString.GetTaggedValue()); JSTaggedValue replStrDeocodeValue = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); replHandle.Update(replStrDeocodeValue); } else { // Let captures be an empty List. @@ -899,6 +900,7 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) replHandle.Update(GetSubstitution(thread, searchString, thisString, pos, capturesList, undefined, replacement)); } JSHandle realReplaceStr = JSTaggedValue::ToString(thread, replHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let tailPos be pos + the number of code units in matched. 
int32_t tailPos = pos + static_cast(EcmaStringAccessor(searchString).GetLength()); // Let newString be the String formed by concatenating the first pos code units of string, @@ -911,8 +913,12 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) auto thisLen = EcmaStringAccessor(thisString).GetLength(); JSHandle suffixString(thread, EcmaStringAccessor::FastSubString(ecmaVm, thisString, tailPos, thisLen - tailPos)); - JSHandle tempString(thread, EcmaStringAccessor::Concat(ecmaVm, prefixString, realReplaceStr)); - return JSTaggedValue(EcmaStringAccessor::Concat(ecmaVm, tempString, suffixString)); + EcmaString *tempStr = EcmaStringAccessor::Concat(ecmaVm, prefixString, realReplaceStr); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle tempString(thread, tempStr); + EcmaString *resultStr = EcmaStringAccessor::Concat(ecmaVm, tempString, suffixString); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return JSTaggedValue(resultStr); } JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) @@ -967,6 +973,7 @@ JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceMethod, searchTag, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(thisTag.GetTaggedValue(), replaceTag.GetTaggedValue()); return JSFunction::Call(info); } @@ -1005,11 +1012,13 @@ JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) // If functionalReplace is true, then if (replaceTag->IsCallable()) { // Let replValue be Call(replaceValue, undefined,«matched, pos, and string»). - const int32_t argsLength = 3; // 3: «matched, pos, and string» + const uint32_t argsLength = 3; // 3: «matched, pos, and string» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceTag, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(searchString.GetTaggedValue(), JSTaggedValue(pos), thisString.GetTaggedValue()); JSTaggedValue replStrDeocodeValue = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); replHandle.Update(replStrDeocodeValue); } else { // Let captures be an empty List. @@ -1021,6 +1030,7 @@ JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) capturesList, undefined, replacement)); } JSHandle realReplaceStr = JSTaggedValue::ToString(thread, replHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let tailPos be pos + the number of code units in matched. // Let newString be the String formed by concatenating the first pos code units of string, // replStr, and the trailing substring of string starting at index tailPos. 
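In the `Replace` hunk above, the final `prefix + replacement + suffix` assembly is split into separate statements, presumably because `Concat` allocates and can leave a pending exception, so each result is now checked before being wrapped in a handle. Restated (handles elided as elsewhere in this diff):

```
// Each Concat result is checked before use instead of nesting the two calls.
EcmaString *tempStr = EcmaStringAccessor::Concat(ecmaVm, prefixString, realReplaceStr);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
JSHandle tempString(thread, tempStr);
EcmaString *resultStr = EcmaStringAccessor::Concat(ecmaVm, tempString, suffixString);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
return JSTaggedValue(resultStr);
```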
@@ -1254,6 +1264,7 @@ JSTaggedValue BuiltinsString::Search(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexp = BuiltinsString::GetCallArg(argv, 0); JSHandle searchTag = thread->GetEcmaVM()->GetGlobalEnv()->GetSearchSymbol(); JSHandle undefined = globalConst->GetHandledUndefined(); @@ -1291,6 +1302,7 @@ JSTaggedValue BuiltinsString::Slice(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int32_t thisLen = static_cast(EcmaStringAccessor(thisHandle).GetLength()); @@ -1335,6 +1347,7 @@ JSTaggedValue BuiltinsString::Split(EcmaRuntimeCallInfo *argv) // Let O be RequireObjectCoercible(this value). JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObj(thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle seperatorTag = BuiltinsString::GetCallArg(argv, 0); @@ -1347,7 +1360,7 @@ JSTaggedValue BuiltinsString::Split(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (!splitter->IsUndefined()) { // Return Call(splitter, separator, «‍O, limit»). - const int32_t argsLength = 2; + const uint32_t argsLength = 2; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, splitter, seperatorTag, undefined, argsLength); @@ -1441,6 +1454,7 @@ JSTaggedValue BuiltinsString::StartsWith(EcmaRuntimeCallInfo *argv) JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isRegexp = JSObject::IsRegExp(thread, searchTag); @@ -1483,6 +1497,7 @@ JSTaggedValue BuiltinsString::Substring(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int32_t thisLen = static_cast(EcmaStringAccessor(thisHandle).GetLength()); @@ -1504,6 +1519,9 @@ JSTaggedValue BuiltinsString::Substring(EcmaRuntimeCallInfo *argv) int32_t from = std::min(start, end); int32_t to = std::max(start, end); int32_t len = to - from; + if (static_cast(len) >= SlicedString::MIN_SLICED_ECMASTRING_LENGTH) { + return JSTaggedValue(EcmaStringAccessor::GetSlicedString(thread->GetEcmaVM(), thisHandle, from, len)); + } return JSTaggedValue(EcmaStringAccessor::FastSubString(thread->GetEcmaVM(), thisHandle, from, len)); } @@ -1518,7 +1536,7 @@ JSTaggedValue BuiltinsString::ToLocaleLowerCase(EcmaRuntimeCallInfo *argv) // Let O be RequireObjectCoercible(this value). JSHandle obj(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let S be ? ToString(O). 
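The `Substring` hunk above adds a `SlicedString` fast path: once the requested length reaches `SlicedString::MIN_SLICED_ECMASTRING_LENGTH`, the result is a view over the parent string's storage rather than a copy. A standalone analogue of the idea using `std::string_view` (the threshold value here is illustrative, not the engine's):

```
#include <cstddef>
#include <iostream>
#include <string>
#include <string_view>

// Long substrings become views over the parent's storage instead of copies.
constexpr std::size_t kMinSlicedLength = 13;  // illustrative threshold

std::string_view Slice(const std::string &parent, std::size_t from, std::size_t len)
{
    return std::string_view(parent).substr(from, len);  // no allocation
}

int main()
{
    std::string s = "substring fast paths avoid copying long slices";
    if (s.size() >= kMinSlicedLength) {
        std::cout << Slice(s, 10, 10) << std::endl;  // prints "fast paths"
    }
    return 0;
}
```

The trade-off, as with any rope/slice representation, is that the parent string stays alive as long as the slice does.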
JSHandle string = JSTaggedValue::ToString(thread, obj); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1579,7 +1597,7 @@ JSTaggedValue BuiltinsString::ToLocaleUpperCase(EcmaRuntimeCallInfo *argv) // Let O be RequireObjectCoercible(this value). JSHandle obj(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let S be ? ToString(O). JSHandle string = JSTaggedValue::ToString(thread, obj); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1632,6 +1650,7 @@ JSTaggedValue BuiltinsString::ToLowerCase(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *result = EcmaStringAccessor::ToLower(thread->GetEcmaVM(), thisHandle); @@ -1654,6 +1673,7 @@ JSTaggedValue BuiltinsString::ToUpperCase(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *result = EcmaStringAccessor::ToUpper(thread->GetEcmaVM(), thisHandle); @@ -1668,6 +1688,7 @@ JSTaggedValue BuiltinsString::Trim(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM); @@ -1681,6 +1702,7 @@ JSTaggedValue BuiltinsString::TrimStart(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_START); @@ -1694,6 +1716,7 @@ JSTaggedValue BuiltinsString::TrimEnd(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_END); @@ -1707,6 +1730,7 @@ JSTaggedValue BuiltinsString::TrimLeft(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_START); @@ -1720,6 
+1744,7 @@ JSTaggedValue BuiltinsString::TrimRight(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_END); @@ -1742,6 +1767,7 @@ JSTaggedValue BuiltinsString::GetStringIterator(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); // 1. Let O be RequireObjectCoercible(this value). JSHandle current(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let S be ToString(O). JSHandle string = JSTaggedValue::ToString(thread, current); @@ -1763,6 +1789,7 @@ JSTaggedValue BuiltinsString::SubStr(EcmaRuntimeCallInfo *argv) // 2. Let S be ToString(O). JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisString = JSTaggedValue::ToString(thread, thisTag); // 3. ReturnIfAbrupt(S). @@ -1811,6 +1838,7 @@ JSTaggedValue BuiltinsString::At(EcmaRuntimeCallInfo *argv) // 1. Let O be RequireObjectCoercible(this value). // 2. Let S be ToString(O). JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1847,6 +1875,7 @@ JSTaggedValue BuiltinsString::GetLength(EcmaRuntimeCallInfo *argv) JSHandle thisHandle = GetThis(argv); JSHandle thisString = JSTaggedValue::ToString(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return GetTaggedInt(EcmaStringAccessor(thisString).GetLength()); } @@ -1876,6 +1905,7 @@ JSTaggedValue BuiltinsString::Pad(EcmaRuntimeCallInfo *argv, bool isStart) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, BuiltinsString::GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle lengthTag = GetCallArg(argv, 0); @@ -1890,6 +1920,7 @@ JSTaggedValue BuiltinsString::Pad(EcmaRuntimeCallInfo *argv, bool isStart) stringBuilder = u" "; } else { JSHandle filler = JSTaggedValue::ToString(thread, fillString); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); stringBuilder = EcmaStringAccessor(filler).ToU16String(); } if (stringBuilder.size() == 0) { diff --git a/ecmascript/builtins/builtins_symbol.cpp b/ecmascript/builtins/builtins_symbol.cpp index d629e11eefc7cf71fcc297f1e1ef2e99e0fbf141..c58a7926fd0c52ee82455f7eba14d96df5864a22 100644 --- a/ecmascript/builtins/builtins_symbol.cpp +++ b/ecmascript/builtins/builtins_symbol.cpp @@ -110,6 +110,7 @@ JSTaggedValue BuiltinsSymbol::SymbolDescriptiveString(JSThread *thread, JSTagged JSHandle rightHandle(factory->NewFromASCII(")")); JSHandle stringLeft = factory->ConcatFromString(leftHandle, JSTaggedValue::ToString(thread, descHandle)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle str = factory->ConcatFromString(stringLeft, rightHandle); return str.GetTaggedValue(); } @@ -194,35 +195,29 @@ JSTaggedValue BuiltinsSymbol::KeyFor(EcmaRuntimeCallInfo *argv) // 19.4.3.4 Symbol.prototype [ 
@@toPrimitive ] ( hint ) JSTaggedValue BuiltinsSymbol::ToPrimitive(EcmaRuntimeCallInfo *argv) { - // The allowed values for hint are "default", "number", and "string". ASSERT(argv); BUILTINS_API_TRACE(argv->GetThread(), Symbol, ToPrimitive); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - // 1.Let s be the this value. + // Let s be the this value. JSHandle sym = GetThis(argv); - // 2.If Type(s) is Symbol, return s. + // 1.If value is a Symbol, return value. if (sym->IsSymbol()) { return sym.GetTaggedValue(); } - // 3.If Type(s) is not Object, throw a TypeError exception. - if (!sym->IsHeapObject()) { - // return TypeError - THROW_TYPE_ERROR_AND_RETURN(thread, "ToPrimitive: s is not Object", JSTaggedValue::Exception()); - } - ASSERT(sym->IsHeapObject()); - // 4.If s does not have a [[SymbolData]] internal slot, throw a TypeError exception. - // 5.Return the value of s's [[SymbolData]] internal slot. - if (!sym->IsJSPrimitiveRef()) { - // If s does not have a [[SymbolData]] internal slot, throw a TypeError exception. - THROW_TYPE_ERROR_AND_RETURN(thread, "ToPrimitive: no [[SymbolData]]", JSTaggedValue::Exception()); + + // 2.If value is an Object and value has a [[SymbolData]] internal slot, then + if (sym->IsJSPrimitiveRef()) { + // Let sym be the value of s's [[SymbolData]] internal slot. + JSTaggedValue primitive = JSPrimitiveRef::Cast(sym->GetTaggedObject())->GetValue(); + if (primitive.IsSymbol()) { + return primitive; + } } - // Let sym be the value of s's [[SymbolData]] internal slot. - JSTaggedValue primitive = JSPrimitiveRef::Cast(sym->GetTaggedObject())->GetValue(); - ASSERT(primitive.IsSymbol()); - return primitive; -} + // 3.If s does not have a [[SymbolData]] internal slot, throw a TypeError exception. + THROW_TYPE_ERROR_AND_RETURN(thread, "ToPrimitive: s is not Object", JSTaggedValue::Exception()); +} JSTaggedValue BuiltinsSymbol::DescriptionGetter(EcmaRuntimeCallInfo *argv) { ASSERT(argv); diff --git a/ecmascript/builtins/builtins_typedarray.cpp b/ecmascript/builtins/builtins_typedarray.cpp index 15a73f532ce06559a2caee489c494342d41a359d..f890205d5a4e6060d0b1b6d02f460b8385496cfb 100644 --- a/ecmascript/builtins/builtins_typedarray.cpp +++ b/ecmascript/builtins/builtins_typedarray.cpp @@ -20,7 +20,7 @@ #include "ecmascript/builtins/builtins_array.h" #include "ecmascript/builtins/builtins_arraybuffer.h" #include "ecmascript/ecma_runtime_call_info.h" -#include "ecmascript/ecma_string.h" +#include "ecmascript/ecma_string-inl.h" #include "ecmascript/global_env.h" #include "ecmascript/interpreter/interpreter.h" #include "ecmascript/js_array.h" @@ -184,7 +184,9 @@ JSTaggedValue BuiltinsTypedArray::From(EcmaRuntimeCallInfo *argv) JSHandle usingIterator = JSObject::GetMethod(thread, source, iteratorSymbol); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle arrIter = JSObject::GetMethod(thread, env->GetArrayProtoValuesFunction(), iteratorSymbol); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle typedArrIter = JSObject::GetMethod(thread, env->GetTypedArrayPrototype(), iteratorSymbol); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isArrIter = JSTaggedValue::SameValue(usingIterator, arrIter); bool isTypedArrIter = JSTaggedValue::SameValue(usingIterator, typedArrIter); // 6. If usingIterator is not undefined, then @@ -220,7 +222,7 @@ JSTaggedValue BuiltinsTypedArray::From(EcmaRuntimeCallInfo *argv) // vi. Set k to k + 1. 
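The `Symbol.prototype[@@toPrimitive]` rewrite above collapses three separate error branches into one: return the receiver if it is already a Symbol, unwrap a primitive wrapper whose [[SymbolData]] is a Symbol, and throw a TypeError for everything else. A standalone model of that control flow (the types below are stand-ins for illustration, not the VM's):

```
#include <iostream>
#include <stdexcept>

// Stand-in for a tagged value: either a Symbol itself, or an object that may
// wrap one via a [[SymbolData]]-like slot.
struct Value {
    bool isSymbol = false;
    const Value *symbolData = nullptr;  // set only on Symbol wrapper objects
};

const Value *SymbolToPrimitive(const Value *s)
{
    if (s->isSymbol) {
        return s;                          // 1. already a Symbol
    }
    if (s->symbolData != nullptr && s->symbolData->isSymbol) {
        return s->symbolData;              // 2. unwrap [[SymbolData]]
    }
    // 3. every other receiver folds into a single TypeError branch
    throw std::runtime_error("TypeError: s is not a Symbol wrapper");
}

int main()
{
    Value sym{true, nullptr};
    Value wrapper{false, &sym};
    std::cout << (SymbolToPrimitive(&wrapper) == &sym) << std::endl;  // prints 1
    return 0;
}
```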
JSMutableHandle tKey(thread, JSTaggedValue::Undefined()); JSMutableHandle mapValue(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 2; + const uint32_t argsLength = 2; uint32_t k = 0; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); while (k < len) { @@ -275,7 +277,7 @@ JSTaggedValue BuiltinsTypedArray::From(EcmaRuntimeCallInfo *argv) // e. Perform ? Set(targetObj, Pk, mappedValue, true). // f. Set k to k + 1. JSMutableHandle tKey(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 2; + const uint32_t argsLength = 2; int64_t k = 0; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); @@ -337,6 +339,7 @@ JSTaggedValue BuiltinsTypedArray::Of(EcmaRuntimeCallInfo *argv) while (k < len) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle kValue = GetCallArg(argv, k); JSTaggedValue::SetProperty(thread, JSHandle::Cast(newObj), kKey, kValue, true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -518,7 +521,7 @@ JSTaggedValue BuiltinsTypedArray::Every(EcmaRuntimeCallInfo *argv) // v. If testResult is false, return false. // e. Increase k by 1. JSMutableHandle key(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); uint32_t k = 0; while (k < len) { @@ -611,9 +614,7 @@ JSTaggedValue BuiltinsTypedArray::Filter(EcmaRuntimeCallInfo *argv) info->SetCallArg(kValue.GetTaggedValue(), tKey.GetTaggedValue(), thisHandle.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - bool testResult = callResult.ToBoolean(); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (testResult) { + if (callResult.ToBoolean()) { kept->Set(thread, captured, kValue); captured++; } @@ -707,7 +708,7 @@ JSTaggedValue BuiltinsTypedArray::ForEach(EcmaRuntimeCallInfo *argv) // iv. ReturnIfAbrupt(funcResult). // e. Increase k by 1. JSMutableHandle key(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); uint32_t k = 0; while (k < len) { @@ -935,7 +936,7 @@ JSTaggedValue BuiltinsTypedArray::Map(EcmaRuntimeCallInfo *argv) JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle mapValue(thread, JSTaggedValue::Undefined()); JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); for (uint32_t k = 0; k < len; k++) { key.Update(JSTaggedValue(k)); @@ -1017,13 +1018,13 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) // 5. Assert: target has a [[ViewedArrayBuffer]] internal slot. // 6. Let targetOffset be ToInteger (offset). const JSHandle srcOffset = GetCallArg(argv, 1); - uint32_t targetOffset = 0; + uint64_t targetOffset = 0; if (srcOffset->IsInt()) { if (srcOffset->GetInt() < 0) { THROW_RANGE_ERROR_AND_RETURN(thread, "The targetOffset of This value is less than 0.", JSTaggedValue::Exception()); } - targetOffset = static_cast(srcOffset->GetInt()); + targetOffset = static_cast(srcOffset->GetInt()); } else { JSTaggedNumber tTargetOffset = JSTaggedValue::ToInteger(thread, srcOffset); // 7. ReturnIfAbrupt(targetOffset). 
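On widening `targetOffset` to `uint64_t` in `TypedArray.prototype.set` above: a JS-supplied offset can exceed `UINT32_MAX` before the range check runs, and a 32-bit variable would wrap it to a small, apparently valid value. A standalone demonstration:

```
#include <cstdint>
#include <iostream>

int main()
{
    double rawTargetOffset = 4294967297.0;  // 2^32 + 1, e.g. from ToInteger
    // Old behaviour: truncating through 32 bits wraps to 1, so the offset
    // wrongly passes the later range check. (Cast via uint64_t first to keep
    // this demonstration well-defined C++.)
    uint32_t narrow = static_cast<uint32_t>(static_cast<uint64_t>(rawTargetOffset));
    // New behaviour: the 64-bit value keeps its magnitude and fails the check.
    uint64_t wide = static_cast<uint64_t>(rawTargetOffset);
    std::cout << narrow << " vs " << wide << std::endl;  // 1 vs 4294967297
    return 0;
}
```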
@@ -1037,7 +1038,7 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) THROW_RANGE_ERROR_AND_RETURN(thread, "The targetOffset is infinty, which is greater than targetLength.", JSTaggedValue::Exception()); } else { - targetOffset = static_cast(rawTargetOffset); + targetOffset = static_cast(rawTargetOffset); } } // 9. Let targetBuffer be the value of target’s [[ViewedArrayBuffer]] internal slot. @@ -1092,7 +1093,7 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) JSTaggedValue::Exception()); } // 21. Let targetByteIndex be targetOffset × targetElementSize + targetByteOffset. - ASSERT((static_cast(targetOffset) * static_cast(targetElementSize) + + ASSERT((targetOffset * static_cast(targetElementSize) + static_cast(targetByteOffset)) <= static_cast(UINT32_MAX)); uint32_t targetByteIndex = static_cast(targetOffset * targetElementSize + targetByteOffset); // 22. Let k be 0. @@ -1116,6 +1117,7 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) while (targetByteIndex < limit) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); kValue.Update(ObjectFastOperator::FastGetPropertyByValue( thread, JSHandle::Cast(src).GetTaggedValue(), kKey.GetTaggedValue())); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1190,9 +1192,9 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) srcByteIndex = srcByteOffset; } // 26. Let targetByteIndex be targetOffset × targetElementSize + targetByteOffset. - ASSERT((static_cast(targetOffset) * static_cast(targetElementSize) + + ASSERT((targetOffset * static_cast(targetElementSize) + static_cast(targetByteOffset)) <= static_cast(UINT32_MAX)); - uint32_t targetByteIndex = targetOffset * targetElementSize + targetByteOffset; + uint32_t targetByteIndex = static_cast(targetOffset) * targetElementSize + targetByteOffset; // 27. Let limit be targetByteIndex + targetElementSize × srcLength. ASSERT((static_cast(targetElementSize) * static_cast(srcLength) + static_cast(targetByteIndex)) <= static_cast(UINT32_MAX)); @@ -1406,7 +1408,7 @@ JSTaggedValue BuiltinsTypedArray::Sort(EcmaRuntimeCallInfo *argv) key.GetTaggedValue())); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); while (beginIndex < endIndex) { - uint32_t middleIndex = (beginIndex + endIndex) / 2; // 2 : half + uint32_t middleIndex = beginIndex + (endIndex - beginIndex) / 2; // 2 : half key1.Update(JSTaggedValue(middleIndex)); middleValue.Update(ObjectFastOperator::FastGetPropertyByValue(thread, thisObjHandle.GetTaggedValue(), key1.GetTaggedValue())); @@ -1511,7 +1513,7 @@ JSTaggedValue BuiltinsTypedArray::Subarray(EcmaRuntimeCallInfo *argv) // 21. Let argumentsList be «buffer, beginByteOffset, newLength». // 5. Let buffer be the value of O’s [[ViewedArrayBuffer]] internal slot. // 22. Return Construct(constructor, argumentsList). - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSTaggedType args[argsLength] = { buffer.GetRawData(), JSTaggedValue(beginByteOffset).GetRawData(), @@ -1620,7 +1622,7 @@ JSTaggedValue BuiltinsTypedArray::At(EcmaRuntimeCallInfo *argv) int64_t k = 0; // 5. If relativeIndex ≥ 0, then Let k be relativeIndex. // 6. Else, Let k be len + relativeIndex. - k = relativeIndex >= 0 ? relativeIndex : len + relativeIndex; + k = relativeIndex >= 0 ? relativeIndex : static_cast(len) + relativeIndex; // 7. If k < 0 or k ≥ len, return undefined. 
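The static_cast<int64_t>(len) in At above keeps len + relativeIndex signed, which the bounds check just below relies on. Expected at() semantics (illustrative TypeScript, assuming an ES2022 lib):

```ts
const ta = new Int32Array([10, 20, 30]);
console.log(ta.at(0));  // 10
console.log(ta.at(-1)); // 30, since k = len + relativeIndex = 3 + (-1)
console.log(ta.at(-4)); // undefined, k < 0
console.log(ta.at(3));  // undefined, k >= len
```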
if (k < 0 || k >= len) { return JSTaggedValue::Undefined(); @@ -1631,6 +1633,168 @@ JSTaggedValue BuiltinsTypedArray::At(EcmaRuntimeCallInfo *argv) return kValue.GetTaggedValue(); } +// 23.2.3.33 +JSTaggedValue BuiltinsTypedArray::ToSorted(EcmaRuntimeCallInfo* argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, ToSorted); + JSThread* thread = argv->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, throw a TypeError exception. + JSHandle comparefnHandle = GetCallArg(argv, 0); + if (!comparefnHandle->IsUndefined() && !comparefnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "the comparefn is not callable.", JSTaggedValue::Exception()); + } + // 2. Let O be the this value. + JSHandle thisHandle = GetThis(argv); + // 3. Perform ? ValidateTypedArray(O). + TypedArrayHelper::ValidateTypedArray(thread, thisHandle); + // ReturnIfAbrupt(valid). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle thisObj(thisHandle); + // 4. Let len be O.[[ArrayLength]]. + uint32_t len = thisObj->GetArrayLength(); + + // 5. Let A be ? TypedArrayCreateSameType(O, « 𝔽(len) »). + JSTaggedType args[1] = { JSTaggedValue(len).GetRawData() }; + JSHandle newArrObj = TypedArrayHelper::TypedArrayCreateSameType(thread, thisObj, 1, args); // 1: one arg. + // ReturnIfAbrupt(A). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle buffer = + JSHandle(thread, TypedArrayHelper::ValidateTypedArray(thread, thisHandle)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSMutableHandle presentValue(thread, JSTaggedValue::Undefined()); + JSMutableHandle middleValue(thread, JSTaggedValue::Undefined()); + JSMutableHandle previousValue(thread, JSTaggedValue::Undefined()); + JSMutableHandle key(thread, JSTaggedValue::Undefined()); + JSMutableHandle key1(thread, JSTaggedValue::Undefined()); + JSMutableHandle key2(thread, JSTaggedValue::Undefined()); + if (len > 0) { + previousValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, thisHandle.GetTaggedValue(), JSTaggedValue(0))); + ObjectFastOperator::FastSetPropertyByIndex( + thread, newArrObj.GetTaggedValue(), 0, previousValue.GetTaggedValue()); + } + for (uint32_t i = 1; i < len; i++) { + uint32_t beginIndex = 0; + uint32_t endIndex = i; + key.Update(JSTaggedValue(i)); + presentValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, thisHandle.GetTaggedValue(), key.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + while (beginIndex < endIndex) { + uint32_t middleIndex = beginIndex + (endIndex - beginIndex) / 2; // 2 : half + key1.Update(JSTaggedValue(middleIndex)); + middleValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, newArrObj.GetTaggedValue(), key1.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int32_t compareResult = + TypedArrayHelper::SortCompare(thread, comparefnHandle, buffer, middleValue, presentValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + compareResult > 0 ? 
(endIndex = middleIndex) : (beginIndex = middleIndex + 1); + } + + if (endIndex < i) { + for (uint32_t j = i; j > endIndex; j--) { + key2.Update(JSTaggedValue(j - 1)); + previousValue.Update(ObjectFastOperator::FastGetPropertyByValue( + thread, newArrObj.GetTaggedValue(), key2.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ObjectFastOperator::FastSetPropertyByIndex( + thread, newArrObj.GetTaggedValue(), j, previousValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + } + ObjectFastOperator::FastSetPropertyByIndex( + thread, newArrObj.GetTaggedValue(), endIndex, presentValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + return newArrObj.GetTaggedValue(); +} + +// 23.2.3.36 +JSTaggedValue BuiltinsTypedArray::With(EcmaRuntimeCallInfo* argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, With); + JSThread* thread = argv->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be the this value. + JSHandle thisHandle = GetThis(argv); + // 2. Perform ? ValidateTypedArray(O). + TypedArrayHelper::ValidateTypedArray(thread, thisHandle); + // ReturnIfAbrupt(valid). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle thisObj(thisHandle); + // 3. Let len be O.[[ArrayLength]]. + uint32_t len = thisObj->GetArrayLength(); + + // 4. Let relativeIndex be ? ToIntegerOrInfinity(index). + JSTaggedNumber indexVal = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 0)); + // ReturnIfAbrupt(indexVal). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t relativeIndex = indexVal.GetNumber(); + // 5. If relativeIndex ≥ 0, let actualIndex be relativeIndex. + // 6. Else, let actualIndex be len + relativeIndex. + int64_t actualIndex = relativeIndex >= 0 ? relativeIndex : static_cast(len) + relativeIndex; + + // 7. If O.[[ContentType]] is BigInt, let numericValue be ? ToBigInt(value). + // 8. Else, let numericValue be ? ToNumber(value). + JSHandle value = GetCallArg(argv, 1); + ContentType contentType = thisObj->GetContentType(); + JSHandle numericValue; + if (contentType == ContentType::BigInt) { + numericValue = JSHandle(thread, JSTaggedValue::ToBigInt(thread, value)); + } else { + numericValue = JSHandle(thread, JSTaggedValue::ToNumber(thread, value)); + } + // ReturnIfAbrupt(numericValue). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 9. If IsValidIntegerIndex(O, 𝔽(actualIndex)) is false, throw a RangeError exception. + if (!JSTypedArray::IsValidIntegerIndex(thisHandle, JSTaggedValue(actualIndex))) { + THROW_RANGE_ERROR_AND_RETURN(thread, "Invalid typed array index", JSTaggedValue::Exception()); + } + + // 10. Let A be ? TypedArrayCreateSameType(O, « 𝔽(len) »). + JSTaggedType args[1] = { JSTaggedValue(len).GetRawData() }; + JSHandle newArrObj = TypedArrayHelper::TypedArrayCreateSameType(thread, thisObj, 1, args); // 1: one arg. + // ReturnIfAbrupt(A). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 11. Let k be 0. + // 12. Repeat, while k < len, + // a. Let Pk be ! ToString(𝔽(k)). + // b. If k is actualIndex, let fromValue be numericValue. + // c. Else, let fromValue be ! Get(O, Pk). + // d. Perform ! Set(A, Pk, fromValue, true). + // e. Set k to k + 1. 
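Before the element-copy loop of `with` continues below: the ToSorted implementation completed above is a binary-insertion sort into a same-type copy, leaving the receiver untouched. Observable behavior (illustrative TypeScript, assuming an ES2023 lib, not part of the patch):

```ts
const ta = new Int32Array([3, 1, 2]);
const sorted = ta.toSorted(); // numeric ascending by default for typed arrays
console.log([...sorted]);     // [1, 2, 3]
console.log([...ta]);         // [3, 1, 2], the receiver is not mutated
// A non-callable comparator is rejected up front, before any work.
try { ta.toSorted(42 as any); } catch (e) { console.log(e instanceof TypeError); } // true
```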
+ JSMutableHandle tKey(thread, JSTaggedValue::Undefined()); + JSMutableHandle fromValue(thread, JSTaggedValue::Undefined()); + uint32_t k = 0; + while (k < len) { + tKey.Update(JSTaggedValue(k)); + if (k == actualIndex) { + fromValue.Update(numericValue); + } else { + fromValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, thisHandle.GetTaggedValue(), tKey.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + ObjectFastOperator::FastSetPropertyByValue(thread, newArrObj.GetTaggedValue(), + tKey.GetTaggedValue(), fromValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + k++; + } + return newArrObj.GetTaggedValue(); +} + // es12 23.2.3.13 JSTaggedValue BuiltinsTypedArray::Includes(EcmaRuntimeCallInfo *argv) { @@ -1641,4 +1805,74 @@ JSTaggedValue BuiltinsTypedArray::Includes(EcmaRuntimeCallInfo *argv) } return BuiltinsArray::Includes(argv); } + +// 23.2.3.32 +JSTaggedValue BuiltinsTypedArray::ToReversed(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, TypedArray, ToReversed); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObj(thisHandle); + // 2. Perform ? ValidateTypedArray(O). + TypedArrayHelper::ValidateTypedArray(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 3. Let len be O.[[ArrayLength]]. + uint32_t len = JSHandle::Cast(thisObjHandle)->GetArrayLength(); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 4. Let A be ? TypedArrayCreateSameType(O, « 𝔽(length) »). + JSTaggedType args[1] = {JSTaggedValue(len).GetRawData()}; + JSHandle newArrayHandle = TypedArrayHelper::TypedArrayCreateSameType(thread, thisObj, 1, args); + // ReturnIfAbrupt(newObj). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 5. Let k be 0. + uint32_t k = 0; + + // 6. Repeat, while k < length, + // a. Let from be ! ToString(𝔽(length - k - 1)). + // b. Let Pk be ! ToString(𝔽(k)). + // c. Let fromValue be ! Get(O, from). + // d. Perform ! Set(A, Pk, fromValue, true). + // e. Set k to k + 1. + while (k < len) { + uint32_t from = len - k - 1; + JSHandle fromValue = JSTypedArray::GetProperty(thread, thisHandle, from).GetValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ObjectFastOperator::FastSetPropertyByIndex(thread, newArrayHandle.GetTaggedValue(), k, + fromValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++k; + } + // 7. Return A. 
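Observable semantics of the `with` implementation just completed (illustrative TypeScript, assuming an ES2023 lib):

```ts
const ta = new Int32Array([0, 1, 2]);
console.log([...ta.with(1, 3)]); // [0, 3, 2]
console.log([...ta]);            // [0, 1, 2], the original is unchanged
// Indices out of range after negative-index normalization throw RangeError,
// matching the IsValidIntegerIndex check in step 9 above.
try { ta.with(3, 9); } catch (e) { console.log(e instanceof RangeError); }  // true
try { ta.with(-4, 9); } catch (e) { console.log(e instanceof RangeError); } // true
```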
+ return newArrayHandle.GetTaggedValue(); +} + +// 23.2.3.13 +JSTaggedValue BuiltinsTypedArray::FindLast(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, FindLast); + if (!GetThis(argv)->IsTypedArray()) { + THROW_TYPE_ERROR_AND_RETURN(argv->GetThread(), "This is not a TypedArray.", JSTaggedValue::Exception()); + } + return BuiltinsArray::FindLast(argv); +} + +// 23.2.3.14 +JSTaggedValue BuiltinsTypedArray::FindLastIndex(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, FindLastIndex); + if (!GetThis(argv)->IsTypedArray()) { + THROW_TYPE_ERROR_AND_RETURN(argv->GetThread(), "This is not a TypedArray.", JSTaggedValue::Exception()); + } + return BuiltinsArray::FindLastIndex(argv); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_typedarray.h b/ecmascript/builtins/builtins_typedarray.h index 4e249b83e3f18f14d77c1b9d0563ad586040a428..05e72d6fd761ee1bc5a78e063a9ce1ef7fbeaaa5 100644 --- a/ecmascript/builtins/builtins_typedarray.h +++ b/ecmascript/builtins/builtins_typedarray.h @@ -109,6 +109,16 @@ public: static JSTaggedValue Includes(EcmaRuntimeCallInfo *argv); // 23.2.3.1 static JSTaggedValue At(EcmaRuntimeCallInfo *argv); + // 23.2.3.32 %TypedArray%.prototype.toReversed ( ) + static JSTaggedValue ToReversed(EcmaRuntimeCallInfo *argv); + // 23.2.3.13 + static JSTaggedValue FindLast(EcmaRuntimeCallInfo *argv); + // 23.2.3.14 + static JSTaggedValue FindLastIndex(EcmaRuntimeCallInfo *argv); + // 23.2.3.33 + static JSTaggedValue ToSorted(EcmaRuntimeCallInfo *argv); + // 23.2.3.36 + static JSTaggedValue With(EcmaRuntimeCallInfo *argv); static const uint32_t MAX_ARRAY_INDEX = std::numeric_limits::max(); }; } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_weak_map.cpp b/ecmascript/builtins/builtins_weak_map.cpp index c0dcf62bc8ccaf3b0f5295b9a8826ca6fe007428..b2a0c216da4e0e5cfefa53fe6d26da962fbf4099 100644 --- a/ecmascript/builtins/builtins_weak_map.cpp +++ b/ecmascript/builtins/builtins_weak_map.cpp @@ -87,8 +87,8 @@ JSTaggedValue BuiltinsWeakMap::Delete(EcmaRuntimeCallInfo *argv) JSHandle weakMap(self); JSHandle key = GetCallArg(argv, 0); - // 5.if Type(key) is not Object, return false. - if (!key->IsHeapObject()) { + // 5.If CanBeHeldWeakly(key) is false, return false. + if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) { return GetTaggedBoolean(false); } return GetTaggedBoolean(JSWeakMap::Delete(thread, weakMap, key)); @@ -106,10 +106,10 @@ JSTaggedValue BuiltinsWeakMap::Has(EcmaRuntimeCallInfo *argv) if (!self->IsJSWeakMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakMap.", JSTaggedValue::Exception()); } - JSWeakMap *jsWeakMap = JSWeakMap::Cast(*JSTaggedValue::ToObject(thread, self)); + JSWeakMap *jsWeakMap = JSWeakMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle key = GetCallArg(argv, 0); - // 5.if Type(key) is not Object, return false. - if (!key->IsHeapObject()) { + // 5.If CanBeHeldWeakly(key) is false, return false. 
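The FindLast/FindLastIndex delegations and the ToReversed copy loop above implement these semantics (illustrative TypeScript, assuming an ES2023 lib); the CanBeHeldWeakly check resumes just below:

```ts
const ta = new Int32Array([50, 40, 2]);
// findLast/findLastIndex scan from the last index toward 0.
console.log(ta.findLast(x => x > 20));      // 40
console.log(ta.findLastIndex(x => x > 20)); // 1
// toReversed returns a reversed copy; the receiver keeps its order.
console.log([...ta.toReversed()]);          // [2, 40, 50]
console.log([...ta]);                       // [50, 40, 2]
```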
+ if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) { return GetTaggedBoolean(false); } return GetTaggedBoolean(jsWeakMap->Has(key.GetTaggedValue())); @@ -127,9 +127,10 @@ JSTaggedValue BuiltinsWeakMap::Get(EcmaRuntimeCallInfo *argv) if (!self->IsJSWeakMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakMap.", JSTaggedValue::Exception()); } - JSWeakMap *jsWeakMap = JSWeakMap::Cast(*JSTaggedValue::ToObject(thread, self)); + JSWeakMap *jsWeakMap = JSWeakMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle key = GetCallArg(argv, 0); - if (!key->IsHeapObject()) { + // 4.If CanBeHeldWeakly(key) is false, return undefined. + if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) { return JSTaggedValue::Undefined(); } return jsWeakMap->Get(key.GetTaggedValue()); @@ -150,11 +151,9 @@ JSTaggedValue BuiltinsWeakMap::Set(EcmaRuntimeCallInfo *argv) } JSHandle key = GetCallArg(argv, 0); - if (!key->IsHeapObject()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not an object.", JSTaggedValue::Exception()); - } - if (key->IsSymbol() || key->IsString()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "key is Symblol or String", JSTaggedValue::Exception()); + // 4.If CanBeHeldWeakly(key) is false, throw a TypeError exception. + if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) { + THROW_TYPE_ERROR_AND_RETURN(thread, "invalid value used as weak map key.", JSTaggedValue::Exception()); } JSHandle value = GetCallArg(argv, 1); diff --git a/ecmascript/builtins/builtins_weak_ref.cpp b/ecmascript/builtins/builtins_weak_ref.cpp index 289c11c4cb8cee6e8d5a17e2830ac796935ce37e..ad4b85a330782bf6b7ed55fa18c6ce1d1f3917aa 100644 --- a/ecmascript/builtins/builtins_weak_ref.cpp +++ b/ecmascript/builtins/builtins_weak_ref.cpp @@ -17,7 +17,7 @@ #include "ecmascript/ecma_vm.h" #include "ecmascript/js_weak_ref.h" -#include "ecmascript/object_factory.h" +#include "ecmascript/object_factory-inl.h" namespace panda::ecmascript::builtins { JSTaggedValue BuiltinsWeakRef::WeakRefConstructor(EcmaRuntimeCallInfo *argv) @@ -32,10 +32,10 @@ JSTaggedValue BuiltinsWeakRef::WeakRefConstructor(EcmaRuntimeCallInfo *argv) if (newTarget->IsUndefined()) { THROW_TYPE_ERROR_AND_RETURN(thread, "new target can't be undefined", JSTaggedValue::Exception()); } - // 2. If Type(target) is not Object, throw a TypeError exception. + // 2. If CanBeHeldWeakly(target) is false, throw a TypeError exception. JSHandle target = GetCallArg(argv, 0); - if (!target->IsECMAObject()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "target is not object", JSTaggedValue::Exception()); + if (!JSTaggedValue::CanBeHeldWeakly(thread, target)) { + THROW_TYPE_ERROR_AND_RETURN(thread, "target invalid", JSTaggedValue::Exception()); } // 3. Let weakRef be ? OrdinaryCreateFromConstructor(NewTarget, "%WeakRef.prototype%", « [[WeakRefTarget]] »). JSHandle constructor = GetConstructor(argv); diff --git a/ecmascript/builtins/builtins_weak_set.cpp b/ecmascript/builtins/builtins_weak_set.cpp index 4205ef913ebf4321a394bab93c64ee5e0b22e6cc..12d71e83ffaba3718702366e1850d7a2362683d7 100644 --- a/ecmascript/builtins/builtins_weak_set.cpp +++ b/ecmascript/builtins/builtins_weak_set.cpp @@ -120,15 +120,12 @@ JSTaggedValue BuiltinsWeakSet::Add(EcmaRuntimeCallInfo *argv) } JSHandle value(GetCallArg(argv, 0)); - if (!value->IsHeapObject()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "value is not an object", JSTaggedValue::Exception()); + // 4.If CanBeHeldWeakly(value) is false, throw a TypeError exception. 
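CanBeHeldWeakly, used throughout these WeakMap/WeakSet/WeakRef hunks, admits objects and non-registered symbols and rejects everything else (the symbols-as-weak-keys proposal). An illustrative TypeScript sketch, assuming a runtime with that proposal; the WeakSet check continues just below:

```ts
const wm = new WeakMap<any, number>();
wm.set({}, 1);              // objects can be held weakly
wm.set(Symbol("local"), 2); // so can non-registered symbols
try {
    wm.set(Symbol.for("shared"), 3); // registered symbols cannot
} catch (e) {
    console.log(e instanceof TypeError); // true
}
// has/get/delete never throw for invalid keys; they just report absence.
console.log(wm.has("str")); // false
```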
+ if (!JSTaggedValue::CanBeHeldWeakly(thread, value)) { + THROW_TYPE_ERROR_AND_RETURN(thread, "invalid value used in weak set", JSTaggedValue::Exception()); } - if (value->IsSymbol() || value->IsString()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "value is Symblol or String", JSTaggedValue::Exception()); - } - - JSHandle weakSet(thread, JSWeakSet::Cast(*JSTaggedValue::ToObject(thread, self))); + JSHandle weakSet(self); JSWeakSet::Add(thread, weakSet, value); return weakSet.GetTaggedValue(); } @@ -146,9 +143,10 @@ JSTaggedValue BuiltinsWeakSet::Delete(EcmaRuntimeCallInfo *argv) THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakSet", JSTaggedValue::Exception()); } - JSHandle weakSet(thread, JSWeakSet::Cast(*JSTaggedValue::ToObject(thread, self))); + JSHandle weakSet(self); JSHandle value = GetCallArg(argv, 0); - if (!value->IsHeapObject()) { + // 4.If CanBeHeldWeakly(value) is false, return false. + if (!JSTaggedValue::CanBeHeldWeakly(thread, value)) { GetTaggedBoolean(false); } return GetTaggedBoolean(JSWeakSet::Delete(thread, weakSet, value)); @@ -166,9 +164,10 @@ JSTaggedValue BuiltinsWeakSet::Has(EcmaRuntimeCallInfo *argv) if (!self->IsJSWeakSet()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakSet", JSTaggedValue::Exception()); } - JSWeakSet *jsWeakSet = JSWeakSet::Cast(*JSTaggedValue::ToObject(thread, self)); + JSWeakSet *jsWeakSet = JSWeakSet::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle value = GetCallArg(argv, 0); - if (!value->IsHeapObject()) { + // 4.If CanBeHeldWeakly(value) is false, return false. + if (!JSTaggedValue::CanBeHeldWeakly(thread, value)) { GetTaggedBoolean(false); } return GetTaggedBoolean(jsWeakSet->Has(value.GetTaggedValue())); diff --git a/ecmascript/builtins/tests/builtins_array_test.cpp b/ecmascript/builtins/tests/builtins_array_test.cpp index 248ac346b3220d08a8f47c265be1680b150ddfde..be2ddd938cc4f6f6b36e286a13ded4cbbef9af61 100644 --- a/ecmascript/builtins/tests/builtins_array_test.cpp +++ b/ecmascript/builtins/tests/builtins_array_test.cpp @@ -35,6 +35,26 @@ using namespace panda::ecmascript; using namespace panda::ecmascript::builtins; using namespace panda::ecmascript::base; +constexpr int32_t INT_VALUE_0 = 0; +constexpr int32_t INT_VALUE_1 = 1; +constexpr int32_t INT_VALUE_2 = 2; +constexpr int32_t INT_VALUE_3 = 3; +constexpr int32_t INT_VALUE_4 = 4; +constexpr int32_t INT_VALUE_50 = 50; +constexpr int32_t INT_VALUE_200 = 200; +constexpr int32_t INT_VALUE_666 = 666; +constexpr uint32_t RUNTIME_CALL_INFO_PARA_0 = 0; +constexpr uint32_t RUNTIME_CALL_INFO_PARA_1 = 1; +constexpr uint32_t RUNTIME_CALL_INFO_PARA_NUM_4 = 4; +constexpr uint32_t RUNTIME_CALL_INFO_PARA_NUM_8 = 8; +constexpr uint32_t RUNTIME_CALL_INFO_PARA_NUM_10 = 10; + +enum class ArrayIndex { + ARRAY_INDEX_0, + ARRAY_INDEX_1, + ARRAY_INDEX_2, + ARRAY_INDEX_3 +}; namespace panda::test { using Array = ecmascript::builtins::BuiltinsArray; @@ -141,6 +161,30 @@ public: return GetTaggedBoolean(false); } + static JSTaggedValue TestFindLastFunc(EcmaRuntimeCallInfo *argv) + { + uint32_t argc = argv->GetArgsNumber(); + if (argc > 0) { + // 20 : test case + if (GetCallArg(argv, 0)->GetInt() > 20) { + return GetTaggedBoolean(true); + } + } + return GetTaggedBoolean(false); + } + + static JSTaggedValue TestFindLastIndexFunc(EcmaRuntimeCallInfo *argv) + { + uint32_t argc = argv->GetArgsNumber(); + if (argc > 0) { + // 20 : test case + if (GetCallArg(argv, 0)->GetInt() > 20) { + return GetTaggedBoolean(true); + } + } + return GetTaggedBoolean(false); + } + static JSTaggedValue 
TestReduceFunc(EcmaRuntimeCallInfo *argv) { int accumulator = GetCallArg(argv, 0)->GetInt(); @@ -928,7 +972,53 @@ HWTEST_F_L0(BuiltinsArrayTest, ForEach) EXPECT_EQ(jsArray->GetArrayLength(), 3U); } -// 22.1.3.11 new Array(1,2,3,4,3).IndexOf(searchElement [ , fromIndex ]) +#define ARRAY_DEFINE_OWN_PROPERTY(dest, index, value) \ + do { \ + JSHandle key(thread, JSTaggedValue(index)); \ + PropertyDescriptor desc(thread, JSHandle(thread, JSTaggedValue(value)), true, true, true); \ + JSArray::DefineOwnProperty(thread, dest, key, desc); \ + } while (false) + +#define ARRAY_BUILTIN_METHOD_TEST_CASE_ARG0(method, target, expected) \ + do { \ + auto ecmaRuntimeCallInfo = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); \ + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); \ + ecmaRuntimeCallInfo->SetThis((target).GetTaggedValue()); \ + \ + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); \ + JSTaggedValue result = Array::method(ecmaRuntimeCallInfo); \ + TestHelper::TearDownFrame(thread, prev); \ + ASSERT_TRUE(JSTaggedValue::StrictEqual(result, JSTaggedValue(expected))); \ + } while (false) + +#define ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(method, target, expected, arg0) \ + do { \ + auto ecmaRuntimeCallInfo = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); \ + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); \ + ecmaRuntimeCallInfo->SetThis((target).GetTaggedValue()); \ + ecmaRuntimeCallInfo->SetCallArg(0, JSTaggedValue(arg0)); \ + \ + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); \ + JSTaggedValue result = Array::method(ecmaRuntimeCallInfo); \ + TestHelper::TearDownFrame(thread, prev); \ + ASSERT_TRUE(JSTaggedValue::StrictEqual(result, JSTaggedValue(expected))); \ + } while (false) + +#define ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(method, target, expected, arg0, arg1) \ + do { \ + auto ecmaRuntimeCallInfo = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); \ + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); \ + ecmaRuntimeCallInfo->SetThis((target).GetTaggedValue()); \ + ecmaRuntimeCallInfo->SetCallArg(0, JSTaggedValue(arg0)); \ + ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(arg1)); \ + \ + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); \ + JSTaggedValue result = Array::method(ecmaRuntimeCallInfo); \ + TestHelper::TearDownFrame(thread, prev); \ + ASSERT_TRUE(JSTaggedValue::StrictEqual(result, JSTaggedValue(expected))); \ + } while (false) + +// 22.1.3.11 Array.IndexOf(searchElement [ , fromIndex ]) HWTEST_F_L0(BuiltinsArrayTest, IndexOf) { JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); @@ -937,67 +1027,54 @@ HWTEST_F_L0(BuiltinsArrayTest, IndexOf) JSHandle obj(thread, arr); EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); - JSHandle key0(thread, JSTaggedValue(0)); - PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(1)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key0, desc0); - JSHandle key1(thread, JSTaggedValue(1)); - PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(2)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key1, desc1); - JSHandle key2(thread, JSTaggedValue(2)); - PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key2, desc2); - JSHandle 
key3(thread, JSTaggedValue(3)); - PropertyDescriptor desc3(thread, JSHandle(thread, JSTaggedValue(4)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key3, desc3); - JSHandle key4(thread, JSTaggedValue(4)); - PropertyDescriptor desc4(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key4, desc4); - - auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo1->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(static_cast(0))); - - [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); - JSTaggedValue result = Array::IndexOf(ecmaRuntimeCallInfo1); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(2).GetRawData()); - - auto ecmaRuntimeCallInfo2 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo2->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo2->SetCallArg(1, JSTaggedValue(static_cast(3))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); - result = Array::IndexOf(ecmaRuntimeCallInfo2); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(4).GetRawData()); - - auto ecmaRuntimeCallInfo3 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo3->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo3->SetCallArg(0, JSTaggedValue(static_cast(5))); - ecmaRuntimeCallInfo3->SetCallArg(1, JSTaggedValue(static_cast(0))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3); - result = Array::IndexOf(ecmaRuntimeCallInfo3); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(-1).GetRawData()); - - auto ecmaRuntimeCallInfo4 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); - ecmaRuntimeCallInfo4->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo4->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo4->SetCallArg(0, JSTaggedValue(static_cast(3))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo4); - result = Array::IndexOf(ecmaRuntimeCallInfo4); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(2).GetRawData()); + // arr = [1, 2, 3, 4, 3, 0, 2.0, +0.0, 3.0, -0.0, , , undefined] + ARRAY_DEFINE_OWN_PROPERTY(obj, 0, 1); + ARRAY_DEFINE_OWN_PROPERTY(obj, 1, 2); + ARRAY_DEFINE_OWN_PROPERTY(obj, 2, 3); + ARRAY_DEFINE_OWN_PROPERTY(obj, 3, 4); + ARRAY_DEFINE_OWN_PROPERTY(obj, 4, 3); + ARRAY_DEFINE_OWN_PROPERTY(obj, 5, 0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 6, 2.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 7, +0.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 8, 3.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 9, -0.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 12, JSTaggedValue::Undefined()); + + // arr.indexOf(3, 0) == 2 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 2, 3, 0); + // arr.indexOf(3, 3) == 4 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 4, 3, 3); + // arr.indexOf(5, 0) == -1 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, -1, 5, 0); + // arr.indexOf(3) == 2 + 
ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 2, 3); + + // Expects int32_t(x) and double(x) to be strictly equal + // arr.indexOf(3.0) == 2 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 2, 3.0); + // arr.indexOf(3, 5) == 8 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 8, 3, 5); + + // Expects 0, +0.0, -0.0 to be strictly equal + // arr.indexOf(+0.0) == 5 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 5, +0.0); + // arr.indexOf(-0.0) == 5 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 5, -0.0); + // arr.indexOf(0, 6) == 7 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 7, 0, 6); + // arr.indexOf(-0.0, 6) == 7 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 7, -0.0, 6); + // arr.indexOf(0, 8) == 9 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 9, 0, 8); + // arr.indexOf(+0.0, 8) == 9 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 9, +0.0, 8); + + // Expects undefined to be found + // arr.indexOf() == 12, where the first argument is undefined + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG0(IndexOf, obj, 12); } -// 22.1.3.14 new Array(1,2,3,4,3).LastIndexOf(searchElement [ , fromIndex ]) +// 22.1.3.14 Array.LastIndexOf(searchElement [ , fromIndex ]) HWTEST_F_L0(BuiltinsArrayTest, LastIndexOf) { JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); @@ -1006,68 +1083,50 @@ HWTEST_F_L0(BuiltinsArrayTest, LastIndexOf) JSHandle obj(thread, arr); EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); - JSHandle key0(thread, JSTaggedValue(0)); - PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(1)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key0, desc0); - JSHandle key1(thread, JSTaggedValue(1)); - PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(2)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key1, desc1); - JSHandle key2(thread, JSTaggedValue(2)); - PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key2, desc2); - JSHandle key3(thread, JSTaggedValue(3)); - PropertyDescriptor desc3(thread, JSHandle(thread, JSTaggedValue(4)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key3, desc3); - JSHandle key4(thread, JSTaggedValue(4)); - PropertyDescriptor desc4(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key4, desc4); - - // new Array(1,2,3,4,3).LastIndexOf(3,4) - auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo1->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(static_cast(4))); - - [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); - JSTaggedValue result = Array::LastIndexOf(ecmaRuntimeCallInfo1); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(static_cast(4)).GetRawData()); - - // new Array(1,2,3,4,3).LastIndexOf(3,3) - auto ecmaRuntimeCallInfo2 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo2->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo2->SetCallArg(1, 
JSTaggedValue(static_cast<int32_t>(3)));
-
-    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
-    result = Array::LastIndexOf(ecmaRuntimeCallInfo2);
-    TestHelper::TearDownFrame(thread, prev);
-    ASSERT_EQ(result.GetRawData(), JSTaggedValue(static_cast<int32_t>(2)).GetRawData());
-
-    // new Array(1,2,3,4,3).LastIndexOf(5,4)
-    auto ecmaRuntimeCallInfo3 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8);
-    ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined());
-    ecmaRuntimeCallInfo3->SetThis(obj.GetTaggedValue());
-    ecmaRuntimeCallInfo3->SetCallArg(0, JSTaggedValue(static_cast<int32_t>(5)));
-    ecmaRuntimeCallInfo3->SetCallArg(1, JSTaggedValue(static_cast<int32_t>(4)));
-
-    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3);
-    result = Array::LastIndexOf(ecmaRuntimeCallInfo3);
-    TestHelper::TearDownFrame(thread, prev);
-    ASSERT_EQ(result.GetRawData(), JSTaggedValue(-1).GetRawData());
-
-    // new Array(1,2,3,4,3).LastIndexOf(3)
-    auto ecmaRuntimeCallInfo4 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6);
-    ecmaRuntimeCallInfo4->SetFunction(JSTaggedValue::Undefined());
-    ecmaRuntimeCallInfo4->SetThis(obj.GetTaggedValue());
-    ecmaRuntimeCallInfo4->SetCallArg(0, JSTaggedValue(static_cast<int32_t>(3)));
-
-    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo4);
-    result = Array::LastIndexOf(ecmaRuntimeCallInfo4);
-    TestHelper::TearDownFrame(thread, prev);
-    ASSERT_EQ(result.GetRawData(), JSTaggedValue(static_cast<int32_t>(4)).GetRawData());
+    // arr = [1, 2, 3, 4, 3, 0, 2.0, +0.0, 3.0, -0.0, , , undefined, , , -1]
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 0, 1);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 1, 2);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 2, 3);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 3, 4);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 4, 3);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 5, 0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 6, 2.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 7, +0.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 8, 3.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 9, -0.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 12, JSTaggedValue::Undefined());
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 15, -1);
+
+    // arr.lastIndexOf(3, 4) == 4
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 4, 3, 4);
+    // arr.lastIndexOf(3, 3) == 2
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 2, 3, 3);
+    // arr.lastIndexOf(5, 4) == -1
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, -1, 5, 4);
+
+    // Expects int32_t(x) and double(x) to be strictly equal
+    // arr.lastIndexOf(3) == 8
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 8, 3);
+    // arr.lastIndexOf(1.0) == 0
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 0, 1.0);
+
+    // Expects 0, +0.0, -0.0 to be strictly equal
+    // arr.lastIndexOf(+0.0) == 9
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 9, +0.0);
+    // arr.lastIndexOf(0) == 9
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 9, 0);
+    // arr.lastIndexOf(0, 8) == 7
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 7, 0, 8);
+    // arr.lastIndexOf(-0.0, 8) == 7
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 7, -0.0, 8);
+    // arr.lastIndexOf(-0.0, 6) == 5
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 5, -0.0, 6);
+    // arr.lastIndexOf(+0.0, 6) == 5
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 5, +0.0, 6);
+
+    // Expects undefined to be found
+    // arr.lastIndexOf() == 12, where the first argument is undefined
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG0(LastIndexOf, obj, 12);
 }
 
 // 22.1.3.11 new Array().Pop()
@@ -1706,4 +1765,247 @@ HWTEST_F_L0(BuiltinsArrayTest, At)
     TestHelper::TearDownFrame(thread, prev6);
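The rewritten IndexOf/LastIndexOf tests above pin down Strict Equality corner cases; in TypeScript terms (illustrative, not part of the patch):

```ts
const arr = [1, 2, 3, 4, 3, 0, 2.0, +0.0, 3.0, -0.0, , , undefined];
// Strict Equality treats +0, -0 and 0 as equal, and 3 === 3.0.
console.log(arr.indexOf(-0.0));      // 5
console.log(arr.indexOf(3.0));       // 2
console.log(arr.lastIndexOf(3));     // 8, the 3.0 at index 8 matches
// Holes at 10 and 11 are skipped; the explicit undefined at 12 is found.
console.log(arr.indexOf(undefined)); // 12
```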
ASSERT_EQ(result, JSTaggedValue::Undefined()); } + +HWTEST_F_L0(BuiltinsArrayTest, With) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = + JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(INT_VALUE_0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + + JSHandle key0(thread, JSTaggedValue(static_cast(ArrayIndex::ARRAY_INDEX_0))); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_0)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(static_cast(ArrayIndex::ARRAY_INDEX_1))); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_1)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(static_cast(ArrayIndex::ARRAY_INDEX_2))); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), RUNTIME_CALL_INFO_PARA_NUM_8); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(RUNTIME_CALL_INFO_PARA_0, + JSTaggedValue(static_cast((ArrayIndex::ARRAY_INDEX_1)))); + ecmaRuntimeCallInfo1->SetCallArg(RUNTIME_CALL_INFO_PARA_1, JSTaggedValue(INT_VALUE_3)); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::With(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_TRUE(result.IsECMAObject()); + JSHandle resultArr = + JSHandle(thread, JSTaggedValue(static_cast(result.GetRawData()))); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key0).GetValue()->GetInt(), INT_VALUE_0); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key1).GetValue()->GetInt(), INT_VALUE_3); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key2).GetValue()->GetInt(), INT_VALUE_2); +} + +HWTEST_F_L0(BuiltinsArrayTest, ToSorted) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = + JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(INT_VALUE_0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + JSHandle key0(thread, JSTaggedValue(INT_VALUE_0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_3)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(INT_VALUE_1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(INT_VALUE_2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_1)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), RUNTIME_CALL_INFO_PARA_NUM_4); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + 
ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result2 = Array::ToSorted(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_TRUE(result2.IsECMAObject()); + JSHandle resultArr = + JSHandle(thread, JSTaggedValue(static_cast(result2.GetRawData()))); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key0).GetValue()->GetInt(), INT_VALUE_1); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key1).GetValue()->GetInt(), INT_VALUE_2); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key2).GetValue()->GetInt(), INT_VALUE_3); +} + +HWTEST_F_L0(BuiltinsArrayTest, ToSpliced) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = + JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(INT_VALUE_0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + JSHandle key0(thread, JSTaggedValue(INT_VALUE_0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_0)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(INT_VALUE_1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_1)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(INT_VALUE_2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), RUNTIME_CALL_INFO_PARA_NUM_10); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(INT_VALUE_0, JSTaggedValue(INT_VALUE_1)); + ecmaRuntimeCallInfo1->SetCallArg(INT_VALUE_1, JSTaggedValue(INT_VALUE_1)); + ecmaRuntimeCallInfo1->SetCallArg(INT_VALUE_2, JSTaggedValue(INT_VALUE_666)); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result2 = Array::ToSpliced(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_TRUE(result2.IsECMAObject()); + JSHandle resultArr = + JSHandle(thread, JSTaggedValue(static_cast(result2.GetRawData()))); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key0).GetValue()->GetInt(), INT_VALUE_0); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key1).GetValue()->GetInt(), INT_VALUE_666); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key2).GetValue()->GetInt(), INT_VALUE_2); +} + +HWTEST_F_L0(BuiltinsArrayTest, FindLast) +{ + auto ecmaVM = thread->GetEcmaVM(); + JSHandle env = ecmaVM->GetGlobalEnv(); + ObjectFactory *factory = ecmaVM->GetFactory(); + + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); + // arr [50, 40, 2] + JSHandle key0(thread, JSTaggedValue(0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(50)), true, true, true); + 
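The ToSpliced test above (its FindLast counterpart is set up below) checks the copy-then-splice contract (illustrative TypeScript, assuming an ES2023 lib):

```ts
const arr = [0, 1, 2];
// toSpliced(start, deleteCount, ...items) splices a copy and returns it.
console.log(arr.toSpliced(1, 1, 666)); // [0, 666, 2]
console.log(arr);                      // [0, 1, 2], the receiver is unchanged
```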
JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(40)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + JSHandle jsArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + JSHandle func = factory->NewJSFunction(env, reinterpret_cast(TestClass::TestFindLastFunc)); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(1, jsArray.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::FindLast(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_EQ(result.GetRawData(), JSTaggedValue(40).GetRawData()); +} + +HWTEST_F_L0(BuiltinsArrayTest, FindLastIndex) +{ + auto ecmaVM = thread->GetEcmaVM(); + JSHandle env = ecmaVM->GetGlobalEnv(); + ObjectFactory *factory = ecmaVM->GetFactory(); + + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); + + // arr [50, 40, 30] + JSHandle key0(thread, JSTaggedValue(0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(50)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + + JSHandle key1(thread, JSTaggedValue(1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(40)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + + JSHandle key2(thread, JSTaggedValue(2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(30)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + JSHandle jsArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + JSHandle func = factory->NewJSFunction(env, reinterpret_cast(TestClass::TestFindLastIndexFunc)); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(1, jsArray.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::FindLastIndex(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_EQ(result.GetRawData(), JSTaggedValue(static_cast(2)).GetRawData()); +} + +HWTEST_F_L0(BuiltinsArrayTest, ToReversed) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + 
EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + JSHandle key0(thread, JSTaggedValue(INT_VALUE_0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_50)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(INT_VALUE_1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_200)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(INT_VALUE_2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_3)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), INT_VALUE_4); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::ToReversed(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + JSTaggedValue value(static_cast(result.GetRawData())); + ASSERT_TRUE(value.IsECMAObject()); + + PropertyDescriptor descRes(thread); + JSHandle valueHandle(thread, value); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(valueHandle), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_3); + JSObject::GetOwnProperty(thread, valueHandle, key0, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_3)); + JSObject::GetOwnProperty(thread, valueHandle, key1, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_200)); + JSObject::GetOwnProperty(thread, valueHandle, key2, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_50)); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_3); + JSObject::GetOwnProperty(thread, obj, key0, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_50)); + JSObject::GetOwnProperty(thread, obj, key1, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_200)); + JSObject::GetOwnProperty(thread, obj, key2, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_3)); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_errors_test.cpp b/ecmascript/builtins/tests/builtins_errors_test.cpp index 307341fd76589456b3c3e240c23bb562ad5e5fb5..025e6201294ec8349232589d203179332e323798 100644 --- a/ecmascript/builtins/tests/builtins_errors_test.cpp +++ b/ecmascript/builtins/tests/builtins_errors_test.cpp @@ -40,6 +40,7 @@ using TypeError = builtins::BuiltinsTypeError; using URIError = builtins::BuiltinsURIError; using EvalError = builtins::BuiltinsEvalError; using SyntaxError = builtins::BuiltinsSyntaxError; +using AggregateError = builtins::BuiltinsAggregateError; using JSType = ecmascript::JSType; class BuiltinsErrorsTest : public testing::Test { @@ -980,4 +981,55 @@ HWTEST_F_L0(BuiltinsErrorsTest, EvalErrorToString) EXPECT_EQ(EcmaStringAccessor::Compare(instance, factory->NewFromASCII("EvalError: This is EvalError!"), resultHandle), 0); } + +/* + * @tc.name: AggregateErrorParameterConstructor + * @tc.desc: new AggregateError([], "Hello AggregateError", {cause: "error cause"}) + * @tc.type: FUNC + */ +HWTEST_F_L0(BuiltinsErrorsTest, AggregateErrorParameterConstructor) +{ + 
ObjectFactory *factory = instance->GetFactory(); + JSHandle env = instance->GetGlobalEnv(); + + JSHandle error(env->GetAggregateErrorFunction()); + JSHandle paramMsg(factory->NewFromASCII("Hello AggregateError!")); + + JSHandle errayFunc = env->GetArrayFunction(); + JSHandle newArray = factory->NewJSObjectByConstructor(JSHandle(errayFunc), errayFunc); + + JSHandle causeKey = thread->GlobalConstants()->GetHandledCauseString(); + JSHandle objFun = env->GetObjectFunction(); + JSHandle optionsObj = factory->NewJSObjectByConstructor(JSHandle(objFun), objFun); + JSHandle causeValue(factory->NewFromASCII("error cause")); // test error cause + JSObject::SetProperty(thread, optionsObj, causeKey, causeValue); + + auto ecmaRuntimeCallInfo = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue(*error), 10); // 10 means 3 call args + ecmaRuntimeCallInfo->SetFunction(error.GetTaggedValue()); + ecmaRuntimeCallInfo->SetThis(JSTaggedValue(*error)); + ecmaRuntimeCallInfo->SetCallArg(0, newArray.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(1, paramMsg.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(2, optionsObj.GetTaggedValue()); // 2 means the options arg + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); + JSTaggedValue result = AggregateError::AggregateErrorConstructor(ecmaRuntimeCallInfo); + EXPECT_TRUE(result.IsECMAObject()); + + JSHandle errorObject(thread, reinterpret_cast(result.GetRawData())); + JSHandle msgKey(factory->NewFromASCII("message")); + JSHandle nameKey = thread->GlobalConstants()->GetHandledNameString(); + + JSHandle msgValue(JSObject::GetProperty(thread, errorObject, msgKey).GetValue()); + ASSERT_EQ(EcmaStringAccessor::Compare(instance, + factory->NewFromASCII("Hello AggregateError!"), JSHandle(msgValue)), 0); + + JSHandle nameValue(JSObject::GetProperty(thread, errorObject, nameKey).GetValue()); + ASSERT_EQ(EcmaStringAccessor::Compare(instance, + factory->NewFromASCII("AggregateError"), JSHandle(nameValue)), 0); + + JSHandle errCauseValue(JSObject::GetProperty(thread, errorObject, causeKey).GetValue()); + ASSERT_EQ(EcmaStringAccessor::Compare(instance, + factory->NewFromASCII("error cause"), JSHandle(errCauseValue)), 0); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp b/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp index 2fda459022ad52f7f62dae89c3dd0862edfed871..9ff2f2ad6ba16637418ef00f16894b5c4e36957c 100644 --- a/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp +++ b/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp @@ -441,4 +441,96 @@ HWTEST_F_L0(BuiltinsFinalizationRegistryTest, Unregister2) vm->SetEnableForceGC(true); ASSERT_EQ(testValue, 0); } + +// finalizationRegistry.Register(target, heldValue [ , unregisterToken ]) target and unregisterToken Symbol +HWTEST_F_L0(BuiltinsFinalizationRegistryTest, RegisterTargetSymbol) +{ + testValue = 0; + EcmaVM *vm = thread->GetEcmaVM(); + + JSTaggedValue result = CreateFinalizationRegistryConstructor(thread); + JSHandle jsfinalizationRegistry(thread, result); + + vm->SetEnableForceGC(false); + JSTaggedValue target = JSTaggedValue::Undefined(); + JSTaggedValue target1 = JSTaggedValue::Undefined(); + { + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle symbol1 = thread->GetEcmaVM()->GetFactory()->NewJSSymbol(); + JSHandle symbol2 = thread->GetEcmaVM()->GetFactory()->NewJSSymbol(); + target = symbol1.GetTaggedValue(); + target1 = symbol2.GetTaggedValue(); + 
auto ecmaRuntimeCallInfo = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 10); // 10 means 3 call args + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(0, target); + ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(10)); + ecmaRuntimeCallInfo->SetCallArg(2, target); // 2 means the unregisterToken arg + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); + BuiltinsFinalizationRegistry::Register(ecmaRuntimeCallInfo); + TestHelper::TearDownFrame(thread, prev); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 10); // 10 means 3 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, target1); + ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(10)); + ecmaRuntimeCallInfo1->SetCallArg(2, target1); // 2 means the unregisterToken arg + + [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + BuiltinsFinalizationRegistry::Register(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev1); + } + vm->CollectGarbage(TriggerGCType::FULL_GC); + if (!thread->HasPendingException()) { + job::MicroJobQueue::ExecutePendingJob(thread, vm->GetJSThread()->GetCurrentEcmaContext()->GetMicroJobQueue()); + } + vm->SetEnableForceGC(true); + ASSERT_EQ(testValue, 2); +} + +// finalizationRegistry.Unregister(unregisterToken) unregisterToken Symbol +HWTEST_F_L0(BuiltinsFinalizationRegistryTest, UnregisterTokenSymbol) +{ + testValue = 0; + EcmaVM *vm = thread->GetEcmaVM(); + + JSTaggedValue result = CreateFinalizationRegistryConstructor(thread); + JSHandle jsfinalizationRegistry(thread, result); + vm->SetEnableForceGC(false); + JSTaggedValue target = JSTaggedValue::Undefined(); + { + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle symbol = thread->GetEcmaVM()->GetFactory()->NewJSSymbol(); + target = symbol.GetTaggedValue(); + auto ecmaRuntimeCallInfo = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 10); // 10 means 3 call args + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(0, target); + ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(10)); + ecmaRuntimeCallInfo->SetCallArg(2, target); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); + BuiltinsFinalizationRegistry::Register(ecmaRuntimeCallInfo); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, target); + + BuiltinsFinalizationRegistry::Unregister(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + } + vm->CollectGarbage(TriggerGCType::FULL_GC); + if (!thread->HasPendingException()) { + job::MicroJobQueue::ExecutePendingJob(thread, vm->GetJSThread()->GetCurrentEcmaContext()->GetMicroJobQueue()); + } + vm->SetEnableForceGC(true); + ASSERT_EQ(testValue, 0); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_global_test.cpp 
index 9ed7dc1ed1157eb7324ed7a58f9da74904da54be..b6d4bd2c6d10b3fc471b1c3dce786fe39cd80a4c 100644
--- a/ecmascript/builtins/tests/builtins_global_test.cpp
+++ b/ecmascript/builtins/tests/builtins_global_test.cpp
@@ -120,4 +120,98 @@ HWTEST_F_L0(BuiltinsGlobalTest, CallJsProxy)
     EXPECT_EQ(result, JSTaggedValue::Undefined());
     thread->ClearException();
 }
+
+HWTEST_F_L0(BuiltinsGlobalTest, Escape)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<EcmaString> str1 = factory->NewFromASCII("?!=()#%&");
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT, 6 means 3 paras
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetCallArg(0, str1.GetTaggedValue());
+
+    [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result1 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev1);
+    EXPECT_TRUE(result1.IsString());
+    JSHandle<EcmaString> ecmaStrHandle1(thread, result1);
+    EXPECT_STREQ("%3F%21%3D%28%29%23%25%26", EcmaStringAccessor(ecmaStrHandle1).ToCString().c_str()); // NOLINT
+
+    JSHandle<EcmaString> str2 = factory->NewFromASCII("%u%u0%u9%ua%uF%u00%u09%u0f%u0F%u000%u00a%u00F");
+    auto ecmaRuntimeCallInfo2 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetCallArg(0, str2.GetTaggedValue()); // NOLINT
+
+    [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue result2 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev2);
+    EXPECT_TRUE(result2.IsString());
+    JSHandle<EcmaString> ecmaStrHandle2(thread, result2);
+    EXPECT_STREQ("%25u%25u0%25u9%25ua%25uF%25u00%25u09%25u0f%25u0F%25u000%25u00a%25u00F", // NOLINT special value
+        EcmaStringAccessor(ecmaStrHandle2).ToCString().c_str());
+
+    JSHandle<EcmaString> str3 = factory->NewFromASCII("Hello World!");
+    auto ecmaRuntimeCallInfo3 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT
+    ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo3->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo3->SetCallArg(0, str3.GetTaggedValue());
+
+    [[maybe_unused]] auto prev3 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3);
+    JSTaggedValue result3 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo3);
+    TestHelper::TearDownFrame(thread, prev3);
+    EXPECT_TRUE(result3.IsString());
+    JSHandle<EcmaString> ecmaStrHandle3(thread, result3);
+    EXPECT_STREQ("Hello%20World%21", EcmaStringAccessor(ecmaStrHandle3).ToCString().c_str());
+}
+
+HWTEST_F_L0(BuiltinsGlobalTest, Unescape)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<EcmaString> str1 = factory->NewFromASCII("");
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetCallArg(0, str1.GetTaggedValue());
+
+    [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result1 = BuiltinsGlobal::Unescape(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev1);
+    EXPECT_TRUE(result1.IsString());
+    JSHandle<EcmaString> ecmaStrHandle1(thread, result1);
+    EXPECT_STREQ("", EcmaStringAccessor(ecmaStrHandle1).ToCString().c_str());
+
+    JSHandle<EcmaString> str2 = factory->NewFromASCII("%u%u0%u9%ua%uF%u00%u09%u0f%u0F%u000%u00a%u00F");
+    auto ecmaRuntimeCallInfo2 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetCallArg(0, str2.GetTaggedValue()); // NOLINT
+
+    [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue result2 = BuiltinsGlobal::Unescape(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev2);
+    EXPECT_TRUE(result2.IsString());
+    JSHandle<EcmaString> ecmaStrHandle2(thread, result2);
+    EXPECT_STREQ("%u%u0%u9%ua%uF%u00%u09%u0f%u0F%u000%u00a%u00F",
+        EcmaStringAccessor(ecmaStrHandle2).ToCString().c_str());
+
+    JSHandle<EcmaString> str3 = factory->NewFromASCII("Hello%20World%21");
+    auto ecmaRuntimeCallInfo3 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT 6 means 3 paras
+    ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo3->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo3->SetCallArg(0, str3.GetTaggedValue());
+
+    [[maybe_unused]] auto prev3 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3);
+    JSTaggedValue result3 = BuiltinsGlobal::Unescape(ecmaRuntimeCallInfo3);
+    TestHelper::TearDownFrame(thread, prev3);
+    EXPECT_TRUE(result3.IsString());
+    JSHandle<EcmaString> ecmaStrHandle3(thread, result3);
+    EXPECT_STREQ("Hello World!", EcmaStringAccessor(ecmaStrHandle3).ToCString().c_str());
+}
 } // namespace panda::test
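The Escape/Unescape tests above pin down Annex B escape/unescape behavior, including that an incomplete %uXXXX sequence passes through unescape unchanged, while escape encodes the '%' itself. A minimal TypeScript sketch of the asserted pairs, illustrative only and not part of the patch:

    console.log(escape("?!=()#%&"));           // "%3F%21%3D%28%29%23%25%26"
    console.log(escape("Hello World!"));       // "Hello%20World%21"
    console.log(unescape("Hello%20World%21")); // "Hello World!"
    console.log(unescape("%u0"));              // "%u0": incomplete %uXXXX is left as-is
    console.log(escape("%u0"));                // "%25u0": '%' escapes to %25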
diff --git a/ecmascript/builtins/tests/builtins_regexp_test.cpp b/ecmascript/builtins/tests/builtins_regexp_test.cpp
index 79df1902e4e954845a44c1d1cee80c040576c800..6727f3fc6a7bf4ca817e069fe02430cb8d1aec7c 100644
--- a/ecmascript/builtins/tests/builtins_regexp_test.cpp
+++ b/ecmascript/builtins/tests/builtins_regexp_test.cpp
@@ -657,4 +657,64 @@ HWTEST_F_L0(BuiltinsRegExpTest, RegExpParseCache)
                                            RegExpParserCache::CACHE_SIZE, vec).first.IsHole());
     ASSERT_TRUE(regExpParserCache->GetCache(*string2, 0, vec).first.IsHole());
 }
+
+HWTEST_F_L0(BuiltinsRegExpTest, FlagD)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    // invoke RegExpConstructor method
+    JSHandle<EcmaString> pattern1 = factory->NewFromASCII("(?<groupname>a)");
+    JSHandle<EcmaString> flags1 = factory->NewFromASCII("gd");
+    JSTaggedValue result1 = CreateBuiltinsRegExpObjByPatternAndFlags(thread, pattern1, flags1);
+    JSHandle<JSTaggedValue> result1Handle(thread, result1);
+
+    // invoke GetFlags method
+    JSHandle<JSTaggedValue> flags(factory->NewFromASCII("flags"));
+    JSHandle<JSTaggedValue> flagsResult(JSObject::GetProperty(thread, result1Handle, flags).GetValue());
+    JSHandle<EcmaString> expectResult = factory->NewFromASCII("dg");
+    ASSERT_EQ(EcmaStringAccessor::Compare(instance, JSHandle<EcmaString>(flagsResult), expectResult), 0);
+
+    // invoke GetHasIndices method
+    JSHandle<JSTaggedValue> hasIndices(factory->NewFromASCII("hasIndices"));
+    JSTaggedValue taggedHasIndicesResult =
+        JSObject::GetProperty(thread, result1Handle, hasIndices).GetValue().GetTaggedValue();
+    ASSERT_EQ(taggedHasIndicesResult.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    JSHandle<EcmaString> inputString = factory->NewFromASCII("babcae");
+    auto ecmaRuntimeCallInfo =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo->SetThis(result1Handle.GetTaggedValue());
+    ecmaRuntimeCallInfo->SetCallArg(0, inputString.GetTaggedValue());
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo);
+    // invoke Exec method
+    JSTaggedValue results = BuiltinsRegExp::Exec(ecmaRuntimeCallInfo);
+    TestHelper::TearDownFrame(thread, prev);
+
+    JSHandle<JSTaggedValue> execResult(thread, results);
+    JSHandle<JSTaggedValue> indices(factory->NewFromASCII("indices"));
+    JSHandle<JSTaggedValue> indicesArr = JSObject::GetProperty(thread, execResult, indices).GetValue();
+    EXPECT_TRUE(indicesArr->IsJSArray());
+
+    JSHandle<JSTaggedValue> indices0 = JSObject::GetProperty(thread, indicesArr, 0).GetValue();
+    EXPECT_TRUE(indices0->IsJSArray());
+    // indices[0] [1, 2]
+    EXPECT_EQ(JSObject::GetProperty(thread, indices0, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSObject::GetProperty(thread, indices0, 1).GetValue()->GetInt(), 2);
+    JSHandle<JSTaggedValue> indices1 = JSObject::GetProperty(thread, indicesArr, 1).GetValue();
+    EXPECT_TRUE(indices1->IsJSArray());
+    // indices[1] [1, 2]
+    EXPECT_EQ(JSObject::GetProperty(thread, indices1, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSObject::GetProperty(thread, indices1, 1).GetValue()->GetInt(), 2);
+
+    JSHandle<JSTaggedValue> groups(factory->NewFromASCII("groups"));
+    JSHandle<JSTaggedValue> groupsObj = JSObject::GetProperty(thread, indicesArr, groups).GetValue();
+    EXPECT_TRUE(groupsObj->IsJSObject());
+    JSHandle<JSTaggedValue> groupName(factory->NewFromASCII("groupname"));
+    JSHandle<JSTaggedValue> groupNameArr = JSObject::GetProperty(thread, groupsObj, groupName).GetValue();
+    EXPECT_TRUE(groupNameArr->IsJSArray());
+    // {groupname: [1, 2]}
+    EXPECT_EQ(JSObject::GetProperty(thread, groupNameArr, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSObject::GetProperty(thread, groupNameArr, 1).GetValue()->GetInt(), 2);
+}
 } // namespace panda::test
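The FlagD test exercises RegExp match-indices (the d flag): flags reports letters in canonical order (d before g), hasIndices is true, and exec results carry an indices array whose entries, plus indices.groups for named groups, are [start, end] spans. A TypeScript sketch of the same expectations, illustrative only and not part of the patch:

    const re = /(?<groupname>a)/gd;
    console.log(re.flags);      // "dg"
    console.log(re.hasIndices); // true
    const m = re.exec("babcae")!;
    console.log(m.indices![0]); // [1, 2]: span of the whole match
    console.log(m.indices![1]); // [1, 2]: span of capture group 1
    console.log(m.indices!.groups!.groupname); // [1, 2]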
diff --git a/ecmascript/builtins/tests/builtins_typedarray_test.cpp b/ecmascript/builtins/tests/builtins_typedarray_test.cpp
index 6e0dd96d626065e27f2847151491f33c82d4fa5a..95c62679f4382280e8e2638c4593a0a5880b2608 100644
--- a/ecmascript/builtins/tests/builtins_typedarray_test.cpp
+++ b/ecmascript/builtins/tests/builtins_typedarray_test.cpp
@@ -44,6 +44,20 @@ namespace panda::test {
 using Array = ecmascript::builtins::BuiltinsArray;
 using TypedArray = ecmascript::builtins::BuiltinsTypedArray;
 using TypedArrayHelper = ecmascript::base::TypedArrayHelper;
+constexpr uint32_t ECMA_RUNTIME_CALL_INFO_4 = 4;
+constexpr uint32_t ECMA_RUNTIME_CALL_INFO_6 = 6;
+
+enum class TypeArrayIndex {
+    TYPED_ARRAY_INDEX_0,
+    TYPED_ARRAY_INDEX_1,
+    TYPED_ARRAY_INDEX_2,
+    TYPED_ARRAY_INDEX_3
+};
+constexpr uint32_t TYPED_ARRAY_LENGTH_3 = 3;
+constexpr int32_t INT_VALUE_0 = 0;
+constexpr int32_t INT_VALUE_2 = 2;
+constexpr int32_t INT_VALUE_4 = 4;
+constexpr int32_t INT_VALUE_9 = 9;
 
 class BuiltinsTypedArrayTest : public testing::Test {
 public:
@@ -143,6 +157,42 @@ protected:
         return GetTaggedBoolean(false);
     }
 
+    static JSTaggedValue TestToSortedFunc(EcmaRuntimeCallInfo *argv)
+    {
+        uint32_t argc = argv->GetArgsNumber();
+        if (argc > 1) {
+            // x < y
+            if (GetCallArg(argv, 0)->GetInt() < GetCallArg(argv, 1)->GetInt()) {
+                return GetTaggedBoolean(true);
+            }
+        }
+        return GetTaggedBoolean(false);
+    }
+
+    static JSTaggedValue TestFindLastFunc(EcmaRuntimeCallInfo *argv)
+    {
+        uint32_t argc = argv->GetArgsNumber();
+        if (argc > 0) {
+            // 20 : test case
+            if (GetCallArg(argv, 0)->GetInt() > 20) {
+                return GetTaggedBoolean(true);
+            }
+        }
+        return GetTaggedBoolean(false);
+    }
+
+    static JSTaggedValue TestFindLastIndexFunc(EcmaRuntimeCallInfo *argv)
+    {
+        uint32_t argc = argv->GetArgsNumber();
+        if (argc > 0) {
+            // 20 : test case
+            if (GetCallArg(argv, 0)->GetInt() > 20) {
+                return GetTaggedBoolean(true);
+            }
+        }
+        return GetTaggedBoolean(false);
+    }
+
     static JSTaggedValue TestReduceFunc(EcmaRuntimeCallInfo *argv)
     {
         int accumulator = GetCallArg(argv, 0)->GetInt();
@@ -350,4 +400,219 @@ HWTEST_F_L0(BuiltinsTypedArrayTest, At)
 
     ASSERT_TRUE(result.IsUndefined());
 }
+
+HWTEST_F_L0(BuiltinsTypedArrayTest, ToReversed)
+{
+    ASSERT_NE(thread, nullptr);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    [[maybe_unused]] JSHandle<TaggedArray> array(factory->NewTaggedArray(TYPED_ARRAY_LENGTH_3));
+    array->Set(thread, static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_0), JSTaggedValue(INT_VALUE_0));
+    array->Set(thread, static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_1), JSTaggedValue(INT_VALUE_4));
+    array->Set(thread, static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_2), JSTaggedValue(INT_VALUE_9));
+
+    [[maybe_unused]] JSHandle<JSTaggedValue> obj =
+        JSHandle<JSTaggedValue>(thread, CreateTypedArrayFromList(thread, array));
+    auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(),
+                                                                      ECMA_RUNTIME_CALL_INFO_4);
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    [[maybe_unused]] JSTaggedValue result = TypedArray::ToReversed(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+
+    auto ecmaRuntimeCallInfo2 = TestHelper::CreateEcmaRuntimeCallInfo(thread,
+                                                                      JSTaggedValue::Undefined(),
+                                                                      ECMA_RUNTIME_CALL_INFO_6);
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo2->SetCallArg(static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_0),
+                                     JSTaggedValue(INT_VALUE_0));
+    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue value = TypedArray::At(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev);
+    ASSERT_EQ(value, JSTaggedValue(INT_VALUE_0));
+
+    auto ecmaRuntimeCallInfo3 = TestHelper::CreateEcmaRuntimeCallInfo(thread,
+                                                                      JSTaggedValue::Undefined(),
+                                                                      ECMA_RUNTIME_CALL_INFO_6);
+    ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo3->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo3->SetCallArg(static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_0),
+                                     JSTaggedValue(INT_VALUE_2));
+    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3);
+    value = TypedArray::At(ecmaRuntimeCallInfo3);
+    TestHelper::TearDownFrame(thread, prev);
+    ASSERT_EQ(value, JSTaggedValue(INT_VALUE_9));
+
+    auto ecmaRuntimeCallInfo4 = TestHelper::CreateEcmaRuntimeCallInfo(thread,
+                                                                      JSTaggedValue::Undefined(),
+                                                                      ECMA_RUNTIME_CALL_INFO_6);
+    ecmaRuntimeCallInfo4->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo4->SetThis(result);
+    ecmaRuntimeCallInfo4->SetCallArg(static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_0),
+                                     JSTaggedValue(INT_VALUE_0));
+    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo4);
+    value = TypedArray::At(ecmaRuntimeCallInfo4);
+    TestHelper::TearDownFrame(thread, prev);
+    ASSERT_EQ(value, JSTaggedValue(INT_VALUE_9));
+    auto ecmaRuntimeCallInfo5 = TestHelper::CreateEcmaRuntimeCallInfo(thread,
+                                                                      JSTaggedValue::Undefined(),
+                                                                      ECMA_RUNTIME_CALL_INFO_6);
+    ecmaRuntimeCallInfo5->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo5->SetThis(result);
+    ecmaRuntimeCallInfo5->SetCallArg(static_cast<uint32_t>(TypeArrayIndex::TYPED_ARRAY_INDEX_0),
+                                     JSTaggedValue(INT_VALUE_2));
+    prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo5);
+    value = TypedArray::At(ecmaRuntimeCallInfo5);
+    TestHelper::TearDownFrame(thread, prev);
+    ASSERT_EQ(value, JSTaggedValue(INT_VALUE_0));
+}
+
+HWTEST_F_L0(BuiltinsTypedArrayTest, ToSorted)
+{
+    ASSERT_NE(thread, nullptr);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
+    JSHandle<TaggedArray> array(factory->NewTaggedArray(3));
+    // array [10, 8, 30]
+    array->Set(thread, 0, JSTaggedValue(10));
+    array->Set(thread, 1, JSTaggedValue(8));
+    array->Set(thread, 2, JSTaggedValue(30));
+
+    JSHandle<JSTaggedValue> obj = JSHandle<JSTaggedValue>(thread, CreateTypedArrayFromList(thread, array));
+    JSHandle<JSFunction> func = factory->NewJSFunction(env, reinterpret_cast<void *>(TestClass::TestToSortedFunc));
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue());
+
+    [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result1 = TypedArray::ToSorted(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev1);
+
+    EXPECT_TRUE(result1.IsTypedArray());
+    JSHandle<JSTaggedValue> resultArr1 = JSHandle<JSTaggedValue>(thread, result1);
+    // [30, 10, 8]
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 0).GetValue()->GetInt(), 30);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 1).GetValue()->GetInt(), 10);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 2).GetValue()->GetInt(), 8);
+
+    auto ecmaRuntimeCallInfo2 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 4); // 4 means 0 call arg
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue());
+
+    [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue result2 = TypedArray::ToSorted(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev2);
+
+    EXPECT_TRUE(result2.IsTypedArray());
+    JSHandle<JSTaggedValue> resultArr2 = JSHandle<JSTaggedValue>(thread, result2);
+    // [8, 10, 30]
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 0).GetValue()->GetInt(), 8);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 1).GetValue()->GetInt(), 10);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 2).GetValue()->GetInt(), 30);
+}
+
+HWTEST_F_L0(BuiltinsTypedArrayTest, With)
+{
+    ASSERT_NE(thread, nullptr);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<TaggedArray> array(factory->NewTaggedArray(3));
+    // array [1, 2, 3]
+    array->Set(thread, 0, JSTaggedValue(1));
+    array->Set(thread, 1, JSTaggedValue(2));
+    array->Set(thread, 2, JSTaggedValue(3));
+
+    JSHandle<JSTaggedValue> obj = JSHandle<JSTaggedValue>(thread, CreateTypedArrayFromList(thread, array));
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, JSTaggedValue(static_cast<int32_t>(-1)));
+    ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(static_cast<int32_t>(30))); // with(-1, 30)
+
+    [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result1 = TypedArray::With(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev1);
+
+    EXPECT_TRUE(result1.IsTypedArray());
+    JSHandle<JSTaggedValue> resultArr1 = JSHandle<JSTaggedValue>(thread, result1);
+    // [1, 2, 30]
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 1).GetValue()->GetInt(), 2);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 2).GetValue()->GetInt(), 30);
+
+    auto ecmaRuntimeCallInfo2 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo2->SetCallArg(0, JSTaggedValue(static_cast<int32_t>(1)));
+    ecmaRuntimeCallInfo2->SetCallArg(1, JSTaggedValue(static_cast<int32_t>(-100))); // with(1, -100)
+
+    [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue result2 = TypedArray::With(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev2);
+
+    EXPECT_TRUE(result2.IsTypedArray());
+    JSHandle<JSTaggedValue> resultArr2 = JSHandle<JSTaggedValue>(thread, result2);
+    // [1, -100, 3]
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 1).GetValue()->GetInt(), -100);
+    EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 2).GetValue()->GetInt(), 3);
+}
+
+HWTEST_F_L0(BuiltinsTypedArrayTest, FindLast)
+{
+    ASSERT_NE(thread, nullptr);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
+    JSHandle<TaggedArray> array(factory->NewTaggedArray(3));
+    // array [50, 40, 2]
+    array->Set(thread, 0, JSTaggedValue(50));
+    array->Set(thread, 1, JSTaggedValue(40));
+    array->Set(thread, 2, JSTaggedValue(2));
+
+    JSHandle<JSTaggedValue> obj = JSHandle<JSTaggedValue>(thread, CreateTypedArrayFromList(thread, array));
+    JSHandle<JSFunction> func = factory->NewJSFunction(env, reinterpret_cast<void *>(TestClass::TestFindLastFunc));
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue());
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result = TypedArray::FindLast(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+
+    EXPECT_EQ(result.GetRawData(), JSTaggedValue(40).GetRawData());
+}
+
+HWTEST_F_L0(BuiltinsTypedArrayTest, FindLastIndex)
+{
+    ASSERT_NE(thread, nullptr);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
+    JSHandle<TaggedArray> array(factory->NewTaggedArray(3));
+    // array [50, 40, 30]
+    array->Set(thread, 0, JSTaggedValue(50));
+    array->Set(thread, 1, JSTaggedValue(40));
+    array->Set(thread, 2, JSTaggedValue(30));
+
+    JSHandle<JSTaggedValue> obj = JSHandle<JSTaggedValue>(thread, CreateTypedArrayFromList(thread, array));
+    JSHandle<JSFunction> func =
+        factory->NewJSFunction(env, reinterpret_cast<void *>(TestClass::TestFindLastIndexFunc));
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue());
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result = TypedArray::FindLastIndex(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+
+    EXPECT_EQ(result.GetRawData(), JSTaggedValue(static_cast<int32_t>(2)).GetRawData());
+}
 } // namespace panda::test
diff --git a/ecmascript/builtins/tests/builtins_weak_map_test.cpp b/ecmascript/builtins/tests/builtins_weak_map_test.cpp
index f3a45779c1be8d1c574034563e2826a5263193ba..ac410b067dcba99b36942bd76ca734b7f9ab5e94 100644
--- a/ecmascript/builtins/tests/builtins_weak_map_test.cpp
+++ b/ecmascript/builtins/tests/builtins_weak_map_test.cpp
@@ -213,4 +213,55 @@ HWTEST_F_L0(BuiltinsWeakMapTest, DeleteAndRemove)
 
     EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
 }
+
+HWTEST_F_L0(BuiltinsWeakMapTest, SymbolKey)
+{
+    // create jsWeakMap
+    JSHandle<JSWeakMap> weakMap(thread, CreateBuiltinsWeakMap(thread));
+
+    // add 2 symbol keys
+    JSTaggedValue lastKey(JSTaggedValue::Undefined());
+    for (int i = 0; i < 2; i++) {
+        JSHandle<JSSymbol> symbolKey = thread->GetEcmaVM()->GetFactory()->NewJSSymbol();
+        JSHandle<JSTaggedValue> key(symbolKey);
+        auto ecmaRuntimeCallInfo =
+            TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args
+        ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined());
+        ecmaRuntimeCallInfo->SetThis(weakMap.GetTaggedValue());
+        ecmaRuntimeCallInfo->SetCallArg(0, key.GetTaggedValue());
+        ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(static_cast<int32_t>(i)));
+
+        [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo);
+        // set
+        JSTaggedValue result1 = BuiltinsWeakMap::Set(ecmaRuntimeCallInfo);
+        TestHelper::TearDownFrame(thread, prev);
+
+        EXPECT_TRUE(result1.IsECMAObject());
+        JSWeakMap *jsWeakMap = JSWeakMap::Cast(reinterpret_cast<TaggedObject *>(result1.GetRawData()));
+        EXPECT_EQ(jsWeakMap->GetSize(), static_cast<int>(i) + 1);
+        lastKey = key.GetTaggedValue();
+    }
+
+    // check whether jsWeakMap can get and delete lastKey
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(weakMap.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, lastKey);
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    // get
+    JSTaggedValue result2 = BuiltinsWeakMap::Get(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+    EXPECT_EQ(result2, JSTaggedValue(1));
+
+    // delete
+    JSTaggedValue result3 = BuiltinsWeakMap::Delete(ecmaRuntimeCallInfo1);
+    EXPECT_EQ(result3.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    // check deleteKey is deleted
+    JSTaggedValue result4 = BuiltinsWeakMap::Has(ecmaRuntimeCallInfo1);
+    EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
+}
 } // namespace panda::test
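The WeakMap SymbolKey test above, and the WeakRef/WeakSet symbol tests that follow, track the "symbols as weak keys" behavior: a non-registered symbol may serve as a WeakMap key, WeakSet entry, or WeakRef target. A TypeScript sketch, illustrative only and not part of the patch:

    const wm = new WeakMap<symbol, number>();
    const s = Symbol("desc");     // non-registered symbol: usable as a weak key
    wm.set(s, 1);
    console.log(wm.get(s));       // 1
    console.log(wm.delete(s));    // true
    console.log(new WeakRef(s).deref() === s); // true while s is still reachable
    // Symbol.for("x") is a registered symbol and is rejected as a weak key.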
diff --git a/ecmascript/builtins/tests/builtins_weak_ref_test.cpp b/ecmascript/builtins/tests/builtins_weak_ref_test.cpp
index 71d1bace2994dfcea9b85839d61db3416676ccf2..5c314ab2b2dd0cf3324d0486179252e6a7d04e7f 100644
--- a/ecmascript/builtins/tests/builtins_weak_ref_test.cpp
+++ b/ecmascript/builtins/tests/builtins_weak_ref_test.cpp
@@ -188,4 +188,38 @@ HWTEST_F_L0(BuiltinsWeakRefTest, Deref3)
     vm->SetEnableForceGC(true);
     ASSERT_TRUE(!result2.IsUndefined());
 }
+
+// symbol target
+HWTEST_F_L0(BuiltinsWeakRefTest, SymbolTarget)
+{
+    JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
+    JSHandle<JSSymbol> symbolTarget = thread->GetEcmaVM()->GetFactory()->NewJSSymbol();
+    JSHandle<JSTaggedValue> target(symbolTarget);
+
+    JSHandle<JSTaggedValue> weakRef(env->GetBuiltinsWeakRefFunction());
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, weakRef.GetTaggedValue(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(weakRef.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetCallArg(0, target.GetTaggedValue());
+
+    // constructor
+    [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result1 = BuiltinsWeakRef::WeakRefConstructor(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev1);
+    ASSERT_TRUE(result1.IsECMAObject());
+
+    JSHandle<JSWeakRef> jsWeakRef(thread, JSWeakRef::Cast(reinterpret_cast<TaggedObject *>(result1.GetRawData())));
+    auto ecmaRuntimeCallInfo2 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 4); // 4 means 0 call arg
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(jsWeakRef.GetTaggedValue());
+
+    // weakRef.Deref()
+    [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue result2 = BuiltinsWeakRef::Deref(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev2);
+    ASSERT_EQ(result2, target.GetTaggedValue());
+}
 } // namespace panda::test
diff --git a/ecmascript/builtins/tests/builtins_weak_set_test.cpp b/ecmascript/builtins/tests/builtins_weak_set_test.cpp
index 38d5eee994b90e6a203a8ed22bc555b51c7a7d8b..61aedcdc35b5ed09b5fbdcbdd3fe43bf21d5fcff 100644
--- a/ecmascript/builtins/tests/builtins_weak_set_test.cpp
+++ b/ecmascript/builtins/tests/builtins_weak_set_test.cpp
@@ -207,4 +207,54 @@ HWTEST_F_L0(BuiltinsWeakSetTest, DeleteAndRemove)
 
     EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
 }
+
+HWTEST_F_L0(BuiltinsWeakSetTest, SymbolKey)
+{
+    // create jsSet
+    JSHandle<JSWeakSet> weakSet(thread, CreateBuiltinsWeakSet(thread));
+
+    // add 2 keys
+    JSTaggedValue lastKey(JSTaggedValue::Undefined());
+    for (int i = 0; i < 2; i++) {
+        JSHandle<JSSymbol> symbolKey = thread->GetEcmaVM()->GetFactory()->NewJSSymbol();
+        JSHandle<JSTaggedValue> key(symbolKey);
+
+        auto ecmaRuntimeCallInfo =
+            TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+        ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined());
+        ecmaRuntimeCallInfo->SetThis(weakSet.GetTaggedValue());
+        ecmaRuntimeCallInfo->SetCallArg(0, key.GetTaggedValue());
+
+        [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo);
+        // add
+        JSTaggedValue result1 = BuiltinsWeakSet::Add(ecmaRuntimeCallInfo);
+        TestHelper::TearDownFrame(thread, prev);
+
+        EXPECT_TRUE(result1.IsECMAObject());
+        JSWeakSet *jsWeakSet = JSWeakSet::Cast(reinterpret_cast<TaggedObject *>(result1.GetRawData()));
+        EXPECT_EQ(jsWeakSet->GetSize(), static_cast<int>(i) + 1);
+        lastKey = key.GetTaggedValue();
+    }
+    // check that jsWeakSet still has lastKey, then delete it
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(weakSet.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, lastKey);
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    // has
+    JSTaggedValue result2 = BuiltinsWeakSet::Has(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+    EXPECT_EQ(result2.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    // delete
+    JSTaggedValue result3 =
BuiltinsWeakSet::Delete(ecmaRuntimeCallInfo1); + EXPECT_EQ(result3.GetRawData(), JSTaggedValue::True().GetRawData()); + + // check deleteKey is deleted + JSTaggedValue result4 = BuiltinsWeakSet::Has(ecmaRuntimeCallInfo1); + EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData()); +} } // namespace panda::test diff --git a/ecmascript/common.h b/ecmascript/common.h index 7dfb6d04c5f1f8f570038d3b9339065fdf39ab2a..c22714e7c3c68376660a2f61620c1e00e7ee5cc5 100644 --- a/ecmascript/common.h +++ b/ecmascript/common.h @@ -50,6 +50,10 @@ enum class GCReason : uint8_t { OTHER, }; +enum class RequestAotMode : uint8_t { + RE_COMPILE_ON_IDLE = 0 +}; + #define SCOPE_LIST(V) \ V(TotalGC) \ V(Initialize) \ diff --git a/ecmascript/compiler/BUILD.gn b/ecmascript/compiler/BUILD.gn index 886b989359077db1760f5bbf418fa6c411b1ace3..789a0f81acfa4bb755ad4a0443434dd0732cce5e 100644 --- a/ecmascript/compiler/BUILD.gn +++ b/ecmascript/compiler/BUILD.gn @@ -54,22 +54,51 @@ config("include_llvm") { cflags_cc = [ "-DARK_GC_SUPPORT" ] } +config("include_maple") { + include_dirs = [ + "$js_root/ecmascript/mapleall/maple_be/include/cg", + "$js_root/ecmascript/mapleall/maple_be/include/litecg", + "$js_root/ecmascript/mapleall/maple_be/cg/aarch64", + "$js_root/ecmascript/mapleall/maple_be/include/ad", + "${MAPLE_BUILD_OUTPUT}/common/target", + "$js_root/ecmascript/mapleall/maple_be/include/ad/target", + "$js_root/ecmascript/mapleall/maple_be/include/be/aarch64", + "$js_root/ecmascript/mapleall/maple_be/include/be", + "$js_root/ecmascript/mapleall/maple_driver/include", + "$js_root/ecmascript/mapleall/maple_driver/defs", + "$js_root/ecmascript/mapleall/maple_driver/defs/default", + "$js_root/ecmascript/mapleall/maple_util/include", + "$js_root/ecmascript/mapleall/mpl2mpl/include", + "$js_root/ecmascript/mapleall/mempool/include", + "$js_root/ecmascript/mapleall/maple_phase/include", + "$js_root/ecmascript/mapleall/maple_ir/include", + "$js_root/ecmascript/mapleall/maple_me/include", + ] +} + ohos_source_set("libark_jsoptimizer_set") { stack_protector_ret = false sources = [ "access_object_stub_builder.cpp", "argument_accessor.cpp", + "array_bounds_check_elimination.cpp", "assembler/aarch64/assembler_aarch64.cpp", "assembler/aarch64/extend_assembler.cpp", "assembler/x64/assembler_x64.cpp", "assembler/x64/extended_assembler_x64.cpp", "assembler_module.cpp", "async_function_lowering.cpp", + "base/depend_chain_helper.cpp", "bc_call_signature.cpp", + "builtins/builtins_array_stub_builder.cpp", "builtins/builtins_call_signature.cpp", + "builtins/builtins_collection_stub_builder.cpp", + "builtins/builtins_function_stub_builder.cpp", + "builtins/builtins_object_stub_builder.cpp", "builtins/builtins_string_stub_builder.cpp", "builtins/builtins_stubs.cpp", "builtins/containers_stub_builder.cpp", + "builtins/linked_hashtable_stub_builder.cpp", "builtins_lowering.cpp", "bytecode_circuit_builder.cpp", "bytecode_info_collector.cpp", @@ -95,7 +124,11 @@ ohos_source_set("libark_jsoptimizer_set") { "later_elimination.cpp", "lcr_lowering.cpp", "llvm_codegen.cpp", + "litecg_codegen.cpp", + "ir_builder.cpp", + "ir_module.cpp", "llvm_ir_builder.cpp", + "litecg_ir_builder.cpp", "loop_analysis.cpp", "loop_peeling.cpp", "new_object_stub_builder.cpp", @@ -104,18 +137,18 @@ ohos_source_set("libark_jsoptimizer_set") { "number_speculative_lowering.cpp", "number_speculative_retype.cpp", "number_speculative_runner.cpp", + "object_access_helper.cpp", "operations_stub_builder.cpp", "pass_manager.cpp", "profiler_stub_builder.cpp", 
"range_analysis.cpp", + "range_guard.cpp", "rt_call_signature.cpp", "scheduler.cpp", "slowpath_lowering.cpp", "state_split_linearizer.cpp", "stub.cpp", "stub_builder.cpp", - "test_stubs.cpp", - "test_stubs_signature.cpp", "trampoline/aarch64/asm_interpreter_call.cpp", "trampoline/aarch64/common_call.cpp", "trampoline/aarch64/optimized_call.cpp", @@ -133,6 +166,7 @@ ohos_source_set("libark_jsoptimizer_set") { "type_inference/initialization_analysis.cpp", "type_inference/method_type_infer.cpp", "type_inference/pgo_type_infer.cpp", + "type_inference/pgo_type_infer_helper.cpp", "type_mcr_lowering.cpp", "type_recorder.cpp", "typed_array_stub_builder.cpp", @@ -150,6 +184,7 @@ ohos_source_set("libark_jsoptimizer_set") { public_configs = [ ":include_llvm", + ":include_maple", "$js_root:ark_jsruntime_compiler_config", "$js_root:ark_jsruntime_public_config", ] @@ -230,16 +265,14 @@ ohos_source_set("libark_jsoptimizer_set") { "LLVMBitWriter", ] + if (!is_mac && !is_ios) { + libs += [ "LLVMParts" ] + } + # Only support compiling aarch64 target at device-side(arm64 platform). - # So these os-related libs of arm and x86 are not needed on arm64 platform. + # So these os-related libs of x86 are not needed on arm64 platform. if (is_mac || current_cpu != "arm64") { libs += [ - "LLVMARMUtils", - "LLVMARMCodeGen", - "LLVMARMDisassembler", - "LLVMARMDesc", - "LLVMARMInfo", - "LLVMARMAsmParser", "LLVMX86AsmParser", "LLVMX86CodeGen", "LLVMX86Desc", @@ -326,35 +359,13 @@ ohos_shared_library("libark_jsoptimizer") { subsystem_name = "arkcompiler" } -ohos_shared_library("libark_jsoptimizer_test") { - stack_protector_ret = false - deps = [ - ":libark_jsoptimizer_set", - "$ark_root/libpandafile:libarkfile_static", - "$js_root:libark_jsruntime_test_set", - ] - - ldflags = [] - if (enable_coverage) { - ldflags += [ "--coverage" ] - cflags_cc = [ "--coverage" ] - } - - if (!ark_standalone_build) { - ldflags += [ "-Wl,--lto-O0" ] - } - install_enable = false - - output_extension = "so" - subsystem_name = "test" -} - ohos_executable("ark_stub_compiler") { sources = [ "stub_compiler.cpp" ] include_dirs = [ "$target_gen_dir" ] configs = [ ":include_llvm", + ":include_maple", "$js_root:ark_jsruntime_compiler_config", "$js_root:ark_jsruntime_public_config", ] @@ -364,6 +375,19 @@ ohos_executable("ark_stub_compiler") { ":libark_mock_stub_set", "$js_root:libark_js_intl_set", "$js_root:libark_jsruntime_set", + "$js_root/ecmascript/mapleall/maple_util:libmplutil", + "$js_root/ecmascript/mapleall/maple_util:libcommandline", + "$js_root/ecmascript/mapleall/maple_driver:libdriver_option", + "$js_root/ecmascript/mapleall/mempool:libmempool", + "$js_root/ecmascript/mapleall/maple_phase:libmplphase", + "$js_root/ecmascript/mapleall/maple_be:libmplad", + "$js_root/ecmascript/mapleall/maple_be:libcg", + "$js_root/ecmascript/mapleall/maple_be:libmplbe", + "$js_root/ecmascript/mapleall/mpl2mpl:libmpl2mpl", + "$js_root/ecmascript/mapleall/maple_ir:libmplir", + "$js_root/ecmascript/mapleall/maple_ipa:libmplipa", + "$js_root/ecmascript/mapleall/maple_me:libmplme", + "$js_root/ecmascript/mapleall/maple_me:libmplmewpo", ] external_deps = [] if (!is_cross_platform_build) { @@ -399,9 +423,9 @@ ohos_executable("ark_stub_compiler") { ohos_executable("ark_aot_compiler") { sources = [ "aot_compiler.cpp" ] - configs = [ ":include_llvm", + ":include_maple", "$js_root:ark_jsruntime_compiler_config", "$js_root:ark_jsruntime_public_config", ] @@ -412,6 +436,19 @@ ohos_executable("ark_aot_compiler") { "$js_root:libark_js_intl_set", 
"$js_root:libark_jsruntime_set", "$js_root/ecmascript/compiler:libark_jsoptimizer_set", + "$js_root/ecmascript/mapleall/maple_util:libmplutil", + "$js_root/ecmascript/mapleall/maple_util:libcommandline", + "$js_root/ecmascript/mapleall/maple_driver:libdriver_option", + "$js_root/ecmascript/mapleall/mempool:libmempool", + "$js_root/ecmascript/mapleall/maple_phase:libmplphase", + "$js_root/ecmascript/mapleall/maple_be:libmplad", + "$js_root/ecmascript/mapleall/maple_be:libcg", + "$js_root/ecmascript/mapleall/maple_be:libmplbe", + "$js_root/ecmascript/mapleall/mpl2mpl:libmpl2mpl", + "$js_root/ecmascript/mapleall/maple_ir:libmplir", + "$js_root/ecmascript/mapleall/maple_ipa:libmplipa", + "$js_root/ecmascript/mapleall/maple_me:libmplme", + "$js_root/ecmascript/mapleall/maple_me:libmplmewpo", ] external_deps = [] if (!is_cross_platform_build) { diff --git a/ecmascript/compiler/access_object_stub_builder.cpp b/ecmascript/compiler/access_object_stub_builder.cpp index 20f67bcec933b8c79b60edab95c47dca690639e2..aafa30cf36be2d68abca09ed04880980fb906897 100644 --- a/ecmascript/compiler/access_object_stub_builder.cpp +++ b/ecmascript/compiler/access_object_stub_builder.cpp @@ -15,6 +15,7 @@ #include "ecmascript/compiler/access_object_stub_builder.h" #include "ecmascript/compiler/ic_stub_builder.h" #include "ecmascript/compiler/interpreter_stub-inl.h" +#include "ecmascript/compiler/profiler_stub_builder.h" #include "ecmascript/compiler/rt_call_signature.h" #include "ecmascript/compiler/stub_builder-inl.h" #include "ecmascript/ic/profile_type_info.h" @@ -34,11 +35,11 @@ GateRef AccessObjectStubBuilder::LoadObjByName(GateRef glue, GateRef receiver, G GateRef value = 0; ICStubBuilder builder(this); builder.SetParameters(glue, receiver, profileTypeInfo, value, slotId); - builder.LoadICByName(&result, &tryFastPath, &slowPath, &exit); + builder.LoadICByName(&result, &tryFastPath, &slowPath, &exit, callback); Bind(&tryFastPath); { GateRef propKey = ResolvePropKey(glue, prop, info); - result = GetPropertyByName(glue, receiver, propKey); + result = GetPropertyByName(glue, receiver, propKey, callback); Label notHole(env); Branch(TaggedIsHole(*result), &slowPath, ¬Hole); Bind(¬Hole); @@ -75,7 +76,7 @@ GateRef AccessObjectStubBuilder::DeprecatedLoadObjByName(GateRef glue, GateRef r Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - result = GetPropertyByName(glue, receiver, propKey); + result = GetPropertyByName(glue, receiver, propKey, ProfileOperation()); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); @@ -129,8 +130,8 @@ GateRef AccessObjectStubBuilder::StoreObjByName(GateRef glue, GateRef receiver, GateRef AccessObjectStubBuilder::ResolvePropKey(GateRef glue, GateRef prop, const StringIdInfo &info) { if (jsFunc_ != Circuit::NullGate()) { - GateRef key = LoadObjectFromConstPool(jsFunc_, prop); - return key; + GateRef constpool = GetConstPoolFromFunction(jsFunc_); + return GetStringFromConstPool(glue, constpool, ChangeIntPtrToInt32(prop)); } if (!info.IsValid()) { return prop; @@ -142,7 +143,7 @@ GateRef AccessObjectStubBuilder::ResolvePropKey(GateRef glue, GateRef prop, cons } GateRef AccessObjectStubBuilder::LoadObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef profileTypeInfo, - GateRef slotId) + GateRef slotId, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -155,16 +156,17 @@ GateRef AccessObjectStubBuilder::LoadObjByValue(GateRef glue, GateRef receiver, GateRef value = 0; ICStubBuilder builder(this); 
builder.SetParameters(glue, receiver, profileTypeInfo, value, slotId, key); - builder.LoadICByValue(&result, &tryFastPath, &slowPath, &exit); + builder.LoadICByValue(&result, &tryFastPath, &slowPath, &exit, callback); Bind(&tryFastPath); { - result = GetPropertyByValue(glue, receiver, key); + result = GetPropertyByValue(glue, receiver, key, callback); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); { result = CallRuntime(glue, RTSTUB_ID(LoadICByValue), { profileTypeInfo, receiver, key, IntToTaggedInt(slotId) }); + ProfilerStubBuilder(env).ProfileObjLayoutOrIndex(glue, receiver, key, False(), callback); Jump(&exit); } Bind(&exit); @@ -187,7 +189,7 @@ GateRef AccessObjectStubBuilder::DeprecatedLoadObjByValue(GateRef glue, GateRef Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - result = GetPropertyByValue(glue, receiver, key); + result = GetPropertyByValue(glue, receiver, key, ProfileOperation()); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); @@ -225,6 +227,7 @@ GateRef AccessObjectStubBuilder::StoreObjByValue(GateRef glue, GateRef receiver, { result = CallRuntime(glue, RTSTUB_ID(StoreICByValue), { profileTypeInfo, receiver, key, value, IntToTaggedInt(slotId) }); + ProfilerStubBuilder(env).ProfileObjLayoutOrIndex(glue, receiver, key, True(), callback); Jump(&exit); } Bind(&exit); @@ -234,7 +237,8 @@ GateRef AccessObjectStubBuilder::StoreObjByValue(GateRef glue, GateRef receiver, } GateRef AccessObjectStubBuilder::TryLoadGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId) + GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -264,7 +268,7 @@ GateRef AccessObjectStubBuilder::TryLoadGlobalByName(GateRef glue, GateRef prop, Bind(¬FoundInRecord); { GateRef globalObject = GetGlobalObject(glue); - result = GetGlobalOwnProperty(glue, globalObject, propKey); + result = GetGlobalOwnProperty(glue, globalObject, propKey, callback); Branch(TaggedIsHole(*result), &slowPath, &exit); } } @@ -283,7 +287,8 @@ GateRef AccessObjectStubBuilder::TryLoadGlobalByName(GateRef glue, GateRef prop, } GateRef AccessObjectStubBuilder::TryStoreGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef value, GateRef profileTypeInfo, GateRef slotId) + GateRef value, GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -312,7 +317,7 @@ GateRef AccessObjectStubBuilder::TryStoreGlobalByName(GateRef glue, GateRef prop Bind(¬FoundInRecord); { GateRef globalObject = GetGlobalObject(glue); - result = GetGlobalOwnProperty(glue, globalObject, propKey); + result = GetGlobalOwnProperty(glue, globalObject, propKey, callback); Label isFoundInGlobal(env); Label notFoundInGlobal(env); Branch(TaggedIsHole(*result), ¬FoundInGlobal, &isFoundInGlobal); @@ -345,7 +350,7 @@ GateRef AccessObjectStubBuilder::TryStoreGlobalByName(GateRef glue, GateRef prop } GateRef AccessObjectStubBuilder::LoadGlobalVar(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId) + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -364,7 +369,7 @@ GateRef AccessObjectStubBuilder::LoadGlobalVar(GateRef glue, GateRef prop, const { GateRef globalObject = GetGlobalObject(glue); GateRef propKey = ResolvePropKey(glue, prop, info); - result = GetGlobalOwnProperty(glue, 
globalObject, propKey); + result = GetGlobalOwnProperty(glue, globalObject, propKey, callback); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); diff --git a/ecmascript/compiler/access_object_stub_builder.h b/ecmascript/compiler/access_object_stub_builder.h index e669596129e6a7a13045bff825d8c172f655a249..3e972b6a35c701fe095da7434a91a692c54d3cd4 100644 --- a/ecmascript/compiler/access_object_stub_builder.h +++ b/ecmascript/compiler/access_object_stub_builder.h @@ -38,16 +38,17 @@ public: GateRef DeprecatedLoadObjByName(GateRef glue, GateRef receiver, GateRef propKey); GateRef StoreObjByName(GateRef glue, GateRef receiver, GateRef prop, const StringIdInfo &info, GateRef value, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback = ProfileOperation()); - GateRef LoadObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef profileTypeInfo, GateRef slotId); + GateRef LoadObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback = ProfileOperation()); GateRef StoreObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef value, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback = ProfileOperation()); GateRef DeprecatedLoadObjByValue(GateRef glue, GateRef receiver, GateRef key); GateRef TryLoadGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId); + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); GateRef TryStoreGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef value, GateRef profileTypeInfo, GateRef slotId); + GateRef value, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); GateRef LoadGlobalVar(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId); + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); GateRef StoreGlobalVar(GateRef glue, GateRef prop, const StringIdInfo &info, GateRef value, GateRef profileTypeInfo, GateRef slotId); private: diff --git a/ecmascript/compiler/aot_compiler.cpp b/ecmascript/compiler/aot_compiler.cpp index c213f9e600c2832ad6db5ceb035941e4c05fc531..94af51a7f3706cf248281608daafe1222f969ef3 100644 --- a/ecmascript/compiler/aot_compiler.cpp +++ b/ecmascript/compiler/aot_compiler.cpp @@ -19,6 +19,7 @@ #include #include "ecmascript/compiler/aot_file/aot_file_manager.h" +#include "ecmascript/compiler/ohos_pkg_args.h" #include "ecmascript/base/string_helper.h" #include "ecmascript/compiler/pass_manager.h" #include "ecmascript/compiler/compiler_log.h" @@ -31,6 +32,7 @@ #include "ecmascript/platform/file.h" namespace panda::ecmascript::kungfu { +namespace { std::string GetHelper() { std::string str; @@ -47,14 +49,25 @@ void AOTInitialize(EcmaVM *vm) vm->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->Initialize(); } -JSPandaFile *CreateAndVerifyJSPandaFile(const JSRuntimeOptions &runtimeOptions, const std::string &fileName, EcmaVM *vm) +JSPandaFile *CreateAndVerifyJSPandaFile(const JSRuntimeOptions &runtimeOptions, const OhosPkgArgs &pkgArgs, + const std::string &fileName, EcmaVM *vm) { JSPandaFileManager *jsPandaFileManager = JSPandaFileManager::GetInstance(); std::shared_ptr jsPandaFile = nullptr; if (runtimeOptions.IsTargetCompilerMode()) { - std::string hapPath = runtimeOptions.GetHapPath(); - uint32_t offset = runtimeOptions.GetHapAbcOffset(); - uint32_t size = runtimeOptions.GetHapAbcSize(); + std::string hapPath; + uint32_t offset {}; + uint32_t size {}; 
+ if (pkgArgs.Valid()) { + hapPath = pkgArgs.GetPath(); + offset = pkgArgs.GetOffset(); + size = pkgArgs.GetSize(); + } else { + // for legacy params + hapPath = runtimeOptions.GetHapPath(); + offset = runtimeOptions.GetHapAbcOffset(); + size = runtimeOptions.GetHapAbcSize(); + } if (size == 0) { LOG_ECMA(ERROR) << "buffer is empty in target compiler mode!"; return nullptr; @@ -91,6 +104,42 @@ JSPandaFile *CreateAndVerifyJSPandaFile(const JSRuntimeOptions &runtimeOptions, return jsPandaFile.get(); } +bool HandleOhosPkgArgs(EcmaVM *vm, JSRuntimeOptions &runtimeOptions, OhosPkgArgs &pkgArgs, arg_list_t &pandaFileNames) +{ + ASSERT(runtimeOptions.IsTargetCompilerMode()); + if (!runtimeOptions.GetCompilerPkgJsonInfo().empty()) { + if (pkgArgs.ParseFromJson(vm, runtimeOptions.GetCompilerPkgJsonInfo())) { + LOG_COMPILER(INFO) << "Parse main pkg info success."; + pkgArgs.Dump(); + pandaFileNames.emplace_back(pkgArgs.GetFullName()); + } else { + return false; + } + } + // for external pkg, dump it first. + if (!runtimeOptions.GetCompilerExternalPkgJsonInfo().empty()) { + std::list externalList; + OhosPkgArgs::ParseListFromJson(vm, runtimeOptions.GetCompilerExternalPkgJsonInfo(), externalList); + for (const auto &externalPkg : externalList) { + externalPkg.Dump(); + } + } + return true; +} + +void HandleTargetModeInfo(EcmaVM *vm, bool &isEnableOptOnHeapCheck, size_t &optLevel) +{ + JSRuntimeOptions &vmOpt = vm->GetJSOptions(); + ASSERT(vmOpt.IsTargetCompilerMode()); + // target need fast compiler mode + vmOpt.SetFastAOTCompileMode(true); + vmOpt.SetOptLevel(3); // 3: default opt level + optLevel = 3; + vmOpt.SetEnableOptOnHeapCheck(false); + isEnableOptOnHeapCheck = false; +} +} // namespace + int Main(const int argc, const char **argv) { auto startTime = @@ -102,22 +151,13 @@ int Main(const int argc, const char **argv) LOG_ECMA(DEBUG) << argv[i]; } - int newArgc = argc; if (argc < 2) { // 2: at least have two arguments LOG_COMPILER(ERROR) << GetHelper(); return -1; } - std::string files = argv[argc - 1]; - if (!base::StringHelper::EndsWith(files, ".abc")) { - LOG_COMPILER(ERROR) << "The last argument must be abc file" << std::endl; - LOG_COMPILER(ERROR) << GetHelper(); - return 1; - } - - newArgc--; JSRuntimeOptions runtimeOptions; - bool retOpt = runtimeOptions.ParseCommand(newArgc, argv); + bool retOpt = runtimeOptions.ParseCommand(argc, argv); if (!retOpt) { LOG_COMPILER(ERROR) << GetHelper(); return 1; @@ -141,7 +181,8 @@ int Main(const int argc, const char **argv) { LocalScope scope(vm); std::string delimiter = GetFileDelimiter(); - arg_list_t pandaFileNames = base::StringHelper::SplitString(files, delimiter); + arg_list_t pandaFileNames {}; + OhosPkgArgs pkgArgs; std::string triple = runtimeOptions.GetTargetTriple(); if (runtimeOptions.GetAOTOutputFile().empty()) { @@ -155,6 +196,7 @@ int Main(const int argc, const char **argv) bool compilerLogTime = runtimeOptions.IsEnableCompilerLogTime(); size_t maxAotMethodSize = runtimeOptions.GetMaxAotMethodSize(); size_t maxMethodsInModule = runtimeOptions.GetCompilerModuleMethods(); + bool isEnableArrayBoundsCheckElimination = runtimeOptions.IsEnableArrayBoundsCheckElimination(); bool isEnableTypeLowering = runtimeOptions.IsEnableTypeLowering(); bool isEnableEarlyElimination = runtimeOptions.IsEnableEarlyElimination(); bool isEnableLaterElimination = runtimeOptions.IsEnableLaterElimination(); @@ -163,9 +205,32 @@ int Main(const int argc, const char **argv) bool isEnableTypeInfer = isEnableTypeLowering || 
vm->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->AssertTypes(); bool isEnableOptPGOType = runtimeOptions.IsEnableOptPGOType(); + bool isEnableOptTrackField = runtimeOptions.IsEnableOptTrackField(); + bool isEnableOptLoopPeeling = runtimeOptions.IsEnableOptLoopPeeling(); + bool isEnableOptOnHeapCheck = runtimeOptions.IsEnableOptOnHeapCheck(); + if (runtimeOptions.IsTargetCompilerMode()) { + if (!HandleOhosPkgArgs(vm, runtimeOptions, pkgArgs, pandaFileNames)) { + LOG_COMPILER(ERROR) << GetHelper(); + LOG_COMPILER(ERROR) << "Parse pkg info failed, exit."; + return 1; + } + HandleTargetModeInfo(vm, isEnableOptOnHeapCheck, optLevel); + } + if (runtimeOptions.GetCompilerPkgJsonInfo().empty() || !pkgArgs.Valid()) { + // if no pkgArgs, last param must be abc file + std::string files = argv[argc - 1]; + if (!base::StringHelper::EndsWith(files, ".abc")) { + LOG_COMPILER(ERROR) << "The last argument must be abc file" << std::endl; + LOG_COMPILER(ERROR) << GetHelper(); + return 1; + } + pandaFileNames = base::StringHelper::SplitString(files, delimiter); + } + PassOptions passOptions(isEnableArrayBoundsCheckElimination, isEnableTypeLowering, isEnableEarlyElimination, + isEnableLaterElimination, isEnableValueNumbering, isEnableTypeInfer, + isEnableOptInlining, isEnableOptPGOType, isEnableOptTrackField, isEnableOptLoopPeeling, + isEnableOptOnHeapCheck); - PassOptions passOptions(isEnableTypeLowering, isEnableEarlyElimination, isEnableLaterElimination, - isEnableValueNumbering, isEnableTypeInfer, isEnableOptInlining, isEnableOptPGOType); uint32_t hotnessThreshold = runtimeOptions.GetPGOHotnessThreshold(); AOTInitialize(vm); @@ -183,7 +248,7 @@ int Main(const int argc, const char **argv) for (const auto &fileName : pandaFileNames) { auto extendedFilePath = panda::os::file::File::GetExtendedFilePath(fileName); LOG_COMPILER(INFO) << "AOT compile: " << extendedFilePath; - JSPandaFile *jsPandaFile = CreateAndVerifyJSPandaFile(runtimeOptions, extendedFilePath, vm); + JSPandaFile *jsPandaFile = CreateAndVerifyJSPandaFile(runtimeOptions, pkgArgs, extendedFilePath, vm); if (passManager.Compile(jsPandaFile, extendedFilePath, generator) == false) { ret = false; continue; diff --git a/ecmascript/compiler/aot_file/an_file_info.cpp b/ecmascript/compiler/aot_file/an_file_info.cpp index 5d677e11e0c7aa9bc50a23a3d397f57577634283..9d965f35bfa0b72265b1a882a4279779416a6950 100644 --- a/ecmascript/compiler/aot_file/an_file_info.cpp +++ b/ecmascript/compiler/aot_file/an_file_info.cpp @@ -40,7 +40,7 @@ void AnFileInfo::Save(const std::string &filename, Triple triple) ElfBuilder builder(des_, GetDumpSectionNames()); llvm::ELF::Elf64_Ehdr header; - builder.PackELFHeader(header, base::FileHeader::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); + builder.PackELFHeader(header, base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); file.write(reinterpret_cast(&header), sizeof(llvm::ELF::Elf64_Ehdr)); builder.PackELFSections(file); builder.PackELFSegment(file); @@ -69,7 +69,7 @@ bool AnFileInfo::Load(const std::string &filename) ElfReader reader(fileMapMem_); std::vector secs = GetDumpSectionNames(); - if (!reader.VerifyELFHeader(base::FileHeader::ToVersionNumber(AOTFileVersion::AN_VERSION), + if (!reader.VerifyELFHeader(base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), AOTFileVersion::AN_STRICT_MATCH)) { return false; } diff --git a/ecmascript/compiler/aot_file/an_file_info.h b/ecmascript/compiler/aot_file/an_file_info.h index 
1ee48111479eb1e872e70795bd81457feebee40b..ebd62be068067c9a0382e9e43afdc4036dcc9a5e 100644 --- a/ecmascript/compiler/aot_file/an_file_info.h +++ b/ecmascript/compiler/aot_file/an_file_info.h @@ -47,9 +47,9 @@ public: void TryRemoveAnFile(const char *filename); - void AlignTextSec() + void AlignTextSec(uint32_t alignSize) { - curTextSecOffset_ = AlignUp(curTextSecOffset_, TEXT_SEC_ALIGN); + curTextSecOffset_ = AlignUp(curTextSecOffset_, alignSize); } void UpdateCurTextSecOffset(uint64_t size) diff --git a/ecmascript/compiler/aot_file/aot_file_info.h b/ecmascript/compiler/aot_file/aot_file_info.h index dddb7c43ff81a420100ebd0d2af439260fadba83..4d7f772bd052987ab054b03d3750585057fa3573 100644 --- a/ecmascript/compiler/aot_file/aot_file_info.h +++ b/ecmascript/compiler/aot_file/aot_file_info.h @@ -32,8 +32,9 @@ public: AOTFileInfo() = default; virtual ~AOTFileInfo() = default; + static constexpr uint32_t TEXT_SEC_ALIGN = 16; static constexpr uint32_t DATA_SEC_ALIGN = 8; - static constexpr uint32_t TEXT_SEC_ALIGN = 4096; + static constexpr uint32_t PAGE_ALIGN = 4096; struct FuncEntryDes { uint64_t codeAddr_ {}; @@ -149,7 +150,7 @@ public: des.SetArkStackMapSize(size); } - size_t GetCodeUnitsNum() + size_t GetCodeUnitsNum() const { return des_.size(); } diff --git a/ecmascript/compiler/aot_file/aot_file_manager.cpp b/ecmascript/compiler/aot_file/aot_file_manager.cpp index 6428e23b96a3bd87697236b06726e551447b645f..f17b51bd371d254f253ec708afd844b8f2f29624 100644 --- a/ecmascript/compiler/aot_file/aot_file_manager.cpp +++ b/ecmascript/compiler/aot_file/aot_file_manager.cpp @@ -20,6 +20,7 @@ #include "ecmascript/compiler/aot_file/elf_builder.h" #include "ecmascript/compiler/aot_file/elf_reader.h" #include "ecmascript/compiler/bc_call_signature.h" +#include "ecmascript/compiler/call_signature.h" #include "ecmascript/compiler/common_stubs.h" #include "ecmascript/compiler/compiler_log.h" #include "ecmascript/deoptimizer/deoptimizer.h" @@ -82,24 +83,25 @@ bool AOTFileManager::LoadAiFile([[maybe_unused]] const std::string &filename) #endif } -void AOTFileManager::LoadAiFile(const JSPandaFile *jsPandaFile) +bool AOTFileManager::LoadAiFile(const JSPandaFile *jsPandaFile) { uint32_t anFileInfoIndex = GetAnFileIndex(jsPandaFile); // this abc file does not have corresponding an file if (anFileInfoIndex == INVALID_INDEX) { - return; + return false; } auto iter = desCPs_.find(anFileInfoIndex); // already loaded if (iter != desCPs_.end()) { - return; + return false; } AnFileDataManager *anFileDataManager = AnFileDataManager::GetInstance(); std::string aiFilename = anFileDataManager->GetDir(); aiFilename += JSFilePath::GetHapName(jsPandaFile) + AOTFileManager::FILE_EXTENSION_AI; LoadAiFile(aiFilename); + return true; } const std::shared_ptr AOTFileManager::GetAnFileInfo(const JSPandaFile *jsPandaFile) const @@ -217,6 +219,10 @@ void AOTFileManager::SetAOTMainFuncEntry(JSHandle mainFunc, const JS #ifndef NDEBUG PrintAOTEntry(jsPandaFile, method, mainEntry); #endif + + MethodLiteral *methodLiteral = method->GetMethodLiteral(); + methodLiteral->SetAotCodeBit(true); + methodLiteral->SetIsFastCall(isFastCall); } void AOTFileManager::SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *method, @@ -239,6 +245,10 @@ void AOTFileManager::SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *met if (canFastCall != nullptr) { *canFastCall = entry.isFastCall_; } + + MethodLiteral *methodLiteral = method->GetMethodLiteral(); + methodLiteral->SetAotCodeBit(true); + methodLiteral->SetIsFastCall(entry.isFastCall_); } 
kungfu::ArkStackMapParser *AOTFileManager::GetStackMapParser() const @@ -393,4 +403,12 @@ bool AOTFileManager::GetAbsolutePath(const CString &relativePathCstr, CString &a } return false; } + +const Heap *AOTFileManager::GetHeap() +{ + if (vm_ == nullptr) { + return nullptr; + } + return vm_->GetHeap(); +} } // namespace panda::ecmascript diff --git a/ecmascript/compiler/aot_file/aot_file_manager.h b/ecmascript/compiler/aot_file/aot_file_manager.h index 9e429e2f43e6e9e816eca525b61ffe5faefa9e3d..77766633d47ff2f353e35391a94de7fcbc70b342 100644 --- a/ecmascript/compiler/aot_file/aot_file_manager.h +++ b/ecmascript/compiler/aot_file/aot_file_manager.h @@ -45,6 +45,7 @@ class JSThread; * | AOT Function Entry Index | | * +--------------------------------+---- * | AOT Instance Hclass (IHC) | + * | AOT Constructor Hclass (CHC) | * +--------------------------------+ */ class AOTLiteralInfo : public TaggedArray { @@ -64,6 +65,7 @@ public: { TaggedArray::InitializeWithSpecialValue(initValue, capacity + RESERVED_LENGTH, extraLength); SetIhc(JSTaggedValue::Undefined()); + SetChc(JSTaggedValue::Undefined()); } inline uint32_t GetCacheLength() const @@ -81,6 +83,16 @@ public: return JSTaggedValue(Barriers::GetValue(GetData(), GetIhcOffset())); } + inline void SetChc(JSTaggedValue value) + { + Barriers::SetPrimitive(GetData(), GetChcOffset(), value.GetRawData()); + } + + inline JSTaggedValue GetChc() const + { + return JSTaggedValue(Barriers::GetValue(GetData(), GetChcOffset())); + } + inline void SetObjectToCache(JSThread *thread, uint32_t index, JSTaggedValue value) { Set(thread, index, value); @@ -91,13 +103,19 @@ public: return Get(index); } private: - static constexpr size_t AOT_IHC_INDEX = 1; + static constexpr size_t AOT_CHC_INDEX = 1; + static constexpr size_t AOT_IHC_INDEX = 2; static constexpr size_t RESERVED_LENGTH = AOT_IHC_INDEX; inline size_t GetIhcOffset() const { return JSTaggedValue::TaggedTypeSize() * (GetLength() - AOT_IHC_INDEX); } + + inline size_t GetChcOffset() const + { + return JSTaggedValue::TaggedTypeSize() * (GetLength() - AOT_CHC_INDEX); + } }; class AOTFileManager { @@ -126,13 +144,14 @@ public: void SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *method, uint32_t entryIndex, bool *canFastCall = nullptr); bool LoadAiFile([[maybe_unused]] const std::string &filename); - void LoadAiFile(const JSPandaFile *jsPandaFile); + bool LoadAiFile(const JSPandaFile *jsPandaFile); kungfu::ArkStackMapParser* GetStackMapParser() const; static JSTaggedValue GetAbsolutePath(JSThread *thread, JSTaggedValue relativePathVal); static bool GetAbsolutePath(const CString &relativePathCstr, CString &absPathCstr); static bool RewriteDataSection(uintptr_t dataSec, size_t size, uintptr_t newData, size_t newSize); void AddConstantPool(const CString &snapshotFileName, JSTaggedValue deserializedCPList); JSHandle GetDeserializedConstantPool(const JSPandaFile *jsPandaFile, int32_t cpID); + const Heap *GetHeap(); static void DumpAOTInfo() DUMP_API_ATTR; diff --git a/ecmascript/compiler/aot_file/aot_version.h b/ecmascript/compiler/aot_file/aot_version.h index 0e50be1746469f82461e0d71cc3cf5b780f4dc42..28d7f771694e05430638739f016835e4ff06a4d2 100644 --- a/ecmascript/compiler/aot_file/aot_version.h +++ b/ecmascript/compiler/aot_file/aot_version.h @@ -25,10 +25,10 @@ public: // Release Version Snapshot Version // 3.2 0.0.0.x // 4.0 4.0.0.x - static constexpr base::FileHeader::VersionType AN_VERSION = {4, 0, 0, 3}; + static constexpr base::FileHeaderBase::VersionType AN_VERSION = {4, 0, 0, 5}; static 
constexpr bool AN_STRICT_MATCH = true; - static constexpr base::FileHeader::VersionType AI_VERSION = {4, 0, 0, 1}; + static constexpr base::FileHeaderBase::VersionType AI_VERSION = {4, 0, 0, 2}; static constexpr bool AI_STRICT_MATCH = true; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_AOT_VERSION_H \ No newline at end of file +#endif // ECMASCRIPT_AOT_VERSION_H diff --git a/ecmascript/compiler/aot_file/elf_builder.cpp b/ecmascript/compiler/aot_file/elf_builder.cpp index 044b2ab20b36aba52ccf4e62e15f76ee2929834f..f2f3c525aa1b33862467e79a8cadb3f94c853303 100644 --- a/ecmascript/compiler/aot_file/elf_builder.cpp +++ b/ecmascript/compiler/aot_file/elf_builder.cpp @@ -83,12 +83,7 @@ void ElfBuilder::Initialize() des_[i].AddArkStackMapSection(); } sectionToAlign_ = { - {ElfSecName::RODATA, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST4, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST8, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST16, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST32, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::TEXT, AOTFileInfo::TEXT_SEC_ALIGN}, + {ElfSecName::TEXT, AOTFileInfo::PAGE_ALIGN}, {ElfSecName::STRTAB, 1}, {ElfSecName::SYMTAB, AOTFileInfo::DATA_SEC_ALIGN}, {ElfSecName::SHSTRTAB, AOTFileInfo::DATA_SEC_ALIGN}, @@ -155,7 +150,7 @@ uint32_t ElfBuilder::GetShIndex(ElfSecName section) const int ElfBuilder::GetSecNum() const { - return sections_.size(); + return sections_.size() + 1; // add first empty section. } /* @@ -218,7 +213,7 @@ void ElfBuilder::PackELFHeader(llvm::ELF::Elf64_Ehdr &header, uint32_t version, // size of section headers header.e_shentsize = sizeof(llvm::ELF::Elf64_Shdr); // number of section headers - header.e_shnum = GetSecNum() + 1; // 1: skip null section and ark stackmap + header.e_shnum = GetSecNum(); // section header string table index header.e_shstrndx = static_cast(GetShIndex(ElfSecName::SHSTRTAB)); // section header stub sec info index @@ -301,8 +296,10 @@ std::pair ElfBuilder::FindShStrTab() const void ElfBuilder::AllocateShdr(std::unique_ptr &shdr, const uint32_t &secNum) { shdr = std::make_unique(secNum); - if (memset_s(reinterpret_cast(&shdr[0]), sizeof(llvm::ELF::Elf64_Shdr), - 0, sizeof(llvm::ELF::Elf64_Shdr)) != EOK) { + if (memset_s(reinterpret_cast(&shdr[0]), + sizeof(llvm::ELF::Elf64_Shdr), + 0, + sizeof(llvm::ELF::Elf64_Shdr)) != EOK) { UNREACHABLE(); } } @@ -331,19 +328,32 @@ void ElfBuilder::MergeTextSections(std::ofstream &file, ModuleSectionDes::ModuleRegionInfo &curInfo = moduleInfo[i]; uint32_t curSecSize = des.GetSecSize(ElfSecName::TEXT); uint64_t curSecAddr = des.GetSecAddr(ElfSecName::TEXT); - curSecOffset = AlignUp(curSecOffset, AOTFileInfo::TEXT_SEC_ALIGN); + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::PAGE_ALIGN); file.seekp(curSecOffset); auto curModuleSec = des.GetSectionsInfo(); - if (curModuleSec.find(ElfSecName::RODATA_CST8) != curModuleSec.end()) { - uint32_t rodataSize = des.GetSecSize(ElfSecName::RODATA_CST8); - uint64_t rodataAddr = des.GetSecAddr(ElfSecName::RODATA_CST8); - file.write(reinterpret_cast(rodataAddr), rodataSize); - curInfo.rodataSize = rodataSize; - curSecOffset += rodataSize; + uint64_t rodataAddrBeforeText = 0; + uint32_t rodataSizeBeforeText = 0; + uint64_t rodataAddrAfterText = 0; + uint32_t rodataSizeAfterText = 0; + std::tie(rodataAddrBeforeText, rodataSizeBeforeText, rodataAddrAfterText, rodataSizeAfterText) = + des.GetMergedRODataAddrAndSize(curSecAddr); + if (rodataSizeBeforeText != 0) { + 
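+            // NOTE (editor): read-only pools that LLVM emitted ahead of .text are copied first;
+            // the offset is then re-aligned to TEXT_SEC_ALIGN (16) so the machine code itself
+            // still starts on a 16-byte boundary inside the page-aligned module.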
file.write(reinterpret_cast(rodataAddrBeforeText), rodataSizeBeforeText); + curInfo.rodataSizeBeforeText = rodataSizeBeforeText; + curSecOffset += rodataSizeBeforeText; + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::TEXT_SEC_ALIGN); + file.seekp(curSecOffset); } - curInfo.textSize = curSecSize; file.write(reinterpret_cast(curSecAddr), curSecSize); + curInfo.textSize = curSecSize; curSecOffset += curSecSize; + if (rodataSizeAfterText != 0) { + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::DATA_SEC_ALIGN); + file.seekp(curSecOffset); + file.write(reinterpret_cast(rodataAddrAfterText), rodataSizeAfterText); + curInfo.rodataSizeAfterText = rodataSizeAfterText; + curSecOffset += rodataSizeAfterText; + } } } @@ -366,6 +376,37 @@ void ElfBuilder::MergeArkStackMapSections(std::ofstream &file, } } +void ElfBuilder::FixSymtab(llvm::ELF::Elf64_Shdr* shdr) { + using Elf64_Sym = llvm::ELF::Elf64_Sym; + + uint32_t secSize = des_[FullSecIndex].GetSecSize(ElfSecName::SYMTAB); + uint64_t secAddr = des_[FullSecIndex].GetSecAddr(ElfSecName::SYMTAB); + uint32_t secNum = GetSecNum(); + uint64_t textSecOffset = sectionToShdr_[ElfSecName::TEXT].sh_offset; + uint32_t shStrTabIndex = GetShIndex(ElfSecName::SHSTRTAB); + uint32_t textSecIndex = GetShIndex(ElfSecName::TEXT); + + Elf64_Sym *syms = reinterpret_cast(secAddr); + size_t n = secSize / sizeof(Elf64_Sym); + int localCount = -1; + for (size_t i = 0; i < n; ++i) { + Elf64_Sym* sy = &syms[i]; + if (sy->getBinding() != llvm::ELF::STB_LOCAL && localCount == -1) { + localCount = i; + } + if (sy->getType() == llvm::ELF::STT_SECTION) { + sy->st_shndx = shStrTabIndex; + } else if (sy->getType() == llvm::ELF::STT_FUNC) { + sy->st_shndx = textSecIndex; + sy->st_value += textSecOffset; + } + if (sy->st_shndx > secNum) { + sy->st_shndx = 0; + } + } + shdr->sh_info = localCount; +} + /* section of aot.an layout as follows: @@ -410,7 +451,7 @@ void ElfBuilder::PackELFSections(std::ofstream &file) llvm::ELF::Elf64_Off curSecOffset = ComputeEndAddrOfShdr(secNum); file.seekp(curSecOffset); - int i = 1; // 1: skip null section + int i = GetShIndex(ElfSecName::TEXT); auto shStrTab = FindShStrTab(); for (auto const &[secName, secInfo] : sections) { @@ -427,7 +468,7 @@ void ElfBuilder::PackELFSections(std::ofstream &file) std::string secNameStr = ModuleSectionDes::GetSecName(secName); // text section address needs 16 bytes alignment if (secName == ElfSecName::TEXT) { - curSecOffset = AlignUp(curSecOffset, AOTFileInfo::TEXT_SEC_ALIGN); + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::PAGE_ALIGN); file.seekp(curSecOffset); } llvm::ELF::Elf64_Word shName = FindShName(secNameStr, shStrTab.first, shStrTab.second); @@ -437,6 +478,7 @@ void ElfBuilder::PackELFSections(std::ofstream &file) curShdr.sh_flags = section.Flag(); curShdr.sh_addr = curSecOffset; curShdr.sh_offset = static_cast(curSecOffset); + curShdr.sh_info = 0; sectionToFileOffset_[secName] = static_cast(file.tellp()); switch (secName) { case ElfSecName::ARK_MODULEINFO: { @@ -463,6 +505,9 @@ void ElfBuilder::PackELFSections(std::ofstream &file) case ElfSecName::SHSTRTAB: case ElfSecName::ARK_FUNCENTRY: case ElfSecName::ARK_ASMSTUB: { + if (secName == ElfSecName::SYMTAB) { + FixSymtab(&curShdr); + } uint32_t curSecSize = des_[FullSecIndex].GetSecSize(secName); uint64_t curSecAddr = des_[FullSecIndex].GetSecAddr(secName); file.write(reinterpret_cast(curSecAddr), curSecSize); @@ -480,7 +525,6 @@ void ElfBuilder::PackELFSections(std::ofstream &file) file.seekp(curSecOffset); } curShdr.sh_link = 
static_cast(section.Link()); - curShdr.sh_info = 0; curShdr.sh_entsize = static_cast(section.Entsize()); sectionToShdr_[secName] = curShdr; LOG_COMPILER(DEBUG) << " shdr[i].sh_entsize " << std::hex << curShdr.sh_entsize << std::endl; diff --git a/ecmascript/compiler/aot_file/elf_builder.h b/ecmascript/compiler/aot_file/elf_builder.h index a1722003fa0c84e5098ce93c381a6d4cd1357123..710ae95fc0c377525de2953075b03ac5be618b05 100644 --- a/ecmascript/compiler/aot_file/elf_builder.h +++ b/ecmascript/compiler/aot_file/elf_builder.h @@ -65,10 +65,12 @@ private: void Initialize(); void SetLastSection(); void RemoveNotNeedSection(); + void FixSymtab(llvm::ELF::Elf64_Shdr* shdr); static constexpr uint32_t ASMSTUB_MODULE_NUM = 3; static constexpr uint32_t ShStrTableModuleDesIndex = 0; static constexpr uint32_t FullSecIndex = 0; + std::vector des_ {}; std::unique_ptr shStrTabPtr_ {nullptr}; std::map sectionToShdr_; diff --git a/ecmascript/compiler/aot_file/elf_reader.cpp b/ecmascript/compiler/aot_file/elf_reader.cpp index bc1f56c9688396d32e794e8edb85cd6944a06921..8223e230850d26eb19f95ef8460cb95a15f368c9 100644 --- a/ecmascript/compiler/aot_file/elf_reader.cpp +++ b/ecmascript/compiler/aot_file/elf_reader.cpp @@ -32,7 +32,7 @@ bool ElfReader::VerifyELFHeader(uint32_t version, bool strictMatch) << header.e_ident[llvm::ELF::EI_MAG2] << header.e_ident[llvm::ELF::EI_MAG3]; return false; } - if (!base::FileHeader::VerifyVersion("Elf ", header.e_version, version, strictMatch)) { + if (!base::FileHeaderBase::VerifyVersion("Elf ", header.e_version, version, strictMatch)) { return false; } return true; @@ -256,15 +256,20 @@ void ElfReader::SeparateTextSections(std::vector &des, { for (size_t i = 0; i < des.size(); ++i) { auto moduleInfo = GetCurModuleInfo(i, moduleInfoOffset); - secOffset = AlignUp(secOffset, TEXT_SEC_ALIGN); - uint32_t rodataSize = moduleInfo->rodataSize; - if (rodataSize > 0) { - des[i].SetSecAddrAndSize(ElfSecName::RODATA_CST8, secAddr + secOffset, rodataSize); - secOffset += rodataSize; + secOffset = AlignUp(secOffset, AOTFileInfo::PAGE_ALIGN); + uint32_t rodataSizeBeforeText = moduleInfo->rodataSizeBeforeText; + uint32_t rodataSizeAfterText = moduleInfo->rodataSizeAfterText; + if (rodataSizeBeforeText != 0) { + secOffset += rodataSizeBeforeText; + secOffset = AlignUp(secOffset, AOTFileInfo::TEXT_SEC_ALIGN); } uint32_t textSize = moduleInfo->textSize; des[i].SetSecAddrAndSize(ElfSecName::TEXT, secAddr + secOffset, textSize); secOffset += textSize; + if (rodataSizeAfterText != 0) { + secOffset = AlignUp(secOffset, AOTFileInfo::DATA_SEC_ALIGN); + secOffset += rodataSizeAfterText; + } } } @@ -294,17 +299,25 @@ void ElfReader::SeparateTextSections(BinaryBufferParser &parser, { for (size_t i = 0; i < des.size(); ++i) { auto moduleInfo = moduleInfo_[i]; - secOffset = AlignUp(secOffset, TEXT_SEC_ALIGN); - uint32_t rodataSize = moduleInfo.rodataSize; - if (rodataSize > 0) { - parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), rodataSize, curShOffset + secOffset); - des[i].SetSecAddrAndSize(ElfSecName::RODATA_CST8, secAddr + secOffset, rodataSize); - secOffset += rodataSize; + secOffset = AlignUp(secOffset, AOTFileInfo::PAGE_ALIGN); + uint32_t rodataSizeBeforeText = moduleInfo.rodataSizeBeforeText; + uint32_t rodataSizeAfterText = moduleInfo.rodataSizeAfterText; + if (rodataSizeBeforeText != 0) { + parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), rodataSizeBeforeText, + curShOffset + secOffset); + secOffset += rodataSizeBeforeText; + secOffset = AlignUp(secOffset, 
AOTFileInfo::TEXT_SEC_ALIGN); } uint32_t textSize = moduleInfo.textSize; parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), textSize, curShOffset + secOffset); des[i].SetSecAddrAndSize(ElfSecName::TEXT, secAddr + secOffset, textSize); secOffset += textSize; + if (rodataSizeAfterText != 0) { + secOffset = AlignUp(secOffset, AOTFileInfo::DATA_SEC_ALIGN); + parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), rodataSizeAfterText, + curShOffset + secOffset); + secOffset += rodataSizeAfterText; + } } } diff --git a/ecmascript/compiler/aot_file/elf_reader.h b/ecmascript/compiler/aot_file/elf_reader.h index 24c2752782705107c1766d142a5eca461fd8b03a..4a6ef0a7f44f3bf292ec095503a83c6bd6f3e997 100644 --- a/ecmascript/compiler/aot_file/elf_reader.h +++ b/ecmascript/compiler/aot_file/elf_reader.h @@ -53,7 +53,6 @@ private: return moduleInfoSize / sizeof(ModuleSectionDes::ModuleRegionInfo); } - static constexpr uint32_t TEXT_SEC_ALIGN = 4096; static constexpr uint32_t ASMSTUB_MODULE_NUM = 3; ExecutedMemoryAllocator::ExeMem stubsMem_ {}; MemMap fileMapMem_ {}; diff --git a/ecmascript/compiler/aot_file/module_section_des.h b/ecmascript/compiler/aot_file/module_section_des.h index 5e292e21edb0960b88aefa3adc15d3bd455f44f5..b92e67b8c5a62cff788dadccd4a0f501511e6ade 100644 --- a/ecmascript/compiler/aot_file/module_section_des.h +++ b/ecmascript/compiler/aot_file/module_section_des.h @@ -18,6 +18,7 @@ #include #include +#include "ecmascript/base/number_helper.h" #include "ecmascript/compiler/aot_file/binary_buffer_parser.h" #include "ecmascript/compiler/binary_section.h" @@ -27,12 +28,45 @@ public: struct ModuleRegionInfo { uint32_t startIndex {0}; uint32_t funcCount {0}; - uint32_t rodataSize {0}; + uint32_t rodataSizeBeforeText {0}; + uint32_t rodataSizeAfterText {0}; uint32_t textSize {0}; uint32_t stackMapSize {0}; }; static std::string GetSecName(ElfSecName idx); + void UpdateRODataInfo(uint64_t textAddr, uint64_t &addrBeforeText, uint32_t &sizeBeforeText, + uint64_t &addrAfterText, uint32_t &sizeAfterText, ElfSecName sec) const + { + if (sectionsInfo_.find(sec) == sectionsInfo_.end()) { + return; + } + uint64_t curSectionAddr = GetSecAddr(sec); + ASSERT(curSectionAddr != 0); + ASSERT(curSectionAddr != textAddr); + if (curSectionAddr < textAddr) { + addrBeforeText = (curSectionAddr < addrBeforeText) ? curSectionAddr : addrBeforeText; + sizeBeforeText += GetSecSize(sec); + } else { + addrAfterText = (curSectionAddr < addrAfterText) ? 
curSectionAddr : addrAfterText; + sizeAfterText += GetSecSize(sec); + } + } + + std::tuple GetMergedRODataAddrAndSize(uint64_t textAddr) const + { + uint64_t addrBeforeText = base::MAX_UINT64_VALUE; + uint32_t sizeBeforeText = 0; + uint64_t addrAfterText = base::MAX_UINT64_VALUE; + uint32_t sizeAfterText = 0; + for (uint8_t i = static_cast(ElfSecName::RODATA); i <= static_cast(ElfSecName::RODATA_CST32); + i++) { + UpdateRODataInfo(textAddr, addrBeforeText, sizeBeforeText, addrAfterText, sizeAfterText, + static_cast(i)); + } + return std::make_tuple(addrBeforeText, sizeBeforeText, addrAfterText, sizeAfterText); + } + void SetArkStackMapPtr(std::shared_ptr ptr) { arkStackMapPtr_ = std::move(ptr); diff --git a/ecmascript/compiler/aot_file/stub_file_info.cpp b/ecmascript/compiler/aot_file/stub_file_info.cpp index 5fabb372fccc8584664f47c02095518c0aa52c8a..dae836b24af5a52a8ea782ce4db60893d64e9cb0 100644 --- a/ecmascript/compiler/aot_file/stub_file_info.cpp +++ b/ecmascript/compiler/aot_file/stub_file_info.cpp @@ -49,7 +49,7 @@ void StubFileInfo::Save(const std::string &filename, Triple triple) ElfBuilder builder(des_, GetDumpSectionNames()); llvm::ELF::Elf64_Ehdr header; - builder.PackELFHeader(header, base::FileHeader::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); + builder.PackELFHeader(header, base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); file.write(reinterpret_cast(&header), sizeof(llvm::ELF::Elf64_Ehdr)); builder.PackELFSections(file); builder.PackELFSegment(file); diff --git a/ecmascript/compiler/array_bounds_check_elimination.cpp b/ecmascript/compiler/array_bounds_check_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ae4b38a8931db56aee78fa1475553a99040b8645 --- /dev/null +++ b/ecmascript/compiler/array_bounds_check_elimination.cpp @@ -0,0 +1,944 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ecmascript/compiler/array_bounds_check_elimination.h"
+
+namespace panda::ecmascript::kungfu {
+void ArrayBoundsCheckElimination::Run()
+{
+    bounds_.resize(circuit_->GetMaxGateId() + 1, nullptr); // 1: +1 for size
+    indexCheckInfo_.resize(circuit_->GetMaxGateId() + 1, nullptr);
+    graphLinearizer_.SetScheduleJSOpcode();
+    graphLinearizer_.LinearizeGraph();
+
+    CalcBounds(graphLinearizer_.GetEntryRegion(), nullptr);
+
+    if (IsLogEnabled()) {
+        LOG_COMPILER(INFO) << "";
+        LOG_COMPILER(INFO) << "\033[34m"
+                           << "===================="
+                           << " After array bounds check elimination "
+                           << "[" << GetMethodName() << "]"
+                           << "===================="
+                           << "\033[0m";
+        circuit_->PrintAllGatesWithBytecode();
+        LOG_COMPILER(INFO) << "\033[34m" << "========================= End ==========================" << "\033[0m";
+    }
+}
+
+/*
+    i_lower + c_lower <= x <= i_upper + c_upper
+    Initially, when nothing about the bounds is known yet, every instruction has the bounds:
+    MIN <= x <= MAX
+*/
+ArrayBoundsCheckElimination::Bound::Bound()
+{
+    lower_ = INT_MIN;
+    upper_ = INT_MAX;
+    lowerGate_ = Circuit::NullGate();
+    upperGate_ = Circuit::NullGate();
+}
+
+ArrayBoundsCheckElimination::Bound::Bound(int lower, GateRef lowerGate, int upper, GateRef upperGate)
+{
+    lower_ = lower;
+    upper_ = upper;
+    lowerGate_ = lowerGate;
+    upperGate_ = upperGate;
+}
+
+ArrayBoundsCheckElimination::Bound::Bound(TypedBinOp op, GateRef gate, int constant)
+{
+    switch (op) {
+        case TypedBinOp::TYPED_EQ:
+            lower_ = constant;
+            lowerGate_ = gate;
+            upper_ = constant;
+            upperGate_ = gate;
+            break;
+        case TypedBinOp::TYPED_NOTEQ:
+            lower_ = INT_MIN;
+            lowerGate_ = Circuit::NullGate();
+            upper_ = INT_MAX;
+            upperGate_ = Circuit::NullGate();
+            if (gate == Circuit::NullGate()) {
+                if (constant == INT_MIN) {
+                    lower_++;
+                }
+                if (constant == INT_MAX) {
+                    upper_--;
+                }
+            }
+            break;
+        case TypedBinOp::TYPED_GREATEREQ:
+            lower_ = constant;
+            lowerGate_ = gate;
+            upper_ = INT_MAX;
+            upperGate_ = Circuit::NullGate();
+            break;
+        case TypedBinOp::TYPED_LESSEQ:
+            lower_ = INT_MIN;
+            lowerGate_ = Circuit::NullGate();
+            upper_ = constant;
+            upperGate_ = gate;
+            break;
+        default:
+            UNREACHABLE();
+    }
+}
+
+ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::AndOp(Bound *bound, Bound *b)
+{
+    // Update lower bound
+    if (bound->lowerGate_ == b->lowerGate_) {
+        bound->lower_ = std::max(bound->lower_, b->lower_);
+    }
+    if (b->HasLower()) {
+        bool set = true;
+        if (bound->lowerGate_ != Circuit::NullGate() && b->lowerGate_ != Circuit::NullGate()) {
+            auto boundLowerGateRegion = graphLinearizer_.GateToRegion(bound->lowerGate_);
+            auto bLowerGateRegion = graphLinearizer_.GateToRegion(b->lowerGate_);
+            int32_t boundLowerDominatorDepth = -1;
+            if (boundLowerGateRegion) {
+                boundLowerDominatorDepth = boundLowerGateRegion->GetDepth();
+            }
+            int32_t bLowerDominatorDepth = -1;
+            if (bLowerGateRegion) {
+                bLowerDominatorDepth = bLowerGateRegion->GetDepth();
+            }
+            set = (boundLowerDominatorDepth > bLowerDominatorDepth);
+        }
+        if (set) {
+            bound->lower_ = b->lower_;
+            bound->lowerGate_ = b->lowerGate_;
+        }
+    }
+
+    // Update upper bound
+    if (bound->upperGate_ == b->upperGate_) {
+        bound->upper_ = std::min(bound->upper_, b->upper_);
+    }
+    if (b->HasUpper()) {
+        bool set = true;
+        if (bound->upperGate_ != Circuit::NullGate() && b->upperGate_ != Circuit::NullGate()) {
+            auto boundUpperGateRegion = graphLinearizer_.GateToRegion(bound->upperGate_);
+            auto bUpperGateRegion = graphLinearizer_.GateToRegion(b->upperGate_);
+            int32_t
boundUpperDominatorDepth = -1; + if (boundUpperGateRegion) { + boundUpperDominatorDepth = boundUpperGateRegion->GetDepth(); + } + int32_t bUpperDominatorDepth = -1; + if (bUpperGateRegion) { + bUpperDominatorDepth = bUpperGateRegion->GetDepth(); + } + set = (boundUpperDominatorDepth > bUpperDominatorDepth); + } + if (set) { + bound->upper_ = b->upper_; + bound->upperGate_ = b->upperGate_; + } + } + + return bound; +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::OrOp(Bound *bound, Bound *b) +{ + // Update lower bound + if (bound->lowerGate_ != b->lowerGate_) { + bound->lowerGate_ = Circuit::NullGate(); + bound->lower_ = INT_MIN; + } else { + bound->lower_ = std::min(bound->lower_, b->lower_); + } + // Update upper bound + if (bound->upperGate_ != b->upperGate_) { + bound->upperGate_ = Circuit::NullGate(); + bound->upper_ = INT_MAX; + } else { + bound->upper_ = std::max(bound->upper_, b->upper_); + } + + return bound; +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::DoConstant(GateRef gate) +{ + int constValue = acc_.GetConstantValue(gate); + return new Bound(constValue, Circuit::NullGate(), constValue, Circuit::NullGate()); +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::DoArithmeticOp(GateRef gate) +{ + auto op = acc_.GetTypedBinaryOp(gate); + auto x = acc_.GetValueIn(gate, 0); + auto y = acc_.GetValueIn(gate, 1); + if (!acc_.IsConstant(x) || !acc_.IsConstant(y)) { // One of the operands must be non-constant! + if (op == TypedBinOp::TYPED_AND && (acc_.IsConstant(x) || acc_.IsConstant(y))) { + int constValue = 0; + if (acc_.IsConstant(x)) { + constValue = acc_.GetConstantValue(x); + } else { + constValue = acc_.GetConstantValue(y); + } + if (constValue >= 0) { + return new Bound(0, Circuit::NullGate(), constValue, Circuit::NullGate()); + } + } else if (op == TypedBinOp::TYPED_MOD) { + Bound *xBound = GetBound(x); + if (xBound->Lower() >= 0 && xBound->LowerGate() == Circuit::NullGate() && IsArrayLength(y)) { + return new Bound(0, Circuit::NullGate(), -1, y); + } else if (xBound->HasLower() && xBound->Lower() >= 0 && acc_.IsConstant(y) + && acc_.GetConstantValue(y) != 0) { + int constValue = acc_.GetConstantValue(y); + if (constValue != INT_MIN) { + return new Bound(0, Circuit::NullGate(), abs(constValue) - 1, Circuit::NullGate()); + } else { + return new Bound(); + } + } else { + return new Bound(); + } + } else if (((acc_.IsConstant(x) || acc_.IsConstant(y)) && op == TypedBinOp::TYPED_ADD) || + (acc_.IsConstant(y) && op == TypedBinOp::TYPED_SUB)) { + // x is constant, y is variable. 
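+            // NOTE (editor): the swap below normalizes the operands so the constant ends up
+            // in x; for TYPED_SUB only a constant y reaches this branch, and it is folded by
+            // negating constValue further down.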
+ if (acc_.IsConstant(y)) { + std::swap(x, y); + } + + // Add, Constant now in x + int constValue = acc_.GetConstantValue(x); + if (op == TypedBinOp::TYPED_SUB) { + constValue = -constValue; + } + + Bound *bound = GetBound(y); + if (!bound->HasUpper() || !bound->HasLower()) { + return new Bound(); + } + + int lower = bound->Lower(); + int upper = bound->Upper(); + int newLower = lower + constValue; + int newUpper = upper + constValue; + bool overflow = ((constValue < 0 && (newLower > lower)) || + (constValue > 0 && (newUpper < upper))); + if (overflow) { + return new Bound(); + } else { + return new Bound(newLower, bound->LowerGate(), newUpper, bound->UpperGate()); + } + } else if (op == TypedBinOp::TYPED_SUB) { + Bound *bound = GetBound(x); + if (bound->LowerGate() == y) { + return new Bound(TypedBinOp::TYPED_GREATEREQ, Circuit::NullGate(), bound->Lower()); + } else { + return new Bound(); + } + } else { + return new Bound(); + } + } + return nullptr; +} + +bool ArrayBoundsCheckElimination::InLoop(GateRef loopHeader, GateRef gate) +{ + while (gate != acc_.GetStateRoot()) { + if (gate == loopHeader) { + return true; + } else { + gate = acc_.GetState(gate, 0); + } + } + return false; +} + +/* +Do phi +*/ +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::DoPhi(GateRef gate) +{ + Bound *bound = nullptr; + size_t valueSize = acc_.GetInValueCount(gate); + GateRef stateIn = acc_.GetState(gate); + bool isLoopHead = acc_.IsLoopHead(stateIn); + bool hasUpper = true; + bool hasLower = true; + for (size_t i = 0; i < valueSize; i++) { + GateRef value = acc_.GetValueIn(gate, i); + // Check if instruction is connected with phi itself + if (isLoopHead && acc_.GetOpCode(value) == OpCode::TYPED_UNARY_OP + && InLoop(stateIn, value)) { + auto unOp = acc_.GetTypedUnAccessor(value).GetTypedUnOp(); + switch (unOp) { + case TypedUnOp::TYPED_INC: + hasUpper = false; + break; + case TypedUnOp::TYPED_DEC: + hasLower = false; + break; + default: + break; + } + continue; + } + + Bound *vBound = GetBound(value); + Bound *curBound; + GateRef curGate; + int curConstant; + GetInstrAndConstValueFromOp(value, curGate, curConstant); + if (!vBound->HasUpper() || !vBound->HasLower()) { + curBound = new Bound(curConstant, curGate, curConstant, curGate); + } else { + curBound = vBound; + } + + if (curBound) { + if (!bound) { + bound = curBound->Copy(); + } else { + bound = OrOp(bound, curBound); + } + } else { + bound = new Bound(); + break; + } + } + + if (!hasUpper) { + bound->RemoveUpper(); + } + if (!hasLower) { + bound->RemoveLower(); + } + return bound; +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::VisitGate(GateRef gate) +{ + OpCode op = acc_.GetOpCode(gate); + switch (op) { + case OpCode::CONSTANT: + return DoConstant(gate); + case OpCode::TYPED_BINARY_OP: + return DoArithmeticOp(gate); + case OpCode::VALUE_SELECTOR: + return DoPhi(gate); + default: + return nullptr; + } + return nullptr; +} + +// y = a + b - c ..... 
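+// NOTE (editor): peels constant add/sub chains off `gate`, accumulating the folded constant
+// into `constValue` and leaving the variable base in `instrValue` (NullGate() when the whole
+// expression folds to a constant). Illustrative: for an index `i + 3 - 1` this yields
+// instrValue = i, constValue = 2.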
+void ArrayBoundsCheckElimination::GetInstrAndConstValueFromOp(GateRef gate, GateRef& instrValue, int& constValue) +{ + int base = 0; + constValue = 0; + instrValue = gate; + if (acc_.IsConstant(gate)) { + constValue = acc_.GetConstantValue(gate); + instrValue = Circuit::NullGate(); + } else { + while (acc_.GetOpCode(gate) == OpCode::TYPED_BINARY_OP) { + auto op = acc_.GetTypedBinaryOp(gate); + auto x = acc_.GetValueIn(gate, 0); + auto y = acc_.GetValueIn(gate, 1); + GateRef other = x; + if ((op == TypedBinOp::TYPED_ADD && (acc_.IsConstant(x) || acc_.IsConstant(y))) + || (op == TypedBinOp::TYPED_SUB && acc_.IsConstant(y))) { + int value = 0; + if (acc_.IsConstant(x)) { + value = acc_.GetConstantValue(x); + other = y; + } else { + value = acc_.GetConstantValue(y); + other = x; + } + + while (acc_.GetOpCode(other) == OpCode::INDEX_CHECK) { // Get IndexCheck Index + other = acc_.GetValueIn(other, 1); + } + + if (op == TypedBinOp::TYPED_SUB) { + value = -value; + } + + if (acc_.IsConstant(other)) { + base += value + acc_.GetConstantValue(other); + constValue = base; + instrValue = Circuit::NullGate(); + break ; + } else { + base += value; + constValue = base; + instrValue = other; + gate = other; + } + } else { + break; + } + } + } +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::GetBound(GateRef gate) +{ + if (gate == Circuit::NullGate()) { + return nullptr; + } + if (!bounds_[acc_.GetId(gate)]) { + bounds_[acc_.GetId(gate)] = new BoundStack(chunk_); + Bound *bound = VisitGate(gate); + if (bound) { + bounds_[acc_.GetId(gate)]->push_back(bound); + } + if (bounds_[acc_.GetId(gate)]->size() == 0) { + bounds_[acc_.GetId(gate)]->push_back(new Bound()); + } + } else if (bounds_[acc_.GetId(gate)]->size() == 0) { + return new Bound(); + } + return bounds_[acc_.GetId(gate)]->back(); +} + +void ArrayBoundsCheckElimination::UpdateBound(IntegerStack &pushed, GateRef gate, Bound *bound) +{ + if (acc_.IsConstant(gate)) { + // No bound update for constants + return; + } + if (!bounds_[acc_.GetId(gate)]) { + GetBound(gate); + } + Bound* top = nullptr; + if (bounds_[acc_.GetId(gate)]->size() > 0) { + top = bounds_[acc_.GetId(gate)]->back(); + } + if (top) { + bound = AndOp(bound, top); + } + bounds_[acc_.GetId(gate)]->push_back(bound); + pushed.push_back(acc_.GetId(gate)); +} + +/* +x op y + constValue +for example: + x >= Circuit::NullGate() + 0 + x < Length + 0 +*/ +void ArrayBoundsCheckElimination::UpdateBound(IntegerStack &pushed, GateRef x, TypedBinOp op, + GateRef instrValue, int constValue) +{ + if (op == TypedBinOp::TYPED_GREATER) { // x < 3 -> x <= 4 + op = TypedBinOp::TYPED_GREATEREQ; + // Cannot Represent c > INT_MAX, do not update bounds + if (constValue == INT_MAX && instrValue == Circuit::NullGate()) { + return; + } else { + constValue++; + } + } else if (op == TypedBinOp::TYPED_LESS) { // x > 3 -> x >= 2 + op = TypedBinOp::TYPED_LESSEQ; + // Cannot Represent c < INT_MIN, do not update bounds + if (constValue == INT_MIN && instrValue == Circuit::NullGate()) { + return; + } else { + constValue--; + } + } + Bound *bound = new Bound(op, instrValue, constValue); + UpdateBound(pushed, x, bound); +} + +// Add if condition when x is a variable, x op y +void ArrayBoundsCheckElimination::AddIfCondition(IntegerStack &pushed, GateRef x, GateRef y, TypedBinOp op) +{ + if (acc_.IsConstant(x)) { // x must be non-constant! 
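+        // NOTE (editor): bounds are only tracked for variables; a constant x already gets its
+        // exact [c, c] bound from DoConstant, so a relational condition adds nothing here.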
+ return; + } + int constValue; + GateRef instrValue; + GetInstrAndConstValueFromOp(y, instrValue, constValue); + UpdateBound(pushed, x, op, instrValue, constValue); +} + +bool ArrayBoundsCheckElimination::IsArrayLength(GateRef gate) +{ + if (gate == Circuit::NullGate()) { + return false; + } + OpCode op = acc_.GetOpCode(gate); + switch (op) { + case OpCode::LOAD_ARRAY_LENGTH: + case OpCode::LOAD_TYPED_ARRAY_LENGTH: + return true; + default: + return false; + } + UNREACHABLE(); + return false; +} + +bool ArrayBoundsCheckElimination::InArrayBound(Bound *bound, GateRef length, GateRef array) +{ + if (!bound || array == Circuit::NullGate()) { + return false; + } + + if (bound->Lower() >= 0 && bound->LowerGate() == Circuit::NullGate() && + bound->Upper() < 0 && bound->UpperGate() != Circuit::NullGate()) { + if (length != Circuit::NullGate() && bound->UpperGate() == length) { + return true; + } + } + + // TODO: Solve Multidimensional array. To fix a[i][j] case, maybe LOAD_ELEMENT are not the same one. + return false; +} + +void ArrayBoundsCheckElimination::RemoveIndexCheck(GateRef gate) +{ + ASSERT(acc_.GetDependCount(gate) == 1); + ASSERT(acc_.GetStateCount(gate) == 1); + ASSERT(acc_.GetInValueCount(gate) == 2); + + GateRef depend = acc_.GetDep(gate); + GateRef state = acc_.GetState(gate); + GateRef value = acc_.GetValueIn(gate, 1); // Index + + acc_.ReplaceGate(gate, state, depend, value); +} + +bool ArrayBoundsCheckElimination::CheckLoop(GateRef array, GateRef lowerGate, int lower, GateRef upperGate, int upper) +{ + if (IsArrayLength(upperGate) && acc_.GetValueIn(upperGate, 0) == array) { + if (upper >= 0) { + return false; + } + } + if (IsArrayLength(lowerGate) && acc_.GetValueIn(lowerGate, 0) == array) { + if (lower >= 0) { + return false; + } + } + return true; +} + +bool ArrayBoundsCheckElimination::LoopInvariant(GateRegion *loopHeader, GateRef gate) +{ + if (gate == Circuit::NullGate()) { + return true; + } + auto gateRegion = graphLinearizer_.GateToRegion(gate); + if (!gateRegion) { + return true; + } + GateRegion* g = loopHeader->GetDominator(); + while (g != nullptr) { + if (g == gateRegion) { + return true; + } + if (g == g->GetDominator()) { // entry + break ; + } + g = g->GetDominator(); + } + return false; +} + +GateRef ArrayBoundsCheckElimination::Predicate(GateRef left, TypedBinOp cond, GateRef right) +{ + return builder_.InsertRangeCheckPredicate(left, cond, right); +} + +GateRef ArrayBoundsCheckElimination::PredicateCmpWithConst(GateRef left, TypedBinOp cond, int32_t right) +{ + GateRef constGate = builder_.Int32(right); + return Predicate(left, cond, constGate); +} + +GateRef ArrayBoundsCheckElimination::PredicateAdd(GateRef left, int32_t leftConst, TypedBinOp cond, GateRef right) +{ + GateRef constGate = builder_.Int32(leftConst); + GateRef binaryOpGate = builder_.InsertTypedBinaryop(left, constGate, GateType::NumberType(), + GateType::NumberType(), GateType::AnyType(), + PGOSampleType::NoneType(), TypedBinOp::TYPED_ADD); + return Predicate(binaryOpGate, cond, right); +} + +GateRef ArrayBoundsCheckElimination::PredicateAddCmpWithConst(GateRef left, int32_t leftConst, + TypedBinOp cond, int32_t right) +{ + GateRef constGate = builder_.Int32(right); + return PredicateAdd(left, leftConst, cond, constGate); +} + +void ArrayBoundsCheckElimination::LoopInvariantMotionForIndexCheck(GateRef array, GateRef length, + GateRef lowerGate, int lower, + GateRef upperGate, int upper, + bool isTypedArray) +{ + // lower > 0 + if (lowerGate != Circuit::NullGate()) { + if (lower == 0) { + // 
lowerGate >= 0 + PredicateCmpWithConst(lowerGate, TypedBinOp::TYPED_GREATEREQ, 0); + } else if (lower > 0) { + // lowerGate + lower >= 0 + PredicateAddCmpWithConst(lowerGate, lower, TypedBinOp::TYPED_GREATEREQ, 0); + } else { + // lowerGate + lower < 0 + // lower < 0 + // lowerGate < -lower + lower++; + lower = -lower; + PredicateCmpWithConst(lowerGate, TypedBinOp::TYPED_GREATER, lower); + } + } + + // LOAD LENGTH if necessary + if (length == Circuit::NullGate()) { + length = builder_.InsertLoadArrayLength(array, isTypedArray); + } + + if (upperGate == Circuit::NullGate()) { + ASSERT(upper >= 0); + PredicateCmpWithConst(length, TypedBinOp::TYPED_GREATER, upper); + } else { + if (upper == 0) { + Predicate(upperGate, TypedBinOp::TYPED_LESS, length); + } else if (upper > 0) { + // upperGate + upper < length + PredicateAdd(upperGate, upper, TypedBinOp::TYPED_LESS, length); + } else { + // upperGate + upper < length + // upper < 0 + // upperGate < length + (-upper) + PredicateAdd(length, -upper, TypedBinOp::TYPED_GREATER, upperGate); + } + } +} + +void ArrayBoundsCheckElimination::ProcessIndexCheck(GateRegion *loopHeader, GateRef gate) +{ + auto length = acc_.GetValueIn(gate, 0); + auto array = acc_.GetValueIn(length, 0); + auto index = acc_.GetValueIn(gate, 1); + Bound *indexBound = GetBound(index); + if (!indexBound->HasLower() || !indexBound->HasUpper()) { + return; + } + + if (InArrayBound(indexBound, length, array)) { + RemoveIndexCheck(gate); + } else if (loopHeader) { + if (!LoopInvariant(loopHeader, array) + || !LoopInvariant(loopHeader, indexBound->LowerGate()) + || !LoopInvariant(loopHeader, indexBound->UpperGate()) + || (indexBound->LowerGate() == Circuit::NullGate() && indexBound->Lower() < 0) + || (indexBound->UpperGate() == Circuit::NullGate() && indexBound->Upper() < 0)) { + return; + } + + ASSERT(length != Circuit::NullGate()); + bool isTypedArray = false; + if (acc_.GetOpCode(length) == OpCode::LOAD_TYPED_ARRAY_LENGTH) { + isTypedArray = true; + } + + // Length instrution + if (!LoopInvariant(loopHeader, length)) { + // Generate length instruction yourself + length = Circuit::NullGate(); + } + + // Insert Before loopHeader State, and if find IF_TRUE and IF_FALSE, insert after the DEPEND_RELAY + // if find MERGE, insert after DEPEND_SELECTOR + GateRef insertAfter = acc_.GetState(loopHeader->GetState(), 0); // after end + GateRef stateIn = insertAfter; + GateRef dependIn = insertAfter; + acc_.GetStateInAndDependIn(insertAfter, stateIn, dependIn); + + if (!CheckLoop(array, indexBound->LowerGate(), indexBound->Lower(), + indexBound->UpperGate(), indexBound->Upper())) { + return; + } + + Environment env(stateIn, dependIn, {}, circuit_, &builder_); + LoopInvariantMotionForIndexCheck(array, length, indexBound->LowerGate(), indexBound->Lower(), + indexBound->UpperGate(), indexBound->Upper(), isTypedArray); + RemoveIndexCheck(gate); + } +} + +void ArrayBoundsCheckElimination::ProcessIf(IntegerStack &pushed, GateRegion *parent, OpCode cond) +{ + auto& gateLists = parent->GetGates(); + for (int i = gateLists.size() - 1; i >= 0; i--) { // Found the last BinaryOp + GateRef gate = gateLists[i]; + if (gate == Circuit::NullGate()) continue; + OpCode opGate = acc_.GetOpCode(gate); + if (opGate != OpCode::TYPED_BINARY_OP) { + continue ; + } + + TypedBinOp op = acc_.GetTypedBinaryOp(gate); + GateRef x = acc_.GetValueIn(gate, 0); + GateRef y = acc_.GetValueIn(gate, 1); + + switch (op) { + case TypedBinOp::TYPED_LESS: + case TypedBinOp::TYPED_LESSEQ: + case TypedBinOp::TYPED_GREATER: + case 
TypedBinOp::TYPED_GREATEREQ: + case TypedBinOp::TYPED_EQ: + case TypedBinOp::TYPED_NOTEQ: + if (cond == OpCode::IF_TRUE) { + op = GateMetaData::GetRevCompareOp(op); + } + AddIfCondition(pushed, x, y, op); + AddIfCondition(pushed, y, x, GateMetaData::GetSwapCompareOp(op)); + break; + default: + break; + } + break; + } +} + +bool ArrayBoundsCheckElimination::Contain(GateLists &gateLists, GateRef gate) +{ + for (size_t i = 0; i < gateLists.size(); i++) { + if (gateLists[i] == gate) { + return true; + } + } + return false; +} + +void ArrayBoundsCheckElimination::AddAccessIndexedInfo(GateLists &indices, GateRef gate, int idx, GateRef indexCheck) +{ + IndexCheckInfo *indexCheckInfo = indexCheckInfo_[acc_.GetId(gate)]; + if (indexCheckInfo == nullptr) { + indexCheckInfo = new IndexCheckInfo(chunk_); + indexCheckInfo_[acc_.GetId(gate)] = indexCheckInfo; + indices.push_back(gate); + indexCheckInfo->min_ = idx; + indexCheckInfo->max_ = idx; + } else if (idx >= indexCheckInfo->min_ && idx <= indexCheckInfo->max_) { + RemoveIndexCheck(indexCheck); + return; + } + indexCheckInfo->min_ = std::min(indexCheckInfo->min_, idx); + indexCheckInfo->max_ = std::max(indexCheckInfo->max_, idx); + indexCheckInfo->list_.push_back(indexCheck); +} + +void ArrayBoundsCheckElimination::InBlockMotion(GateLists &indexChecked, GateLists &arrays) +{ + GateLists indices(chunk_); + for (size_t i = 0; i < arrays.size(); i++) { + int maxConstant = -1; + GateLists listConstant(chunk_); + GateRef arrayGate = arrays[i]; + for (size_t j = 0; j < indexChecked.size(); j++) { + GateRef indexCheck = indexChecked[j]; + // INDEX_CHECK may be dead + if (acc_.GetOpCode(indexCheck) != OpCode::INDEX_CHECK) { + continue; + } + GateRef length = acc_.GetValueIn(indexCheck, 0); + GateRef index = acc_.GetValueIn(indexCheck, 1); + GateRef array = acc_.GetValueIn(length, 0); + if (array != arrayGate) { + continue; + } + if (acc_.IsConstant(index)) { + int constValue = acc_.GetConstantValue(index); + if (constValue >= 0 && constValue <= maxConstant) { + RemoveIndexCheck(indexCheck); + } else if (constValue >= 0 && constValue > maxConstant) { + maxConstant = constValue; + listConstant.push_back(indexCheck); + } + } else { + int lastInteger; + GateRef lastGate; + GetInstrAndConstValueFromOp(index, lastGate, lastInteger); + if (lastInteger >= 0 && lastGate == Circuit::NullGate()) { // IsConstant + if (lastInteger <= maxConstant) { + RemoveIndexCheck(indexCheck); + } else { + maxConstant = lastInteger; + listConstant.push_back(indexCheck); + } + } else if (lastGate != Circuit::NullGate()) { + AddAccessIndexedInfo(indices, lastGate, lastInteger, indexCheck); + } // when lastInteger < 0, dont remove IndexCheck + } + } + + // Iterate over all different indices + for (size_t j = 0; j < indices.size(); j++) { + GateRef index = indices[j]; + + IndexCheckInfo *info = indexCheckInfo_[acc_.GetId(index)]; + ASSERT(info != nullptr); + + // maybe index < 0, max > 0 + // max + index in [0, a.length) + // min + index overflow !!!, min + index > 0 + // so, min + index >= INT_MIN, min >= INT_MIN - index + // max in [-index, a.length - index) + // min >= INT_MIN + max + bool rangeCond = (info->max_ < 0 || info->max_ + INT_MIN <= info->min_); + if (info->list_.size() > 2 && rangeCond) { + GateRef insertAfter = info->list_.front(); + GateRef length = acc_.GetValueIn(insertAfter, 0); + ASSERT(length != Circuit::NullGate()); + + Environment env(insertAfter, circuit_, &builder_); + + // Calculate lower bound + GateRef lowerCompare = index; + if (info->min_ > 0) { + GateRef 
minGate = builder_.Int32(info->min_); + lowerCompare = builder_.InsertTypedBinaryop(lowerCompare, minGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOSampleType::NoneType(), + TypedBinOp::TYPED_ADD); + } else if (info->min_ < 0) { + GateRef minGate = builder_.Int32(-info->min_); + lowerCompare = builder_.InsertTypedBinaryop(lowerCompare, minGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOSampleType::NoneType(), + TypedBinOp::TYPED_SUB); + } + + PredicateCmpWithConst(lowerCompare, TypedBinOp::TYPED_GREATEREQ, 0); + + // Calculate upper bound + GateRef upperCompare = index; + if (info->max_ != 0) { + if (info->max_ > 0) { + GateRef maxGate = builder_.Int32(info->max_); + upperCompare = builder_.InsertTypedBinaryop(upperCompare, maxGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOSampleType::NoneType(), + TypedBinOp::TYPED_ADD); + } else if (info->max_ < 0) { + GateRef maxGate = builder_.Int32(-info->max_); + upperCompare = builder_.InsertTypedBinaryop(upperCompare, maxGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOSampleType::NoneType(), + TypedBinOp::TYPED_SUB); + } + } + + Predicate(upperCompare, TypedBinOp::TYPED_LESS, length); + for (auto& indexCheck: (info->list_)) { + RemoveIndexCheck(indexCheck); + } + } + } + + // index only constant + if (listConstant.size() > 1) { + GateRef firIndexCheckGate = listConstant.front(); + Environment env(firIndexCheckGate, circuit_, &builder_); + GateRef length = acc_.GetValueIn(firIndexCheckGate, 0); + ASSERT(length != Circuit::NullGate()); + ASSERT(maxConstant >= 0); + PredicateCmpWithConst(length, TypedBinOp::TYPED_GREATER, maxConstant); // length > index + for (size_t j = 0; j < listConstant.size(); j++) { + GateRef indexCheck = listConstant[j]; + RemoveIndexCheck(indexCheck); + } + } + + for (size_t j = 0; j < indices.size(); j++) { + indexCheckInfo_[acc_.GetId(indices[j])] = nullptr; + } + indices.clear(); + } +} + +void ArrayBoundsCheckElimination::CalcBounds(GateRegion *block, GateRegion *loopHeader) +{ + // Pushed stack for condition + IntegerStack pushed(chunk_); + + // Process If + GateRegion *parent = block->GetDominator(); + if (parent != nullptr) { + auto gate = block->GetGates().front(); + auto op = acc_.GetOpCode(gate); + if (op == OpCode::IF_TRUE || op == OpCode::IF_FALSE) { // Recognize If (including the condition in forloop) + ProcessIf(pushed, parent, op); + } + } + + GateLists indexChecked(chunk_); + GateLists arrays(chunk_); + + auto& gateList_ = block->GetGates(); + for (size_t i = 0; i < gateList_.size(); i++) { // Visit GateUnion + GateRef gate = gateList_[i]; + auto op = acc_.GetOpCode(gate); + if (op == OpCode::INDEX_CHECK) { + auto length = acc_.GetValueIn(gate, 0); + auto index = acc_.GetValueIn(gate, 1); + auto array = acc_.GetValueIn(length, 0); + + ProcessIndexCheck(loopHeader, gate); + indexChecked.push_back(gate); + + if (!Contain(arrays, array)) { + arrays.push_back(array); + } + + // Give IndexCheck a bound [0, Length - 1] + Bound *b = GetBound(index); + if (b->LowerGate() == Circuit::NullGate()) { // LowerBound is the Constant !!! 
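+                // NOTE (editor): an IndexCheck that survives ProcessIndexCheck guarantees the
+                // index is non-negative, so pin its constant lower bound to 0.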
+ UpdateBound(pushed, index, TypedBinOp::TYPED_GREATEREQ, Circuit::NullGate(), 0); + } + if (!b->HasUpper() && length != Circuit::NullGate()) { // default dont know the Length + UpdateBound(pushed, index, TypedBinOp::TYPED_LESS, length, 0); + } + } + } + + InBlockMotion(indexChecked, arrays); + + auto& dominatedRegions_ = block->GetDominatedRegions(); + for (size_t i = 0; i < dominatedRegions_.size(); i++) { + GateRegion *nex = dominatedRegions_[i]; + if (block->IsLoopHead() && (block->GetInnerLoopIndex() == nex->GetInnerLoopIndex() + || nex->GetLoopDepth() > block->GetLoopDepth())) { + CalcBounds(nex, block); + } else { + CalcBounds(nex, loopHeader); + } + } + + for (size_t i = 0; i < pushed.size(); i++) { + bounds_[pushed[i]]->pop_back(); + } +} +} \ No newline at end of file diff --git a/ecmascript/compiler/array_bounds_check_elimination.h b/ecmascript/compiler/array_bounds_check_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..3fe79e9f12b650e03c7a243e888d1d20e7e6235e --- /dev/null +++ b/ecmascript/compiler/array_bounds_check_elimination.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_ARRAY_BOUNDS_CHECK_ELIMINATION_H +#define ECMASCRIPT_COMPILER_ARRAY_BOUNDS_CHECK_ELIMINATION_H + +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/gate_meta_data.h" +#include "ecmascript/compiler/gate_accessor.h" +#include "ecmascript/compiler/graph_linearizer.h" +#include "ecmascript/compiler/pass_manager.h" +#include "ecmascript/mem/chunk_containers.h" + +namespace panda::ecmascript::kungfu { +class ArrayBoundsCheckElimination { +public: + ArrayBoundsCheckElimination(Circuit *circuit, bool enableLog, const std::string& name, Chunk* chunk) + : acc_(circuit), bounds_(chunk), circuit_(circuit), builder_(circuit), chunk_(chunk), enableLog_(enableLog), + graphLinearizer_(circuit, enableLog, name, chunk, true), methodName_(name), indexCheckInfo_(chunk) {} + + ~ArrayBoundsCheckElimination() = default; + void Run(); + +private: + class Bound { + public: + Bound(); + Bound(GateRef v); + Bound(int lower, GateRef lowerGate, int upper, GateRef upperGate); + Bound(TypedBinOp op, GateRef gate, int constant); + ~Bound(){}; + int Upper() + { + return upper_; + } + GateRef UpperGate() + { + return upperGate_; + } + int Lower() + { + return lower_; + } + GateRef LowerGate() + { + return lowerGate_; + } + bool HasUpper() + { + return upperGate_ != Circuit::NullGate() || upper_ < INT_MAX; + } + bool HasLower() + { + return lowerGate_ != Circuit::NullGate() || lower_ > INT_MIN; + } + void RemoveUpper() + { + upperGate_ = Circuit::NullGate(); + upper_ = INT_MAX; + } + void RemoveLower() + { + lowerGate_ = Circuit::NullGate(); + lower_ = INT_MIN; + } + bool IsSmaller(Bound *b) + { + if (b->LowerGate() != upperGate_) { + return false; + } + return upper_ < b->Lower(); + } + Bound* Copy() + { + return new Bound(lower_, lowerGate_, upper_, upperGate_); + } 
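+        // NOTE (editor): a Bound is the symbolic interval
+        //     lowerGate_ + lower_ <= x <= upperGate_ + upper_
+        // where NullGate() on either side makes that side a plain constant. For example,
+        // after `if (i < a.length)` the bound of i has upperGate_ = LOAD_ARRAY_LENGTH(a) and
+        // upper_ = -1, which is exactly the shape InArrayBound() matches to drop an IndexCheck.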
+ + private: + int upper_; + GateRef upperGate_; + int lower_; + GateRef lowerGate_; + + friend ArrayBoundsCheckElimination; + }; + + bool IsLogEnabled() const + { + return enableLog_; + } + + const std::string& GetMethodName() const + { + return methodName_; + } + + typedef ChunkVector BoundStack; + typedef ChunkVector BoundMap; + typedef ChunkVector IntegerStack; + typedef ChunkVector GateLists; + + void AddAccessIndexedInfo(GateLists &indices, GateRef gate, int idx, GateRef indexCheck); + void AddIfCondition(IntegerStack &pushed, GateRef x, GateRef y, TypedBinOp op); + Bound *AndOp(Bound *bound, Bound *b); + Bound *OrOp(Bound *bound, Bound *b); + bool Contain(GateLists& gateLists, GateRef gate); + void CalcBounds(GateRegion *block, GateRegion *loopHeader); + bool CheckLoop(GateRef array, GateRef lowerGate, int lower, GateRef upperGate, int upper); + void InBlockMotion(GateLists &indexChecked, GateLists &arrays); + bool InLoop(GateRef loopHeader, GateRef gate); + bool IsArrayLength(GateRef gate); + bool LoopInvariant(GateRegion *loopHeader, GateRef gate); + void UpdateBound(IntegerStack &pushed, GateRef gate, Bound *bound); + void UpdateBound(IntegerStack &pushed, GateRef x, TypedBinOp op, GateRef y, int constValue); + void ProcessIndexCheck(GateRegion *loopHeader, GateRef gate); + void RemoveIndexCheck(GateRef gate); + void CopyStateInAndDependIn(GateRef &stateIn, GateRef &dependIn, GateRef insertAfter); + void LoopInvariantMotionForIndexCheck(GateRef array, GateRef length, GateRef lowerGate, int lower, + GateRef upperGate, int upper, bool isTypedArray); + void GetInstrAndConstValueFromOp(GateRef gate, GateRef &instrValue, int& constValue); + Bound *GetBound(GateRef gate); + Bound *DoConstant(GateRef gate); + Bound *DoArithmeticOp(GateRef gate); + Bound *DoPhi(GateRef gate); + void SetBound(GateRef gate, Bound *bound); + void ProcessIf(IntegerStack &pushed, GateRegion *parent, OpCode cond); + bool InArrayBound(Bound *bound, GateRef length, GateRef array); + Bound *VisitGate(GateRef gate); + + void ReplaceIn(GateRef stateIn, GateRef dependIn, GateRef newGate); + + GateRef Predicate(GateRef left, TypedBinOp cond, GateRef right); + GateRef PredicateCmpWithConst(GateRef left, TypedBinOp cond, int right); + GateRef PredicateAdd(GateRef left, int leftConst, TypedBinOp cond, GateRef right); + GateRef PredicateAddCmpWithConst(GateRef left, int leftConst, TypedBinOp cond, int right); + + GateAccessor acc_; + BoundMap bounds_; + Circuit *circuit_ {nullptr}; + CircuitBuilder builder_; + Chunk *chunk_ {nullptr}; + bool enableLog_ {false}; + GraphLinearizer graphLinearizer_; + std::string methodName_; + + class IndexCheckInfo { + public: + IndexCheckInfo(Chunk* chunk): list_(chunk) {} + GateLists list_; + int min_; + int max_; + }; + typedef ChunkVector IndexCheckInfoList; + IndexCheckInfoList indexCheckInfo_; +}; +} +#endif \ No newline at end of file diff --git a/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp b/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp index 05bb40ffe5de18d2726e4479d544b34513ba5434..1a08bbbc98f4b3a09897ecc392f7da21d9cee3ee 100644 --- a/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp +++ b/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp @@ -80,13 +80,6 @@ public: LLVMInitializeAArch64AsmPrinter(); LLVMInitializeAArch64AsmParser(); LLVMInitializeAArch64Target(); - } else if (triple.compare(TARGET_ARM32) == 0) { - LLVMInitializeARMTargetInfo(); - LLVMInitializeARMTargetMC(); - LLVMInitializeARMDisassembler(); - 
LLVMInitializeARMAsmPrinter(); - LLVMInitializeARMAsmParser(); - LLVMInitializeARMTarget(); } else { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); diff --git a/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp b/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp index 5afa7050f2dde919138b84e5287b99ac4f7b1174..56820bc6a739dbbaeb8b840adae24a12c4dca15e 100644 --- a/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp +++ b/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp @@ -85,13 +85,6 @@ public: LLVMInitializeAArch64AsmPrinter(); LLVMInitializeAArch64AsmParser(); LLVMInitializeAArch64Target(); - } else if (triple.compare(TARGET_ARM32) == 0) { - LLVMInitializeARMTargetInfo(); - LLVMInitializeARMTargetMC(); - LLVMInitializeARMDisassembler(); - LLVMInitializeARMAsmPrinter(); - LLVMInitializeARMAsmParser(); - LLVMInitializeARMTarget(); } else { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); diff --git a/ecmascript/compiler/async_function_lowering.cpp b/ecmascript/compiler/async_function_lowering.cpp index 647a0b47a87e46f771ea5a46f2883198896ee943..6b3a5db6ae0fe8fb35ce93cb6e8090c7f7e4ad9e 100644 --- a/ecmascript/compiler/async_function_lowering.cpp +++ b/ecmascript/compiler/async_function_lowering.cpp @@ -41,7 +41,7 @@ void AsyncFunctionLowering::ProcessJumpTable() GateRef ifBranchCondition = builder_.Branch(stateEntry_, isEqual); GateRef ifTrueCondition = builder_.IfTrue(ifBranchCondition); GateRef ifFalseCondition = builder_.IfFalse(ifBranchCondition); - if (accessor_.GetOpCode(*firstUse) == OpCode::STATE_SPLIT) { + while (accessor_.GetOpCode(*firstUse) == OpCode::STATE_SPLIT) { firstUse++; } accessor_.ReplaceStateIn(*firstUse, ifTrueCondition); @@ -81,10 +81,9 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest GateRef loopBeginStateIn = Circuit::NullGate(); GateRef prevBcOffsetPhiGate = Circuit::NullGate(); while (true) { - auto opcode = accessor_.GetOpCode(stateInGate); - if (opcode == OpCode::STATE_ENTRY) { + if (stateInGate == GetEntryBBStateOut()) { // from state entry GateRef condition = builder_.Equal(offsetConstantGate, restoreOffsetGate); - GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(), { ifFalseCondition, condition }); + GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(0), { ifFalseCondition, condition }); GateRef ifTrue = circuit_->NewGate(circuit_->IfTrue(), {ifBranch}); GateRef ifFalse = circuit_->NewGate(circuit_->IfFalse(), {ifBranch}); GateRef ifTrueDepend = builder_.DependRelay(ifTrue, restoreOffsetGate); @@ -110,7 +109,7 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest } firstState = ifBranch; } - + auto opcode = accessor_.GetOpCode(stateInGate); if (opcode == OpCode::LOOP_BEGIN) { bool resumeInLoopBody = false; CheckResumeInLoopBody(stateInGate, resumeInLoopBody); @@ -125,7 +124,7 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest GateType::NJSValue()); GateRef condition = builder_.Equal(offsetConstantGate, bcOffsetPhiGate); - GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(), {stateInGate, condition}); + GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(0), {stateInGate, condition}); GateRef ifTrue = circuit_->NewGate(circuit_->IfTrue(), {ifBranch}); GateRef ifFalse = circuit_->NewGate(circuit_->IfFalse(), {ifBranch}); @@ -158,7 +157,7 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest UpdateValueSelector(prevLoopBeginGate, loopBeginStateIn, 
prevBcOffsetPhiGate); break; } - if (accessor_.GetOpCode(stateInGate) == OpCode::STATE_ENTRY) { + if (stateInGate == GetEntryBBStateOut()) { break; } stateInGate = accessor_.GetState(stateInGate); @@ -192,9 +191,16 @@ void AsyncFunctionLowering::UpdateValueSelector(GateRef prevLoopBeginGate, if (accessor_.GetOpCode(use) == OpCode::VALUE_SELECTOR && use != prevBcOffsetPhiGate) { auto machineType = accessor_.GetMachineType(use); auto gateType = accessor_.GetGateType(use); - GateRef undefinedGate = + GateRef undefinedGate = Circuit::NullGate(); + if (gateType.IsNumberType()) { + undefinedGate = + circuit_->NewGate(circuit_->GetMetaBuilder()->Constant(JSTaggedValue::VALUE_ZERO), + machineType, GateType::IntType()); + } else { + undefinedGate = circuit_->NewGate(circuit_->GetMetaBuilder()->Constant(JSTaggedValue::VALUE_UNDEFINED), machineType, gateType); + } auto firstValueGate = accessor_.GetValueIn(use, 0); auto newValueSelector = circuit_->NewGate(circuit_->ValueSelector(2), machineType, // 2: valuesIn {newGate, undefinedGate, firstValueGate}, @@ -279,5 +285,27 @@ GateRef AsyncFunctionLowering::GetDependPhiFromLoopBegin(GateRef gate) const LOG_COMPILER(FATAL) << "Can not find depend-selector from loopbegin"; return Circuit::NullGate(); } + +GateRef AsyncFunctionLowering::GetEntryBBStateOut() const +{ + auto bb = bcBuilder_->GetBasicBlockById(1); // 1 : First Block Id + auto state = bb.stateCurrent; + if (accessor_.IsCFGMerge(state)) { + return accessor_.GetState(state); + } else { + return state; + } +} + +GateRef AsyncFunctionLowering::GetEntryBBDependOut() const +{ + auto bb = bcBuilder_->GetBasicBlockById(1); // 1 : First Block Id + auto depend = bb.dependCurrent; + if (accessor_.IsDependSelector(depend)) { + return accessor_.GetDep(depend); + } else { + return depend; + } +} } // panda::ecmascript::kungfu diff --git a/ecmascript/compiler/async_function_lowering.h b/ecmascript/compiler/async_function_lowering.h index 35c19cac298282399df7506ea26bf54c193457c8..fed6efb594121bb34a0382b1b700e417664bddf6 100644 --- a/ecmascript/compiler/async_function_lowering.h +++ b/ecmascript/compiler/async_function_lowering.h @@ -29,9 +29,8 @@ public: AsyncFunctionLowering(BytecodeCircuitBuilder *bcBuilder, Circuit *circuit, CompilationConfig *cmpCfg, bool enableLog, const std::string& name) : bcBuilder_(bcBuilder), circuit_(circuit), builder_(circuit, cmpCfg), enableLog_(enableLog), - stateEntry_(circuit->GetStateRoot()), - dependEntry_(circuit->GetDependRoot()), - accessor_(circuit), argAccessor_(circuit), methodName_(name) + accessor_(circuit), argAccessor_(circuit), stateEntry_(GetEntryBBStateOut()), + dependEntry_(GetEntryBBDependOut()), methodName_(name) { } @@ -66,14 +65,18 @@ private: GateRef GetDependPhiFromLoopBegin(GateRef loopbegin) const; + GateRef GetEntryBBStateOut() const; + + GateRef GetEntryBBDependOut() const; + BytecodeCircuitBuilder *bcBuilder_; Circuit *circuit_; CircuitBuilder builder_; bool enableLog_ {false}; - GateRef stateEntry_ {Circuit::NullGate()}; - GateRef dependEntry_ {Circuit::NullGate()}; GateAccessor accessor_; ArgumentAccessor argAccessor_; + GateRef stateEntry_ {Circuit::NullGate()}; + GateRef dependEntry_ {Circuit::NullGate()}; std::string methodName_; }; } // panda::ecmascript::kungfu diff --git a/ecmascript/compiler/base/depend_chain_helper.cpp b/ecmascript/compiler/base/depend_chain_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5d87bc51ce349ebf9bc632f00adab9a8a3ed6fee --- /dev/null +++ 
b/ecmascript/compiler/base/depend_chain_helper.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/base/depend_chain_helper.h" + +namespace panda::ecmascript::kungfu { + +void DependChains::Merge(DependChains* that) +{ + // find common sub list + while (size_ > that->size_) { + head_ = head_->next; + size_--; + } + + auto lhs = this->head_; + auto rhs = that->head_; + size_t rhsSize = that->size_; + while (rhsSize > size_) { + rhs = rhs->next; + rhsSize--; + } + while (lhs != rhs) { + ASSERT(lhs != nullptr); + lhs = lhs->next; + rhs = rhs->next; + size_--; + } + head_ = lhs; +} + +bool DependChains::Equals(DependChains* that) +{ + if (that == nullptr) { + return false; + } + if (size_ != that->size_) { + return false; + } + auto lhs = this->head_; + auto rhs = that->head_; + while (lhs != rhs) { + if (lhs->gate != rhs->gate) { + return false; + } + lhs = lhs->next; + rhs = rhs->next; + } + return true; +} + +uint32_t DependChains::FoundIndexCheckedForLength(RangeGuard* rangeGuard, GateRef input) +{ + for (Node* node = head_; node != nullptr; node = node->next) { + uint32_t length = rangeGuard->CheckIndexCheckLengthInput(node->gate, input); + if (length > 0) { // found !!! + return length; + } + } + return 0; +} + +uint32_t DependChains::FoundIndexCheckedForIndex(RangeGuard* rangeGuard, GateRef input) +{ + for (Node* node = head_; node != nullptr; node = node->next) { + uint32_t length = rangeGuard->CheckIndexCheckIndexInput(node->gate, input); + if (length > 0) { // found !!! + return length; + } + } + return 0; +} + +GateRef DependChains::LookupNode(LaterElimination* elimination, GateRef gate) +{ + for (Node* node = head_; node != nullptr; node = node->next) { + if (elimination->CheckReplacement(node->gate, gate)) { + return node->gate; + } + } + return Circuit::NullGate(); +} + +DependChains* DependChains::UpdateNode(GateRef gate) +{ + // assign node->next to head + Node* node = chunk_->New(gate, head_); + DependChains* that = new (chunk_) DependChains(chunk_); + // assign head to node + that->head_ = node; + that->size_ = size_ + 1; + return that; +} +} // namespace panda::ecmascript::kungfu \ No newline at end of file diff --git a/ecmascript/compiler/base/depend_chain_helper.h b/ecmascript/compiler/base/depend_chain_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..aac9dd52a44101f5ceaa404695e2c80e5ba9d67f --- /dev/null +++ b/ecmascript/compiler/base/depend_chain_helper.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_DEPEND_CHAIN_HELPER_H +#define ECMASCRIPT_COMPILER_DEPEND_CHAIN_HELPER_H + +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/gate_accessor.h" +#include "ecmascript/compiler/graph_visitor.h" +#include "ecmascript/compiler/later_elimination.h" +#include "ecmascript/compiler/range_guard.h" +#include "ecmascript/mem/chunk_containers.h" + +namespace panda::ecmascript::kungfu { +class LaterElimination; +class RangeGuard; +class DependChains : public ChunkObject { +public: + DependChains(Chunk* chunk) : chunk_(chunk) {} + ~DependChains() = default; + + DependChains* UpdateNode(GateRef gate); + bool Equals(DependChains* that); + void Merge(DependChains* that); + void CopyFrom(DependChains *other) + { + head_ = other->head_; + size_ = other->size_; + } + uint32_t FoundIndexCheckedForLength(RangeGuard* rangeGuard, GateRef input); + uint32_t FoundIndexCheckedForIndex(RangeGuard* rangeGuard, GateRef input); + GateRef LookupNode(LaterElimination* elimination, GateRef gate); +private: + struct Node { + Node(GateRef gate, Node* next) : gate(gate), next(next) {} + GateRef gate; + Node *next; + }; + + Node *head_{nullptr}; + size_t size_ {0}; + Chunk* chunk_; +}; +} // panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_DEPEND_CHAIN_HELPER_H \ No newline at end of file diff --git a/ecmascript/compiler/bc_call_signature.h b/ecmascript/compiler/bc_call_signature.h index 08aceb8cd7207bc2c839d6e5da16f512fb9da6f1..5965feed627449d542d51f11c0540077ceb51af6 100644 --- a/ecmascript/compiler/bc_call_signature.h +++ b/ecmascript/compiler/bc_call_signature.h @@ -453,7 +453,13 @@ namespace panda::ecmascript::kungfu { APPEND_SUFFIX(HandleCallthis3Imm8V8V8V8V8, V) \ APPEND_SUFFIX(HandleNewobjrangeImm8Imm8V8, V) \ APPEND_SUFFIX(HandleNewobjrangeImm16Imm8V8, V) \ - APPEND_SUFFIX(HandleWideNewobjrangePrefImm16V8, V) + APPEND_SUFFIX(HandleWideNewobjrangePrefImm16V8, V) \ + APPEND_SUFFIX(HandleInstanceofImm8V8, V) \ + APPEND_SUFFIX(HandleTryldglobalbynameImm8Id16, V) \ + APPEND_SUFFIX(HandleTryldglobalbynameImm16Id16, V) \ + APPEND_SUFFIX(HandleTrystglobalbynameImm8Id16, V) \ + APPEND_SUFFIX(HandleTrystglobalbynameImm16Id16, V) \ + APPEND_SUFFIX(HandleLdglobalvarImm16Id16, V) #define ASM_INTERPRETER_BC_LAYOUT_PROFILER_STUB_LIST(V) \ APPEND_SUFFIX(HandleDefineclasswithbufferImm8Id16Id16Imm16V8, V) \ @@ -461,6 +467,11 @@ namespace panda::ecmascript::kungfu { APPEND_SUFFIX(HandleDefinegettersetterbyvalueV8V8V8V8, V) \ APPEND_SUFFIX(HandleCreateobjectwithbufferImm8Id16, V) \ APPEND_SUFFIX(HandleCreateobjectwithbufferImm16Id16, V) \ + APPEND_SUFFIX(HandleCreatearraywithbufferImm8Id16, V) \ + APPEND_SUFFIX(HandleCreatearraywithbufferImm16Id16, V) \ + APPEND_SUFFIX(HandleCreateemptyobject, V) \ + APPEND_SUFFIX(HandleCreateemptyarrayImm8, V) \ + APPEND_SUFFIX(HandleCreateemptyarrayImm16, V) \ APPEND_SUFFIX(HandleLdobjbynameImm8Id16, V) \ APPEND_SUFFIX(HandleLdobjbynameImm16Id16, V) \ APPEND_SUFFIX(HandleLdthisbynameImm16Id16, V) \ @@ -473,6 +484,19 @@ namespace panda::ecmascript::kungfu { APPEND_SUFFIX(HandleStobjbynameImm8Id16V8, V) \ 
APPEND_SUFFIX(HandleStobjbynameImm16Id16V8, V) \ APPEND_SUFFIX(HandleStobjbyvalueImm8V8V8, V) \ + APPEND_SUFFIX(HandleStobjbyindexImm8V8Imm16, V) \ + APPEND_SUFFIX(HandleStobjbyindexImm16V8Imm16, V) \ + APPEND_SUFFIX(HandleLdobjbyvalueImm8V8, V) \ + APPEND_SUFFIX(HandleLdobjbyvalueImm16V8, V) \ + APPEND_SUFFIX(HandleLdthisbyvalueImm16, V) \ + APPEND_SUFFIX(HandleLdthisbyvalueImm8, V) \ + APPEND_SUFFIX(HandleLdobjbyindexImm8Imm16, V) \ + APPEND_SUFFIX(HandleLdobjbyindexImm16Imm16, V) \ + APPEND_SUFFIX(HandleWideLdobjbyindexPrefImm32, V) \ + APPEND_SUFFIX(HandleWideStobjbyindexPrefV8Imm32, V) \ + APPEND_SUFFIX(HandleStownbyindexImm16V8Imm16, V) \ + APPEND_SUFFIX(HandleStownbyindexImm8V8Imm16, V) \ + APPEND_SUFFIX(HandleWideStownbyindexPrefV8Imm32, V) \ APPEND_SUFFIX(HandleStownbyvaluewithnamesetImm16V8V8, V) \ APPEND_SUFFIX(HandleStownbyvaluewithnamesetImm8V8V8, V) \ APPEND_SUFFIX(HandleStownbyvalueImm8V8V8, V) \ diff --git a/ecmascript/compiler/binary_section.h b/ecmascript/compiler/binary_section.h index 7a221e269b4afa771432c60416936253db0435c9..54c26ee2b791f6dc1db6a39622fe499edae23239 100644 --- a/ecmascript/compiler/binary_section.h +++ b/ecmascript/compiler/binary_section.h @@ -144,7 +144,8 @@ public: int Link() const { - return value_ == ElfSecName::SYMTAB ? 1 : 0; + // The strtab section index is 2 inside an .an file. + return value_ == ElfSecName::SYMTAB ? 2 : 0; } void InitShTypeAndFlag() @@ -212,7 +213,7 @@ public: // RO data section needs 16 bytes alignment bool InRodataSection() const { - return ElfSecName::RODATA <= value_ && value_ <= ElfSecName::RODATA_CST8; + return ElfSecName::RODATA <= value_ && value_ <= ElfSecName::RODATA_CST32; } private: static int const FIX_SIZE = 24; // 24:Elf_Rel diff --git a/ecmascript/compiler/builtins/builtins_array_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_array_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..70020c17d9e12f7a7e8b469d4e6ae2daa9436577 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_array_stub_builder.cpp @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_array_stub_builder.h" + +#include "ecmascript/compiler/builtins/builtins_stubs.h" +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/compiler/profiler_operation.h" +#include "ecmascript/compiler/rt_call_signature.h" +#include "ecmascript/runtime_call_id.h" + +namespace panda::ecmascript::kungfu { +void BuiltinsArrayStubBuilder::Concat(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if all the conditions below are satisfied: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) at most one argument; + // (3) all the arguments (if any) are empty arrays.
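+ // Illustrative examples, derived from conditions (1)-(3) above: [].concat() and [].concat([]) stay on
+ // this fast path, while [1].concat(), [].concat([1]) and [].concat([], []) all bail out to slowPath.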
+ JsArrayRequirements reqThisValue; + reqThisValue.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, reqThisValue), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label atMostOneArg(env); + Label argValIsEmpty(env); + GateRef numArgsAsInt32 = TruncPtrToInt32(numArgs); + Branch(Int32LessThanOrEqual(numArgsAsInt32, Int32(1)), &atMostOneArg, slowPath); + Bind(&atMostOneArg); + { + Label exactlyOneArg(env); + Branch(Int32Equal(numArgsAsInt32, Int32(0)), &argValIsEmpty, &exactlyOneArg); + Bind(&exactlyOneArg); + GateRef argVal = GetCallArg0(numArgs); + JsArrayRequirements reqArgVal; + Branch(IsJsArrayWithLengthLimit(glue, argVal, MAX_LENGTH_ZERO, reqArgVal), &argValIsEmpty, slowPath); + // Creates an empty array on fast path + Bind(&argValIsEmpty); + NewObjectStubBuilder newBuilder(this); + ProfileOperation callback; + result->WriteVariable(newBuilder.CreateEmptyArray(glue, callback)); + Jump(exit); + } + } +} + +void BuiltinsArrayStubBuilder::Filter(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if all the conditions below are satisfied: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) callbackFn is callable (otherwise a TypeError shall be thrown in the slow path) + JsArrayRequirements req; + req.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label isCallable(env); + Branch(IsCallable(GetCallArg0(numArgs)), &isCallable, slowPath); + // Creates an empty array on fast path + Bind(&isCallable); + NewObjectStubBuilder newBuilder(this); + ProfileOperation callback; + result->WriteVariable(newBuilder.CreateEmptyArray(glue, callback)); + Jump(exit); + } +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::ForEach([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs, + [[maybe_unused]] Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if all the conditions below are satisfied: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) callbackFn is callable (otherwise a TypeError shall be thrown in the slow path) + JsArrayRequirements req; + req.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + // Do nothing on fast path + Branch(IsCallable(GetCallArg0(numArgs)), exit, slowPath); +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::IndexOf([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if: (1) this is an empty array; (2) fromIndex is missing + JsArrayRequirements req; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label atMostOneArg(env); + Branch(Int32LessThanOrEqual(TruncPtrToInt32(numArgs), Int32(1)), &atMostOneArg, slowPath); + // Returns -1 on fast path + Bind(&atMostOneArg); + result->WriteVariable(IntToTaggedPtr(Int32(-1))); + Jump(exit); + } +} + +// Note: unused arguments are reserved for further development 
+void BuiltinsArrayStubBuilder::LastIndexOf([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if: (1) this is an empty array; (2) fromIndex is missing + JsArrayRequirements req; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label atMostOneArg(env); + Branch(Int32LessThanOrEqual(TruncPtrToInt32(numArgs), Int32(1)), &atMostOneArg, slowPath); + // Returns -1 on fast path + Bind(&atMostOneArg); + result->WriteVariable(IntToTaggedPtr(Int32(-1))); + Jump(exit); + } +} + +void BuiltinsArrayStubBuilder::Slice(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) no arguments exist + JsArrayRequirements req; + req.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label noArgs(env); + GateRef numArgsAsInt32 = TruncPtrToInt32(numArgs); + Branch(Int32Equal(numArgsAsInt32, Int32(0)), &noArgs, slowPath); + // Creates a new empty array on fast path + Bind(&noArgs); + NewObjectStubBuilder newBuilder(this); + ProfileOperation callback; + result->WriteVariable(newBuilder.CreateEmptyArray(glue, callback)); + Jump(exit); + } +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::Reverse([[maybe_unused]] GateRef glue, GateRef thisValue, + [[maybe_unused]] GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if this is an array of length 0 or 1 + JsArrayRequirements req; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ONE, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + // Returns thisValue on fast path + result->WriteVariable(thisValue); + Jump(exit); +} + +GateRef BuiltinsArrayStubBuilder::IsJsArrayWithLengthLimit(GateRef glue, GateRef object, + uint32_t maxLength, JsArrayRequirements requirements) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + Label isHeapObject(env); + Label isJsArray(env); + Label stabilityCheckPassed(env); + Label defaultConstructorCheckPassed(env); + Label exit(env); + DEFVARIABLE(result, VariableType::BOOL(), False()); + + Branch(TaggedIsHeapObject(object), &isHeapObject, &exit); + Bind(&isHeapObject); + Branch(IsJsArray(object), &isJsArray, &exit); + Bind(&isJsArray); + if (requirements.stable) { + Branch(IsStableJSArray(glue, object), &stabilityCheckPassed, &exit); + } else { + Jump(&stabilityCheckPassed); + } + Bind(&stabilityCheckPassed); + if (requirements.defaultConstructor) { + // If HasConstructor bit is set to 1, then the constructor has been modified.
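+ // Illustrative example: after `arr.constructor = Foo;` this bit is set, so the
+ // requirements.defaultConstructor check fails and callers fall back to the slow path.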
+ Branch(HasConstructor(object), &exit, &defaultConstructorCheckPassed); + } else { + Jump(&defaultConstructorCheckPassed); + } + Bind(&defaultConstructorCheckPassed); + result.WriteVariable(Int32UnsignedLessThanOrEqual(GetArrayLength(object), Int32(maxLength))); + Jump(&exit); + Bind(&exit); + GateRef ret = *result; + env->SubCfgExit(); + return ret; +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_array_stub_builder.h b/ecmascript/compiler/builtins/builtins_array_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..09111d19866b06a1a4c421db5fdae1a06f62d247 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_array_stub_builder.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_ARRAY_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_ARRAY_STUB_BUILDER_H +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/gate.h" +#include "ecmascript/compiler/gate_meta_data.h" +#include "ecmascript/compiler/stub_builder-inl.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsArrayStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsArrayStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + ~BuiltinsArrayStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsArrayStubBuilder); + NO_COPY_SEMANTIC(BuiltinsArrayStubBuilder); + void GenerateCircuit() override {} + + void Concat(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Filter(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void ForEach(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void IndexOf(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void LastIndexOf(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Slice(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Reverse(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + +private: + static constexpr uint32_t MAX_LENGTH_ZERO = 0; + static constexpr uint32_t MAX_LENGTH_ONE = 1; + struct JsArrayRequirements { + bool stable = false; + bool defaultConstructor = false; + }; + GateRef IsJsArrayWithLengthLimit(GateRef glue, GateRef object, + uint32_t maxLength, JsArrayRequirements requirements); +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_ARRAY_STUB_BUILDER_H diff --git a/ecmascript/compiler/builtins/builtins_call_signature.h b/ecmascript/compiler/builtins/builtins_call_signature.h index af7f5ec30dbaf2832fee9bf9344d2bde886fdc13..a4f81f7ec5e000b8461d81cac6c56088d7992e33 100644 --- 
a/ecmascript/compiler/builtins/builtins_call_signature.h +++ b/ecmascript/compiler/builtins/builtins_call_signature.h @@ -50,7 +50,29 @@ namespace panda::ecmascript::kungfu { V(ListForEach) \ V(ArrayListForEach) \ V(ArrayListReplaceAllElements) \ - V(FunctionPrototypeApply) + V(FunctionPrototypeApply) \ + V(ArrayConcat) \ + V(ArrayFilter) \ + V(ArrayForEach) \ + V(ArrayIndexOf) \ + V(ArrayLastIndexOf) \ + V(ArraySlice) \ + V(ArrayReverse) \ + V(SetClear) \ + V(SetValues) \ + V(SetEntries) \ + V(SetForEach) \ + V(SetAdd) \ + V(SetDelete) \ + V(SetHas) \ + V(MapClear) \ + V(MapValues) \ + V(MapEntries) \ + V(MapKeys) \ + V(MapForEach) \ + V(MapSet) \ + V(MapDelete) \ + V(MapHas) #define BUILTINS_CONSTRUCTOR_STUB_LIST(V) \ V(BooleanConstructor) \ @@ -58,14 +80,16 @@ namespace panda::ecmascript::kungfu { V(ArrayConstructor) #define AOT_BUILTINS_STUB_LIST(V) \ - V(SQRT) \ + V(SQRT) /* list start and math list start */ \ V(COS) \ V(SIN) \ V(ACOS) \ V(ATAN) \ V(ABS) \ - V(FLOOR) \ - V(LocaleCompare) + V(FLOOR) /* math list end */ \ + V(LocaleCompare) \ + V(SORT) \ + V(STRINGIFY) class BuiltinsStubCSigns { public: @@ -79,6 +103,10 @@ public: AOT_BUILTINS_STUB_LIST(DEF_STUB_ID) #undef DEF_STUB_ID BUILTINS_CONSTRUCTOR_STUB_FIRST = BooleanConstructor, + TYPED_BUILTINS_FIRST = SQRT, + TYPED_BUILTINS_LAST = STRINGIFY, + TYPED_BUILTINS_MATH_FIRST = SQRT, + TYPED_BUILTINS_MATH_LAST = FLOOR, INVALID = 0xFF, }; @@ -115,18 +143,14 @@ public: static bool IsTypedBuiltin(ID builtinId) { - switch (builtinId) { - case BuiltinsStubCSigns::ID::COS: - case BuiltinsStubCSigns::ID::SIN: - case BuiltinsStubCSigns::ID::ACOS: - case BuiltinsStubCSigns::ID::ATAN: - case BuiltinsStubCSigns::ID::ABS: - case BuiltinsStubCSigns::ID::FLOOR: - case BuiltinsStubCSigns::ID::SQRT: - return true; - default: - return false; - } + return (BuiltinsStubCSigns::ID::TYPED_BUILTINS_FIRST <= builtinId) && + (builtinId <= BuiltinsStubCSigns::ID::TYPED_BUILTINS_LAST); + } + + static bool IsTypedBuiltinMath(ID builtinId) + { + return (BuiltinsStubCSigns::ID::TYPED_BUILTINS_MATH_FIRST <= builtinId) && + (builtinId <= BuiltinsStubCSigns::ID::TYPED_BUILTINS_MATH_LAST); } static ConstantIndex GetConstantIndex(ID builtinId) @@ -146,6 +170,12 @@ public: return ConstantIndex::MATH_FLOOR_FUNCTION_INDEX; case BuiltinsStubCSigns::ID::SQRT: return ConstantIndex::MATH_SQRT_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::LocaleCompare: + return ConstantIndex::LOCALE_COMPARE_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::SORT: + return ConstantIndex::ARRAY_SORT_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::STRINGIFY: + return ConstantIndex::JSON_STRINGIFY_FUNCTION_INDEX; default: LOG_COMPILER(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -163,6 +193,8 @@ public: {"abs", ABS}, {"floor", FLOOR}, {"localeCompare", LocaleCompare}, + {"sort", SORT}, + {"stringify", STRINGIFY}, }; if (str2BuiltinId.count(idStr) > 0) { return str2BuiltinId.at(idStr); @@ -191,6 +223,7 @@ enum class BuiltinsArgs : size_t { #define BUILTINS_STUB_ID(name) kungfu::BuiltinsStubCSigns::name #define IS_TYPED_BUILTINS_ID(id) kungfu::BuiltinsStubCSigns::IsTypedBuiltin(id) +#define IS_TYPED_BUILTINS_MATH_ID(id) kungfu::BuiltinsStubCSigns::IsTypedBuiltinMath(id) #define GET_TYPED_CONSTANT_INDEX(id) kungfu::BuiltinsStubCSigns::GetConstantIndex(id) } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_BUILTINS_CALL_SIGNATURE_H diff --git a/ecmascript/compiler/builtins/builtins_collection_stub_builder.cpp 
b/ecmascript/compiler/builtins/builtins_collection_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..26ca3d5cbe199a375e05b070020cd0d60b8841f2 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_collection_stub_builder.cpp @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_collection_stub_builder.h" + +#include "ecmascript/compiler/builtins/builtins_stubs.h" +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/linked_hash_table.h" +#include "ecmascript/js_map.h" +#include "ecmascript/js_set.h" +#include "ecmascript/js_iterator.h" + +namespace panda::ecmascript::kungfu { + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::CheckCollectionObj(Label *thisCollectionObj, Label *slowPath) +{ + // check target obj + auto jsType = std::is_same_v<CollectionType, JSSet> ? JSType::JS_SET : JSType::JS_MAP; + GateRef isJsCollectionObj = IsJSObjectType(thisValue_, jsType); + Branch(isJsCollectionObj, thisCollectionObj, slowPath); +} + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Clear(Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + GateRef linkedTable = GetLinked(); + GateRef res = Circuit::NullGate(); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Clear(linkedTable); + } else { + LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Clear(linkedTable); + } + + Label exception(env); + Label noException(env); + Branch(TaggedIsException(res), &exception, &noException); + Bind(&noException); + SetLinked(res); + Jump(exit); + Bind(&exception); + *result = res; + Jump(exit); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Clear(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Clear(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::CreateIterator(Variable *result, + Label *exit, Label *slowPath, GateRef kind) +{ + auto env = GetEnvironment(); + Label entry(env); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + NewObjectStubBuilder newBuilder(this); + newBuilder.SetGlue(glue_); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + newBuilder.CreateJSCollectionIterator<JSMapIterator, JSMap>(result, exit, thisValue_, kind); + } else { + newBuilder.CreateJSCollectionIterator<JSSetIterator, JSSet>(result, exit, thisValue_, kind); + } +} + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Values(Variable *result, Label *exit, Label *slowPath) +{ + GateRef kind = Int32(static_cast<int32_t>(IterationKind::VALUE)); + CreateIterator(result, exit, slowPath, kind); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Values(Variable
*result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Values(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Entries(Variable *result, Label *exit, Label *slowPath) +{ + GateRef kind = Int32(static_cast<int32_t>(IterationKind::KEY_AND_VALUE)); + CreateIterator(result, exit, slowPath, kind); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Entries(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Entries(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Keys(Variable *result, Label *exit, Label *slowPath) +{ + GateRef kind = Int32(static_cast<int32_t>(IterationKind::KEY)); + CreateIterator(result, exit, slowPath, kind); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Keys(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::ForEach(Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + GateRef callbackFnHandle = GetCallArg0(numArgs_); + Label callable(env); + // check heap obj + Label heapObj(env); + Branch(TaggedIsHeapObject(callbackFnHandle), &heapObj, slowPath); + Bind(&heapObj); + Branch(IsCallable(callbackFnHandle), &callable, slowPath); + Bind(&callable); + + GateRef linkedTable = GetLinked(); + GateRef res = Circuit::NullGate(); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.ForEach(thisValue_, linkedTable, numArgs_); + } else { + LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.ForEach(thisValue_, linkedTable, numArgs_); + } + + Label exception(env); + Branch(TaggedIsException(res), &exception, exit); + Bind(&exception); + *result = res; + Jump(exit); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::ForEach(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::ForEach(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::MapSetOrSetAdd( + Variable *result, Label *exit, Label *slowPath, bool isJsMapSet) +{ + auto env = GetEnvironment(); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + Bind(&thisCollectionObj); + GateRef key = GetCallArg0(numArgs_); + // check key + Label keyNotHole(env); + Branch(TaggedIsHole(key), slowPath, &keyNotHole); + Bind(&keyNotHole); + GateRef value = isJsMapSet ?
GetCallArg1(numArgs_) : key; + GateRef linkedTable = GetLinked(); + GateRef res = Circuit::NullGate(); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Insert(linkedTable, key, value); + } else { + LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Insert(linkedTable, key, value); + } + + SetLinked(res); + *result = thisValue_; + Jump(exit); +} + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Set(Variable *result, Label *exit, Label *slowPath) +{ + MapSetOrSetAdd(result, exit, slowPath, true); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Set(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Add(Variable *result, Label *exit, Label *slowPath) +{ + MapSetOrSetAdd(result, exit, slowPath, false); +} + +template void BuiltinsCollectionStubBuilder<JSSet>::Add(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Delete(Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + GateRef key = GetCallArg0(numArgs_); + GateRef linkedTable = GetLinked(); + GateRef res = Circuit::NullGate(); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Delete(linkedTable, key); + } else { + LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Delete(linkedTable, key); + } + *result = res; + Jump(exit); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Delete(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Delete(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Has(Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + GateRef key = GetCallArg0(numArgs_); + GateRef linkedTable = GetLinked(); + GateRef res = Circuit::NullGate(); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Has(linkedTable, key); + } else { + LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Has(linkedTable, key); + } + *result = res; + Jump(exit); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Has(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Has(Variable *result, Label *exit, Label *slowPath); +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_collection_stub_builder.h b/ecmascript/compiler/builtins/builtins_collection_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..36298475e7b365a828cad7c18687400042993009 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_collection_stub_builder.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_COLLECTION_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_COLLECTION_STUB_BUILDER_H +#include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/compiler/builtins/linked_hashtable_stub_builder.h" + +namespace panda::ecmascript::kungfu { +template <typename CollectionType> +class BuiltinsCollectionStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsCollectionStubBuilder(BuiltinsStubBuilder *parent, GateRef glue, GateRef thisValue, + GateRef numArgs) : BuiltinsStubBuilder(parent), glue_(glue), thisValue_(thisValue), numArgs_(numArgs) {} + ~BuiltinsCollectionStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsCollectionStubBuilder); + NO_COPY_SEMANTIC(BuiltinsCollectionStubBuilder); + void GenerateCircuit() override {} + + void Clear(Variable *result, Label *exit, Label *slowPath); + void Values(Variable *result, Label *exit, Label *slowPath); + void Entries(Variable *result, Label *exit, Label *slowPath); + void Keys(Variable *result, Label *exit, Label *slowPath); + void ForEach(Variable *result, Label *exit, Label *slowPath); + void Set(Variable *result, Label *exit, Label *slowPath); + void Add(Variable *result, Label *exit, Label *slowPath); + void Delete(Variable *result, Label *exit, Label *slowPath); + void Has(Variable *result, Label *exit, Label *slowPath); + +private: + // check target obj + void CheckCollectionObj(Label *exit, Label *slowPath); + void CreateIterator(Variable *result, Label *exit, Label *slowPath, GateRef iterationKind); + void MapSetOrSetAdd(Variable *result, Label *exit, Label *slowPath, bool isJsMapSet); + + GateRef GetLinkedOffset() + { + int32_t linkedTableOffset = 0; + if constexpr (std::is_same_v<CollectionType, JSMap>) { + linkedTableOffset = CollectionType::LINKED_MAP_OFFSET; + } else { + linkedTableOffset = CollectionType::LINKED_SET_OFFSET; + } + return IntPtr(linkedTableOffset); + } + + GateRef GetLinked() + { + GateRef linkedTableOffset = GetLinkedOffset(); + return Load(VariableType::JS_ANY(), thisValue_, linkedTableOffset); + } + + void SetLinked(GateRef newTable) + { + GateRef linkedTableOffset = GetLinkedOffset(); + Store(VariableType::JS_ANY(), glue_, thisValue_, linkedTableOffset, newTable); + } + + GateRef glue_; + GateRef thisValue_; + GateRef numArgs_; +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_COLLECTION_STUB_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/builtins/builtins_function_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_function_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..98dabbcadc691e5d113e8fde9b6e91e0ad9b2dea --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_function_stub_builder.cpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_function_stub_builder.h" + +#include "ecmascript/compiler/builtins/builtins_object_stub_builder.h" +#include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/js_arguments.h" + +namespace panda::ecmascript::kungfu { + +void BuiltinsFunctionStubBuilder::Apply(GateRef glue, GateRef thisValue, + GateRef numArgs, Variable* res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label targetIsCallable(env); + Label targetIsUndefined(env); + Label targetNotUndefined(env); + Label isHeapObject(env); + //1. If IsCallable(func) is false, throw a TypeError exception + Branch(TaggedIsHeapObject(thisValue), &isHeapObject, slowPath); + Bind(&isHeapObject); + { + Branch(IsCallable(thisValue), &targetIsCallable, slowPath); + Bind(&targetIsCallable); + { + GateRef thisArg = GetCallArg0(numArgs); + GateRef arrayObj = GetCallArg1(numArgs); + // 2. If argArray is null or undefined, then + Branch(TaggedIsUndefined(arrayObj), &targetIsUndefined, &targetNotUndefined); + Bind(&targetIsUndefined); + { + // a. Return Call(func, thisArg). + res->WriteVariable(JSCallDispatch(glue, thisValue, Int32(0), 0, Circuit::NullGate(), + JSCallMode::CALL_GETTER, { thisArg })); + Jump(exit); + } + Bind(&targetNotUndefined); + { + // 3. Let argList be CreateListFromArrayLike(argArray). + GateRef elements = BuildArgumentsListFastElements(glue, arrayObj); + Label targetIsHole(env); + Label targetNotHole(env); + Branch(TaggedIsHole(elements), &targetIsHole, &targetNotHole); + Bind(&targetIsHole); + { + BuiltinsObjectStubBuilder objectStubBuilder(this); + GateRef argList = objectStubBuilder.CreateListFromArrayLike(glue, arrayObj); + // 4. ReturnIfAbrupt(argList). + Label isPendingException(env); + Label noPendingException(env); + Branch(HasPendingException(glue), &isPendingException, &noPendingException); + Bind(&isPendingException); + { + Jump(slowPath); + } + Bind(&noPendingException); + { + GateRef argsLength = GetLengthOfTaggedArray(argList); + GateRef argv = PtrAdd(argList, IntPtr(TaggedArray::DATA_OFFSET)); + res->WriteVariable(JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), + JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, argv, thisArg })); + Jump(exit); + } + } + Bind(&targetNotHole); + { + // 6. Return Call(func, thisArg, argList). 
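+ // Illustrative example (assumption: argArray has stable fast elements): f.apply(o, [1, 2]) lands in one
+ // of the two branches below, which read the elements backing store directly and patch holes to undefined
+ // via MakeArgListWithHole before dispatching CALL_THIS_ARGV_WITH_RETURN.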
+ Label taggedIsStableJsArg(env); + Label taggedNotStableJsArg(env); + Branch(IsStableJSArguments(glue, arrayObj), &taggedIsStableJsArg, &taggedNotStableJsArg); + Bind(&taggedIsStableJsArg); + { + GateRef hClass = LoadHClass(arrayObj); + GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX); + GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset); + GateRef length = TaggedGetInt(result); + GateRef argsLength = MakeArgListWithHole(glue, elements, length); + GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET)); + res->WriteVariable(JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), + JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg })); + Jump(exit); + } + Bind(&taggedNotStableJsArg); + { + GateRef length = GetArrayLength(arrayObj); + GateRef argsLength = MakeArgListWithHole(glue, elements, length); + GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET)); + res->WriteVariable(JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), + JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg })); + Jump(exit); + } + } + } + } + } +} + +// return elements +GateRef BuiltinsFunctionStubBuilder::BuildArgumentsListFastElements(GateRef glue, GateRef arrayObj) +{ + auto env = GetEnvironment(); + Label subentry(env); + env->SubCfgEntry(&subentry); + DEFVARIABLE(res, VariableType::JS_ANY(), Hole()); + Label exit(env); + Label hasStableElements(env); + Label targetIsStableJSArguments(env); + Label targetNotStableJSArguments(env); + Label targetIsInt(env); + Label hClassEqual(env); + Label targetIsStableJSArray(env); + Label targetNotStableJSArray(env); + + Branch(HasStableElements(glue, arrayObj), &hasStableElements, &exit); + Bind(&hasStableElements); + { + Branch(IsStableJSArguments(glue, arrayObj), &targetIsStableJSArguments, &targetNotStableJSArguments); + Bind(&targetIsStableJSArguments); + { + GateRef hClass = LoadHClass(arrayObj); + GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit())); + GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue, glueGlobalEnvOffset); + GateRef argmentsClass = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv, + GlobalEnv::ARGUMENTS_CLASS); + Branch(Int64Equal(hClass, argmentsClass), &hClassEqual, &exit); + Bind(&hClassEqual); + { + GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX); + GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset); + Branch(TaggedIsInt(result), &targetIsInt, &exit); + Bind(&targetIsInt); + { + res = GetElementsArray(arrayObj); + Jump(&exit); + } + } + } + Bind(&targetNotStableJSArguments); + { + Branch(IsStableJSArray(glue, arrayObj), &targetIsStableJSArray, &targetNotStableJSArray); + Bind(&targetIsStableJSArray); + { + res = GetElementsArray(arrayObj); + Jump(&exit); + } + Bind(&targetNotStableJSArray); + { + FatalPrint(glue, { Int32(GET_MESSAGE_STRING_ID(ThisBranchIsUnreachable)) }); + Jump(&exit); + } + } + } + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} + +GateRef BuiltinsFunctionStubBuilder::MakeArgListWithHole(GateRef glue, GateRef argv, GateRef length) +{ + auto env = GetEnvironment(); + Label subentry(env); + env->SubCfgEntry(&subentry); + DEFVARIABLE(res, VariableType::INT32(), length); + DEFVARIABLE(i, VariableType::INT32(), Int32(0)); + Label exit(env); + + GateRef argsLength = 
GetLengthOfTaggedArray(argv); + + Label lengthGreaterThanArgsLength(env); + Label lengthLessThanArgsLength(env); + Branch(Int32GreaterThan(length, argsLength), &lengthGreaterThanArgsLength, &lengthLessThanArgsLength); + Bind(&lengthGreaterThanArgsLength); + { + res = argsLength; + Jump(&lengthLessThanArgsLength); + } + Bind(&lengthLessThanArgsLength); + { + Label loopHead(env); + Label loopEnd(env); + Label targetIsHole(env); + Label targetNotHole(env); + Branch(Int32UnsignedLessThan(*i, *res), &loopHead, &exit); + LoopBegin(&loopHead); + { + GateRef value = GetValueFromTaggedArray(argv, *i); + Branch(TaggedIsHole(value), &targetIsHole, &targetNotHole); + Bind(&targetIsHole); + { + SetValueToTaggedArray(VariableType::JS_ANY(), glue, argv, *i, Undefined()); + Jump(&targetNotHole); + } + Bind(&targetNotHole); + i = Int32Add(*i, Int32(1)); + Branch(Int32UnsignedLessThan(*i, *res), &loopEnd, &exit); + } + Bind(&loopEnd); + LoopEnd(&loopHead); + } + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_function_stub_builder.h b/ecmascript/compiler/builtins/builtins_function_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..babea4fce2d3eb34dcf7356f5abc220a76717c9c --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_function_stub_builder.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_FUNCTION_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_FUNCTION_STUB_BUILDER_H +#include "ecmascript/compiler/builtins/builtins_stubs.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsFunctionStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsFunctionStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + ~BuiltinsFunctionStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsFunctionStubBuilder); + NO_COPY_SEMANTIC(BuiltinsFunctionStubBuilder); + void GenerateCircuit() override {} + void Apply(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + GateRef BuildArgumentsListFastElements(GateRef glue, GateRef arrayObj); +private: + GateRef MakeArgListWithHole(GateRef glue, GateRef argv, GateRef length); +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_FUNCTION_STUB_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/builtins/builtins_object_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_object_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3e19f441b01e7278955dec17d5a305797e97b49f --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_object_stub_builder.cpp @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_object_stub_builder.h" + +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/compiler/typed_array_stub_builder.h" +#include "ecmascript/js_arguments.h" +#include "ecmascript/message_string.h" +namespace panda::ecmascript::kungfu { +GateRef BuiltinsObjectStubBuilder::CreateListFromArrayLike(GateRef glue, GateRef arrayObj) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(res, VariableType::JS_ANY(), Hole()); + DEFVARIABLE(index, VariableType::INT32(), Int32(0)); + Label exit(env); + + // 3. If Type(obj) is not Object, throw a TypeError exception. + Label targetIsHeapObject(env); + Label targetIsEcmaObject(env); + Label targetNotEcmaObject(env); + Branch(TaggedIsHeapObject(arrayObj), &targetIsHeapObject, &targetNotEcmaObject); + Bind(&targetIsHeapObject); + Branch(TaggedObjectIsEcmaObject(arrayObj), &targetIsEcmaObject, &targetNotEcmaObject); + Bind(&targetNotEcmaObject); + { + GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(TargetTypeNotObject)); + CallRuntime(glue, RTSTUB_ID(ThrowTypeError), { IntToTaggedInt(taggedId) }); + Jump(&exit); + } + Bind(&targetIsEcmaObject); + { + // 4. Let len be ToLength(Get(obj, "length")). + GateRef lengthString = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, + ConstantIndex::LENGTH_STRING_INDEX); + GateRef value = FastGetPropertyByName(glue, arrayObj, lengthString, ProfileOperation()); + GateRef number = ToLength(glue, value); + // 5. ReturnIfAbrupt(len). + Label isPendingException1(env); + Label noPendingException1(env); + Branch(HasPendingException(glue), &isPendingException1, &noPendingException1); + Bind(&isPendingException1); + { + Jump(&exit); + } + Bind(&noPendingException1); + { + Label indexInRange(env); + Label indexOutRange(env); + + GateRef doubleLen = GetDoubleOfTNumber(number); + Branch(DoubleGreaterThan(doubleLen, Double(JSObject::MAX_ELEMENT_INDEX)), &indexOutRange, &indexInRange); + Bind(&indexOutRange); + { + DebugPrint(glue, { Int32(GET_MESSAGE_STRING_ID(ThisBranchIsUnreachable)) }); + GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(LenGreaterThanMax)); + CallRuntime(glue, RTSTUB_ID(ThrowTypeError), { IntToTaggedInt(taggedId) }); + Jump(&exit); + } + Bind(&indexInRange); + { + GateRef int32Len = DoubleToInt(glue, doubleLen); + // 6. Let list be an empty List. + NewObjectStubBuilder newBuilder(this); + GateRef array = newBuilder.NewTaggedArray(glue, int32Len); + Label targetIsTypeArray(env); + Label targetNotTypeArray(env); + Branch(IsTypedArray(arrayObj), &targetIsTypeArray, &targetNotTypeArray); + Bind(&targetIsTypeArray); + { + TypedArrayStubBuilder arrayStubBuilder(this); + arrayStubBuilder.FastCopyElementToArray(glue, arrayObj, array); + // c. ReturnIfAbrupt(next).
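+ // Note (illustrative): the typed-array branch above copies every element in one
+ // FastCopyElementToArray call, so a single pending-exception check suffices here, unlike the
+ // per-element checks in the generic loop below.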
+ Label isPendingException2(env); + Label noPendingException2(env); + Branch(HasPendingException(glue), &isPendingException2, &noPendingException2); + Bind(&isPendingException2); + { + Jump(&exit); + } + Bind(&noPendingException2); + { + res = array; + Jump(&exit); + } + } + Bind(&targetNotTypeArray); + // 8. Repeat while index < len + Label loopHead(env); + Label loopEnd(env); + Label afterLoop(env); + Label isPendingException3(env); + Label noPendingException3(env); + Label storeValue(env); + Jump(&loopHead); + LoopBegin(&loopHead); + { + Branch(Int32UnsignedLessThan(*index, int32Len), &storeValue, &afterLoop); + Bind(&storeValue); + { + GateRef next = FastGetPropertyByIndex(glue, arrayObj, *index, ProfileOperation()); + // c. ReturnIfAbrupt(next). + Branch(HasPendingException(glue), &isPendingException3, &noPendingException3); + Bind(&isPendingException3); + { + Jump(&exit); + } + Bind(&noPendingException3); + SetValueToTaggedArray(VariableType::JS_ANY(), glue, array, *index, next); + index = Int32Add(*index, Int32(1)); + Jump(&loopEnd); + } + } + Bind(&loopEnd); + LoopEnd(&loopHead); + Bind(&afterLoop); + { + res = array; + Jump(&exit); + } + } + } + } + Bind(&exit); + GateRef ret = *res; + env->SubCfgExit(); + return ret; +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_object_stub_builder.h b/ecmascript/compiler/builtins/builtins_object_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..7c2b8e3880a01dee94ed4ddb2ac5ebfeabfc0627 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_object_stub_builder.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_OBJECT_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_OBJECT_STUB_BUILDER_H +#include "ecmascript/compiler/builtins/builtins_stubs.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsObjectStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsObjectStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + ~BuiltinsObjectStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsObjectStubBuilder); + NO_COPY_SEMANTIC(BuiltinsObjectStubBuilder); + void GenerateCircuit() override {} + GateRef CreateListFromArrayLike(GateRef glue, GateRef arrayObj); +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_OBJECT_STUB_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp index b9acfb50097885b62cb76d600f01383f773eef7b..5bb79966e514fb0d0ea9195c03ad6bb4c12e615d 100644 --- a/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp +++ b/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp @@ -19,7 +19,7 @@ #include "ecmascript/compiler/new_object_stub_builder.h" namespace panda::ecmascript::kungfu { -GateRef BuiltinsStringStubBuilder::StringAt(GateRef obj, GateRef index) +GateRef BuiltinsStringStubBuilder::StringAt(const StringInfoGateRef &stringInfoGate, GateRef index) { auto env = GetEnvironment(); Label entry(env); @@ -32,8 +32,8 @@ GateRef BuiltinsStringStubBuilder::StringAt(GateRef obj, GateRef index) Label doIntOp(env); Label leftIsNumber(env); Label rightIsNumber(env); - GateRef dataUtf16 = GetNormalStringData(obj); - Branch(IsUtf16String(obj), &isUtf16, &isUtf8); + GateRef dataUtf16 = GetNormalStringData(stringInfoGate); + Branch(IsUtf16String(stringInfoGate.GetString()), &isUtf16, &isUtf8); Bind(&isUtf16); { result = ZExtInt16ToInt32(Load(VariableType::INT16(), PtrAdd(dataUtf16, @@ -52,7 +52,8 @@ GateRef BuiltinsStringStubBuilder::StringAt(GateRef obj, GateRef index) return ret; } -GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef obj, GateRef index) +GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef index, + const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -65,8 +66,8 @@ GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef ob Label isUtf16(env); Label isUtf8(env); Label allocString(env); - GateRef dataUtf = GetNormalStringData(obj); - Branch(IsUtf16String(obj), &isUtf16, &isUtf8); + GateRef dataUtf = GetNormalStringData(stringInfoGate); + Branch(IsUtf16String(stringInfoGate.GetString()), &isUtf16, &isUtf8); Bind(&isUtf16); { GateRef dataAddr = PtrAdd(dataUtf, PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint16_t)))); @@ -121,7 +122,8 @@ GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef ob return ret; } -GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue, GateRef from, + GateRef len, const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -148,7 +150,7 @@ GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue Branch(Int32Equal(from, Int32(0)), &fromEqualZero, &next); Bind(&fromEqualZero); { - GateRef thisLen = GetLengthFromString(thisValue); + GateRef thisLen = stringInfoGate.GetLength(); 
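+ // Fast-path sketch (assuming result was initialized to thisValue in the elided context): a substring
+ // starting at 0 whose length equals the source length, e.g. str.substring(0, str.length), exits
+ // immediately without allocating a new string.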
Branch(Int32Equal(len, thisLen), &exit, &next); } Bind(&next); @@ -156,12 +158,12 @@ GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue Branch(IsUtf8String(thisValue), &isUtf8, &isUtf16); Bind(&isUtf8); { - result = FastSubUtf8String(glue, thisValue, from, len); + result = FastSubUtf8String(glue, from, len, stringInfoGate); Jump(&exit); } Bind(&isUtf16); { - result = FastSubUtf16String(glue, thisValue, from, len); + result = FastSubUtf16String(glue, from, len, stringInfoGate); Jump(&exit); } } @@ -172,7 +174,8 @@ GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue return ret; } -GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef from, GateRef len, + const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -187,7 +190,7 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef thisV Bind(&afterNew); { GateRef dst = PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)); - GateRef source = PtrAdd(GetNormalStringData(thisValue), ZExtInt32ToPtr(from)); + GateRef source = PtrAdd(GetNormalStringData(stringInfoGate), ZExtInt32ToPtr(from)); CopyChars(glue, dst, source, len, IntPtr(sizeof(uint8_t)), VariableType::INT8()); Jump(&exit); } @@ -197,7 +200,8 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef thisV return ret; } -GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef from, GateRef len, + const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -211,7 +215,7 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef this Label isUtf16Next(env); GateRef fromOffset = PtrMul(ZExtInt32ToPtr(from), IntPtr(sizeof(uint16_t) / sizeof(uint8_t))); - GateRef source = PtrAdd(GetNormalStringData(thisValue), fromOffset); + GateRef source = PtrAdd(GetNormalStringData(stringInfoGate), fromOffset); GateRef canBeCompressed = CanBeCompressed(source, len, true); NewObjectStubBuilder newBuilder(this); newBuilder.SetParameters(glue, 0); @@ -227,7 +231,7 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef this } Bind(&afterNew); { - GateRef source1 = PtrAdd(GetNormalStringData(thisValue), fromOffset); + GateRef source1 = PtrAdd(GetNormalStringData(stringInfoGate), fromOffset); GateRef dst = PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)); Branch(canBeCompressed, &isUtf8Next, &isUtf16Next); Bind(&isUtf8Next); @@ -503,7 +507,19 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhsData, bool lhsIsUtf8 return ret; } -GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateRef pos) + +void BuiltinsStringStubBuilder::StoreParent(GateRef glue, GateRef object, GateRef parent) +{ + Store(VariableType::JS_POINTER(), glue, object, IntPtr(SlicedString::PARENT_OFFSET), parent); +} + +void BuiltinsStringStubBuilder::StoreStartIndex(GateRef glue, GateRef object, GateRef startIndex) +{ + Store(VariableType::INT32(), glue, object, IntPtr(SlicedString::STARTINDEX_OFFSET), startIndex); +} + +GateRef BuiltinsStringStubBuilder::StringIndexOf(const StringInfoGateRef &lStringInfoGate, + const StringInfoGateRef &rStringInfoGate, GateRef pos) { auto env = GetEnvironment(); Label entry(env); @@ -521,8 +537,8 @@ 
GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR Label rhsIsUtf16(env); Label posRMaxNotGreaterLhs(env); - GateRef lhsCount = GetLengthFromString(lhs); - GateRef rhsCount = GetLengthFromString(rhs); + GateRef lhsCount = lStringInfoGate.GetLength(); + GateRef rhsCount = rStringInfoGate.GetLength(); Branch(Int32GreaterThan(pos, lhsCount), &exit, &nextCount); Bind(&nextCount); @@ -550,14 +566,14 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR GateRef posRMax = Int32Add(*posTag, rhsCount); Branch(Int32GreaterThan(posRMax, lhsCount), &exit, &posRMaxNotGreaterLhs); Bind(&posRMaxNotGreaterLhs); - GateRef rhsData = GetNormalStringData(rhs); - GateRef lhsData = GetNormalStringData(lhs); - Branch(IsUtf8String(rhs), &rhsIsUtf8, &rhsIsUtf16); + GateRef rhsData = GetNormalStringData(rStringInfoGate); + GateRef lhsData = GetNormalStringData(lStringInfoGate); + Branch(IsUtf8String(rStringInfoGate.GetString()), &rhsIsUtf8, &rhsIsUtf16); Bind(&rhsIsUtf8); { Label lhsIsUtf8(env); Label lhsIsUtf16(env); - Branch(IsUtf8String(lhs), &lhsIsUtf8, &lhsIsUtf16); + Branch(IsUtf8String(lStringInfoGate.GetString()), &lhsIsUtf8, &lhsIsUtf16); Bind(&lhsIsUtf8); { result = StringIndexOf(lhsData, true, rhsData, true, *posTag, max, rhsCount); @@ -573,7 +589,7 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR { Label lhsIsUtf8(env); Label lhsIsUtf16(env); - Branch(IsUtf8String(lhs), &lhsIsUtf8, &lhsIsUtf16); + Branch(IsUtf8String(lStringInfoGate.GetString()), &lhsIsUtf8, &lhsIsUtf16); Bind(&lhsIsUtf8); { result = StringIndexOf(lhsData, true, rhsData, false, *posTag, max, rhsCount); @@ -594,4 +610,49 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR env->SubCfgExit(); return ret; } + +void FlatStringStubBuilder::FlattenString(GateRef glue, GateRef str, Label *fastPath) +{ + auto env = GetEnvironment(); + Label notLineString(env); + Label exit(env); + length_ = GetLengthFromString(str); + Branch(BoolOr(IsLineString(str), IsConstantString(str)), &exit, ¬LineString); + Bind(¬LineString); + { + Label isTreeString(env); + Label notTreeString(env); + Label isSlicedString(env); + Branch(IsTreeString(str), &isTreeString, ¬TreeString); + Bind(&isTreeString); + { + Label isFlat(env); + Label notFlat(env); + Branch(TreeStringIsFlat(str), &isFlat, ¬Flat); + Bind(&isFlat); + { + flatString_.WriteVariable(GetFirstFromTreeString(str)); + Jump(fastPath); + } + Bind(¬Flat); + { + flatString_.WriteVariable(CallRuntime(glue, RTSTUB_ID(SlowFlattenString), { str })); + Jump(fastPath); + } + } + Bind(¬TreeString); + Branch(IsSlicedString(str), &isSlicedString, &exit); + Bind(&isSlicedString); + { + flatString_.WriteVariable(GetParentFromSlicedString(str)); + startIndex_.WriteVariable(GetStartIndexFromSlicedString(str)); + Jump(fastPath); + } + } + Bind(&exit); + { + flatString_.WriteVariable(str); + Jump(fastPath); + } +} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_string_stub_builder.h b/ecmascript/compiler/builtins/builtins_string_stub_builder.h index 471b83c1e74a47aea900c63c8dcaa900e37fa18c..c5b6c26f7b4c83ffe11d5670c1141c78baadd4a9 100644 --- a/ecmascript/compiler/builtins/builtins_string_stub_builder.h +++ b/ecmascript/compiler/builtins/builtins_string_stub_builder.h @@ -18,6 +18,9 @@ #include "ecmascript/compiler/stub_builder-inl.h" namespace panda::ecmascript::kungfu { +class FlatStringStubBuilder; +struct StringInfoGateRef; + class BuiltinsStringStubBuilder 
: public StubBuilder { public: explicit BuiltinsStringStubBuilder(StubBuilder *parent) @@ -27,21 +30,90 @@ public: NO_COPY_SEMANTIC(BuiltinsStringStubBuilder); void GenerateCircuit() override {} - GateRef StringAt(GateRef obj, GateRef index); - GateRef FastSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len); - GateRef FastSubUtf8String(GateRef glue, GateRef thisValue, GateRef from, GateRef len); - GateRef FastSubUtf16String(GateRef glue, GateRef thisValue, GateRef from, GateRef len); + GateRef StringAt(const StringInfoGateRef &stringInfoGate, GateRef index); + GateRef FastSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len, + const StringInfoGateRef &stringInfoGate); + GateRef FastSubUtf8String(GateRef glue, GateRef from, GateRef len, const StringInfoGateRef &stringInfoGate); + GateRef FastSubUtf16String(GateRef glue, GateRef from, GateRef len, const StringInfoGateRef &stringInfoGate); void CopyChars(GateRef glue, GateRef dst, GateRef source, GateRef sourceLength, GateRef size, VariableType type); void CopyUtf16AsUtf8(GateRef glue, GateRef src, GateRef dst, GateRef sourceLength); GateRef StringIndexOf(GateRef lhsData, bool lhsIsUtf8, GateRef rhsData, bool rhsIsUtf8, GateRef pos, GateRef max, GateRef rhsCount); - GateRef StringIndexOf(GateRef lhs, GateRef rhs, GateRef pos); - GateRef CreateFromEcmaString(GateRef glue, GateRef obj, GateRef index); + GateRef StringIndexOf(const StringInfoGateRef &lStringInfoGate, + const StringInfoGateRef &rStringInfoGate, GateRef pos); + GateRef CreateFromEcmaString(GateRef glue, GateRef index, const StringInfoGateRef &stringInfoGate); + void StoreParent(GateRef glue, GateRef object, GateRef parent); + void StoreStartIndex(GateRef glue, GateRef object, GateRef startIndex); private: GateRef CanBeCompressed(GateRef utf16Data, GateRef utf16Len, bool isUtf16); GateRef GetUtf16Data(GateRef stringData, GateRef index); GateRef IsASCIICharacter(GateRef data); GateRef GetUtf8Data(GateRef stringData, GateRef index); }; + +class FlatStringStubBuilder : public StubBuilder { +public: + explicit FlatStringStubBuilder(StubBuilder *parent) + : StubBuilder(parent) {} + ~FlatStringStubBuilder() override = default; + NO_MOVE_SEMANTIC(FlatStringStubBuilder); + NO_COPY_SEMANTIC(FlatStringStubBuilder); + void GenerateCircuit() override {} + + void FlattenString(GateRef glue, GateRef str, Label *fastPath); + GateRef GetParentFromSlicedString(GateRef string) + { + GateRef offset = IntPtr(SlicedString::PARENT_OFFSET); + return Load(VariableType::JS_POINTER(), string, offset); + } + GateRef GetStartIndexFromSlicedString(GateRef string) + { + GateRef offset = IntPtr(SlicedString::STARTINDEX_OFFSET); + return Load(VariableType::INT32(), string, offset); + } + + GateRef GetFlatString() + { + return flatString_.ReadVariable(); + } + + GateRef GetStartIndex() + { + return startIndex_.ReadVariable(); + } + + GateRef GetLength() + { + return length_; + } + +private: + Variable flatString_ { GetEnvironment(), VariableType::JS_POINTER(), NextVariableId(), Undefined() }; + Variable startIndex_ { GetEnvironment(), VariableType::INT32(), NextVariableId(), Int32(0) }; + GateRef length_ { Circuit::NullGate() }; +}; + +struct StringInfoGateRef { + GateRef string_ { Circuit::NullGate() }; + GateRef startIndex_ { Circuit::NullGate() }; + GateRef length_ { Circuit::NullGate() }; + StringInfoGateRef(FlatStringStubBuilder *flatString) : string_(flatString->GetFlatString()), + startIndex_(flatString->GetStartIndex()), + length_(flatString->GetLength()) {} + GateRef 
GetString() const + { + return string_; + } + + GateRef GetStartIndex() const + { + return startIndex_; + } + + GateRef GetLength() const + { + return length_; + } +}; } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_BUILTINS_STRING_STUB_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/builtins/builtins_stubs.cpp b/ecmascript/compiler/builtins/builtins_stubs.cpp index cd7719479064840652f8d3d24bdab3ce40577803..c04149abdbed702be6a6b35745ac5c368c1f8f04 100644 --- a/ecmascript/compiler/builtins/builtins_stubs.cpp +++ b/ecmascript/compiler/builtins/builtins_stubs.cpp @@ -16,14 +16,18 @@ #include "ecmascript/compiler/builtins/builtins_stubs.h" #include "ecmascript/base/number_helper.h" +#include "ecmascript/compiler/builtins/builtins_array_stub_builder.h" #include "ecmascript/compiler/builtins/builtins_call_signature.h" +#include "ecmascript/compiler/builtins/builtins_function_stub_builder.h" #include "ecmascript/compiler/builtins/builtins_string_stub_builder.h" #include "ecmascript/compiler/builtins/containers_vector_stub_builder.h" #include "ecmascript/compiler/builtins/containers_stub_builder.h" +#include "ecmascript/compiler/builtins/builtins_collection_stub_builder.h" #include "ecmascript/compiler/interpreter_stub-inl.h" #include "ecmascript/compiler/llvm_ir_builder.h" #include "ecmascript/compiler/new_object_stub_builder.h" #include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/compiler/stub_builder.h" #include "ecmascript/compiler/variable_type.h" #include "ecmascript/js_date.h" #include "ecmascript/js_primitive_ref.h" @@ -170,10 +174,10 @@ DECLARE_BUILTINS(CharCodeAt) Branch(IsString(thisValue), &isString, &slowPath); Bind(&isString); { - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); Bind(&flattenFastPath); - GateRef thisLen = GetLengthFromString(*thisFlat); + GateRef thisLen = GetLengthFromString(thisValue); Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &posTagNotUndefined); Bind(&posTagNotUndefined); { @@ -197,7 +201,8 @@ DECLARE_BUILTINS(CharCodeAt) Bind(&posNotLessZero); { BuiltinsStringStubBuilder stringBuilder(this); - res = IntToTaggedPtr(stringBuilder.StringAt(*thisFlat, *pos)); + StringInfoGateRef stringInfoGate(&thisFlat); + res = IntToTaggedPtr(stringBuilder.StringAt(stringInfoGate, *pos)); Jump(&exit); } } @@ -288,14 +293,16 @@ DECLARE_BUILTINS(IndexOf) } Bind(&nextCount); { - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - DEFVARIABLE(searchFlat, VariableType::JS_POINTER(), searchTag); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); Bind(&flattenFastPath); - FlattenString(searchTag, &searchFlat, &flattenFastPath1, &slowPath); + FlatStringStubBuilder searchFlat(this); + searchFlat.FlattenString(glue, searchTag, &flattenFastPath1); Bind(&flattenFastPath1); BuiltinsStringStubBuilder stringBuilder(this); - GateRef resPos = stringBuilder.StringIndexOf(*thisFlat, *searchFlat, *pos); + StringInfoGateRef thisStringInfoGate(&thisFlat); + StringInfoGateRef searchStringInfoGate(&searchFlat); + GateRef resPos = stringBuilder.StringIndexOf(thisStringInfoGate, searchStringInfoGate, *pos); Branch(Int32GreaterThanOrEqual(resPos, Int32(0)), &resPosGreaterZero, &exit); Bind(&resPosGreaterZero); { @@ 
-358,6 +365,8 @@ DECLARE_BUILTINS(Substring) Label startNotGreatEnd(env); Label thisIsHeapobject(env); Label flattenFastPath(env); + Label sliceString(env); + Label fastSubstring(env); Branch(TaggedIsUndefinedOrNull(thisValue), &slowPath, &objNotUndefinedAndNull); Bind(&objNotUndefinedAndNull); @@ -461,12 +470,22 @@ DECLARE_BUILTINS(Substring) Bind(&countRes); { GateRef len = Int32Sub(*to, *from); - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); Bind(&flattenFastPath); { + Branch(Int32GreaterThanOrEqual(len, Int32(SlicedString::MIN_SLICED_ECMASTRING_LENGTH)), + &sliceString, &fastSubstring); + Bind(&sliceString); + { + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + newBuilder.AllocSlicedStringObject(&res, &exit, *from, len, &thisFlat); + } + Bind(&fastSubstring); BuiltinsStringStubBuilder stringBuilder(this); - res = stringBuilder.FastSubString(glue, *thisFlat, *from, len); + StringInfoGateRef stringInfoGate(&thisFlat); + res = stringBuilder.FastSubString(glue, thisValue, *from, len, stringInfoGate); Jump(&exit); } } @@ -512,10 +531,10 @@ DECLARE_BUILTINS(CharAt) Branch(IsString(thisValue), &isString, &slowPath); Bind(&isString); { - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); Bind(&flattenFastPath); - GateRef thisLen = GetLengthFromString(*thisFlat); + GateRef thisLen = GetLengthFromString(thisValue); Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &posTagNotUndefined); Bind(&posTagNotUndefined); { @@ -539,7 +558,8 @@ DECLARE_BUILTINS(CharAt) Bind(&posNotLessZero); { BuiltinsStringStubBuilder stringBuilder(this); - res = stringBuilder.CreateFromEcmaString(glue, *thisFlat, *pos); + StringInfoGateRef stringInfoGate(&thisFlat); + res = stringBuilder.CreateFromEcmaString(glue, *pos, stringInfoGate); Jump(&exit); } } @@ -562,230 +582,17 @@ DECLARE_BUILTINS(CharAt) Return(*res); } -DECLARE_BUILTINS(VectorForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::VECTOR_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(VectorForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(VectorReplaceAllElements) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::VECTOR_REPLACEALLELEMENTS); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(VectorReplaceAllElements)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(StackForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, 
VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::STACK_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(StackForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(PlainArrayForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::PLAINARRAY_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(PlainArrayForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(QueueForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.QueueCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::QUEUE_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(QueueForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(DequeForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.DequeCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::DEQUE_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(DequeForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(LightWeightMapForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLightWeightCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LIGHTWEIGHTMAP_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LightWeightMapForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(LightWeightSetForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLightWeightCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LIGHTWEIGHTSET_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LightWeightSetForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - 
-DECLARE_BUILTINS(HashMapForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersHashCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::HASHMAP_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(HashMapForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(HashSetForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersHashCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::HASHSET_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(HashSetForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(LinkedListForEach) +DECLARE_BUILTINS(FunctionPrototypeApply) { auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - + DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); Label exit(env); Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLinkedListCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LINKEDLIST_FOREACH); + BuiltinsFunctionStubBuilder functionStubBuilder(this); + functionStubBuilder.Apply(glue, thisValue, numArgs, &res, &exit, &slowPath); Bind(&slowPath); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LinkedListForEach)); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(FunctionPrototypeApply)); res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); Jump(&exit); } @@ -793,174 +600,91 @@ DECLARE_BUILTINS(LinkedListForEach) Return(*res); } -DECLARE_BUILTINS(ListForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLinkedListCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LIST_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ListForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); +#define DECLARE_BUILTINS_WITH_CONTAINERS_STUB_BUILDER(StubName, Method, methodType, resultVariableType) \ +DECLARE_BUILTINS(StubName) \ +{ \ + auto env = GetEnvironment(); \ + DEFVARIABLE(res, VariableType::resultVariableType(), Undefined()); \ + Label exit(env); \ + Label slowPath(env); \ + ContainersStubBuilder containersBuilder(this); \ + containersBuilder.Method(glue, thisValue, numArgs, &res, &exit, &slowPath, ContainersType::methodType); \ + Bind(&slowPath); \ + { \ + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(StubName)); \ + res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \ + Jump(&exit); \ + } \ + Bind(&exit); \ + Return(*res); \ } -DECLARE_BUILTINS(ArrayListForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), 
Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::ARRAYLIST_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ArrayListForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); +#define BUILTINS_WITH_CONTAINERS_STUB_BUILDER(V) \ + V(ArrayListForEach, ContainersCommonFuncCall, ARRAYLIST_FOREACH, JS_POINTER) \ + V(DequeForEach, DequeCommonFuncCall, DEQUE_FOREACH, JS_POINTER) \ + V(HashMapForEach, ContainersHashCall, HASHMAP_FOREACH, JS_POINTER) \ + V(HashSetForEach, ContainersHashCall, HASHSET_FOREACH, JS_POINTER) \ + V(LightWeightMapForEach, ContainersLightWeightCall, LIGHTWEIGHTMAP_FOREACH, JS_POINTER) \ + V(LightWeightSetForEach, ContainersLightWeightCall, LIGHTWEIGHTSET_FOREACH, JS_POINTER) \ + V(LinkedListForEach, ContainersLinkedListCall, LINKEDLIST_FOREACH, JS_POINTER) \ + V(ListForEach, ContainersLinkedListCall, LIST_FOREACH, JS_POINTER) \ + V(PlainArrayForEach, ContainersCommonFuncCall, PLAINARRAY_FOREACH, JS_POINTER) \ + V(QueueForEach, QueueCommonFuncCall, QUEUE_FOREACH, JS_POINTER) \ + V(StackForEach, ContainersCommonFuncCall, STACK_FOREACH, JS_POINTER) \ + V(VectorForEach, ContainersCommonFuncCall, VECTOR_FOREACH, JS_POINTER) \ + V(ArrayListReplaceAllElements, ContainersCommonFuncCall, ARRAYLIST_REPLACEALLELEMENTS, JS_POINTER) \ + V(VectorReplaceAllElements, ContainersCommonFuncCall, VECTOR_REPLACEALLELEMENTS, JS_POINTER) + +BUILTINS_WITH_CONTAINERS_STUB_BUILDER(DECLARE_BUILTINS_WITH_CONTAINERS_STUB_BUILDER) + +#undef DECLARE_BUILTINS_WITH_CONTAINERS_STUB_BUILDER +#undef BUILTINS_WITH_CONTAINERS_STUB_BUILDER + +#define DECLARE_BUILTINS_WITH_ARRAY_STUB_BUILDER(Method, resultVariableType) \ +DECLARE_BUILTINS(Array##Method) \ +{ \ + auto env = GetEnvironment(); \ + DEFVARIABLE(res, VariableType::resultVariableType(), Undefined()); \ + Label exit(env); \ + Label slowPath(env); \ + BuiltinsArrayStubBuilder arrayStubBuilder(this); \ + arrayStubBuilder.Method(glue, thisValue, numArgs, &res, &exit, &slowPath); \ + Bind(&slowPath); \ + { \ + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(Array##Method)); \ + res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \ + Jump(&exit); \ + } \ + Bind(&exit); \ + Return(*res); \ } -DECLARE_BUILTINS(ArrayListReplaceAllElements) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::ARRAYLIST_REPLACEALLELEMENTS); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ArrayListReplaceAllElements)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} +#define BUILTINS_WITH_ARRAY_STUB_BUILDER(V) \ + V(Concat, JS_POINTER) \ + V(Filter, JS_POINTER) \ + V(ForEach, JS_ANY) \ + V(IndexOf, JS_ANY) \ + V(LastIndexOf, JS_ANY) \ + V(Slice, JS_POINTER) \ + V(Reverse, JS_POINTER) -DECLARE_BUILTINS(FunctionPrototypeApply) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); - Label exit(env); - Label slowPath(env); - 
Label targetIsCallable(env); - Label targetIsUndefined(env); - Label targetNotUndefined(env); - Label isHeapObject(env); - //1. If IsCallable(func) is false, throw a TypeError exception - Branch(TaggedIsHeapObject(thisValue), &isHeapObject, &slowPath); - Bind(&isHeapObject); - { - Branch(IsCallable(thisValue), &targetIsCallable, &slowPath); - Bind(&targetIsCallable); - { - GateRef thisArg = GetCallArg0(numArgs); - GateRef arrayObj = GetCallArg1(numArgs); - // 2. If argArray is null or undefined, then - Branch(TaggedIsUndefined(arrayObj), &targetIsUndefined, &targetNotUndefined); - Bind(&targetIsUndefined); - { - // a. Return Call(func, thisArg). - res = JSCallDispatch(glue, thisValue, Int32(0), 0, Circuit::NullGate(), - JSCallMode::CALL_GETTER, { thisArg }); - Jump(&exit); - } - Bind(&targetNotUndefined); - { - // 3. Let argList be CreateListFromArrayLike(argArray). - GateRef elements = BuildArgumentsListFastElements(glue, arrayObj); - Label targetIsHole(env); - Label targetNotHole(env); - Branch(TaggedIsHole(elements), &targetIsHole, &targetNotHole); - Bind(&targetIsHole); - { - GateRef argList = CreateListFromArrayLike(glue, arrayObj); - // 4. ReturnIfAbrupt(argList). - Label isPendingException(env); - Label noPendingException(env); - Branch(HasPendingException(glue), &isPendingException, &noPendingException); - Bind(&isPendingException); - { - Jump(&slowPath); - } - Bind(&noPendingException); - { - GateRef argsLength = GetLengthOfTaggedArray(argList); - GateRef argv = PtrAdd(argList, IntPtr(TaggedArray::DATA_OFFSET)); - res = JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), - JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, argv, thisArg }); - Jump(&exit); - } - } - Bind(&targetNotHole); - { - // 6. Return Call(func, thisArg, argList). 
- Label taggedIsStableJsArg(env); - Label taggedNotStableJsArg(env); - Branch(IsStableJSArguments(glue, arrayObj), &taggedIsStableJsArg, &taggedNotStableJsArg); - Bind(&taggedIsStableJsArg); - { - GateRef hClass = LoadHClass(arrayObj); - GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX); - GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset); - GateRef length = TaggedGetInt(result); - GateRef argsLength = MakeArgListWithHole(glue, elements, length); - GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET)); - res = JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), - JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg }); - Jump(&exit); - } - Bind(&taggedNotStableJsArg); - { - GateRef length = GetLengthOfJsArray(glue, arrayObj); - GateRef argsLength = MakeArgListWithHole(glue, elements, length); - GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET)); - res = JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), - JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg }); - Jump(&exit); - } - } - } - } - } +BUILTINS_WITH_ARRAY_STUB_BUILDER(DECLARE_BUILTINS_WITH_ARRAY_STUB_BUILDER) - Bind(&slowPath); - { - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} +#undef DECLARE_BUILTINS_WITH_ARRAY_STUB_BUILDER +#undef BUILTINS_WITH_ARRAY_STUB_BUILDER DECLARE_BUILTINS(BooleanConstructor) { auto env = GetEnvironment(); DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); + Label newTargetIsHeapObject(env); Label newTargetIsJSFunction(env); Label slowPath(env); Label exit(env); + Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, &slowPath); + Bind(&newTargetIsHeapObject); Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath); Bind(&newTargetIsJSFunction); { @@ -1000,10 +724,13 @@ DECLARE_BUILTINS(DateConstructor) auto env = GetEnvironment(); DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); + Label newTargetIsHeapObject(env); Label newTargetIsJSFunction(env); Label slowPath(env); Label exit(env); + Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, &slowPath); + Bind(&newTargetIsHeapObject); Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath); Bind(&newTargetIsJSFunction); { @@ -1082,10 +809,13 @@ DECLARE_BUILTINS(ArrayConstructor) auto env = GetEnvironment(); DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); + Label newTargetIsHeapObject(env); Label newTargetIsJSFunction(env); Label slowPath(env); Label exit(env); + Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, &slowPath); + Bind(&newTargetIsHeapObject); Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath); Bind(&newTargetIsJSFunction); { @@ -1165,7 +895,7 @@ DECLARE_BUILTINS(ArrayConstructor) newBuilder.SetParameters(glue, 0); res = newBuilder.NewJSArrayWithSize(intialHClass, *arrayLength); GateRef lengthOffset = IntPtr(JSArray::LENGTH_OFFSET); - Store(VariableType::JS_ANY(), glue, *res, lengthOffset, Int64ToTaggedInt(*arrayLength)); + Store(VariableType::INT32(), glue, *res, lengthOffset, TruncInt64ToInt32(*arrayLength)); GateRef accessor = GetGlobalConstantValue(VariableType::JS_ANY(), glue, ConstantIndex::ARRAY_LENGTH_ACCESSOR); SetPropertyInlinedProps(glue, *res, intialHClass, accessor, @@ -1187,4 +917,56 @@ DECLARE_BUILTINS(ArrayConstructor) Bind(&exit); Return(*res); } -} // namespace 
panda::ecmascript::kungfu
\ No newline at end of file
+
+#define DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(type, method, retType, retDefaultValue) \
+DECLARE_BUILTINS(type##method) \
+{ \
+    auto env = GetEnvironment(); \
+    DEFVARIABLE(res, retType, retDefaultValue); \
+    Label thisCollectionObj(env); \
+    Label slowPath(env); \
+    Label exit(env); \
+    BuiltinsCollectionStubBuilder<JS##type> builder(this, glue, thisValue, numArgs); \
+    builder.method(&res, &exit, &slowPath); \
+    Bind(&slowPath); \
+    { \
+        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(type##method)); \
+        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \
+        Jump(&exit); \
+    } \
+    Bind(&exit); \
+    Return(*res); \
+}
+
+// Set.prototype.Clear
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Clear, VariableType::JS_ANY(), Undefined());
+// Set.prototype.Values
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Values, VariableType::JS_ANY(), Undefined());
+// Set.prototype.Entries
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Entries, VariableType::JS_ANY(), Undefined());
+// Set.prototype.ForEach
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, ForEach, VariableType::JS_ANY(), Undefined());
+// Set.prototype.Add
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Add, VariableType::JS_ANY(), Undefined());
+// Set.prototype.Delete
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Delete, VariableType::JS_ANY(), Undefined());
+// Set.prototype.Has
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Has, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Clear
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Clear, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Values
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Values, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Entries
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Entries, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Keys
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Keys, VariableType::JS_ANY(), Undefined());
+// Map.prototype.ForEach
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, ForEach, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Set
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Set, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Delete
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Delete, VariableType::JS_ANY(), Undefined());
+// Map.prototype.Has
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Has, VariableType::JS_ANY(), Undefined());
+#undef DECLARE_BUILTINS_COLLECTION_STUB_BUILDER
+} // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/builtins/linked_hashtable_stub_builder.cpp b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2404124e5e30fe631a2e419773de93d419762793
--- /dev/null
+++ b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.cpp
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ecmascript/compiler/builtins/linked_hashtable_stub_builder.h"
+
+#include "ecmascript/compiler/builtins/builtins_stubs.h"
+#include "ecmascript/compiler/new_object_stub_builder.h"
+#include "ecmascript/linked_hash_table.h"
+#include "ecmascript/js_set.h"
+#include "ecmascript/js_map.h"
+
+namespace panda::ecmascript::kungfu {
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+void LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Rehash(
+    GateRef linkedTable, GateRef newTable)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+
+    GateRef numberOfAllElements = Int32Add(GetNumberOfElements(linkedTable),
+        GetNumberOfDeletedElements(linkedTable));
+
+    DEFVARIABLE(desEntry, VariableType::INT32(), Int32(0));
+    DEFVARIABLE(currentDeletedElements, VariableType::INT32(), Int32(0));
+    SetNextTable(linkedTable, newTable);
+
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+
+    DEFVARIABLE(i, VariableType::INT32(), Int32(0));
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32LessThan(*i, numberOfAllElements), &next, &loopExit);
+        Bind(&next);
+
+        GateRef fromIndex = EntryToIndex(linkedTable, *i);
+        DEFVARIABLE(key, VariableType::JS_ANY(), GetElement(linkedTable, fromIndex));
+        Label hole(env);
+        Label notHole(env);
+        Branch(TaggedIsHole(*key), &hole, &notHole);
+        Bind(&hole);
+        {
+            currentDeletedElements = Int32Add(*currentDeletedElements, Int32(1));
+            SetDeletedNum(linkedTable, *i, *currentDeletedElements);
+            Jump(&loopEnd);
+        }
+        Bind(&notHole);
+        {
+            Label weak(env);
+            Label notWeak(env);
+            Branch(TaggedIsWeak(*key), &weak, &notWeak);
+            Bind(&weak);
+            {
+                key = RemoveTaggedWeakTag(*key);
+                Jump(&notWeak);
+            }
+            Bind(&notWeak);
+
+            GateRef hash = GetHash(*key);
+            GateRef bucket = HashToBucket(newTable, hash);
+            InsertNewEntry(newTable, bucket, *desEntry);
+            GateRef desIndex = EntryToIndex(newTable, *desEntry);
+
+            Label loopHead1(env);
+            Label loopEnd1(env);
+            Label next1(env);
+            Label loopExit1(env);
+            DEFVARIABLE(j, VariableType::INT32(), Int32(0));
+            Jump(&loopHead1);
+            LoopBegin(&loopHead1);
+            {
+                Branch(Int32LessThan(*j, Int32(LinkedHashTableObject::ENTRY_SIZE)), &next1, &loopExit1);
+                Bind(&next1);
+                GateRef ele = GetElement(linkedTable, Int32Add(fromIndex, *j));
+                SetElement(newTable, Int32Add(desIndex, *j), ele);
+                Jump(&loopEnd1);
+            }
+            Bind(&loopEnd1);
+            j = Int32Add(*j, Int32(1));
+            LoopEnd(&loopHead1);
+            Bind(&loopExit1);
+            desEntry = Int32Add(*desEntry, Int32(1));
+            Jump(&loopEnd);
+        }
+    }
+    Bind(&loopEnd);
+    i = Int32Add(*i, Int32(1));
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+
+    SetNumberOfElements(newTable, GetNumberOfElements(linkedTable));
+    SetNumberOfDeletedElements(newTable, Int32(0));
+    env->SubCfgExit();
+}
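+
+// GrowCapacity hands back the table the caller must use from here on: the original
+// table when it already has room for numberOfAddedElements, otherwise a newly
+// created table into which Rehash has copied every live entry.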
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::GrowCapacity(
+    GateRef linkedTable, GateRef numberOfAddedElements)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::JS_ANY(), linkedTable);
+
+    GateRef hasSufficient = HasSufficientCapacity(linkedTable, numberOfAddedElements);
+    Label grow(env);
+    Branch(hasSufficient, &exit, &grow);
+    Bind(&grow);
+    {
+        GateRef newCapacity = ComputeCapacity(Int32Add(GetNumberOfElements(linkedTable), numberOfAddedElements));
+        GateRef newTable = Create(newCapacity);
+        Rehash(linkedTable, newTable);
+        res = newTable;
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::ComputeCapacity(
+    GateRef atLeastSpaceFor)
+{
+    if constexpr (std::is_same_v<LinkedHashTableType, LinkedHashMap>) {
+        return TaggedGetInt(CallRuntime(glue_, RTSTUB_ID(LinkedHashMapComputeCapacity), {
+            IntToTaggedInt(atLeastSpaceFor) }));
+    } else {
+        return TaggedGetInt(CallRuntime(glue_, RTSTUB_ID(LinkedHashSetComputeCapacity), {
+            IntToTaggedInt(atLeastSpaceFor) }));
+    }
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+void LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::RemoveEntry(
+    GateRef linkedTable, GateRef entry)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    Label exit(env);
+    env->SubCfgEntry(&entryLabel);
+    DEFVARIABLE(i, VariableType::INT32(), Int32(0));
+
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+    GateRef index = EntryToIndex(linkedTable, entry);
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32LessThan(*i, Int32(LinkedHashTableObject::ENTRY_SIZE)), &next, &loopExit);
+        Bind(&next);
+
+        GateRef idx = Int32Add(index, *i);
+        SetElement(linkedTable, idx, Hole());
+        Jump(&loopEnd);
+    }
+    Bind(&loopEnd);
+    i = Int32Add(*i, Int32(1));
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+
+    GateRef newNofe = Int32Sub(GetNumberOfElements(linkedTable), Int32(1));
+    SetNumberOfElements(linkedTable, newNofe);
+    GateRef newNofd = Int32Add(GetNumberOfDeletedElements(linkedTable), Int32(1));
+    SetNumberOfDeletedElements(linkedTable, newNofd);
+    env->SubCfgExit();
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::HasSufficientCapacity(
+    GateRef linkedTable, GateRef numOfAddElements)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    Label exit(env);
+    env->SubCfgEntry(&entryLabel);
+    DEFVARIABLE(res, VariableType::BOOL(), False());
+
+    GateRef numberOfElements = GetNumberOfElements(linkedTable);
+    GateRef numOfDelElements = GetNumberOfDeletedElements(linkedTable);
+    GateRef nof = Int32Add(numberOfElements, numOfAddElements);
+    GateRef capacity = GetCapacity(linkedTable);
+    GateRef less = Int32LessThan(nof, capacity);
+    GateRef half = Int32Div(Int32Sub(capacity, nof), Int32(2));
+    GateRef lessHalf = Int32LessThanOrEqual(numOfDelElements, half);
+
+    Label lessLabel(env);
+    Branch(BoolAnd(less, lessHalf), &lessLabel, &exit);
+    Bind(&lessLabel);
+    {
+        Label need(env);
+        Branch(Int32LessThanOrEqual(Int32Add(nof, Int32Div(nof, Int32(2))), capacity), &need, &exit);
+        Bind(&need);
+        {
+            res = True();
+            Jump(&exit);
+        }
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::GetHash(GateRef key)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    Label exit(env);
+    env->SubCfgEntry(&entryLabel);
+    DEFVARIABLE(res, VariableType::INT32(), Int32(0));
+
+    Label symbolKey(env);
+    Label stringCheck(env);
+    Branch(TaggedIsSymbol(key), &symbolKey, &stringCheck);
+    Bind(&symbolKey);
+    {
+        res = Load(VariableType::INT32(), key, IntPtr(JSSymbol::HASHFIELD_OFFSET));
+        Jump(&exit);
+    }
+    Bind(&stringCheck);
+    Label stringKey(env);
+    Label slowGetHash(env);
+    Branch(TaggedIsString(key), &stringKey, &slowGetHash);
+    Bind(&stringKey);
+    {
+        res = GetHashcodeFromString(glue_, key);
+        Jump(&exit);
+    }
+    Bind(&slowGetHash);
+    {
+        // GetHash();
+        GateRef hash = CallRuntime(glue_, RTSTUB_ID(GetLinkedHash), { key });
+        res = GetInt32OfTInt(hash);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::HashObjectIsMatch(
+    GateRef key, GateRef other)
+{
+    return SameValueZero(glue_, key, other);
+}
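+
+// FindElement maps a key to its entry index, or -1 when absent: it hashes the key,
+// walks the chained entries of the matching bucket, skips holes left by deletions,
+// unwraps weak references, and compares candidates via HashObjectIsMatch
+// (SameValueZero).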
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::FindElement(
+    GateRef linkedTable, GateRef key)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+
+    DEFVARIABLE(res, VariableType::INT32(), Int32(-1));
+    Label exit(env);
+    Label isKey(env);
+    Branch(IsKey(key), &isKey, &exit);
+    Bind(&isKey);
+    {
+        GateRef hash = GetHash(key);
+        GateRef bucket = HashToBucket(linkedTable, hash);
+        GateRef index = BucketToIndex(bucket);
+        DEFVARIABLE(entry, VariableType::JS_ANY(), GetElement(linkedTable, index));
+        Label loopHead(env);
+        Label loopEnd(env);
+        Label next(env);
+        Label loopExit(env);
+
+        Jump(&loopHead);
+        LoopBegin(&loopHead);
+        {
+            Branch(TaggedIsHole(*entry), &loopExit, &next);
+            Bind(&next);
+
+            DEFVARIABLE(element, VariableType::JS_ANY(), GetKey(linkedTable, TaggedGetInt(*entry)));
+            Label notHole(env);
+            Branch(TaggedIsHole(*element), &loopEnd, &notHole);
+            Bind(&notHole);
+            {
+                Label weak(env);
+                Label notWeak(env);
+                Branch(TaggedIsWeak(*element), &weak, &notWeak);
+                Bind(&weak);
+                {
+                    element = RemoveTaggedWeakTag(*element);
+                    Jump(&notWeak);
+                }
+                Bind(&notWeak);
+                Label match(env);
+                Branch(HashObjectIsMatch(key, *element), &match, &loopEnd);
+                Bind(&match);
+                {
+                    res = TaggedGetInt(*entry);
+                    Jump(&loopExit);
+                }
+            }
+        }
+        Bind(&loopEnd);
+        entry = GetNextEntry(linkedTable, TaggedGetInt(*entry));
+        LoopEnd(&loopHead);
+        Bind(&loopExit);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::GetDeletedElementsAt(
+    GateRef linkedTable, GateRef entry)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::INT32(), Int32(0));
+    DEFVARIABLE(currentEntry, VariableType::INT32(), Int32Sub(entry, Int32(1)));
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32GreaterThanOrEqual(*currentEntry, Int32(0)), &next, &loopExit);
+        Bind(&next);
+        GateRef key = GetKey(linkedTable, *currentEntry);
+        Label hole(env);
+        Branch(TaggedIsHole(key), &hole, &loopEnd);
+        Bind(&hole);
+        {
+            GateRef deletedNum = GetDeletedNum(linkedTable, *currentEntry);
+            res = deletedNum;
+            Jump(&exit);
+        }
+    }
+    Bind(&loopEnd);
+    currentEntry = Int32Sub(*currentEntry, Int32(1));
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+    Jump(&exit);
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Create(GateRef numberOfElements)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+
+    // new LinkedHashTable
+    GateRef length = CalNewTaggedArrayLength(numberOfElements);
+    NewObjectStubBuilder newBuilder(this);
+    GateRef array = newBuilder.NewTaggedArray(glue_, length);
+
+    Label noException(env);
+    Branch(TaggedIsException(array), &exit, &noException);
+    Bind(&noException);
+    {
+        // SetNumberOfElements
+        SetNumberOfElements(array, Int32(0));
+        // SetNumberOfDeletedElements
+        SetNumberOfDeletedElements(array, Int32(0));
+        // SetCapacity
+        SetCapacity(array, numberOfElements);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    env->SubCfgExit();
+    return array;
+}
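+
+// Clear does not wipe the old table in place: it creates a minimum-capacity table,
+// links it as the old table's next table, and stores the sentinel -1 as the old
+// table's deleted-element count, so stale references can be forwarded to the new
+// table.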
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Clear(GateRef linkedTable)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+    Label setLinked(env);
+
+    GateRef newTable = Create(Int32(LinkedHashTableType::MIN_CAPACITY));
+    Label noException(env);
+    Branch(TaggedIsException(newTable), &exit, &noException);
+    Bind(&noException);
+
+    GateRef cap = GetCapacity(linkedTable);
+    Label capGreaterZero(env);
+    Branch(Int32GreaterThan(cap, Int32(0)), &capGreaterZero, &exit);
+    Bind(&capGreaterZero);
+    {
+        // NextTable
+        SetNextTable(linkedTable, newTable);
+        // SetNumberOfDeletedElements
+        SetNumberOfDeletedElements(linkedTable, Int32(-1));
+        Jump(&exit);
+    }
+
+    Bind(&exit);
+    env->SubCfgExit();
+    return newTable;
+}
+
+template GateRef LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject>::Clear(GateRef);
+template GateRef LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject>::Clear(GateRef);
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::ForEach(GateRef thisValue,
+    GateRef srcLinkedTable, GateRef numArgs)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::JS_ANY(), Undefined());
+
+    // caller checked callbackFnHandle callable
+    GateRef callbackFnHandle = GetCallArg0(numArgs);
+    GateRef thisArg = GetCallArg1(numArgs);
+    DEFVARIABLE(linkedTable, VariableType::JS_ANY(), srcLinkedTable);
+
+    GateRef numberOfElements = GetNumberOfElements(*linkedTable);
+    GateRef numberOfDeletedElements = GetNumberOfDeletedElements(*linkedTable);
+    GateRef tmpTotalElements = Int32Add(numberOfElements, numberOfDeletedElements);
+    DEFVARIABLE(totalElements, VariableType::INT32(), tmpTotalElements);
+    DEFVARIABLE(index, VariableType::INT32(), Int32(0));
+
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32LessThan(*index, *totalElements), &next, &loopExit);
+        Bind(&next);
+        GateRef valueIndex = *index;
+
+        GateRef key = GetKey(*linkedTable, *index);
+        index = Int32Add(*index, Int32(1));
+        Label keyNotHole(env);
+        Branch(TaggedIsHole(key), &loopEnd, &keyNotHole);
+        Bind(&keyNotHole);
+
+        GateRef value = key;
+        if constexpr (std::is_same_v<LinkedHashTableType, LinkedHashMap>) {
+            value = GetValue(*linkedTable, valueIndex);
+        }
+        Label hasException(env);
+        Label notHasException(env);
+        GateRef retValue = JSCallDispatch(glue_, callbackFnHandle, Int32(NUM_MANDATORY_JSFUNC_ARGS), 0,
+            Circuit::NullGate(), JSCallMode::CALL_THIS_ARG3_WITH_RETURN, { thisArg, value, key, thisValue });
+        Branch(HasPendingException(glue_), &hasException, &notHasException);
+        Bind(&hasException);
+        {
+            res = retValue;
+            Jump(&exit);
+        }
+        Bind(&notHasException);
+        {
+            // Maybe add or delete, get next table
+            GateRef tmpNextTable = GetNextTable(*linkedTable);
+            DEFVARIABLE(nextTable, VariableType::JS_ANY(), tmpNextTable);
+            Label loopHead1(env);
+            Label loopEnd1(env);
+            Label next1(env);
+            Label loopExit1(env);
+            Jump(&loopHead1);
+            LoopBegin(&loopHead1);
+            {
+                Branch(TaggedIsHole(*nextTable), &loopExit1, &next1);
+                Bind(&next1);
+                GateRef deleted = GetDeletedElementsAt(*linkedTable, *index);
+                index = Int32Sub(*index, deleted);
+                linkedTable = *nextTable;
+                nextTable = GetNextTable(*linkedTable);
+                Jump(&loopEnd1);
+            }
+            Bind(&loopEnd1);
+            LoopEnd(&loopHead1);
+            Bind(&loopExit1);
+            // update totalElements
+            GateRef numberOfEle = GetNumberOfElements(*linkedTable);
+            GateRef numberOfDeletedEle = GetNumberOfDeletedElements(*linkedTable);
+            totalElements = Int32Add(numberOfEle, numberOfDeletedEle);
+            Jump(&loopEnd);
+        }
+    }
+    Bind(&loopEnd);
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+    Jump(&exit);
+
+    Bind(&exit);
+    env->SubCfgExit();
+    return *res;
+}
+
+template GateRef LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject>::ForEach(GateRef thisValue,
+    GateRef linkedTable, GateRef numArgs);
+template GateRef LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject>::ForEach(GateRef thisValue,
+    GateRef linkedTable, GateRef numArgs);
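+
+// Insert overwrites the value in place when the key already exists; otherwise it
+// grows the table if necessary, links a new entry into the target bucket's chain,
+// and returns the (possibly reallocated) table the caller must keep using.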
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Insert(
+    GateRef linkedTable, GateRef key, GateRef value)
+{
+    auto env = GetEnvironment();
+    Label cfgEntry(env);
+    env->SubCfgEntry(&cfgEntry);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::JS_ANY(), linkedTable);
+    GateRef entry = FindElement(linkedTable, key);
+    Label findEntry(env);
+    Label notFind(env);
+    Branch(Int32Equal(entry, Int32(-1)), &notFind, &findEntry);
+    Bind(&findEntry);
+    {
+        SetValue(linkedTable, entry, value);
+        Jump(&exit);
+    }
+    Bind(&notFind);
+    {
+        GateRef newTable = GrowCapacity(linkedTable, Int32(1));
+        res = newTable;
+        GateRef hash = GetHash(key);
+        GateRef bucket = HashToBucket(newTable, hash);
+        GateRef numberOfElements = GetNumberOfElements(newTable);
+
+        GateRef newEntry = Int32Add(numberOfElements, GetNumberOfDeletedElements(newTable));
+        InsertNewEntry(newTable, bucket, newEntry);
+        SetKey(newTable, newEntry, key);
+        SetValue(newTable, newEntry, value);
+        GateRef newNumberOfElements = Int32Add(numberOfElements, Int32(1));
+        SetNumberOfElements(newTable, newNumberOfElements);
+        Jump(&exit);
+    }
+
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template GateRef LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject>::Insert(
+    GateRef linkedTable, GateRef key, GateRef value);
+template GateRef LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject>::Insert(
+    GateRef linkedTable, GateRef key, GateRef value);
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Delete(
+    GateRef linkedTable, GateRef key)
+{
+    auto env = GetEnvironment();
+    Label cfgEntry(env);
+    env->SubCfgEntry(&cfgEntry);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::JS_ANY(), TaggedFalse());
+    GateRef entry = FindElement(linkedTable, key);
+    Label findEntry(env);
+    Branch(Int32Equal(entry, Int32(-1)), &exit, &findEntry);
+    Bind(&findEntry);
+    {
+        RemoveEntry(linkedTable, entry);
+        res = TaggedTrue();
+        Jump(&exit);
+    }
+
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template GateRef LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject>::Delete(
+    GateRef linkedTable, GateRef key);
+template GateRef LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject>::Delete(
+    GateRef linkedTable, GateRef key);
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Has(
+    GateRef linkedTable, GateRef key)
+{
+    auto env = GetEnvironment();
+    Label cfgEntry(env);
+    env->SubCfgEntry(&cfgEntry);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::JS_ANY(), TaggedFalse());
+    GateRef entry = FindElement(linkedTable, key);
+    Label findEntry(env);
+    Branch(Int32Equal(entry, Int32(-1)), &exit, &findEntry);
+    Bind(&findEntry);
+    {
+        res = TaggedTrue();
+        Jump(&exit);
+    }
+
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template GateRef LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject>::Has(
+    GateRef linkedTable, GateRef key);
+template GateRef LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject>::Has(
+    GateRef linkedTable, GateRef key);
+} // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/builtins/linked_hashtable_stub_builder.h b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..74d60f3b52d48cc7b22decd44fdd8e018caf8d23
--- /dev/null
+++ b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_BUILTINS_LINKED_HASHTABLE_STUB_BUILDER_H
+#define ECMASCRIPT_COMPILER_BUILTINS_LINKED_HASHTABLE_STUB_BUILDER_H
+#include "ecmascript/compiler/stub_builder-inl.h"
+
+namespace panda::ecmascript::kungfu {
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+class LinkedHashTableStubBuilder : public BuiltinsStubBuilder {
+public:
+    explicit LinkedHashTableStubBuilder(BuiltinsStubBuilder *parent, GateRef glue)
+        : BuiltinsStubBuilder(parent), glue_(glue) {}
+    ~LinkedHashTableStubBuilder() override = default;
+    NO_MOVE_SEMANTIC(LinkedHashTableStubBuilder);
+    NO_COPY_SEMANTIC(LinkedHashTableStubBuilder);
+    void GenerateCircuit() override {}
+
+    GateRef Create(GateRef numberOfElements);
+    GateRef Clear(GateRef linkedTable);
+    GateRef ForEach(GateRef thisValue, GateRef linkedTable, GateRef numArgs);
+    GateRef Insert(GateRef linkedTable, GateRef key, GateRef value);
+    GateRef Delete(GateRef linkedTable, GateRef key);
+    GateRef Has(GateRef linkedTable, GateRef key);
+
+private:
+    GateRef IsKey(GateRef key)
+    {
+        return TaggedIsNotHole(key);
+    }
+
+    GateRef HashToBucket(GateRef linkedTable, GateRef hash)
+    {
+        GateRef cap = GetCapacity(linkedTable);
+        return Int32And(hash, Int32Sub(cap, Int32(1)));
+    }
+
+    GateRef BucketToIndex(GateRef bucket)
+    {
+        return Int32Add(bucket, Int32(LinkedHashTableType::ELEMENTS_START_INDEX));
+    }
+
+    GateRef GetHash(GateRef key);
+    GateRef HashObjectIsMatch(GateRef key, GateRef other);
+    GateRef FindElement(GateRef linkedTable, GateRef key);
+    GateRef GetKey(GateRef linkedTable, GateRef entry)
+    {
+        GateRef index = EntryToIndex(linkedTable, entry);
+        return GetElement(linkedTable, index);
+    }
+
+    void SetKey(GateRef linkedTable, GateRef entry, GateRef key)
+    {
+        GateRef index = EntryToIndex(linkedTable, entry);
+        SetElement(linkedTable, index, key);
+    }
+
+    GateRef GetValue(GateRef linkedTable, GateRef entry)
+    {
+        GateRef index = EntryToIndex(linkedTable, entry);
+        GateRef valueIndex = Int32(LinkedHashTableObject::ENTRY_VALUE_INDEX);
+        return GetElement(linkedTable, Int32Add(index, valueIndex));
+    }
+
+    void SetValue(GateRef linkedTable, GateRef entry, GateRef value)
+    {
+        GateRef index = EntryToIndex(linkedTable, entry);
+        GateRef valueIndex = Int32(LinkedHashTableObject::ENTRY_VALUE_INDEX);
+        SetElement(linkedTable, Int32Add(index, valueIndex), value);
+    }
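+
+    // Layout of the backing TaggedArray: a fixed header (element count, deleted
+    // count, capacity, next-table link), then `capacity` bucket heads, then one
+    // record per entry holding ENTRY_SIZE payload slots plus a next-entry link.
+    // EntryToIndex maps an entry number to the first slot of its record.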
+    GateRef EntryToIndex(GateRef linkedTable, GateRef entry)
+    {
+        int32_t startIndex = LinkedHashTableType::ELEMENTS_START_INDEX;
+        int32_t entrySize = LinkedHashTableObject::ENTRY_SIZE;
+        GateRef sumEntrySize = Int32Mul(entry, Int32Add(Int32(entrySize), Int32(1)));
+        return Int32Add(Int32(startIndex), Int32Add(GetCapacity(linkedTable), sumEntrySize));
+    }
+
+    GateRef GetElement(GateRef linkedTable, GateRef index)
+    {
+        return GetValueFromTaggedArray(linkedTable, index);
+    }
+
+    void SetElement(GateRef linkedTable, GateRef index, GateRef value)
+    {
+        SetValueToTaggedArray(VariableType::JS_ANY(), glue_, linkedTable, index, value);
+    }
+
+    GateRef GetDeletedNum(GateRef linkedTable, GateRef entry)
+    {
+        return TaggedGetInt(GetNextEntry(linkedTable, entry));
+    }
+
+    void SetDeletedNum(GateRef linkedTable, GateRef entry, GateRef num)
+    {
+        SetNextEntry(linkedTable, entry, IntToTaggedInt(num));
+    }
+
+    GateRef GetNextEntry(GateRef linkedTable, GateRef entry)
+    {
+        GateRef entryIndex = EntryToIndex(linkedTable, entry);
+        return GetElement(linkedTable, Int32Add(entryIndex, Int32(LinkedHashTableObject::ENTRY_SIZE)));
+    }
+
+    void SetNextEntry(GateRef linkedTable, GateRef entry, GateRef nextEntry)
+    {
+        GateRef entryIndex = EntryToIndex(linkedTable, entry);
+        SetElement(linkedTable, Int32Add(entryIndex, Int32(LinkedHashTableObject::ENTRY_SIZE)), nextEntry);
+    }
+
+    GateRef GetCapacity(GateRef linkedTable)
+    {
+        GateRef capacityIndex = Int32(LinkedHashTableType::CAPACITY_INDEX);
+        GateRef capacity = GetValueFromTaggedArray(linkedTable, capacityIndex);
+        return TaggedGetInt(capacity);
+    }
+
+    void SetCapacity(GateRef linkedTable, GateRef numberOfElements)
+    {
+        GateRef capacityIndex = Int32(LinkedHashTableType::CAPACITY_INDEX);
+        SetValueToTaggedArray(VariableType::JS_NOT_POINTER(), glue_, linkedTable, capacityIndex,
+                              IntToTaggedInt(numberOfElements));
+    }
+
+    GateRef GetNumberOfElements(GateRef linkedTable)
+    {
+        int32_t elementsIndex = LinkedHashTableType::NUMBER_OF_ELEMENTS_INDEX;
+        GateRef tmpNumberOfElements = GetValueFromTaggedArray(linkedTable, Int32(elementsIndex));
+        return TaggedGetInt(tmpNumberOfElements);
+    }
+
+    void SetNumberOfElements(GateRef linkedTable, GateRef num)
+    {
+        int32_t elementsIndex = LinkedHashTableType::NUMBER_OF_ELEMENTS_INDEX;
+        SetValueToTaggedArray(VariableType::JS_NOT_POINTER(), glue_, linkedTable, Int32(elementsIndex),
+                              IntToTaggedInt(num));
+    }
+
+    GateRef GetNumberOfDeletedElements(GateRef linkedTable)
+    {
+        GateRef deletedIndex = Int32(LinkedHashTableType::NUMBER_OF_DELETED_ELEMENTS_INDEX);
+        GateRef tmpNumberOfDeletedElements = GetValueFromTaggedArray(linkedTable, deletedIndex);
+        return TaggedGetInt(tmpNumberOfDeletedElements);
+    }
+
+    void SetNumberOfDeletedElements(GateRef linkedTable, GateRef num)
+    {
+        GateRef deletedIndex = Int32(LinkedHashTableType::NUMBER_OF_DELETED_ELEMENTS_INDEX);
+        SetValueToTaggedArray(VariableType::JS_NOT_POINTER(), glue_, linkedTable, deletedIndex, IntToTaggedInt(num));
+    }
+
+    GateRef GetNextTable(GateRef linkedTable)
+    {
+        GateRef nextTableIndex = Int32(LinkedHashTableType::NEXT_TABLE_INDEX);
+        return GetValueFromTaggedArray(linkedTable, nextTableIndex);
+    }
+
+    void SetNextTable(GateRef linkedTable, GateRef nextTable)
+    {
+        GateRef nextTableIndex = Int32(LinkedHashTableType::NEXT_TABLE_INDEX);
+        SetValueToTaggedArray(VariableType::JS_POINTER(), glue_, linkedTable, nextTableIndex, nextTable);
+    }
+
+    GateRef CalNewTaggedArrayLength(GateRef numberOfElements)
+    {
+        GateRef startIndex = Int32(LinkedHashTableType::ELEMENTS_START_INDEX);
+        GateRef entrySize = Int32(LinkedHashTableObject::ENTRY_SIZE);
+        GateRef nEntrySize = Int32Mul(numberOfElements, Int32Add(entrySize, Int32(1)));
+        GateRef length = Int32Add(startIndex, Int32Add(numberOfElements, nEntrySize));
+        return length;
+    }
+
+    void InsertNewEntry(GateRef linkedTable, GateRef bucket, GateRef entry)
+    {
+        GateRef bucketIndex = BucketToIndex(bucket);
+        GateRef previousEntry = GetElement(linkedTable, bucketIndex);
+        SetNextEntry(linkedTable, entry, previousEntry);
+        SetElement(linkedTable, bucketIndex, IntToTaggedInt(entry));
+    }
+
+    GateRef GetDeletedElementsAt(GateRef linkedTable, GateRef entry);
+    GateRef GrowCapacity(GateRef linkedTable, GateRef numberOfAddedElements);
+    GateRef HasSufficientCapacity(GateRef linkedTable, GateRef numOfAddElements);
+    void Rehash(GateRef linkedTable, GateRef newTable);
+    GateRef ComputeCapacity(GateRef atLeastSpaceFor);
+    void RemoveEntry(GateRef linkedTable, GateRef entry);
+
+    GateRef glue_;
+};
+} // namespace panda::ecmascript::kungfu
+#endif  // ECMASCRIPT_COMPILER_BUILTINS_LINKED_HASHTABLE_STUB_BUILDER_H
diff --git a/ecmascript/compiler/builtins_lowering.cpp b/ecmascript/compiler/builtins_lowering.cpp
index 7fdd479d346b906adaadd4836359c0ab2dc1867b..0d4bf368e807fa72e2a6c4ff753f5fc3b5e3e95c 100644
--- a/ecmascript/compiler/builtins_lowering.cpp
+++ b/ecmascript/compiler/builtins_lowering.cpp
@@ -36,6 +36,12 @@ void BuiltinLowering::LowerTypedCallBuitin(GateRef gate)
         case BUILTINS_STUB_ID(LocaleCompare):
             LowerTypedLocaleCompare(gate);
             break;
+        case BUILTINS_STUB_ID(SORT):
+            LowerTypedArraySort(gate);
+            break;
+        case BUILTINS_STUB_ID(STRINGIFY):
+            LowerTypedStringify(gate);
+            break;
         default:
             break;
     }
@@ -237,6 +243,14 @@ void BuiltinLowering::LowerTypedLocaleCompare(GateRef gate)
     ReplaceHirWithValue(gate, result);
 }
 
+void BuiltinLowering::LowerTypedArraySort(GateRef gate)
+{
+    GateRef glue = acc_.GetGlueFromArgList();
+    GateRef thisObj = acc_.GetValueIn(gate, 0);
+    GateRef result = LowerCallRuntime(glue, gate, RTSTUB_ID(ArraySort), { thisObj });
+    ReplaceHirWithValue(gate, result);
+}
+
 GateRef BuiltinLowering::LowerCallTargetCheck(Environment *env, GateRef gate)
 {
     builder_.SetEnvironment(env);
@@ -266,10 +280,24 @@ GateRef BuiltinLowering::CheckPara(GateRef gate, GateRef funcCheck)
         case BuiltinsStubCSigns::ID::SQRT:
             // NumberSpeculativeRetype is checked
             return funcCheck;
+        case BuiltinsStubCSigns::ID::LocaleCompare:
+        case BuiltinsStubCSigns::ID::SORT:
+            // Don't need check para
+            return funcCheck;
         default: {
             LOG_COMPILER(FATAL) << "this branch is unreachable";
             UNREACHABLE();
         }
     }
 }
+
+void BuiltinLowering::LowerTypedStringify(GateRef gate)
+{
+    GateRef glue = acc_.GetGlueFromArgList();
+    GateRef value = acc_.GetValueIn(gate, 0);
+    std::vector<GateRef> args;
+    args.emplace_back(value);
+    GateRef result = LowerCallRuntime(glue, gate, RTSTUB_ID(FastStringify), args);
+    ReplaceHirWithValue(gate, result);
+}
 } // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/builtins_lowering.h b/ecmascript/compiler/builtins_lowering.h
index 2853ed1950ac17e5a7eee6e7d84c3cfbb40a8e87..068bcc494f39ec3cfd0e3111e2007491040f38d7 100644
--- a/ecmascript/compiler/builtins_lowering.h
+++ b/ecmascript/compiler/builtins_lowering.h
@@ -31,6 +31,7 @@ public:
     void LowerTypedSqrt(GateRef gate);
     GateRef CheckPara(GateRef gate, GateRef funcCheck);
     void LowerTypedLocaleCompare(GateRef gate);
+    void LowerTypedArraySort(GateRef gate);
 
 private:
     void LowerTypedTrigonometric(GateRef gate, BuiltinsStubCSigns::ID id);
@@ -41,6 +42,7 @@ private:
     GateRef LowerCallRuntime(GateRef glue, GateRef gate, int index, const std::vector<GateRef> &args,
                              bool useLabel = false);
     void ReplaceHirWithValue(GateRef hirGate, GateRef value, bool noThrow = false);
+    void LowerTypedStringify(GateRef gate);
 
     Circuit *circuit_ {nullptr};
     CircuitBuilder builder_;
diff --git a/ecmascript/compiler/bytecode_circuit_builder.cpp b/ecmascript/compiler/bytecode_circuit_builder.cpp
index b1dbee7c98ddb20cc21e3328f60bf7090abf920d..e2ea618e485909d5cacc0caf747f3f21e1494336 100644
--- a/ecmascript/compiler/bytecode_circuit_builder.cpp
+++ b/ecmascript/compiler/bytecode_circuit_builder.cpp
@@ -1055,8 +1055,8 @@ void BytecodeCircuitBuilder::BuildSubCircuit()
         ASSERT(stateCur != Circuit::NullGate());
         ASSERT(dependCur != Circuit::NullGate());
         if (IsEntryBlock(bb.id)) {
-            if (!isInline_) {
-                stateCur =
circuit_->NewGate(circuit_->UpdateHotness(), {stateCur, dependCur}); + if (NeedCheckSafePointAndStackOver()) { + stateCur = circuit_->NewGate(circuit_->CheckSafePointAndStackOver(), {stateCur, dependCur}); dependCur = stateCur; } auto &bbNext = graph_[bb.id + 1]; @@ -1125,7 +1125,7 @@ size_t BytecodeCircuitBuilder::LoopExitCount(size_t from, size_t to) ASSERT(bbNext.loopDepth >= headDep); size_t nextDep = bbNext.loopDepth - headDep; ASSERT(bb.loopDepth >= nextDep); - return bb.loopDepth > nextDep; + return bb.loopDepth - nextDep; } GateRef BytecodeCircuitBuilder::NewValueFromPredBB(BytecodeRegion &bb, size_t idx, @@ -1420,9 +1420,7 @@ void BytecodeCircuitBuilder::BuildCircuit() } auto type = typeRecorder_.GetType(bcIndex); - if (HasValidType(type)) { - gateAcc_.SetGateType(gate, type); - } + gateAcc_.SetGateType(gate, type); auto pgoType = typeRecorder_.GetOrUpdatePGOType(tsManager_, gateAcc_.TryGetPcOffset(gate), type); gateAcc_.TrySetPGOType(gate, pgoType); @@ -1458,6 +1456,15 @@ void BytecodeCircuitBuilder::BuildCircuit() gateAcc_.NewIn(gate, inIdx, defVreg); } else { GateRef defAcc = ResolveDef(bb, bcIndex, 0, true); + if (!Bytecodes::IsCallOp(bytecodeInfo.GetOpcode())) { + gateAcc_.NewIn(gate, inIdx, defAcc); + continue; + } + auto oldGt = gateAcc_.GetGateType(defAcc).GetGTRef(); + GateType callTargetType = typeRecorder_.GetCallTargetType(bcIndex); + if (!tsManager_->MethodOffsetIsVaild(oldGt) && !callTargetType.IsAnyType()) { + gateAcc_.SetGateType(defAcc, callTargetType); + } gateAcc_.NewIn(gate, inIdx, defAcc); } } diff --git a/ecmascript/compiler/bytecode_circuit_builder.h b/ecmascript/compiler/bytecode_circuit_builder.h index 5e2e8ad5de43fe552a5ba97020988bbc99ccf899..df092e37fcb41856622230de854e6d1dc0c1a884 100644 --- a/ecmascript/compiler/bytecode_circuit_builder.h +++ b/ecmascript/compiler/bytecode_circuit_builder.h @@ -265,10 +265,12 @@ public: std::string name, const CString &recordName, PGOProfilerDecoder *decoder, - bool isInline) + bool isInline, + bool enableOptTrackField) : tsManager_(tsManager), circuit_(circuit), file_(jsPandaFile), method_(methodLiteral), gateAcc_(circuit), argAcc_(circuit, method_), - typeRecorder_(jsPandaFile, method_, tsManager, recordName, decoder, methodPCInfo, bytecodes), + typeRecorder_(jsPandaFile, method_, tsManager, recordName, decoder, methodPCInfo, bytecodes, + enableOptTrackField), hasTypes_(hasTypes), enableLog_(enableLog), enableTypeLowering_(enableTypeLowering), pcOffsets_(methodPCInfo.pcOffsets), frameStateBuilder_(this, circuit, methodLiteral), @@ -315,6 +317,19 @@ public: return jsGatesToByteCode_.at(gate); } + bool IsBcIndexByGate(GateRef gate) const + { + if (jsGatesToByteCode_.find(gate) == jsGatesToByteCode_.end()) { + return false; + } + return true; + } + + bool NeedCheckSafePointAndStackOver() const + { + return !isInline_ && !method_->IsNoGC(); + } + void UpdateBcIndexGate(GateRef gate, uint32_t bcIndex) { ASSERT(gateAcc_.GetOpCode(gate) == OpCode::JS_BYTECODE); @@ -417,6 +432,18 @@ public: return typeRecorder_.GetRwOpType(GetPcOffsetByGate(gate)); } + ElementsKind GetElementsKind(GateRef gate) const + { + return typeRecorder_.GetElementsKind(GetPcOffsetByGate(gate)); + } + + ElementsKind GetArrayElementsKind(GateRef gate) const + { + auto type = typeRecorder_.GetType(GetBcIndexByGate(gate)); + auto pgoType = typeRecorder_.GetOrUpdatePGOType(tsManager_, gateAcc_.TryGetPcOffset(gate), type); + return typeRecorder_.GetElementsKind(pgoType); + } + bool ShouldPGOTypeInfer(GateRef gate) const { return 
jsGatesToByteCode_.find(gate) != jsGatesToByteCode_.end();
@@ -488,6 +515,8 @@ public:
         return (!HasTryCatch()) && (loopHeads_.size() != 0);
     }
 
+    size_t LoopExitCount(size_t from, size_t to);
+
     GateRef GetFrameArgs() const
     {
         return argAcc_.GetFrameArgs();
@@ -568,7 +597,6 @@ private:
     void PrintDefsitesInfo(const std::unordered_map> &defsitesInfo);
     void BuildRegionInfo();
     void BuildFrameArgs();
-    size_t LoopExitCount(size_t from, size_t to);
     void CollectLoopBack();
     void ComputeLoopDepth(size_t loopHead);
     void CountLoopBackEdge(size_t fromId, size_t toId);
@@ -578,11 +606,6 @@ private:
         return (IsFirstBasicBlock(bbId) && bcIndex == 0 && reg == GetNumberVRegs());
     }
 
-    inline bool HasValidType(GateType type)
-    {
-        return HasTypes() && !type.IsAnyType() && !tsManager_->IsPGOGT(type.GetGTRef());
-    }
-
     TSManager *tsManager_;
     Circuit *circuit_;
     std::vector<std::vector<GateRef>> byteCodeToJSGates_;
diff --git a/ecmascript/compiler/bytecode_info_collector.cpp b/ecmascript/compiler/bytecode_info_collector.cpp
index 6a03a7059c6b01ff083a3be4767c5beae59a943a..436d9a705a8befc9813aa304095535ccd92fa4b3 100644
--- a/ecmascript/compiler/bytecode_info_collector.cpp
+++ b/ecmascript/compiler/bytecode_info_collector.cpp
@@ -15,10 +15,10 @@
 
 #include "ecmascript/compiler/bytecode_info_collector.h"
 
-#include "ecmascript/base/path_helper.h"
 #include "ecmascript/compiler/type_recorder.h"
 #include "ecmascript/interpreter/interpreter-inl.h"
 #include "ecmascript/jspandafile/type_literal_extractor.h"
+#include "ecmascript/module/module_path_helper.h"
 #include "ecmascript/pgo_profiler/pgo_profiler_decoder.h"
 #include "ecmascript/ts_types/ts_type_parser.h"
 #include "libpandafile/code_data_accessor.h"
@@ -34,7 +34,7 @@ BytecodeInfoCollector::BytecodeInfoCollector(EcmaVM *vm, JSPandaFile *jsPandaFil
                                              size_t maxAotMethodSize, bool enableCollectLiteralInfo)
     : vm_(vm),
       jsPandaFile_(jsPandaFile),
-      bytecodeInfo_(maxAotMethodSize),
+      bytecodeInfo_(maxAotMethodSize, jsPandaFile),
       pfDecoder_(pfDecoder),
       enableCollectLiteralInfo_(enableCollectLiteralInfo)
 {
@@ -67,7 +67,7 @@ void BytecodeInfoCollector::ProcessClasses()
     MethodLiteral *methods = jsPandaFile_->GetMethodLiterals();
     const panda_file::File *pf = jsPandaFile_->GetPandaFile();
     size_t methodIdx = 0;
-    std::map<const uint8_t *, std::pair<size_t, uint32_t>> processedInsns;
+    std::map<uint32_t, std::pair<size_t, uint32_t>> processedMethod;
     Span<const uint32_t> classIndexes = jsPandaFile_->GetClasses();
 
     auto &recordNames = bytecodeInfo_.GetRecordNames();
@@ -82,7 +82,7 @@
         panda_file::ClassDataAccessor cda(*pf, classId);
         CString desc = utf::Mutf8AsCString(cda.GetDescriptor());
         const CString recordName = JSPandaFile::ParseEntryPoint(desc);
-        cda.EnumerateMethods([this, methods, &methodIdx, pf, &processedInsns,
+        cda.EnumerateMethods([this, methods, &methodIdx, pf, &processedMethod,
             &recordNames, &methodPcInfos, &recordName, &methodIndexes, &classConstructIndexes]
             (panda_file::MethodDataAccessor &mda) {
             auto methodId = mda.GetMethodId();
@@ -114,19 +114,19 @@
             panda_file::CodeDataAccessor codeDataAccessor(*pf, codeId.value());
             uint32_t codeSize = codeDataAccessor.GetCodeSize();
             const uint8_t *insns = codeDataAccessor.GetInstructions();
-            auto it = processedInsns.find(insns);
-            if (it == processedInsns.end()) {
+            auto it = processedMethod.find(methodOffset);
+            if (it == processedMethod.end()) {
                 std::vector<std::string> classNameVec;
                 CollectMethodPcsFromBC(codeSize, insns, methodLiteral, classNameVec, recordName, methodOffset, classConstructIndexes);
-                processedInsns[insns] = std::make_pair(methodPcInfos.size() - 1, methodOffset);
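// [Editor's sketch, not part of the patch] The hunk above swaps the dedup key for
// method PC-info from the raw instruction pointer to the method offset, so two
// methods that share one bytecode body no longer collapse into a single entry.
// A minimal standalone model of the new bookkeeping (hypothetical names):
#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>

using MethodOffset = uint32_t;
// value: (index into methodPcInfos, method offset), mirroring processedMethod above
using PcInfoEntry = std::pair<std::size_t, MethodOffset>;

std::map<MethodOffset, PcInfoEntry> processedMethod;

bool RecordOnce(MethodOffset offset, std::size_t pcInfoIndex)
{
    // emplace() leaves the map unchanged when this offset was already visited
    return processedMethod.emplace(offset, std::make_pair(pcInfoIndex, offset)).second;
}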
+ processedMethod[methodOffset] = std::make_pair(methodPcInfos.size() - 1, methodOffset); // collect className and literal offset for type infer if (EnableCollectLiteralInfo()) { CollectClassLiteralInfo(methodLiteral, classNameVec); } } - SetMethodPcInfoIndex(methodOffset, processedInsns[insns]); + SetMethodPcInfoIndex(methodOffset, processedMethod[methodOffset]); jsPandaFile_->SetMethodLiteralToMap(methodLiteral); pfDecoder_.MatchAndMarkMethod(recordName, name.c_str(), methodId); }); @@ -242,6 +242,7 @@ void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const u auto &pcOffsets = methodPcInfos.back().pcOffsets; const uint8_t *curPc = bcIns.GetAddress(); bool canFastCall = true; + bool noGC = true; while (bcIns.GetAddress() != bcInsLast.GetAddress()) { bool fastCallFlag = true; @@ -250,16 +251,20 @@ void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const u canFastCall = false; } CollectModuleInfoFromBC(bcIns, method, recordName); - CollectConstantPoolIndexInfoFromBC(bcIns, method); + CollectConstantPoolIndexInfoFromBC(bcIns, method, bcIndex); pgoBCInfo_.Record(bcIns, bcIndex, recordName, method); + if (noGC && !bytecodes_.GetBytecodeMetaData(curPc).IsNoGC()) { + noGC = false; + } curPc = bcIns.GetAddress(); auto nextInst = bcIns.GetNext(); bcIns = nextInst; pcOffsets.emplace_back(curPc); bcIndex++; } - bytecodeInfo_.SetMethodOffsetToCanFastCall(methodOffset, canFastCall); + bytecodeInfo_.SetMethodOffsetToFastCallInfo(methodOffset, canFastCall, noGC); method->SetIsFastCall(canFastCall); + method->SetNoGCBit(noGC); } void BytecodeInfoCollector::SetMethodPcInfoIndex(uint32_t methodOffset, @@ -620,7 +625,8 @@ void BytecodeInfoCollector::CollectRecordReferenceREL() { auto &recordNames = bytecodeInfo_.GetRecordNames(); for (auto &record : recordNames) { - if (jsPandaFile_->HasTSTypes(record) && jsPandaFile_->IsModule(vm_->GetJSThread(), record)) { + JSRecordInfo info = jsPandaFile_->FindRecordInfo(record); + if (jsPandaFile_->HasTSTypes(info)|| jsPandaFile_->IsModule(info)) { CollectRecordImportInfo(record); CollectRecordExportInfo(record); } @@ -650,11 +656,11 @@ void BytecodeInfoCollector::CollectRecordImportInfo(const CString &recordName) for (size_t index = 0; index < length; index++) { JSTaggedValue resolvedBinding = moduleArray->Get(index); // if resolvedBinding.IsHole(), means that importname is * or it belongs to empty Aot module. 
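// [Editor's sketch, not part of the patch] The CollectMethodPcsFromBC hunk above
// derives the per-method noGC flag as an AND-fold over per-instruction metadata:
// a method keeps noGC == true only while every bytecode it contains carries the
// NO_GC flag (added to BytecodeFlags in bytecodes.h further below). Minimal model:
#include <cstdint>
#include <vector>

constexpr uint32_t NO_GC_FLAG = 1u << 12;  // mirrors BytecodeFlags::NO_GC

bool MethodIsNoGC(const std::vector<uint32_t> &perInstructionFlags)
{
    for (uint32_t flags : perInstructionFlags) {
        if ((flags & NO_GC_FLAG) == 0) {
            return false;  // one GC-visible bytecode disqualifies the whole method
        }
    }
    return true;
}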
-        if (resolvedBinding.IsHole()) {
+        if (!resolvedBinding.IsResolvedIndexBinding()) {
             continue;
         }
         ResolvedIndexBinding *binding = ResolvedIndexBinding::Cast(resolvedBinding.GetTaggedObject());
-        CString resolvedRecord = ModuleManager::GetRecordName(binding->GetModule());
+        CString resolvedRecord = SourceTextModule::GetRecordName(binding->GetModule());
         auto bindingIndex = binding->GetIndex();
         if (bytecodeInfo_.HasExportIndexToRecord(resolvedRecord, bindingIndex)) {
             bytecodeInfo_.AddImportRecordInfoToRecord(recordName, resolvedRecord, index, bindingIndex);
@@ -682,11 +688,11 @@ void BytecodeInfoCollector::CollectRecordExportInfo(const CString &recordName)
         starExportEntry.Update(starEntriesArray->Get(index));
         JSTaggedValue moduleRequest = starExportEntry->GetModuleRequest();
         CString moduleRequestName = ConvertToString(EcmaString::Cast(moduleRequest.GetTaggedObject()));
-        if (base::PathHelper::IsNativeModuleRequest(moduleRequestName)) {
+        if (ModulePathHelper::IsNativeModuleRequest(moduleRequestName)) {
             return;
         }
         CString baseFileName = jsPandaFile_->GetJSPandaFileDesc();
-        CString entryPoint = base::PathHelper::ConcatFileNameWithMerge(thread, jsPandaFile_,
+        CString entryPoint = ModulePathHelper::ConcatFileNameWithMerge(thread, jsPandaFile_,
             baseFileName, recordName, moduleRequestName);
         if (jsPandaFile_->HasTypeSummaryOffset(entryPoint)) {
             bytecodeInfo_.AddStarExportToRecord(recordName, entryPoint);
@@ -704,7 +710,7 @@ void BytecodeInfoCollector::RearrangeInnerMethods()
 }
 
 void BytecodeInfoCollector::CollectConstantPoolIndexInfoFromBC(const BytecodeInstruction &bcIns,
-                                                               const MethodLiteral *method)
+                                                               const MethodLiteral *method, uint32_t bcIndex)
 {
     BytecodeInstruction::Opcode opcode = static_cast<BytecodeInstruction::Opcode>(bcIns.GetOpcode());
     uint32_t methodOffset = method->GetMethodId().GetOffset();
@@ -738,7 +744,7 @@
         case BytecodeInstruction::Opcode::STGLOBALVAR_IMM16_ID16:
         case BytecodeInstruction::Opcode::LDBIGINT_ID16: {
             auto index = bcIns.GetId().AsRawValue();
-            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::STRING, index, methodOffset);
+            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::STRING, index, methodOffset, bcIndex);
             break;
         }
         case BytecodeInstruction::Opcode::DEFINEFUNC_IMM8_ID16_IMM8:
@@ -746,33 +752,35 @@
         case BytecodeInstruction::Opcode::DEFINEMETHOD_IMM8_ID16_IMM8:
         case BytecodeInstruction::Opcode::DEFINEMETHOD_IMM16_ID16_IMM8: {
             auto index = bcIns.GetId().AsRawValue();
-            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, index, methodOffset);
+            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, index, methodOffset, bcIndex);
             break;
         }
         case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM8_ID16:
         case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: {
             auto index = bcIns.GetId().AsRawValue();
-            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::OBJECT_LITERAL, index, methodOffset);
+            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::OBJECT_LITERAL, index, methodOffset, bcIndex);
             break;
         }
         case BytecodeInstruction::Opcode::CREATEARRAYWITHBUFFER_IMM8_ID16:
         case BytecodeInstruction::Opcode::CREATEARRAYWITHBUFFER_IMM16_ID16: {
             auto index = bcIns.GetId().AsRawValue();
-            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::ARRAY_LITERAL, index, methodOffset);
+            AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::ARRAY_LITERAL, index, methodOffset, bcIndex);
             break;
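// [Editor's note, not part of the patch] Every AddConstantPoolIndexToBCInfo call in
// this switch now also records bcIndex, so a constant-pool item can be traced back
// to the exact bytecode position that referenced it; the value lands in the new
// ItemData::bcIndex field declared in bytecode_info_collector.h further below.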
} case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8: { auto methodIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, methodIndex, methodOffset); + AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, methodIndex, methodOffset, bcIndex); auto literalIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::CLASS_LITERAL, literalIndex, methodOffset); + AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::CLASS_LITERAL, literalIndex, + methodOffset, bcIndex); break; } case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8: { auto methodIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, methodIndex, methodOffset); + AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, methodIndex, methodOffset, bcIndex); auto literalIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::CLASS_LITERAL, literalIndex, methodOffset); + AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::CLASS_LITERAL, literalIndex, + methodOffset, bcIndex); break; } default: @@ -821,12 +829,24 @@ uint32_t LexEnvManager::GetTargetLexEnv(uint32_t methodId, uint32_t level) const return offset; } -void ConstantPoolInfo::AddIndexToCPItem(ItemType type, uint32_t index, uint32_t methodOffset) +uint64_t ConstantPoolInfo::GetItemKey(uint32_t index, uint32_t methodOffset) +{ + panda_file::IndexAccessor indexAccessor(*jsPandaFile_->GetPandaFile(), + panda_file::File::EntityId(methodOffset)); + uint64_t result = 0; + result = static_cast(indexAccessor.GetHeaderIndex()); + result = result << CONSTPOOL_MASK; + result |= index; + return result; +} + +void ConstantPoolInfo::AddIndexToCPItem(ItemType type, uint32_t index, uint32_t methodOffset, uint32_t bcIndex) { + uint64_t key = GetItemKey(index, methodOffset); Item &item = GetCPItem(type); - if (item.find(index) != item.end()) { + if (item.find(key) != item.end()) { return; } - item.insert({index, ItemData {index, methodOffset, nullptr}}); + item.insert({key, ItemData {index, methodOffset, nullptr, bcIndex}}); } } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/bytecode_info_collector.h b/ecmascript/compiler/bytecode_info_collector.h index 153ae3ae7a71b226754480d26cec0a4d23aa97cf..c1d5073a597ecdb41cef76cf5af9eb4f6e1b1cd8 100644 --- a/ecmascript/compiler/bytecode_info_collector.h +++ b/ecmascript/compiler/bytecode_info_collector.h @@ -54,6 +54,8 @@ namespace panda::ecmascript::kungfu { * of global and function f will be created when methods are executed. 
*/
+using PGOProfilerDecoder = pgo::PGOProfilerDecoder;
+
 enum class LexicalEnvStatus : uint8_t {
     VIRTUAL_LEXENV,
     REALITY_LEXENV
@@ -408,12 +410,14 @@ public:
         uint32_t index {0};
         uint32_t outerMethodOffset {0};
         CString *recordName {nullptr};
+        uint32_t bcIndex {0};
     };
 
     // key:constantpool index, value:ItemData
-    using Item = std::unordered_map<uint32_t, ItemData>;
+    using Item = std::unordered_map<uint64_t, ItemData>;
 
-    ConstantPoolInfo() : items_(ItemType::ITEM_TYPE_NUM, Item{}) {}
+    ConstantPoolInfo(JSPandaFile* jsPandaFile) :
+        items_(ItemType::ITEM_TYPE_NUM, Item{}), jsPandaFile_(jsPandaFile) {}
 
     Item& GetCPItem(ItemType type)
     {
@@ -421,15 +425,24 @@ public:
         return items_[type];
     }
 
-    void AddIndexToCPItem(ItemType type, uint32_t index, uint32_t methodOffset);
+    void AddIndexToCPItem(ItemType type, uint32_t index, uint32_t methodOffset, uint32_t bcIndex);
 
 private:
+    static constexpr uint32_t CONSTPOOL_MASK = 32;
+
+    uint64_t GetItemKey(uint32_t index, uint32_t methodOffset);
+
     std::vector<Item> items_;
+    JSPandaFile* jsPandaFile_ {nullptr};
+};
+
+struct FastCallInfo {
+    bool canFastCall_ {false};
+    bool isNoGC_ {false};
 };
 
 class BCInfo {
 public:
-    explicit BCInfo(size_t maxAotMethodSize)
-        : maxMethodSize_(maxAotMethodSize)
+    explicit BCInfo(size_t maxAotMethodSize, JSPandaFile* jsPandaFile)
+        : cpInfo_(jsPandaFile), maxMethodSize_(maxAotMethodSize)
     {
     }
 
@@ -500,9 +513,9 @@ public:
         return skippedMethods_.size();
     }
 
-    void AddIndexToCPInfo(ConstantPoolInfo::ItemType type, uint32_t index, uint32_t methodOffset)
+    void AddIndexToCPInfo(ConstantPoolInfo::ItemType type, uint32_t index, uint32_t methodOffset, uint32_t bcIndex)
     {
-        cpInfo_.AddIndexToCPItem(type, index, methodOffset);
+        cpInfo_.AddIndexToCPItem(type, index, methodOffset, bcIndex);
     }
 
     template
@@ -611,29 +624,34 @@ public:
         return recordToImportRecordsInfo_;
     }
 
-    bool IterateMethodOffsetToCanFastCall(uint32_t methodOffset, bool *isValid)
+    FastCallInfo IterateMethodOffsetToFastCallInfo(uint32_t methodOffset, bool *isValid)
     {
-        auto iter = methodOffsetToCanFastCall_.find(methodOffset);
-        if (iter != methodOffsetToCanFastCall_.end()) {
+        auto iter = methodOffsetToFastCallInfos_.find(methodOffset);
+        if (iter != methodOffsetToFastCallInfos_.end()) {
            *isValid = true;
            return iter->second;
        }
        *isValid = false;
-        return false;
+        return FastCallInfo();
    }
 
-    void SetMethodOffsetToCanFastCall(uint32_t methodOffset, bool canFastCall)
+    void SetMethodOffsetToFastCallInfo(uint32_t methodOffset, bool canFastCall, bool noGC)
    {
-        if (methodOffsetToCanFastCall_.find(methodOffset) == methodOffsetToCanFastCall_.end()) {
-            methodOffsetToCanFastCall_.emplace(methodOffset, canFastCall);
+        if (methodOffsetToFastCallInfos_.find(methodOffset) == methodOffsetToFastCallInfos_.end()) {
+            methodOffsetToFastCallInfos_.emplace(methodOffset, FastCallInfo { canFastCall, noGC });
        }
    }
 
    void ModifyMethodOffsetToCanFastCall(uint32_t methodOffset, bool canFastCall)
    {
-        methodOffsetToCanFastCall_.erase(methodOffset);
-        if (methodOffsetToCanFastCall_.find(methodOffset) == methodOffsetToCanFastCall_.end()) {
-            methodOffsetToCanFastCall_.emplace(methodOffset, canFastCall);
+        auto iter = methodOffsetToFastCallInfos_.find(methodOffset);
+        bool isNoGC = false;
+        if (iter != methodOffsetToFastCallInfos_.end()) {
+            isNoGC = iter->second.isNoGC_;
+        }
+        methodOffsetToFastCallInfos_.erase(methodOffset);
+        if (methodOffsetToFastCallInfos_.find(methodOffset) == methodOffsetToFastCallInfos_.end()) {
+            methodOffsetToFastCallInfos_.emplace(methodOffset, FastCallInfo { canFastCall, isNoGC });
+        }
    }
 private:
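// [Editor's sketch, not part of the patch] GetItemKey (bytecode_info_collector.cpp
// above) widens the constant-pool map key to 64 bits: the panda-file header index
// goes in the high word and the constant-pool index in the low word, which is why
// Item is now keyed by uint64_t. Note that CONSTPOOL_MASK (= 32) is used as a shift
// width despite its name. Standalone illustration with hypothetical helper names:
#include <cstdint>

constexpr uint32_t CONSTPOOL_SHIFT = 32;  // named CONSTPOOL_MASK in the patch

uint64_t PackItemKey(uint32_t headerIndex, uint32_t cpIndex)
{
    return (static_cast<uint64_t>(headerIndex) << CONSTPOOL_SHIFT) | cpIndex;
}

uint32_t HeaderIndexOf(uint64_t key)
{
    return static_cast<uint32_t>(key >> CONSTPOOL_SHIFT);
}

uint32_t CpIndexOf(uint64_t key)
{
    return static_cast<uint32_t>(key);
}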
std::unordered_map functionTypeIdToMethodOffset_ {}; std::unordered_map recordNameToExportInfo_ {}; std::unordered_map recordToImportRecordsInfo_ {}; - std::unordered_map methodOffsetToCanFastCall_ {}; + std::unordered_map methodOffsetToFastCallInfos_ {}; }; class LexEnvManager { @@ -696,6 +714,11 @@ public: return enableCollectLiteralInfo_; } + Bytecodes* GetByteCodes() + { + return &bytecodes_; + } + BCInfo& GetBytecodeInfo() { return bytecodeInfo_; @@ -746,9 +769,9 @@ private: } void AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType type, - uint32_t index, uint32_t methodOffset) + uint32_t index, uint32_t methodOffset, uint32_t bcIndex) { - bytecodeInfo_.AddIndexToCPInfo(type, index, methodOffset); + bytecodeInfo_.AddIndexToCPInfo(type, index, methodOffset, bcIndex); } inline std::string GetClassName(const EntityId entityId) @@ -780,7 +803,8 @@ private: bool *canFastCall); void CollectModuleInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method, const CString &recordName); - void CollectConstantPoolIndexInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method); + void CollectConstantPoolIndexInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method, + uint32_t bcIndex); void IterateLiteral(const MethodLiteral *method, std::vector &classOffsetVector); void StoreClassTypeOffset(const uint32_t typeOffset, std::vector &classOffsetVector); void CollectClassLiteralInfo(const MethodLiteral *method, const std::vector &classNameVec); @@ -802,6 +826,7 @@ private: bool enableCollectLiteralInfo_ {false}; std::set classDefBCIndexes_ {}; LexEnvManager* envManager_ {nullptr}; + Bytecodes bytecodes_; }; } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_BYTECODE_INFO_COLLECTOR_H diff --git a/ecmascript/compiler/bytecodes.cpp b/ecmascript/compiler/bytecodes.cpp index 77692ec1ec5d5c8a1cb508d6b82a9bbb8480e528..61d475cd5c3f51cfd7a1a3cd2fa5ba2cc2d9f9d5 100644 --- a/ecmascript/compiler/bytecodes.cpp +++ b/ecmascript/compiler/bytecodes.cpp @@ -59,6 +59,40 @@ BytecodeMetaData BytecodeMetaData::InitBytecodeMetaData(const uint8_t *pc) break; } + switch (inst.GetOpcode()) { + case EcmaOpcode::MOV_V4_V4: + case EcmaOpcode::MOV_V8_V8: + case EcmaOpcode::MOV_V16_V16: + case EcmaOpcode::STA_V8: + case EcmaOpcode::LDA_V8: + case EcmaOpcode::LDHOLE: + case EcmaOpcode::LDAI_IMM32: + case EcmaOpcode::FLDAI_IMM64: + case EcmaOpcode::LDFUNCTION: + case EcmaOpcode::TYPEOF_IMM8: + case EcmaOpcode::TYPEOF_IMM16: + case EcmaOpcode::LDNAN: + case EcmaOpcode::LDINFINITY: + case EcmaOpcode::LDUNDEFINED: + case EcmaOpcode::LDNULL: + case EcmaOpcode::LDTRUE: + case EcmaOpcode::LDFALSE: + case EcmaOpcode::LDSYMBOL: + case EcmaOpcode::LDGLOBAL: + case EcmaOpcode::LDBIGINT_ID16: + case EcmaOpcode::LDLEXVAR_IMM4_IMM4: + case EcmaOpcode::LDLEXVAR_IMM8_IMM8: + case EcmaOpcode::WIDE_LDLEXVAR_PREF_IMM16_IMM16: + case EcmaOpcode::WIDE_LDPATCHVAR_PREF_IMM16: + case EcmaOpcode::LDA_STR_ID16: + case EcmaOpcode::RETURN: + case EcmaOpcode::RETURNUNDEFINED: + flags |= BytecodeFlags::NO_GC; + break; + default: + break; + } + switch (inst.GetOpcode()) { case EcmaOpcode::TYPEOF_IMM8: case EcmaOpcode::TYPEOF_IMM16: @@ -198,7 +232,6 @@ BytecodeMetaData BytecodeMetaData::InitBytecodeMetaData(const uint8_t *pc) kind = BytecodeKind::SUSPEND; break; case EcmaOpcode::RESUMEGENERATOR: - case EcmaOpcode::CREATEOBJECTWITHEXCLUDEDKEYS_IMM8_V8_V8: kind = BytecodeKind::RESUME; break; case EcmaOpcode::DEBUGGER: diff --git a/ecmascript/compiler/bytecodes.h b/ecmascript/compiler/bytecodes.h 
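// [Editor's note, not part of the patch] The bytecodes.cpp hunk above whitelists the
// opcodes that can never allocate (loads of constants, moves, returns, ...) and tags
// them with BytecodeFlags::NO_GC; the bytecodes.h hunk below reserves bit 12 for that
// flag, which is why BYTECODE_FLAGS_SIZE grows from 12 to 13. A compile-time guard
// one could add (hypothetical, plain constants for illustration):
#include <cstdint>

constexpr uint32_t BYTECODE_FLAGS_SIZE = 13;
constexpr uint32_t NO_GC = 1u << 12;

static_assert(NO_GC < (1u << BYTECODE_FLAGS_SIZE),
              "every BytecodeFlags bit must fit within BYTECODE_FLAGS_SIZE");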
index 618e756600fe9900b18a4179bf75227b650864dd..39b7038ee2958ca75bb86b97cba2dbb14da31b97 100644 --- a/ecmascript/compiler/bytecodes.h +++ b/ecmascript/compiler/bytecodes.h @@ -49,6 +49,7 @@ enum BytecodeFlags : uint32_t { READ_FUNC = 1 << 9, READ_NEWTARGET = 1 << 10, READ_ARGC = 1 << 11, + NO_GC = 1 << 12, }; enum BytecodeKind : uint32_t { @@ -70,7 +71,7 @@ class BytecodeMetaData { public: static constexpr uint32_t MAX_OPCODE_SIZE = 16; static constexpr uint32_t MAX_SIZE_BITS = 4; - static constexpr uint32_t BYTECODE_FLAGS_SIZE = 12; + static constexpr uint32_t BYTECODE_FLAGS_SIZE = 13; static constexpr uint32_t BYTECODE_KIND_SIZE = 4; using OpcodeField = panda::BitField; @@ -113,6 +114,11 @@ public: return HasFlag(BytecodeFlags::WRITE_ENV); } + bool IsNoGC() const + { + return HasFlag(BytecodeFlags::NO_GC); + } + bool IsMov() const { return GetKind() == BytecodeKind::MOV; @@ -308,6 +314,49 @@ public: return bytecodes_[primary]; } + static bool IsCallOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CALLARG0_IMM8: + case EcmaOpcode::CALLARG1_IMM8_V8: + case EcmaOpcode::CALLARGS2_IMM8_V8_V8: + case EcmaOpcode::CALLARGS3_IMM8_V8_V8_V8: + case EcmaOpcode::CALLRANGE_IMM8_IMM8_V8: + case EcmaOpcode::WIDE_CALLRANGE_PREF_IMM16_V8: + case EcmaOpcode::CALLTHIS0_IMM8_V8: + case EcmaOpcode::CALLTHIS1_IMM8_V8_V8: + case EcmaOpcode::CALLTHIS2_IMM8_V8_V8_V8: + case EcmaOpcode::CALLTHIS3_IMM8_V8_V8_V8_V8: + case EcmaOpcode::CALLTHISRANGE_IMM8_IMM8_V8: + case EcmaOpcode::WIDE_CALLTHISRANGE_PREF_IMM16_V8: + return true; + default: + return false; + } + } + + static bool IsCreateObjectWithBufferOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CREATEOBJECTWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: + return true; + default: + return false; + } + } + + static bool IsCreateArrayWithBufferOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: + return true; + default: + return false; + } + } + private: static uint8_t ReadByte(const uint8_t *pc) { diff --git a/ecmascript/compiler/call_signature.cpp b/ecmascript/compiler/call_signature.cpp index fd21f5f9a63213ad198d5863d165c5240dd0d8bd..ea3dd65a373ebfda3c5a9edb3f06b6d19fca4bcb 100644 --- a/ecmascript/compiler/call_signature.cpp +++ b/ecmascript/compiler/call_signature.cpp @@ -14,6 +14,7 @@ */ #include "ecmascript/compiler/call_signature.h" +#include "ecmascript/compiler/variable_type.h" #if defined(__clang__) #pragma clang diagnostic push @@ -644,13 +645,17 @@ DEF_CALL_SIGNATURE(ConstructorCheck) DEF_CALL_SIGNATURE(CreateEmptyArray) { - // 1 : 1 input parameters - CallSignature signature("CreateEmptyArray", 0, 1, + // 5 : 5 input parameters + CallSignature signature("CreateEmptyArray", 0, 5, ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); *callSign = signature; - // 1 : 1 input parameters - std::array params = { + // 5 : 5 input parameters + std::array params = { VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // jsFunc + VariableType::JS_ANY(), // pc + VariableType::INT32(), // profileTypeInfo + VariableType::INT32(), // slotId }; callSign->SetParameters(params.data()); callSign->SetCallConv(CallSignature::CallConv::CCallConv); @@ -658,15 +663,18 @@ DEF_CALL_SIGNATURE(CreateEmptyArray) DEF_CALL_SIGNATURE(CreateArrayWithBuffer) { - // 3 : 3 input parameters - CallSignature signature("CreateArrayWithBuffer", 0, 3, + // 6 : 6 input parameters + CallSignature 
signature("CreateArrayWithBuffer", 0, 6,
                            ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
     *callSign = signature;
-    // 3 : 3 input parameters
-    std::array<VariableType, 3> params = {
+    // 6 : 6 input parameters
+    std::array<VariableType, 6> params = {
         VariableType::NATIVE_POINTER(),    // glue
         VariableType::INT32(),             // index
         VariableType::JS_ANY(),            // jsFunc
+        VariableType::JS_ANY(),            // pc
+        VariableType::INT32(),             // profileTypeInfo
+        VariableType::INT32(),             // slotId
     };
     callSign->SetParameters(params.data());
     callSign->SetCallConv(CallSignature::CallConv::CCallConv);
@@ -1120,6 +1128,27 @@ DEF_CALL_SIGNATURE(ResumeUncaughtFrameAndReturn)
     callSign->SetCallConv(CallSignature::CallConv::GHCCallConv);
 }
 
+DEF_CALL_SIGNATURE(ResumeRspAndRollback)
+{
+    // 8 : 8 input parameters
+    CallSignature resumeRspAndRollback("ResumeRspAndRollback", 0, 8,
+        ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID());
+    *callSign = resumeRspAndRollback;
+    std::array<VariableType, 8> params = { // 8 : 8 input parameters
+        VariableType::NATIVE_POINTER(),
+        VariableType::NATIVE_POINTER(),
+        VariableType::NATIVE_POINTER(),
+        VariableType::JS_POINTER(),
+        VariableType::JS_POINTER(),
+        VariableType::JS_ANY(),
+        VariableType::INT32(),
+        VariableType::NATIVE_POINTER(),
+    };
+    callSign->SetParameters(params.data());
+    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
+    callSign->SetCallConv(CallSignature::CallConv::GHCCallConv);
+}
+
 DEF_CALL_SIGNATURE(StringsAreEquals)
 {
     // 2 : 2 input parameters
@@ -1150,6 +1179,21 @@ DEF_CALL_SIGNATURE(BigIntEquals)
     callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
 }
 
+DEF_CALL_SIGNATURE(BigIntSameValueZero)
+{
+    // 2 : 2 input parameters
+    CallSignature bigIntSameValueZero("BigIntSameValueZero", 0, 2,
+        ArgumentsOrder::DEFAULT_ORDER, VariableType::BOOL());
+    *callSign = bigIntSameValueZero;
+    std::array<VariableType, 2> params = { // 2 : 2 input parameters
+        VariableType::JS_POINTER(),
+        VariableType::JS_POINTER(),
+    };
+    callSign->SetParameters(params.data());
+    callSign->SetGCLeafFunction(true);
+    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
+}
+
 #define PUSH_CALL_ARGS_AND_DISPATCH_SIGNATURE_COMMON(name) \
     /* 1 : 1 input parameters */ \
     CallSignature signature(#name, 0, 1, \
@@ -1377,6 +1421,22 @@ DEF_CALL_SIGNATURE(DebugPrint)
     callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
 }
 
+DEF_CALL_SIGNATURE(DebugPrintCustom)
+{
+    // 1 : 1 input parameters
+    CallSignature debugPrintCustom("DebugPrintCustom", 0, 1,
+        ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID());
+    *callSign = debugPrintCustom;
+    // 1 : 1 input parameters
+    std::array<VariableType, 1> params = {
+        VariableType::NATIVE_POINTER()
+    };
+    callSign->SetVariadicArgs(true);
+    callSign->SetParameters(params.data());
+    callSign->SetGCLeafFunction(true);
+    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
+}
+
 DEF_CALL_SIGNATURE(DebugPrintInstruction)
 {
     // 2 : 2 input parameters
@@ -1411,13 +1471,15 @@ DEF_CALL_SIGNATURE(Comment)
 
 DEF_CALL_SIGNATURE(ProfileCall)
 {
-    // 2 : 2 input parameters
-    CallSignature callProfilerInstruction("ProfileCall", 0, 2, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID());
+    // 4 : 4 input parameters
+    CallSignature callProfilerInstruction("ProfileCall", 0, 4, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID());
     *callSign = callProfilerInstruction;
-    // 2 : 2 input parameters
-    std::array<VariableType, 2> params = {
+    // 4 : 4 input parameters
+    std::array<VariableType, 4> params = {
         VariableType::NATIVE_POINTER(),
         VariableType::JS_ANY(),
+        VariableType::JS_ANY(),
+        VariableType::INT32(),
     };
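// [Editor's sketch, not part of the patch] The signature definitions in this file all
// repeat the same steps; a hypothetical helper capturing the variadic GC-leaf pattern
// used by DebugPrintCustom above (and FatalPrintCustom below), assuming only the
// CallSignature API already visible in this diff:
#include <array>
#include <cstddef>

template <std::size_t N>
void InitVariadicLeafStub(CallSignature *callSign, const char *name,
                          std::array<VariableType, N> &params)
{
    CallSignature signature(name, 0, N, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID());
    *callSign = signature;
    callSign->SetVariadicArgs(true);             // extra args may follow the declared N
    callSign->SetParameters(params.data());
    callSign->SetGCLeafFunction(true);           // stub is treated as a GC leaf
    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
}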
callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); @@ -1456,7 +1518,7 @@ DEF_CALL_SIGNATURE(ProfileCreateObject) VariableType::JS_ANY(), VariableType::INT32(), VariableType::JS_ANY(), - VariableType::JS_ANY(), + VariableType::INT32(), }; callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); @@ -1501,6 +1563,24 @@ DEF_CALL_SIGNATURE(ProfileObjLayout) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } +DEF_CALL_SIGNATURE(ProfileObjIndex) +{ + // 4: 4 input parameters + CallSignature layoutProfInstruction("ProfileObjIndex", 0, 4, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = layoutProfInstruction; + // 4: 4 input parameters + std::array params = { // 4 : 4 input parameters + VariableType::NATIVE_POINTER(), + VariableType::JS_ANY(), + VariableType::INT32(), + VariableType::JS_ANY(), + }; + callSign->SetVariadicArgs(true); + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + DEF_CALL_SIGNATURE(FatalPrint) { // 1 : 1 input parameters @@ -1517,6 +1597,22 @@ DEF_CALL_SIGNATURE(FatalPrint) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } +DEF_CALL_SIGNATURE(FatalPrintCustom) +{ + // 1 : 1 input parameters + CallSignature fatalPrintCustom("FatalPrintCustom", 0, 1, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = fatalPrintCustom; + // 1 : 1 input parameters + std::array params = { + VariableType::NATIVE_POINTER() + }; + callSign->SetVariadicArgs(true); + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + DEF_CALL_SIGNATURE(GetActualArgvNoGC) { CallSignature index("GetActualArgvNoGC", 0, 1, ArgumentsOrder::DEFAULT_ORDER, VariableType::NATIVE_POINTER()); @@ -1675,6 +1771,20 @@ DEF_CALL_SIGNATURE(DoubleToInt) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } +DEF_CALL_SIGNATURE(DoubleToLength) +{ + // 1 : 1 input parameters + CallSignature index("DoubleToLength", 0, 1, ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = index; + // 1 : 1 input parameters + std::array params = { + VariableType::FLOAT64(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + DEF_CALL_SIGNATURE(MarkingBarrier) { // 4 : 4 input parameters @@ -1944,4 +2054,20 @@ DEF_CALL_SIGNATURE(EndCallTimer) callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } + +DEF_CALL_SIGNATURE(GetCharFromEcmaString) +{ + // 3 : 3 input parameters + CallSignature signature("GetCharFromEcmaString", 0, 3, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 3 : 3 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // ecmaString + VariableType::INT32(), // index + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/call_signature.h b/ecmascript/compiler/call_signature.h index a0c47c0e7da37a79d22306dda5b5ebcc61de1638..b6bb921b6ac11defd3f2f347b3cd635d7ad4da71 100644 --- a/ecmascript/compiler/call_signature.h +++ b/ecmascript/compiler/call_signature.h @@ -21,7 +21,6 @@ #include #include 
"ecmascript/compiler/variable_type.h" -#include "ecmascript/compiler/test_stubs_signature.h" #include "libpandabase/macros.h" #include "libpandabase/utils/bit_field.h" @@ -410,9 +409,12 @@ private: V(ResumeRspAndReturn) \ V(ResumeCaughtFrameAndDispatch) \ V(ResumeUncaughtFrameAndReturn) \ + V(ResumeRspAndRollback) \ V(StringsAreEquals) \ V(BigIntEquals) \ + V(BigIntSameValueZero) \ V(DebugPrint) \ + V(DebugPrintCustom) \ V(DebugPrintInstruction) \ V(Comment) \ V(ProfileCall) \ @@ -420,10 +422,13 @@ private: V(ProfileCreateObject) \ V(ProfileOpType) \ V(ProfileObjLayout) \ + V(ProfileObjIndex) \ V(FatalPrint) \ + V(FatalPrintCustom) \ V(GetActualArgvNoGC) \ V(InsertOldToNewRSet) \ V(DoubleToInt) \ + V(DoubleToLength) \ V(FloatMod) \ V(FloatSqrt) \ V(FloatCos) \ @@ -458,7 +463,7 @@ private: V(CallReturnWithArgv) \ V(StartCallTimer) \ V(EndCallTimer) \ - TEST_STUB_SIGNATRUE_LIST(V) + V(GetCharFromEcmaString) #define DECL_CALL_SIGNATURE(name) \ class name##CallSignature final { \ diff --git a/ecmascript/compiler/circuit.cpp b/ecmascript/compiler/circuit.cpp index 9baf7aa906e4d813f52da1dcf4c7c633c620eb6d..0a52da19a6b7787c41c407dd104425e6a7a6fde3 100644 --- a/ecmascript/compiler/circuit.cpp +++ b/ecmascript/compiler/circuit.cpp @@ -21,10 +21,12 @@ #include "ecmascript/platform/map.h" namespace panda::ecmascript::kungfu { -Circuit::Circuit(NativeAreaAllocator* allocator, DebugInfo* debugInfo, const char* funcName, bool isArch64) +Circuit::Circuit(NativeAreaAllocator* allocator, DebugInfo* debugInfo, const char* funcName, + bool isArch64, panda::ecmascript::FrameType type) : circuitSize_(0), gateCount_(0), time_(1), + frameType_(type), isArch64_(isArch64), chunk_(allocator), root_(Circuit::NullGate()), diff --git a/ecmascript/compiler/circuit.h b/ecmascript/compiler/circuit.h index 739fc6e3ba50b4d4cb533cd9f49051d4c99bd601..6afd5307a95e110d51456b13b98e651ec15f5429 100644 --- a/ecmascript/compiler/circuit.h +++ b/ecmascript/compiler/circuit.h @@ -41,7 +41,7 @@ enum class VisitState : uint8_t { class Circuit { // note: calling NewGate could make all saved Gate* invalid public: explicit Circuit(NativeAreaAllocator* allocator, DebugInfo* dInfo = nullptr, const char* funcName = nullptr, - bool isArch64 = true); + bool isArch64 = true, FrameType type = FrameType::OPTIMIZED_FRAME); ~Circuit(); NO_COPY_SEMANTIC(Circuit); NO_MOVE_SEMANTIC(Circuit); @@ -132,6 +132,14 @@ public: GATE_META_DATA_LIST_WITH_BOOL(DECLARE_GATE_META) #undef DECLARE_GATE_META +#define DECLARE_GATE_META_WITH_BOOL_VALUE_IN(NAME, OP, R, S, D, V) \ + const GateMetaData* NAME(size_t value, bool flag) \ + { \ + return metaBuilder_.NAME(value, flag); \ + } + GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(DECLARE_GATE_META_WITH_BOOL_VALUE_IN) +#undef DECLARE_GATE_META_WITH_BOOL_VALUE_IN + #define DECLARE_GATE_META(NAME, OP, R, S, D, V) \ const GateMetaData* NAME(uint64_t value, uint64_t pcOffset) \ { \ @@ -140,6 +148,14 @@ public: GATE_META_DATA_LIST_WITH_PC_OFFSET(DECLARE_GATE_META) #undef DECLARE_GATE_META +#define DECLARE_GATE_META_FOR_CALL(NAME, OP, R, S, D, V) \ + const GateMetaData* NAME(uint64_t value, uint64_t pcOffset, bool noGC) \ + { \ + return metaBuilder_.NAME(value, pcOffset, noGC); \ + } + GATE_META_DATA_LIST_FOR_CALL(DECLARE_GATE_META_FOR_CALL) +#undef DECLARE_GATE_META_FOR_CALL + #define DECLARE_GATE_META(NAME, OP, R, S, D, V) \ const GateMetaData* NAME(uint64_t pcOffset) const \ { \ @@ -167,6 +183,11 @@ public: return metaBuilder_.TypedBinaryOp(value, binOp, type); } + const GateMetaData* TypedCallTargetCheckOp(uint32_t numIns, 
uint64_t value, TypedCallTargetCheckOp checkOp) + { + return metaBuilder_.TypedCallTargetCheckOp(numIns, value, checkOp); + } + GateRef DeadGate() { if (dead_ == NullGate()) { @@ -183,7 +204,7 @@ public: bool IsOptimizedJSFunctionFrame() const { - return frameType_ == panda::ecmascript::FrameType::OPTIMIZED_JS_FUNCTION_FRAME + return frameType_ == FrameType::OPTIMIZED_JS_FUNCTION_FRAME || frameType_ == FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME; } @@ -240,7 +261,7 @@ private: std::map, GateRef> constantCache_ {}; std::map, GateRef> constantDataCache_ {}; std::map initialEnvCache_ {}; - panda::ecmascript::FrameType frameType_ {panda::ecmascript::FrameType::OPTIMIZED_FRAME}; + panda::ecmascript::FrameType frameType_ {FrameType::OPTIMIZED_FRAME}; bool isArch64_ { false }; Chunk chunk_; diff --git a/ecmascript/compiler/circuit_builder-inl.h b/ecmascript/compiler/circuit_builder-inl.h index 2e03957d2e4c271b73c3e58c22477e484b14beb2..e9c879e2f19189c8c1f0dc2d986dd304dd961f29 100644 --- a/ecmascript/compiler/circuit_builder-inl.h +++ b/ecmascript/compiler/circuit_builder-inl.h @@ -232,6 +232,20 @@ GateRef CircuitBuilder::BinaryOp(GateRef x, GateRef y) return Circuit::NullGate(); } +template +GateRef CircuitBuilder::BinaryOpWithOverflow(GateRef x, GateRef y) +{ + if (Op == OpCode::ADD) { + return BinaryArithmetic(circuit_->AddWithOverflow(), Type, x, y); + } else if (Op == OpCode::SUB) { + return BinaryArithmetic(circuit_->SubWithOverflow(), Type, x, y); + } else if (Op == OpCode::MUL) { + return BinaryArithmetic(circuit_->MulWithOverflow(), Type, x, y); + } + UNREACHABLE(); + return Circuit::NullGate(); +} + GateRef CircuitBuilder::IntPtrLSR(GateRef x, GateRef y) { auto ptrSize = env_->Is32Bit() ? MachineType::I32 : MachineType::I64; @@ -395,6 +409,13 @@ GateRef CircuitBuilder::TaggedIsGeneratorObject(GateRef x) return LogicAnd(isHeapObj, isAsyncGeneratorObj); } +GateRef CircuitBuilder::TaggedIsJSArray(GateRef x) +{ + GateRef objType = GetObjectType(LoadHClass(x)); + GateRef isJSArray = Equal(objType, Int32(static_cast(JSType::JS_ARRAY))); + return isJSArray; +} + GateRef CircuitBuilder::TaggedIsPropertyBox(GateRef x) { return LogicAnd(TaggedIsHeapObject(x), @@ -493,6 +514,16 @@ GateRef CircuitBuilder::BooleanToTaggedBooleanPtr(GateRef x) return Int64ToTaggedPtr(Int64Or(val, Int64(JSTaggedValue::TAG_BOOLEAN_MASK))); } +GateRef CircuitBuilder::BooleanToInt32(GateRef x) +{ + return ZExtInt1ToInt32(x); +} + +GateRef CircuitBuilder::BooleanToFloat64(GateRef x) +{ + return ChangeInt32ToFloat64(ZExtInt1ToInt32(x)); +} + GateRef CircuitBuilder::Float32ToTaggedDoublePtr(GateRef x) { GateRef val = ExtFloat32ToDouble(x); @@ -601,6 +632,15 @@ inline GateRef CircuitBuilder::IsJSFunctionWithBit(GateRef obj) return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::IsJSFunctionBit::START_BIT)), Int32(0)); } +inline GateRef CircuitBuilder::IsOptimizedAndNotFastCall(GateRef obj) +{ + GateRef hClass = LoadHClass(obj); + GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); + GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); + GateRef optimizedFastCallBitsInBitfield = Int32And(bitfield, Int32(JSHClass::OPTIMIZED_FASTCALL_BITS)); + return Equal(optimizedFastCallBitsInBitfield, Int32(JSHClass::OPTIMIZED_BIT)); +} + inline GateRef CircuitBuilder::IsOptimized(GateRef obj) { GateRef hClass = LoadHClass(obj); @@ -656,13 +696,22 @@ GateRef CircuitBuilder::IsDictionaryModeByHClass(GateRef hClass) Int32(0)); } -GateRef CircuitBuilder::IsIsStableElementsByHClass(GateRef hClass) +GateRef 
CircuitBuilder::GetElementsKindByHClass(GateRef hClass) +{ + GateRef bitfieldOffset = IntPtr(JSHClass::BIT_FIELD_OFFSET); + GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); + return Int32And(Int32LSR(bitfield, + Int32(JSHClass::ElementsKindBits::START_BIT)), + Int32((1LLU << JSHClass::ElementsKindBits::SIZE) - 1)); +} + +GateRef CircuitBuilder::HasConstructorByHClass(GateRef hClass) { GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::IsStableElementsBit::START_BIT)), - Int32((1LU << JSHClass::IsStableElementsBit::SIZE) - 1)), + Int32(JSHClass::HasConstructorBits::START_BIT)), + Int32((1LU << JSHClass::HasConstructorBits::SIZE) - 1)), Int32(0)); } @@ -707,15 +756,22 @@ GateRef CircuitBuilder::IsClassConstructor(GateRef object) GateRef hClass = LoadHClass(object); GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::ClassConstructorBit::START_BIT)), - Int32((1LU << JSHClass::ClassConstructorBit::SIZE) - 1)), - Int32(0)); + return IsClassConstructorWithBitField(bitfield); } GateRef CircuitBuilder::IsClassConstructorWithBitField(GateRef bitfield) { - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::ClassConstructorBit::START_BIT)), Int32(0)); + auto classBitMask = 1LU << JSHClass::IsClassConstructorOrPrototypeBit::START_BIT; + auto ctorBitMask = 1LU << JSHClass::ConstructorBit::START_BIT; + auto mask = Int32(classBitMask | ctorBitMask); + auto classCtor = Int32And(bitfield, mask); + return Int32Equal(classCtor, mask); +} + +GateRef CircuitBuilder::HasConstructor(GateRef object) +{ + GateRef hClass = LoadHClass(object); + return HasConstructorByHClass(hClass); } GateRef CircuitBuilder::IsConstructor(GateRef object) @@ -736,10 +792,16 @@ GateRef CircuitBuilder::IsClassPrototype(GateRef object) GateRef bitfieldOffset = IntPtr(JSHClass::BIT_FIELD_OFFSET); GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); // decode - return NotEqual( - Int32And(Int32LSR(bitfield, Int32(JSHClass::ClassPrototypeBit::START_BIT)), - Int32((1LU << JSHClass::ClassPrototypeBit::SIZE) - 1)), - Int32(0)); + return IsClassPrototypeWithBitField(bitfield); +} + +GateRef CircuitBuilder::IsClassPrototypeWithBitField(GateRef bitfield) +{ + auto classBitMask = 1LU << JSHClass::IsClassConstructorOrPrototypeBit::START_BIT; + auto ptBitMask = 1LU << JSHClass::IsPrototypeBit::START_BIT; + auto mask = Int32(classBitMask | ptBitMask); + auto classPt = Int32And(bitfield, mask); + return Int32Equal(classPt, mask); } GateRef CircuitBuilder::IsExtensible(GateRef object) @@ -842,6 +904,37 @@ GateRef CircuitBuilder::GetObjectSizeFromHClass(GateRef hClass) return PtrMul(ZExtInt32ToPtr(objectSizeInWords), IntPtr(JSTaggedValue::TaggedTypeSize())); } +GateRef CircuitBuilder::IsTreeString(GateRef obj) +{ + GateRef objectType = GetObjectType(LoadHClass(obj)); + return Int32Equal(objectType, Int32(static_cast(JSType::TREE_STRING))); +} + +GateRef CircuitBuilder::IsSlicedString(GateRef obj) +{ + GateRef objectType = GetObjectType(LoadHClass(obj)); + return Int32Equal(objectType, Int32(static_cast(JSType::SLICED_STRING))); +} + +GateRef CircuitBuilder::TreeStringIsFlat(GateRef string) +{ + GateRef second = GetSecondFromTreeString(string); + GateRef len = GetLengthFromString(second); + return Int32Equal(len, 
Int32(0)); +} + +GateRef CircuitBuilder::GetFirstFromTreeString(GateRef string) +{ + GateRef offset = IntPtr(TreeEcmaString::FIRST_OFFSET); + return Load(VariableType::JS_POINTER(), string, offset); +} + +GateRef CircuitBuilder::GetSecondFromTreeString(GateRef string) +{ + GateRef offset = IntPtr(TreeEcmaString::SECOND_OFFSET); + return Load(VariableType::JS_POINTER(), string, offset); +} + template GateRef CircuitBuilder::TypedBinaryOp(GateRef x, GateRef y, GateType xType, GateType yType, GateType gateType, PGOSampleType sampleType) @@ -857,6 +950,50 @@ GateRef CircuitBuilder::TypedBinaryOp(GateRef x, GateRef y, GateType xType, Gate return numberBinaryOp; } +template +GateRef CircuitBuilder::JSNoGCCallThisTargetTypeCheck(GateType type, GateRef func, GateRef methodId, GateRef gate) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + auto frameState = acc_.GetFrameState(gate); + GateRef ret = GetCircuit()->NewGate(circuit_->TypedCallTargetCheckOp(CircuitBuilder::GATE_TWO_VALUESIN, + static_cast(type.Value()), Op), MachineType::I1, + {currentControl, currentDepend, func, methodId, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + +template +GateRef CircuitBuilder::JSCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex, GateRef gate) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + auto frameState = acc_.GetFrameState(gate); + GateRef ret = GetCircuit()->NewGate(circuit_->TypedCallTargetCheckOp(CircuitBuilder::GATE_TWO_VALUESIN, + static_cast(type.Value()), Op), MachineType::I1, + {currentControl, currentDepend, func, methodIndex, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + +template +GateRef CircuitBuilder::JSCallThisTargetTypeCheck(GateType type, GateRef func, GateRef gate) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + auto frameState = acc_.GetFrameState(gate); + GateRef ret = GetCircuit()->NewGate(circuit_->TypedCallTargetCheckOp(1, static_cast(type.Value()), Op), + MachineType::I1, {currentControl, currentDepend, func, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + template GateRef CircuitBuilder::TypedUnaryOp(GateRef x, GateType xType, GateType gateType) { @@ -872,13 +1009,13 @@ GateRef CircuitBuilder::TypedUnaryOp(GateRef x, GateType xType, GateType gateTyp } template -GateRef CircuitBuilder::TypedConditionJump(GateRef x, GateType xType) +GateRef CircuitBuilder::TypedConditionJump(GateRef x, GateType xType, BranchKind branchKind) { auto currentLabel = env_->GetCurrentLabel(); auto currentControl = currentLabel->GetControl(); auto currentDepend = currentLabel->GetDepend(); auto machineType = MachineType::NOVALUE; - auto jumpOp = TypedConditionJump(machineType, Op, xType, {currentControl, currentDepend, x}); + auto jumpOp = TypedConditionJump(machineType, Op, branchKind, xType, {currentControl, currentDepend, x}); currentLabel->SetControl(jumpOp); currentLabel->SetDepend(jumpOp); return jumpOp; @@ -973,6 +1110,18 @@ GateRef CircuitBuilder::LogicOr(GateRef x, GateRef y) return ret; } +GateRef CircuitBuilder::LoadFromTaggedArray(GateRef array, size_t 
index) +{ + auto dataOffset = TaggedArray::DATA_OFFSET + index * JSTaggedValue::TaggedTypeSize(); + return LoadConstOffset(VariableType::JS_ANY(), array, dataOffset); +} + +GateRef CircuitBuilder::StoreToTaggedArray(GateRef array, size_t index, GateRef value) +{ + auto dataOffset = TaggedArray::DATA_OFFSET + index * JSTaggedValue::TaggedTypeSize(); + return StoreConstOffset(VariableType::JS_ANY(), array, dataOffset, value); +} + int CircuitBuilder::NextVariableId() { return env_->NextVariableId(); @@ -1078,30 +1227,21 @@ inline GateRef CircuitBuilder::IsBase(GateRef ctor) return Int32LessThanOrEqual(kind, Int32(static_cast(FunctionKind::CLASS_CONSTRUCTOR))); } -inline GateRef CircuitBuilder::TypedCallBuiltin(GateRef hirGate, GateRef x, BuiltinsStubCSigns::ID id) +inline GateRef CircuitBuilder::TypedCallBuiltin(GateRef hirGate, const std::vector &args, + BuiltinsStubCSigns::ID id) { auto currentLabel = env_->GetCurrentLabel(); auto currentControl = currentLabel->GetControl(); auto currentDepend = currentLabel->GetDepend(); - GateRef idGate = Int8(static_cast(id)); - auto numberMathOp = TypedCallOperator(hirGate, MachineType::I64, {currentControl, currentDepend, x, idGate}); - currentLabel->SetControl(numberMathOp); - currentLabel->SetDepend(numberMathOp); - return numberMathOp; -} -inline GateRef CircuitBuilder::TypedCallThis3Builtin(GateRef hirGate, GateRef thisObj, GateRef a0, GateRef a1, - GateRef a2, BuiltinsStubCSigns::ID id) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - GateRef idGate = Int8(static_cast(id)); - auto numberMathOp = TypedCallOperator(hirGate, MachineType::I64, - {currentControl, currentDepend, thisObj, a0, a1, a2, idGate}); - currentLabel->SetControl(numberMathOp); - currentLabel->SetDepend(numberMathOp); - return numberMathOp; + std::vector inList { currentControl, currentDepend }; + inList.insert(inList.end(), args.begin(), args.end()); + inList.push_back(Int8(static_cast(id))); + + auto builtinOp = TypedCallOperator(hirGate, MachineType::I64, inList); + currentLabel->SetControl(builtinOp); + currentLabel->SetDepend(builtinOp); + return builtinOp; } inline GateRef CircuitBuilder::GetMethodId(GateRef func) diff --git a/ecmascript/compiler/circuit_builder.cpp b/ecmascript/compiler/circuit_builder.cpp index 11ee3c26f44a550a1e4531a010fe0847bed85e9b..3f89dbf2b4736526de322841be4301877ff815e2 100644 --- a/ecmascript/compiler/circuit_builder.cpp +++ b/ecmascript/compiler/circuit_builder.cpp @@ -64,9 +64,10 @@ GateRef CircuitBuilder::UndefineConstant() return circuit_->GetConstantGate(MachineType::I64, JSTaggedValue::VALUE_UNDEFINED, type); } -GateRef CircuitBuilder::Branch(GateRef state, GateRef condition) +GateRef CircuitBuilder::Branch(GateRef state, GateRef condition, uint32_t trueWeight, uint32_t falseWeight) { - return circuit_->NewGate(circuit_->IfBranch(), { state, condition }); + auto value = BranchAccessor::ToValue(trueWeight, falseWeight); + return circuit_->NewGate(circuit_->IfBranch(value), { state, condition }); } GateRef CircuitBuilder::SwitchBranch(GateRef state, GateRef index, int caseCounts) @@ -127,20 +128,40 @@ GateRef CircuitBuilder::DependRelay(GateRef state, GateRef depend) return circuit_->NewGate(circuit_->DependRelay(), { state, depend }); } +GateRef CircuitBuilder::ReadSp() +{ + return circuit_->NewGate(circuit_->ReadSp(), MachineType::I64, GateType::NJSValue()); +} + GateRef CircuitBuilder::Arguments(size_t index) { auto argListOfCircuit = 
circuit_->GetArgRoot(); return GetCircuit()->NewArg(MachineType::I64, index, GateType::NJSValue(), argListOfCircuit); } -GateRef CircuitBuilder::ObjectTypeCheck(GateType type, GateRef gate, GateRef index) +GateRef CircuitBuilder::ObjectTypeCheck(GateType type, bool isHeapObject, GateRef gate, GateRef hclassIndex) { auto currentLabel = env_->GetCurrentLabel(); auto currentControl = currentLabel->GetControl(); auto currentDepend = currentLabel->GetDepend(); auto frameState = acc_.FindNearestFrameState(currentDepend); - GateRef ret = GetCircuit()->NewGate(circuit_->ObjectTypeCheck(static_cast(type.Value())), - MachineType::I1, {currentControl, currentDepend, gate, index, frameState}, GateType::NJSValue()); + ObjectTypeAccessor accessor(type, isHeapObject); + GateRef ret = GetCircuit()->NewGate(circuit_->ObjectTypeCheck(accessor.ToValue()), MachineType::I1, + {currentControl, currentDepend, gate, hclassIndex, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + +GateRef CircuitBuilder::ObjectTypeCompare(GateType type, bool isHeapObject, GateRef gate, GateRef hclassIndex) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + auto frameState = acc_.FindNearestFrameState(currentDepend); + ObjectTypeAccessor accessor(type, isHeapObject); + GateRef ret = GetCircuit()->NewGate(circuit_->ObjectTypeCompare(accessor.ToValue()), MachineType::I1, + {currentControl, currentDepend, gate, hclassIndex, frameState}, GateType::NJSValue()); currentLabel->SetControl(ret); currentLabel->SetDepend(ret); return ret; @@ -158,25 +179,65 @@ GateRef CircuitBuilder::HeapObjectCheck(GateRef gate, GateRef frameState) return ret; } -GateRef CircuitBuilder::StableArrayCheck(GateRef gate) +GateRef CircuitBuilder::StableArrayCheck(GateRef gate, ElementsKind kind, ArrayMetaDataAccessor::Mode mode) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + auto frameState = acc_.FindNearestFrameState(currentDepend); + ArrayMetaDataAccessor accessor(kind, mode); + GateRef ret = GetCircuit()->NewGate(circuit_->StableArrayCheck(accessor.ToValue()), + MachineType::I1, {currentControl, currentDepend, gate, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + +GateRef CircuitBuilder::COWArrayCheck(GateRef gate) { auto currentLabel = env_->GetCurrentLabel(); auto currentControl = currentLabel->GetControl(); auto currentDepend = currentLabel->GetDepend(); auto frameState = acc_.FindNearestFrameState(currentDepend); - GateRef ret = GetCircuit()->NewGate(circuit_->StableArrayCheck(), + GateRef ret = GetCircuit()->NewGate(circuit_->COWArrayCheck(), MachineType::I1, {currentControl, currentDepend, gate, frameState}, GateType::NJSValue()); currentLabel->SetControl(ret); currentLabel->SetDepend(ret); return ret; } -GateRef CircuitBuilder::HClassStableArrayCheck(GateRef gate, GateRef frameState) +GateRef CircuitBuilder::EcmaStringCheck(GateRef gate) { auto currentLabel = env_->GetCurrentLabel(); auto currentControl = currentLabel->GetControl(); auto currentDepend = currentLabel->GetDepend(); - GateRef ret = GetCircuit()->NewGate(circuit_->HClassStableArrayCheck(), + auto frameState = acc_.FindNearestFrameState(currentDepend); + GateRef ret = GetCircuit()->NewGate(circuit_->EcmaStringCheck(), + MachineType::I1, 
{currentControl, currentDepend, gate, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + +GateRef CircuitBuilder::FlattenStringCheck(GateRef gate) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + auto frameState = acc_.FindNearestFrameState(currentDepend); + GateRef ret = GetCircuit()->NewGate(circuit_->FlattenStringCheck(), + MachineType::I1, {currentControl, currentDepend, gate, frameState}, GateType::NJSValue()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + +GateRef CircuitBuilder::HClassStableArrayCheck(GateRef gate, GateRef frameState, ArrayMetaDataAccessor accessor) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + GateRef ret = GetCircuit()->NewGate(circuit_->HClassStableArrayCheck(accessor.ToValue()), MachineType::I1, {currentControl, currentDepend, gate, frameState}, GateType::NJSValue()); currentLabel->SetControl(ret); currentLabel->SetDepend(ret); @@ -220,6 +281,19 @@ GateRef CircuitBuilder::LoadTypedArrayLength(GateType type, GateRef gate) return ret; } +GateRef CircuitBuilder::RangeGuard(GateRef gate, uint32_t left, uint32_t right) +{ + auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); + auto currentDepend = currentLabel->GetDepend(); + UInt32PairAccessor accessor(left, right); + GateRef ret = GetCircuit()->NewGate(circuit_->RangeGuard(accessor.ToValue()), + MachineType::I64, {currentControl, currentDepend, gate}, GateType::IntType()); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); + return ret; +} + GateRef CircuitBuilder::IndexCheck(GateType type, GateRef gate, GateRef index) { auto currentLabel = env_->GetCurrentLabel(); @@ -291,18 +365,20 @@ GateType CircuitBuilder::GetGateTypeOfValueType(ValueType type) } } -GateRef CircuitBuilder::CheckAndConvert(GateRef gate, ValueType src, ValueType dst) +GateRef CircuitBuilder::CheckAndConvert(GateRef gate, ValueType src, ValueType dst, ConvertSupport support) { auto currentLabel = env_->GetCurrentLabel(); + auto currentControl = currentLabel->GetControl(); auto currentDepend = currentLabel->GetDepend(); auto stateSplit = acc_.FindNearestStateSplit(currentDepend); auto frameState = acc_.GetFrameState(stateSplit); - GateRef state = acc_.GetState(stateSplit); MachineType machineType = GetMachineTypeOfValueType(dst); GateType gateType = GetGateTypeOfValueType(dst); - uint64_t value = ValuePairTypeAccessor::ToValue(src, dst); + uint64_t value = ValuePairTypeAccessor::ToValue(src, dst, support); GateRef ret = GetCircuit()->NewGate(circuit_->CheckAndConvert(value), - machineType, {state, gate, frameState}, gateType); + machineType, {currentControl, currentDepend, gate, frameState}, gateType); + currentLabel->SetControl(ret); + currentLabel->SetDepend(ret); return ret; } @@ -315,11 +391,26 @@ GateRef CircuitBuilder::Convert(GateRef gate, ValueType src, ValueType dst) return ret; } +GateRef CircuitBuilder::ConvertBoolToInt32(GateRef gate, ConvertSupport support) +{ + return CheckAndConvert(gate, ValueType::BOOL, ValueType::INT32, support); +} + +GateRef CircuitBuilder::ConvertBoolToFloat64(GateRef gate, ConvertSupport support) +{ + return CheckAndConvert(gate, ValueType::BOOL, ValueType::FLOAT64, support); +} + GateRef CircuitBuilder::ConvertInt32ToFloat64(GateRef 
@@ -315,11 +391,26 @@ GateRef CircuitBuilder::Convert(GateRef gate, ValueType src, ValueType dst)
     return ret;
 }
 
+GateRef CircuitBuilder::ConvertBoolToInt32(GateRef gate, ConvertSupport support)
+{
+    return CheckAndConvert(gate, ValueType::BOOL, ValueType::INT32, support);
+}
+
+GateRef CircuitBuilder::ConvertBoolToFloat64(GateRef gate, ConvertSupport support)
+{
+    return CheckAndConvert(gate, ValueType::BOOL, ValueType::FLOAT64, support);
+}
+
 GateRef CircuitBuilder::ConvertInt32ToFloat64(GateRef gate)
 {
     return Convert(gate, ValueType::INT32, ValueType::FLOAT64);
 }
 
+GateRef CircuitBuilder::ConvertUInt32ToFloat64(GateRef gate)
+{
+    return Convert(gate, ValueType::UINT32, ValueType::FLOAT64);
+}
+
 GateRef CircuitBuilder::ConvertFloat64ToInt32(GateRef gate)
 {
     return Convert(gate, ValueType::FLOAT64, ValueType::INT32);
@@ -340,11 +431,21 @@ GateRef CircuitBuilder::ConvertInt32ToTaggedInt(GateRef gate)
     return Convert(gate, ValueType::INT32, ValueType::TAGGED_INT);
 }
 
+GateRef CircuitBuilder::ConvertUInt32ToTaggedNumber(GateRef gate)
+{
+    return Convert(gate, ValueType::UINT32, ValueType::TAGGED_NUMBER);
+}
+
 GateRef CircuitBuilder::ConvertInt32ToBool(GateRef gate)
 {
     return Convert(gate, ValueType::INT32, ValueType::BOOL);
 }
 
+GateRef CircuitBuilder::ConvertUInt32ToBool(GateRef gate)
+{
+    return Convert(gate, ValueType::UINT32, ValueType::BOOL);
+}
+
 GateRef CircuitBuilder::ConvertFloat64ToBool(GateRef gate)
 {
     return Convert(gate, ValueType::FLOAT64, ValueType::BOOL);
@@ -365,6 +466,11 @@ GateRef CircuitBuilder::ConvertFloat64ToTaggedDouble(GateRef gate)
     return Convert(gate, ValueType::FLOAT64, ValueType::TAGGED_DOUBLE);
 }
 
+GateRef CircuitBuilder::CheckUInt32AndConvertToInt32(GateRef gate)
+{
+    return CheckAndConvert(gate, ValueType::UINT32, ValueType::INT32);
+}
+
 GateRef CircuitBuilder::CheckTaggedIntAndConvertToInt32(GateRef gate)
 {
     return CheckAndConvert(gate, ValueType::TAGGED_INT, ValueType::INT32);
@@ -411,12 +517,12 @@ GateRef CircuitBuilder::TryPrimitiveTypeCheck(GateType type, GateRef gate)
     return ret;
 }
 
-GateRef CircuitBuilder::CallTargetCheck(GateRef function, GateRef id, GateRef param, const char* comment)
+GateRef CircuitBuilder::CallTargetCheck(GateRef gate, GateRef function, GateRef id, GateRef param, const char* comment)
 {
     auto currentLabel = env_->GetCurrentLabel();
     auto currentControl = currentLabel->GetControl();
     auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
+    auto frameState = acc_.GetFrameState(gate);
     GateRef ret = GetCircuit()->NewGate(circuit_->TypedCallCheck(),
                                         MachineType::I1,
                                         { currentControl, currentDepend, function, id, param, frameState},
@@ -427,65 +533,14 @@ GateRef CircuitBuilder::CallTargetCheck(GateRef function, GateRef id, GateRef pa
     return ret;
 }
 
-GateRef CircuitBuilder::JSCallTargetFromDefineFuncCheck(GateType type, GateRef func)
-{
-    auto currentLabel = env_->GetCurrentLabel();
-    auto currentControl = currentLabel->GetControl();
-    auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->JSCallTargetFromDefineFuncCheck(static_cast<size_t>(type.Value())),
-        MachineType::I1, {currentControl, currentDepend, func, frameState}, GateType::NJSValue());
-    currentLabel->SetControl(ret);
-    currentLabel->SetDepend(ret);
-    return ret;
-}
-
-GateRef CircuitBuilder::JSCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex)
-{
-    auto currentLabel = env_->GetCurrentLabel();
-    auto currentControl = currentLabel->GetControl();
-    auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->JSCallTargetTypeCheck(static_cast<size_t>(type.Value())),
-        MachineType::I1, {currentControl, currentDepend, func, methodIndex, frameState}, GateType::NJSValue());
-    currentLabel->SetControl(ret);
-    currentLabel->SetDepend(ret);
-    return ret;
-}
-
-GateRef CircuitBuilder::JSFastCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex)
-{
-    auto currentLabel = env_->GetCurrentLabel();
-    auto currentControl = currentLabel->GetControl();
-    auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->JSFastCallTargetTypeCheck(static_cast<size_t>(type.Value())),
-        MachineType::I1, {currentControl, currentDepend, func, methodIndex, frameState}, GateType::NJSValue());
-    currentLabel->SetControl(ret);
-    currentLabel->SetDepend(ret);
-    return ret;
-}
-
-GateRef CircuitBuilder::JSCallThisTargetTypeCheck(GateType type, GateRef func)
-{
-    auto currentLabel = env_->GetCurrentLabel();
-    auto currentControl = currentLabel->GetControl();
-    auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->JSCallThisTargetTypeCheck(static_cast<size_t>(type.Value())),
-        MachineType::I1, {currentControl, currentDepend, func, frameState}, GateType::NJSValue());
-    currentLabel->SetControl(ret);
-    currentLabel->SetDepend(ret);
-    return ret;
-}
-
-GateRef CircuitBuilder::JSFastCallThisTargetTypeCheck(GateType type, GateRef func)
+GateRef CircuitBuilder::JSCallTargetFromDefineFuncCheck(GateType type, GateRef func, GateRef gate)
 {
     auto currentLabel = env_->GetCurrentLabel();
     auto currentControl = currentLabel->GetControl();
     auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->JSFastCallThisTargetTypeCheck(static_cast<size_t>(type.Value())),
+    auto frameState = acc_.GetFrameState(gate);
+    GateRef ret = GetCircuit()->NewGate(circuit_->TypedCallTargetCheckOp(1, static_cast<size_t>(type.Value()),
+        TypedCallTargetCheckOp::JSCALL_IMMEDIATE_AFTER_FUNC_DEF),
         MachineType::I1, {currentControl, currentDepend, func, frameState}, GateType::NJSValue());
     currentLabel->SetControl(ret);
     currentLabel->SetDepend(ret);
@@ -543,13 +598,14 @@ GateRef CircuitBuilder::GetSuperConstructor(GateRef ctor)
     return ret;
 }
 
-GateRef CircuitBuilder::TypedCallOperator(GateRef hirGate, MachineType type, const std::initializer_list<GateRef>& args)
+GateRef CircuitBuilder::TypedCallOperator(GateRef hirGate, MachineType type, const std::vector<GateRef> &inList)
 {
     ASSERT(acc_.GetOpCode(hirGate) == OpCode::JS_BYTECODE);
-    auto numValueIn = args.size() - 2; // 2: state & depend
+    auto numValueIn = inList.size() - 2; // 2: state & depend
     uint64_t pcOffset = acc_.TryGetPcOffset(hirGate);
     ASSERT(pcOffset != 0);
-    return GetCircuit()->NewGate(circuit_->TypedCallBuiltin(numValueIn, pcOffset), type, args, GateType::AnyType());
+    return GetCircuit()->NewGate(circuit_->TypedCallBuiltin(numValueIn, pcOffset), type, inList.size(), inList.data(),
+                                 GateType::AnyType());
 }
 
 GateRef CircuitBuilder::AddWithOverflow(GateRef left, GateRef right)
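`TypedCallOperator` switches from `std::initializer_list` to `std::vector`, so the argument count no longer has to be fixed at the C++ call site. A minimal sketch of a caller under that assumption (the helper name is hypothetical; the first two inputs are still state and depend, which is what the `- 2` in the hunk above counts off):

```cpp
// Sketch: building the input list for a typed builtin call at runtime.
GateRef CallBuiltinWithArgs(CircuitBuilder &builder, GateRef hirGate,
                            GateRef state, GateRef depend,
                            const std::vector<GateRef> &jsArgs)
{
    std::vector<GateRef> inList { state, depend };           // state & depend first
    inList.insert(inList.end(), jsArgs.begin(), jsArgs.end()); // then a variable number of values
    return builder.TypedCallOperator(hirGate, MachineType::I64, inList);
}
```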
@@ -605,40 +661,27 @@ GateRef CircuitBuilder::Float64CheckRightIsZero(GateRef right)
     return ret;
 }
 
-GateRef CircuitBuilder::ValueCheckNegOverflow(GateRef value)
+GateRef CircuitBuilder::LexVarIsHoleCheck(GateRef value)
 {
     auto currentLabel = env_->GetCurrentLabel();
     auto currentControl = currentLabel->GetControl();
     auto currentDepend = currentLabel->GetDepend();
     auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->ValueCheckNegOverflow(),
+    GateRef ret = GetCircuit()->NewGate(circuit_->LexVarIsHoleCheck(),
         MachineType::I1, {currentControl, currentDepend, value, frameState}, GateType::NJSValue());
     currentLabel->SetControl(ret);
     currentLabel->SetDepend(ret);
     return ret;
 }
 
-GateRef CircuitBuilder::NegativeIndexCheck(GateRef index)
-{
-    auto currentLabel = env_->GetCurrentLabel();
-    auto currentControl = currentLabel->GetControl();
-    auto currentDepend = currentLabel->GetDepend();
-    auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->NegativeIndexCheck(),
-        MachineType::I1, {currentControl, currentDepend, index, frameState}, GateType::NJSValue());
-    currentLabel->SetControl(ret);
-    currentLabel->SetDepend(ret);
-    return ret;
-}
-
-GateRef CircuitBuilder::LargeIndexCheck(GateRef index, GateRef length)
+GateRef CircuitBuilder::ValueCheckNegOverflow(GateRef value)
 {
     auto currentLabel = env_->GetCurrentLabel();
     auto currentControl = currentLabel->GetControl();
     auto currentDepend = currentLabel->GetDepend();
     auto frameState = acc_.FindNearestFrameState(currentDepend);
-    GateRef ret = GetCircuit()->NewGate(circuit_->LargeIndexCheck(),
-        MachineType::I1, {currentControl, currentDepend, index, length, frameState}, GateType::IntType());
+    GateRef ret = GetCircuit()->NewGate(circuit_->ValueCheckNegOverflow(),
+        MachineType::I1, {currentControl, currentDepend, value, frameState}, GateType::NJSValue());
     currentLabel->SetControl(ret);
     currentLabel->SetDepend(ret);
     return ret;
@@ -692,10 +735,10 @@ GateRef CircuitBuilder::TypeConvert(MachineType type, GateType typeFrom, GateTyp
         type, inList.size(), inList.data(), GateType::AnyType());
 }
 
-GateRef CircuitBuilder::TypedConditionJump(MachineType type, TypedJumpOp jumpOp, GateType typeVal,
-    const std::vector<GateRef>& inList)
+GateRef CircuitBuilder::TypedConditionJump(MachineType type, TypedJumpOp jumpOp, BranchKind branchKind,
+    GateType typeVal, const std::vector<GateRef>& inList)
 {
-    uint64_t value = TypedJumpAccessor::ToValue(typeVal, jumpOp);
+    uint64_t value = TypedJumpAccessor::ToValue(typeVal, jumpOp, branchKind);
     return GetCircuit()->NewGate(circuit_->TypedConditionJump(value),
         type, inList.size(), inList.data(), GateType::Empty());
 }
@@ -1030,9 +1073,11 @@ GateRef CircuitBuilder::Call(const CallSignature* cs, GateRef glue, GateRef targ
     } else if (cs->IsRuntimeNGCStub()) {
         meta = circuit_->NoGcRuntimeCall(numValuesIn);
     } else if (cs->IsOptimizedStub()) {
-        meta = circuit_->CallOptimized(numValuesIn);
+        bool isNoGC = acc_.GetNoGCFlag(hirGate);
+        meta = circuit_->CallOptimized(numValuesIn, isNoGC);
     } else if (cs->IsOptimizedFastCallStub()) {
-        meta = circuit_->FastCallOptimized(numValuesIn);
+        bool isNoGC = acc_.GetNoGCFlag(hirGate);
+        meta = circuit_->FastCallOptimized(numValuesIn, isNoGC);
     } else {
         LOG_ECMA(FATAL) << "unknown call operator";
         UNREACHABLE();
@@ -1045,6 +1090,19 @@ GateRef CircuitBuilder::Call(const CallSignature* cs, GateRef glue, GateRef targ
     return result;
 }
 
+GateRef CircuitBuilder::StoreMemory(MemoryType Op, VariableType type, GateRef receiver, GateRef index, GateRef value)
+{
+    auto opIdx = static_cast<uint64_t>(Op);
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    auto ret = GetCircuit()->NewGate(GetCircuit()->StoreMemory(opIdx), type.GetMachineType(),
+        {currentControl, currentDepend, receiver, index, value}, type.GetGateType());
+    currentLabel->SetControl(ret);
+    currentLabel->SetDepend(ret);
+    return ret;
+}
+
 GateRef CircuitBuilder::NoLabelCallRuntime(GateRef glue, GateRef depend, size_t index, std::vector<GateRef> &args,
                                            GateRef hirGate)
 {
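The new `STORE_MEMORY` middle-IR op above parameterizes the store by a `MemoryType`; the early-elimination hunk later in this patch only expects `MemoryType::ELEMENT_TYPE` for now. A minimal sketch (the helper name is hypothetical, and `VariableType::JS_ANY()` is an assumed value type for illustration):

```cpp
// Sketch: emitting an element store through the new StoreMemory builder.
void StoreElementRaw(CircuitBuilder &builder, GateRef receiver, GateRef index, GateRef value)
{
    // MemoryType::ELEMENT_TYPE selects the element-layout store variant.
    builder.StoreMemory(MemoryType::ELEMENT_TYPE, VariableType::JS_ANY(), receiver, index, value);
}
```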
@@ -1150,6 +1208,18 @@ GateRef CircuitBuilder::LoadArrayLength(GateRef array)
     return ret;
 }
 
+GateRef CircuitBuilder::LoadStringLength(GateRef string)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    auto ret = GetCircuit()->NewGate(circuit_->LoadStringLength(), MachineType::I64,
+                                     { currentControl, currentDepend, string }, GateType::IntType());
+    currentLabel->SetControl(ret);
+    currentLabel->SetDepend(ret);
+    return ret;
+}
+
 GateRef CircuitBuilder::LoadConstOffset(VariableType type, GateRef receiver, size_t offset)
 {
     auto currentLabel = env_->GetCurrentLabel();
@@ -1212,7 +1282,7 @@ GateRef CircuitBuilder::Construct(GateRef hirGate, std::vector<GateRef> args)
     return callGate;
 }
 
-GateRef CircuitBuilder::TypedCall(GateRef hirGate, std::vector<GateRef> args)
+GateRef CircuitBuilder::TypedCall(GateRef hirGate, std::vector<GateRef> args, bool isNoGC)
 {
     ASSERT(acc_.GetOpCode(hirGate) == OpCode::JS_BYTECODE);
     auto currentLabel = env_->GetCurrentLabel();
@@ -1223,14 +1293,14 @@ GateRef CircuitBuilder::TypedCall(GateRef hirGate, std::vector<GateRef> args)
     ASSERT(pcOffset != 0);
     args.insert(args.begin(), currentDepend);
     args.insert(args.begin(), currentControl);
-    auto callGate = GetCircuit()->NewGate(circuit_->TypedCall(bitfield, pcOffset), MachineType::I64,
+    auto callGate = GetCircuit()->NewGate(circuit_->TypedCall(bitfield, pcOffset, isNoGC), MachineType::I64,
                                           args.size(), args.data(), GateType::AnyType());
     currentLabel->SetControl(callGate);
     currentLabel->SetDepend(callGate);
     return callGate;
 }
 
-GateRef CircuitBuilder::TypedFastCall(GateRef hirGate, std::vector<GateRef> args)
+GateRef CircuitBuilder::TypedFastCall(GateRef hirGate, std::vector<GateRef> args, bool isNoGC)
 {
     ASSERT(acc_.GetOpCode(hirGate) == OpCode::JS_BYTECODE);
     auto currentLabel = env_->GetCurrentLabel();
@@ -1241,7 +1311,7 @@ GateRef CircuitBuilder::TypedFastCall(GateRef hirGate, std::vector<GateRef> args
     ASSERT(pcOffset != 0);
     args.insert(args.begin(), currentDepend);
     args.insert(args.begin(), currentControl);
-    auto callGate = GetCircuit()->NewGate(circuit_->TypedFastCall(bitfield, pcOffset), MachineType::I64,
+    auto callGate = GetCircuit()->NewGate(circuit_->TypedFastCall(bitfield, pcOffset, isNoGC), MachineType::I64,
                                           args.size(), args.data(), GateType::AnyType());
     currentLabel->SetControl(callGate);
     currentLabel->SetDepend(callGate);
@@ -1310,6 +1380,17 @@ GateRef CircuitBuilder::GetGlobalEnv()
     return newGate;
 }
 
+GateRef CircuitBuilder::GetGlobalEnvObj(GateRef env, size_t index)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentDepend = currentLabel->GetDepend();
+    auto newGate = GetCircuit()->NewGate(circuit_->GetGlobalEnvObj(index), MachineType::I64,
+                                         { currentDepend, env },
+                                         GateType::AnyType());
+    currentLabel->SetDepend(newGate);
+    return newGate;
+}
+
 GateRef CircuitBuilder::GetGlobalEnvObjHClass(GateRef env, size_t index)
 {
     auto currentLabel = env_->GetCurrentLabel();
@@ -1357,6 +1438,26 @@ GateRef CircuitBuilder::TaggedIsString(GateRef obj)
     return ret;
 }
 
+GateRef CircuitBuilder::TaggedIsSymbol(GateRef obj)
+{
+    Label entry(env_);
+    SubCfgEntry(&entry);
+    Label exit(env_);
+    DEFVAlUE(result, env_, VariableType::BOOL(), False());
+    Label isHeapObject(env_);
+    Branch(TaggedIsHeapObject(obj), &isHeapObject, &exit);
+    Bind(&isHeapObject);
+    {
+        GateRef objType = GetObjectType(LoadHClass(obj));
+        result = Equal(objType, Int32(static_cast<int32_t>(JSType::SYMBOL)));
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *result;
+    SubCfgExit();
+    return ret;
+}
+
 GateRef CircuitBuilder::TaggedIsStringOrSymbol(GateRef obj)
 {
     Label entry(env_);
@@ -1511,18 +1612,37 @@ GateRef CircuitBuilder::ComputeTaggedArraySize(GateRef length)
         PtrMul(IntPtr(JSTaggedValue::TaggedTypeSize()), length));
 }
 
-GateRef CircuitBuilder::CreateArray(size_t arraySize)
+GateRef CircuitBuilder::CreateArray(ElementsKind kind, uint32_t arraySize)
 {
     auto currentLabel = env_->GetCurrentLabel();
     auto currentControl = currentLabel->GetControl();
     auto currentDepend = currentLabel->GetDepend();
-    GateRef newGate = GetCircuit()->NewGate(circuit_->CreateArray(arraySize), MachineType::I64,
+    ArrayMetaDataAccessor accessor(kind, ArrayMetaDataAccessor::Mode::CREATE, arraySize);
+    GateRef newGate = GetCircuit()->NewGate(circuit_->CreateArray(accessor.ToValue()), MachineType::I64,
                                             { currentControl, currentDepend }, GateType::TaggedValue());
     currentLabel->SetControl(newGate);
     currentLabel->SetDepend(newGate);
     return newGate;
 }
 
+GateRef CircuitBuilder::CreateArrayWithBuffer(ElementsKind kind, ArrayMetaDataAccessor::Mode mode,
+                                              GateRef constPoolIndex, GateRef elementIndex)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    auto frameState = acc_.FindNearestFrameState(currentDepend);
+    ArrayMetaDataAccessor accessor(kind, mode);
+    GateRef newGate = GetCircuit()->NewGate(circuit_->CreateArrayWithBuffer(accessor.ToValue()),
+                                            MachineType::I64,
+                                            { currentControl, currentDepend, constPoolIndex,
+                                              elementIndex, frameState },
+                                            GateType::NJSValue());
+    currentLabel->SetControl(newGate);
+    currentLabel->SetDepend(newGate);
+    return newGate;
+}
+
 GateRef CircuitBuilder::StartAllocate()
 {
     auto currentLabel = env_->GetCurrentLabel();
@@ -1745,11 +1865,12 @@ void CircuitBuilder::Jump(Label *label)
     env_->SetCurrentLabel(nullptr);
 }
 
-void CircuitBuilder::Branch(GateRef condition, Label *trueLabel, Label *falseLabel)
+void CircuitBuilder::Branch(GateRef condition, Label *trueLabel, Label *falseLabel,
+                            uint32_t trueWeight, uint32_t falseWeight)
 {
     auto currentLabel = env_->GetCurrentLabel();
     auto currentControl = currentLabel->GetControl();
-    GateRef ifBranch = Branch(currentControl, condition);
+    GateRef ifBranch = Branch(currentControl, condition, trueWeight, falseWeight);
     currentLabel->SetControl(ifBranch);
     GateRef ifTrue = IfTrue(ifBranch);
     trueLabel->AppendPredecessor(GetCurrentLabel());
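`Branch` now carries optional branch weights, and both weights default to 1, so existing two-argument call sites keep their old behavior. A minimal sketch (the helper name and the 9:1 ratio are illustrative only):

```cpp
// Sketch: biasing block layout toward the expected-hot successor.
void BranchLikelyTaken(CircuitBuilder &builder, GateRef cond, Label *hot, Label *cold)
{
    builder.Branch(cond, hot, cold, 9, 1); // 9:1 in favour of the hot path
}
```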
@@ -2046,4 +2167,108 @@ void CircuitBuilder::ClearConstantCache(GateRef gate)
     auto gateType = acc_.GetGateType(gate);
     GetCircuit()->ClearConstantCache(machineType, value, gateType);
 }
+
+GateRef CircuitBuilder::InsertTypedBinaryop(GateRef left, GateRef right, GateType leftType, GateType rightType,
+                                            GateType gateType, PGOSampleType sampleType, TypedBinOp op)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    uint64_t operandTypes = GatePairTypeAccessor::ToValue(leftType, rightType);
+    auto ret = GetCircuit()->NewGate(circuit_->TypedBinaryOp(operandTypes, op, sampleType),
+                                     MachineType::I64,
+                                     {currentControl, currentDepend, left, right},
+                                     gateType);
+    acc_.ReplaceInAfterInsert(currentControl, currentDepend, ret);
+    currentLabel->SetControl(ret);
+    currentLabel->SetDepend(ret);
+    return ret;
+}
+
+GateRef CircuitBuilder::InsertRangeCheckPredicate(GateRef left, TypedBinOp cond, GateRef right)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    auto frameState = acc_.FindNearestFrameState(currentDepend);
+    TypedBinaryAccessor accessor(GateType::IntType(), cond);
+    auto ret = GetCircuit()->NewGate(circuit_->RangeCheckPredicate(accessor.ToValue()),
+                                     MachineType::I32,
+                                     {currentControl, currentDepend, left, right, frameState},
+                                     GateType::IntType());
+    acc_.ReplaceInAfterInsert(currentControl, currentDepend, ret);
+    currentLabel->SetControl(ret);
+    currentLabel->SetDepend(ret);
+    return ret;
+}
+
+GateRef CircuitBuilder::InsertStableArrayCheck(GateRef array)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    GateRef frameState = acc_.FindNearestFrameState(currentDepend);
+    ElementsKind kind = acc_.TryGetElementsKind(array);
+    ArrayMetaDataAccessor::Mode mode = ArrayMetaDataAccessor::Mode::LOAD_LENGTH;
+    ArrayMetaDataAccessor accessor(kind, mode);
+    auto ret = GetCircuit()->NewGate(circuit_->StableArrayCheck(accessor.ToValue()),
+                                     MachineType::I1,
+                                     {currentControl, currentDepend, array, frameState},
+                                     GateType::NJSValue());
+    acc_.ReplaceInAfterInsert(currentControl, currentDepend, ret);
+    currentLabel->SetControl(ret);
+    currentLabel->SetDepend(ret);
+    return ret;
+}
+
+GateRef CircuitBuilder::InsertTypedArrayCheck(GateType type, GateRef array)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    GateRef frameState = acc_.FindNearestFrameState(currentDepend);
+    auto ret = GetCircuit()->NewGate(circuit_->TypedArrayCheck(static_cast<size_t>(type.Value())),
+                                     MachineType::I1,
+                                     {currentControl, currentDepend, array, frameState},
+                                     GateType::NJSValue());
+    acc_.ReplaceInAfterInsert(currentControl, currentDepend, ret);
+    currentLabel->SetControl(ret);
+    currentLabel->SetDepend(ret);
+    return ret;
+}
+
+GateRef CircuitBuilder::InsertLoadArrayLength(GateRef array, bool isTypedArray)
+{
+    auto currentLabel = env_->GetCurrentLabel();
+    auto currentControl = currentLabel->GetControl();
+    auto currentDepend = currentLabel->GetDepend();
+    GateType arrayType = acc_.GetGateType(array);
+    if (isTypedArray) {
+        InsertTypedArrayCheck(arrayType, array);
+        currentControl = currentLabel->GetControl();
+        currentDepend = currentLabel->GetDepend();
+        auto ret = GetCircuit()->NewGate(circuit_->LoadTypedArrayLength(static_cast<size_t>(arrayType.Value())),
+                                         MachineType::I64,
+                                         { currentControl, currentDepend, array },
+                                         GateType::IntType());
+        acc_.ReplaceInAfterInsert(currentControl, currentDepend, ret);
+        currentLabel->SetControl(ret);
+        currentLabel->SetDepend(ret);
+        return ret;
+    } else {
+        InsertStableArrayCheck(array);
+        currentControl = currentLabel->GetControl();
+        currentDepend = currentLabel->GetDepend();
+        auto ret = GetCircuit()->NewGate(circuit_->LoadArrayLength(),
+                                         MachineType::I64,
+                                         { currentControl, currentDepend, array },
+                                         GateType::IntType());
+        acc_.ReplaceInAfterInsert(currentControl, currentDepend, ret);
+        currentLabel->SetControl(ret);
+        currentLabel->SetDepend(ret);
+        return ret;
+    }
+    UNREACHABLE();
+    return Circuit::NullGate();
+}
 } // namespace panda::ecmascript::kungfu
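Unlike the ordinary builders, these `Insert*` helpers patch an already constructed region: after `NewGate` they call `ReplaceInAfterInsert`, so existing users of the old control and depend edges are rewired to the inserted gate. A minimal sketch of the common case (the wrapper name is hypothetical):

```cpp
// Sketch: splicing a guarded length load into a built graph.
// For a typed array this emits TypedArrayCheck first; otherwise it emits
// StableArrayCheck, then the matching length-load gate.
GateRef GuardedLength(CircuitBuilder &builder, GateRef array, bool isTypedArray)
{
    return builder.InsertLoadArrayLength(array, isTypedArray);
}
```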
diff --git a/ecmascript/compiler/circuit_builder.h b/ecmascript/compiler/circuit_builder.h
index 3e13d0091494f609d0f85ad6be9fba2a3673937f..4c79184f87d912d5ba32f5cc7f89e1fc41de8698 100644
--- a/ecmascript/compiler/circuit_builder.h
+++ b/ecmascript/compiler/circuit_builder.h
@@ -40,6 +40,9 @@ class Environment;
 class Label;
 class Variable;
 class StubBuilder;
+class TSHCRLowering;
+class NTypeHCRLowering;
+class SlowPathLowering;
 
 #define BINARY_ARITHMETIC_METHOD_LIST_WITH_BITWIDTH(V)   \
     V(Int16Add, Add, MachineType::I16)                   \
@@ -106,6 +109,7 @@ class StubBuilder;
     V(TruncInt64ToInt1, Trunc, MachineType::I1)          \
     V(TruncInt64ToInt16, Trunc, MachineType::I16)        \
     V(TruncInt32ToInt1, Trunc, MachineType::I1)          \
+    V(TruncInt32ToInt8, Trunc, MachineType::I8)          \
     V(TruncInt32ToInt16, Trunc, MachineType::I16)        \
     V(TruncFloatToInt64, TruncFloatToInt64, MachineType::I64) \
     V(TruncFloatToInt32, TruncFloatToInt32, MachineType::I32) \
@@ -243,27 +247,33 @@ public:
     ~CircuitBuilder() = default;
     NO_MOVE_SEMANTIC(CircuitBuilder);
     NO_COPY_SEMANTIC(CircuitBuilder);
+    static constexpr uint32_t GATE_TWO_VALUESIN = 2;
     // low level interface
     GateRef HeapObjectCheck(GateRef gate, GateRef frameState);
-    GateRef StableArrayCheck(GateRef gate);
-    GateRef HClassStableArrayCheck(GateRef gate, GateRef frameState);
+    GateRef StableArrayCheck(GateRef gate, ElementsKind kind, ArrayMetaDataAccessor::Mode mode);
+    GateRef COWArrayCheck(GateRef gate);
+    GateRef EcmaStringCheck(GateRef gate);
+    GateRef FlattenStringCheck(GateRef gate);
+    GateRef HClassStableArrayCheck(GateRef gate, GateRef frameState, ArrayMetaDataAccessor accessor);
     GateRef ArrayGuardianCheck(GateRef frameState);
     GateRef TypedArrayCheck(GateType type, GateRef gate);
     GateRef LoadTypedArrayLength(GateType type, GateRef gate);
+    GateRef RangeGuard(GateRef gate, uint32_t left, uint32_t right);
     GateRef IndexCheck(GateType type, GateRef gate, GateRef index);
-    GateRef ObjectTypeCheck(GateType type, GateRef gate, GateRef hclassOffset);
+    GateRef ObjectTypeCheck(GateType type, bool isHeapObject, GateRef gate, GateRef hclassIndex);
+    GateRef ObjectTypeCompare(GateType type, bool isHeapObject, GateRef gate, GateRef hclassIndex);
     GateRef TryPrimitiveTypeCheck(GateType type, GateRef gate);
-    GateRef CallTargetCheck(GateRef function, GateRef id, GateRef param, const char* comment = nullptr);
-    GateRef JSCallTargetFromDefineFuncCheck(GateType type, GateRef func);
-    GateRef JSCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex);
-    GateRef JSFastCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex);
-    GateRef JSCallThisTargetTypeCheck(GateType type, GateRef func);
-    GateRef JSFastCallThisTargetTypeCheck(GateType type, GateRef func);
+    GateRef CallTargetCheck(GateRef gate, GateRef function, GateRef id, GateRef param, const char* comment = nullptr);
+    GateRef JSCallTargetFromDefineFuncCheck(GateType type, GateRef func, GateRef gate);
+    template <TypedCallTargetCheckOp Op>
+    GateRef JSCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex, GateRef gate);
+    template <TypedCallTargetCheckOp Op>
+    GateRef JSCallThisTargetTypeCheck(GateType type, GateRef func, GateRef gate);
+    template <TypedCallTargetCheckOp Op>
+    inline GateRef JSNoGCCallThisTargetTypeCheck(GateType type, GateRef func, GateRef methodId, GateRef gate);
     GateRef DeoptCheck(GateRef condition, GateRef frameState, DeoptType type);
-    GateRef TypedCallOperator(GateRef hirGate, MachineType type, const std::initializer_list<GateRef>& args);
-    inline GateRef TypedCallBuiltin(GateRef hirGate, GateRef x, BuiltinsStubCSigns::ID id);
-    inline GateRef TypedCallThis3Builtin(GateRef hirGate, GateRef thisObj, GateRef a0, GateRef a1, GateRef a2,
-        BuiltinsStubCSigns::ID id);
+    GateRef TypedCallOperator(GateRef hirGate, MachineType type, const std::vector<GateRef>& inList);
+    inline GateRef TypedCallBuiltin(GateRef hirGate, const std::vector<GateRef> &args, BuiltinsStubCSigns::ID id);
     GateRef TypeConvert(MachineType type, GateType typeFrom, GateType typeTo, const std::vector<GateRef>& inList);
     GateRef AddWithOverflow(GateRef left, GateRef right);
     GateRef SubWithOverflow(GateRef left, GateRef right);
@@ -273,9 +283,8 @@ public:
     GateRef Int32CheckRightIsZero(GateRef right);
     GateRef Float64CheckRightIsZero(GateRef right);
     GateRef ValueCheckNegOverflow(GateRef value);
-    GateRef NegativeIndexCheck(GateRef index);
-    GateRef LargeIndexCheck(GateRef index, GateRef length);
     GateRef OverflowCheck(GateRef value);
+    GateRef LexVarIsHoleCheck(GateRef value);
     GateRef Int32UnsignedUpperBoundCheck(GateRef value, GateRef upperBound);
     GateRef Int32DivWithCheck(GateRef left, GateRef right);
     MachineType GetMachineTypeOfValueType(ValueType type);
@@ -292,8 +301,15 @@ public:
     GateRef ConvertFloat64ToTaggedDouble(GateRef gate);
     GateRef ConvertFloat64ToInt32(GateRef gate);
     GateRef ConvertInt32ToFloat64(GateRef gate);
-    GateRef CheckAndConvert(GateRef gate, ValueType src, ValueType dst);
+    GateRef ConvertBoolToInt32(GateRef gate, ConvertSupport support);
+    GateRef ConvertBoolToFloat64(GateRef gate, ConvertSupport support);
+    GateRef ConvertUInt32ToBool(GateRef gate);
+    GateRef ConvertUInt32ToTaggedNumber(GateRef gate);
+    GateRef ConvertUInt32ToFloat64(GateRef gate);
+    GateRef CheckAndConvert(
+        GateRef gate, ValueType src, ValueType dst, ConvertSupport support = ConvertSupport::ENABLE);
     GateRef ConvertHoleAsUndefined(GateRef receiver);
+    GateRef CheckUInt32AndConvertToInt32(GateRef gate);
     GateRef CheckTaggedIntAndConvertToInt32(GateRef gate);
     GateRef CheckTaggedDoubleAndConvertToInt32(GateRef gate);
     GateRef CheckTaggedNumberAndConvertToInt32(GateRef gate);
@@ -302,7 +318,13 @@ public:
     GateRef CheckTaggedNumberAndConvertToFloat64(GateRef gate);
     GateRef CheckTaggedNumberAndConvertToBool(GateRef gate);
     GateRef CheckTaggedBooleanAndConvertToBool(GateRef gate);
-    GateRef TypedConditionJump(MachineType type, TypedJumpOp jumpOp, GateType typeVal,
+    GateRef InsertStableArrayCheck(GateRef array);
+    GateRef InsertLoadArrayLength(GateRef array, bool isTypedArray);
+    GateRef InsertTypedArrayCheck(GateType type, GateRef array);
+    GateRef InsertTypedBinaryop(GateRef left, GateRef right, GateType leftType, GateType rightType,
+                                GateType gateType, PGOSampleType sampleType, TypedBinOp op);
+    GateRef InsertRangeCheckPredicate(GateRef left, TypedBinOp cond, GateRef right);
+    GateRef TypedConditionJump(MachineType type, TypedJumpOp jumpOp, BranchKind branchKind, GateType typeVal,
         const std::vector<GateRef>& inList);
     GateRef TypedNewAllocateThis(GateRef ctor, GateRef hclassIndex, GateRef frameState);
     GateRef TypedSuperAllocateThis(GateRef superCtor, GateRef newTarget, GateRef frameState);
@@ -328,7 +350,8 @@ public:
     GateRef ExceptionConstant();
     GateRef RelocatableData(uint64_t val);
     GateRef Alloca(size_t size);
-    GateRef Branch(GateRef state, GateRef condition);
+    GateRef Branch(GateRef state, GateRef condition,
+                   uint32_t leftWeight = 1, uint32_t rightWeight = 1); // 1: default branch weight
     GateRef SwitchBranch(GateRef state, GateRef index, int caseCounts);
     GateRef Return(GateRef state, GateRef depend, GateRef value);
     GateRef ReturnVoid(GateRef state, GateRef depend);
@@ -340,6 +363,7 @@ public:
     GateRef SwitchCase(GateRef switchBranch, int64_t value);
     GateRef DefaultCase(GateRef switchBranch);
     GateRef DependRelay(GateRef state, GateRef depend);
+    GateRef ReadSp();
     GateRef BinaryArithmetic(const GateMetaData* meta, MachineType machineType, GateRef left, GateRef right,
                              GateType gateType = GateType::Empty());
     GateRef BinaryCmp(const GateMetaData* meta, GateRef left, GateRef right);
@@ -452,6 +476,7 @@ public:
     inline GateRef TaggedIsAsyncGeneratorObject(GateRef x);
     inline GateRef TaggedIsJSGlobalObject(GateRef x);
     inline GateRef TaggedIsGeneratorObject(GateRef x);
+    inline GateRef TaggedIsJSArray(GateRef x);
     inline GateRef TaggedIsPropertyBox(GateRef x);
     inline GateRef TaggedIsWeak(GateRef x);
     inline GateRef TaggedIsPrototypeHandler(GateRef x);
@@ -469,6 +494,8 @@ public:
     inline GateRef ToTaggedIntPtr(GateRef x);
     inline GateRef DoubleToTaggedDoublePtr(GateRef x);
     inline GateRef BooleanToTaggedBooleanPtr(GateRef x);
+    inline GateRef BooleanToInt32(GateRef x);
+    inline GateRef BooleanToFloat64(GateRef x);
     inline GateRef Float32ToTaggedDoublePtr(GateRef x);
     inline GateRef TaggedDoublePtrToFloat32(GateRef x);
     inline GateRef TaggedIntPtrToFloat32(GateRef x);
@@ -490,11 +517,14 @@ public:
     inline GateRef IntPtrGreaterThan(GateRef x, GateRef y);
     template<OpCode Op, MachineType Type>
     inline GateRef BinaryOp(GateRef x, GateRef y);
+    template<OpCode Op, MachineType Type>
+    inline GateRef BinaryOpWithOverflow(GateRef x, GateRef y);
     inline GateRef GetLengthFromTaggedArray(GateRef array);
     inline GateRef GetValueFromTaggedArray(GateRef array, GateRef index);
     inline void SetValueToTaggedArray(VariableType valType, GateRef glue, GateRef array, GateRef index, GateRef val);
     GateRef TaggedIsString(GateRef obj);
     GateRef TaggedIsStringOrSymbol(GateRef obj);
+    GateRef TaggedIsSymbol(GateRef obj);
     inline GateRef GetGlobalConstantString(ConstantIndex index);
     inline GateRef LoadObjectFromWeakRef(GateRef x);
     GateRef ComputeTaggedArraySize(GateRef length);
@@ -508,7 +538,7 @@ public:
     template<TypedUnOp Op>
     inline GateRef TypedUnaryOp(GateRef x, GateType xType, GateType gateType);
     template<TypedJumpOp Op>
-    inline GateRef TypedConditionJump(GateRef x, GateType xType);
+    inline GateRef TypedConditionJump(GateRef x, GateType xType, BranchKind branchKind);
     inline GateRef PrimitiveToNumber(GateRef x, VariableType type);
 
     // middle ir: object operations
@@ -517,17 +547,20 @@ public:
     GateRef LoadElement(GateRef receiver, GateRef index);
     template<TypedStoreOp Op>
     GateRef StoreElement(GateRef receiver, GateRef index, GateRef value);
+    GateRef StoreMemory(MemoryType Op, VariableType type, GateRef receiver, GateRef index, GateRef value);
     GateRef LoadProperty(GateRef receiver, GateRef propertyLookupResult, bool isFunction);
     GateRef StoreProperty(GateRef receiver, GateRef propertyLookupResult, GateRef value);
     GateRef LoadArrayLength(GateRef array);
+    GateRef LoadStringLength(GateRef string);
     GateRef Construct(GateRef hirGate, std::vector<GateRef> args);
-    GateRef TypedCall(GateRef hirGate, std::vector<GateRef> args);
-    GateRef TypedFastCall(GateRef hirGate, std::vector<GateRef> args);
+    GateRef TypedCall(GateRef hirGate, std::vector<GateRef> args, bool isNoGC);
+    GateRef TypedFastCall(GateRef hirGate, std::vector<GateRef> args, bool isNoGC);
     GateRef CallGetter(GateRef hirGate, GateRef receiver, GateRef propertyLookupResult,
                        const char* comment = nullptr);
     GateRef CallSetter(GateRef hirGate, GateRef receiver, GateRef propertyLookupResult,
                        GateRef value, const char* comment = nullptr);
     GateRef GetConstPool(GateRef jsFunc);
     GateRef GetGlobalEnv();
+    GateRef GetGlobalEnvObj(GateRef env, size_t index);
     GateRef GetGlobalEnvObjHClass(GateRef env, size_t index);
     GateRef GetGlobalConstantValue(ConstantIndex index);
     GateRef LoadConstOffset(VariableType type, GateRef receiver, size_t offset);
@@ -537,6 +570,7 @@ public:
     inline GateRef LoadHClass(GateRef object);
     inline GateRef IsJSFunction(GateRef obj);
     inline GateRef IsJSFunctionWithBit(GateRef obj);
+    inline GateRef IsOptimizedAndNotFastCall(GateRef obj);
     inline GateRef IsOptimized(GateRef obj);
     inline GateRef IsOptimizedWithBitField(GateRef bitfield);
     inline GateRef CanFastCall(GateRef obj);
@@ -546,7 +580,8 @@ public:
     inline GateRef IsJsType(GateRef object, JSType type);
     inline GateRef GetObjectType(GateRef hClass);
     inline GateRef IsDictionaryModeByHClass(GateRef hClass);
-    inline GateRef IsIsStableElementsByHClass(GateRef hClass);
+    inline GateRef GetElementsKindByHClass(GateRef hClass);
+    inline GateRef HasConstructorByHClass(GateRef hClass);
     inline GateRef IsStableElements(GateRef hClass);
     inline GateRef IsStableArguments(GateRef hClass);
     inline GateRef IsStableArray(GateRef hClass);
@@ -554,8 +589,10 @@ public:
     inline GateRef IsDictionaryElement(GateRef hClass);
     inline GateRef IsClassConstructor(GateRef object);
     inline GateRef IsClassConstructorWithBitField(GateRef bitfield);
+    inline GateRef HasConstructor(GateRef object);
     inline GateRef IsConstructor(GateRef object);
     inline GateRef IsClassPrototype(GateRef object);
+    inline GateRef IsClassPrototypeWithBitField(GateRef object);
     inline GateRef IsExtensible(GateRef object);
     inline GateRef GetExpectedNumOfArgs(GateRef method);
     inline GateRef TaggedObjectIsEcmaObject(GateRef obj);
@@ -568,6 +605,11 @@ public:
     inline GateRef LogicOr(GateRef x, GateRef y);
     inline GateRef BothAreString(GateRef x, GateRef y);
     inline GateRef GetObjectSizeFromHClass(GateRef hClass);
+    inline GateRef IsTreeString(GateRef obj);
+    inline GateRef IsSlicedString(GateRef obj);
+    inline GateRef TreeStringIsFlat(GateRef string);
+    inline GateRef GetFirstFromTreeString(GateRef string);
+    inline GateRef GetSecondFromTreeString(GateRef string);
     GateRef GetGlobalObject(GateRef glue);
     GateRef GetMethodFromFunction(GateRef function);
     GateRef GetModuleFromFunction(GateRef function);
@@ -595,7 +637,9 @@ public:
     GateRef StartAllocate();
     GateRef FinishAllocate();
     GateRef HeapAlloc(GateRef size, GateType type, RegionSpaceFlag flag);
-    GateRef CreateArray(size_t arraySize);
+    GateRef CreateArray(ElementsKind kind, uint32_t arraySize);
+    GateRef CreateArrayWithBuffer(ElementsKind kind, ArrayMetaDataAccessor::Mode mode,
+                                  GateRef constPoolIndex, GateRef elementIndex);
 
     void SetEnvironment(Environment *env)
     {
@@ -626,7 +670,8 @@ public:
     inline void Bind(Label *label);
     inline void Bind(Label *label, bool justSlowPath);
     void Jump(Label *label);
-    void Branch(GateRef condition, Label *trueLabel, Label *falseLabel);
+    void Branch(GateRef condition, Label *trueLabel, Label *falseLabel,
+                uint32_t trueWeight = 1, uint32_t falseWeight = 1); // 1: default branch weight
     void Switch(GateRef index, Label *defaultLabel, int64_t *keysValue, Label *keysLabel, int numberOfKeys);
     void LoopBegin(Label *loopHead);
     void LoopEnd(Label *loopHead);
@@ -634,14 +679,17 @@ public:
     inline GateRef GetState() const;
     inline GateRef GetDepend() const;
     inline StateDepend GetStateDepend() const;
-    inline void SetDepend(GateRef depend);
-    inline void SetState(GateRef state);
 
     GateRef GetGlobalEnvValue(VariableType type, GateRef env, size_t index);
     GateRef IsBase(GateRef ctor);
     inline GateRef GetMethodId(GateRef func);
+    inline GateRef LoadFromTaggedArray(GateRef array, size_t index);
+    inline GateRef StoreToTaggedArray(GateRef array, size_t index, GateRef value);
 
 private:
+    inline void SetDepend(GateRef depend);
+    inline void SetState(GateRef state);
+
 #define ARITHMETIC_UNARY_OP_WITH_BITWIDTH(NAME, OPCODEID, MACHINETYPEID) \
     inline GateRef NAME(GateRef x)                                       \
     {                                                                    \
@@ -656,6 +704,9 @@ private:
     Environment *env_ {nullptr};
     CompilationConfig *cmpCfg_ {nullptr};
     friend StubBuilder;
+    friend TSHCRLowering;
+    friend NTypeHCRLowering;
+    friend SlowPathLowering;
 };
 
 class Label {
@@ -720,7 +771,7 @@ private:
     }
     void MergeControl(GateRef control)
     {
-        if (predeControl_ == -1) {
+        if (predeControl_ == Circuit::NullGate()) {
            predeControl_ = control;
            control_ = predeControl_;
         } else {
@@ -751,9 +802,9 @@ private:
     GateRef ReadVariableRecursive(Variable *var);
     Environment *env_;
     GateRef control_;
-    GateRef predeControl_ {-1};
-    GateRef depend_ {-1};
-    GateRef loopDepend_ {-1};
+    GateRef predeControl_ {Circuit::NullGate()};
+    GateRef depend_ {Circuit::NullGate()};
+    GateRef loopDepend_ {Circuit::NullGate()};
     std::vector<GateRef> otherPredeControls_;
     bool isSealed_ {false};
     std::map<Variable *, GateRef> valueMap_;
diff --git a/ecmascript/compiler/code_generator.h b/ecmascript/compiler/code_generator.h
index 114e020bf8299f57368e0ec2a767718645aa4bc5..a28ea5ab4cc994b799b15f6adbe474655f6af5df 100644
--- a/ecmascript/compiler/code_generator.h
+++ b/ecmascript/compiler/code_generator.h
@@ -17,11 +17,111 @@
 #define ECMASCRIPT_COMPILER_CODE_GENERATOR_H
 
 #include "ecmascript/compiler/circuit.h"
+#include "ecmascript/compiler/binary_section.h"
 #include "ecmascript/jspandafile/method_literal.h"
 
 namespace panda::ecmascript::kungfu {
 using ControlFlowGraph = std::vector<std::vector<GateRef>>;
 class CompilationConfig;
+class CompilerLog;
+
+struct CodeInfo {
+    using sectionInfo = std::pair<uint8_t *, size_t>;
+    CodeInfo();
+
+    ~CodeInfo();
+
+    class CodeSpace {
+    public:
+        static CodeSpace *GetInstance();
+
+        uint8_t *Alloca(uintptr_t size, bool isReq, size_t alignSize);
+
+    private:
+        CodeSpace();
+        ~CodeSpace();
+
+        static constexpr size_t REQUIRED_SECS_LIMIT = (1 << 29); // 512M
+        static constexpr size_t UNREQUIRED_SECS_LIMIT = (1 << 28); // 256M
+
+        // start point of the buffer reserved for sections required in executing phase
+        uint8_t *reqSecs_ {nullptr};
+        size_t reqBufPos_ {0};
+        // start point of the buffer reserved for sections not required in executing phase
+        uint8_t *unreqSecs_ {nullptr};
+        size_t unreqBufPos_ {0};
+    };
+
+    uint8_t *AllocaInReqSecBuffer(uintptr_t size, size_t alignSize = 0);
+
+    uint8_t *AllocaInNotReqSecBuffer(uintptr_t size, size_t alignSize = 0);
+
+    uint8_t *AllocaCodeSection(uintptr_t size, const char *sectionName);
+
+    uint8_t *AllocaDataSection(uintptr_t size, const char *sectionName);
+
+    void SaveFunc2Addr(std::string funcName, uint32_t address);
+
+    const std::map<std::string, uint32_t> &GetFunc2Addr() const
+    {
+        return func2Addr;
+    }
+
+    void Reset();
+
+    uint8_t *GetSectionAddr(ElfSecName sec) const;
+
+    size_t GetSectionSize(ElfSecName sec) const;
+
+    std::vector<std::pair<uint8_t *, uintptr_t>> GetCodeInfo() const;
+
+    template <class Callback>
+    void IterateSecInfos(const Callback &cb) const
+    {
+        for (size_t i = 0; i < secInfos_.size(); i++) {
+            if (secInfos_[i].second == 0) {
+                continue;
+            }
+            cb(i, secInfos_[i]);
+        }
+    }
+
+private:
+    std::array<sectionInfo, static_cast<int>(ElfSecName::SIZE)> secInfos_;
+    std::vector<std::pair<uint8_t *, uintptr_t>> codeInfo_ {}; // info for disasssembler, planed to be deprecated
+    std::map<std::string, uint32_t> func2Addr;
+    bool alreadyPageAlign_ {false};
+};
+
+class Assembler {
+public:
+    explicit Assembler() = default;
+    virtual ~Assembler() = default;
+    virtual void Run(const CompilerLog &log, bool fastCompileMode) = 0;
+
+    uintptr_t GetSectionAddr(ElfSecName sec) const
+    {
+        return reinterpret_cast<uintptr_t>(codeInfo_.GetSectionAddr(sec));
+    }
+
+    uint32_t GetSectionSize(ElfSecName sec) const
+    {
+        return static_cast<uint32_t>(codeInfo_.GetSectionSize(sec));
+    }
+
+    template <class Callback>
+    void IterateSecInfos(const Callback &cb) const
+    {
+        codeInfo_.IterateSecInfos(cb);
+    }
+
+    const CodeInfo &GetCodeInfo() const
+    {
+        return codeInfo_;
+    }
+protected:
+    CodeInfo codeInfo_ {};
+};
 
 class CodeGeneratorImpl {
 public:
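The new backend-neutral `Assembler` base class above exposes section data through `CodeInfo`, so file generators can walk emitted sections without knowing whether the bits came from LLVM or LiteCG. A minimal sketch of a consumer, under the assumption that only the interfaces declared in the hunk exist (`DumpSections` itself is hypothetical):

```cpp
#include <iostream>

// Sketch: iterating emitted sections via the backend-neutral interface.
void DumpSections(const Assembler *assembler)
{
    assembler->IterateSecInfos([](size_t idx, CodeInfo::sectionInfo info) {
        // info.first is the section start address, info.second its size in bytes;
        // zero-sized sections were already skipped by IterateSecInfos.
        std::cout << "section " << idx << " size " << info.second << std::endl;
    });
}
```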
diff --git a/ecmascript/compiler/common_stubs.cpp b/ecmascript/compiler/common_stubs.cpp
index c7c6e742c545742434ecec2bd0d5940f0e1c6a1f..ee48e23a07f2edd7066a19a99742a503a47f6eec 100644
--- a/ecmascript/compiler/common_stubs.cpp
+++ b/ecmascript/compiler/common_stubs.cpp
@@ -17,6 +17,7 @@
 
 #include "ecmascript/base/number_helper.h"
 #include "ecmascript/compiler/access_object_stub_builder.h"
+#include "ecmascript/compiler/builtins/builtins_string_stub_builder.h"
 #include "ecmascript/compiler/interpreter_stub.h"
 #include "ecmascript/compiler/llvm_ir_builder.h"
 #include "ecmascript/compiler/new_object_stub_builder.h"
@@ -216,7 +217,7 @@ void InstanceofStubBuilder::GenerateCircuit()
     GateRef jsFunc = TaggedArgument(3); // 3 : 4th para
     GateRef slotId = Int32Argument(4); // 4 : 5th pars
     GateRef profileTypeInfo = UpdateProfileTypeInfo(glue, jsFunc);
-    Return(InstanceOf(glue, object, target, profileTypeInfo, slotId));
+    Return(InstanceOf(glue, object, target, profileTypeInfo, slotId, ProfileOperation()));
 }
 
 void IncStubBuilder::GenerateCircuit()
@@ -307,7 +308,7 @@ void GetPropertyByIndexStubBuilder::GenerateCircuit()
     GateRef glue = PtrArgument(0);
     GateRef receiver = TaggedArgument(1);
     GateRef index = Int32Argument(2); /* 2 : 3rd parameter is index */
-    Return(GetPropertyByIndex(glue, receiver, index));
+    Return(GetPropertyByIndex(glue, receiver, index, ProfileOperation()));
 }
 
 void SetPropertyByIndexStubBuilder::GenerateCircuit()
@@ -316,7 +317,7 @@ void SetPropertyByIndexStubBuilder::GenerateCircuit()
     GateRef receiver = TaggedArgument(1);
     GateRef index = Int32Argument(2); /* 2 : 3rd parameter is index */
     GateRef value = TaggedArgument(3); /* 3 : 4th parameter is value */
-    Return(SetPropertyByIndex(glue, receiver, index, value, false));
+    Return(SetPropertyByIndex(glue, receiver, index, value, false, ProfileOperation()));
 }
 
 void SetPropertyByIndexWithOwnStubBuilder::GenerateCircuit()
@@ -325,7 +326,7 @@ void SetPropertyByIndexWithOwnStubBuilder::GenerateCircuit()
     GateRef receiver = TaggedArgument(1);
     GateRef index = Int32Argument(2); /* 2 : 3rd parameter is index */
     GateRef value = TaggedArgument(3); /* 3 : 4th parameter is value */
-    Return(SetPropertyByIndex(glue, receiver, index, value, true));
+    Return(SetPropertyByIndex(glue, receiver, index, value, true, ProfileOperation()));
 }
 
 void GetPropertyByNameStubBuilder::GenerateCircuit()
@@ -399,7 +400,7 @@ void DeprecatedGetPropertyByValueStubBuilder::GenerateCircuit()
     GateRef glue = PtrArgument(0);
     GateRef receiver = TaggedArgument(1);
     GateRef key = TaggedArgument(2); // 2 : 3rd para
-    Return(GetPropertyByValue(glue, receiver, key));
+    Return(GetPropertyByValue(glue, receiver, key, ProfileOperation()));
 }
 
 void SetPropertyByValueStubBuilder::GenerateCircuit()
@@ -442,7 +443,7 @@ void TryLdGlobalByNameStubBuilder::GenerateCircuit()
     AccessObjectStubBuilder builder(this, jsFunc);
     StringIdInfo info = { 0, 0, StringIdInfo::Offset::INVALID, StringIdInfo::Length::INVALID };
     GateRef profileTypeInfo = UpdateProfileTypeInfo(glue, jsFunc);
-    Return(builder.TryLoadGlobalByName(glue, id, info, profileTypeInfo, slotId));
+    Return(builder.TryLoadGlobalByName(glue, id, info, profileTypeInfo, slotId, ProfileOperation()));
 }
 
 void TryStGlobalByNameStubBuilder::GenerateCircuit()
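Throughout these stub hunks the new trailing argument is a default-constructed `ProfileOperation()`, an empty profiling callback: the common stubs keep their old behavior, while other callers can pass a real callback to record PGO type feedback. A minimal sketch of the pattern (the wrapper name is hypothetical):

```cpp
// Sketch: the common-stub convention shown above, i.e. calling an access
// helper with an empty ProfileOperation so no profiling work is done.
GateRef LoadGlobalNoProfiling(AccessObjectStubBuilder &builder, GateRef glue, GateRef id,
                              const StringIdInfo &info, GateRef profileTypeInfo, GateRef slotId)
{
    return builder.TryLoadGlobalByName(glue, id, info, profileTypeInfo, slotId, ProfileOperation());
}
```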
@@ -455,7 +456,7 @@ void TryStGlobalByNameStubBuilder::GenerateCircuit()
     AccessObjectStubBuilder builder(this, jsFunc);
     StringIdInfo info = { 0, 0, StringIdInfo::Offset::INVALID, StringIdInfo::Length::INVALID };
     GateRef profileTypeInfo = UpdateProfileTypeInfo(glue, jsFunc);
-    Return(builder.TryStoreGlobalByName(glue, id, info, value, profileTypeInfo, slotId));
+    Return(builder.TryStoreGlobalByName(glue, id, info, value, profileTypeInfo, slotId, ProfileOperation()));
 }
 
 void LdGlobalVarStubBuilder::GenerateCircuit()
@@ -467,7 +468,7 @@ void LdGlobalVarStubBuilder::GenerateCircuit()
     AccessObjectStubBuilder builder(this, jsFunc);
     StringIdInfo info = { 0, 0, StringIdInfo::Offset::INVALID, StringIdInfo::Length::INVALID };
     GateRef profileTypeInfo = UpdateProfileTypeInfo(glue, jsFunc);
-    Return(builder.LoadGlobalVar(glue, id, info, profileTypeInfo, slotId));
+    Return(builder.LoadGlobalVar(glue, id, info, profileTypeInfo, slotId, ProfileOperation()));
 }
 
 void StGlobalVarStubBuilder::GenerateCircuit()
@@ -505,7 +506,7 @@ void TryLoadICByNameStubBuilder::GenerateCircuit()
            &hclassNotEqualFirstValue);
     Bind(&hclassEqualFirstValue);
     {
-        Return(LoadICWithHandler(glue, receiver, receiver, secondValue));
+        Return(LoadICWithHandler(glue, receiver, receiver, secondValue, ProfileOperation()));
     }
     Bind(&hclassNotEqualFirstValue);
     {
@@ -513,7 +514,7 @@ void TryLoadICByNameStubBuilder::GenerateCircuit()
         Branch(TaggedIsHole(cachedHandler), &receiverNotHeapObject, &cachedHandlerNotHole);
         Bind(&cachedHandlerNotHole);
         {
-            Return(LoadICWithHandler(glue, receiver, receiver, cachedHandler));
+            Return(LoadICWithHandler(glue, receiver, receiver, cachedHandler, ProfileOperation()));
         }
     }
 }
@@ -546,7 +547,7 @@ void TryLoadICByValueStubBuilder::GenerateCircuit()
            &hclassEqualFirstValue,
            &hclassNotEqualFirstValue);
     Bind(&hclassEqualFirstValue);
-    Return(LoadElement(glue, receiver, key));
+    Return(LoadElement(glue, receiver, key, ProfileOperation()));
     Bind(&hclassNotEqualFirstValue);
     {
         Branch(Int64Equal(firstValue, key), &firstValueEqualKey, &receiverNotHeapObject);
@@ -555,7 +556,7 @@ void TryLoadICByValueStubBuilder::GenerateCircuit()
             auto cachedHandler = CheckPolyHClass(secondValue, hclass);
             Branch(TaggedIsHole(cachedHandler), &receiverNotHeapObject, &cachedHandlerNotHole);
             Bind(&cachedHandlerNotHole);
-            Return(LoadICWithHandler(glue, receiver, receiver, cachedHandler));
+            Return(LoadICWithHandler(glue, receiver, receiver, cachedHandler, ProfileOperation()));
         }
     }
 }
@@ -624,7 +625,7 @@ void TryStoreICByValueStubBuilder::GenerateCircuit()
            &hclassEqualFirstValue,
            &hclassNotEqualFirstValue);
     Bind(&hclassEqualFirstValue);
-    Return(ICStoreElement(glue, receiver, key, value, secondValue));
+    Return(ICStoreElement(glue, receiver, key, value, secondValue, ProfileOperation()));
     Bind(&hclassNotEqualFirstValue);
     {
         Branch(Int64Equal(firstValue, key), &firstValueEqualKey, &receiverNotHeapObject);
@@ -671,8 +672,10 @@ void ConstructorCheckStubBuilder::GenerateCircuit()
 void CreateEmptyArrayStubBuilder::GenerateCircuit()
 {
     GateRef glue = PtrArgument(0);
+    GateRef slotId = Int32Argument(4); // 4 : 5th para
     NewObjectStubBuilder newBuilder(this);
-    Return(newBuilder.CreateEmptyArray(glue));
+    Return(newBuilder.CreateEmptyArray(glue, Undefined(), Undefined(),
+                                       Undefined(), slotId, ProfileOperation()));
 }
 
 void CreateArrayWithBufferStubBuilder::GenerateCircuit()
@@ -680,8 +683,10 @@ void CreateArrayWithBufferStubBuilder::GenerateCircuit()
     GateRef glue = PtrArgument(0);
     GateRef index = Int32Argument(1);
     GateRef jsFunc = TaggedArgument(2); // 2 : 3rd para
+    GateRef slotId = Int32Argument(5); // 5 : 6th para
     NewObjectStubBuilder newBuilder(this);
-    Return(newBuilder.CreateArrayWithBuffer(glue, index, jsFunc));
+    Return(newBuilder.CreateArrayWithBuffer(glue, index, jsFunc, Undefined(),
+                                            Undefined(), slotId, ProfileOperation()));
 }
 
 void NewJSObjectStubBuilder::GenerateCircuit()
@@ -888,6 +893,22 @@ void JsProxyCallInternalStubBuilder::GenerateCircuit()
     Return(*result);
 }
 
+void GetCharFromEcmaStringStubBuilder::GenerateCircuit()
+{
+    auto env = GetEnvironment();
+    GateRef glue = PtrArgument(0);
+    GateRef str = TaggedArgument(1);
+    GateRef index = Int32Argument(2);
+    Label flattenFastPath(env);
+    FlatStringStubBuilder thisFlat(this);
+    thisFlat.FlattenString(glue, str, &flattenFastPath);
+    Bind(&flattenFastPath);
+    BuiltinsStringStubBuilder builder(this);
+    StringInfoGateRef stringInfoGate(&thisFlat);
+    GateRef result = builder.CreateFromEcmaString(glue, index, stringInfoGate);
+    Return(result);
+}
+
 CallSignature CommonStubCSigns::callSigns_[CommonStubCSigns::NUM_OF_STUBS];
 
 void CommonStubCSigns::Initialize()
diff --git a/ecmascript/compiler/common_stubs.h b/ecmascript/compiler/common_stubs.h
index e990f492b5d6e6b2bac54ad205799a20cd9a3948..2ecf1ac6c8ea0e30e1cf0d65cbd14205ede4d2b2 100644
--- a/ecmascript/compiler/common_stubs.h
+++ b/ecmascript/compiler/common_stubs.h
@@ -17,7 +17,6 @@
 #define ECMASCRIPT_COMPILER_COMMON_STUBS_H
 
 #include "ecmascript/compiler/stub_builder.h"
-#include "ecmascript/compiler/test_stubs.h"
 
 namespace panda::ecmascript::kungfu {
 #define COMMON_STUB_LIST(V)      \
@@ -77,11 +76,11 @@ namespace panda::ecmascript::kungfu {
     V(CreateArrayWithBuffer)     \
     V(NewJSObject)               \
     V(JsBoundCallInternal)       \
-    V(JsProxyCallInternal)
+    V(JsProxyCallInternal)       \
+    V(GetCharFromEcmaString)
 
 #define COMMON_STUB_ID_LIST(V) \
-    COMMON_STUB_LIST(V)        \
-    TEST_STUB_SIGNATRUE_LIST(V)
+    COMMON_STUB_LIST(V)
 
 #define DECLARE_STUB_CLASS(name)                                                   \
     class name##StubBuilder : public StubBuilder {                                 \
diff --git a/ecmascript/compiler/compiler_log.cpp b/ecmascript/compiler/compiler_log.cpp
index c6f1fea87c76227dca20d99d15fce9f3a105ad33..3eb148510a089d7bb104ae490aadaa9de1dc7b37 100644
--- a/ecmascript/compiler/compiler_log.cpp
+++ b/ecmascript/compiler/compiler_log.cpp
@@ -313,4 +313,5 @@ void CompilerLog::SetPGOMismatchResult(uint32_t &totalMethodCount, uint32_t &mis
     mismatchPGOMethodCount_ = mismatchMethodCount;
     mismatchPGOMethodSet_ = std::move(mismatchMethodSet);
 }
+
 } // namespace panda::ecmascript::kungfu
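The common_stubs.h hunk above shows why adding one `V(GetCharFromEcmaString)` entry is enough to register the new stub: the X-macro list is expanded once per consumer (declaration, signature table, and so on). A self-contained sketch of that expansion pattern (all names here are illustrative, not the real macros):

```cpp
// Sketch: X-macro stub registration. Each consumer supplies its own V.
#define DEMO_STUB_LIST(V)      \
    V(JsProxyCallInternal)     \
    V(GetCharFromEcmaString)

#define DEMO_DECLARE(name) class name##StubBuilder;
DEMO_STUB_LIST(DEMO_DECLARE) // expands to one forward declaration per stub
#undef DEMO_DECLARE
```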
diff --git a/ecmascript/compiler/early_elimination.cpp b/ecmascript/compiler/early_elimination.cpp
index a53c3d5a81d8865b3a63d48203f400bd062a807e..12506ae438ded8c287bc34c18ddbfef1a5029b5e 100644
--- a/ecmascript/compiler/early_elimination.cpp
+++ b/ecmascript/compiler/early_elimination.cpp
@@ -85,6 +85,7 @@ GateRef EarlyElimination::VisitGate(GateRef gate)
         case OpCode::LOAD_TYPED_ARRAY_LENGTH:
         case OpCode::TYPED_ARRAY_CHECK:
         case OpCode::OBJECT_TYPE_CHECK:
+        case OpCode::OBJECT_TYPE_COMPARE:
         case OpCode::STABLE_ARRAY_CHECK:
         case OpCode::INDEX_CHECK:
         case OpCode::TYPED_CALL_CHECK:
@@ -92,6 +93,10 @@ GateRef EarlyElimination::VisitGate(GateRef gate)
         case OpCode::TYPED_BINARY_OP:
         case OpCode::TYPED_UNARY_OP:
         case OpCode::JSINLINETARGET_TYPE_CHECK:
+        case OpCode::INLINE_ACCESSOR_CHECK:
+        case OpCode::LOAD_GETTER:
+        case OpCode::LOAD_SETTER:
+        case OpCode::ECMA_STRING_CHECK:
             return TryEliminateGate(gate);
         case OpCode::STATE_SPLIT:
             return TryEliminateFrameState(gate);
@@ -234,6 +239,7 @@ DependInfoNode* EarlyElimination::UpdateWrite(GateRef gate, DependInfoNode* depe
         case OpCode::STORE_PROPERTY_NO_BARRIER:
         case OpCode::STORE_CONST_OFFSET:
         case OpCode::STORE_ELEMENT:
+        case OpCode::STORE_MEMORY:
             return dependInfo->UpdateStoreProperty(this, gate);
         default:
             return new (chunk_) DependInfoNode(chunk_);
@@ -242,26 +248,43 @@ DependInfoNode* EarlyElimination::UpdateWrite(GateRef gate, DependInfoNode* depe
 
 bool EarlyElimination::MayAccessOneMemory(GateRef lhs, GateRef rhs)
 {
-    if (acc_.GetOpCode(rhs) == OpCode::STORE_ELEMENT) {
-        return acc_.GetOpCode(lhs) == OpCode::LOAD_ELEMENT;
-    }
+    auto rop = acc_.GetOpCode(rhs);
     auto lop = acc_.GetOpCode(lhs);
-    ASSERT(acc_.GetOpCode(rhs) == OpCode::STORE_PROPERTY ||
-           acc_.GetOpCode(rhs) == OpCode::STORE_PROPERTY_NO_BARRIER ||
-           acc_.GetOpCode(rhs) == OpCode::STORE_CONST_OFFSET);
-    if (lop == OpCode::LOAD_PROPERTY) {
-        auto loff = acc_.GetValueIn(lhs, 1);
-        auto roff = acc_.GetValueIn(rhs, 1);
-        ASSERT(acc_.GetOpCode(loff) == OpCode::CONSTANT);
-        ASSERT(acc_.GetOpCode(roff) == OpCode::CONSTANT);
-        return loff == roff;
-    } else if (lop == OpCode::LOAD_CONST_OFFSET) {
-        auto loff = acc_.GetOffset(lhs);
-        auto roff = acc_.GetOffset(rhs);
-        return loff == roff;
-    } else {
-        return false;
+    switch (rop) {
+        case OpCode::STORE_MEMORY:
+            ASSERT(acc_.GetMemoryType(rhs) == MemoryType::ELEMENT_TYPE);
+            return acc_.GetOpCode(lhs) == OpCode::LOAD_ELEMENT;
+        case OpCode::STORE_ELEMENT: {
+            if (lop == OpCode::LOAD_ELEMENT) {
+                bool lopIsTypedArray = acc_.TypedOpIsTypedArray(lhs, TypedOpKind::TYPED_LOAD_OP);
+                bool ropIsTypedArray = acc_.TypedOpIsTypedArray(rhs, TypedOpKind::TYPED_STORE_OP);
+                return lopIsTypedArray == ropIsTypedArray;
+            }
+            return false;
+        }
+        case OpCode::STORE_PROPERTY:
+        case OpCode::STORE_PROPERTY_NO_BARRIER: {
+            if (lop == OpCode::LOAD_PROPERTY) {
+                auto loff = acc_.GetValueIn(lhs, 1);
+                auto roff = acc_.GetValueIn(rhs, 1);
+                ASSERT(acc_.GetOpCode(loff) == OpCode::CONSTANT);
+                ASSERT(acc_.GetOpCode(roff) == OpCode::CONSTANT);
+                return loff == roff;
+            }
+            break;
+        }
+        case OpCode::STORE_CONST_OFFSET: {
+            if (lop == OpCode::LOAD_CONST_OFFSET) {
+                auto loff = acc_.GetOffset(lhs);
+                auto roff = acc_.GetOffset(rhs);
+                return loff == roff;
+            }
+            break;
+        }
+        default:
+            break;
     }
+    return false;
 }
 
 bool EarlyElimination::CompareOrder(GateRef lhs, GateRef rhs)
@@ -309,13 +332,19 @@ bool EarlyElimination::CheckReplacement(GateRef lhs, GateRef rhs)
             break;
         }
         case OpCode::TYPED_ARRAY_CHECK:
-        case OpCode::OBJECT_TYPE_CHECK:
         case OpCode::INDEX_CHECK: {
             if (acc_.GetParamGateType(lhs) != acc_.GetParamGateType(rhs)) {
                 return false;
             }
             break;
         }
+        case OpCode::OBJECT_TYPE_CHECK:
+        case OpCode::OBJECT_TYPE_COMPARE: {
+            if (acc_.GetObjectTypeAccessor(lhs).GetType() != acc_.GetObjectTypeAccessor(rhs).GetType()) {
+                return false;
+            }
+            break;
+        }
         case OpCode::LOAD_CONST_OFFSET: {
             if (acc_.GetOffset(lhs) != acc_.GetOffset(rhs)) {
                 return false;
@@ -328,6 +357,13 @@ bool EarlyElimination::CheckReplacement(GateRef lhs, GateRef rhs)
             }
             break;
         }
+        case OpCode::LOAD_GETTER:
+        case OpCode::LOAD_SETTER: {
+            if (acc_.TryGetValue(lhs) != acc_.TryGetValue(rhs)) {
+                return false;
+            }
+            break;
+        }
         default:
             break;
     }
@@ -456,6 +492,18 @@ GateRef DependInfoNode::LookupCheckedNode(EarlyElimination* elimination, GateRef
     return gate;
 }
 
+void DependInfoNode::GetGates(std::vector<GateRef>& gates) const
+{
+    ChunkStack<GateRef> st(chunk_);
+    for (Node* node = head_; node != nullptr; node = node->next) {
+        st.push(node->gate);
+    }
+    while (!st.empty()) {
+        gates.emplace_back(st.top());
+        st.pop();
+    }
+}
+
 GateRef DependInfoNode::LookupNode(EarlyElimination* elimination, GateRef gate)
 {
     for (Node* node = head_; node != nullptr; node = node->next) {
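The rewritten `MayAccessOneMemory` above encodes a simple aliasing rule: a store only invalidates a prior load when both touch the same slot kind, for example a const-offset load against a const-offset store at the same byte offset. A minimal sketch of just that case (the free-standing helper is hypothetical; the real logic lives in the switch above):

```cpp
// Sketch: the const-offset aliasing test from MayAccessOneMemory.
// Assumes `load` is LOAD_CONST_OFFSET and `store` is STORE_CONST_OFFSET.
bool MayAliasConstOffset(const GateAccessor &acc, GateRef load, GateRef store)
{
    return acc.GetOffset(load) == acc.GetOffset(store);
}
```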
diff --git a/ecmascript/compiler/early_elimination.h b/ecmascript/compiler/early_elimination.h
index 2b8ba33e565598f6059615c46d37d5f4d2f36710..b1a710613ceebd64a4dae640d13da5f38f6d2163 100644
--- a/ecmascript/compiler/early_elimination.h
+++ b/ecmascript/compiler/early_elimination.h
@@ -37,6 +37,7 @@ public:
     DependInfoNode* UpdateStoreProperty(EarlyElimination* elimination, GateRef gate);
     bool Equals(DependInfoNode* that);
     void Merge(EarlyElimination* elimination, DependInfoNode* that);
+    void GetGates(std::vector<GateRef>& gates) const;
     void CopyFrom(DependInfoNode *other)
     {
         head_ = other->head_;
diff --git a/ecmascript/compiler/file_generators.cpp b/ecmascript/compiler/file_generators.cpp
index e58c3afdd536c2d4f1ab627b39542e7260f8b82d..fd697a041e6950edf739f0f4acd75e5516fc9550 100644
--- a/ecmascript/compiler/file_generators.cpp
+++ b/ecmascript/compiler/file_generators.cpp
@@ -20,6 +20,9 @@
 #include "ecmascript/snapshot/mem/snapshot.h"
 #include "ecmascript/stackmap/ark_stackmap_builder.h"
 #include "ecmascript/stackmap/llvm_stackmap_parser.h"
+#include "ecmascript/compiler/litecg_ir_builder.h"
+#include "ecmascript/compiler/litecg_codegen.h"
+#include "litecg.h"
 
 namespace panda::ecmascript::kungfu {
 void Module::CollectStackMapDes(ModuleSectionDes& des) const
@@ -38,7 +41,7 @@ void Module::CollectStackMapDes(ModuleSectionDes& des) const
     std::shared_ptr<uint8_t> ptr = nullptr;
     uint32_t size = 0;
     ArkStackMapBuilder builder;
-    std::tie(ptr, size) = builder.Run(std::move(stackmapPtr), textAddr, llvmModule_->GetTriple());
+    std::tie(ptr, size) = builder.Run(std::move(stackmapPtr), textAddr, irModule_->GetTriple());
     des.EraseSec(ElfSecName::LLVM_STACKMAP);
     des.SetArkStackMapPtr(ptr);
     des.SetArkStackMapSize(size);
@@ -67,32 +70,38 @@ void Module::CollectAnStackMapDes(ModuleSectionDes& des, uint64_t textOffset,
 void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, StubFileInfo &stubInfo,
                                   uint32_t moduleIndex, const CompilerLog &log)
 {
-    auto engine = assembler_->GetEngine();
-    auto callSigns = llvmModule_->GetCSigns();
+    if (irModule_->GetModuleKind() != MODULE_LLVM) {
+        std::cout << "CollectFuncEntryInfo is not supported for litecg currently" << std::endl;
+        return;
+    }
+    LLVMModule *llvmModule = static_cast<LLVMModule *>(irModule_);
+    LLVMAssembler *assembler = static_cast<LLVMAssembler *>(assembler_);
+    auto engine = assembler->GetEngine();
+    auto callSigns = llvmModule->GetCSigns();
     std::vector<uintptr_t> entrys;
-    for (size_t j = 0; j < llvmModule_->GetFuncCount(); j++) {
-        LLVMValueRef func = llvmModule_->GetFunction(j);
+    for (size_t j = 0; j < llvmModule->GetFuncCount(); j++) {
+        LLVMValueRef func = llvmModule->GetFunction(j);
         ASSERT(func != nullptr);
         uintptr_t entry = reinterpret_cast<uintptr_t>(LLVMGetPointerToGlobal(engine, func));
         entrys.push_back(entry);
     }
-    auto codeBuff = assembler_->GetSectionAddr(ElfSecName::TEXT);
-    const size_t funcCount = llvmModule_->GetFuncCount();
+    auto codeBuff = assembler->GetSectionAddr(ElfSecName::TEXT);
+    const size_t funcCount = llvmModule->GetFuncCount();
     funcCount_ = funcCount;
     startIndex_ = stubInfo.GetEntrySize();
     for (size_t j = 0; j < funcCount; j++) {
         auto cs = callSigns[j];
-        LLVMValueRef func = llvmModule_->GetFunction(j);
+        LLVMValueRef func = llvmModule->GetFunction(j);
         ASSERT(func != nullptr);
-        int delta = assembler_->GetFpDeltaPrevFramSp(func, log);
+        int delta = assembler->GetFpDeltaPrevFramSp(func, log);
         ASSERT(delta >= 0 && (delta % sizeof(uintptr_t) == 0));
         uint32_t funcSize = 0;
         if (j < funcCount - 1) {
             funcSize = entrys[j + 1] - entrys[j];
         } else {
-            funcSize = codeBuff + assembler_->GetSectionSize(ElfSecName::TEXT) - entrys[j];
+            funcSize = codeBuff + assembler->GetSectionSize(ElfSecName::TEXT) - entrys[j];
         }
-        kungfu::CalleeRegAndOffsetVec info = assembler_->GetCalleeReg2Offset(func, log);
+        kungfu::CalleeRegAndOffsetVec info = assembler->GetCalleeReg2Offset(func, log);
         stubInfo.AddEntry(cs->GetTargetKind(), false, false, cs->GetID(), entrys[j] - codeBuff, moduleIndex, delta,
                           funcSize, info);
         ASSERT(!cs->GetName().empty());
@@ -103,58 +112,139 @@ void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, S
 void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, AnFileInfo &aotInfo,
                                   uint32_t moduleIndex, const CompilerLog &log)
 {
-    auto engine = assembler_->GetEngine();
-    std::vector<std::tuple<uint64_t, size_t, int, bool>> funcInfo; // entry idx delta
-    std::vector<kungfu::CalleeRegAndOffsetVec> calleeSaveRegisters; // entry idx delta
-    // 1.Compile all functions and collect function infos
-    llvmModule_->IteratefuncIndexMap([&](size_t idx, LLVMValueRef func, bool isFastCall) {
-        uint64_t funcEntry = reinterpret_cast<uint64_t>(LLVMGetPointerToGlobal(engine, func));
-        uint64_t length = 0;
-        std::string funcName(LLVMGetValueName2(func, reinterpret_cast<size_t *>(&length)));
-        ASSERT(length != 0);
-        addr2name[funcEntry] = funcName;
-        int delta = assembler_->GetFpDeltaPrevFramSp(func, log);
-        ASSERT(delta >= 0 && (delta % sizeof(uintptr_t) == 0));
-        funcInfo.emplace_back(std::tuple(funcEntry, idx, delta, isFastCall));
-        kungfu::CalleeRegAndOffsetVec info = assembler_->GetCalleeReg2Offset(func, log);
-        calleeSaveRegisters.emplace_back(info);
-    });
-    // 2.After all functions compiled, the module sections would be fixed
-    uintptr_t textAddr = GetTextAddr();
-    uint32_t textSize = GetTextSize();
-    uint32_t rodataSize = GetRODataSize();
-    aotInfo.AlignTextSec();
-    aotInfo.UpdateCurTextSecOffset(rodataSize);
-
-    const size_t funcCount = funcInfo.size();
-    funcCount_ = funcCount;
-    startIndex_ = aotInfo.GetEntrySize();
-    // 3.Add function entries based on the module sections
-    for (size_t i = 0; i < funcInfo.size(); i++) {
-        uint64_t funcEntry;
-        size_t idx;
-        int delta;
-        bool isFastCall;
-        uint32_t funcSize;
-        std::tie(funcEntry, idx, delta, isFastCall) = funcInfo[i];
-        if (i < funcCount - 1) {
-            funcSize = std::get<0>(funcInfo[i + 1]) - funcEntry;
-        } else {
-            funcSize = textAddr + textSize - funcEntry;
+    if (irModule_->GetModuleKind() == MODULE_LLVM) {
+        LLVMAssembler *assembler = static_cast<LLVMAssembler *>(assembler_);
+        auto engine = assembler->GetEngine();
+        std::vector<std::tuple<uint64_t, size_t, int, bool>> funcInfo; // entry idx delta
+        std::vector<kungfu::CalleeRegAndOffsetVec> calleeSaveRegisters; // entry idx delta
+        // 1.Compile all functions and collect function infos
+        LLVMModule *llvmModule = static_cast<LLVMModule *>(irModule_);
+        llvmModule->IteratefuncIndexMap([&](size_t idx, LLVMValueRef func, bool isFastCall) {
+            uint64_t funcEntry = reinterpret_cast<uint64_t>(LLVMGetPointerToGlobal(engine, func));
+            uint64_t length = 0;
+            std::string funcName(LLVMGetValueName2(func, reinterpret_cast<size_t *>(&length)));
+            ASSERT(length != 0);
+            addr2name[funcEntry] = funcName;
+            int delta = assembler->GetFpDeltaPrevFramSp(func, log);
+            ASSERT(delta >= 0 && (delta % sizeof(uintptr_t) == 0));
+            funcInfo.emplace_back(std::tuple(funcEntry, idx, delta, isFastCall));
+            kungfu::CalleeRegAndOffsetVec info = assembler->GetCalleeReg2Offset(func, log);
+            calleeSaveRegisters.emplace_back(info);
+        });
+        // 2.After all functions compiled, the module sections would be fixed
+        uintptr_t textAddr = GetTextAddr();
+        uint32_t textSize = GetTextSize();
+        uintptr_t rodataAddrBeforeText = 0;
+        uint32_t rodataSizeBeforeText = 0;
+        uintptr_t rodataAddrAfterText = 0;
+        uint32_t rodataSizeAfterText = 0;
+        std::tie(rodataAddrBeforeText, rodataSizeBeforeText, rodataAddrAfterText, rodataSizeAfterText) =
+            GetMergedRODataAddrAndSize(textAddr);
+        aotInfo.AlignTextSec(AOTFileInfo::PAGE_ALIGN);
+        if (rodataSizeBeforeText != 0) {
aotInfo.UpdateCurTextSecOffset(rodataSizeBeforeText); + aotInfo.AlignTextSec(AOTFileInfo::TEXT_SEC_ALIGN); + } + + const size_t funcCount = funcInfo.size(); + funcCount_ = funcCount; + startIndex_ = aotInfo.GetEntrySize(); + // 3.Add function entries based on the module sections + for (size_t i = 0; i < funcInfo.size(); i++) { + uint64_t funcEntry; + size_t idx; + int delta; + bool isFastCall; + uint32_t funcSize; + std::tie(funcEntry, idx, delta, isFastCall) = funcInfo[i]; + if (i < funcCount - 1) { + funcSize = std::get<0>(funcInfo[i + 1]) - funcEntry; + } else { + funcSize = textAddr + textSize - funcEntry; + } + auto found = addr2name[funcEntry].find(panda::ecmascript::JSPandaFile::ENTRY_FUNCTION_NAME); + bool isMainFunc = found != std::string::npos; + uint64_t offset = funcEntry - textAddr + aotInfo.GetCurTextSecOffset(); + aotInfo.AddEntry(CallSignature::TargetKind::JSFUNCTION, isMainFunc, isFastCall, idx, + offset, moduleIndex, delta, funcSize, calleeSaveRegisters[i]); + } + aotInfo.UpdateCurTextSecOffset(textSize); + if (rodataSizeAfterText != 0) { + aotInfo.AlignTextSec(AOTFileInfo::DATA_SEC_ALIGN); + aotInfo.UpdateCurTextSecOffset(rodataSizeAfterText); + } + } else { + std::vector> funcInfo; // entry idx delta + std::vector calleeSaveRegisters; // entry idx delta + // 1.Compile all functions and collect function infos + LMIRModule *lmirModule = static_cast(irModule_); + LiteCGAssembler *assembler = static_cast(assembler_); + const auto &func2Addr = assembler->GetCodeInfo().GetFunc2Addr(); + lmirModule->IteratefuncIndexMap([&](size_t idx, std::string funcName, bool isFastCall) { + auto itr = func2Addr.find(funcName); + if (itr == func2Addr.end()) { + LOG_COMPILER(FATAL) << "get function address from emitter failed"; + UNREACHABLE(); + } + uint64_t funcEntry = itr->second; + addr2name[funcEntry] = funcName; + int delta = 0; // TODO: assembler->GetFpDeltaPrevFramSp(func, log); + ASSERT(delta >= 0 && (delta % sizeof(uintptr_t) == 0)); + funcInfo.emplace_back(std::tuple(funcEntry, idx, delta, isFastCall)); + kungfu::CalleeRegAndOffsetVec info(0); // TODO: assembler->GetCalleeReg2Offset(func, log); + calleeSaveRegisters.emplace_back(info); + }); + // 2.After all functions compiled, the module sections would be fixed + uintptr_t textAddr = GetTextAddr(); + uint32_t textSize = GetTextSize(); + uint32_t rodataSizeBeforeText = 0; + uint32_t rodataSizeAfterText = 0; + + aotInfo.AlignTextSec(AOTFileInfo::PAGE_ALIGN); + if (rodataSizeBeforeText != 0) { + aotInfo.UpdateCurTextSecOffset(rodataSizeBeforeText); + aotInfo.AlignTextSec(AOTFileInfo::TEXT_SEC_ALIGN); + } + + const size_t funcCount = funcInfo.size(); + funcCount_ = funcCount; + startIndex_ = aotInfo.GetEntrySize(); + // 3.Add function entries based on the module sections + for (size_t i = 0; i < funcInfo.size(); i++) { + uint64_t funcEntry = 0; + size_t idx; + int delta; + bool isFastCall; + uint32_t funcSize; + std::tie(funcEntry, idx, delta, isFastCall) = funcInfo[i]; + if (i < funcCount - 1) { + funcSize = std::get<0>(funcInfo[i + 1]) - funcEntry; + } else { + funcSize = textAddr + textSize - funcEntry; + } + auto found = addr2name[funcEntry].find(panda::ecmascript::JSPandaFile::ENTRY_FUNCTION_NAME); + bool isMainFunc = found != std::string::npos; + uint64_t offset = funcEntry; + aotInfo.AddEntry(CallSignature::TargetKind::JSFUNCTION, isMainFunc, isFastCall, idx, + offset, moduleIndex, delta, funcSize, calleeSaveRegisters[i]); + } + aotInfo.UpdateCurTextSecOffset(textSize); + if (rodataSizeAfterText != 0) { + 
aotInfo.AlignTextSec(AOTFileInfo::DATA_SEC_ALIGN); + aotInfo.UpdateCurTextSecOffset(rodataSizeAfterText); } - auto found = addr2name[funcEntry].find(panda::ecmascript::JSPandaFile::ENTRY_FUNCTION_NAME); - bool isMainFunc = found != std::string::npos; - uint64_t offset = funcEntry - textAddr + aotInfo.GetCurTextSecOffset(); - aotInfo.AddEntry(CallSignature::TargetKind::JSFUNCTION, isMainFunc, isFastCall, idx, - offset, moduleIndex, delta, funcSize, calleeSaveRegisters[i]); } - aotInfo.UpdateCurTextSecOffset(textSize); } void Module::CollectModuleSectionDes(ModuleSectionDes &moduleDes) const { + if (irModule_->GetModuleKind() != MODULE_LLVM) { + std::cout << "CollectModuleSectionDes is not supported for litecg currently" << std::endl; + return; + } ASSERT(assembler_ != nullptr); - assembler_->IterateSecInfos([&](size_t i, std::pair secInfo) { + LLVMAssembler *assembler = static_cast(assembler_); + assembler->IterateSecInfos([&](size_t i, std::pair secInfo) { auto curSec = ElfSection(i); ElfSecName sec = curSec.GetElfEnumValue(); if (IsRelaSection(sec)) { @@ -173,7 +263,8 @@ void Module::CollectAnModuleSectionDes(ModuleSectionDes &moduleDes, uint64_t tex std::vector &pc2DeoptVec) const { ASSERT(assembler_ != nullptr); - assembler_->IterateSecInfos([&](size_t i, std::pair secInfo) { + LLVMAssembler *assembler = static_cast(assembler_); + assembler->IterateSecInfos([&](size_t i, std::pair secInfo) { auto curSec = ElfSection(i); ElfSecName sec = curSec.GetElfEnumValue(); // aot need relocated; stub don't need collect relocated section @@ -194,22 +285,27 @@ uintptr_t Module::GetSectionAddr(ElfSecName sec) const return assembler_->GetSectionAddr(sec); } -void Module::RunAssembler(const CompilerLog &log) +void Module::RunAssembler(const CompilerLog &log, bool fastCompileMode) { - assembler_->Run(log); + assembler_->Run(log, fastCompileMode); } void Module::DisassemblerFunc(std::map &addr2name, uint64_t textOffset, const CompilerLog &log, const MethodLogList &logList, std::ostringstream &codeStream) { - assembler_->Disassemble(addr2name, textOffset, log, logList, codeStream); + if (irModule_->GetModuleKind() != MODULE_LLVM) { + std::cout << "DisassemblerFunc is not supported for litecg currently" << std::endl; + return; + } + auto *assembler = static_cast(assembler_); + assembler->Disassemble(addr2name, textOffset, log, logList, codeStream); } void Module::DestroyModule() { - if (llvmModule_ != nullptr) { - delete llvmModule_; - llvmModule_ = nullptr; + if (irModule_ != nullptr) { + delete irModule_; + irModule_ = nullptr; } if (assembler_ != nullptr) { delete assembler_; @@ -263,7 +359,24 @@ void StubFileGenerator::DisassembleAsmStubs(std::map &ad uint64_t AOTFileGenerator::RollbackTextSize(Module *module) { - return aotInfo_.GetCurTextSecOffset() - module->GetSectionSize(ElfSecName::TEXT); + uint64_t textAddr = module->GetSectionAddr(ElfSecName::TEXT); + uint32_t textSize = module->GetSectionSize(ElfSecName::TEXT); + uint64_t rodataAddrBeforeText = 0; + uint32_t rodataSizeBeforeText = 0; + uint64_t rodataAddrAfterText = 0; + uint32_t rodataSizeAfterText = 0; + if (module->GetModule()->GetModuleKind() == MODULE_LLVM) { + std::tie(rodataAddrBeforeText, rodataSizeBeforeText, rodataAddrAfterText, rodataSizeAfterText) = + module->GetMergedRODataAddrAndSize(textAddr); + } + uint64_t textStart = 0; + if (rodataSizeAfterText == 0) { + textStart = aotInfo_.GetCurTextSecOffset() - textSize; + } else { + textStart = aotInfo_.GetCurTextSecOffset() - textSize - rodataSizeAfterText; + textStart = 
AlignDown(textStart, AOTFileInfo::DATA_SEC_ALIGN); + } + return textStart; } void AOTFileGenerator::CollectCodeInfo(Module *module, uint32_t moduleIdx) @@ -289,11 +402,18 @@ uint32_t AOTFileGenerator::GetModuleVecSize() const return modulePackage_.size(); } -Module* AOTFileGenerator::AddModule(const std::string &name, const std::string &triple, LOptions option, bool logDebug) +Module* AOTFileGenerator::AddModule(const std::string &name, const std::string &triple, + [[maybe_unused]] LOptions option, bool logDebug) { +#if 1 + LMIRModule *irModule = new LMIRModule(vm_->GetNativeAreaAllocator(), name, logDebug, triple); + LiteCGAssembler* ass = new LiteCGAssembler(*irModule); + modulePackage_.emplace_back(Module(irModule, ass)); +#else LLVMModule* m = new LLVMModule(vm_->GetNativeAreaAllocator(), name, logDebug, triple); LLVMAssembler* ass = new LLVMAssembler(m, option); modulePackage_.emplace_back(Module(m, ass)); +#endif return &modulePackage_.back(); } @@ -353,7 +473,8 @@ void AOTFileGenerator::CompileLatestModuleThenDestroy() uint32_t latestModuleIdx = GetModuleVecSize() - 1; { TimeScope timescope("LLVMIROpt", const_cast(log_)); - latestModule->RunAssembler(*(log_)); + bool fastCompileMode = vm_->GetJSOptions().GetFastAOTCompileMode(); + latestModule->RunAssembler(*(log_), fastCompileMode); } { TimeScope timescope("LLVMCodeGen", const_cast(log_)); diff --git a/ecmascript/compiler/file_generators.h b/ecmascript/compiler/file_generators.h index 5319f50018c41d4ea1acc1da6c895e072d6221e8..33bb342a1bae3eaa7026aae1fe67f3f3a3e6a354 100644 --- a/ecmascript/compiler/file_generators.h +++ b/ecmascript/compiler/file_generators.h @@ -16,18 +16,20 @@ #ifndef ECMASCRIPT_COMPILER_FILE_GENERATORS_H #define ECMASCRIPT_COMPILER_FILE_GENERATORS_H +#include "ecmascript/base/number_helper.h" #include "ecmascript/compiler/aot_file/aot_file_manager.h" #include "ecmascript/compiler/assembler_module.h" #include "ecmascript/compiler/compiler_log.h" #include "ecmascript/compiler/llvm_codegen.h" #include "ecmascript/compiler/llvm_ir_builder.h" +#include "ecmascript/compiler/ir_module.h" namespace panda::ecmascript::kungfu { class Module { public: Module() = default; - Module(LLVMModule *module, LLVMAssembler *assembler) - : llvmModule_(module), assembler_(assembler) + Module(IRModule *module, Assembler *assembler) + : irModule_(module), assembler_(assembler) { } @@ -58,16 +60,30 @@ public: uintptr_t GetSectionAddr(ElfSecName sec) const; - void RunAssembler(const CompilerLog &log); + std::tuple GetMergedRODataAddrAndSize(uint64_t textAddr) const + { + uint64_t addrBeforeText = base::MAX_UINT64_VALUE; + uint32_t sizeBeforeText = 0; + uint64_t addrAfterText = base::MAX_UINT64_VALUE; + uint32_t sizeAfterText = 0; + for (uint8_t i = static_cast(ElfSecName::RODATA); i <= static_cast(ElfSecName::RODATA_CST32); + i++) { + UpdateRODataInfo(textAddr, addrBeforeText, sizeBeforeText, addrAfterText, sizeAfterText, + static_cast(i)); + } + return std::make_tuple(addrBeforeText, sizeBeforeText, addrAfterText, sizeAfterText); + } + + void RunAssembler(const CompilerLog &log, bool fastCompileMode); void DisassemblerFunc(std::map &addr2name, uint64_t textOffset, const CompilerLog &log, const MethodLogList &logList, std::ostringstream &codeStream); void DestroyModule(); - LLVMModule* GetModule() const + IRModule* GetModule() const { - return llvmModule_; + return irModule_; } private: @@ -81,13 +97,26 @@ private: return assembler_->GetSectionSize(ElfSecName::TEXT); } - uint32_t GetRODataSize() const + void UpdateRODataInfo(uint64_t 
textAddr, uint64_t &addrBeforeText, uint32_t &sizeBeforeText, + uint64_t &addrAfterText, uint32_t &sizeAfterText, ElfSecName sec) const { - return assembler_->GetSectionSize(ElfSecName::RODATA_CST8); + uint64_t curSectionAddr = GetSectionAddr(sec); + if (curSectionAddr == 0) { + ASSERT(GetSectionSize(sec) == 0); + return; + } + ASSERT(curSectionAddr != textAddr); + if (curSectionAddr < textAddr) { + addrBeforeText = (curSectionAddr < addrBeforeText) ? curSectionAddr : addrBeforeText; + sizeBeforeText += GetSectionSize(sec); + } else { + addrAfterText = (curSectionAddr < addrAfterText) ? curSectionAddr : addrAfterText; + sizeAfterText += GetSectionSize(sec); + } } - LLVMModule *llvmModule_ {nullptr}; - LLVMAssembler *assembler_ {nullptr}; + IRModule *irModule_ {nullptr}; + Assembler *assembler_ {nullptr}; // record current module first function index in StubFileInfo/AnFileInfo uint32_t startIndex_ {static_cast(-1)}; uint32_t funcCount_ {0}; @@ -121,7 +150,7 @@ protected: void RunLLVMAssembler() { for (auto m : modulePackage_) { - m.RunAssembler(*(log_)); + m.RunAssembler(*(log_), false); } } diff --git a/ecmascript/compiler/frame_states.cpp b/ecmascript/compiler/frame_states.cpp index d40fdbae62d9620d51e2592b199d5665313639ab..8d015b3a7c59a3a030a7077c9d3be70c2af9c3cc 100644 --- a/ecmascript/compiler/frame_states.cpp +++ b/ecmascript/compiler/frame_states.cpp @@ -26,6 +26,7 @@ FrameStateBuilder::FrameStateBuilder(BytecodeCircuitBuilder *builder, gateAcc_(circuit), bcEndStateInfos_(circuit->chunk()), bbBeginStateInfos_(circuit->chunk()), + loopExitStateInfos_(circuit->chunk()), postOrderList_(circuit->chunk()) { } @@ -35,6 +36,7 @@ FrameStateBuilder::~FrameStateBuilder() liveOutResult_ = nullptr; bcEndStateInfos_.clear(); bbBeginStateInfos_.clear(); + loopExitStateInfos_.clear(); builder_ = nullptr; } @@ -64,7 +66,7 @@ GateRef FrameStateBuilder::BuildFrameStateGate(size_t pcOffset, GateRef frameVal {frameArgs, frameValues, preFrameState}); } -void FrameStateBuilder::BindStateSplit(GateRef state, GateRef depend, GateRef frameState) +GateRef FrameStateBuilder::BindStateSplit(GateRef state, GateRef depend, GateRef frameState) { GateRef stateSplit = circuit_->NewGate(circuit_->StateSplit(), {state, depend, frameState}); auto uses = gateAcc_.Uses(depend); @@ -78,6 +80,7 @@ void FrameStateBuilder::BindStateSplit(GateRef state, GateRef depend, GateRef fr if (builder_->IsLogEnabled()) { gateAcc_.ShortPrint(frameState); } + return stateSplit; } void FrameStateBuilder::BindStateSplit(GateRef gate, GateRef frameState) @@ -157,7 +160,7 @@ bool FrameStateBuilder::MergeIntoPredBC(uint32_t predPc, size_t diff) auto value = frameInfo->ValuesAt(i); // if value not null, merge pred if (value == Circuit::NullGate() && predValue != Circuit::NullGate()) { - predValue = TryGetLoopExitValue(predValue, diff); + predValue = TryGetLoopExitValue(predValue, diff, i); frameInfo->SetValuesAt(i, predValue); changed = true; } @@ -173,6 +176,53 @@ GateRef FrameStateBuilder::GetPreBBInput(BytecodeRegion *bb, BytecodeRegion *pre return gate; } +GateRef FrameStateBuilder::GetPredStateGateBetweenBB(BytecodeRegion *bb, BytecodeRegion *predBb) +{ + GateRef gate = bb->stateCurrent; + if (bb->numOfLoopBacks != 0) { + ASSERT(bb->loopbackBlocks.size() != 0); + ASSERT(gateAcc_.GetStateCount(gate) > 1); + auto forwardState = gateAcc_.GetState(gate, 0); // 0: fowward + auto loopBackState = gateAcc_.GetState(gate, 1); // 1: back + size_t backIndex = 0; + size_t forwardIndex = 0; + for (size_t i = 0; i < bb->numOfStatePreds; ++i) { + auto 
predId = std::get<0>(bb->expandedPreds.at(i)); + if (bb->loopbackBlocks.count(predId)) { + if (predId == predBb->id) { + if (bb->numOfLoopBacks == 1) { + return loopBackState; + } + return gateAcc_.GetState(loopBackState, backIndex); + } + backIndex++; + } else { + if (predId == predBb->id) { + auto mergeCount = bb->numOfStatePreds - bb->numOfLoopBacks; + if (mergeCount == 1) { + return forwardState; + } + return gateAcc_.GetState(forwardState, forwardIndex); + } + forwardIndex++; + } + } + UNREACHABLE(); + return Circuit::NullGate(); + } + + ASSERT(gateAcc_.GetStateCount(gate) == bb->numOfStatePreds); + // The phi input nodes need to be traversed in reverse order, because there is a bb with multiple def points + for (size_t i = bb->numOfStatePreds - 1; i >= 0; --i) { + auto predId = std::get<0>(bb->expandedPreds.at(i)); + if (predId == predBb->id) { + return gateAcc_.GetState(gate, i); + } + } + UNREACHABLE(); + return Circuit::NullGate(); +} + GateRef FrameStateBuilder::GetPhiComponent(BytecodeRegion *bb, BytecodeRegion *predBb, GateRef phi) { ASSERT(gateAcc_.GetOpCode(phi) == OpCode::VALUE_SELECTOR); @@ -237,7 +287,7 @@ bool FrameStateBuilder::MergeIntoPredBB(BytecodeRegion *bb, BytecodeRegion *pred auto target = GetPreBBInput(bb, predBb, phi); if (target != Circuit::NullGate()) { auto diff = LoopExitCount(predBb, bb); - target = TryGetLoopExitValue(target, diff); + target = TryGetLoopExitValue(target, diff, accumulatorIndex_); predLiveout->SetValuesAt(accumulatorIndex_, target); } } @@ -252,7 +302,7 @@ bool FrameStateBuilder::MergeIntoPredBB(BytecodeRegion *bb, BytecodeRegion *pred continue; } auto diff = LoopExitCount(predBb, bb); - target = TryGetLoopExitValue(target, diff); + target = TryGetLoopExitValue(target, diff, reg); predLiveout->SetValuesAt(reg, target); } } @@ -331,6 +381,7 @@ void FrameStateBuilder::BuildFrameState() bcEndStateInfos_.resize(builder_->GetLastBcIndex() + 1, nullptr); // 1: +1 pcOffsets size auto size = builder_->GetBasicBlockCount(); bbBeginStateInfos_.resize(size, nullptr); + loopExitStateInfos_.resize(circuit_->GetMaxGateId() + 1, nullptr); liveOutResult_ = CreateEmptyStateInfo(); BuildPostOrderList(size); ComputeLiveState(); @@ -406,18 +457,33 @@ bool FrameStateBuilder::IsAsyncResolveOrSusp(const BytecodeInfo &bytecodeInfo) return opcode == EcmaOpcode::SUSPENDGENERATOR_V8 || opcode == EcmaOpcode::ASYNCGENERATORRESOLVE_V8_V8_V8; } -void FrameStateBuilder::BuildStateSplitAfter(size_t index) +void FrameStateBuilder::BuildStateSplitAfter(size_t index, BytecodeRegion& bb) { auto gate = builder_->GetGateByBcIndex(index); ASSERT(gateAcc_.GetOpCode(gate) == OpCode::JS_BYTECODE); - auto pcOffset = builder_->GetPcOffset(index + 1); // 1: for after - auto stateInfo = GetFrameInfoAfter(index); + auto nextIndex = GetNearestNextIndex(index, bb); + if (builder_->GetBytecodeInfo(nextIndex).IsCall()) { + return; + } + auto pcOffset = builder_->GetPcOffset(nextIndex); + auto stateInfo = GetFrameInfoAfter(nextIndex - 1); // 1: after prev bc GateRef frameValues = BuildFrameValues(stateInfo); GateRef frameStateAfter = BuildFrameStateGate( pcOffset, frameValues, FrameStateOutput::Invalid()); BindStateSplit(gate, gate, frameStateAfter); } +size_t FrameStateBuilder::GetNearestNextIndex(size_t index, BytecodeRegion& bb) const +{ + index++; + auto gate = builder_->GetGateByBcIndex(index); + while ((gate == Circuit::NullGate() || gateAcc_.IsConstant(gate)) && index < bb.end) { + index++; + gate = builder_->GetGateByBcIndex(index); + } + return index; +} + void 
FrameStateBuilder::BuildStateSplitBefore(BytecodeRegion& bb, size_t index) { auto pcOffset = builder_->GetPcOffset(index); @@ -433,17 +499,66 @@ void FrameStateBuilder::BuildStateSplitBefore(BytecodeRegion& bb, size_t index) } } -bool FrameStateBuilder::ShouldInsertFrameStateBefore(BytecodeRegion& bb, - const BytecodeInfo &bytecodeInfo, size_t index) -{ - // add Call State Split for inline - if (bytecodeInfo.IsCall()) { - return true; +void FrameStateBuilder::FindLoopExit(GateRef gate) { + // if find the bytecode gate, return. + if (builder_->IsBcIndexByGate(gate)) { + return; + } + + // if find the loopExit, do process. + if (gateAcc_.GetOpCode(gate) == OpCode::LOOP_EXIT) { + GateRef findBefore = gateAcc_.GetState(gate); + while (!builder_->IsBcIndexByGate(findBefore)) { + findBefore = gateAcc_.GetState(findBefore); + } + + // Build stateSplit After + size_t index = builder_->GetBcIndexByGate(findBefore); + auto pcOffset = builder_->GetPcOffset(index); + auto stateInfo = GetOrOCreateLoopExitStateInfo(gate); + ASSERT(stateInfo != nullptr); + GateRef frameValues = BuildFrameValues(stateInfo); + GateRef frameStateAfter = BuildFrameStateGate( + pcOffset, frameValues, FrameStateOutput::Invalid()); + + GateRef stateIn = gate; // stateIn - LoopExit + GateRef dependIn = Circuit::NullGate(); // dependIn - LoopExitDepend + auto uses = gateAcc_.Uses(gate); + for (auto useIt = uses.begin(); useIt != uses.end();) { + if (gateAcc_.GetOpCode(*useIt) == OpCode::LOOP_EXIT_DEPEND) { + dependIn = *useIt; + } + useIt++; + } + ASSERT(dependIn != Circuit::NullGate()); + + // Bind stateSplit after the loopExit and loopExitDepend. + BindStateSplit(stateIn, dependIn, frameStateAfter); } + + // continue to find the loopExit. + ASSERT(gateAcc_. GetStateCount(gate) == 1); + FindLoopExit(gateAcc_.GetState(gate)); +} + +bool FrameStateBuilder::ShouldInsertFrameStateBefore(BytecodeRegion& bb, size_t index) +{ auto gate = builder_->GetGateByBcIndex(index); if (index == bb.start) { if (bb.numOfStatePreds > 1) { // 1: > 1 is merge + // backward to find loop exits, insert stateSplit before loopExit. + for (auto &predBb: bb.preds) { + if (LoopExitCount(predBb, &bb) > 0) { + auto target = GetPredStateGateBetweenBB(&bb, predBb); + FindLoopExit(target); + } + } return true; + } else if (bb.numOfStatePreds == 1) { // 1: == 1 maybe loopexit + auto predBb = (bb.preds.size() > 0) ? 
bb.preds.at(0) : bb.trys.at(0);
+        if (LoopExitCount(predBb, &bb) > 0) {
+            return true;
+        }
     }
     if (gateAcc_.GetOpCode(bb.dependCurrent) == OpCode::GET_EXCEPTION) {
         return true;
@@ -463,7 +578,12 @@ bool FrameStateBuilder::ShouldInsertFrameStateBefore(BytecodeRegion& bb,
 void FrameStateBuilder::BuildFrameState(BytecodeRegion& bb,
     const BytecodeInfo &bytecodeInfo, size_t index)
 {
-    bool needStateSplitBefore = ShouldInsertFrameStateBefore(bb, bytecodeInfo, index);
+    // Do not bind a state split for calls; bind a frame state directly instead
+    if (bytecodeInfo.IsCall()) {
+        BuildCallFrameState(index, bb);
+    }
+
+    bool needStateSplitBefore = ShouldInsertFrameStateBefore(bb, index);
     auto gate = builder_->GetGateByBcIndex(index);
     if (needStateSplitBefore && index != bb.start) {
         auto depend = gateAcc_.GetDep(gate);
@@ -477,11 +597,21 @@
     if (!bytecodeInfo.NoSideEffects() && !bytecodeInfo.IsThrow()) {
         if (!gateAcc_.HasIfExceptionUse(gate)) {
-            BuildStateSplitAfter(index);
+            BuildStateSplitAfter(index, bb);
         }
     }
 }
 
+void FrameStateBuilder::BuildCallFrameState(size_t index, BytecodeRegion& bb)
+{
+    auto pcOffset = builder_->GetPcOffset(index);
+    auto stateInfo = GetFrameInfoBefore(bb, index);
+    GateRef frameValues = BuildFrameValues(stateInfo);
+    GateRef frameState = BuildFrameStateGate(pcOffset, frameValues, FrameStateOutput::Invalid());
+    auto gate = builder_->GetGateByBcIndex(index);
+    gateAcc_.ReplaceFrameStateIn(gate, frameState);
+}
+
 void FrameStateBuilder::BindBBStateSplit()
 {
     auto& dfsList = builder_->GetDfsList();
@@ -490,7 +620,11 @@
         if (builder_->IsFirstBasicBlock(bb.id)) {
             BuildStateSplitBefore(bb, bb.start);
         }
+        if (builder_->IsEntryBlock(bb.id)) {
+            BuildStateSplitBefore(bb, bb.start);
+        }
         ASSERT(!bb.isDead);
+
         builder_->EnumerateBlock(bb, [&](const BytecodeInfo &bytecodeInfo) -> bool {
             auto &iterator = bb.GetBytecodeIterator();
             auto index = iterator.Index();
@@ -545,23 +679,20 @@ void FrameStateBuilder::UpdateVirtualRegistersOfResume(GateRef gate)
 size_t FrameStateBuilder::LoopExitCount(BytecodeRegion* bb, BytecodeRegion* bbNext)
 {
-    size_t headDep = ((bbNext->numOfLoopBacks > 0) && (bbNext->loopbackBlocks.count(bb->id) == 0)) ? 1 : 0;
-    if (bbNext->loopDepth < headDep) {
-        // loop optimization disabled.
- return 0; - } - size_t nextDep = bbNext->loopDepth - headDep; - ASSERT(bb->loopDepth >= nextDep); - return bb->loopDepth > nextDep; + return builder_->LoopExitCount(bb->id, bbNext->id); } -GateRef FrameStateBuilder::TryGetLoopExitValue(GateRef value, size_t diff) +GateRef FrameStateBuilder::TryGetLoopExitValue(GateRef value, size_t diff, size_t reg) { if ((gateAcc_.GetOpCode(value) != OpCode::LOOP_EXIT_VALUE) || (diff == 0)) { return value; } + for (size_t i = 0; i < diff; ++i) { ASSERT(gateAcc_.GetOpCode(value) == OpCode::LOOP_EXIT_VALUE); + GateRef loopExit = gateAcc_.GetState(value); + FrameStateInfo* loopExitFrameInfo = GetOrOCreateLoopExitStateInfo(loopExit); + loopExitFrameInfo->SetValuesAt(reg, value); value = gateAcc_.GetValueIn(value); } return value; diff --git a/ecmascript/compiler/frame_states.h b/ecmascript/compiler/frame_states.h index b3732bff7e071b103193926357788135b839a27d..87fc617dd117ee09164c6102616bcaad63bc0b57 100644 --- a/ecmascript/compiler/frame_states.h +++ b/ecmascript/compiler/frame_states.h @@ -101,7 +101,7 @@ private: { UpdateVirtualRegister(accumulatorIndex_, gate); } - void BindStateSplit(GateRef state, GateRef depend, GateRef frameState); + GateRef BindStateSplit(GateRef state, GateRef depend, GateRef frameState); void BindStateSplit(GateRef gate, GateRef frameState); void BindBBStateSplit(); void UpdateVirtualRegister(size_t id, size_t index, GateRef gate); @@ -113,11 +113,12 @@ private: bool ComputeLiveOut(size_t bbId); void ComputeLiveState(); void ComputeLiveOutBC(uint32_t index, const BytecodeInfo &bytecodeInfo, size_t bbId); + void FindLoopExit(GateRef gate); bool IsAsyncResolveOrSusp(const BytecodeInfo &bytecodeInfo); bool MergeIntoPredBC(uint32_t predPc, size_t diff); bool MergeIntoPredBB(BytecodeRegion *bb, BytecodeRegion *predBb); size_t LoopExitCount(BytecodeRegion *bb, BytecodeRegion *bbNext); - GateRef TryGetLoopExitValue(GateRef value, size_t diff); + GateRef TryGetLoopExitValue(GateRef value, size_t diff, size_t reg); FrameStateInfo *GetOrOCreateBCEndStateInfo(uint32_t bcIndex) { auto currentInfo = bcEndStateInfos_[bcIndex]; @@ -127,8 +128,39 @@ private: } return currentInfo; } - FrameStateInfo *GetBBBeginStateInfo(size_t bbId) const + FrameStateInfo *GetOrOCreateLoopExitStateInfo(GateRef gate) { + ASSERT(gateAcc_.GetOpCode(gate) == OpCode::LOOP_EXIT); + size_t idx = gateAcc_.GetId(gate); + ASSERT(idx < circuit_ -> GetMaxGateId()); + auto currentInfo = loopExitStateInfos_[idx]; + if (currentInfo == nullptr) { + currentInfo = CreateEmptyStateInfo(); + loopExitStateInfos_[idx] = currentInfo; + } + return currentInfo; + } + FrameStateInfo *GetEntryBBBeginStateInfo() + { + auto entry = CreateEmptyStateInfo(); + auto first = bbBeginStateInfos_.at(1); // 1: first block + for (size_t i = 0; i < numVregs_; ++i) { + auto value = first->ValuesAt(i); + if (value == Circuit::NullGate()) { + continue; + } + if (gateAcc_.IsValueSelector(value)) { + value = gateAcc_.GetValueIn(value); + } + entry->SetValuesAt(i, value); + } + return entry; + } + FrameStateInfo *GetBBBeginStateInfo(size_t bbId) + { + if (bbId == 0) { // 0: entry block + return GetEntryBBBeginStateInfo(); + } return bbBeginStateInfos_.at(bbId); } void UpdateVirtualRegistersOfSuspend(GateRef gate); @@ -139,10 +171,12 @@ private: GateRef GetPreBBInput(BytecodeRegion *bb, BytecodeRegion *predBb, GateRef gate); GateRef GetPhiComponent(BytecodeRegion *bb, BytecodeRegion *predBb, GateRef phi); void BuildFrameState(BytecodeRegion& bb, const BytecodeInfo &bytecodeInfo, size_t index); - void 
BuildStateSplitAfter(size_t index); + void BuildStateSplitAfter(size_t index, BytecodeRegion& bb); void BuildStateSplitBefore(BytecodeRegion& bb, size_t index); - bool ShouldInsertFrameStateBefore(BytecodeRegion& bb, - const BytecodeInfo &bytecodeInfo, size_t index); + bool ShouldInsertFrameStateBefore(BytecodeRegion& bb, size_t index); + void BuildCallFrameState(size_t index, BytecodeRegion& bb); + size_t GetNearestNextIndex(size_t index, BytecodeRegion& bb) const; + GateRef GetPredStateGateBetweenBB(BytecodeRegion *bb, BytecodeRegion *predBb); BytecodeCircuitBuilder *builder_{nullptr}; FrameStateInfo *liveOutResult_{nullptr}; @@ -152,6 +186,7 @@ private: GateAccessor gateAcc_; ChunkVector bcEndStateInfos_; ChunkVector bbBeginStateInfos_; + ChunkVector loopExitStateInfos_; ChunkVector postOrderList_; }; } // panda::ecmascript::kungfu diff --git a/ecmascript/compiler/gate.cpp b/ecmascript/compiler/gate.cpp index cf8fe3f4ef4405e84b18d598aed619831314e37e..797df28d15c23e9dc8a05b4dfea46f5b73f837c4 100644 --- a/ecmascript/compiler/gate.cpp +++ b/ecmascript/compiler/gate.cpp @@ -14,6 +14,8 @@ */ #include "ecmascript/compiler/gate.h" +#include +#include namespace panda::ecmascript::kungfu { void Gate::CheckNullInput() const @@ -718,7 +720,10 @@ void Gate::Print(std::string bytecode, bool inListPreview, size_t highlightIdx) log += ((bytecode.compare("") == 0) ? "" : "\"bytecode\":\"") + bytecode; log += ((bytecode.compare("") == 0) ? "" : "\", "); log += "\"MType\":\"" + MachineTypeStr(GetMachineType()) + ", "; - log += "bitfield=" + std::to_string(TryGetValue()) + ", "; + + std::ostringstream oss; + oss << std::hex << TryGetValue(); + log += "bitfield=0x" + oss.str() + ", "; log += "type=" + GateTypeStr(type_) + ", "; log += "stamp=" + std::to_string(static_cast(stamp_)) + ", "; log += "mark=" + std::to_string(static_cast(mark_)) + ", "; @@ -774,7 +779,9 @@ void Gate::ShortPrint(std::string bytecode, bool inListPreview, size_t highlight log += ((bytecode.compare("") == 0) ? "" : "bytecode=") + bytecode; log += ((bytecode.compare("") == 0) ? 
"" : ", "); log += "\"MType\"=\"" + MachineTypeStr(GetMachineType()) + ", "; - log += "bitfield=" + std::to_string(TryGetValue()) + ", "; + std::ostringstream oss; + oss << std::hex << TryGetValue(); + log += "bitfield=0x" + oss.str() + ", "; log += "type=" + GateTypeStr(type_) + ", "; log += "\", in=["; diff --git a/ecmascript/compiler/gate.h b/ecmascript/compiler/gate.h index 0f6b0a0f7d0b812c26de4945b1e543e4e4863a83..3cf497a2dc8256686fc93a153526d094624b2bf9 100644 --- a/ecmascript/compiler/gate.h +++ b/ecmascript/compiler/gate.h @@ -222,9 +222,14 @@ public: return OneParameterMetaData::Cast(meta_); } - const TypedBinaryMegaData* GetTypedBinaryMegaData() const + const TypedBinaryMetaData* GetTypedBinaryMetaData() const { - return TypedBinaryMegaData::Cast(meta_); + return TypedBinaryMetaData::Cast(meta_); + } + + const TypedCallTargetCheckMetaData* GetTypedCallTargetCheckMetaData() const + { + return TypedCallTargetCheckMetaData::Cast(meta_); } const StringMetaData* GetStringMetaData() const @@ -243,6 +248,11 @@ public: return BoolMetaData::Cast(meta_); } + const TypedCallMetaData* GetTypedCallMetaData() const + { + return TypedCallMetaData::Cast(meta_); + } + std::string MachineTypeStr(MachineType machineType) const; std::string GateTypeStr(GateType gateType) const; ~Gate() = default; diff --git a/ecmascript/compiler/gate_accessor.cpp b/ecmascript/compiler/gate_accessor.cpp index cc51e40c24c9e908e3514c81524103d6cad806c4..d4cffe469f469a6db0886b8c3eb79c91813f1db6 100644 --- a/ecmascript/compiler/gate_accessor.cpp +++ b/ecmascript/compiler/gate_accessor.cpp @@ -97,26 +97,56 @@ size_t GateAccessor::GetOffset(GateRef gate) const return gatePtr->GetOneParameterMetaData()->GetValue(); } +uint32_t GateAccessor::GetTrueWeight(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::IF_BRANCH); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + auto accessor = BranchAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); + return accessor.GetTrueWeight(); +} + +uint32_t GateAccessor::GetFalseWeight(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::IF_BRANCH); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + auto accessor = BranchAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); + return accessor.GetFalseWeight(); +} + +bool GateAccessor::HasBranchWeight(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::IF_BRANCH); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + auto accessor = BranchAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); + return (accessor.GetTrueWeight() != 0) || (accessor.GetFalseWeight() != 0); +} + size_t GateAccessor::GetIndex(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::GET_GLOBAL_ENV_OBJ_HCLASS || - GetOpCode(gate) == OpCode::GET_GLOBAL_CONSTANT_VALUE); + GetOpCode(gate) == OpCode::GET_GLOBAL_CONSTANT_VALUE || + GetOpCode(gate) == OpCode::GET_GLOBAL_ENV_OBJ); Gate *gatePtr = circuit_->LoadGatePtr(gate); return gatePtr->GetOneParameterMetaData()->GetValue(); } -size_t GateAccessor::GetArraySize(GateRef gate) const +uint32_t GateAccessor::GetArraySize(GateRef gate) const { - ASSERT(GetOpCode(gate) == OpCode::CREATE_ARRAY); + ASSERT(GetOpCode(gate) == OpCode::CREATE_ARRAY || + GetOpCode(gate) == OpCode::CREATE_ARRAY_WITH_BUFFER); Gate *gatePtr = circuit_->LoadGatePtr(gate); - return gatePtr->GetOneParameterMetaData()->GetValue(); + auto array = gatePtr->GetOneParameterMetaData()->GetValue(); + return ArrayMetaDataAccessor(array).GetArrayLength(); } -void GateAccessor::SetArraySize(GateRef gate, size_t size) +void 
GateAccessor::SetArraySize(GateRef gate, uint32_t size) { - ASSERT(GetOpCode(gate) == OpCode::CREATE_ARRAY); + ASSERT(GetOpCode(gate) == OpCode::CREATE_ARRAY || + GetOpCode(gate) == OpCode::CREATE_ARRAY_WITH_BUFFER); Gate *gatePtr = circuit_->LoadGatePtr(gate); - const_cast(gatePtr->GetOneParameterMetaData())->SetValue(size); + ArrayMetaDataAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue()); + accessor.SetArrayLength(size); + const_cast(gatePtr->GetOneParameterMetaData())->SetValue(accessor.ToValue()); } TypedUnaryAccessor GateAccessor::GetTypedUnAccessor(GateRef gate) const @@ -126,6 +156,12 @@ TypedUnaryAccessor GateAccessor::GetTypedUnAccessor(GateRef gate) const return TypedUnaryAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); } +TypedBinaryAccessor GateAccessor::GetTypedBinaryAccessor(GateRef gate) const +{ + Gate *gatePtr = circuit_->LoadGatePtr(gate); + return TypedBinaryAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); +} + TypedJumpAccessor GateAccessor::GetTypedJumpAccessor(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::TYPED_CONDITION_JUMP); @@ -133,6 +169,41 @@ TypedJumpAccessor GateAccessor::GetTypedJumpAccessor(GateRef gate) const return TypedJumpAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); } +ArrayMetaDataAccessor GateAccessor::GetArrayMetaDataAccessor(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::STABLE_ARRAY_CHECK || + GetOpCode(gate) == OpCode::HCLASS_STABLE_ARRAY_CHECK || + GetOpCode(gate) == OpCode::CREATE_ARRAY || + GetOpCode(gate) == OpCode::CREATE_ARRAY_WITH_BUFFER); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + return ArrayMetaDataAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); +} + +ObjectTypeAccessor GateAccessor::GetObjectTypeAccessor(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::OBJECT_TYPE_CHECK || + GetOpCode(gate) == OpCode::OBJECT_TYPE_COMPARE); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + return ObjectTypeAccessor(gatePtr->GetOneParameterMetaData()->GetValue()); +} + +bool GateAccessor::TypedOpIsTypedArray(GateRef gate, TypedOpKind kind) const +{ + switch (kind) { + case TypedOpKind::TYPED_LOAD_OP: { + TypedLoadOp op = GetTypedLoadOp(gate); + return TypedLoadOp::TYPED_ARRAY_FIRST <= op && op <=TypedLoadOp::TYPED_ARRAY_LAST; + } + case TypedOpKind::TYPED_STORE_OP: { + TypedStoreOp op = GetTypedStoreOp(gate); + return TypedStoreOp::TYPED_ARRAY_FIRST <= op && op <= TypedStoreOp::TYPED_ARRAY_LAST; + } + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } +} + TypedLoadOp GateAccessor::GetTypedLoadOp(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::LOAD_ELEMENT); @@ -147,18 +218,32 @@ TypedStoreOp GateAccessor::GetTypedStoreOp(GateRef gate) const return static_cast(gatePtr->GetOneParameterMetaData()->GetValue()); } +TypedCallTargetCheckOp GateAccessor::GetTypedCallTargetCheckOp(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::TYPED_CALLTARGETCHECK_OP); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + return gatePtr->GetTypedCallTargetCheckMetaData()->GetTypedCallTargetCheckOp(); +} + +MemoryType GateAccessor::GetMemoryType(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::STORE_MEMORY); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + return static_cast(gatePtr->GetOneParameterMetaData()->GetValue()); +} + TypedBinOp GateAccessor::GetTypedBinaryOp(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::TYPED_BINARY_OP); Gate *gatePtr = circuit_->LoadGatePtr(gate); - return 
gatePtr->GetTypedBinaryMegaData()->GetTypedBinaryOp(); + return gatePtr->GetTypedBinaryMetaData()->GetTypedBinaryOp(); } PGOSampleType GateAccessor::GetTypedBinaryType(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::TYPED_BINARY_OP); Gate *gatePtr = circuit_->LoadGatePtr(gate); - return gatePtr->GetTypedBinaryMegaData()->GetType(); + return gatePtr->GetTypedBinaryMetaData()->GetType(); } bool GateAccessor::HasNumberType(GateRef gate) const @@ -188,19 +273,24 @@ GlobalTSTypeRef GateAccessor::GetFuncGT(GateRef gate) const GateType GateAccessor::GetParamGateType(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::PRIMITIVE_TYPE_CHECK || - GetOpCode(gate) == OpCode::OBJECT_TYPE_CHECK || GetOpCode(gate) == OpCode::TYPED_ARRAY_CHECK || GetOpCode(gate) == OpCode::INDEX_CHECK || - GetOpCode(gate) == OpCode::JSCALLTARGET_TYPE_CHECK || - GetOpCode(gate) == OpCode::JSCALLTHISTARGET_TYPE_CHECK || - GetOpCode(gate) == OpCode::JSFASTCALLTARGET_TYPE_CHECK || - GetOpCode(gate) == OpCode::JSFASTCALLTHISTARGET_TYPE_CHECK || - GetOpCode(gate) == OpCode::JSCALLTARGET_FROM_DEFINEFUNC_CHECK); + GetOpCode(gate) == OpCode::TYPED_CALLTARGETCHECK_OP || + GetOpCode(gate) == OpCode::CREATE_ARRAY_WITH_BUFFER); Gate *gatePtr = circuit_->LoadGatePtr(gate); GateTypeAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue()); return accessor.GetGateType(); } +bool GateAccessor::IsConvertSupport(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::CONVERT || + GetOpCode(gate) == OpCode::CHECK_AND_CONVERT); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + ValuePairTypeAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue()); + return accessor.IsConvertSupport(); +} + ValueType GateAccessor::GetSrcType(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::CONVERT || @@ -238,6 +328,22 @@ GateType GateAccessor::GetRightType(GateRef gate) const return accessor.GetRightType(); } +uint32_t GateAccessor::GetFirstValue(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::RANGE_GUARD); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + UInt32PairAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue()); + return accessor.GetFirstValue(); +} + +uint32_t GateAccessor::GetSecondValue(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::RANGE_GUARD); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + UInt32PairAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue()); + return accessor.GetSecondValue(); +} + size_t GateAccessor::GetVirtualRegisterIndex(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::SAVE_REGISTER || @@ -264,7 +370,33 @@ bool GateAccessor::IsVtable(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::LOAD_PROPERTY); Gate *gatePtr = circuit_->LoadGatePtr(gate); - return gatePtr->GetBoolMetaData()->getBool(); + return gatePtr->GetBoolMetaData()->GetBool(); +} + +bool GateAccessor::GetNoGCFlag(GateRef gate) const +{ + if (gate == Circuit::NullGate()) { + return false; + } + OpCode op = GetOpCode(gate); + if (op != OpCode::TYPEDCALL && op != OpCode::TYPEDFASTCALL) { + return false; + } + return TypedCallIsNoGC(gate); +} + +bool GateAccessor::TypedCallIsNoGC(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::TYPEDCALL || GetOpCode(gate) == OpCode::TYPEDFASTCALL); + Gate *gatePtr = circuit_->LoadGatePtr(gate); + return gatePtr->GetTypedCallMetaData()->IsNoGC(); +} + +bool GateAccessor::IsNoGC(GateRef gate) const +{ + ASSERT(GetOpCode(gate) == OpCode::CALL_OPTIMIZED || GetOpCode(gate) == OpCode::FAST_CALL_OPTIMIZED); + Gate *gatePtr = 
circuit_->LoadGatePtr(gate); + return gatePtr->GetBoolMetaData()->GetBool(); } uint32_t GateAccessor::TryGetPcOffset(GateRef gate) const @@ -276,11 +408,12 @@ uint32_t GateAccessor::TryGetPcOffset(GateRef gate) const return gatePtr->GetJSBytecodeMetaData()->GetPcOffset(); case OpCode::TYPED_CALL_BUILTIN: case OpCode::CONSTRUCT: - case OpCode::TYPEDCALL: - case OpCode::TYPEDFASTCALL: case OpCode::CALL_GETTER: case OpCode::CALL_SETTER: return static_cast(gatePtr->GetOneParameterMetaData()->GetValue()); + case OpCode::TYPEDCALL: + case OpCode::TYPEDFASTCALL: + return static_cast(gatePtr->GetTypedCallMetaData()->GetValue()); case OpCode::FRAME_STATE: { UInt32PairAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue()); return accessor.GetFirstValue(); @@ -298,6 +431,9 @@ PGOSampleType GateAccessor::TryGetPGOType(GateRef gate) const if (op == OpCode::JS_BYTECODE) { return gatePtr->GetJSBytecodeMetaData()->GetType(); } + if (op == OpCode::TYPED_BINARY_OP) { + return GetTypedBinaryType(gate); + } return PGOSampleType::NoneType(); } @@ -310,6 +446,43 @@ void GateAccessor::TrySetPGOType(GateRef gate, PGOSampleType type) } } +ElementsKind GateAccessor::TryGetElementsKind(GateRef gate) const +{ + Gate *gatePtr = circuit_->LoadGatePtr(gate); + OpCode op = GetOpCode(gate); + if (op == OpCode::JS_BYTECODE) { + return gatePtr->GetJSBytecodeMetaData()->GetElementsKind(); + } + return ElementsKind::GENERIC; +} + +ElementsKind GateAccessor::TryGetArrayElementsKind(GateRef gate) const +{ + Gate *gatePtr = circuit_->LoadGatePtr(gate); + OpCode op = GetOpCode(gate); + if (op == OpCode::JS_BYTECODE) { + ElementsKind kind = gatePtr->GetJSBytecodeMetaData()->GetElementsKind(); + if (Elements::IsGeneric(kind)) { + return kind; + } + std::vector kinds = gatePtr->GetJSBytecodeMetaData()->GetElementsKinds(); + for (auto &x : kinds) { + kind = Elements::MergeElementsKind(kind, x); + } + return kind; + } + return ElementsKind::GENERIC; +} + +void GateAccessor::TrySetElementsKind(GateRef gate, ElementsKind kind) +{ + Gate *gatePtr = circuit_->LoadGatePtr(gate); + OpCode op = GetOpCode(gate); + if (op == OpCode::JS_BYTECODE) { + const_cast(gatePtr->GetJSBytecodeMetaData())->SetElementsKind(kind); + } +} + EcmaOpcode GateAccessor::GetByteCodeOpcode(GateRef gate) const { ASSERT(GetOpCode(gate) == OpCode::JS_BYTECODE); @@ -488,6 +661,33 @@ bool GateAccessor::IsSelector(GateRef g) const return (op == OpCode::VALUE_SELECTOR) || (op == OpCode::DEPEND_SELECTOR); } +bool GateAccessor::IsIn(GateRef g, GateRef in) const +{ + size_t n = GetNumIns(g); + for (size_t id = 0; id < n; id++) { + GateRef i = GetIn(g, id); + if (i == in) { + return true; + } + } + return false; +} + +bool GateAccessor::IsSimpleState(GateRef g) const +{ + auto op = GetOpCode(g); + return (op == OpCode::IF_TRUE || + op == OpCode::IF_FALSE || + op == OpCode::SWITCH_CASE || + op == OpCode::DEFAULT_CASE || + op == OpCode::LOOP_BACK || + op == OpCode::MERGE || + op == OpCode::VALUE_SELECTOR || + op == OpCode::DEPEND_SELECTOR || + op == OpCode::DEPEND_RELAY || + op == OpCode::ORDINARY_BLOCK); +} + bool GateAccessor::IsControlCase(GateRef gate) const { return circuit_->IsControlCase(gate); @@ -769,55 +969,7 @@ void GateAccessor::ReplaceHirAndDeleteIfException(GateRef hirGate, void GateAccessor::EliminateRedundantPhi() { - std::vector gateList; - GetAllGates(gateList); - std::queue workList; - std::set inList; - for (auto gate : gateList) { - if (IsValueSelector(gate)) { - workList.push(gate); - inList.insert(gate); - } - } - - while (!workList.empty()) { 
-        auto cur = workList.front();
-        workList.pop();
-        ASSERT(IsValueSelector(cur));
-        GateRef first = GetValueIn(cur, 0);
-        bool sameIns = true;
-        bool selfUse = first == cur;
-        auto valueNum = GetNumValueIn(cur);
-        for (size_t i = 1; i < valueNum; ++i) {
-            GateRef input = GetValueIn(cur, i);
-            if (input != first) {
-                sameIns = false;
-            }
-            if (input == cur) {
-                ASSERT(IsLoopHead(GetState(cur)));
-                selfUse = true;
-            }
-        }
-        if ((!sameIns) && (!selfUse)) {
-            inList.erase(cur);
-            continue;
-        }
-        auto use = Uses(cur);
-        for (auto it = use.begin(); it != use.end(); ++it) {
-            if (((*it) == cur) || (!IsValueSelector(*it)) || inList.count(*it)) {
-                // selfUse or notPhi or inListPhi
-                continue;
-            }
-            workList.push(*it);
-            inList.insert(*it);
-        }
-        UpdateAllUses(cur, first);
-    }
-    for (auto phi : inList) {
-        ASSERT(IsValueSelector(phi));
-        DeleteGate(phi);
-    }
-    return;
+    GraphEditor::EliminateRedundantPhi(circuit_);
 }
 
 UseIterator GateAccessor::DeleteGate(const UseIterator &useIt)
@@ -1053,6 +1205,96 @@ void GateAccessor::ReplaceGate(GateRef gate, GateRef state, GateRef depend, Gate
     DeleteGate(gate);
 }
 
+// After inserting newGate, every state input taken from `state` and every depend input
+// taken from `depend` can be rerouted to newGate
+void GateAccessor::ReplaceInAfterInsert(GateRef state, GateRef depend, GateRef newGate)
+{
+    auto uses = Uses(state);
+    for (auto useIt = uses.begin(); useIt != uses.end();) {
+        if (IsStateIn(useIt) && (*useIt != newGate)) {
+            ASSERT(newGate != Circuit::NullGate());
+            // Exceptions: for example, IF_TRUE / IF_FALSE -> DEPEND_RELAY,
+            // or LOOP_BEGIN / MERGE -> DEPEND_SELECTOR cannot be replaced
+            if (!IsState(*useIt)) {
+                useIt++;
+                continue;
+            }
+            useIt = ReplaceIn(useIt, newGate);
+        } else {
+            useIt++;
+        }
+    }
+
+    uses = Uses(depend);
+    for (auto useIt = uses.begin(); useIt != uses.end();) {
+        if (IsDependIn(useIt) && (*useIt != newGate)) {
+            ASSERT(newGate != Circuit::NullGate());
+            if (!IsState(*useIt)) {
+                useIt++;
+                continue;
+            }
+            useIt = ReplaceIn(useIt, newGate);
+        } else {
+            useIt++;
+        }
+    }
+}
+
+// For a loop exit, find the STATE_SPLIT that follows the DEPEND_SELECTOR
+void GateAccessor::GetFrameStateDependIn(GateRef gate, GateRef &dependIn)
+{
+    auto uses = Uses(gate);
+    size_t stateSplitCount = 0;
+    GateRef stateSplit = Circuit::NullGate();
+    for (auto it = uses.begin(); it != uses.end();) {
+        if (GetOpCode(*it) == OpCode::STATE_SPLIT) {
+            ASSERT(stateSplitCount < 1); // at most one STATE_SPLIT
+            stateSplitCount++;
+            stateSplit = *it;
+            break;
+        } else {
+            ++it;
+        }
+    }
+
+    ASSERT(stateSplitCount <= 1);
+    if (stateSplitCount == 1 && stateSplit != Circuit::NullGate()) {
+        dependIn = stateSplit;
+    }
+}
+
+// For an if branch or a loop exit, insert after:
+// stateIn: IF_TRUE / IF_FALSE / MERGE
+// dependIn: DEPEND_RELAY / DEPEND_SELECTOR, or the STATE_SPLIT if one follows immediately.
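
The helper described by the comment above is defined just below. To make the intended call pattern of the two new helpers concrete, here is a minimal usage sketch; it is not part of the patch, and `acc` (a GateAccessor), `circuit`, `ifTrueGate` and `frameState` are assumed to already exist in the caller. The `NewGate` call mirrors the way `BindStateSplit` builds a STATE_SPLIT elsewhere in this patch.

```
// Hypothetical caller: splice a new STATE_SPLIT in after an IF_TRUE successor.
GateRef stateIn = Circuit::NullGate();
GateRef dependIn = Circuit::NullGate();
// Resolves stateIn to ifTrueGate and dependIn to its DEPEND_RELAY
// (or, via GetFrameStateDependIn, to a STATE_SPLIT that follows it).
acc.GetStateInAndDependIn(ifTrueGate, stateIn, dependIn);
GateRef newGate = circuit->NewGate(circuit->StateSplit(), {stateIn, dependIn, frameState});
// Reroute the old state/depend users of the pair onto the freshly inserted gate.
acc.ReplaceInAfterInsert(stateIn, dependIn, newGate);
```
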
+ +void GateAccessor::GetStateInAndDependIn(GateRef insertAfter, GateRef &stateIn, GateRef &dependIn) +{ + if (GetOpCode(insertAfter) == OpCode::IF_TRUE || GetOpCode(insertAfter) == OpCode::IF_FALSE) { + auto uses = Uses(insertAfter); + for (auto it = uses.begin(); it != uses.end();) { + if (GetOpCode(*it) == OpCode::DEPEND_RELAY) { + stateIn = insertAfter; + dependIn = (*it); + break; + } else { + ++it; + } + } + } else if (GetOpCode(insertAfter) == OpCode::MERGE) { + auto uses = Uses(insertAfter); + for (auto it = uses.begin(); it != uses.end();) { + if (GetOpCode(*it) == OpCode::DEPEND_SELECTOR) { + stateIn = insertAfter; + dependIn = (*it); + GetFrameStateDependIn(*it, dependIn); + break; + } else { + ++it; + } + } + } + ASSERT(GetDependCount(dependIn) > 0); +} + GateRef GateAccessor::GetFrameState(GateRef gate) const { ASSERT(HasFrameState(gate)); @@ -1216,4 +1458,37 @@ bool GateAccessor::HasIfExceptionUse(GateRef gate) const } return false; } + +bool GateAccessor::IsHeapObjectFromElementsKind(GateRef gate) +{ + OpCode opcode = GetOpCode(gate); + if (opcode == OpCode::JS_BYTECODE) { + auto bc = GetByteCodeOpcode(gate); + if (bc == EcmaOpcode::LDOBJBYVALUE_IMM8_V8 || bc == EcmaOpcode::LDOBJBYVALUE_IMM16_V8 || + bc == EcmaOpcode::LDTHISBYVALUE_IMM8 || bc == EcmaOpcode::LDTHISBYVALUE_IMM16) { + ElementsKind kind = TryGetElementsKind(gate); + return Elements::IsObject(kind); + } + return false; + } + + if (opcode == OpCode::LOAD_ELEMENT) { + TypedLoadOp typedOp = GetTypedLoadOp(gate); + return typedOp == TypedLoadOp::ARRAY_LOAD_OBJECT_ELEMENT; + } + + return false; +} + +bool GateAccessor::IsLoopBackUse(const UseIterator &useIt) const +{ + if (IsStateIn(useIt)) { + return (useIt.GetIndex() == 1) && IsLoopHead(*useIt); + } + if ((IsValueSelector(*useIt) && IsValueIn(useIt)) || + (IsDependSelector(*useIt) && IsDependIn(useIt))) { + return (useIt.GetIndex() == 2) && IsLoopHead(GetState(*useIt)); + } + return false; +} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/gate_accessor.h b/ecmascript/compiler/gate_accessor.h index 12a7494264a1bffe1d1bf56a64f118e9c80a39e3..bbed79053de0ea429d1fd2d751152d09c11051cb 100644 --- a/ecmascript/compiler/gate_accessor.h +++ b/ecmascript/compiler/gate_accessor.h @@ -18,7 +18,8 @@ #include "ecmascript/compiler/circuit.h" #include "ecmascript/compiler/gate_meta_data.h" -#include "ecmascript/pgo_profiler/pgo_profiler_type.h" +#include "ecmascript/elements.h" +#include "ecmascript/pgo_profiler/types/pgo_profiler_type.h" namespace panda::ecmascript::kungfu { @@ -380,25 +381,40 @@ public: ICmpCondition GetICmpCondition(GateRef gate) const; FCmpCondition GetFCmpCondition(GateRef gate) const; size_t GetOffset(GateRef gate) const; + uint32_t GetTrueWeight(GateRef gate) const; + uint32_t GetFalseWeight(GateRef gate) const; + bool HasBranchWeight(GateRef gate) const; size_t GetIndex(GateRef gate) const; - size_t GetArraySize(GateRef gate) const; - void SetArraySize(GateRef gate, size_t size); + uint32_t GetArraySize(GateRef gate) const; + void SetArraySize(GateRef gate, uint32_t size); size_t GetVirtualRegisterIndex(GateRef gate) const; + bool TypedOpIsTypedArray(GateRef gate, TypedOpKind kind) const; TypedLoadOp GetTypedLoadOp(GateRef gate) const; TypedStoreOp GetTypedStoreOp(GateRef gate) const; + MemoryType GetMemoryType(GateRef gate) const; TypedBinOp GetTypedBinaryOp(GateRef gate) const; + TypedCallTargetCheckOp GetTypedCallTargetCheckOp(GateRef gate) const; PGOSampleType GetTypedBinaryType(GateRef gate) const; bool HasNumberType(GateRef 
gate) const; GlobalTSTypeRef GetFuncGT(GateRef gate) const; GateType GetParamGateType(GateRef gate) const; TypedUnaryAccessor GetTypedUnAccessor(GateRef gate) const; + TypedBinaryAccessor GetTypedBinaryAccessor(GateRef gate) const; TypedJumpAccessor GetTypedJumpAccessor(GateRef gate) const; + ArrayMetaDataAccessor GetArrayMetaDataAccessor(GateRef gate) const; + ObjectTypeAccessor GetObjectTypeAccessor(GateRef gate) const; uint64_t GetConstantValue(GateRef gate) const; const ChunkVector& GetConstantString(GateRef gate) const; bool IsVtable(GateRef gate) const; + bool GetNoGCFlag(GateRef gate) const; + bool TypedCallIsNoGC(GateRef gate) const; + bool IsNoGC(GateRef gate) const; uint32_t TryGetPcOffset(GateRef gate) const; PGOSampleType TryGetPGOType(GateRef gate) const; void TrySetPGOType(GateRef gate, PGOSampleType type); + ElementsKind TryGetElementsKind(GateRef gate) const; + ElementsKind TryGetArrayElementsKind(GateRef gate) const; + void TrySetElementsKind(GateRef gate, ElementsKind kind); EcmaOpcode GetByteCodeOpcode(GateRef gate) const; void Print(GateRef gate) const; void ShortPrint(GateRef gate) const; @@ -413,6 +429,7 @@ public: UseIterator ReplaceIn(const UseIterator &useIt, GateRef replaceGate); // Add for lowering GateType GetGateType(GateRef gate) const; + bool IsConvertSupport(GateRef gate) const; ValueType GetSrcType(GateRef gate) const; ValueType GetDstType(GateRef gate) const; void SetGateType(GateRef gate, GateType gt); @@ -426,6 +443,9 @@ public: size_t GetInValueCount(GateRef gate) const; size_t GetInValueStarts(GateRef gate) const; void UpdateAllUses(GateRef gate, GateRef replaceValueIn); + void ReplaceInAfterInsert(GateRef state, GateRef depend, GateRef newGate); + void GetFrameStateDependIn(GateRef gate, GateRef &dependIn); + void GetStateInAndDependIn(GateRef insertAfter, GateRef &stateIn, GateRef &dependIn); void ReplaceIn(GateRef gate, size_t index, GateRef in); void ReplaceStateIn(GateRef gate, GateRef in, size_t index = 0); void ReplaceDependIn(GateRef gate, GateRef in, size_t index = 0); @@ -439,6 +459,7 @@ public: int GetInt32FromConstant(GateRef gate) const; bool IsInGateNull(GateRef gate, size_t idx) const; bool IsSelector(GateRef g) const; + bool IsSimpleState(GateRef g) const; bool IsValueSelector(GateRef g) const; bool IsControlCase(GateRef gate) const; bool IsLoopExit(GateRef gate) const; @@ -481,6 +502,8 @@ public: void ReplaceGate(GateRef gate, GateRef state, GateRef depend, GateRef value); GateType GetLeftType(GateRef gate) const; GateType GetRightType(GateRef gate) const; + uint32_t GetFirstValue(GateRef gate) const; + uint32_t GetSecondValue(GateRef gate) const; GateRef GetGlueFromArgList() const; void GetArgsOuts(std::vector& outs) const; void GetReturnOuts(std::vector& outs) const; @@ -494,6 +517,8 @@ public: void DeleteGateIfNoUse(GateRef gate); GateRef GetDependSelectorFromMerge(GateRef gate); bool HasIfExceptionUse(GateRef gate) const; + bool IsIn(GateRef g, GateRef in) const; + bool IsHeapObjectFromElementsKind(GateRef gate); GateRef GetCircuitRoot() const { @@ -536,6 +561,7 @@ public: void ReplaceHirDirectly(GateRef hirGate, StateDepend replacement, GateRef value); void ReplaceHirAndDeleteIfException(GateRef hirGate, StateDepend replacement, GateRef value); + bool IsLoopBackUse(const UseIterator &useIt) const; private: const GateMetaData *GetMetaData(GateRef gate) const; UseIterator ReplaceHirIfSuccess(const UseIterator &useIt, GateRef state); @@ -606,6 +632,7 @@ private: friend class Circuit; friend class LLVMIRBuilder; + friend class 
LiteCGIRBuilder; friend class Scheduler; friend class LoopPeeling; }; diff --git a/ecmascript/compiler/gate_meta_data.cpp b/ecmascript/compiler/gate_meta_data.cpp index 56eba99d5f6175db4e24403306e99f04f795fdbf..a51be11353da97eeb873ac8fb33b85bda2892538 100644 --- a/ecmascript/compiler/gate_meta_data.cpp +++ b/ecmascript/compiler/gate_meta_data.cpp @@ -112,9 +112,11 @@ std::string GateMetaData::Str(OpCode opcode) #define GATE_NAME_MAP(NAME, OP, R, S, D, V) { OpCode::OP, #OP }, IMMUTABLE_META_DATA_CACHE_LIST(GATE_NAME_MAP) GATE_META_DATA_LIST_WITH_BOOL(GATE_NAME_MAP) + GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(GATE_NAME_MAP) GATE_META_DATA_LIST_WITH_SIZE(GATE_NAME_MAP) GATE_META_DATA_LIST_WITH_ONE_PARAMETER(GATE_NAME_MAP) GATE_META_DATA_LIST_WITH_PC_OFFSET(GATE_NAME_MAP) + GATE_META_DATA_LIST_FOR_CALL(GATE_NAME_MAP) GATE_META_DATA_LIST_WITH_PC_OFFSET_FIXED_VALUE(GATE_NAME_MAP) #undef GATE_NAME_MAP #define GATE_NAME_MAP(OP) { OpCode::OP, #OP }, @@ -228,7 +230,6 @@ bool GateMetaData::IsVirtualState() const switch (opcode_) { case OpCode::GET_EXCEPTION: case OpCode::STATE_SPLIT: - case OpCode::CHECK_AND_CONVERT: return true; default: return false; @@ -318,6 +319,15 @@ const GateMetaData* GateMetaBuilder::NAME(bool value) \ GATE_META_DATA_LIST_WITH_BOOL(DECLARE_GATE_META) #undef DECLARE_GATE_META +#define DECLARE_GATE_META_WITH_BOOL_VALUE_IN(NAME, OP, R, S, D, V) \ +const GateMetaData* GateMetaBuilder::NAME(size_t value, bool flag) \ +{ \ + auto meta = new (chunk_) BoolMetaData(OpCode::OP, R, S, D, V, flag); \ + return meta; \ +} +GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(DECLARE_GATE_META_WITH_BOOL_VALUE_IN) +#undef DECLARE_GATE_META_WITH_BOOL_VALUE_IN + #define DECLARE_GATE_META(NAME, OP, R, S, D, V) \ const GateMetaData* GateMetaBuilder::NAME(size_t value) \ { \ @@ -386,6 +396,16 @@ const GateMetaData* GateMetaBuilder::NAME(uint64_t value, uint64_t pcOffset) GATE_META_DATA_LIST_WITH_PC_OFFSET(DECLARE_GATE_META) #undef DECLARE_GATE_META +#define DECLARE_GATE_META_FOR_CALL(NAME, OP, R, S, D, V) \ +const GateMetaData* GateMetaBuilder::NAME(uint64_t value, uint64_t pcOffset, bool noGC) \ +{ \ + auto meta = new (chunk_) TypedCallMetaData(OpCode::OP, R, S, D, value, pcOffset, noGC); \ + meta->SetKind(GateMetaData::Kind::TYPED_CALL); \ + return meta; \ +} +GATE_META_DATA_LIST_FOR_CALL(DECLARE_GATE_META_FOR_CALL) +#undef DECLARE_GATE_META_FOR_CALL + #define DECLARE_GATE_META(NAME, OP, R, S, D, V) \ const GateMetaData* GateMetaBuilder::NAME(uint64_t pcOffset) const \ { \ diff --git a/ecmascript/compiler/gate_meta_data.h b/ecmascript/compiler/gate_meta_data.h index 3e71ef9afcdce394b065c57ff5c59d3bc3f48a6e..bc9784c490d45ea920a656a64809aa6c4bb50531 100644 --- a/ecmascript/compiler/gate_meta_data.h +++ b/ecmascript/compiler/gate_meta_data.h @@ -23,11 +23,14 @@ #include "ecmascript/mem/chunk.h" #include "ecmascript/mem/chunk_containers.h" -#include "ecmascript/pgo_profiler/pgo_profiler_type.h" +#include "ecmascript/elements.h" +#include "ecmascript/pgo_profiler/types/pgo_profiler_type.h" #include "libpandabase/macros.h" namespace panda::ecmascript::kungfu { using GateRef = int32_t; +using PGOSampleType = pgo::PGOSampleType; +using PGORWOpType = pgo::PGORWOpType; enum MachineType : uint8_t { // Bit width NOVALUE = 0, ANYVALUE, @@ -42,6 +45,15 @@ enum MachineType : uint8_t { // Bit width F64, }; +enum class TypedOpKind : uint8_t { + TYPED_BIN_OP, + TYPED_CALL_TARGET_CHECK_OP, + TYPED_UN_OP, + TYPED_JUMP_OP, + TYPED_STORE_OP, + TYPED_LOAD_OP, +}; + enum class TypedBinOp : uint8_t { TYPED_ADD = 0, TYPED_SUB, @@ -64,6 
+76,16 @@ enum class TypedBinOp : uint8_t { TYPED_EXP, }; +enum class TypedCallTargetCheckOp : uint8_t { + JSCALL_IMMEDIATE_AFTER_FUNC_DEF = 0, + JSCALL, + JSCALL_FAST, + JSCALLTHIS, + JSCALLTHIS_FAST, + JSCALLTHIS_NOGC, + JSCALLTHIS_FAST_NOGC, +}; + enum class TypedUnOp : uint8_t { TYPED_NEG = 0, TYPED_NOT, @@ -78,6 +100,23 @@ enum class TypedJumpOp : uint8_t { TYPED_JNEZ, }; +enum class BranchKind : uint8_t { + NORMAL_BRANCH = 0, + TRUE_BRANCH, + FALSE_BRANCH, + STRONG_TRUE_BRANCH, + STRONG_FALSE_BRANCH, +}; + +class BranchWeight { +public: + static constexpr uint32_t ZERO_WEIGHT = 0; + static constexpr uint32_t ONE_WEIGHT = 1; + static constexpr uint32_t WEAK_WEIGHT = 10; + static constexpr uint32_t STRONG_WEIGHT = 1000; + static constexpr uint32_t DEOPT_WEIGHT = 2000; +}; + #define GATE_META_DATA_DEOPT_REASON(V) \ V(NotInt, NOTINT) \ V(NotDouble, NOTDOUBLE) \ @@ -85,27 +124,24 @@ enum class TypedJumpOp : uint8_t { V(NotBool, NOTBOOL) \ V(NotHeapObject, NOTHEAPOBJECT) \ V(NotStableArray, NOTSARRAY) \ - V(NotI32Array, NOTI32ARRAY) \ - V(NotF32Array, NOTF32ARRAY) \ - V(NotF64Array, NOTF64ARRAY) \ + V(NotArray, NOTARRAY) \ V(NotOnHeap, NOTONHEAP) \ V(InconsistentHClass, INCONSISTENTHCLASS) \ V(NotNewObj, NOTNEWOBJ) \ - V(NotArrayIndex, NOTARRAYIDX) \ - V(NotI32ArrayIndex, NOTI32ARRAYIDX) \ - V(NotF32ArrayIndex, NOTF32ARRAYIDX) \ - V(NotF64ArrayIndex, NOTF64ARRAYIDX) \ + V(NotLegalIndex, NOTLEGALIDX) \ V(NotIncOverflow, NOTINCOV) \ V(NotDecOverflow, NOTDECOV) \ V(NotNegativeOverflow, NOTNEGOV) \ V(NotCallTarget, NOTCALLTGT) \ V(NotJSCallTarget, NOTJSCALLTGT) \ + V(CowArray, COWARRAY) \ V(DivideZero, DIVZERO) \ - V(NegativeIndex, NEGTIVEINDEX) \ - V(LargeIndex, LARGEINDEX) \ V(InlineFail, INLINEFAIL) \ V(NotJSFastCallTarget, NOTJSFASTCALLTGT) \ - V(ModZero, MODZERO) + V(LexVarIsHole, LEXVARISHOLE) \ + V(ModZero, MODZERO) \ + V(Int32Overflow, INT32OVERFLOW) \ + V(NotString, NOTSTRING) enum class DeoptType : uint8_t { NOTCHECK = 0, @@ -148,16 +184,43 @@ enum class FCmpCondition : uint8_t { enum class TypedStoreOp : uint8_t { ARRAY_STORE_ELEMENT = 0, + INT8ARRAY_STORE_ELEMENT, + UINT8ARRAY_STORE_ELEMENT, + UINT8CLAMPEDARRAY_STORE_ELEMENT, + INT16ARRAY_STORE_ELEMENT, + UINT16ARRAY_STORE_ELEMENT, INT32ARRAY_STORE_ELEMENT, + UINT32ARRAY_STORE_ELEMENT, FLOAT32ARRAY_STORE_ELEMENT, FLOAT64ARRAY_STORE_ELEMENT, + + TYPED_ARRAY_FIRST = INT8ARRAY_STORE_ELEMENT, + TYPED_ARRAY_LAST = FLOAT64ARRAY_STORE_ELEMENT, +}; + +enum class MemoryType : uint8_t { + ELEMENT_TYPE = 0, }; enum class TypedLoadOp : uint8_t { - ARRAY_LOAD_ELEMENT = 0, + ARRAY_LOAD_INT_ELEMENT = 0, + ARRAY_LOAD_DOUBLE_ELEMENT, + ARRAY_LOAD_OBJECT_ELEMENT, + ARRAY_LOAD_TAGGED_ELEMENT, + ARRAY_LOAD_HOLE_TAGGED_ELEMENT, + INT8ARRAY_LOAD_ELEMENT, + UINT8ARRAY_LOAD_ELEMENT, + UINT8CLAMPEDARRAY_LOAD_ELEMENT, + INT16ARRAY_LOAD_ELEMENT, + UINT16ARRAY_LOAD_ELEMENT, INT32ARRAY_LOAD_ELEMENT, + UINT32ARRAY_LOAD_ELEMENT, FLOAT32ARRAY_LOAD_ELEMENT, FLOAT64ARRAY_LOAD_ELEMENT, + STRING_LOAD_ELEMENT, + + TYPED_ARRAY_FIRST = INT8ARRAY_LOAD_ELEMENT, + TYPED_ARRAY_LAST = FLOAT64ARRAY_LOAD_ELEMENT, }; std::string MachineTypeToStr(MachineType machineType); @@ -187,11 +250,10 @@ std::string MachineTypeToStr(MachineType machineType); V(Int32CheckRightIsZero, INT32_CHECK_RIGHT_IS_ZERO, GateFlags::CHECKABLE, 1, 1, 1) \ V(Float64CheckRightIsZero, FLOAT64_CHECK_RIGHT_IS_ZERO, GateFlags::CHECKABLE, 1, 1, 1) \ V(ValueCheckNegOverflow, VALUE_CHECK_NEG_OVERFLOW, GateFlags::CHECKABLE, 1, 1, 1) \ - V(NegativeIndexCheck, NEGATIVE_INDEX_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ 
- V(LargeIndexCheck, LARGE_INDEX_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ V(OverflowCheck, OVERFLOW_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ V(Int32UnsignedUpperBoundCheck, INT32_UNSIGNED_UPPER_BOUND_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ - V(Int32DivWithCheck, INT32_DIV_WITH_CHECK, GateFlags::CHECKABLE, 1, 1, 2) + V(Int32DivWithCheck, INT32_DIV_WITH_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ + V(LexVarIsHoleCheck, LEX_VAR_IS_HOLE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) #define UNARY_GATE_META_DATA_CACHE_LIST(V) \ V(Zext, ZEXT, GateFlags::NONE_FLAG, 0, 0, 1) \ @@ -210,55 +272,59 @@ std::string MachineTypeToStr(MachineType machineType); V(UnsignedFloatToInt, UNSIGNED_FLOAT_TO_INT, GateFlags::NONE_FLAG, 0, 0, 1) \ V(Bitcast, BITCAST, GateFlags::NONE_FLAG, 0, 0, 1) -#define IMMUTABLE_META_DATA_CACHE_LIST(V) \ - V(CircuitRoot, CIRCUIT_ROOT, GateFlags::NONE_FLAG, 0, 0, 0) \ - V(StateEntry, STATE_ENTRY, GateFlags::ROOT, 0, 0, 0) \ - V(DependEntry, DEPEND_ENTRY, GateFlags::ROOT, 0, 0, 0) \ - V(ReturnList, RETURN_LIST, GateFlags::ROOT, 0, 0, 0) \ - V(ArgList, ARG_LIST, GateFlags::ROOT, 0, 0, 0) \ - V(Return, RETURN, GateFlags::HAS_ROOT, 1, 1, 1) \ - V(ReturnVoid, RETURN_VOID, GateFlags::HAS_ROOT, 1, 1, 0) \ - V(Throw, THROW, GateFlags::CONTROL, 1, 1, 1) \ - V(OrdinaryBlock, ORDINARY_BLOCK, GateFlags::CONTROL, 1, 0, 0) \ - V(IfBranch, IF_BRANCH, GateFlags::CONTROL, 1, 0, 1) \ - V(IfTrue, IF_TRUE, GateFlags::CONTROL, 1, 0, 0) \ - V(IfFalse, IF_FALSE, GateFlags::CONTROL, 1, 0, 0) \ - V(LoopBegin, LOOP_BEGIN, GateFlags::CONTROL, 2, 0, 0) \ - V(LoopBack, LOOP_BACK, GateFlags::CONTROL, 1, 0, 0) \ - V(LoopExit, LOOP_EXIT, GateFlags::CONTROL, 1, 0, 0) \ - V(LoopExitDepend, LOOP_EXIT_DEPEND, GateFlags::FIXED, 1, 1, 0) \ - V(LoopExitValue, LOOP_EXIT_VALUE, GateFlags::FIXED, 1, 0, 1) \ - V(DependRelay, DEPEND_RELAY, GateFlags::FIXED, 1, 1, 0) \ - V(IfSuccess, IF_SUCCESS, GateFlags::CONTROL, 1, 0, 0) \ - V(IfException, IF_EXCEPTION, GateFlags::CONTROL, 1, 1, 0) \ - V(GetException, GET_EXCEPTION, GateFlags::NONE_FLAG, 1, 1, 0) \ - V(GetConstPool, GET_CONSTPOOL, GateFlags::NO_WRITE, 0, 1, 1) \ - V(GetGlobalEnv, GET_GLOBAL_ENV, GateFlags::NO_WRITE, 0, 1, 0) \ - V(StateSplit, STATE_SPLIT, GateFlags::CHECKABLE, 1, 1, 0) \ - V(Load, LOAD, GateFlags::NO_WRITE, 0, 1, 1) \ - V(Store, STORE, GateFlags::NONE_FLAG, 0, 1, 2) \ - V(TypedCallCheck, TYPED_CALL_CHECK, GateFlags::CHECKABLE, 1, 1, 3) \ - V(HeapObjectCheck, HEAP_OBJECT_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(StableArrayCheck, STABLE_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(ArrayGuardianCheck, ARRAY_GUARDIAN_CHECK, GateFlags::CHECKABLE, 1, 1, 0) \ - V(HClassStableArrayCheck, HCLASS_STABLE_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(DeoptCheck, DEOPT_CHECK, GateFlags::NO_WRITE, 1, 1, 3) \ - V(StoreProperty, STORE_PROPERTY, GateFlags::NONE_FLAG, 1, 1, 3) \ - V(StorePropertyNoBarrier, STORE_PROPERTY_NO_BARRIER, GateFlags::NONE_FLAG, 1, 1, 3) \ - V(ToLength, TO_LENGTH, GateFlags::NONE_FLAG, 1, 1, 1) \ - V(DefaultCase, DEFAULT_CASE, GateFlags::CONTROL, 1, 0, 0) \ - V(LoadArrayLength, LOAD_ARRAY_LENGTH, GateFlags::NO_WRITE, 1, 1, 1) \ - V(TypedNewAllocateThis, TYPED_NEW_ALLOCATE_THIS, GateFlags::CHECKABLE, 1, 1, 2) \ - V(TypedSuperAllocateThis, TYPED_SUPER_ALLOCATE_THIS, GateFlags::CHECKABLE, 1, 1, 2) \ - V(GetSuperConstructor, GET_SUPER_CONSTRUCTOR, GateFlags::NO_WRITE, 1, 1, 1) \ - V(UpdateHotness, UPDATE_HOTNESS, GateFlags::NO_WRITE, 1, 1, 0) \ - V(Dead, DEAD, GateFlags::NONE_FLAG, 0, 0, 0) \ - V(FrameArgs, FRAME_ARGS, GateFlags::NONE_FLAG, 0, 0, 4) \ - 
V(GetEnv, GET_ENV, GateFlags::NONE_FLAG, 0, 0, 1) \ - V(ConvertHoleAsUndefined, CONVERT_HOLE_AS_UNDEFINED, GateFlags::NO_WRITE, 1, 1, 1) \ - V(StartAllocate, START_ALLOCATE, GateFlags::NONE_FLAG, 0, 1, 0) \ - V(FinishAllocate, FINISH_ALLOCATE, GateFlags::NONE_FLAG, 0, 1, 0) \ - BINARY_GATE_META_DATA_CACHE_LIST(V) \ +#define IMMUTABLE_META_DATA_CACHE_LIST(V) \ + V(CircuitRoot, CIRCUIT_ROOT, GateFlags::NONE_FLAG, 0, 0, 0) \ + V(StateEntry, STATE_ENTRY, GateFlags::ROOT, 0, 0, 0) \ + V(DependEntry, DEPEND_ENTRY, GateFlags::ROOT, 0, 0, 0) \ + V(ReturnList, RETURN_LIST, GateFlags::ROOT, 0, 0, 0) \ + V(ArgList, ARG_LIST, GateFlags::ROOT, 0, 0, 0) \ + V(Return, RETURN, GateFlags::HAS_ROOT, 1, 1, 1) \ + V(ReturnVoid, RETURN_VOID, GateFlags::HAS_ROOT, 1, 1, 0) \ + V(Throw, THROW, GateFlags::CONTROL, 1, 1, 1) \ + V(OrdinaryBlock, ORDINARY_BLOCK, GateFlags::CONTROL, 1, 0, 0) \ + V(IfTrue, IF_TRUE, GateFlags::CONTROL, 1, 0, 0) \ + V(IfFalse, IF_FALSE, GateFlags::CONTROL, 1, 0, 0) \ + V(LoopBegin, LOOP_BEGIN, GateFlags::CONTROL, 2, 0, 0) \ + V(LoopBack, LOOP_BACK, GateFlags::CONTROL, 1, 0, 0) \ + V(LoopExit, LOOP_EXIT, GateFlags::CONTROL, 1, 0, 0) \ + V(LoopExitDepend, LOOP_EXIT_DEPEND, GateFlags::FIXED, 1, 1, 0) \ + V(LoopExitValue, LOOP_EXIT_VALUE, GateFlags::FIXED, 1, 0, 1) \ + V(DependRelay, DEPEND_RELAY, GateFlags::FIXED, 1, 1, 0) \ + V(IfSuccess, IF_SUCCESS, GateFlags::CONTROL, 1, 0, 0) \ + V(IfException, IF_EXCEPTION, GateFlags::CONTROL, 1, 1, 0) \ + V(GetException, GET_EXCEPTION, GateFlags::NONE_FLAG, 1, 1, 0) \ + V(GetConstPool, GET_CONSTPOOL, GateFlags::NO_WRITE, 0, 1, 1) \ + V(GetGlobalEnv, GET_GLOBAL_ENV, GateFlags::NO_WRITE, 0, 1, 0) \ + V(StateSplit, STATE_SPLIT, GateFlags::CHECKABLE, 1, 1, 0) \ + V(Load, LOAD, GateFlags::NO_WRITE, 0, 1, 1) \ + V(Store, STORE, GateFlags::NONE_FLAG, 0, 1, 2) \ + V(TypedCallCheck, TYPED_CALL_CHECK, GateFlags::CHECKABLE, 1, 1, 3) \ + V(HeapObjectCheck, HEAP_OBJECT_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(COWArrayCheck, COW_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(ArrayGuardianCheck, ARRAY_GUARDIAN_CHECK, GateFlags::CHECKABLE, 1, 1, 0) \ + V(EcmaStringCheck, ECMA_STRING_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(FlattenStringCheck, FLATTEN_STRING_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(DeoptCheck, DEOPT_CHECK, GateFlags::NO_WRITE, 1, 1, 3) \ + V(StoreProperty, STORE_PROPERTY, GateFlags::NONE_FLAG, 1, 1, 3) \ + V(StorePropertyNoBarrier, STORE_PROPERTY_NO_BARRIER, GateFlags::NONE_FLAG, 1, 1, 3) \ + V(ToLength, TO_LENGTH, GateFlags::NONE_FLAG, 1, 1, 1) \ + V(DefaultCase, DEFAULT_CASE, GateFlags::CONTROL, 1, 0, 0) \ + V(LoadArrayLength, LOAD_ARRAY_LENGTH, GateFlags::NO_WRITE, 1, 1, 1) \ + V(LoadStringLength, LOAD_STRING_LENGTH, GateFlags::NO_WRITE, 1, 1, 1) \ + V(TypedNewAllocateThis, TYPED_NEW_ALLOCATE_THIS, GateFlags::CHECKABLE, 1, 1, 2) \ + V(TypedSuperAllocateThis, TYPED_SUPER_ALLOCATE_THIS, GateFlags::CHECKABLE, 1, 1, 2) \ + V(GetSuperConstructor, GET_SUPER_CONSTRUCTOR, GateFlags::NO_WRITE, 1, 1, 1) \ + V(CheckSafePointAndStackOver, CHECK_SAFEPOINT_AND_STACKOVER, GateFlags::NO_WRITE, 1, 1, 0) \ + V(Dead, DEAD, GateFlags::NONE_FLAG, 0, 0, 0) \ + V(FrameArgs, FRAME_ARGS, GateFlags::NONE_FLAG, 0, 0, 4) \ + V(GetEnv, GET_ENV, GateFlags::NONE_FLAG, 0, 0, 1) \ + V(ConvertHoleAsUndefined, CONVERT_HOLE_AS_UNDEFINED, GateFlags::NO_WRITE, 1, 1, 1) \ + V(StartAllocate, START_ALLOCATE, GateFlags::NONE_FLAG, 0, 1, 0) \ + V(FinishAllocate, FINISH_ALLOCATE, GateFlags::NONE_FLAG, 0, 1, 0) \ + V(LoadGetter, LOAD_GETTER, GateFlags::NO_WRITE, 0, 1, 2) \ + 
V(LoadSetter, LOAD_SETTER, GateFlags::NO_WRITE, 0, 1, 2) \ + V(ReadSp, READSP, GateFlags::NONE_FLAG, 0, 0, 0) \ + BINARY_GATE_META_DATA_CACHE_LIST(V) \ UNARY_GATE_META_DATA_CACHE_LIST(V) #define GATE_META_DATA_LIST_WITH_VALUE_IN(V) \ @@ -266,9 +332,7 @@ std::string MachineTypeToStr(MachineType machineType); V(FrameValues, FRAME_VALUES, GateFlags::NONE_FLAG, 0, 0, value) \ V(RuntimeCall, RUNTIME_CALL, GateFlags::NONE_FLAG, 0, 1, value) \ V(RuntimeCallWithArgv, RUNTIME_CALL_WITH_ARGV, GateFlags::NONE_FLAG, 0, 1, value) \ - V(NoGcRuntimeCall, NOGC_RUNTIME_CALL, GateFlags::NONE_FLAG, 0, 1, value) \ - V(CallOptimized, CALL_OPTIMIZED, GateFlags::NONE_FLAG, 0, 1, value) \ - V(FastCallOptimized, FAST_CALL_OPTIMIZED, GateFlags::NONE_FLAG, 0, 1, value) \ + V(NoGcRuntimeCall, NOGC_RUNTIME_CALL, GateFlags::NO_WRITE, 0, 1, value) \ V(Call, CALL, GateFlags::NONE_FLAG, 0, 1, value) \ V(BytecodeCall, BYTECODE_CALL, GateFlags::NONE_FLAG, 0, 1, value) \ V(DebuggerBytecodeCall, DEBUGGER_BYTECODE_CALL, GateFlags::NONE_FLAG, 0, 1, value) \ @@ -278,7 +342,9 @@ std::string MachineTypeToStr(MachineType machineType); #define GATE_META_DATA_LIST_WITH_PC_OFFSET(V) \ V(TypedCallBuiltin, TYPED_CALL_BUILTIN, GateFlags::NO_WRITE, 1, 1, value) \ - V(Construct, CONSTRUCT, GateFlags::NONE_FLAG, 1, 1, value) \ + V(Construct, CONSTRUCT, GateFlags::NONE_FLAG, 1, 1, value) + +#define GATE_META_DATA_LIST_FOR_CALL(V) \ V(TypedCall, TYPEDCALL, GateFlags::NONE_FLAG, 1, 1, value) \ V(TypedFastCall, TYPEDFASTCALL, GateFlags::NONE_FLAG, 1, 1, value) @@ -286,32 +352,28 @@ std::string MachineTypeToStr(MachineType machineType); V(CallGetter, CALL_GETTER, GateFlags::NONE_FLAG, 1, 1, 2) \ V(CallSetter, CALL_SETTER, GateFlags::NONE_FLAG, 1, 1, 3) -#define GATE_META_DATA_LIST_WITH_SIZE(V) \ - V(Merge, MERGE, GateFlags::CONTROL, value, 0, 0) \ - V(DependSelector, DEPEND_SELECTOR, GateFlags::FIXED, 1, value, 0) \ +#define GATE_META_DATA_LIST_WITH_SIZE(V) \ + V(Merge, MERGE, GateFlags::CONTROL, value, 0, 0) \ + V(DependSelector, DEPEND_SELECTOR, GateFlags::FIXED, 1, value, 0) \ GATE_META_DATA_LIST_WITH_VALUE_IN(V) -#define GATE_META_DATA_LIST_WITH_GATE_TYPE(V) \ - V(PrimitiveTypeCheck, PRIMITIVE_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(ObjectTypeCheck, OBJECT_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ - V(JSCallTargetFromDefineFuncCheck, JSCALLTARGET_FROM_DEFINEFUNC_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(JSCallTargetTypeCheck, JSCALLTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ - V(JSFastCallTargetTypeCheck, JSFASTCALLTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ - V(JSCallThisTargetTypeCheck, JSCALLTHISTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(JSFastCallThisTargetTypeCheck, JSFASTCALLTHISTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(TypedArrayCheck, TYPED_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ - V(LoadTypedArrayLength, LOAD_TYPED_ARRAY_LENGTH, GateFlags::NO_WRITE, 1, 1, 1) \ - V(IndexCheck, INDEX_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ - V(TypedUnaryOp, TYPED_UNARY_OP, GateFlags::NO_WRITE, 1, 1, 1) \ - V(TypedConditionJump, TYPED_CONDITION_JUMP, GateFlags::NO_WRITE, 1, 1, 1) \ - V(TypedConvert, TYPE_CONVERT, GateFlags::NO_WRITE, 1, 1, 1) \ - V(CheckAndConvert, CHECK_AND_CONVERT, GateFlags::CHECKABLE, 1, 0, 1) \ - V(Convert, CONVERT, GateFlags::NONE_FLAG, 0, 0, 1) \ - V(JSInlineTargetTypeCheck, JSINLINETARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) +#define GATE_META_DATA_LIST_WITH_GATE_TYPE(V) \ + V(PrimitiveTypeCheck, PRIMITIVE_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + 
V(TypedArrayCheck, TYPED_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(LoadTypedArrayLength, LOAD_TYPED_ARRAY_LENGTH, GateFlags::NO_WRITE, 1, 1, 1) \ + V(IndexCheck, INDEX_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ + V(TypedUnaryOp, TYPED_UNARY_OP, GateFlags::NO_WRITE, 1, 1, 1) \ + V(TypedConditionJump, TYPED_CONDITION_JUMP, GateFlags::NO_WRITE, 1, 1, 1) \ + V(TypedConvert, TYPE_CONVERT, GateFlags::NO_WRITE, 1, 1, 1) \ + V(CheckAndConvert, CHECK_AND_CONVERT, GateFlags::CHECKABLE, 1, 1, 1) \ + V(Convert, CONVERT, GateFlags::NONE_FLAG, 0, 0, 1) \ + V(JSInlineTargetTypeCheck, JSINLINETARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ + V(InlineAccessorCheck, INLINE_ACCESSOR_CHECK, GateFlags::CHECKABLE, 1, 1, 2) #define GATE_META_DATA_LIST_WITH_VALUE(V) \ V(Icmp, ICMP, GateFlags::NONE_FLAG, 0, 0, 2) \ V(Fcmp, FCMP, GateFlags::NONE_FLAG, 0, 0, 2) \ + V(IfBranch, IF_BRANCH, GateFlags::CONTROL, 1, 0, 1) \ V(Alloca, ALLOCA, GateFlags::NONE_FLAG, 0, 0, 0) \ V(SwitchBranch, SWITCH_BRANCH, GateFlags::CONTROL, 1, 0, 1) \ V(SwitchCase, SWITCH_CASE, GateFlags::CONTROL, 1, 0, 0) \ @@ -320,13 +382,22 @@ std::string MachineTypeToStr(MachineType machineType); V(StoreConstOffset, STORE_CONST_OFFSET, GateFlags::NONE_FLAG, 1, 1, 2) \ V(LoadElement, LOAD_ELEMENT, GateFlags::NO_WRITE, 1, 1, 2) \ V(StoreElement, STORE_ELEMENT, GateFlags::NONE_FLAG, 1, 1, 3) \ + V(StoreMemory, STORE_MEMORY, GateFlags::NONE_FLAG, 1, 1, 3) \ V(RestoreRegister, RESTORE_REGISTER, GateFlags::NONE_FLAG, 0, 0, 1) \ V(Constant, CONSTANT, GateFlags::NONE_FLAG, 0, 0, 0) \ V(RelocatableData, RELOCATABLE_DATA, GateFlags::NONE_FLAG, 0, 0, 0) \ + V(GetGlobalEnvObj, GET_GLOBAL_ENV_OBJ, GateFlags::NO_WRITE, 0, 1, 1) \ V(GetGlobalEnvObjHClass, GET_GLOBAL_ENV_OBJ_HCLASS, GateFlags::NO_WRITE, 0, 1, 1) \ V(GetGlobalConstantValue, GET_GLOBAL_CONSTANT_VALUE, GateFlags::NO_WRITE, 0, 1, 0) \ V(FrameState, FRAME_STATE, GateFlags::HAS_FRAME_STATE, 0, 0, 2) \ - V(CreateArray, CREATE_ARRAY, GateFlags::NONE_FLAG, 1, 1, 0) + V(CreateArray, CREATE_ARRAY, GateFlags::NONE_FLAG, 1, 1, 0) \ + V(CreateArrayWithBuffer, CREATE_ARRAY_WITH_BUFFER, GateFlags::CHECKABLE, 1, 1, 2) \ + V(RangeGuard, RANGE_GUARD, GateFlags::NO_WRITE, 1, 1, 1) \ + V(StableArrayCheck, STABLE_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(HClassStableArrayCheck, HCLASS_STABLE_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \ + V(ObjectTypeCheck, OBJECT_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \ + V(ObjectTypeCompare, OBJECT_TYPE_COMPARE, GateFlags::CHECKABLE, 1, 1, 2) \ + V(RangeCheckPredicate, RANGE_CHECK_PREDICATE, GateFlags::CHECKABLE, 1, 1, 2) #define GATE_META_DATA_LIST_WITH_ONE_PARAMETER(V) \ V(Arg, ARG, GateFlags::HAS_ROOT, 0, 0, 0) \ @@ -336,9 +407,14 @@ std::string MachineTypeToStr(MachineType machineType); #define GATE_META_DATA_LIST_WITH_BOOL(V) \ V(LoadProperty, LOAD_PROPERTY, GateFlags::NO_WRITE, 1, 1, 2) \ +#define GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(V) \ + V(CallOptimized, CALL_OPTIMIZED, GateFlags::NONE_FLAG, 0, 1, value) \ + V(FastCallOptimized, FAST_CALL_OPTIMIZED, GateFlags::NONE_FLAG, 0, 1, value) \ + #define GATE_OPCODE_LIST(V) \ V(JS_BYTECODE) \ V(TYPED_BINARY_OP) \ + V(TYPED_CALLTARGETCHECK_OP) \ V(CONSTSTRING) enum class OpCode : uint8_t { @@ -348,8 +424,10 @@ enum class OpCode : uint8_t { GATE_META_DATA_LIST_WITH_SIZE(DECLARE_GATE_OPCODE) GATE_META_DATA_LIST_WITH_ONE_PARAMETER(DECLARE_GATE_OPCODE) GATE_META_DATA_LIST_WITH_PC_OFFSET(DECLARE_GATE_OPCODE) + GATE_META_DATA_LIST_FOR_CALL(DECLARE_GATE_OPCODE) 
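(Editorial aside, not part of the patch.) The `DECLARE_GATE_OPCODE` expansion around this hunk is a conventional X-macro: each `GATE_META_DATA_LIST_*` macro takes a callback `V` and invokes it once per entry, so one list can stamp out both the `OpCode` enumerators here and the builder declarations in gate_meta_data_builder.h below. A reduced, self-contained sketch of the idiom, with illustrative names only:

```
#include <cstdint>

#define DEMO_GATE_LIST(V) \
    V(Merge)              \
    V(DependSelector)     \
    V(FrameState)

enum class DemoOpCode : uint8_t {
// Each list entry becomes one enumerator.
#define DECLARE_DEMO_OPCODE(NAME) NAME,
    DEMO_GATE_LIST(DECLARE_DEMO_OPCODE)
#undef DECLARE_DEMO_OPCODE
};

// The same list can declare one factory function per opcode,
// which is exactly what the new DECLARE_GATE_META_FOR_CALL does.
#define DECLARE_DEMO_FACTORY(NAME) void Build##NAME();
DEMO_GATE_LIST(DECLARE_DEMO_FACTORY)
#undef DECLARE_DEMO_FACTORY
```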
    GATE_META_DATA_LIST_WITH_PC_OFFSET_FIXED_VALUE(DECLARE_GATE_OPCODE)
    GATE_META_DATA_LIST_WITH_BOOL(DECLARE_GATE_OPCODE)
+   GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(DECLARE_GATE_OPCODE)
#undef DECLARE_GATE_OPCODE
#define DECLARE_GATE_OPCODE(NAME) NAME,
    GATE_OPCODE_LIST(DECLARE_GATE_OPCODE)
@@ -379,6 +457,8 @@ public:
        MUTABLE_STRING,
        JSBYTECODE,
        TYPED_BINARY_OP,
+        TYPED_CALLTARGETCHECK_OP,
+        TYPED_CALL,
    };
    GateMetaData() = default;
    GateMetaData(OpCode opcode, GateFlags flags,
@@ -487,7 +567,7 @@ public:
    bool IsOneParameterKind() const
    {
        return GetKind() == Kind::IMMUTABLE_ONE_PARAMETER || GetKind() == Kind::MUTABLE_ONE_PARAMETER ||
-            GetKind() == Kind::TYPED_BINARY_OP;
+            GetKind() == Kind::TYPED_BINARY_OP || GetKind() == Kind::TYPED_CALLTARGETCHECK_OP;
    }

    bool IsStringType() const
@@ -601,7 +681,7 @@ public:
        return static_cast<const BoolMetaData*>(meta);
    }

-    bool getBool() const
+    bool GetBool() const
    {
        return value_;
    }
@@ -644,10 +724,29 @@ public:
    {
        return opcode_;
    }
+
+    void SetElementsKind(ElementsKind kind)
+    {
+        elementsKind_ = kind;
+        elementsKinds_.emplace_back(kind);
+    }
+
+    ElementsKind GetElementsKind() const
+    {
+        return elementsKind_;
+    }
+
+    std::vector<ElementsKind> GetElementsKinds() const
+    {
+        return elementsKinds_;
+    }
+
private:
    EcmaOpcode opcode_;
    uint32_t pcOffset_;
    PGOSampleType type_;
+    ElementsKind elementsKind_ {ElementsKind::GENERIC};
+    std::vector<ElementsKind> elementsKinds_ {};
};

class OneParameterMetaData : public GateMetaData {
@@ -679,19 +778,66 @@ private:
    uint64_t value_ { 0 };
};

-class TypedBinaryMegaData : public OneParameterMetaData {
+class TypedCallMetaData : public OneParameterMetaData {
+public:
+    TypedCallMetaData(OpCode opcode, GateFlags flags, uint32_t statesIn,
+        uint16_t dependsIn, uint32_t valuesIn, uint64_t value, bool noGC)
+        : OneParameterMetaData(opcode, flags, statesIn, dependsIn, valuesIn, value),
+        noGC_(noGC)
+    {
+        SetKind(GateMetaData::Kind::TYPED_CALL);
+    }
+
+    static const TypedCallMetaData* Cast(const GateMetaData* meta)
+    {
+        meta->AssertKind(GateMetaData::Kind::TYPED_CALL);
+        return static_cast<const TypedCallMetaData*>(meta);
+    }
+
+    bool IsNoGC() const
+    {
+        return noGC_;
+    }
+private:
+    bool noGC_;
+};
+
+class TypedCallTargetCheckMetaData : public OneParameterMetaData {
+public:
+    TypedCallTargetCheckMetaData(uint32_t valuesIn, uint64_t value, TypedCallTargetCheckOp checkOp)
+        : OneParameterMetaData(OpCode::TYPED_CALLTARGETCHECK_OP, GateFlags::CHECKABLE, 1, 1, valuesIn, value),
+        checkOp_(checkOp)
+    {
+        SetKind(GateMetaData::Kind::TYPED_CALLTARGETCHECK_OP);
+    }
+
+    static const TypedCallTargetCheckMetaData* Cast(const GateMetaData* meta)
+    {
+        meta->AssertKind(GateMetaData::Kind::TYPED_CALLTARGETCHECK_OP);
+        return static_cast<const TypedCallTargetCheckMetaData*>(meta);
+    }
+
+    TypedCallTargetCheckOp GetTypedCallTargetCheckOp() const
+    {
+        return checkOp_;
+    }
+private:
+    TypedCallTargetCheckOp checkOp_;
+};
+
+class TypedBinaryMetaData : public OneParameterMetaData {
public:
-    TypedBinaryMegaData(uint64_t value, TypedBinOp binOp, PGOSampleType type)
+    TypedBinaryMetaData(uint64_t value, TypedBinOp binOp, PGOSampleType type)
        : OneParameterMetaData(OpCode::TYPED_BINARY_OP, GateFlags::NO_WRITE, 1, 1, 2, value), // 2: valuesIn
        binOp_(binOp), type_(type)
    {
        SetKind(GateMetaData::Kind::TYPED_BINARY_OP);
    }

-    static const TypedBinaryMegaData* Cast(const GateMetaData* meta)
+    static const TypedBinaryMetaData* Cast(const GateMetaData* meta)
    {
        meta->AssertKind(GateMetaData::Kind::TYPED_BINARY_OP);
-        return static_cast<const TypedBinaryMegaData*>(meta);
+        return static_cast<const TypedBinaryMetaData*>(meta);
    }

    TypedBinOp GetTypedBinaryOp() const
@@ -767,21 +913,26 @@ public:
        return static_cast<ValueType>(RightBits::Get(bitField_));
    }

-    static uint16_t ToValue(ValueType srcType, ValueType dstType)
+    bool IsConvertSupport() const
+    {
+        return ConvertSupportBits::Get(bitField_) == ConvertSupport::ENABLE;
+    }
+
+    static uint64_t ToValue(ValueType srcType, ValueType dstType, ConvertSupport support = ConvertSupport::ENABLE)
    {
        uint8_t srcVlaue = static_cast<uint8_t>(srcType);
        uint8_t dstVlaue = static_cast<uint8_t>(dstType);
-        return LeftBits::Encode(srcVlaue) | RightBits::Encode(dstVlaue);
+        return LeftBits::Encode(srcVlaue) | RightBits::Encode(dstVlaue) | ConvertSupportBits::Encode(support);
    }

private:
    using LeftBits = panda::BitField<uint8_t, 0, OPRAND_TYPE_BITS>;
    using RightBits = LeftBits::NextField<uint8_t, OPRAND_TYPE_BITS>;
+    using ConvertSupportBits = RightBits::NextField<ConvertSupport, OPRAND_TYPE_BITS>;

    uint64_t bitField_;
};

-
class GatePairTypeAccessor {
public:
    // type bits shift
@@ -839,10 +990,72 @@ private:
    uint64_t bitField_;
};

+class BranchAccessor {
+public:
+    // type bits shift
+    static constexpr int OPRAND_TYPE_BITS = 32;
+    explicit BranchAccessor(uint64_t value) : bitField_(value) {}
+
+    int32_t GetTrueWeight() const
+    {
+        return TrueWeightBits::Get(bitField_);
+    }
+
+    int32_t GetFalseWeight() const
+    {
+        return FalseWeightBits::Get(bitField_);
+    }
+
+    static uint64_t ToValue(uint32_t trueWeight, uint32_t falseWeight)
+    {
+        return TrueWeightBits::Encode(trueWeight)
+            | FalseWeightBits::Encode(falseWeight);
+    }
+private:
+    using TrueWeightBits = panda::BitField<uint32_t, 0, OPRAND_TYPE_BITS>;
+    using FalseWeightBits = TrueWeightBits::NextField<uint32_t, OPRAND_TYPE_BITS>;
+
+    uint64_t bitField_;
+};
+
+class TypedBinaryAccessor {
+public:
+    // type bits shift
+    static constexpr int OPRAND_TYPE_BITS = 32;
+    explicit TypedBinaryAccessor(uint64_t value) : bitField_(value) {}
+    explicit TypedBinaryAccessor(GateType gate, TypedBinOp binOp)
+    {
+        bitField_ = TypedValueBits::Encode(gate.Value()) | TypedBinOpBits::Encode(binOp);
+    }
+
+    GateType GetTypeValue() const
+    {
+        return GateType(TypedValueBits::Get(bitField_));
+    }
+
+    TypedBinOp GetTypedBinOp() const
+    {
+        return TypedBinOpBits::Get(bitField_);
+    }
+
+    uint64_t ToValue() const
+    {
+        return bitField_;
+    }
+
+private:
+    using TypedValueBits = panda::BitField<uint32_t, 0, OPRAND_TYPE_BITS>;
+    using TypedBinOpBits = TypedValueBits::NextField<TypedBinOp, OPRAND_TYPE_BITS>;
+
+    uint64_t bitField_;
+};
+
class TypedJumpAccessor {
public:
    // type bits shift
    static constexpr int OPRAND_TYPE_BITS = 32;
+    static constexpr int JUMP_OP_BITS = 8;
+    static constexpr int BRANCH_KIND_BITS = 8;
    explicit TypedJumpAccessor(uint64_t value) : bitField_(value) {}

    GateType GetTypeValue() const
@@ -855,15 +1068,22 @@ public:
        return TypedJumpOpBits::Get(bitField_);
    }

-    static uint64_t ToValue(GateType typeValue, TypedJumpOp jumpOp)
+    BranchKind GetBranchKind() const
+    {
+        return BranchKindBits::Get(bitField_);
+    }
+
+    static uint64_t ToValue(GateType typeValue, TypedJumpOp jumpOp, BranchKind branchKind)
    {
        return TypedValueBits::Encode(typeValue.Value())
-            | TypedJumpOpBits::Encode(jumpOp);
+            | TypedJumpOpBits::Encode(jumpOp)
+            | BranchKindBits::Encode(branchKind);
    }

private:
    using TypedValueBits = panda::BitField<uint32_t, 0, OPRAND_TYPE_BITS>;
-    using TypedJumpOpBits = TypedValueBits::NextField<TypedJumpOp, OPRAND_TYPE_BITS>;
+    using TypedJumpOpBits = TypedValueBits::NextField<TypedJumpOp, JUMP_OP_BITS>;
+    using BranchKindBits = TypedJumpOpBits::NextField<BranchKind, BRANCH_KIND_BITS>;

    uint64_t bitField_;
};
@@ -922,6 +1142,94 @@ private:
    uint64_t bitField_;
};
+
+class ArrayMetaDataAccessor {
+public:
+    enum Mode : uint8_t {
+        CREATE = 0,
+        LOAD_ELEMENT,
+        STORE_ELEMENT,
+        LOAD_LENGTH
+    };
+
+    static constexpr int BITS_SIZE = 8;
+    static constexpr int ARRAY_LENGTH_BITS_SIZE = 32;
+    explicit ArrayMetaDataAccessor(uint64_t value) : bitField_(value) {}
+    explicit ArrayMetaDataAccessor(ElementsKind kind, Mode mode, uint32_t length = 0)
+    {
+        bitField_ = ElementsKindBits::Encode(kind) | ModeBits::Encode(mode) | ArrayLengthBits::Encode(length);
+    }
+
+    ElementsKind GetElementsKind() const
+    {
+        return ElementsKindBits::Get(bitField_);
+    }
+
+    void SetArrayLength(uint32_t length)
+    {
+        bitField_ = ArrayLengthBits::Update(bitField_, length);
+    }
+
+    uint32_t GetArrayLength() const
+    {
+        return ArrayLengthBits::Get(bitField_);
+    }
+
+    bool IsLoadElement() const
+    {
+        return GetMode() == Mode::LOAD_ELEMENT;
+    }
+
+    uint64_t ToValue() const
+    {
+        return bitField_;
+    }
+
+private:
+    Mode GetMode() const
+    {
+        return ModeBits::Get(bitField_);
+    }
+
+    using ElementsKindBits = panda::BitField<ElementsKind, 0, BITS_SIZE>;
+    using ModeBits = ElementsKindBits::NextField<Mode, BITS_SIZE>;
+    using ArrayLengthBits = ModeBits::NextField<uint32_t, ARRAY_LENGTH_BITS_SIZE>;
+
+    uint64_t bitField_;
+};
+
+class ObjectTypeAccessor {
+public:
+    static constexpr int TYPE_BITS_SIZE = 32;
+    static constexpr int IS_HEAP_OBJECT_BIT_SIZE = 1;
+
+    explicit ObjectTypeAccessor(uint64_t value) : bitField_(value) {}
+    explicit ObjectTypeAccessor(GateType type, bool isHeapObject = false)
+    {
+        bitField_ = TypeBits::Encode(type.Value()) | IsHeapObjectBit::Encode(isHeapObject);
+    }
+
+    GateType GetType() const
+    {
+        return GateType(TypeBits::Get(bitField_));
+    }
+
+    bool IsHeapObject() const
+    {
+        return IsHeapObjectBit::Get(bitField_);
+    }
+
+    uint64_t ToValue() const
+    {
+        return bitField_;
+    }
+
+private:
+    using TypeBits = panda::BitField<uint32_t, 0, TYPE_BITS_SIZE>;
+    using IsHeapObjectBit = TypeBits::NextField<bool, IS_HEAP_OBJECT_BIT_SIZE>;
+
+    uint64_t bitField_;
+};
} // namespace panda::ecmascript::kungfu
#endif // ECMASCRIPT_COMPILER_GATE_META_DATA_H
diff --git a/ecmascript/compiler/gate_meta_data_builder.h b/ecmascript/compiler/gate_meta_data_builder.h
index 972454ecccfe66daeabe32f0448cece34964e303..97919e8f5fbd78c27974dc0bcbd68fc742a38e0b 100644
--- a/ecmascript/compiler/gate_meta_data_builder.h
+++ b/ecmascript/compiler/gate_meta_data_builder.h
@@ -112,6 +112,11 @@ public:
    GATE_META_DATA_LIST_WITH_PC_OFFSET(DECLARE_GATE_META)
#undef DECLARE_GATE_META

+#define DECLARE_GATE_META_FOR_CALL(NAME, OP, R, S, D, V) \
+    const GateMetaData* NAME(uint64_t value, uint64_t pcOffset, bool noGC);
+    GATE_META_DATA_LIST_FOR_CALL(DECLARE_GATE_META_FOR_CALL)
+#undef DECLARE_GATE_META_FOR_CALL
+
#define DECLARE_GATE_META(NAME, OP, R, S, D, V) \
    const GateMetaData* NAME(uint64_t pcOffset) const;
    GATE_META_DATA_LIST_WITH_PC_OFFSET_FIXED_VALUE(DECLARE_GATE_META)
@@ -122,6 +127,11 @@ public:
    GATE_META_DATA_LIST_WITH_BOOL(DECLARE_GATE_META)
#undef DECLARE_GATE_META

+#define DECLARE_GATE_META_WITH_BOOL_VALUE_IN(NAME, OP, R, S, D, V) \
+    const GateMetaData* NAME(size_t value, bool flag);
+    GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(DECLARE_GATE_META_WITH_BOOL_VALUE_IN)
+#undef DECLARE_GATE_META_WITH_BOOL_VALUE_IN
+
    explicit GateMetaBuilder(Chunk* chunk);

    const GateMetaData* JSBytecode(size_t valuesIn, EcmaOpcode opcode, uint32_t pcOffset, GateFlags flags)
@@ -131,7 +141,12 @@ public:
    const GateMetaData* TypedBinaryOp(uint64_t value, TypedBinOp binOp, PGOSampleType type)
    {
-        return new (chunk_) TypedBinaryMegaData(value, binOp, type);
+        return new (chunk_) TypedBinaryMetaData(value, binOp, type);
    }
+
+    const GateMetaData* TypedCallTargetCheckOp(uint32_t numIns, uint64_t value, TypedCallTargetCheckOp checkOp)
+    {
+        return new (chunk_) TypedCallTargetCheckMetaData(numIns, value, checkOp);
+    }

    const GateMetaData* Nop()
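(Editorial aside.) The graph_editor.cpp diff that follows introduces `EliminateRedundantPhi`, a standard worklist cleanup: a value selector (phi) whose inputs, once self-references through loop back-edges are ignored, all name the same value contributes nothing and can be replaced by that value; rewiring its uses may expose further redundant phis, hence the queue. A minimal sketch of the core test, using illustrative types rather than the engine's `GateAccessor` API:

```
#include <vector>

struct Phi {
    int self;                 // id of the phi itself
    std::vector<int> inputs;  // ids of its value inputs
};

// Returns the unique non-self input if the phi is redundant, or -1.
int RedundantPhiTarget(const Phi& phi)
{
    int unique = -1;
    for (int in : phi.inputs) {
        if (in == phi.self) {
            continue;  // self-cycle via a loop back-edge
        }
        if (unique == -1) {
            unique = in;
        } else if (in != unique) {
            return -1;  // two distinct inputs: the phi is meaningful
        }
    }
    return unique;
}
```

The pass in the patch additionally keeps phis with no remaining uses on the deletion list and re-enqueues the phi users of every rewritten phi, iterating to a fixed point.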
diff --git a/ecmascript/compiler/graph_editor.cpp b/ecmascript/compiler/graph_editor.cpp
index 01e0d93318159974928f6267ef22758608e58e93..cd928a7f872c7effb234c70c0c5b7e444f929af8 100644
--- a/ecmascript/compiler/graph_editor.cpp
+++ b/ecmascript/compiler/graph_editor.cpp
@@ -26,6 +26,12 @@ void GraphEditor::RemoveDeadState(Circuit* circuit, GateRef gate)
    editor.RemoveGate();
}

+void GraphEditor::EliminateRedundantPhi(Circuit* circuit)
+{
+    GraphEditor editor(circuit);
+    editor.EliminatePhi();
+}
+
void GraphEditor::ReplaceGate(GateRef gate)
{
    auto uses = acc_.Uses(gate);
@@ -108,4 +114,57 @@ void GraphEditor::PropagateMerge(const Edge& edge)
        acc_.DecreaseIn(gate, edge.GetIndex());
    }
}
-} // namespace panda::ecmascript::kungfu
\ No newline at end of file
+
+void GraphEditor::EliminatePhi()
+{
+    std::vector<GateRef> gateList;
+    acc_.GetAllGates(gateList);
+    std::queue<GateRef> workList;
+    std::set<GateRef> inList;
+    for (auto gate : gateList) {
+        if (acc_.IsValueSelector(gate)) {
+            workList.push(gate);
+            inList.insert(gate);
+        }
+    }
+
+    while (!workList.empty()) {
+        auto cur = workList.front();
+        workList.pop();
+        ASSERT(acc_.IsValueSelector(cur));
+        GateRef first = acc_.GetValueIn(cur, 0);
+        auto use = acc_.Uses(cur);
+        bool sameIns = true;
+        bool selfUse = first == cur;
+        bool noUses = use.begin() == use.end();
+        auto valueNum = acc_.GetNumValueIn(cur);
+        for (size_t i = 1; i < valueNum; ++i) {
+            GateRef input = acc_.GetValueIn(cur, i);
+            if (input != first) {
+                sameIns = false;
+            }
+            if (input == cur) {
+                ASSERT(acc_.IsLoopHead(acc_.GetState(cur)));
+                selfUse = true;
+            }
+        }
+        if ((!sameIns) && (!selfUse) && (!noUses)) {
+            inList.erase(cur);
+            continue;
+        }
+        for (auto it = use.begin(); it != use.end(); ++it) {
+            if (((*it) == cur) || (!acc_.IsValueSelector(*it)) || inList.count(*it)) {
+                // selfUse or notPhi or inListPhi
+                continue;
+            }
+            workList.push(*it);
+            inList.insert(*it);
+        }
+        acc_.UpdateAllUses(cur, first);
+    }
+    for (auto phi : inList) {
+        ASSERT(acc_.IsValueSelector(phi));
+        acc_.DeleteGate(phi);
+    }
+}
+} // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/graph_editor.h b/ecmascript/compiler/graph_editor.h
index fb30ed7c0848fc543899568ae433098d54c807d8..95a8883ca188f81926d209d051d7cbc25a2d81ec 100644
--- a/ecmascript/compiler/graph_editor.h
+++ b/ecmascript/compiler/graph_editor.h
@@ -31,11 +31,13 @@ public:
    ~GraphEditor() = default;

    static void RemoveDeadState(Circuit* circuit, GateRef gate);
+    static void EliminateRedundantPhi(Circuit* circuit);

private:
    void ReplaceGate(GateRef gate);
    void RemoveGate();
    void PropagateGate(const Edge& edge);
    void PropagateMerge(const Edge& edge);
+    void EliminatePhi();

    Circuit *circuit_ {nullptr};
    GateAccessor acc_;
@@ -43,4 +45,4 @@ private:
    ChunkVector<Edge> workList_;
};
} // panda::ecmascript::kungfu
-#endif // ECMASCRIPT_COMPILER_GRAPH_VISITOR_H
\ No newline at end of file
+#endif // ECMASCRIPT_COMPILER_GRAPH_VISITOR_H
diff --git a/ecmascript/compiler/graph_linearizer.cpp b/ecmascript/compiler/graph_linearizer.cpp
index 92c219ead17ddd765b31fdeb39726364d3b853c4..a3116c4c4de18471e64dd315bb8f6e35f8ef18a4 100644
--- a/ecmascript/compiler/graph_linearizer.cpp
+++ b/ecmascript/compiler/graph_linearizer.cpp
@@ -22,6 +22,7 @@ void GraphLinearizer::Run(ControlFlowGraph &result)
{
    LinearizeGraph();
    LinearizeRegions(result);
+
    if (IsLogEnabled()) {
        LOG_COMPILER(INFO) << "";
        LOG_COMPILER(INFO) << "\033[34m"
@@ -60,7 +61,7 @@ public:
            auto state = acc_.GetState(fixedGate);
            auto region = linearizer_->FindPredRegion(state);
            linearizer_->AddFixedGateToRegion(fixedGate, region);
-            linearizer_->ScheduleGate(fixedGate, region);
+            linearizer_->BindGate(fixedGate, region);
        }
    }

@@ -102,9 +103,14 @@ public:
                case OpCode::SWITCH_CASE:
                case OpCode::STATE_ENTRY:
                case OpCode::IF_EXCEPTION:
-                case OpCode::IF_SUCCESS:
+                case OpCode::IF_SUCCESS: {
                    linearizer_->CreateGateRegion(gate);
+                    if (linearizer_->onlyBB_) {
+                        GateRegion* region = linearizer_->GateToRegion(gate);
+                        currentRegion_ = region;
+                    }
                    break;
+                }
                case OpCode::LOOP_BACK:
                case OpCode::IF_BRANCH:
                case OpCode::SWITCH_BRANCH:
@@ -118,8 +124,14 @@ public:
                        endStateList_.emplace_back(gate);
                    }
                    break;
-                default:
+                default: {
+                    if (linearizer_->onlyBB_) {
+                        auto& info = linearizer_->GetGateInfo(gate);
+                        info.region = currentRegion_;
+                        linearizer_->BindGate(gate, currentRegion_);
+                    }
                    break;
+                }
            }
        }
    }
@@ -151,6 +163,7 @@ private:
    ChunkDeque<GateRef> pendingList_;
    ChunkVector<GateRef> endStateList_;
    GateAccessor acc_;
+    GateRegion* currentRegion_;
    bool scheduleLIR_;
};

@@ -327,6 +340,7 @@ public:
        ComputeLoopInfo();
        ComputeLoopExit();
        ComputeLoopHeader();
+        ComputeLoopDepth();
        if (linearizer_->IsLogEnabled()) {
            for (size_t i = 0; i < numLoops_; i++) {
                auto& loopInfo = loops_[i];
@@ -508,6 +522,25 @@ public:
        }
    }

+    void ComputeLoopDepth()
+    {
+        auto size = linearizer_->regionList_.size();
+        for (size_t cur = 0; cur < size; cur++) {
+            GateRegion* region = linearizer_->regionList_[cur];
+            int loopDepth = 0;
+            int innerLoopIndex = -1;
+            for (int i = numLoops_ - 1; i >= 0; i--) {
+                auto& loopInfo = loops_[i];
+                if (loopInfo.loopBodys->TestBit(cur)) {
+                    loopDepth++;
+                    innerLoopIndex = i;
+                }
+            }
+            region->SetLoopDepth(loopDepth);
+            region->SetInnerLoopIndex(innerLoopIndex);
+        }
+    }
+
    bool CheckRegionDomLoopExist(GateRegion* region, LoopInfo& loopInfo)
    {
        if (loopInfo.loopExits == nullptr) {
@@ -641,7 +674,7 @@ public:
            while (!pendingList_.empty()) {
                auto curGate = pendingList_.back();
                pendingList_.pop_back();
-                VisitScheduleGate(curGate);
+                ComputeLowerBoundAndScheduleGate(curGate);
            }
        }
    }
@@ -669,7 +702,7 @@ public:
        }
    }

-    void VisitScheduleGate(GateRef curGate)
+    void ComputeLowerBoundAndScheduleGate(GateRef curGate)
    {
        auto& curInfo = linearizer_->GetGateInfo(curGate);
        if (!curInfo.IsSchedulable() ||
@@ -716,7 +749,7 @@ public:
            }
        }
        ASSERT(!linearizer_->IsScheduled(gate));
-        linearizer_->ScheduleGate(gate, region);
+        linearizer_->BindGate(gate, region);
    }

    GateRegion* GetCommonDominatorOfAllUses(GateRef curGate)
@@ -760,7 +793,7 @@ public:
    {
        for (auto gate : fixedGateList_) {
            GateRegion* region = linearizer_->GateToRegion(gate);
-            linearizer_->ScheduleGate(gate, region);
+            linearizer_->BindGate(gate, region);
        }
#ifndef NDEBUG
        Verify();
#endif
@@ -800,11 +833,13 @@ void GraphLinearizer::LinearizeGraph()
        LoopInfoBuilder loopInfoBuilder(this, chunk_);
        loopInfoBuilder.Run();
    }
-    GateScheduler scheduler(this);
-    scheduler.Prepare();
-    scheduler.ScheduleUpperBound();
-    scheduler.ScheduleFloatingGate();
-    scheduler.ScheduleFixedGate();
+    if (!onlyBB_) {
+        GateScheduler scheduler(this);
+        scheduler.Prepare();
+        scheduler.ScheduleUpperBound();
+        scheduler.ScheduleFloatingGate();
+        scheduler.ScheduleFixedGate();
+    }
}

void GraphLinearizer::CreateGateRegion(GateRef gate)
@@ -822,18 +857,99 @@ void GraphLinearizer::CreateGateRegion(GateRef gate)

void GraphLinearizer::LinearizeRegions(ControlFlowGraph &result)
{
+    size_t liveNum = OptimizeCFG();
+
    ASSERT(result.size() == 0);
-    result.resize(regionList_.size());
+    result.resize(liveNum);
    auto uses = acc_.Uses(acc_.GetArgRoot());
    for (auto useIt = uses.begin(); useIt != uses.end(); useIt++) {
        regionList_.front()->gateList_.emplace_back(*useIt);
    }
+    size_t i = 0;
+    for (size_t id = 0; id < regionList_.size(); id++) {
+        GateRegion* r = regionList_[id];
+        if (r->IsDead()) {
+            continue;
+        }
+        auto& gates = r->GetGates();
+        auto& bb = result[i];
+        bb.insert(bb.end(), gates.begin(), gates.end());
+        i++;
+    }
+}
+
+bool GateRegion::IsSimple(GateAccessor *acc) const
+{
+    for (auto g : gateList_) {
+        bool isSimple = acc->IsSimpleState(g);
+        bool complexOut = HasComplexOuts();
+        if (!isSimple || complexOut) {
+            return false;
+        }
+    }
+    return true;
+}
+
+size_t GraphLinearizer::OptimizeControls(GateRegion *region)
+{
+    size_t deads = 0;
+    GateRegion* target = region;
+    do {
+        GateRegion* succ = target->GetSimpleSuccRegion();
+        if (succ == nullptr) {
+            break;
+        }
+        MoveAndClear(target, succ);
+        target = succ;
+        deads++;
+    } while (target->IsSimple(&acc_));
+    return deads;
+}
+
+void GraphLinearizer::MoveAndClear(GateRegion* from, GateRegion* to)
+{
+    ASSERT(from != to);
+    ASSERT(to->GetPreds().size() == 1);
+    for (GateRef g: from->GetGates()) {
+        ASSERT(acc_.IsSimpleState(g));
+        OpCode op = acc_.GetOpCode(g);
+        switch (op) {
+            case OpCode::IF_TRUE:
+            case OpCode::IF_FALSE:
+            case OpCode::SWITCH_CASE:
+            case OpCode::DEFAULT_CASE:
+            case OpCode::LOOP_BACK:
+            case OpCode::ORDINARY_BLOCK:
+            case OpCode::MERGE:
+            case OpCode::VALUE_SELECTOR:
+                to->AddGate(g);
+                break;
+            default:
+                break;
+        }
+    }
+    for (auto p : from->GetPreds()) {
+        p->ReplaceSucc(from, to);
+    }
+    to->RemovePred(from);
+    from->SetDead();
+#ifndef NDEBUG
+    from->Clear();
+#endif
+}
+
+size_t GraphLinearizer::OptimizeCFG()
+{
+    size_t liveNum = regionList_.size();
    for (size_t i = 0; i < regionList_.size(); i++) {
-        auto region = regionList_[i];
-        auto &gateList = region->gateList_;
-        result[i].insert(result[i].end(), gateList.begin(), gateList.end());
+        GateRegion* src = regionList_[i];
+        if (!src->IsDead() && src->IsSimple(&acc_)) {
+            size_t dead = OptimizeControls(src);
+            liveNum -= dead;
+        }
    }
+    return liveNum;
}

GateRegion* GraphLinearizer::FindPredRegion(GateRef input)
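(Editorial aside.) `OptimizeCFG` above walks the region list and repeatedly folds a "simple" block forward into its successor; the legality condition is the classic straight-line one. A sketch of that condition over illustrative types, not the engine's `GateRegion`:

```
#include <vector>

struct Region {
    std::vector<Region*> preds;
    std::vector<Region*> succs;
    bool dead = false;
};

// Mirrors GetSimpleSuccRegion(): the sole successor is mergeable
// only when this block is that successor's only predecessor.
Region* SimpleSucc(const Region* r)
{
    if (r->succs.size() == 1 && r->succs[0]->preds.size() == 1) {
        return r->succs[0];
    }
    return nullptr;
}
```

Each successful merge marks the emptied block dead rather than erasing it, which is why `LinearizeRegions` first counts live regions and `PrintGraph` (below) now skips dead blocks.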
@@ -863,12 +979,16 @@ GateRegion* GraphLinearizer::GetCommonDominator(GateRegion* left, GateRegion* ri
void GraphLinearizer::PrintGraph(const char* title)
{
    LOG_COMPILER(INFO) << "======================== " << title << " ========================";
+    int bbIdx = 0;
    for (size_t i = 0; i < regionList_.size(); i++) {
        auto bb = regionList_[i];
+        if (bb->IsDead()) {
+            continue;
+        }
        auto front = bb->gateList_.front();
        auto opcode = acc_.GetOpCode(front);
        auto loopHeadId = bb->loopHead_ != nullptr ? bb->loopHead_->id_ : 0;
-        LOG_COMPILER(INFO) << "B" << bb->id_ << ": " << "depth: [" << bb->depth_ << "] "
+        LOG_COMPILER(INFO) << "B" << bb->id_ << "_LB" << bbIdx << ": " << "depth: [" << bb->depth_ << "] "
                           << opcode << "(" << acc_.GetId(front) << ") "
                           << "IDom B" << bb->iDominator_->id_ << " loop Header: " << loopHeadId;
        std::string log("\tPreds: ");
@@ -885,6 +1005,7 @@
            acc_.Print(*it);
        }
        LOG_COMPILER(INFO) << "";
+        bbIdx++;
    }
}
} // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/graph_linearizer.h b/ecmascript/compiler/graph_linearizer.h
index da5a16351aeadcf1fc0455125b27a5d0a025f035..9f8a60aa03158295c0d5b71c71e7eeade596a40e 100644
--- a/ecmascript/compiler/graph_linearizer.h
+++ b/ecmascript/compiler/graph_linearizer.h
@@ -75,11 +75,96 @@ public:
        return state_;
    }

+    void SetDead()
+    {
+        stateKind_ = StateKind::DEAD;
+    }
+
+    bool IsDead() const
+    {
+        return stateKind_ == StateKind::DEAD;
+    }
+
+    bool IsSimple(GateAccessor *acc) const;
+
+    bool HasComplexOuts() const
+    {
+        return succs_.size() > 1;
+    }
+
+    GateRegion* GetSimpleSuccRegion() const
+    {
+        if (succs_.size() == 1) {
+            GateRegion* dst = succs_[0];
+            if (dst->GetPreds().size() == 1) {
+                return dst;
+            }
+        }
+        return nullptr;
+    }
+
+    void ReplaceSucc(GateRegion* oldSucc, GateRegion* newSucc)
+    {
+        for (size_t i = 0; i < succs_.size(); i++) {
+            if (succs_[i] == oldSucc) {
+                succs_[i] = newSucc;
+            }
+        }
+        newSucc->AddPred(this);
+    }
+
+    bool RemovePred(GateRegion* removedRegion)
+    {
+        for (auto it = preds_.begin(); it != preds_.end(); it++) {
+            if (*it == removedRegion) {
+                preds_.erase(it);
+                return true;
+            }
+        }
+        return false;
+    }
+
+    void AddPred(GateRegion* r)
+    {
+        for (auto p : preds_) {
+            if (p == r) {
+                return;
+            }
+        }
+        preds_.emplace_back(r);
+    }
+
+    void AddGates(ChunkVector<GateRef>& gates)
+    {
+        gateList_.insert(gateList_.end(), gates.begin(), gates.end());
+    }
+
+    ChunkVector<GateRef>& GetGates()
+    {
+        return gateList_;
+    }
+
+    ChunkVector<GateRegion*>& GetPreds()
+    {
+        return preds_;
+    }
+
    size_t GetId() const
    {
        return id_;
    }

+    void Clear()
+    {
+        id_ = 0;
+        depth_ = INVALID_DEPTH;
+        iDominator_ = nullptr;
+        gateList_.clear();
+        preds_.clear();
+        succs_.clear();
+        dominatedRegions_.clear();
+    }
+
    void SetLoopNumber(size_t loopNumber)
    {
        loopNumber_ = static_cast<int32_t>(loopNumber);
@@ -95,15 +180,53 @@ public:
        return loopNumber_ >= 0;
    }

+    GateRegion* GetDominator()
+    {
+        return iDominator_;
+    }
+
+    ChunkVector<GateRegion*>& GetDominatedRegions()
+    {
+        return dominatedRegions_;
+    }
+
+    int32_t GetDepth()
+    {
+        return depth_;
+    }
+
+    void SetLoopDepth(size_t loopDepth)
+    {
+        loopDepth_ = loopDepth;
+    }
+
+    size_t GetLoopDepth()
+    {
+        return loopDepth_;
+    }
+
+    void SetInnerLoopIndex(size_t innerLoopIndex)
+    {
+        innerLoopIndex_ = innerLoopIndex;
+    }
+
+    int GetInnerLoopIndex()
+    {
+        return innerLoopIndex_;
+    }
+
private:
    enum StateKind {
        BRANCH,
        MERGE,
        LOOP_HEAD,
        OTHER,
+        DEAD
    };
    static constexpr int32_t INVALID_DEPTH = -1;
    size_t id_ {0};
+    size_t loopDepth_ {0};    // the loop nesting level of this block
+    int innerLoopIndex_ {-1}; // number of the innermost loop of this block
    int32_t depth_ {INVALID_DEPTH};
    GateRegion* iDominator_ {nullptr};
    GateRegion* loopHead_ {nullptr};
@@ -114,6 +237,7 @@ private:
    GateRef state_ {Circuit::NullGate()};
    StateKind stateKind_ {StateKind::OTHER};
    int32_t loopNumber_ {INVALID_DEPTH};
+    friend class ArrayBoundsCheckElimination;
    friend class CFGBuilder;
    friend class GateScheduler;
    friend class ImmediateDominatorsGenerator;
@@ -126,10 +250,10 @@ class GraphLinearizer {
public:
    using ControlFlowGraph = std::vector<std::vector<GateRef>>;

-    GraphLinearizer(Circuit *circuit, bool enableLog, const std::string& name, Chunk* chunk)
+    GraphLinearizer(Circuit *circuit, bool enableLog, const std::string& name, Chunk* chunk, bool onlyBB = false)
        : enableLog_(enableLog), methodName_(name), chunk_(chunk), circuit_(circuit), acc_(circuit), gateIdToGateInfo_(chunk),
-        regionList_(chunk), regionRootList_(chunk) {}
+        regionList_(chunk), regionRootList_(chunk), onlyBB_(onlyBB) {}

    void Run(ControlFlowGraph &result);
private:
@@ -240,7 +364,7 @@
        regionRootList_.emplace_back(gate);
    }

-    void ScheduleGate(GateRef gate, GateRegion* region)
+    void BindGate(GateRef gate, GateRegion* region)
    {
        GateInfo& info = GetGateInfo(gate);
        info.region = region;
@@ -283,6 +407,9 @@
        return model_ == ScheduleModel::LIR;
    }

+    size_t OptimizeCFG();
+    size_t OptimizeControls(GateRegion *region);
+    void MoveAndClear(GateRegion *from, GateRegion *to);
    void PrintGraph(const char* title);

    bool enableLog_ {false};
@@ -298,6 +425,9 @@
    ChunkVector<GateRegion*> regionList_;
    ChunkVector<GateRef> regionRootList_;
+    bool onlyBB_ {false}; // don't schedule
+
+    friend class ArrayBoundsCheckElimination;
    friend class CFGBuilder;
    friend class GateScheduler;
    friend class ImmediateDominatorsGenerator;
diff --git a/ecmascript/compiler/ic_stub_builder.cpp b/ecmascript/compiler/ic_stub_builder.cpp
index 5a8d54678b0c88a74a9c83eb12cbce7cdd749435..8912a1620e2a9c847e4eb2d1bc057c1cc63ff3f6 100644
--- a/ecmascript/compiler/ic_stub_builder.cpp
+++ b/ecmascript/compiler/ic_stub_builder.cpp
@@ -96,7 +96,8 @@ void ICStubBuilder::ValuedICAccessor(Variable* cachedHandler, Label *tryICHandle
    }
}

-void ICStubBuilder::LoadICByName(Variable* result, Label* tryFastPath, Label *slowPath, Label *success)
+void ICStubBuilder::LoadICByName(
+    Variable *result, Label *tryFastPath, Label *slowPath, Label *success, ProfileOperation callback)
{
    auto env = GetEnvironment();
    Label loadWithHandler(env);
@@ -108,7 +109,7 @@ void ICStubBuilder::LoadICByName(Variable* result, Label* tryFastPath, Label *sl
    NamedICAccessor(&cachedHandler, &loadWithHandler);
    Bind(&loadWithHandler);
    {
-        GateRef ret = LoadICWithHandler(glue_, receiver_, receiver_, *cachedHandler);
+        GateRef ret = LoadICWithHandler(glue_, receiver_, receiver_, *cachedHandler, callback);
        result->WriteVariable(ret);
        Branch(TaggedIsHole(ret), slowPath_, success_);
    }
@@ -132,7 +133,8 @@ void ICStubBuilder::StoreICByName(Variable* result, Label* tryFastPath, Label *s
    }
}

-void ICStubBuilder::LoadICByValue(Variable* result, Label* tryFastPath, Label *slowPath, Label *success)
+void ICStubBuilder::LoadICByValue(
+    Variable *result, Label *tryFastPath, Label *slowPath, Label *success, ProfileOperation callback)
{
    auto env = GetEnvironment();
    Label loadWithHandler(env);
@@ -145,13 +147,13 @@ void ICStubBuilder::LoadICByValue(Variable* result, Label* tryFastPath, Label *s
    ValuedICAccessor(&cachedHandler, &loadWithHandler, &loadElement);
    Bind(&loadElement);
    {
-        GateRef ret = LoadElement(glue_, receiver_, propKey_);
+        GateRef ret = LoadElement(glue_, receiver_, propKey_, callback);
        result->WriteVariable(ret);
        Branch(TaggedIsHole(ret), slowPath_, success_);
    }
    Bind(&loadWithHandler);
    {
-        GateRef ret = LoadICWithHandler(glue_, receiver_, receiver_, *cachedHandler);
+        GateRef ret = LoadICWithHandler(glue_, receiver_, receiver_, *cachedHandler, callback);
        result->WriteVariable(ret);
        Branch(TaggedIsHole(ret), slowPath_, success_);
    }
@@ -170,7 +172,7 @@ void ICStubBuilder::StoreICByValue(Variable* result, Label*
tryFastPath, Label * ValuedICAccessor(&cachedHandler, &storeWithHandler, &storeElement); Bind(&storeElement); { - GateRef ret = ICStoreElement(glue_, receiver_, propKey_, value_, secondValue); + GateRef ret = ICStoreElement(glue_, receiver_, propKey_, value_, secondValue, callback_); result->WriteVariable(ret); Branch(TaggedIsHole(ret), slowPath_, success_); } diff --git a/ecmascript/compiler/ic_stub_builder.h b/ecmascript/compiler/ic_stub_builder.h index c3ac0537381c380e023e726492da055f58426b88..34fefd3165fc780aefdd86b8b684ffcae0bb9dd3 100644 --- a/ecmascript/compiler/ic_stub_builder.h +++ b/ecmascript/compiler/ic_stub_builder.h @@ -44,9 +44,11 @@ public: propKey_ = propKey; } - void LoadICByName(Variable* result, Label* tryFastPath, Label *slowPath, Label *success); + void LoadICByName(Variable* result, Label* tryFastPath, Label *slowPath, Label *success, + ProfileOperation callback); void StoreICByName(Variable* result, Label* tryFastPath, Label *slowPath, Label *success); - void LoadICByValue(Variable* result, Label* tryFastPath, Label *slowPath, Label *success); + void LoadICByValue(Variable* result, Label* tryFastPath, Label *slowPath, Label *success, + ProfileOperation callback); void StoreICByValue(Variable* result, Label* tryFastPath, Label *slowPath, Label *success); void TryLoadGlobalICByName(Variable* result, Label* tryFastPath, Label *slowPath, Label *success); void TryStoreGlobalICByName(Variable* result, Label* tryFastPath, Label *slowPath, Label *success); diff --git a/ecmascript/compiler/interpreter_stub.cpp b/ecmascript/compiler/interpreter_stub.cpp index 1ff29cb93d26d5bc6018ab19f72adc8e9e380ba9..f67bbe0811610c05c3cb724585a1ae4a04269e52 100644 --- a/ecmascript/compiler/interpreter_stub.cpp +++ b/ecmascript/compiler/interpreter_stub.cpp @@ -25,6 +25,7 @@ #include "ecmascript/compiler/profiler_stub_builder.h" #include "ecmascript/compiler/stub_builder-inl.h" #include "ecmascript/compiler/variable_type.h" +#include "ecmascript/dfx/vm_thread_control.h" #include "ecmascript/global_env_constants.h" #include "ecmascript/ic/profile_type_info.h" #include "ecmascript/interpreter/interpreter_assembly.h" @@ -128,7 +129,7 @@ void name##StubBuilder::GenerateCircuitImpl(GateRef glue, GateRef sp, GateRef pc Int8(VmThreadControl::VM_NEED_SUSPENSION))), &callRuntime, &dispatch); \ Bind(&callRuntime); \ { \ - if (callback.IsEmpty()) { \ + if (!(callback).IsEmpty()) { \ varProfileTypeInfo = CallRuntime(glue, RTSTUB_ID(UpdateHotnessCounterWithProf), { func }); \ } else { \ varProfileTypeInfo = CallRuntime(glue, RTSTUB_ID(UpdateHotnessCounter), { func }); \ @@ -162,6 +163,36 @@ void name##StubBuilder::GenerateCircuitImpl(GateRef glue, GateRef sp, GateRef pc CheckPendingException(glue, sp, pc, constpool, profileTypeInfo, acc, hotnessCounter, \ res, offset) +#define METHOD_ENTRY(func) \ + auto env = GetEnvironment(); \ + METHOD_ENTRY_ENV_DEFINED(func) + +#define METHOD_ENTRY_ENV_DEFINED(func) \ + GateRef isDebugModeOffset = IntPtr(JSThread::GlueData::GetIsDebugModeOffset(env->Is32Bit())); \ + GateRef isDebugMode = Load(VariableType::BOOL(), glue, isDebugModeOffset); \ + Label isDebugModeTrue(env); \ + Label isDebugModeFalse(env); \ + Branch(isDebugMode, &isDebugModeTrue, &isDebugModeFalse); \ + Bind(&isDebugModeTrue); \ + { \ + CallRuntime(glue, RTSTUB_ID(MethodEntry), { func }); \ + Jump(&isDebugModeFalse); \ + } \ + Bind(&isDebugModeFalse) + +#define METHOD_EXIT() \ + GateRef isDebugModeOffset = IntPtr(JSThread::GlueData::GetIsDebugModeOffset(env->Is32Bit())); \ + GateRef isDebugMode = 
Load(VariableType::BOOL(), glue, isDebugModeOffset); \ + Label isDebugModeTrue(env); \ + Label isDebugModeFalse(env); \ + Branch(isDebugMode, &isDebugModeTrue, &isDebugModeFalse); \ + Bind(&isDebugModeTrue); \ + { \ + CallRuntime(glue, RTSTUB_ID(MethodExit), {}); \ + Jump(&isDebugModeFalse); \ + } \ + Bind(&isDebugModeFalse) + template void InterpreterStubBuilder::DebugPrintInstruction() { @@ -427,6 +458,7 @@ DECLARE_ASM_HANDLER(HandleCreateemptyobject) { DEFVARIABLE(varAcc, VariableType::JS_ANY(), acc); GateRef res = CallRuntime(glue, RTSTUB_ID(CreateEmptyObject), {}); + callback.ProfileCreateObject(res); varAcc = res; DISPATCH_WITH_ACC(CREATEEMPTYOBJECT); } @@ -434,16 +466,24 @@ DECLARE_ASM_HANDLER(HandleCreateemptyobject) DECLARE_ASM_HANDLER(HandleCreateemptyarrayImm8) { DEFVARIABLE(varAcc, VariableType::JS_ANY(), acc); + DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp); NewObjectStubBuilder newBuilder(this); - varAcc = newBuilder.CreateEmptyArray(glue); + GateRef frame = GetFrame(*varSp); + GateRef func = GetFunctionFromFrame(frame); + GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc)); + varAcc = newBuilder.CreateEmptyArray(glue, func, pc, profileTypeInfo, slotId, callback); DISPATCH_WITH_ACC(CREATEEMPTYARRAY_IMM8); } DECLARE_ASM_HANDLER(HandleCreateemptyarrayImm16) { DEFVARIABLE(varAcc, VariableType::JS_ANY(), acc); + DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp); NewObjectStubBuilder newBuilder(this); - varAcc = newBuilder.CreateEmptyArray(glue); + GateRef frame = GetFrame(*varSp); + GateRef func = GetFunctionFromFrame(frame); + GateRef slotId = ZExtInt16ToInt32(ReadInst16_0(pc)); + varAcc = newBuilder.CreateEmptyArray(glue, func, pc, profileTypeInfo, slotId, callback); DISPATCH_WITH_ACC(CREATEEMPTYARRAY_IMM16); } @@ -922,7 +962,7 @@ DECLARE_ASM_HANDLER(HandleInstanceofImm8V8) GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc)); GateRef target = acc; AccessObjectStubBuilder builder(this); - GateRef result = InstanceOf(glue, obj, target, profileTypeInfo, slotId); + GateRef result = InstanceOf(glue, obj, target, profileTypeInfo, slotId, callback); CHECK_PENDING_EXCEPTION(result, INT_PTR(INSTANCEOF_IMM8_V8)); } @@ -1470,7 +1510,7 @@ DECLARE_ASM_HANDLER(HandleStobjbyindexImm8V8Imm16) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = SetPropertyByIndex(glue, receiver, index, acc, false); + GateRef result = SetPropertyByIndex(glue, receiver, index, acc, false, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -1496,7 +1536,7 @@ DECLARE_ASM_HANDLER(HandleStobjbyindexImm16V8Imm16) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = SetPropertyByIndex(glue, receiver, index, acc, false); + GateRef result = SetPropertyByIndex(glue, receiver, index, acc, false, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -1521,7 +1561,7 @@ DECLARE_ASM_HANDLER(HandleWideStobjbyindexPrefV8Imm32) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = SetPropertyByIndex(glue, receiver, index, acc, false); + GateRef result = SetPropertyByIndex(glue, receiver, index, acc, false, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -1554,7 +1594,7 @@ DECLARE_ASM_HANDLER(HandleStownbyindexImm16V8Imm16) Bind(¬ClassPrototype); { // fast path - GateRef result = SetPropertyByIndex(glue, receiver, index, acc, true); // acc is value + GateRef 
result = SetPropertyByIndex(glue, receiver, index, acc, true, callback); // acc is value Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -1587,7 +1627,7 @@ DECLARE_ASM_HANDLER(HandleStownbyindexImm8V8Imm16) Bind(¬ClassPrototype); { // fast path - GateRef result = SetPropertyByIndex(glue, receiver, index, acc, true); // acc is value + GateRef result = SetPropertyByIndex(glue, receiver, index, acc, true, callback); // acc is value Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -1619,7 +1659,7 @@ DECLARE_ASM_HANDLER(HandleWideStownbyindexPrefV8Imm32) Bind(¬ClassPrototype); { // fast path - GateRef result = SetPropertyByIndex(glue, receiver, index, acc, true); // acc is value + GateRef result = SetPropertyByIndex(glue, receiver, index, acc, true, callback); // acc is value Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -2297,6 +2337,7 @@ DECLARE_ASM_HANDLER(HandleJnezImm32) DECLARE_ASM_HANDLER(HandleReturn) { auto env = GetEnvironment(); + METHOD_EXIT(); DEFVARIABLE(varPc, VariableType::NATIVE_POINTER(), pc); DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp); DEFVARIABLE(varConstpool, VariableType::JS_POINTER(), constpool); @@ -2360,6 +2401,7 @@ DECLARE_ASM_HANDLER(HandleReturn) DECLARE_ASM_HANDLER(HandleReturnundefined) { auto env = GetEnvironment(); + METHOD_EXIT(); DEFVARIABLE(varPc, VariableType::NATIVE_POINTER(), pc); DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp); DEFVARIABLE(varConstpool, VariableType::JS_POINTER(), constpool); @@ -2424,6 +2466,7 @@ DECLARE_ASM_HANDLER(HandleReturnundefined) DECLARE_ASM_HANDLER(HandleSuspendgeneratorV8) { auto env = GetEnvironment(); + METHOD_EXIT(); DEFVARIABLE(varPc, VariableType::NATIVE_POINTER(), pc); DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp); DEFVARIABLE(varConstpool, VariableType::JS_POINTER(), constpool); @@ -2578,7 +2621,7 @@ DECLARE_ASM_HANDLER(HandleTryldglobalbynameImm8Id16) GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc)); AccessObjectStubBuilder builder(this); StringIdInfo info = { constpool, pc, StringIdInfo::Offset::BYTE_1, StringIdInfo::Length::BITS_16 }; - GateRef result = builder.TryLoadGlobalByName(glue, 0, info, profileTypeInfo, slotId); + GateRef result = builder.TryLoadGlobalByName(glue, 0, info, profileTypeInfo, slotId, callback); CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(TRYLDGLOBALBYNAME_IMM8_ID16)); } @@ -2589,7 +2632,7 @@ DECLARE_ASM_HANDLER(HandleTryldglobalbynameImm16Id16) GateRef slotId = ZExtInt16ToInt32(ReadInst16_0(pc)); AccessObjectStubBuilder builder(this); StringIdInfo info = { constpool, pc, StringIdInfo::Offset::BYTE_2, StringIdInfo::Length::BITS_16 }; - GateRef result = builder.TryLoadGlobalByName(glue, 0, info, profileTypeInfo, slotId); + GateRef result = builder.TryLoadGlobalByName(glue, 0, info, profileTypeInfo, slotId, callback); CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(TRYLDGLOBALBYNAME_IMM16_ID16)); } @@ -2598,7 +2641,7 @@ DECLARE_ASM_HANDLER(HandleTrystglobalbynameImm8Id16) GateRef slotId = ZExtInt16ToInt32(ReadInst8_0(pc)); AccessObjectStubBuilder builder(this); StringIdInfo info = { constpool, pc, StringIdInfo::Offset::BYTE_1, StringIdInfo::Length::BITS_16 }; - GateRef result = builder.TryStoreGlobalByName(glue, 0, info, acc, profileTypeInfo, slotId); + GateRef result = builder.TryStoreGlobalByName(glue, 0, info, acc, profileTypeInfo, slotId, callback); CHECK_EXCEPTION(result, INT_PTR(TRYSTGLOBALBYNAME_IMM8_ID16)); } @@ -2607,7 +2650,7 @@ 
DECLARE_ASM_HANDLER(HandleTrystglobalbynameImm16Id16) GateRef slotId = ZExtInt16ToInt32(ReadInst16_0(pc)); AccessObjectStubBuilder builder(this); StringIdInfo info = { constpool, pc, StringIdInfo::Offset::BYTE_2, StringIdInfo::Length::BITS_16 }; - GateRef result = builder.TryStoreGlobalByName(glue, 0, info, acc, profileTypeInfo, slotId); + GateRef result = builder.TryStoreGlobalByName(glue, 0, info, acc, profileTypeInfo, slotId, callback); CHECK_EXCEPTION(result, INT_PTR(TRYSTGLOBALBYNAME_IMM16_ID16)); } @@ -2618,7 +2661,7 @@ DECLARE_ASM_HANDLER(HandleLdglobalvarImm16Id16) GateRef slotId = ZExtInt16ToInt32(ReadInst16_0(pc)); AccessObjectStubBuilder builder(this); StringIdInfo info = { constpool, pc, StringIdInfo::Offset::BYTE_2, StringIdInfo::Length::BITS_16 }; - GateRef result = builder.LoadGlobalVar(glue, 0, info, profileTypeInfo, slotId); + GateRef result = builder.LoadGlobalVar(glue, 0, info, profileTypeInfo, slotId, callback); CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(LDGLOBALVAR_IMM16_ID16)); } @@ -2830,6 +2873,7 @@ DECLARE_ASM_HANDLER(HandleCreateasyncgeneratorobjV8) DECLARE_ASM_HANDLER(HandleAsyncgeneratorresolveV8V8V8) { auto env = GetEnvironment(); + METHOD_EXIT(); DEFVARIABLE(varPc, VariableType::NATIVE_POINTER(), pc); DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp); DEFVARIABLE(varConstpool, VariableType::JS_POINTER(), constpool); @@ -3016,7 +3060,7 @@ DECLARE_ASM_HANDLER(HandleLdobjbyvalueImm8V8) GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc)); AccessObjectStubBuilder builder(this); - GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId); + GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId, callback); CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(LDOBJBYVALUE_IMM8_V8)); } @@ -3030,7 +3074,7 @@ DECLARE_ASM_HANDLER(HandleLdobjbyvalueImm16V8) GateRef slotId = ZExtInt8ToInt32(ReadInst16_0(pc)); AccessObjectStubBuilder builder(this); - GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId); + GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId, callback); CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(LDOBJBYVALUE_IMM16_V8)); } @@ -3093,7 +3137,7 @@ DECLARE_ASM_HANDLER(HandleLdobjbyindexImm8Imm16) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = GetPropertyByIndex(glue, receiver, index); + GateRef result = GetPropertyByIndex(glue, receiver, index, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -3118,7 +3162,7 @@ DECLARE_ASM_HANDLER(HandleLdobjbyindexImm16Imm16) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = GetPropertyByIndex(glue, receiver, index); + GateRef result = GetPropertyByIndex(glue, receiver, index, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -3143,7 +3187,7 @@ DECLARE_ASM_HANDLER(HandleWideLdobjbyindexPrefImm32) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = GetPropertyByIndex(glue, receiver, index); + GateRef result = GetPropertyByIndex(glue, receiver, index, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -3169,7 +3213,7 @@ DECLARE_ASM_HANDLER(HandleDeprecatedLdobjbyindexPrefV8Imm32) Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - GateRef result = GetPropertyByIndex(glue, receiver, index); + 
GateRef result = GetPropertyByIndex(glue, receiver, index, callback); Label notHole(env); Branch(TaggedIsHole(result), &slowPath, ¬Hole); Bind(¬Hole); @@ -3571,6 +3615,7 @@ DECLARE_ASM_HANDLER(HandleCallarg0Imm8) { GateRef actualNumArgs = Int32(InterpreterAssembly::ActualNumArgsOfCall::CALLARG0); GateRef func = acc; + METHOD_ENTRY(func); GateRef jumpSize = INT_PTR(CALLARG0_IMM8); GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter, JSCallMode::CALL_ARG0, {}, callback); @@ -3593,6 +3638,7 @@ DECLARE_ASM_HANDLER(HandleCallarg1Imm8V8) GateRef actualNumArgs = Int32(InterpreterAssembly::ActualNumArgsOfCall::CALLARG1); GateRef a0 = ReadInst8_1(pc); GateRef func = acc; + METHOD_ENTRY(func); GateRef a0Value = GetVregValue(sp, ZExtInt8ToPtr(a0)); GateRef jumpSize = INT_PTR(CALLARG1_IMM8_V8); GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter, @@ -3619,6 +3665,7 @@ DECLARE_ASM_HANDLER(HandleCallargs2Imm8V8V8) GateRef a0 = ReadInst8_1(pc); GateRef a1 = ReadInst8_2(pc); GateRef func = acc; + METHOD_ENTRY(func); GateRef a0Value = GetVregValue(sp, ZExtInt8ToPtr(a0)); GateRef a1Value = GetVregValue(sp, ZExtInt8ToPtr(a1)); GateRef jumpSize = INT_PTR(CALLARGS2_IMM8_V8_V8); @@ -3649,6 +3696,7 @@ DECLARE_ASM_HANDLER(HandleCallargs3Imm8V8V8V8) GateRef a1 = ReadInst8_2(pc); GateRef a2 = ReadInst8_3(pc); GateRef func = acc; + METHOD_ENTRY(func); GateRef a0Value = GetVregValue(sp, ZExtInt8ToPtr(a0)); GateRef a1Value = GetVregValue(sp, ZExtInt8ToPtr(a1)); GateRef a2Value = GetVregValue(sp, ZExtInt8ToPtr(a2)); @@ -3679,6 +3727,7 @@ DECLARE_ASM_HANDLER(HandleCallrangeImm8Imm8V8) { GateRef actualNumArgs = ZExtInt8ToInt32(ReadInst8_1(pc)); GateRef func = acc; + METHOD_ENTRY(func); GateRef argv = PtrAdd(sp, PtrMul(ZExtInt8ToPtr(ReadInst8_2(pc)), IntPtr(8))); // 8: byteSize GateRef jumpSize = INT_PTR(CALLRANGE_IMM8_IMM8_V8); GateRef numArgs = ZExtInt32ToPtr(actualNumArgs); @@ -3691,12 +3740,12 @@ DECLARE_ASM_HANDLER(HandleWideCallrangePrefImm16V8) { GateRef actualNumArgs = ZExtInt16ToInt32(ReadInst16_1(pc)); GateRef func = acc; + METHOD_ENTRY(func); GateRef argv = PtrAdd(sp, PtrMul(ZExtInt8ToPtr(ReadInst8_2(pc)), IntPtr(8))); // 8: byteSize GateRef jumpSize = INT_PTR(WIDE_CALLRANGE_PREF_IMM16_V8); GateRef numArgs = ZExtInt32ToPtr(actualNumArgs); GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter, - JSCallMode::CALL_WITH_ARGV, { numArgs, argv }, callback, - BytecodeInstruction::Format::IMM16); + JSCallMode::CALL_WITH_ARGV, { numArgs, argv }, callback); CHECK_PENDING_EXCEPTION(res, jumpSize); } @@ -3710,8 +3759,7 @@ DECLARE_ASM_HANDLER(HandleDeprecatedCallrangePrefImm16V8) GateRef jumpSize = INT_PTR(DEPRECATED_CALLRANGE_PREF_IMM16_V8); GateRef numArgs = ZExtInt32ToPtr(actualNumArgs); GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter, - JSCallMode::DEPRECATED_CALL_WITH_ARGV, { numArgs, argv }, callback, - BytecodeInstruction::Format::IMM16); + JSCallMode::DEPRECATED_CALL_WITH_ARGV, { numArgs, argv }, callback); CHECK_PENDING_EXCEPTION(res, jumpSize); } @@ -3720,6 +3768,7 @@ DECLARE_ASM_HANDLER(HandleCallthisrangeImm8Imm8V8) GateRef actualNumArgs = ZExtInt8ToInt32(ReadInst8_1(pc)); GateRef thisReg = ZExtInt8ToPtr(ReadInst8_2(pc)); GateRef func = acc; + METHOD_ENTRY(func); GateRef thisValue = GetVregValue(sp, thisReg); GateRef argv = PtrAdd(sp, PtrMul( PtrAdd(thisReg, IntPtr(1)), IntPtr(8))); // 1: skip this @@ -3735,14 +3784,14 @@ DECLARE_ASM_HANDLER(HandleWideCallthisrangePrefImm16V8) GateRef 
@@ -3735,14 +3784,14 @@ DECLARE_ASM_HANDLER(HandleWideCallthisrangePrefImm16V8)
    GateRef actualNumArgs = ZExtInt16ToInt32(ReadInst16_1(pc));
    GateRef thisReg = ZExtInt8ToPtr(ReadInst8_3(pc));
    GateRef func = acc;
+    METHOD_ENTRY(func);
    GateRef thisValue = GetVregValue(sp, thisReg);
    GateRef argv = PtrAdd(sp, PtrMul(
        PtrAdd(thisReg, IntPtr(1)), IntPtr(8))); // 1: skip this
    GateRef jumpSize = INT_PTR(WIDE_CALLTHISRANGE_PREF_IMM16_V8);
    GateRef numArgs = ZExtInt32ToPtr(actualNumArgs);
    GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter,
-                                 JSCallMode::CALL_THIS_WITH_ARGV, { numArgs, argv, thisValue },
-                                 callback, BytecodeInstruction::Format::IMM16);
+                                 JSCallMode::CALL_THIS_WITH_ARGV, { numArgs, argv, thisValue }, callback);
    CHECK_PENDING_EXCEPTION(res, jumpSize);
}
@@ -3758,8 +3807,7 @@ DECLARE_ASM_HANDLER(HandleDeprecatedCallthisrangePrefImm16V8)
    GateRef jumpSize = INT_PTR(DEPRECATED_CALLTHISRANGE_PREF_IMM16_V8);
    GateRef numArgs = ZExtInt32ToPtr(actualNumArgs);
    GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter,
-                                 JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV, { numArgs, argv, thisValue },
-                                 callback, BytecodeInstruction::Format::IMM16);
+                                 JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV, { numArgs, argv, thisValue }, callback);
    CHECK_PENDING_EXCEPTION(res, jumpSize);
}
@@ -3768,6 +3816,7 @@ DECLARE_ASM_HANDLER(HandleCallthis0Imm8V8)
    GateRef actualNumArgs = Int32(InterpreterAssembly::ActualNumArgsOfCall::CALLARG0);
    GateRef thisValue = GetVregValue(sp, ZExtInt8ToPtr(ReadInst8_1(pc)));
    GateRef func = acc;
+    METHOD_ENTRY(func);
    GateRef jumpSize = INT_PTR(CALLTHIS0_IMM8_V8);
    GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter,
                                 JSCallMode::CALL_THIS_ARG0, { thisValue }, callback);
@@ -3780,6 +3829,7 @@ DECLARE_ASM_HANDLER(HandleCallthis1Imm8V8V8)
    GateRef thisValue = GetVregValue(sp, ZExtInt8ToPtr(ReadInst8_1(pc)));
    GateRef a0 = ReadInst8_2(pc);
    GateRef func = acc;
+    METHOD_ENTRY(func);
    GateRef a0Value = GetVregValue(sp, ZExtInt8ToPtr(a0));
    GateRef jumpSize = INT_PTR(CALLTHIS1_IMM8_V8_V8);
    GateRef res = JSCallDispatch(glue, func, actualNumArgs, jumpSize, hotnessCounter,
@@ -3794,6 +3844,7 @@ DECLARE_ASM_HANDLER(HandleCallthis2Imm8V8V8V8)
    GateRef a0 = ReadInst8_2(pc);
    GateRef a1 = ReadInst8_3(pc);
    GateRef func = acc;
+    METHOD_ENTRY(func);
    GateRef a0Value = GetVregValue(sp, ZExtInt8ToPtr(a0));
    GateRef a1Value = GetVregValue(sp, ZExtInt8ToPtr(a1));
    GateRef jumpSize = INT_PTR(CALLTHIS2_IMM8_V8_V8_V8);
@@ -3810,6 +3861,7 @@ DECLARE_ASM_HANDLER(HandleCallthis3Imm8V8V8V8V8)
    GateRef a1 = ReadInst8_3(pc);
    GateRef a2 = ReadInst8_4(pc);
    GateRef func = acc;
+    METHOD_ENTRY(func);
    GateRef a0Value = GetVregValue(sp, ZExtInt8ToPtr(a0));
    GateRef a1Value = GetVregValue(sp, ZExtInt8ToPtr(a1));
    GateRef a2Value = GetVregValue(sp, ZExtInt8ToPtr(a2));
@@ -3823,9 +3875,11 @@ DECLARE_ASM_HANDLER(HandleCreatearraywithbufferImm8Id16)
{
    GateRef imm = ZExtInt16ToInt32(ReadInst16_1(pc));
    GateRef currentFunc = GetFunctionFromFrame(GetFrame(sp));
+    GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc));
    NewObjectStubBuilder newBuilder(this);
-    GateRef res = newBuilder.CreateArrayWithBuffer(glue, imm, currentFunc);
+    GateRef res = newBuilder.CreateArrayWithBuffer(glue, imm, currentFunc, pc,
+                                                   profileTypeInfo, slotId, callback);
    CHECK_EXCEPTION_WITH_ACC(res, INT_PTR(CREATEARRAYWITHBUFFER_IMM8_ID16));
}
@@ -3833,9 +3887,11 @@ DECLARE_ASM_HANDLER(HandleCreatearraywithbufferImm16Id16)
{
    GateRef imm = ZExtInt16ToInt32(ReadInst16_2(pc));
    GateRef currentFunc = GetFunctionFromFrame(GetFrame(sp));
+    GateRef slotId = ZExtInt16ToInt32(ReadInst16_0(pc));
    NewObjectStubBuilder newBuilder(this);
-    GateRef res = newBuilder.CreateArrayWithBuffer(glue, imm, currentFunc);
+    GateRef res = newBuilder.CreateArrayWithBuffer(glue, imm, currentFunc, pc,
+                                                   profileTypeInfo, slotId, callback);
    CHECK_EXCEPTION_WITH_ACC(res, INT_PTR(CREATEARRAYWITHBUFFER_IMM16_ID16));
}
@@ -3843,9 +3899,11 @@ DECLARE_ASM_HANDLER(HandleDeprecatedCreatearraywithbufferPrefImm16)
{
    GateRef imm = ZExtInt16ToInt32(ReadInst16_1(pc));
    GateRef currentFunc = GetFunctionFromFrame(GetFrame(sp));
+    GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc));
    NewObjectStubBuilder newBuilder(this);
-    GateRef res = newBuilder.CreateArrayWithBuffer(glue, imm, currentFunc);
+    GateRef res = newBuilder.CreateArrayWithBuffer(glue, imm, currentFunc, pc,
+                                                   profileTypeInfo, slotId, callback);
    CHECK_EXCEPTION_WITH_ACC(res, INT_PTR(DEPRECATED_CREATEARRAYWITHBUFFER_PREF_IMM16));
}
@@ -3857,7 +3915,7 @@ DECLARE_ASM_HANDLER(HandleCreateobjectwithbufferImm8Id16)
    GateRef result = GetObjectLiteralFromConstPool(glue, constpool, imm, module);
    GateRef currentEnv = GetEnvFromFrame(GetFrame(sp));
    GateRef res = CallRuntime(glue, RTSTUB_ID(CreateObjectHavingMethod), { result, currentEnv });
-    callback.ProfileCreateObject(result, res);
+    callback.ProfileCreateObject(res);
    CHECK_EXCEPTION_WITH_ACC(res, INT_PTR(CREATEOBJECTWITHBUFFER_IMM8_ID16));
}
@@ -3869,7 +3927,7 @@ DECLARE_ASM_HANDLER(HandleCreateobjectwithbufferImm16Id16)
    GateRef result = GetObjectLiteralFromConstPool(glue, constpool, imm, module);
    GateRef currentEnv = GetEnvFromFrame(GetFrame(sp));
    GateRef res = CallRuntime(glue, RTSTUB_ID(CreateObjectHavingMethod), { result, currentEnv });
-    callback.ProfileCreateObject(result, res);
+    callback.ProfileCreateObject(res);
    CHECK_EXCEPTION_WITH_ACC(res, INT_PTR(CREATEOBJECTWITHBUFFER_IMM16_ID16));
}
@@ -3924,6 +3982,7 @@ DECLARE_ASM_HANDLER(HandleNewobjrangeImm8Imm8V8)
        GateRef argv = PtrAdd(sp, PtrMul(
            PtrAdd(firstArgRegIdx, firstArgOffset), IntPtr(8))); // 8: skip function
        GateRef jumpSize = IntPtr(-BytecodeInstruction::Size(BytecodeInstruction::Format::IMM8_IMM8_V8));
+        METHOD_ENTRY_ENV_DEFINED(ctor);
        res = JSCallDispatch(glue, ctor, actualNumArgs, jumpSize, hotnessCounter,
                             JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV,
                             { ZExtInt32ToPtr(actualNumArgs), argv, *thisObj }, callback);
@@ -3994,10 +4053,10 @@ DECLARE_ASM_HANDLER(HandleNewobjrangeImm16Imm8V8)
            PtrAdd(firstArgRegIdx, firstArgOffset), IntPtr(8))); // 8: skip function
        GateRef jumpSize =
            IntPtr(-static_cast<int64_t>(BytecodeInstruction::Size(BytecodeInstruction::Format::IMM16_IMM8_V8)));
+        METHOD_ENTRY_ENV_DEFINED(ctor);
        res = JSCallDispatch(glue, ctor, actualNumArgs, jumpSize, hotnessCounter,
                             JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV,
-                             { ZExtInt32ToPtr(actualNumArgs), argv, *thisObj }, callback,
-                             BytecodeInstruction::Format::IMM16);
+                             { ZExtInt32ToPtr(actualNumArgs), argv, *thisObj }, callback);
        Jump(&threadCheck);
    }
    Bind(&slowPath);
@@ -4066,8 +4125,7 @@ DECLARE_ASM_HANDLER(HandleWideNewobjrangePrefImm16V8)
        GateRef jumpSize = IntPtr(-BytecodeInstruction::Size(BytecodeInstruction::Format::PREF_IMM16_V8));
        res = JSCallDispatch(glue, ctor, actualNumArgs, jumpSize, hotnessCounter,
                             JSCallMode::DEPRECATED_CALL_CONSTRUCTOR_WITH_ARGV,
-                             { ZExtInt32ToPtr(actualNumArgs), argv, *thisObj }, callback,
-                             BytecodeInstruction::Format::IMM16);
+                             { ZExtInt32ToPtr(actualNumArgs), argv, *thisObj }, callback);
        Jump(&threadCheck);
    }
    Bind(&slowPath);
@@ -4364,7 +4422,7 @@ DECLARE_ASM_HANDLER(HandleLdthisbyvalueImm16)
    GateRef slotId = ZExtInt16ToInt32(ReadInst16_0(pc));
    AccessObjectStubBuilder builder(this);
-    GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId);
+    GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId, callback);
    CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(LDTHISBYVALUE_IMM16));
}
DECLARE_ASM_HANDLER(HandleLdthisbyvalueImm8)
@@ -4376,7 +4434,7 @@ DECLARE_ASM_HANDLER(HandleLdthisbyvalueImm8)
    GateRef slotId = ZExtInt8ToInt32(ReadInst8_0(pc));
    AccessObjectStubBuilder builder(this);
-    GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId);
+    GateRef result = builder.LoadObjByValue(glue, receiver, propKey, profileTypeInfo, slotId, callback);
    CHECK_EXCEPTION_WITH_VARACC(result, INT_PTR(LDTHISBYVALUE_IMM8));
}
DECLARE_ASM_HANDLER(HandleStthisbynameImm16Id16)
@@ -4588,10 +4646,80 @@ DECLARE_ASM_HANDLER(SingleStepDebugging)
DECLARE_ASM_HANDLER(BCDebuggerEntry)
{
+    auto env = GetEnvironment();
+    Label callByteCodeChanged(env);
+    Label isFrameDroppedTrue(env);
+    Label isFrameDroppedFalse(env);
+    Label isEntryFrameDroppedPending(env);
+    Label isEntryFrameDroppedNotTrue(env);
+    Label pcEqualNullptr(env);
+    Label pcNotEqualNullptr(env);
    GateRef frame = GetFrame(sp);
+    GateRef isEntryFrameDropped = Load(VariableType::INT8(), glue,
+        IntPtr(JSThread::GlueData::GetEntryFrameDroppedStateOffset(env->Is32Bit())));
+    Branch(Int8Equal(isEntryFrameDropped, Int8(JSThread::FrameDroppedState::StatePending)),
+        &isEntryFrameDroppedPending, &callByteCodeChanged);
+    Bind(&isEntryFrameDroppedPending);
+    {
+        Store(VariableType::INT8(), glue, glue,
+            IntPtr(JSThread::GlueData::GetEntryFrameDroppedStateOffset(env->Is32Bit())),
+            Int8(JSThread::FrameDroppedState::StateFalse));
+        DEFVARIABLE(varPc, VariableType::NATIVE_POINTER(), pc);
+        DEFVARIABLE(varAcc, VariableType::JS_ANY(), acc);
+        varPc = GetPcFromFrame(frame);
+        varAcc = GetAccFromFrame(frame);
+        Dispatch(glue, sp, *varPc, constpool, profileTypeInfo, *varAcc, hotnessCounter, IntPtr(0));
+    }
+    Bind(&callByteCodeChanged);
    SetPcToFrame(glue, frame, pc);
    // NOTIFY_DEBUGGER_EVENT()
    CallRuntime(glue, RTSTUB_ID(NotifyBytecodePcChanged), {});
+    GateRef isFrameDropped = Load(VariableType::BOOL(), glue,
+        IntPtr(JSThread::GlueData::GetIsFrameDroppedOffset(env->Is32Bit())));
+    Branch(isFrameDropped, &isFrameDroppedTrue, &isFrameDroppedFalse);
+    Bind(&isFrameDroppedTrue);
+    {
+        DEFVARIABLE(varPc, VariableType::NATIVE_POINTER(), pc);
+        DEFVARIABLE(varSp, VariableType::NATIVE_POINTER(), sp);
+        DEFVARIABLE(varConstpool, VariableType::JS_POINTER(), constpool);
+        DEFVARIABLE(varProfileTypeInfo, VariableType::JS_POINTER(), profileTypeInfo);
+        DEFVARIABLE(varAcc, VariableType::JS_ANY(), acc);
+        DEFVARIABLE(varHotnessCounter, VariableType::INT32(), hotnessCounter);
+        GateRef state = GetFrame(*varSp);
+        GateRef currentSp = *varSp;
+        Store(VariableType::BOOL(), glue, glue,
+            IntPtr(JSThread::GlueData::GetIsFrameDroppedOffset(env->Is32Bit())), False());
+        varSp = Load(VariableType::NATIVE_POINTER(), state,
+            IntPtr(AsmInterpretedFrame::GetBaseOffset(env->IsArch32Bit())));
+        isEntryFrameDropped = Load(VariableType::INT8(), glue,
+            IntPtr(JSThread::GlueData::GetEntryFrameDroppedStateOffset(env->Is32Bit())));
+        Branch(Int8Equal(isEntryFrameDropped, Int8(JSThread::FrameDroppedState::StateTrue)),
+            &pcEqualNullptr, &isEntryFrameDroppedNotTrue);
+        Bind(&isEntryFrameDroppedNotTrue);
+        GateRef prevState = GetFrame(*varSp);
+        varPc = GetPcFromFrame(prevState);
+        Branch(IntPtrEqual(*varPc, IntPtr(0)), &pcEqualNullptr, &pcNotEqualNullptr);
+        Bind(&pcEqualNullptr);
+        {
+            CallNGCRuntime(glue, RTSTUB_ID(ResumeRspAndReturn), { *varAcc, *varSp, currentSp });
+            Return();
+        }
+        Bind(&pcNotEqualNullptr);
+        {
+            GateRef function = GetFunctionFromFrame(prevState);
+            GateRef method = Load(VariableType::JS_ANY(), function, IntPtr(JSFunctionBase::METHOD_OFFSET));
+            varConstpool = GetConstpoolFromMethod(method);
+            varProfileTypeInfo = GetProfileTypeInfoFromMethod(method);
+            varHotnessCounter = GetHotnessCounterFromMethod(method);
+            GateRef jumpSize = IntPtr(0);
+            CallNGCRuntime(glue, RTSTUB_ID(ResumeRspAndRollback),
+                { glue, currentSp, *varPc, *varConstpool, *varProfileTypeInfo,
+                  *varAcc, *varHotnessCounter, jumpSize });
+            Return();
+        }
+    }
+    Bind(&isFrameDroppedFalse);
+    SetAccToFrame(glue, frame, acc);
    // goto normal handle stub
    DispatchDebugger(glue, sp, pc, constpool, profileTypeInfo, acc, hotnessCounter);
}
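Note: `BCDebuggerEntry` now carries the debugger's drop-frame protocol around the per-bytecode notification. Reduced to its state machine (states mirror `JSThread::FrameDroppedState`; behavior paraphrased from the stub above):

    #include <cstdint>
    enum class FrameDroppedState : int8_t { StateFalse = 0, StateTrue, StatePending };
    // StatePending: re-dispatch from the frame's saved pc/acc, resetting to StateFalse.
    // StateTrue:    unwind straight to the entry frame via ResumeRspAndReturn.
    // Otherwise:    if the per-frame drop flag is set, pop to the caller frame and
    //               roll back via ResumeRspAndRollback; else dispatch normally.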
diff --git a/ecmascript/compiler/test_stubs_signature.h b/ecmascript/compiler/ir_builder.cpp
similarity index 45%
rename from ecmascript/compiler/test_stubs_signature.h
rename to ecmascript/compiler/ir_builder.cpp
index 402fab7fd280ff57fa7903a0a587bc74591f2906..052fb5bd1972e9e0093921bb314c5b678704df3c 100644
--- a/ecmascript/compiler/test_stubs_signature.h
+++ b/ecmascript/compiler/ir_builder.cpp
@@ -12,25 +12,32 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifndef ECMASCRIPT_COMPILER_TEST_STUBS_SIGNATURE_H
-#define ECMASCRIPT_COMPILER_TEST_STUBS_SIGNATURE_H
+#include "ecmascript/compiler/ir_builder.h"

 namespace panda::ecmascript::kungfu {
-#ifndef NDEBUG
-#define TEST_STUB_SIGNATRUE_LIST(V) \
-    V(FooAOT)                       \
-    V(BarAOT)                       \
-    V(Foo1AOT)                      \
-    V(Foo2AOT)                      \
-    V(FooNativeAOT)                 \
-    V(FooBoundAOT)                  \
-    V(Bar1AOT)                      \
-    V(FooProxyAOT)                  \
-    V(FooProxy2AOT)                 \
-    V(Bar2AOT)                      \
-    V(TestAbsoluteAddressRelocation)
-#else
-    #define TEST_STUB_SIGNATRUE_LIST(V)
-#endif
-} // namespace panda::ecmascript::kungfu
-#endif
+bool IsAddIntergerType(MachineType machineType)
+{
+    switch (machineType) {
+        case MachineType::I8:
+        case MachineType::I16:
+        case MachineType::I32:
+        case MachineType::I64:
+        case MachineType::ARCH:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool IsMulIntergerType(MachineType machineType)
+{
+    switch (machineType) {
+        case MachineType::I32:
+        case MachineType::I64:
+        case MachineType::ARCH:
+            return true;
+        default:
+            return false;
+    }
+}
+} // namespace panda::ecmascript::kungfu
\ No newline at end of file
diff --git a/ecmascript/compiler/ir_builder.h b/ecmascript/compiler/ir_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f72962d433e7ac0f80aa6eb029d93954393e261
--- /dev/null
+++ b/ecmascript/compiler/ir_builder.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_IR_BUILDER_H
+#define ECMASCRIPT_COMPILER_IR_BUILDER_H
+#include "ecmascript/compiler/gate_meta_data.h"
+
+namespace panda::ecmascript::kungfu {
+using OperandsVector = std::set<int>;
+enum class MachineRep {
+    K_NONE,
+    K_BIT,
+    K_WORD8,
+    K_WORD16,
+    K_WORD32,
+    K_WORD64,
+    // FP representations must be last, and in order of increasing size.
+    K_FLOAT32,
+    K_FLOAT64,
+    K_SIMD128,
+    K_PTR_1, // Tagged Pointer
+    K_META,
+};
+
+enum class CallInputs : size_t {
+    DEPEND = 0,
+    TARGET,
+    GLUE,
+    FIRST_PARAMETER
+};
+
+enum class CallExceptionKind : bool {
+    HAS_PC_OFFSET = true,
+    NO_PC_OFFSET = false
+};
+
+#define OPCODES(V)                                                                      \
+    V(Call, (GateRef gate, const std::vector<GateRef> &inList, OpCode op))              \
+    V(RuntimeCall, (GateRef gate, const std::vector<GateRef> &inList))                  \
+    V(RuntimeCallWithArgv, (GateRef gate, const std::vector<GateRef> &inList))          \
+    V(NoGcRuntimeCall, (GateRef gate, const std::vector<GateRef> &inList))              \
+    V(BytecodeCall, (GateRef gate, const std::vector<GateRef> &inList))                 \
+    V(Alloca, (GateRef gate))                                                           \
+    V(Block, (int id, const OperandsVector &predecessors))                              \
+    V(Goto, (int block, int bbout))                                                     \
+    V(Parameter, (GateRef gate))                                                        \
+    V(Constant, (GateRef gate, std::bitset<64> value))                                  \
+    V(ConstString, (GateRef gate, const ChunkVector<char> &str))                        \
+    V(RelocatableData, (GateRef gate, uint64_t value))                                  \
+    V(ZExtInt, (GateRef gate, GateRef e1))                                              \
+    V(SExtInt, (GateRef gate, GateRef e1))                                              \
+    V(FPExt, (GateRef gate, GateRef e1))                                                \
+    V(FPTrunc, (GateRef gate, GateRef e1))                                              \
+    V(Load, (GateRef gate, GateRef base))                                               \
+    V(Store, (GateRef gate, GateRef base, GateRef value))                               \
+    V(IntRev, (GateRef gate, GateRef e1))                                               \
+    V(Add, (GateRef gate, GateRef e1, GateRef e2))                                      \
+    V(Sub, (GateRef gate, GateRef e1, GateRef e2))                                      \
+    V(Mul, (GateRef gate, GateRef e1, GateRef e2))                                      \
+    V(FloatDiv, (GateRef gate, GateRef e1, GateRef e2))                                 \
+    V(IntDiv, (GateRef gate, GateRef e1, GateRef e2))                                   \
+    V(UDiv, (GateRef gate, GateRef e1, GateRef e2))                                     \
+    V(IntOr, (GateRef gate, GateRef e1, GateRef e2))                                    \
+    V(IntAnd, (GateRef gate, GateRef e1, GateRef e2))                                   \
+    V(IntXor, (GateRef gate, GateRef e1, GateRef e2))                                   \
+    V(IntLsr, (GateRef gate, GateRef e1, GateRef e2))                                   \
+    V(IntAsr, (GateRef gate, GateRef e1, GateRef e2))                                   \
+    V(Int32LessThanOrEqual, (GateRef gate, GateRef e1, GateRef e2))                     \
+    V(Cmp, (GateRef gate, GateRef e1, GateRef e2))                                      \
+    V(Branch, (GateRef gate, GateRef cmp, GateRef btrue, GateRef bfalse))               \
+    V(Switch, (GateRef gate, GateRef input, const std::vector<GateRef> &outList))       \
+    V(SwitchCase, (GateRef gate, GateRef switchBranch, GateRef out))                    \
+    V(Phi, (GateRef gate, const std::vector<GateRef> &srcGates))                        \
+    V(Return, (GateRef gate, GateRef popCount, const std::vector<GateRef> &operands))   \
+    V(ReturnVoid, (GateRef gate))                                                       \
+    V(CastIntXToIntY, (GateRef gate, GateRef e1))                                       \
+    V(ChangeInt32ToDouble, (GateRef gate, GateRef e1))                                  \
+    V(ChangeUInt32ToDouble, (GateRef gate, GateRef e1))                                 \
+    V(ChangeDoubleToInt32, (GateRef gate, GateRef e1))                                  \
+    V(BitCast, (GateRef gate, GateRef e1))                                              \
+    V(IntLsl, (GateRef gate, GateRef e1, GateRef e2))                                   \
+    V(Mod, (GateRef gate, GateRef e1, GateRef e2))                                      \
+    V(ChangeTaggedPointerToInt64, (GateRef gate, GateRef e1))                           \
+    V(ChangeInt64ToTagged, (GateRef gate, GateRef e1))                                  \
+    V(DeoptCheck, (GateRef gate))                                                       \
+    V(TruncFloatToInt, (GateRef gate, GateRef e1))                                      \
+    V(AddWithOverflow, (GateRef gate, GateRef e1, GateRef e2))                          \
+    V(SubWithOverflow, (GateRef gate, GateRef e1, GateRef e2))                          \
+    V(MulWithOverflow, (GateRef gate, GateRef e1, GateRef e2))                          \
+    V(ExtractValue, (GateRef gate, GateRef e1, GateRef e2))                             \
+    V(Sqrt, (GateRef gate, GateRef e1))                                                 \
+    V(ReadSp, (GateRef gate))
+
+bool IsAddIntergerType(MachineType machineType);
+bool IsMulIntergerType(MachineType machineType);
+} // namespace panda::ecmascript::kungfu
+#endif // ECMASCRIPT_COMPILER_IR_BUILDER_H
\ No newline at end of file
diff --git a/ecmascript/compiler/test_stubs.h b/ecmascript/compiler/ir_module.cpp
similarity index 37%
rename from ecmascript/compiler/test_stubs.h
rename to ecmascript/compiler/ir_module.cpp
index c7aa0ffecf63eddc024d4cacac6f1bc0f1a56e16..d8d804571919812640aa1ec99ac7d9942ffc9401 100644
--- a/ecmascript/compiler/test_stubs.h
+++ b/ecmascript/compiler/ir_module.cpp
@@ -13,27 +13,18 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_COMPILER_TEST_STUBS_H
-#define ECMASCRIPT_COMPILER_TEST_STUBS_H
-
-#include "ecmascript/compiler/stub_builder.h"
-#include "ecmascript/compiler/test_stubs_signature.h"
+#include "ecmascript/compiler/ir_module.h"

 namespace panda::ecmascript::kungfu {
-#ifndef NDEBUG
-#define DECLARE_STUB_CLASS(name)                                              \
-    class name##StubBuilder : public StubBuilder {                            \
-    public:                                                                   \
-        name##StubBuilder(CallSignature *callSignature, Environment *env)     \
-            : StubBuilder(callSignature, env) {}                              \
-        ~name##StubBuilder() = default;                                       \
-        NO_MOVE_SEMANTIC(name##StubBuilder);                                  \
-        NO_COPY_SEMANTIC(name##StubBuilder);                                  \
-        void GenerateCircuit() override;                                      \
-    };
-    TEST_STUB_SIGNATRUE_LIST(DECLARE_STUB_CLASS)
-#undef DECLARE_STUB_CLASS
-#endif
+std::string IRModule::GetFuncName(const MethodLiteral *methodLiteral,
+                                  const JSPandaFile *jsPandaFile)
+{
+    auto offset = methodLiteral->GetMethodId().GetOffset();
+    std::string fileName = jsPandaFile->GetFileName();
+    std::string name = MethodLiteral::GetMethodName(jsPandaFile, methodLiteral->GetMethodId());
+    name += std::string("@") + std::to_string(offset) + std::string("@") + fileName;
+    return name;
 }
-#endif
\ No newline at end of file
+
+} // namespace panda::ecmascript::kungfu
\ No newline at end of file
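Note: `IRModule::GetFuncName` above derives a symbol of the form `<method>@<method-id offset>@<panda file>`, which keeps generated function names unique even when different abc files define methods with the same short name. A self-contained sketch of the scheme (helper name is illustrative):

    #include <cstdint>
    #include <string>

    std::string MakeFuncName(const std::string &method, uint32_t offset, const std::string &file)
    {
        // Mirrors GetFuncName: method name, bytecode offset, then file name.
        return method + "@" + std::to_string(offset) + "@" + file;
    }
    // e.g. MakeFuncName("func_main_0", 208, "hello-world.abc")
    //      -> "func_main_0@208@hello-world.abc"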
diff --git a/ecmascript/compiler/ir_module.h b/ecmascript/compiler/ir_module.h
new file mode 100644
index 0000000000000000000000000000000000000000..7130d5aa8df07ffd0ef12bbd2aa7104d6c3444fd
--- /dev/null
+++ b/ecmascript/compiler/ir_module.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_IR_MODULE_H
+#define ECMASCRIPT_COMPILER_IR_MODULE_H
+
+#include "ecmascript/compiler/debug_info.h"
+#include "ecmascript/jspandafile/js_pandafile.h"
+
+namespace panda::ecmascript::kungfu {
+enum ModuleKind : uint8_t
+{
+    MODULE_LITECG,
+    MODULE_LLVM,
+};
+
+class IRModule {
+public:
+    IRModule(NativeAreaAllocator* allocator, bool logDbg, const std::string &triple)
+    {
+        tripleStr_ = triple;
+        CompilationConfig cfg(tripleStr_);
+        is64Bit_ = cfg.Is64Bit();
+        triple_ = cfg.GetTriple();
+        debugInfo_ = new DebugInfo(allocator, logDbg);
+    }
+
+    virtual ~IRModule()
+    {
+        if (debugInfo_ == nullptr) {
+            return;
+        }
+        delete debugInfo_;
+        debugInfo_ = nullptr;
+    }
+
+    DebugInfo* GetDebugInfo() const
+    {
+        return debugInfo_;
+    }
+
+    Triple GetTriple() const
+    {
+        return triple_;
+    }
+
+    const std::string &GetTripleStr() const
+    {
+        return tripleStr_;
+    }
+
+    bool Is64Bit() const
+    {
+        return is64Bit_;
+    }
+
+    bool Is32Bit() const
+    {
+        return !is64Bit_;
+    }
+
+    virtual ModuleKind GetModuleKind() const = 0;
+
+    std::string GetFuncName(const MethodLiteral *methodLiteral, const JSPandaFile *jsPandaFile);
+private:
+    DebugInfo* debugInfo_ {nullptr};
+    std::string tripleStr_;
+    Triple triple_;
+    bool is64Bit_ {false};
+};
+} // namespace panda::ecmascript::kungfu
+#endif // ECMASCRIPT_COMPILER_IR_MODULE_H
+
diff --git a/ecmascript/compiler/later_elimination.cpp b/ecmascript/compiler/later_elimination.cpp
index 940de6d5983d9800ee218f391b151fb801db865f..016d75bcd26c63ccb6df4f0d53966eba7b7b3015 100644
--- a/ecmascript/compiler/later_elimination.cpp
+++ b/ecmascript/compiler/later_elimination.cpp
@@ -37,7 +37,7 @@ void LaterElimination::Run()

 GateRef LaterElimination::VisitDependEntry(GateRef gate)
 {
-    auto empty = new (chunk_) DependChainNodes(chunk_);
+    auto empty = new (chunk_) DependChains(chunk_);
     return UpdateDependChain(gate, empty);
 }

@@ -47,11 +47,22 @@ GateRef LaterElimination::VisitGate(GateRef gate)
     switch (opcode) {
         case OpCode::GET_CONSTPOOL:
         case OpCode::GET_GLOBAL_ENV:
+        case OpCode::GET_GLOBAL_ENV_OBJ:
         case OpCode::GET_GLOBAL_ENV_OBJ_HCLASS:
         case OpCode::GET_GLOBAL_CONSTANT_VALUE:
         case OpCode::ARRAY_GUARDIAN_CHECK:
         case OpCode::HCLASS_STABLE_ARRAY_CHECK:
         case OpCode::HEAP_OBJECT_CHECK:
+        case OpCode::INT32_UNSIGNED_UPPER_BOUND_CHECK:
+        case OpCode::OVERFLOW_CHECK:
+        case OpCode::VALUE_CHECK_NEG_OVERFLOW:
+        case OpCode::FLOAT64_CHECK_RIGHT_IS_ZERO:
+        case OpCode::INT32_CHECK_RIGHT_IS_ZERO:
+        case OpCode::INT32_DIV_WITH_CHECK:
+        case OpCode::LEX_VAR_IS_HOLE_CHECK:
+        case OpCode::COW_ARRAY_CHECK:
+        case OpCode::FLATTEN_STRING_CHECK:
+        case OpCode::CHECK_AND_CONVERT:
             return TryEliminateGate(gate);
         case OpCode::DEPEND_SELECTOR:
             return TryEliminateDependSelector(gate);
@@ -113,7 +124,7 @@ GateRef LaterElimination::TryEliminateDependSelector(GateRef gate)
     // all depend done.
     auto depend = acc_.GetDep(gate);
     auto dependChain = GetDependChain(depend);
-    DependChainNodes* copy = new (chunk_) DependChainNodes(chunk_);
+    DependChains* copy = new (chunk_) DependChains(chunk_);
     copy->CopyFrom(dependChain);
     for (size_t i = 1; i < dependCount; ++i) { // 1: second in
         auto dependIn = acc_.GetDep(gate, i);
@@ -123,7 +134,7 @@ GateRef LaterElimination::TryEliminateDependSelector(GateRef gate)
     return UpdateDependChain(gate, copy);
 }

-GateRef LaterElimination::UpdateDependChain(GateRef gate, DependChainNodes* dependChain)
+GateRef LaterElimination::UpdateDependChain(GateRef gate, DependChains* dependChain)
 {
     ASSERT(dependChain != nullptr);
     auto oldDependChain = GetDependChain(gate);
@@ -148,77 +159,27 @@ bool LaterElimination::CheckReplacement(GateRef lhs, GateRef rhs)
         }
     }
     auto opcode = acc_.GetOpCode(lhs);
-    if (opcode == OpCode::GET_GLOBAL_ENV_OBJ_HCLASS ||
-        opcode == OpCode::GET_GLOBAL_CONSTANT_VALUE) {
-        if (acc_.GetIndex(lhs) != acc_.GetIndex(rhs)) {
-            return false;
+    switch (opcode) {
+        case OpCode::GET_GLOBAL_ENV_OBJ:
+        case OpCode::GET_GLOBAL_ENV_OBJ_HCLASS:
+        case OpCode::GET_GLOBAL_CONSTANT_VALUE: {
+            if (acc_.GetIndex(lhs) != acc_.GetIndex(rhs)) {
+                return false;
+            }
+            break;
         }
-    }
-    return true;
-}
-
-void DependChainNodes::Merge(DependChainNodes* that)
-{
-    // find common sub list
-    while (size_ > that->size_) {
-        head_ = head_->next;
-        size_--;
-    }
-
-    auto lhs = this->head_;
-    auto rhs = that->head_;
-    size_t rhsSize = that->size_;
-    while (rhsSize > size_) {
-        rhs = rhs->next;
-        rhsSize--;
-    }
-    while (lhs != rhs) {
-        ASSERT(lhs != nullptr);
-        lhs = lhs->next;
-        rhs = rhs->next;
-        size_--;
-    }
-    head_ = lhs;
-}
-
-bool DependChainNodes::Equals(DependChainNodes* that)
-{
-    if (that == nullptr) {
-        return false;
-    }
-    if (size_ != that->size_) {
-        return false;
-    }
-    auto lhs = this->head_;
-    auto rhs = that->head_;
-    while (lhs != rhs) {
-        if (lhs->gate != rhs->gate) {
-            return false;
+        case OpCode::CHECK_AND_CONVERT: {
+            if (acc_.GetSrcType(lhs) != acc_.GetSrcType(rhs)) {
+                return false;
+            }
+            if (acc_.GetDstType(lhs) != acc_.GetDstType(rhs)) {
+                return false;
+            }
+            break;
         }
-        lhs = lhs->next;
-        rhs = rhs->next;
+        default:
+            break;
     }
     return true;
 }
-
-GateRef DependChainNodes::LookupNode(LaterElimination* elimination, GateRef gate)
-{
-    for (Node* node = head_; node != nullptr; node = node->next) {
-        if (elimination->CheckReplacement(node->gate, gate)) {
-            return node->gate;
-        }
-    }
-    return Circuit::NullGate();
-}
-
-DependChainNodes* DependChainNodes::UpdateNode(GateRef gate)
-{
-    // assign node->next to head
-    Node* node = chunk_->New<Node>(gate, head_);
-    DependChainNodes* that = new (chunk_) DependChainNodes(chunk_);
-    // assign head to node
-    that->head_ = node;
-    that->size_ = size_ + 1;
-    return that;
-}
 } // namespace panda::ecmascript::kungfu
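Note: the list machinery deleted here survives as `DependChains` in the new `ecmascript/compiler/base/depend_chain_helper.h` include. The removed `UpdateNode` shows the shape: a persistent singly-linked chain that is extended by prepending, so chains on diverging control-flow paths share their common tail. A minimal sketch of that shape (types reduced; `GateRef` stands in as `int`; the real nodes live in a `Chunk` arena rather than on the heap):

    #include <cstddef>

    struct Node {
        int gate;          // stand-in for GateRef
        const Node *next;
    };

    struct Chain {
        const Node *head {nullptr};
        std::size_t size {0};
        // Prepend without mutating the existing chain (copy-on-extend).
        Chain Extend(int gate) const { return {new Node{gate, head}, size + 1}; }
    };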
diff --git a/ecmascript/compiler/later_elimination.h b/ecmascript/compiler/later_elimination.h
index 0c08f8cbec8bce20ff952f6a709cef368d0969fe..da533e3ac8b1ebd00d86ada2b4f9bdbc5f226179 100644
--- a/ecmascript/compiler/later_elimination.h
+++ b/ecmascript/compiler/later_elimination.h
@@ -19,37 +19,11 @@
 #include "ecmascript/compiler/circuit_builder.h"
 #include "ecmascript/compiler/gate_accessor.h"
 #include "ecmascript/compiler/graph_visitor.h"
+#include "ecmascript/compiler/base/depend_chain_helper.h"
 #include "ecmascript/mem/chunk_containers.h"

 namespace panda::ecmascript::kungfu {
-class LaterElimination;
-
-class DependChainNodes : public ChunkObject {
-public:
-    DependChainNodes(Chunk* chunk) : chunk_(chunk) {}
-    ~DependChainNodes() = default;
-
-    GateRef LookupNode(LaterElimination* elimination, GateRef gate);
-    DependChainNodes* UpdateNode(GateRef gate);
-    bool Equals(DependChainNodes* that);
-    void Merge(DependChainNodes* that);
-    void CopyFrom(DependChainNodes *other)
-    {
-        head_ = other->head_;
-        size_ = other->size_;
-    }
-private:
-    struct Node {
-        Node(GateRef gate, Node* next) : gate(gate), next(next) {}
-        GateRef gate;
-        Node *next;
-    };
-
-    Node *head_{nullptr};
-    size_t size_ {0};
-    Chunk* chunk_;
-};
-
+class DependChains;
 class LaterElimination : public GraphVisitor {
 public:
     LaterElimination(Circuit *circuit, bool enableLog, const std::string& name, Chunk* chunk)
@@ -73,7 +47,7 @@ private:
         return methodName_;
     }

-    DependChainNodes* GetDependChain(GateRef dependIn)
+    DependChains* GetDependChain(GateRef dependIn)
     {
         size_t idx = acc_.GetId(dependIn);
         ASSERT(idx <= circuit_->GetMaxGateId());
@@ -81,14 +55,14 @@ private:
     }

     GateRef VisitDependEntry(GateRef gate);
-    GateRef UpdateDependChain(GateRef gate, DependChainNodes* dependInfo);
+    GateRef UpdateDependChain(GateRef gate, DependChains* dependInfo);
     GateRef TryEliminateGate(GateRef gate);
     GateRef TryEliminateOther(GateRef gate);
     GateRef TryEliminateDependSelector(GateRef gate);

     bool enableLog_ {false};
     std::string methodName_;
-    ChunkVector<DependChainNodes*> dependChains_;
+    ChunkVector<DependChains*> dependChains_;
 };
 } // panda::ecmascript::kungfu
 #endif // ECMASCRIPT_COMPILER_LATER_ELIMINATION_H
\ No newline at end of file
diff --git a/ecmascript/compiler/lcr_lowering.cpp b/ecmascript/compiler/lcr_lowering.cpp
index 5b3edec66e608e65cd513edb2bd845251420bd85..72f7a06e5506a984f71f552eeb4900a7950115bc 100644
--- a/ecmascript/compiler/lcr_lowering.cpp
+++ b/ecmascript/compiler/lcr_lowering.cpp
@@ -14,6 +14,7 @@
  */
 #include "ecmascript/compiler/lcr_lowering.h"
 #include "ecmascript/compiler/bytecodes.h"
+#include "ecmascript/compiler/gate_meta_data.h"
 #include "ecmascript/global_env.h"
 #include "ecmascript/js_thread.h"
 #include "ecmascript/js_function.h"
@@ -53,6 +54,9 @@ void LCRLowering::Run()
            case OpCode::GET_GLOBAL_ENV:
                LowerGetGlobalEnv(gate);
                break;
+            case OpCode::GET_GLOBAL_ENV_OBJ:
+                LowerGetGlobalEnvObj(gate);
+                break;
            case OpCode::GET_GLOBAL_ENV_OBJ_HCLASS:
                LowerGetGlobalEnvObjHClass(gate);
                break;
@@ -71,12 +75,6 @@ void LCRLowering::Run()
            case OpCode::VALUE_CHECK_NEG_OVERFLOW:
                LowerValueCheckNegOverflow(gate);
                break;
-            case OpCode::NEGATIVE_INDEX_CHECK:
-                LowerNegativeIndexCheck(gate);
-                break;
-            case OpCode::LARGE_INDEX_CHECK:
-                LowerLargeIndexCheck(gate);
-                break;
            case OpCode::OVERFLOW_CHECK:
                LowerOverflowCheck(gate);
                break;
@@ -86,8 +84,17 @@ void LCRLowering::Run()
            case OpCode::INT32_DIV_WITH_CHECK:
                LowerInt32DivWithCheck(gate);
                break;
-            default:
+            case OpCode::LEX_VAR_IS_HOLE_CHECK:
+                LowerLexVarIsHoleCheck(gate);
+                break;
+            case OpCode::STORE_MEMORY:
+                LowerStoreMemory(gate);
                break;
+            case OpCode::CHECK_AND_CONVERT:
+                LowerCheckAndConvert(gate);
+                break;
+            default:
+                break;
        }
    }

@@ -111,7 +118,7 @@ void LCRLowering::LowerConvertHoleAsUndefined(GateRef gate)
    GateRef receiver = acc_.GetValueIn(gate, 0);
    DEFVAlUE(result, (&builder_), VariableType::JS_ANY(), receiver);
-    builder_.Branch(builder_.TaggedIsHole(*result), &returnUndefined, &exit);
+    builder_.Branch(builder_.TaggedIsHole(*result), &returnUndefined, &exit, 1, BranchWeight::DEOPT_WEIGHT);
    builder_.Bind(&returnUndefined);
    {
        result = builder_.UndefineConstant();
@@ -193,7 +200,17 @@ void LCRLowering::LowerHClassStableArrayCheck(GateRef gate)
    GateRef frameState = acc_.GetFrameState(gate);
    GateRef hclass = acc_.GetValueIn(gate, 0);
-    GateRef check = builder_.IsIsStableElementsByHClass(hclass);
+    GateRef check = Circuit::NullGate();
+    GateRef stableCheck = builder_.IsStableElements(hclass);
+    ArrayMetaDataAccessor accessor = acc_.GetArrayMetaDataAccessor(gate);
+    ElementsKind kind = accessor.GetElementsKind();
+    if (accessor.IsLoadElement() && !Elements::IsHole(kind)) {
+        GateRef elementsKindCheck = builder_.Equal(builder_.Int32(static_cast<int32_t>(kind)),
+                                                   builder_.GetElementsKindByHClass(hclass));
+        check = builder_.BoolAnd(stableCheck, elementsKindCheck);
+    } else {
+        check = stableCheck;
+    }
    builder_.DeoptCheck(check, frameState, DeoptType::NOTSARRAY);

    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
@@ -221,6 +238,16 @@ StateDepend LCRLowering::LowerConvert(StateDepend stateDepend, GateRef gate)
                result = builder_.NotEqual(value, builder_.Int32(0));
            }
            break;
+        case ValueType::UINT32:
+            if (dstType == ValueType::TAGGED_NUMBER) {
+                result = ConvertUInt32ToTaggedNumber(value, &exit);
+            } else if (dstType == ValueType::FLOAT64) {
+                result = ConvertUInt32ToFloat64(value);
+            } else {
+                ASSERT(dstType == ValueType::BOOL);
+                result = builder_.NotEqual(value, builder_.Int32(0));
+            }
+            break;
        case ValueType::FLOAT64:
            if (dstType == ValueType::TAGGED_DOUBLE) {
                result = ConvertFloat64ToTaggedDouble(value);
@@ -307,12 +334,16 @@ GateRef LCRLowering::ConvertTaggedNumberToFloat64(GateRef gate, Label *exit)
    return *result;
 }

-StateDepend LCRLowering::LowerCheckAndConvert(StateDepend stateDepend, GateRef gate, GateRef frameState)
+void LCRLowering::LowerCheckAndConvert(GateRef gate)
 {
-    Environment env(stateDepend.State(), stateDepend.Depend(), {}, circuit_, &builder_);
+    Environment env(gate, circuit_, &builder_);
+    GateRef frameState = acc_.GetFrameState(gate);
    ValueType srcType = acc_.GetSrcType(gate);
    Label exit(&builder_);
    switch (srcType) {
+        case ValueType::UINT32:
+            LowerCheckUInt32AndConvert(gate, frameState);
+            break;
        case ValueType::TAGGED_INT:
            LowerCheckTaggedIntAndConvert(gate, frameState);
            break;
@@ -325,10 +356,21 @@
        case ValueType::TAGGED_NUMBER:
            LowerCheckTaggedNumberAndConvert(gate, frameState, &exit);
            break;
+        case ValueType::BOOL:
+            LowerCheckSupportAndConvert(gate, frameState);
+            break;
        default:
            UNREACHABLE();
    }
-    return builder_.GetStateDepend();
+}
+
+void LCRLowering::LowerCheckUInt32AndConvert(GateRef gate, GateRef frameState)
+{
+    GateRef value = acc_.GetValueIn(gate, 0);
+    GateRef upperBound = builder_.Int32(INT32_MAX);
+    GateRef check = builder_.Int32UnsignedLessThanOrEqual(value, upperBound);
+    builder_.DeoptCheck(check, frameState, DeoptType::INT32OVERFLOW);
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), value);
 }

 void LCRLowering::LowerCheckTaggedIntAndConvert(GateRef gate, GateRef frameState)
@@ -381,6 +423,24 @@ void LCRLowering::LowerCheckTaggedNumberAndConvert(GateRef gate, GateRef frameSt
    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
 }

+void LCRLowering::LowerCheckSupportAndConvert(GateRef gate, GateRef frameState)
+{
+    ValueType dstType = acc_.GetDstType(gate);
+    ASSERT(dstType == ValueType::INT32 || dstType == ValueType::FLOAT64);
+    bool support = acc_.IsConvertSupport(gate);
+    GateRef value = acc_.GetValueIn(gate, 0);
+
+    GateRef result = Circuit::NullGate();
+    if (dstType == ValueType::INT32) {
+        builder_.DeoptCheck(builder_.Boolean(support), frameState, DeoptType::NOTINT);
+        result = builder_.BooleanToInt32(value);
+    } else {
+        builder_.DeoptCheck(builder_.Boolean(support), frameState, DeoptType::NOTDOUBLE);
+        result = builder_.BooleanToFloat64(value);
+    }
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
+}
+
 void LCRLowering::LowerCheckTaggedBoolAndConvert(GateRef gate, GateRef frameState)
 {
    GateRef value = acc_.GetValueIn(gate, 0);
@@ -407,11 +467,33 @@ GateRef LCRLowering::ConvertInt32ToFloat64(GateRef gate)
    return builder_.ChangeInt32ToFloat64(gate);
 }

+GateRef LCRLowering::ConvertUInt32ToFloat64(GateRef gate)
+{
+    return builder_.ChangeUInt32ToFloat64(gate);
+}
+
 GateRef LCRLowering::ConvertInt32ToTaggedInt(GateRef gate)
 {
    return builder_.Int32ToTaggedPtr(gate);
 }

+GateRef LCRLowering::ConvertUInt32ToTaggedNumber(GateRef gate, Label *exit)
+{
+    Label isOverFlow(&builder_);
+    Label notOverFlow(&builder_);
+    GateRef upperBound = builder_.Int32(INT32_MAX);
+    DEFVAlUE(taggedVal, (&builder_), VariableType::JS_ANY(), builder_.HoleConstant());
+    builder_.Branch(builder_.Int32UnsignedLessThanOrEqual(gate, upperBound), &notOverFlow, &isOverFlow);
+    builder_.Bind(&notOverFlow);
+    taggedVal = builder_.Int32ToTaggedPtr(gate);
+    builder_.Jump(exit);
+    builder_.Bind(&isOverFlow);
+    taggedVal = builder_.DoubleToTaggedDoublePtr(builder_.ChangeUInt32ToFloat64(gate));
+    builder_.Jump(exit);
+    builder_.Bind(exit);
+    return *taggedVal;
+}
+
 GateRef LCRLowering::ConvertFloat64ToInt32(GateRef gate, Label *exit)
 {
    return builder_.DoubleToInt(gate, exit);
@@ -457,6 +539,16 @@ void LCRLowering::LowerGetGlobalEnv(GateRef gate)
    acc_.ReplaceGate(gate, Circuit::NullGate(), builder_.GetDepend(), glueGlobalEnv);
 }

+void LCRLowering::LowerGetGlobalEnvObj(GateRef gate)
+{
+    Environment env(gate, circuit_, &builder_);
+    GateRef globalEnv = acc_.GetValueIn(gate, 0);
+    size_t index = acc_.GetIndex(gate);
+    GateRef offset = builder_.IntPtr(GlobalEnv::HEADER_SIZE + JSTaggedValue::TaggedTypeSize() * index);
+    GateRef object = builder_.Load(VariableType::JS_ANY(), globalEnv, offset);
+    acc_.ReplaceGate(gate, Circuit::NullGate(), builder_.GetDepend(), object);
+}
+
 void LCRLowering::LowerGetGlobalEnvObjHClass(GateRef gate)
 {
    Environment env(gate, circuit_, &builder_);
@@ -547,34 +639,23 @@ void LCRLowering::LowerFloat64CheckRightIsZero(GateRef gate)
    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
 }

-void LCRLowering::LowerValueCheckNegOverflow(GateRef gate)
+void LCRLowering::LowerLexVarIsHoleCheck(GateRef gate)
 {
    Environment env(gate, circuit_, &builder_);
    GateRef frameState = acc_.GetFrameState(gate);
    GateRef value = acc_.GetValueIn(gate, 0);
-    GateRef valueNotZero = builder_.NotEqual(value, builder_.Int32(0));
-    builder_.DeoptCheck(valueNotZero, frameState, DeoptType::NOTNEGOV);
+    GateRef valueIsNotHole = builder_.TaggedIsNotHole(value);
+    builder_.DeoptCheck(valueIsNotHole, frameState, DeoptType::LEXVARISHOLE);
    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
 }

-void LCRLowering::LowerNegativeIndexCheck(GateRef gate)
-{
-    Environment env(gate, circuit_, &builder_);
-    GateRef frameState = acc_.GetFrameState(gate);
-    GateRef index = acc_.GetValueIn(gate, 0);
-    GateRef condition = builder_.Int32LessThanOrEqual(builder_.Int32(0), index);
-    builder_.DeoptCheck(condition, frameState, DeoptType::NEGTIVEINDEX);
-    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
-}
-
-void LCRLowering::LowerLargeIndexCheck(GateRef gate)
+void LCRLowering::LowerValueCheckNegOverflow(GateRef gate)
 {
    Environment env(gate, circuit_, &builder_);
    GateRef frameState = acc_.GetFrameState(gate);
-    GateRef index = acc_.GetValueIn(gate, 0);
-    GateRef length = acc_.GetValueIn(gate, 1);
-    GateRef condition = builder_.Int32LessThan(index, length);
-    builder_.DeoptCheck(condition, frameState, DeoptType::LARGEINDEX);
+    GateRef value = acc_.GetValueIn(gate, 0);
+    GateRef valueNotZero = builder_.NotEqual(value, builder_.Int32(0));
+    builder_.DeoptCheck(valueNotZero, frameState, DeoptType::NOTNEGOV);
    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
 }

@@ -612,12 +693,23 @@ void LCRLowering::LowerInt32DivWithCheck(GateRef gate)
    GateRef condition = builder_.BoolOr(rightGreaterZero, builder_.BoolAnd(rightLessZero, leftNotZero));
    builder_.DeoptCheck(condition, frameState, DeoptType::DIVZERO);
    result = builder_.BinaryArithmetic(circuit_->Sdiv(), MachineType::I32, left, right, GateType::NJSValue());
-    GateRef truncated = builder_.BinaryArithmetic(circuit_->Mul(), MachineType::I32, result, right);
+    GateRef truncated = builder_.BinaryArithmetic(circuit_->Mul(),
+                                                  MachineType::I32, result, right, GateType::NJSValue());
    GateRef overCheck = builder_.Int32Equal(truncated, left);
    builder_.DeoptCheck(overCheck, frameState, DeoptType::NOTINT);
    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
 }

+void LCRLowering::LowerStoreMemory(GateRef gate)
+{
+    Environment env(gate, circuit_, &builder_);
+    GateRef receiver = acc_.GetValueIn(gate, 0);
+    GateRef index = acc_.GetValueIn(gate, 1);
+    GateRef value = acc_.GetValueIn(gate, 2);
+    builder_.Store(VariableType::VOID(), glue_, receiver, index, value);
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
+}
+
 void LCRLowering::InitializeWithSpeicalValue(Label *exit, GateRef object, GateRef glue, GateRef value,
                                             GateRef start, GateRef end)
 {
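Note: the new UINT32 paths all encode one boxing rule: an unsigned 32-bit value is only representable as a tagged int while it is at most INT32_MAX. `LowerCheckUInt32AndConvert` deoptimizes past that bound, while `ConvertUInt32ToTaggedNumber` instead falls back to boxing a tagged double. The predicate, stated as plain C++:

    #include <cstdint>

    // True when a uint32 payload still fits the tagged-int (int32) range.
    bool FitsTaggedInt(uint32_t value)
    {
        return value <= static_cast<uint32_t>(INT32_MAX);
    }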
diff --git a/ecmascript/compiler/lcr_lowering.h b/ecmascript/compiler/lcr_lowering.h
index cd9ae449e1f58b531c8cb06010e547675e876856..ce2fbd85bc1f3e4cb01c7af619060afc8b6145d1 100644
--- a/ecmascript/compiler/lcr_lowering.h
+++ b/ecmascript/compiler/lcr_lowering.h
@@ -36,7 +36,6 @@ public:
        return enableLog_;
    }
    void Run();
-    StateDepend LowerCheckAndConvert(StateDepend stateDepend, GateRef gate, GateRef frameState);
    StateDepend LowerConvert(StateDepend stateDepend, GateRef gate);
 private:
    const std::string& GetMethodName() const
@@ -52,26 +51,32 @@ private:
    void LowerLoadConstOffset(GateRef gate);
    void LowerStoreConstOffset(GateRef gate);
    void LowerConvertHoleAsUndefined(GateRef gate);
+    void LowerCheckAndConvert(GateRef gate);
+    void LowerCheckUInt32AndConvert(GateRef gate, GateRef frameState);
    void LowerCheckTaggedIntAndConvert(GateRef gate, GateRef frameState);
    void LowerCheckTaggedDoubleAndConvert(GateRef gate, GateRef frameState, Label *exit);
    void LowerCheckTaggedNumberAndConvert(GateRef gate, GateRef frameState, Label *exit);
    void LowerCheckTaggedBoolAndConvert(GateRef gate, GateRef frameState);
+    void LowerCheckSupportAndConvert(GateRef gate, GateRef frameState);
    void LowerGetGlobalEnv(GateRef gate);
+    void LowerGetGlobalEnvObj(GateRef gate);
    void LowerGetGlobalEnvObjHClass(GateRef gate);
    void LowerGetGlobalConstantValue(GateRef gate);
    void LowerHeapAllocate(GateRef gate);
    void LowerInt32CheckRightIsZero(GateRef gate);
    void LowerFloat64CheckRightIsZero(GateRef gate);
    void LowerValueCheckNegOverflow(GateRef gate);
-    void LowerNegativeIndexCheck(GateRef gate);
-    void LowerLargeIndexCheck(GateRef gate);
    void LowerOverflowCheck(GateRef gate);
    void LowerInt32UnsignedUpperBoundCheck(GateRef gate);
    void LowerInt32DivWithCheck(GateRef gate);
+    void LowerLexVarIsHoleCheck(GateRef gate);
+    void LowerStoreMemory(GateRef gate);
    GateRef ConvertBoolToTaggedBoolean(GateRef gate);
    GateRef ConvertInt32ToFloat64(GateRef gate);
+    GateRef ConvertUInt32ToFloat64(GateRef gate);
    GateRef ConvertInt32ToTaggedInt(GateRef gate);
+    GateRef ConvertUInt32ToTaggedNumber(GateRef gate, Label *exit);
    GateRef ConvertFloat64ToBool(GateRef gate);
    GateRef ConvertFloat64ToInt32(GateRef gate, Label *exit);
    GateRef ConvertFloat64ToTaggedDouble(GateRef gate);
diff --git a/ecmascript/compiler/litecg_codegen.cpp b/ecmascript/compiler/litecg_codegen.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4653aa4312149c614370b7f932b6c2af273a714a
--- /dev/null
+++ b/ecmascript/compiler/litecg_codegen.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ecmascript/compiler/litecg_codegen.h"
+#if defined(PANDA_TARGET_MACOS) || defined(PANDA_TARGET_IOS)
+#include "ecmascript/base/llvm_helper.h"
+#endif
+
+#include
+#include
+#include
+
+#include "ecmascript/compiler/call_signature.h"
+#include "ecmascript/compiler/compiler_log.h"
+#include "ecmascript/compiler/debug_info.h"
+#include "ecmascript/compiler/litecg_ir_builder.h"
+#include "ecmascript/ecma_macros.h"
+#include "ecmascript/mem/region.h"
+#include "ecmascript/object_factory.h"
+#include "ecmascript/stackmap/llvm_stackmap_parser.h"
+#include "lmir_builder.h"
+#include "litecg.h"
+
+namespace panda::ecmascript::kungfu {
+class CompilerLog;
+
+using namespace panda::ecmascript;
+
+LiteCGAssembler::LiteCGAssembler(LMIRModule &module) : lmirModule(module) {}
+
+static uint8_t *AllocateCodeSection(void *object, uint32_t size, [[maybe_unused]] uint32_t alignment,
+                                    const std::string &sectionName)
+{
+    struct CodeInfo &state = *static_cast<struct CodeInfo *>(object);
+    return state.AllocaCodeSection(size, sectionName.c_str());
+}
+
+static void SaveFunc2Addr(void *object, std::string funcName, uint32_t address)
+{
+    struct CodeInfo &state = *static_cast<struct CodeInfo *>(object);
+    state.SaveFunc2Addr(funcName, address);
+}
+
+void LiteCGAssembler::Run([[maybe_unused]] const CompilerLog &log, [[maybe_unused]] bool fastCompileMode)
+{
+    maple::litecg::LiteCG liteCG(*lmirModule.GetModule());
+    liteCG.SetupLiteCGEmitMemoryManager(&codeInfo_, AllocateCodeSection, SaveFunc2Addr);
+    liteCG.DoCG();
+}
+
+void LiteCGIRGeneratorImpl::GenerateCodeForStub(Circuit *circuit, const ControlFlowGraph &graph, size_t index,
+                                                const CompilationConfig *cfg)
+{
+    (void)circuit;
+    (void)graph;
+    (void)index;
+    (void)cfg;
+    // LLVMValueRef function = module_->GetFunction(index);
+    // const CallSignature* cs = module_->GetCSign(index);
+    // LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, cs->GetCallConv(),
+    //                       enableLog_, false, cs->GetName());
+    // builder.Build();
+}
+
+void LiteCGIRGeneratorImpl::GenerateCode(Circuit *circuit, const ControlFlowGraph &graph, const CompilationConfig *cfg,
+                                         const panda::ecmascript::MethodLiteral *methodLiteral,
+                                         const JSPandaFile *jsPandaFile, const std::string &methodName)
+{
+    circuit->SetFrameType(FrameType::OPTIMIZED_JS_FUNCTION_FRAME);
+    CallSignature::CallConv conv;
+    if (methodLiteral->IsFastCall()) {
+        conv = CallSignature::CallConv::CCallConv;
+    } else {
+        conv = CallSignature::CallConv::WebKitJSCallConv;
+    }
+    LiteCGIRBuilder builder(&graph, circuit, module_, cfg, conv,
+                            enableLog_, methodLiteral, jsPandaFile, methodName);
+    builder.Build();
+}
+} // namespace panda::ecmascript::kungfu
+
diff --git a/ecmascript/compiler/litecg_codegen.h b/ecmascript/compiler/litecg_codegen.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0fc6e9ac09c0ad3c3e0f68fb1619f8f2a57716d
--- /dev/null
+++ b/ecmascript/compiler/litecg_codegen.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_LITECG_CODEGEN_H
+#define ECMASCRIPT_COMPILER_LITECG_CODEGEN_H
+
+#include "ecmascript/compiler/binary_section.h"
+#include "ecmascript/compiler/code_generator.h"
+#include "ecmascript/compiler/litecg_ir_builder.h"
+
+namespace panda::ecmascript::kungfu {
+class CompilerLog;
+
+class LiteCGAssembler : public Assembler {
+public:
+    explicit LiteCGAssembler(LMIRModule &module);
+    virtual ~LiteCGAssembler() = default;
+    void Run(const CompilerLog &log, bool fastCompileMode) override;
+private:
+    LMIRModule &lmirModule;
+};
+
+class LiteCGIRGeneratorImpl : public CodeGeneratorImpl {
+public:
+    LiteCGIRGeneratorImpl(LMIRModule *module, bool enableLog)
+        : module_(module), enableLog_(enableLog) {}
+    ~LiteCGIRGeneratorImpl() override = default;
+    void GenerateCodeForStub(Circuit *circuit, const ControlFlowGraph &graph, size_t index,
+                             const CompilationConfig *cfg) override;
+    void GenerateCode(Circuit *circuit, const ControlFlowGraph &graph, const CompilationConfig *cfg,
+                      const MethodLiteral *methodLiteral, const JSPandaFile *jsPandaFile,
+                      const std::string &methodName) override;
+
+    bool IsLogEnabled() const
+    {
+        return enableLog_;
+    }
+
+    LMIRModule* GetModule() const
+    {
+        return module_;
+    }
+
+private:
+    LMIRModule *module_;
+    bool enableLog_ {false};
+};
+} // namespace panda::ecmascript::kungfu
+#endif // ECMASCRIPT_COMPILER_LITECG_CODEGEN_H
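Note: `LiteCGAssembler::Run` registers two callbacks with LiteCG so that emitted code sections and function entry offsets land in the runtime-owned `CodeInfo` buffers instead of LiteCG-owned memory. Their shape, mirroring the `AllocateCodeSection`/`SaveFunc2Addr` definitions above (a sketch of the contract, not the LiteCG API itself):

    #include <cstdint>
    #include <string>

    // Allocate `size` bytes for a named section inside the runtime's code space.
    using AllocSectionFn = uint8_t *(*)(void *object, uint32_t size, uint32_t alignment,
                                        const std::string &sectionName);
    // Record the entry offset of a finished function keyed by its symbol name.
    using SaveFuncAddrFn = void (*)(void *object, std::string funcName, uint32_t address);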
diff --git a/ecmascript/compiler/litecg_ir_builder.cpp b/ecmascript/compiler/litecg_ir_builder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0534a5cfee5d58b9fbe6103c00767aa3d0dc4c55
--- /dev/null
+++ b/ecmascript/compiler/litecg_ir_builder.cpp
@@ -0,0 +1,2266 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ecmascript/compiler/litecg_ir_builder.h"
+
+#include
+#include
+
+#include "ecmascript/compiler/argument_accessor.h"
+#include "ecmascript/compiler/bc_call_signature.h"
+#include "ecmascript/compiler/circuit.h"
+#include "ecmascript/compiler/call_signature.h"
+#include "ecmascript/compiler/common_stubs.h"
+#include "ecmascript/compiler/debug_info.h"
+#include "ecmascript/compiler/gate.h"
+#include "ecmascript/compiler/rt_call_signature.h"
+#include "ecmascript/deoptimizer/deoptimizer.h"
+#include "ecmascript/frames.h"
+#include "ecmascript/js_thread.h"
+#include "ecmascript/method.h"
+#include "lmir_builder.h"
+
+namespace panda::ecmascript::kungfu {
+#define __ lmirBuilder_->
+using FunctionBuilder = maple::litecg::LMIRBuilder::FunctionBuilder;
+using SwitchBuilder = maple::litecg::LMIRBuilder::SwitchBuilder;
+using Function = maple::litecg::Function;
+using LMIRBuilder = maple::litecg::LMIRBuilder;
+using BB = maple::litecg::BB;
+using Expr = maple::litecg::Expr;
+using Stmt = maple::litecg::Stmt;
+using Const = maple::litecg::Const;
+using LiteCGType = maple::litecg::Type;
+using IntCmpCondition = maple::litecg::IntCmpCondition;
+using Var = maple::litecg::Var;
+using PregIdx = maple::litecg::PregIdx;
+using IntrinsicId = maple::litecg::IntrinsicId;
+
+using StubIdType = std::variant;
+
+LiteCGIRBuilder::LiteCGIRBuilder(const std::vector<std::vector<GateRef>> *schedule, Circuit *circuit,
+                                 LMIRModule *module, const CompilationConfig *cfg,
+                                 CallSignature::CallConv callConv, bool enableLog,
+                                 const panda::ecmascript::MethodLiteral *methodLiteral,
+                                 const JSPandaFile *jsPandaFile,
+                                 const std::string &funcName)
+    : scheduledGates_(schedule), circuit_(circuit), lmirModule_(module), compCfg_(cfg), callConv_(callConv),
+      enableLog_(enableLog), methodLiteral_(methodLiteral), jsPandaFile_(jsPandaFile), funcName_(funcName),
+      acc_(circuit)
+{
+    lmirBuilder_ = new LMIRBuilder(*module->GetModule());
+    ASSERT(compCfg_->Is64Bit());
+    slotSize_ = sizeof(uint64_t);
+    slotType_ = __ i64Type;
+    InitializeHandlers();
+}
+
+LiteCGIRBuilder::~LiteCGIRBuilder()
+{
+    delete lmirBuilder_;
+}
+
+void LiteCGIRBuilder::BuildInstID2BBIDMap()
+{
+    for (size_t bbIdx = 0; bbIdx < scheduledGates_->size(); bbIdx++) {
+        const std::vector<GateRef> &bb = scheduledGates_->at(bbIdx);
+        for (size_t instIdx = bb.size(); instIdx > 0; instIdx--) {
+            GateId gateId = acc_.GetId(bb[instIdx - 1]);
+            instID2bbID_[gateId] = static_cast<int>(bbIdx);
+        }
+    }
+}
+
+BB &LiteCGIRBuilder::GetOrCreateBB(int bbID)
+{
+    auto itr = bbID2BB_.find(bbID);
+    if (itr != bbID2BB_.end()) {
+        return *(itr->second);
+    }
+    BB &bb = __ CreateBB();
+    bbID2BB_[bbID] = &bb;
+    return bb;
+}
+
+BB &LiteCGIRBuilder::GetFirstBB()
+{
+    // Obtain the first BB (i.e. the BB with id zero) for inserting prologue information
+    return GetOrCreateBB(0);
+}
+
+BB &LiteCGIRBuilder::CreateBB()
+{
+    BB &bb = __ CreateBB(false);
+    return bb;
+}
+
+LiteCGType *LiteCGIRBuilder::ConvertLiteCGTypeFromGate(GateRef gate) const
+{
+    if (acc_.IsGCRelated(gate)) {
+        return __ i64RefType;
+    }
+
+    MachineType t = acc_.GetMachineType(gate);
+    switch (t) {
+        case MachineType::NOVALUE:
+            return __ voidType;
+        case MachineType::I1:
+            return __ u1Type;
+        case MachineType::I8:
+            return __ i8Type;
+        case MachineType::I16:
+            return __ i16Type;
+        case MachineType::I32:
+            return __ i32Type;
+        case MachineType::I64:
+            return __ i64Type;
+        case MachineType::F32:
+            return __ f32Type;
+        case MachineType::F64:
+            return __ f64Type;
+        case MachineType::ARCH:
+            return __ i64Type;
+        default:
+            LOG_ECMA(FATAL) << "this branch is unreachable";
+            UNREACHABLE();
+    }
+}
+
+void LiteCGIRBuilder::AddFunc()
+{
+    // setup function type
+    std::string funcName = lmirModule_->GetFuncName(methodLiteral_, jsPandaFile_);
+    FunctionBuilder funcBuilder = __ DefineFunction(funcName);
+    funcBuilder.Param(__ i64Type, "glue");
+    if (!methodLiteral_->IsFastCall()) {
+        funcBuilder.Param(__ i64Type, "actualArgc")
+            .Param(__ i64RefType, "func")
+            .Param(__ i64RefType, "new_target")
+            .Param(__ i64RefType, "this_object");
+        for (uint32_t i = 0; i < methodLiteral_->GetNumArgsWithCallField(); ++i) {
+            funcBuilder.Param(__ i64RefType, "param" + std::to_string(i));
+        }
+    } else {
+        funcBuilder.Param(__ i64RefType, "func")
+            .Param(__ i64RefType, "this_object");
+        for (uint32_t i = 0; i < methodLiteral_->GetNumArgsWithCallField(); ++i) {
+            funcBuilder.Param(__ i64RefType, "param" + std::to_string(i));
+        }
+    }
+
+    funcBuilder.CallConvAttribute(ConvertCallAttr(callConv_));
+    Function &function = funcBuilder.Return(__ i64RefType).Done();
+    __ SetCurFunc(function);
+    GenPrologue(function);
+    auto offsetInPandaFile = methodLiteral_->GetMethodId().GetOffset();
+    lmirModule_->SetFunction(offsetInPandaFile, funcName, methodLiteral_->IsFastCall());
+}
+
+void LiteCGIRBuilder::Build()
+{
+    BuildInstID2BBIDMap();
+    AddFunc();
+    std::cout << "============== building litecg ir=======" << std::endl;
+
+    for (size_t bbIdx = 0; bbIdx < scheduledGates_->size(); bbIdx++) {
+        const std::vector<GateRef> &bb = scheduledGates_->at(bbIdx);
+
+        for (size_t instIdx = bb.size(); instIdx > 0; instIdx--) {
+            GateRef gate = bb[instIdx - 1];
+            auto found = opHandlers_.find(acc_.GetOpCode(gate));
+            if (found != opHandlers_.end()) {
+                (this->*(found->second))(gate);
+                continue;
+            }
+            if (illegalOpHandlers_.find(acc_.GetOpCode(gate)) == illegalOpHandlers_.end()) {
+                std::cout << "========can't process opcode: " << acc_.GetOpCode(gate) << std::endl;
+            }
+        }
+    }
+
+    for (auto &pair : bbID2unmergedPhis_) {
+        for (auto &desc : pair.second) {
+            Expr value = gate2Expr_[desc.operand];
+            Stmt &phiAssign = __ Dassign(value, desc.phi);
+            __ AppendStmtBeforeBranch(GetOrCreateBB(desc.predBBId), phiAssign);
+        }
+    }
+    bbID2unmergedPhis_.clear();
+
+    __ DumpIRToFile("fini.mpl");
+}
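Note: `Build` walks each scheduled block and only afterwards flushes `bbID2unmergedPhis_`: phi inputs whose predecessor block had not been emitted yet are queued per predecessor and turned into assignments ahead of that predecessor's branch once every block exists. A reduced sketch of that bookkeeping (names mirror the members above; types simplified):

    #include <map>
    #include <vector>

    struct PhiDesc {
        int predBBId;   // predecessor block that must perform the assignment
        int operand;    // stand-in for the incoming GateRef
        int phi;        // stand-in for the phi's storage location
    };
    // Keyed by block id; drained after all blocks have been generated.
    using PendingPhis = std::map<int, std::vector<PhiDesc>>;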
reservedSlotsSize = OptimizedJSFunctionFrame::ComputeReservedJSFuncOffset(slotSize_); + __ SetFuncFrameResverdSlot(reservedSlotsSize); + auto ArgList = circuit_->GetArgRoot(); + auto uses = acc_.Uses(ArgList); + for (auto useIt = uses.begin(); useIt != uses.end(); ++useIt) { + int argth = static_cast(acc_.TryGetValue(*useIt)); + Var &value = __ GetParam(function, argth); + int funcIndex = 0; + if (methodLiteral_->IsFastCall()) { + frameType = FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME; + funcIndex = static_cast(FastCallArgIdx::FUNC); + } else { + funcIndex = static_cast(CommonArgIdx::FUNC); + } + if (argth == funcIndex) { + SaveJSFuncOnOptJSFuncFrame(value); + SaveFrameTypeOnFrame(frameType); + } + } + } else { + LOG_COMPILER(FATAL) << "frameType interpret type error !"; + ASSERT_PRINT(static_cast(frameType), "is not support !"); + } +} + +void LiteCGIRBuilder::SaveJSFuncOnOptJSFuncFrame(maple::litecg::Var &value) { + ASSERT(circuit_->GetFrameType() == FrameType::OPTIMIZED_JS_FUNCTION_FRAME); + Expr fpAddr = CallingFp(false); + Expr frameAddr = __ Cvt(fpAddr.GetType(), __ i64Type, fpAddr); + size_t reservedOffset = OptimizedJSFunctionFrame::ComputeReservedJSFuncOffset(slotSize_); + Expr frameJSFuncSlotAddr = __ Sub(frameAddr.GetType(), frameAddr, __ ConstVal(__ CreateIntConst(slotType_, reservedOffset))); + Expr jsFuncAddr = __ Cvt(frameJSFuncSlotAddr.GetType(), __ CreatePtrType(slotType_), frameJSFuncSlotAddr); + Expr jsFuncValue = __ Cvt(__ i64PtrType, slotType_, __ Dread(value)); + auto &stmt = __ Iassign(jsFuncValue, jsFuncAddr, jsFuncAddr.GetType()); + __ AppendStmt(GetFirstBB(), stmt); +} + + +void LiteCGIRBuilder::SaveFrameTypeOnFrame(FrameType frameType) { + Expr fpAddr = CallingFp(false); + Expr frameAddr = __ Cvt(fpAddr.GetType(), __ i64Type, fpAddr); + Expr frameJSFuncSlotAddr = __ Sub(frameAddr.GetType(), frameAddr, __ ConstVal(__ CreateIntConst(slotType_, slotSize_))); + Expr jsFuncAddr = __ Cvt(frameJSFuncSlotAddr.GetType(), __ CreatePtrType(slotType_), frameJSFuncSlotAddr); + Expr liteFramType = __ ConstVal(__ CreateIntConst(__ i64Type, static_cast(frameType))); + auto &stmt = __ Iassign(liteFramType, jsFuncAddr, jsFuncAddr.GetType()); + __ AppendStmt(GetFirstBB(), stmt); +} + +Expr LiteCGIRBuilder::GetGlue(const std::vector &inList) { + GateRef glueGate = inList[static_cast(CallInputs::GLUE)]; + auto itr = gate2Expr_.find(glueGate); + if (itr != gate2Expr_.end()) { + return itr->second; + } + Expr glue = __ Dread(__ GetLocalVar("glue")); + gate2Expr_[glueGate] = glue; + return glue; +} + +void LiteCGIRBuilder::InitializeHandlers() +{ + opHandlers_ = { + {OpCode::STATE_ENTRY, &LiteCGIRBuilder::HandleGoto}, + {OpCode::RETURN, &LiteCGIRBuilder::HandleReturn}, + {OpCode::RETURN_VOID, &LiteCGIRBuilder::HandleReturnVoid}, + {OpCode::IF_BRANCH, &LiteCGIRBuilder::HandleBranch}, + // {OpCode::SWITCH_BRANCH, &LiteCGIRBuilder::HandleSwitch}, + {OpCode::ORDINARY_BLOCK, &LiteCGIRBuilder::HandleGoto}, + {OpCode::IF_TRUE, &LiteCGIRBuilder::HandleGoto}, + {OpCode::IF_FALSE, &LiteCGIRBuilder::HandleGoto}, + {OpCode::SWITCH_CASE, &LiteCGIRBuilder::HandleGoto}, + {OpCode::MERGE, &LiteCGIRBuilder::HandleGoto}, + {OpCode::DEFAULT_CASE, &LiteCGIRBuilder::HandleGoto}, + {OpCode::LOOP_BEGIN, &LiteCGIRBuilder::HandleGoto}, + {OpCode::LOOP_BACK, &LiteCGIRBuilder::HandleGoto}, + {OpCode::VALUE_SELECTOR, &LiteCGIRBuilder::HandlePhi}, + {OpCode::RUNTIME_CALL, &LiteCGIRBuilder::HandleRuntimeCall}, + {OpCode::RUNTIME_CALL_WITH_ARGV, &LiteCGIRBuilder::HandleRuntimeCallWithArgv}, + 
+void LiteCGIRBuilder::InitializeHandlers()
+{
+    opHandlers_ = {
+        {OpCode::STATE_ENTRY, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::RETURN, &LiteCGIRBuilder::HandleReturn},
+        {OpCode::RETURN_VOID, &LiteCGIRBuilder::HandleReturnVoid},
+        {OpCode::IF_BRANCH, &LiteCGIRBuilder::HandleBranch},
+        // {OpCode::SWITCH_BRANCH, &LiteCGIRBuilder::HandleSwitch},
+        {OpCode::ORDINARY_BLOCK, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::IF_TRUE, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::IF_FALSE, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::SWITCH_CASE, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::MERGE, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::DEFAULT_CASE, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::LOOP_BEGIN, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::LOOP_BACK, &LiteCGIRBuilder::HandleGoto},
+        {OpCode::VALUE_SELECTOR, &LiteCGIRBuilder::HandlePhi},
+        {OpCode::RUNTIME_CALL, &LiteCGIRBuilder::HandleRuntimeCall},
+        {OpCode::RUNTIME_CALL_WITH_ARGV, &LiteCGIRBuilder::HandleRuntimeCallWithArgv},
+        {OpCode::NOGC_RUNTIME_CALL, &LiteCGIRBuilder::HandleCall},
+        {OpCode::CALL_OPTIMIZED, &LiteCGIRBuilder::HandleCall},
+        {OpCode::FAST_CALL_OPTIMIZED, &LiteCGIRBuilder::HandleCall},
+        {OpCode::CALL, &LiteCGIRBuilder::HandleCall},
+        // {OpCode::BYTECODE_CALL, &LiteCGIRBuilder::HandleBytecodeCall},
+        // {OpCode::DEBUGGER_BYTECODE_CALL, &LiteCGIRBuilder::HandleBytecodeCall},
+        {OpCode::BUILTINS_CALL, &LiteCGIRBuilder::HandleCall},
+        {OpCode::BUILTINS_CALL_WITH_ARGV, &LiteCGIRBuilder::HandleCall},
+        // {OpCode::ALLOCA, &LiteCGIRBuilder::HandleAlloca},
+        {OpCode::ARG, &LiteCGIRBuilder::HandleParameter},
+        {OpCode::CONSTANT, &LiteCGIRBuilder::HandleConstant},
+        // {OpCode::CONSTSTRING, &LiteCGIRBuilder::HandleConstString},
+        // {OpCode::RELOCATABLE_DATA, &LiteCGIRBuilder::HandleRelocatableData},
+        {OpCode::ZEXT, &LiteCGIRBuilder::HandleZExtInt},
+        {OpCode::SEXT, &LiteCGIRBuilder::HandleSExtInt},
+        {OpCode::TRUNC, &LiteCGIRBuilder::HandleCastIntXToIntY},
+        {OpCode::FEXT, &LiteCGIRBuilder::HandleFPExt},
+        {OpCode::FTRUNC, &LiteCGIRBuilder::HandleFPTrunc},
+        {OpCode::REV, &LiteCGIRBuilder::HandleIntRev},
+        {OpCode::ADD, &LiteCGIRBuilder::HandleAdd},
+        {OpCode::SUB, &LiteCGIRBuilder::HandleSub},
+        {OpCode::MUL, &LiteCGIRBuilder::HandleMul},
+        {OpCode::FDIV, &LiteCGIRBuilder::HandleFloatDiv},
+        {OpCode::SDIV, &LiteCGIRBuilder::HandleIntDiv},
+        {OpCode::UDIV, &LiteCGIRBuilder::HandleUDiv},
+        {OpCode::AND, &LiteCGIRBuilder::HandleIntAnd},
+        {OpCode::OR, &LiteCGIRBuilder::HandleIntOr},
+        {OpCode::XOR, &LiteCGIRBuilder::HandleIntXor},
+        {OpCode::LSR, &LiteCGIRBuilder::HandleIntLsr},
+        {OpCode::ASR, &LiteCGIRBuilder::HandleIntAsr},
+        {OpCode::ICMP, &LiteCGIRBuilder::HandleCmp},
+        // {OpCode::FCMP, &LiteCGIRBuilder::HandleCmp},
+        {OpCode::LOAD, &LiteCGIRBuilder::HandleLoad},
+        {OpCode::STORE, &LiteCGIRBuilder::HandleStore},
+        {OpCode::SIGNED_INT_TO_FLOAT, &LiteCGIRBuilder::HandleChangeInt32ToDouble},
+        {OpCode::UNSIGNED_INT_TO_FLOAT, &LiteCGIRBuilder::HandleChangeUInt32ToDouble},
+        {OpCode::FLOAT_TO_SIGNED_INT, &LiteCGIRBuilder::HandleChangeDoubleToInt32},
+        {OpCode::TAGGED_TO_INT64, &LiteCGIRBuilder::HandleChangeTaggedPointerToInt64},
+        {OpCode::INT64_TO_TAGGED, &LiteCGIRBuilder::HandleChangeInt64ToTagged},
+        {OpCode::BITCAST, &LiteCGIRBuilder::HandleBitCast},
+        {OpCode::LSL, &LiteCGIRBuilder::HandleIntLsl},
+        {OpCode::SMOD, &LiteCGIRBuilder::HandleMod},
+        {OpCode::FMOD, &LiteCGIRBuilder::HandleMod},
+        {OpCode::DEOPT_CHECK, &LiteCGIRBuilder::HandleDeoptCheck},
+        {OpCode::TRUNC_FLOAT_TO_INT64, &LiteCGIRBuilder::HandleTruncFloatToInt},
+        {OpCode::TRUNC_FLOAT_TO_INT32, &LiteCGIRBuilder::HandleTruncFloatToInt},
+        {OpCode::ADD_WITH_OVERFLOW, &LiteCGIRBuilder::HandleAddWithOverflow},
+        {OpCode::SUB_WITH_OVERFLOW, &LiteCGIRBuilder::HandleSubWithOverflow},
+        {OpCode::MUL_WITH_OVERFLOW, &LiteCGIRBuilder::HandleMulWithOverflow},
+        {OpCode::EXTRACT_VALUE, &LiteCGIRBuilder::HandleExtractValue},
+        {OpCode::SQRT, &LiteCGIRBuilder::HandleSqrt},
+        {OpCode::READSP, &LiteCGIRBuilder::HandleReadSp},
+    };
+    illegalOpHandlers_ = {
+        OpCode::NOP, OpCode::CIRCUIT_ROOT, OpCode::DEPEND_ENTRY,
+        OpCode::DEAD, OpCode::RETURN_LIST,
+        OpCode::ARG_LIST, OpCode::THROW,
+        OpCode::DEPEND_SELECTOR, OpCode::DEPEND_RELAY,
+        OpCode::FRAME_STATE, OpCode::STATE_SPLIT, OpCode::FRAME_ARGS,
+        OpCode::LOOP_EXIT_DEPEND, OpCode::LOOP_EXIT,
+        OpCode::START_ALLOCATE, OpCode::FINISH_ALLOCATE, OpCode::FRAME_VALUES
+    };
+}
+
+void LiteCGIRBuilder::HandleReturnVoid([[maybe_unused]] GateRef gate)
+{
+}
+
+void LiteCGIRBuilder::HandleGoto(GateRef gate)
+{
+    std::vector<GateRef> outs;
+    acc_.GetOutStates(gate, outs);
+    int block = instID2bbID_[acc_.GetId(gate)];
+    int bbOut = instID2bbID_[acc_.GetId(outs[0])];
+    switch (acc_.GetOpCode(gate)) {
+        case OpCode::MERGE:
+        case OpCode::LOOP_BEGIN: {
+            for (const auto &out : outs) {
+                bbOut = instID2bbID_[acc_.GetId(out)];
+                VisitGoto(block, bbOut);
+            }
+            break;
+        }
+        default: {
+            VisitGoto(block, bbOut);
+            break;
+        }
+    }
+}
+
+void LiteCGIRBuilder::VisitGoto(int block, int bbOut)
+{
+    if (block == bbOut) {
+        return;
+    }
+    BB &srcBB = GetOrCreateBB(block);
+    BB &destBB = GetOrCreateBB(bbOut);
+
+    __ AppendStmt(srcBB, __ Goto(destBB));
+    __ AppendBB(srcBB);
+}
+
+void LiteCGIRBuilder::HandleParameter(GateRef gate)
+{
+    return VisitParameter(gate);
+}
+
+void LiteCGIRBuilder::VisitParameter(GateRef gate)
+{
+    size_t argth = static_cast<size_t>(acc_.TryGetValue(gate));
+    Var &param = __ GetParam(__ GetCurFunction(), argth);
+    gate2Expr_[gate] = __ Dread(param);
+}
+
+void LiteCGIRBuilder::HandleConstant(GateRef gate)
+{
+    std::bitset<64> value = acc_.GetConstantValue(gate);  // 64: bit width
+    VisitConstant(gate, value);
+}
+
+// TODO:
+void LiteCGIRBuilder::VisitConstant(GateRef gate, std::bitset<64> value)  // 64: bit width
+{
+    auto machineType = acc_.GetMachineType(gate);
+    if (machineType == MachineType::ARCH) {
+        ASSERT(compCfg_->Is64Bit());
+        machineType = MachineType::I64;
+    }
+
+    Const *constVal = nullptr;
+    if (machineType == MachineType::I32) {
+        constVal = &(__ CreateIntConst(__ i32Type, static_cast<int32_t>(value.to_ulong())));
+    } else if (machineType == MachineType::I64) {
+        constVal = &(__ CreateIntConst(__ i64Type, static_cast<int64_t>(value.to_ulong())));
+        LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+        if (__ LiteCGGetTypeKind(type) == maple::litecg::kLiteCGTypePointer) {
+            Expr constExpr = __ Cvt(__ i64Type, type, __ ConstVal(*constVal));
+            gate2Expr_[gate] = constExpr;
+            return;
+        } else if (__ LiteCGGetTypeKind(type) == maple::litecg::kLiteCGTypeScalar) {
+            // do nothing
+        } else {
+            LOG_ECMA(FATAL) << "this branch is unreachable";
+            UNREACHABLE();
+        }
+    } else if (machineType == MachineType::F64) {
+        auto doubleValue = base::bit_cast<double>(value.to_ullong());  // actual double value
+        constVal = &(__ CreateDoubleConst(doubleValue));
+    } else if (machineType == MachineType::I8) {
+        constVal = &(__ CreateIntConst(__ u8Type, static_cast<uint8_t>(value.to_ulong())));
+    } else if (machineType == MachineType::I16) {
+        constVal = &(__ CreateIntConst(__ u16Type, static_cast<uint16_t>(value.to_ulong())));
+    } else if (machineType == MachineType::I1) {
+        constVal = &(__ CreateIntConst(__ u1Type, static_cast<bool>(value.to_ulong())));
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = __ ConstVal(*constVal);
+}
+
+void LiteCGIRBuilder::HandleAdd(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitAdd(gate, g0, g1);
+}
+
+Expr LiteCGIRBuilder::CanonicalizeToPtr(Expr expr, LiteCGType *type)
+{
+    if (__ LiteCGGetTypeKind(expr.GetType()) == maple::litecg::kLiteCGTypePointer) {
+        if (expr.GetType() == type) {
+            return expr;
+        }
+        return __ Cvt(expr.GetType(), type, expr);
+    } else if (__ LiteCGGetTypeKind(expr.GetType()) == maple::litecg::kLiteCGTypeScalar) {
+        return __ Cvt(__ i64Type, type, expr);
+    } else {
+        LOG_COMPILER(FATAL) << "can't canonicalize to ptr";
+        UNREACHABLE();
+    }
+    return expr;
+}
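+// VisitAdd needs a special case when the left operand is a GC reference (i64RefType):
+// pointer-typed values are detoured through plain i64 arithmetic and cast back to the
+// reference type afterwards. The sequence emitted below for `ref + int` is roughly:
+//
+//   tmp1 = cvt ref -> i64
+//   tmp2 = cvt int -> i64      (skipped when already i64)
+//   tmp3 = add i64 tmp1, tmp2
+//   res  = cvt i64 -> ref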
+void LiteCGIRBuilder::VisitAdd(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+
+    Expr result;
+    /*
+     * If the first operand is a pointer, special treatment is needed:
+     * 1) add, pointer, int
+     * 2) add, vector{i8* x 2}, int
+     */
+    LiteCGType *returnType = ConvertLiteCGTypeFromGate(gate);
+    auto machineType = acc_.GetMachineType(gate);
+    if (IsAddIntergerType(machineType)) {
+        auto e1Type = ConvertLiteCGTypeFromGate(e1);
+        auto e1TypeKind = __ LiteCGGetTypeKind(e1Type);
+        auto e2Type = ConvertLiteCGTypeFromGate(e2);
+        if (e1TypeKind == maple::litecg::kLiteCGTypePointer) {
+            // Fixme: replace with a dedicated PointerAdd once vector types are supported
+            Expr tmp1 = __ Cvt(e1Type, __ i64Type, e1Value);
+            Expr tmp2 = (e2Type == __ i64Type) ? e2Value : __ Cvt(e2Type, __ i64Type, e2Value);
+            Expr tmp3 = __ Add(__ i64Type, tmp1, tmp2);
+            result = __ Cvt(__ i64Type, returnType, tmp3);
+        } else {
+            Expr tmp1Expr = (e1Type == returnType) ? e1Value : __ Cvt(e1Type, returnType, e1Value);
+            Expr tmp2Expr = (e2Type == returnType) ? e2Value : __ Cvt(e2Type, returnType, e2Value);
+            result = __ Add(returnType, tmp1Expr, tmp2Expr);
+        }
+    } else if (machineType == MachineType::F64) {
+        result = __ Add(returnType, e1Value, e2Value);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleLoad(GateRef gate)
+{
+    VisitLoad(gate, acc_.GetIn(gate, 1));
+}
+
+void LiteCGIRBuilder::VisitLoad(GateRef gate, GateRef base)
+{
+    Expr baseAddr = gate2Expr_[base];
+
+    LiteCGType *returnType = ConvertLiteCGTypeFromGate(gate);
+    LiteCGType *memType =
+        (__ IsHeapPointerType(returnType)) ? __ CreateRefType(returnType) : __ CreatePtrType(returnType);
+    baseAddr = CanonicalizeToPtr(baseAddr, memType);
+    Expr result = __ Iread(returnType, baseAddr, memType);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleCmp(GateRef gate)
+{
+    GateRef left = acc_.GetIn(gate, 0);
+    GateRef right = acc_.GetIn(gate, 1);
+    VisitCmp(gate, left, right);
+}
+
+IntCmpCondition LiteCGIRBuilder::ConvertLiteCGPredicateFromICMP(ICmpCondition cond) const
+{
+    switch (cond) {
+        case ICmpCondition::SLT:
+            return IntCmpCondition::kSLT;
+        case ICmpCondition::SLE:
+            return IntCmpCondition::kSLE;
+        case ICmpCondition::SGT:
+            return IntCmpCondition::kSGT;
+        case ICmpCondition::SGE:
+            return IntCmpCondition::kSGE;
+        case ICmpCondition::ULT:
+            return IntCmpCondition::kULT;
+        case ICmpCondition::ULE:
+            return IntCmpCondition::kULE;
+        case ICmpCondition::UGT:
+            return IntCmpCondition::kUGT;
+        case ICmpCondition::UGE:
+            return IntCmpCondition::kUGE;
+        case ICmpCondition::NE:
+            return IntCmpCondition::kNE;
+        case ICmpCondition::EQ:
+            return IntCmpCondition::kEQ;
+        default:
+            LOG_COMPILER(FATAL) << "unexpected cond!";
+            UNREACHABLE();
+    }
+    return IntCmpCondition::kEQ;
+}
+
+void LiteCGIRBuilder::VisitCmp(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *returnType = ConvertLiteCGTypeFromGate(gate);
+
+    [[maybe_unused]] auto e1ValCode = acc_.GetMachineType(e1);
+    [[maybe_unused]] auto e2ValCode = acc_.GetMachineType(e2);
+    ASSERT((e1ValCode == e2ValCode) ||
+           (compCfg_->Is64Bit() && (e1ValCode == MachineType::ARCH) && (e2ValCode == MachineType::I64)) ||
+           (compCfg_->Is64Bit() && (e2ValCode == MachineType::ARCH) && (e1ValCode == MachineType::I64)));
+    auto op = acc_.GetOpCode(gate);
+    if (op == OpCode::ICMP) {
+        auto cond = acc_.GetICmpCondition(gate);
+        auto litecgCond = ConvertLiteCGPredicateFromICMP(cond);
+        Expr result = __ ICmp(returnType, e1Value, e2Value, litecgCond);
+        gate2Expr_[gate] = result;
+    } else if (op == OpCode::FCMP) {
+        // TODO: float compare is not supported yet
+        LOG_ECMA(FATAL) << "this branch is not supported currently";
+        UNREACHABLE();
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+}
+
+void LiteCGIRBuilder::HandleBranch(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    std::vector<GateRef> outs;
+    acc_.GetOutStates(gate, outs);
+    GateRef bTrue = (acc_.GetOpCode(outs[0]) == OpCode::IF_TRUE) ? outs[0] : outs[1];
+    GateRef bFalse = (acc_.GetOpCode(outs[0]) == OpCode::IF_FALSE) ? outs[0] : outs[1];
+    int bbTrue = instID2bbID_[acc_.GetId(bTrue)];
+    int bbFalse = instID2bbID_[acc_.GetId(bFalse)];
+    VisitBranch(gate, ins[1], bbTrue, bbFalse);
+}
+
+void LiteCGIRBuilder::VisitBranch(GateRef gate, GateRef cmp, int btrue, int bfalse)
+{
+    if (gate2Expr_.count(cmp) == 0) {
+        OPTIONAL_LOG_COMPILER(ERROR) << "Branch condition gate is nullptr!";
+        return;
+    }
+    BB &curBB = GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]);
+    __ AppendBB(curBB);
+    BB &bb = CreateBB();
+    BB &falseBB = GetOrCreateBB(bfalse);
+    Stmt &stmt = __ Goto(falseBB);
+    __ AppendStmt(bb, stmt);
+    __ AppendBB(bb);
+    BB &trueBB = GetOrCreateBB(btrue);
+    Expr cond = gate2Expr_[cmp];
+    Stmt &condBR = __ CondGoto(cond, trueBB, true);
+    __ AppendStmt(curBB, condBR);
+}
+
+void LiteCGIRBuilder::HandleReturn(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitReturn(gate, 1, ins);
+}
+
+void LiteCGIRBuilder::VisitReturn(GateRef gate, [[maybe_unused]] GateRef popCount,
+                                  const std::vector<GateRef> &operands)
+{
+    // [STATE] [DEPEND] [VALUE] [RETURN_LIST]
+    GateRef operand = operands[2];  // 2: skip the two inputs that are not data gates
+    Expr returnValue = gate2Expr_[operand];
+    Stmt &returnNode = __ Return(returnValue);
+    BB &curBB = GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]);
+    __ AppendStmt(curBB, returnNode);
+    __ AppendBB(curBB);
+}
+
+Expr LiteCGIRBuilder::GetRTStubOffset(Expr glue, int index)
+{
+    size_t slotOffset = JSThread::GlueData::GetRTStubEntriesOffset(compCfg_->Is32Bit()) + index * slotSize_;
+    Const &constVal = __ CreateIntConst(glue.GetType(), static_cast<int64_t>(slotOffset));
+    return __ ConstVal(constVal);
+}
+
+Expr LiteCGIRBuilder::GetCoStubOffset(Expr glue, int index) const
+{
+    int offset = JSThread::GlueData::GetCOStubEntriesOffset(compCfg_->Is32Bit()) +
+                 static_cast<int>(index * slotSize_);
+    Const &constVal = __ CreateIntConst(glue.GetType(), static_cast<int64_t>(offset));
+    return __ ConstVal(constVal);
+}
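+// Stub entries live in per-thread tables reachable from the glue register, so resolving a
+// callee is pure address arithmetic. With the offsets computed above, runtime stub i is
+// loaded from:
+//
+//   entry = *(glue + GetRTStubEntriesOffset() + i * slotSize_)
+//
+// GetFunction below performs the equivalent Iread and casts the loaded word to the stub's
+// function-pointer type derived from its CallSignature.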
+void LiteCGIRBuilder::HandleRuntimeCall(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitRuntimeCall(gate, ins);
+}
+
+LiteCGType *LiteCGIRBuilder::ConvertLiteCGTypeFromVariableType(VariableType type) const
+{
+    std::map<VariableType, LiteCGType *> machineTypeMap = {
+        {VariableType::VOID(), __ voidType},
+        {VariableType::BOOL(), __ u1Type},
+        {VariableType::INT8(), __ i8Type},
+        {VariableType::INT16(), __ i16Type},
+        {VariableType::INT32(), __ i32Type},
+        {VariableType::INT64(), __ i64Type},
+        {VariableType::FLOAT32(), __ f32Type},
+        {VariableType::FLOAT64(), __ f64Type},
+        {VariableType::NATIVE_POINTER(), __ i64Type},
+        {VariableType::JS_POINTER(), __ i64RefType},
+        {VariableType::JS_ANY(), __ i64RefType},
+    };
+    return machineTypeMap[type];
+}
+
+LiteCGType *LiteCGIRBuilder::GenerateFuncType(const std::vector<Expr> &params, const CallSignature *stubDescriptor)
+{
+    LiteCGType *retType = ConvertLiteCGTypeFromVariableType(stubDescriptor->GetReturnType());
+    std::vector<LiteCGType *> paramTys;
+    for (const auto &value : params) {
+        paramTys.emplace_back(value.GetType());
+    }
+    LiteCGType *functionType = __ CreateFuncType(paramTys, retType, false);
+    return functionType;
+}
+
+LiteCGType *LiteCGIRBuilder::GetFuncType(const CallSignature *stubDescriptor) const
+{
+    LiteCGType *returnType = ConvertLiteCGTypeFromVariableType(stubDescriptor->GetReturnType());
+    std::vector<LiteCGType *> paramTys;
+    auto paramCount = stubDescriptor->GetParametersCount();
+    auto paramsType = stubDescriptor->GetParametersType();
+    if (paramsType != nullptr) {
+        LiteCGType *glueType = ConvertLiteCGTypeFromVariableType(paramsType[0]);
+        paramTys.push_back(glueType);
+
+        for (size_t i = 1; i < paramCount; i++) {
+            paramTys.push_back(ConvertLiteCGTypeFromVariableType(paramsType[i]));
+        }
+    }
+    auto funcType = __ CreateFuncType(paramTys, returnType, stubDescriptor->IsVariadicArgs());
+    return funcType;
+}
+
+Expr LiteCGIRBuilder::GetFunction(BB &bb, Expr glue, const CallSignature *signature, Expr rtbaseoffset,
+                                  const std::string &realName) const
+{
+    LiteCGType *rtfuncType = GetFuncType(signature);
+    LiteCGType *rtfuncTypePtr = __ CreatePtrType(rtfuncType);
+    LiteCGType *rtFuncTypePtrPtr = __ CreatePtrType(rtfuncTypePtr);
+    LiteCGType *glueType = glue.GetType();
+    LiteCGType *glueTypePtr = __ CreatePtrType(glueType);
+    Expr rtbaseAddr = __ Cvt(rtbaseoffset.GetType(), glueTypePtr, rtbaseoffset);
+
+    Expr funcAddr = __ Iread(glueType, rtbaseAddr, glueTypePtr);
+    Expr callee = __ Cvt(glueType, rtFuncTypePtrPtr, funcAddr);
+
+    std::string name = realName.empty() ? signature->GetName() : realName;
+    Var &func = __ CreateLocalVar(callee.GetType(), name);
+    Stmt &funcAddrNode = __ Dassign(callee, func);
+    __ AppendStmt(bb, funcAddrNode);
+
+    return __ Dread(func);
+}
+
+bool LiteCGIRBuilder::IsOptimizedJSFunction() const
+{
+    return circuit_->GetFrameType() == FrameType::OPTIMIZED_JS_FUNCTION_FRAME;
+}
+
+bool LiteCGIRBuilder::IsOptimized() const
+{
+    return circuit_->GetFrameType() == FrameType::OPTIMIZED_FRAME;
+}
+
+CallExceptionKind LiteCGIRBuilder::GetCallExceptionKind(size_t index, OpCode op) const
+{
+    bool hasPcOffset = IsOptimizedJSFunction() &&
+                       ((op == OpCode::NOGC_RUNTIME_CALL && (kungfu::RuntimeStubCSigns::IsAsmStub(index))) ||
+                        (op == OpCode::CALL) ||
+                        (op == OpCode::RUNTIME_CALL));
+    return hasPcOffset ? CallExceptionKind::HAS_PC_OFFSET : CallExceptionKind::NO_PC_OFFSET;
+}
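+// Runtime calls funnel through the CallRuntime stub, whose argument layout is
+// (glue, runtime-function id, argc, arg0..argN). When the surrounding frame is an optimized
+// JS function frame, the last circuit input additionally carries the bytecode pc offset; it
+// is pinned to a pseudo-register and attached to the call as deopt bundle info so the
+// runtime can map the call site back to bytecode.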
+// ToFix:
+void LiteCGIRBuilder::VisitRuntimeCall(GateRef gate, const std::vector<GateRef> &inList)
+{
+    StubIdType stubId = RTSTUB_ID(CallRuntime);
+    Expr glue = GetGlue(inList);
+    int stubIndex = static_cast<int>(std::get<RuntimeStubCSigns::ID>(stubId));
+    Expr rtoffset = GetRTStubOffset(glue, stubIndex);
+    Expr rtbaseOffset = __ Add(glue.GetType(), glue, rtoffset);
+    const CallSignature *signature = RuntimeStubCSigns::Get(std::get<RuntimeStubCSigns::ID>(stubId));
+
+    CallExceptionKind kind = GetCallExceptionKind(stubIndex, OpCode::RUNTIME_CALL);
+    bool hasPCOffset = (kind == CallExceptionKind::HAS_PC_OFFSET);
+    size_t actualNumArgs = hasPCOffset ? (inList.size() - 1) : inList.size();
+
+    std::vector<Expr> params;
+    params.push_back(glue);  // glue
+
+    const int index = static_cast<int>(acc_.GetConstantValue(inList[static_cast<size_t>(CallInputs::TARGET)]));
+    Expr indexValue = __ ConstVal(__ CreateIntConst(__ u64Type, static_cast<uint64_t>(index)));
+    params.push_back(indexValue);  // target
+
+    const int64_t argc = actualNumArgs - static_cast<int64_t>(CallInputs::FIRST_PARAMETER);
+    Expr argcValue = __ ConstVal(__ CreateIntConst(__ u64Type, static_cast<uint64_t>(argc)));
+    params.push_back(argcValue);  // argc
+
+    for (size_t paraIdx = static_cast<size_t>(CallInputs::FIRST_PARAMETER); paraIdx < actualNumArgs; ++paraIdx) {
+        GateRef gateTmp = inList[paraIdx];
+        params.push_back(gate2Expr_[gateTmp]);
+    }
+
+    std::string targetName = RuntimeStubCSigns::GetRTName(index);
+    BB &bb = GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]);
+    std::string name = targetName.empty() ? signature->GetName() : targetName;
+    Expr callee = GetFunction(bb, glue, signature, rtbaseOffset, name);
+
+    static uint32_t val = 0;
+    std::string returnCallValName = name + "Ret" + std::to_string(val++);
+    LiteCGType *returnType = ConvertLiteCGTypeFromVariableType(signature->GetReturnType());
+    Var *returnVar = (returnType == __ voidType) ? nullptr : &(__ CreateLocalVar(returnType, returnCallValName));
+    Stmt &callNode = __ ICall(callee, params, returnVar);
+    if (kind == CallExceptionKind::HAS_PC_OFFSET) {
+        std::map<int32_t, PregIdx> deoptBundleInfo;
+        auto pcIndex = static_cast<int32_t>(SpecVregIndex::PC_OFFSET_INDEX);
+        Expr pcOffset = hasPCOffset ? (gate2Expr_[inList[actualNumArgs]])
+                                    : __ ConstVal(__ CreateIntConst(__ i32Type, 0));
+        PregIdx pregIdx = __ CreatePreg(pcOffset.GetType());
+        __ AppendStmt(bb, __ Regassign(pcOffset, pregIdx));
+
+        deoptBundleInfo.insert(std::pair<int32_t, PregIdx>(pcIndex, pregIdx));
+        maple::litecg::LiteCGSetDeoptBundleInfo(callNode, deoptBundleInfo);
+    }
+    __ SetStmtCallConv(callNode, maple::litecg::Web_Kit_JS_Call);
+    __ AppendStmt(bb, callNode);
+    if (returnVar != nullptr) {
+        gate2Expr_[gate] = __ Dread(*returnVar);
+    }
+
+    // TODO: SetDebugInfo(gate, ...) once IsLogEnabled() is supported
+}
+
+void LiteCGIRBuilder::HandleZExtInt(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitZExtInt(gate, ins[0]);
+}
+
+void LiteCGIRBuilder::VisitZExtInt(GateRef gate, GateRef e1)
+{
+    ASSERT(GetBitWidthFromMachineType(acc_.GetMachineType(e1)) <=
+           GetBitWidthFromMachineType(acc_.GetMachineType(gate)));
+    LiteCGType *fromType = ConvertLiteCGTypeFromGate(e1);
+    LiteCGType *toType = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ ZExt(fromType, toType, gate2Expr_[e1]);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntDiv(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntDiv(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntDiv(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ SDiv(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+Expr LiteCGIRBuilder::GetCallee(maple::litecg::BB &bb, const std::vector<GateRef> &inList,
+                                const CallSignature *signature, const std::string &realName)
+{
+    LiteCGType *rtfuncType = GetFuncType(signature);
+    LiteCGType *rtfuncTypePtr = __ CreatePtrType(rtfuncType);
+    LiteCGType *realFuncPtrType = __ CreatePtrType(rtfuncTypePtr);
+    Expr code = gate2Expr_[inList[static_cast<size_t>(CallInputs::TARGET)]];
+    Expr callee = __ Cvt(__ i64Type, realFuncPtrType, code);
+
+    std::string name = realName.empty() ? signature->GetName() : realName;
+
+    Var &func = __ CreateLocalVar(callee.GetType(), name);
+    Stmt &funcAddrNode = __ Dassign(callee, func);
+    __ AppendStmt(bb, funcAddrNode);
+    return __ Dread(func);
+}
+
+void LiteCGIRBuilder::HandleRuntimeCallWithArgv(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitRuntimeCallWithArgv(gate, ins);
+}
+
+void LiteCGIRBuilder::VisitRuntimeCallWithArgv(GateRef gate, const std::vector<GateRef> &inList)
+{
+    ASSERT(IsOptimized());
+    StubIdType stubId = RTSTUB_ID(CallRuntimeWithArgv);
+    Expr glue = GetGlue(inList);
+    int stubIndex = static_cast<int>(std::get<RuntimeStubCSigns::ID>(stubId));
+    Expr rtoffset = GetRTStubOffset(glue, stubIndex);
+    Expr rtbaseoffset = __ Add(glue.GetType(), glue, rtoffset);
+    const CallSignature *signature = RuntimeStubCSigns::Get(std::get<RuntimeStubCSigns::ID>(stubId));
+
+    std::vector<Expr> params;
+    params.push_back(glue);  // glue
+
+    uint64_t index = acc_.GetConstantValue(inList[static_cast<size_t>(CallInputs::TARGET)]);
+    auto targetId = __ ConstVal(__ CreateIntConst(__ i64Type, index));
+    params.push_back(targetId);  // target
+    for (size_t paraIdx = static_cast<size_t>(CallInputs::FIRST_PARAMETER); paraIdx < inList.size(); ++paraIdx) {
+        GateRef gateTmp = inList[paraIdx];
+        params.push_back(gate2Expr_[gateTmp]);
+    }
+
+    BB &bb = GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]);
+    std::string targetName = RuntimeStubCSigns::GetRTName(index);
+    std::string name = targetName.empty() ? signature->GetName() : targetName;
+    Expr callee = GetFunction(bb, glue, signature, rtbaseoffset, name);
+
+    static uint32_t val = 0;
+    std::string returnCallValName = name + "Ret" + std::to_string(val++);
+    LiteCGType *returnType = ConvertLiteCGTypeFromVariableType(signature->GetReturnType());
+    Var *returnVar = (returnType == __ voidType) ? nullptr : &(__ CreateLocalVar(returnType, returnCallValName));
+    Stmt &callNode = __ ICall(callee, params, returnVar);
+    __ AppendStmt(bb, callNode);
+    if (returnVar != nullptr) {
+        gate2Expr_[gate] = __ Dread(*returnVar);
+    }
+
+    // TODO: SetDebugInfo(gate, ...) once IsLogEnabled() is supported
+}
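+// HandleCall covers every call flavour sharing the common lowering: common stubs (CALL),
+// asm/nogc runtime stubs (NOGC_RUNTIME_CALL), optimized JS calls (CALL_OPTIMIZED /
+// FAST_CALL_OPTIMIZED) and builtins. They differ only in how the callee address is resolved
+// and in whether a pc offset must be recorded for deoptimization.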
+void LiteCGIRBuilder::HandleCall(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    OpCode callOp = acc_.GetOpCode(gate);
+    if (callOp == OpCode::CALL || callOp == OpCode::NOGC_RUNTIME_CALL ||
+        callOp == OpCode::BUILTINS_CALL || callOp == OpCode::BUILTINS_CALL_WITH_ARGV ||
+        callOp == OpCode::CALL_OPTIMIZED || callOp == OpCode::FAST_CALL_OPTIMIZED) {
+        VisitCall(gate, ins, callOp);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+}
+
+void LiteCGIRBuilder::VisitCall(GateRef gate, const std::vector<GateRef> &inList, OpCode op)
+{
+    size_t targetIndex = static_cast<size_t>(CallInputs::TARGET);
+    static_assert(static_cast<size_t>(CallInputs::FIRST_PARAMETER) == 3);
+    const CallSignature *calleeDescriptor = nullptr;
+    Expr glue = GetGlue(inList);
+    Expr callee;
+    CallExceptionKind kind = CallExceptionKind::NO_PC_OFFSET;
+    BB &bb = GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]);
+    if (op == OpCode::CALL) {
+        const size_t index = acc_.GetConstantValue(inList[targetIndex]);
+        calleeDescriptor = CommonStubCSigns::Get(index);
+        Expr rtoffset = GetCoStubOffset(glue, index);
+        Expr rtbaseoffset = __ Add(glue.GetType(), glue, rtoffset);
+        callee = GetFunction(bb, glue, calleeDescriptor, rtbaseoffset);
+        kind = GetCallExceptionKind(index, op);
+    } else if (op == OpCode::NOGC_RUNTIME_CALL) {
+        UpdateLeaveFrame(glue);
+        const size_t index = acc_.GetConstantValue(inList[targetIndex]);
+        calleeDescriptor = RuntimeStubCSigns::Get(index);
+        Expr rtoffset = GetRTStubOffset(glue, index);
+        Expr rtbaseoffset = __ Add(glue.GetType(), glue, rtoffset);
+        callee = GetFunction(bb, glue, calleeDescriptor, rtbaseoffset);
+        kind = GetCallExceptionKind(index, op);
+    } else if (op == OpCode::CALL_OPTIMIZED) {
+        calleeDescriptor = RuntimeStubCSigns::GetOptimizedCallSign();
+        callee = GetCallee(bb, inList, calleeDescriptor, calleeDescriptor->GetName());
+        kind = IsOptimizedJSFunction() ? CallExceptionKind::HAS_PC_OFFSET : CallExceptionKind::NO_PC_OFFSET;
+    } else if (op == OpCode::FAST_CALL_OPTIMIZED) {
+        calleeDescriptor = RuntimeStubCSigns::GetOptimizedFastCallSign();
+        callee = GetCallee(bb, inList, calleeDescriptor, calleeDescriptor->GetName());
+        kind = IsOptimizedJSFunction() ? CallExceptionKind::HAS_PC_OFFSET : CallExceptionKind::NO_PC_OFFSET;
+    } else {
+        ASSERT(op == OpCode::BUILTINS_CALL || op == OpCode::BUILTINS_CALL_WITH_ARGV);
+        Expr opcodeOffset = gate2Expr_[inList[targetIndex]];
+        Expr rtoffset = GetBuiltinsStubOffset(glue);
+        Expr offset = __ Add(rtoffset.GetType(), rtoffset, opcodeOffset);
+        Expr rtbaseoffset = __ Add(glue.GetType(), glue, offset);
+        if (op == OpCode::BUILTINS_CALL) {
+            calleeDescriptor = BuiltinsStubCSigns::BuiltinsCSign();
+        } else {
+            calleeDescriptor = BuiltinsStubCSigns::BuiltinsWithArgvCSign();
+        }
+        callee = GetFunction(bb, glue, calleeDescriptor, rtbaseoffset);
+    }
+
+    std::vector<Expr> params;
+    const size_t firstArg = static_cast<size_t>(CallInputs::FIRST_PARAMETER);
+    params.push_back(glue);
+
+    LiteCGType *calleeFuncType = __ LiteCGGetPointedType(callee.GetType());
+    std::vector<LiteCGType *> paramTypes = __ LiteCGGetFuncParamTypes(calleeFuncType);
+
+    bool hasPCOffset = (kind == CallExceptionKind::HAS_PC_OFFSET);
+    size_t actualNumArgs = hasPCOffset ? (inList.size() - 1) : inList.size();
+
+    // then push the actual parameters of the js function call
+    for (size_t paraIdx = firstArg + 1; paraIdx < actualNumArgs; ++paraIdx) {
+        GateRef gateTmp = inList[paraIdx];
+        Expr gateExpr = gate2Expr_[gateTmp];
+        const auto gateTmpType = gateExpr.GetType();
+        if (params.size() < paramTypes.size()) {  // this condition will be false for variadic arguments
+            const auto paramType = paramTypes.at(params.size());
+            // match parameter types against the function signature types
+            if (__ IsHeapPointerType(paramType) && !__ IsHeapPointerType(gateTmpType)) {
+                Expr cvtI64Expr = __ Cvt(gateTmpType, __ i64Type, gateExpr);
+                params.push_back(__ Cvt(__ i64Type, paramType, cvtI64Expr));
+            } else {
+                params.push_back(__ Cvt(gateTmpType, paramType, gateExpr));
+            }
+        } else {
+            params.push_back(gateExpr);
+        }
+    }
+
+    LiteCGType *returnType = __ LiteCGGetFuncReturnType(calleeFuncType);
+    static uint32_t retNo = 0;
+    std::string retName = calleeDescriptor->GetName() + "Ret" + std::to_string(retNo++);
+    Var *returnVar = (returnType == __ voidType) ? nullptr : &(__ CreateLocalVar(returnType, retName));
+    Stmt &callNode = __ ICall(callee, params, returnVar);
+    if (kind == CallExceptionKind::HAS_PC_OFFSET) {
+        std::map<int32_t, PregIdx> deoptBundleInfo;
+        auto pcIndex = static_cast<int32_t>(SpecVregIndex::PC_OFFSET_INDEX);
+        Expr pcOffset = hasPCOffset ? (gate2Expr_[inList[actualNumArgs]])
+                                    : __ ConstVal(__ CreateIntConst(__ i32Type, 0));
+        PregIdx pregIdx = __ CreatePreg(pcOffset.GetType());
+        __ AppendStmt(bb, __ Regassign(pcOffset, pregIdx));
+
+        deoptBundleInfo.insert(std::pair<int32_t, PregIdx>(pcIndex, pregIdx));
+        maple::litecg::LiteCGSetDeoptBundleInfo(callNode, deoptBundleInfo);
+    }
+    __ SetStmtCallConv(callNode, ConvertCallAttr(calleeDescriptor->GetCallConv()));
+    __ AppendStmt(bb, callNode);
+    if (returnVar != nullptr) {
+        gate2Expr_[gate] = __ Dread(*returnVar);
+    }
+
+    // TODO: SetDebugInfo(gate, ...) once IsLogEnabled() is supported
+}
+
+maple::litecg::ConvAttr LiteCGIRBuilder::ConvertCallAttr(const CallSignature::CallConv callConv)
+{
+    switch (callConv) {
+        case CallSignature::CallConv::GHCCallConv: {
+            return maple::litecg::GHC_Call;
+        }
+        case CallSignature::CallConv::WebKitJSCallConv: {
+            return maple::litecg::Web_Kit_JS_Call;
+        }
+        default: {
+            return maple::litecg::CCall;
+        }
+    }
+}
+
+Expr LiteCGIRBuilder::GetBuiltinsStubOffset(Expr glue)
+{
+    Const &constVal =
+        __ CreateIntConst(glue.GetType(), JSThread::GlueData::GetBuiltinsStubEntriesOffset(compCfg_->Is32Bit()));
+    return __ ConstVal(constVal);
+}
+
+void LiteCGIRBuilder::UpdateLeaveFrame(Expr glue)
+{
+    Expr leaveFrameOffset = GetLeaveFrameOffset(glue);
+    Expr leaveFrameValue = __ Add(glue.GetType(), glue, leaveFrameOffset);
+    LiteCGType *glueType = glue.GetType();
+    LiteCGType *glueTypePtr = __ CreatePtrType(glueType);
+    Expr leaveFrameAddr = __ Cvt(leaveFrameValue.GetType(), glueTypePtr, leaveFrameValue);
+    Expr fpAddr = CallingFp(true);
+    Expr fp = __ Cvt(fpAddr.GetType(), __ i64Type, fpAddr);
+
+    // TODO: this Iassign still needs to be appended to the active BB
+    __ Iassign(fp, leaveFrameAddr, fp.GetType());
+}
+
+bool LiteCGIRBuilder::IsInterpreted() const
+{
+    return circuit_->GetFrameType() == FrameType::ASM_INTERPRETER_FRAME;
+}
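+// CallingFp reads the frame pointer as a LiteCG pseudo-register of the current function;
+// the interpreter path (which would take SP from an explicit parameter instead) is not
+// supported here yet, hence the assert below.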
+Expr LiteCGIRBuilder::CallingFp(bool /*isCaller*/)
+{
+    // TODO: support the interpreter frame, e.g.
+    // if (IsInterpreted()) {
+    //     return GetParam(function_, static_cast<int>(InterpreterHandlerInputs::SP));
+    // }
+    ASSERT(!IsInterpreted());
+
+    /* 0:calling 1:its caller */
+    Function &func = __ GetCurFunction();
+    return __ LiteCGGetPregFP(func);
+}
+
+Expr LiteCGIRBuilder::GetLeaveFrameOffset(Expr glue)
+{
+    size_t slotOffset = JSThread::GlueData::GetLeaveFrameOffset(compCfg_->Is32Bit());
+    Const &constVal = __ CreateIntConst(glue.GetType(), static_cast<int64_t>(slotOffset));
+    return __ ConstVal(constVal);
+}
+
+void LiteCGIRBuilder::HandleUDiv(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitUDiv(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitUDiv(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ UDiv(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntAnd(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntAnd(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntAnd(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ And(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntOr(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntOr(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntOr(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ Or(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntXor(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntXor(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntXor(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ Xor(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntLsr(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntLsr(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntLsr(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ LShr(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntAsr(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntAsr(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntAsr(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ AShr(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleBitCast(GateRef gate)
+{
+    VisitBitCast(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitBitCast(GateRef gate, GateRef e1)
+{
+    ASSERT(GetBitWidthFromMachineType(acc_.GetMachineType(gate)) ==
+           GetBitWidthFromMachineType(acc_.GetMachineType(e1)));
+    LiteCGType *fromType = ConvertLiteCGTypeFromGate(e1);
+    LiteCGType *toType = ConvertLiteCGTypeFromGate(gate);
+    Expr e1Value = gate2Expr_[e1];
+    Expr result = __ BitCast(fromType, toType, e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntLsl(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitIntLsl(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitIntLsl(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr result = __ Shl(type, e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleMod(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitMod(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitMod(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    ASSERT(type == ConvertLiteCGTypeFromGate(e1));
+    ASSERT(type == ConvertLiteCGTypeFromGate(e2));
+    auto machineType = acc_.GetMachineType(gate);
+    Expr result;
+    if (machineType == MachineType::I32) {
+        result = __ SRem(type, e1Value, e2Value);
+    } else if (machineType == MachineType::F64) {
+        // TODO: lower FMOD via a float remainder op once LiteCG exposes one (FRem?);
+        // until then `result` stays unset on this path.
+        // result = __ FRem(type, e1Value, e2Value);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleCastIntXToIntY(GateRef gate)
+{
+    VisitCastIntXToIntY(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitCastIntXToIntY(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    ASSERT(GetBitWidthFromMachineType(acc_.GetMachineType(e1)) >=
+           GetBitWidthFromMachineType(acc_.GetMachineType(gate)));
+    auto e1Type = ConvertLiteCGTypeFromGate(e1);
+    Expr result = __ Cvt(e1Type, ConvertLiteCGTypeFromGate(gate), e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleChangeInt32ToDouble(GateRef gate)
+{
+    VisitChangeInt32ToDouble(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitChangeInt32ToDouble(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    auto e1Type = ConvertLiteCGTypeFromGate(e1);
+    Expr result = __ Cvt(e1Type, ConvertLiteCGTypeFromGate(gate), e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleChangeUInt32ToDouble(GateRef gate)
+{
+    VisitChangeUInt32ToDouble(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitChangeUInt32ToDouble(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    auto e1Type = ConvertLiteCGTypeFromGate(e1);
+    Expr result = __ Cvt(e1Type, ConvertLiteCGTypeFromGate(gate), e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleChangeDoubleToInt32(GateRef gate)
+{
+    VisitChangeDoubleToInt32(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitChangeDoubleToInt32(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    auto e1Type = ConvertLiteCGTypeFromGate(e1);
+    Expr result = __ Cvt(e1Type, ConvertLiteCGTypeFromGate(gate), e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleChangeTaggedPointerToInt64(GateRef gate)
+{
+    VisitChangeTaggedPointerToInt64(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitChangeTaggedPointerToInt64(GateRef gate, GateRef e1)
+{
+    Expr result = CanonicalizeToInt(e1);
+    gate2Expr_[gate] = result;
+}
+
+Expr LiteCGIRBuilder::CanonicalizeToInt(GateRef gate)
+{
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    Expr opnd = gate2Expr_[gate];
+    if (__ LiteCGGetTypeKind(type) == maple::litecg::kLiteCGTypePointer) {
+        return __ Cvt(type, __ i64Type, opnd);
+    } else if (__ LiteCGGetTypeKind(type) == maple::litecg::kLiteCGTypeScalar) {
+        return opnd;
+    } else {
+        LOG_COMPILER(FATAL) << "can't canonicalize to int64";
+        UNREACHABLE();
+    }
+}
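+// Tagged JS values are machine words of type i64RefType so the GC can track them;
+// converting between raw i64 and tagged therefore never changes bits, only the static type.
+// CanonicalizeToInt above and ChangeInt64ToTagged below are the two directions of that
+// reinterpretation.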
+void LiteCGIRBuilder::HandleChangeInt64ToTagged(GateRef gate)
+{
+    VisitChangeInt64ToTagged(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitChangeInt64ToTagged(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    ASSERT(__ LiteCGGetTypeKind(ConvertLiteCGTypeFromGate(e1)) == maple::litecg::kLiteCGTypeScalar);
+    Expr result = __ Cvt(__ i64Type, __ i64RefType, e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleSub(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitSub(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitSub(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    Expr result;
+    LiteCGType *returnType = ConvertLiteCGTypeFromGate(gate);
+    auto machineType = acc_.GetMachineType(gate);
+    if (machineType == MachineType::I16 || machineType == MachineType::I32 ||
+        machineType == MachineType::I64 || machineType == MachineType::ARCH ||
+        machineType == MachineType::F64) {
+        result = __ Sub(returnType, e1Value, e2Value);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleMul(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitMul(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitMul(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    Expr result;
+    LiteCGType *returnType = ConvertLiteCGTypeFromGate(gate);
+    auto machineType = acc_.GetMachineType(gate);
+    if (IsMulIntergerType(machineType)) {
+        result = __ Mul(returnType, e1Value, e2Value);
+    } else if (machineType == MachineType::F64) {
+        result = __ Mul(returnType, e1Value, e2Value);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleIntRev(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitIntRev(gate, ins[0]);
+}
+
+void LiteCGIRBuilder::VisitIntRev(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    ASSERT(type == ConvertLiteCGTypeFromGate(e1));
+    Expr result;
+    auto machineType = acc_.GetMachineType(gate);
+    if (machineType <= MachineType::I64 && machineType >= MachineType::I1) {
+        result = __ Not(type, e1Value);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleFloatDiv(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    auto g1 = acc_.GetIn(gate, 1);
+    VisitFloatDiv(gate, g0, g1);
+}
+
+void LiteCGIRBuilder::VisitFloatDiv(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    // TODO: confirm whether SDiv lowers correctly for floats or a dedicated float div is needed
+    Expr result = __ SDiv(ConvertLiteCGTypeFromGate(gate), e1Value, e2Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleTruncFloatToInt(GateRef gate)
+{
+    auto g0 = acc_.GetIn(gate, 0);
+    VisitTruncFloatToInt(gate, g0);
+}
+
+void LiteCGIRBuilder::VisitTruncFloatToInt(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    auto machineType = acc_.GetMachineType(e1);
+    Expr result;
+    if (machineType <= MachineType::F64 && machineType >= MachineType::F32) {
+        // TODO: confirm the target int width (the LLVM backend used FPToSI with the gate's type)
+        result = __ Trunc(ConvertLiteCGTypeFromGate(e1), __ i64Type, e1Value);
+    } else {
+        LOG_ECMA(FATAL) << "this branch is unreachable";
+        UNREACHABLE();
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleAddWithOverflow(GateRef gate)
+{
+    auto in0 = acc_.GetIn(gate, 0);
+    auto in1 = acc_.GetIn(gate, 1);
+    ASSERT(acc_.GetMachineType(in0) == MachineType::I32);
+    ASSERT(acc_.GetMachineType(in1) == MachineType::I32);
+    VisitAddWithOverflow(gate, in0, in1);
+}
+
+void LiteCGIRBuilder::VisitAddWithOverflow(GateRef gate, GateRef e1, GateRef e2)
+{
+    // FIXME: need to use a different symbol name?
+    // get return type {i32 res, u1 carry}
+    auto *retType = __ GetStructType("overflow_internal@i32");
+    retType = retType ? retType :
+        __ CreateStructType("overflow_internal@i32").Field("res", __ i32Type).Field("carry", __ u1Type).Done();
+    Var &retVar = __ CreateLocalVar(retType, "overflow_ret@i32");
+
+    // generate function call
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    std::vector<Expr> args = {e1Value, e2Value};
+    auto &call = __ IntrinsicCall(IntrinsicId::INTRN_ADD_WITH_OVERFLOW, args, &retVar);
+    gate2Expr_[gate] = __ Dread(retVar);
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), call);
+}
+
+void LiteCGIRBuilder::HandleSubWithOverflow(GateRef gate)
+{
+    auto in0 = acc_.GetIn(gate, 0);
+    auto in1 = acc_.GetIn(gate, 1);
+    ASSERT(acc_.GetMachineType(in0) == MachineType::I32);
+    ASSERT(acc_.GetMachineType(in1) == MachineType::I32);
+    VisitSubWithOverflow(gate, in0, in1);
+}
+
+void LiteCGIRBuilder::VisitSubWithOverflow(GateRef gate, GateRef e1, GateRef e2)
+{
+    // FIXME: need to use a different symbol name?
+    // get return type {i32 res, u1 carry}
+    auto *retType = __ GetStructType("overflow_internal@i32");
+    retType = retType ? retType :
+        __ CreateStructType("overflow_internal@i32").Field("res", __ i32Type).Field("carry", __ u1Type).Done();
+    Var &retVar = __ CreateLocalVar(retType, "overflow_ret@i32");
+
+    // generate function call
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    std::vector<Expr> args = {e1Value, e2Value};
+    auto &call = __ IntrinsicCall(IntrinsicId::INTRN_SUB_WITH_OVERFLOW, args, &retVar);
+    gate2Expr_[gate] = __ Dread(retVar);
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), call);
+}
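+// All *_WITH_OVERFLOW gates return a two-field aggregate {i32 res, u1 carry}. The struct
+// type is created once under the name "overflow_internal@i32" and looked up on later uses,
+// so the module does not accumulate duplicate type definitions. EXTRACT_VALUE then picks
+// the fields apart (in Maple's numbering field 0 is the aggregate itself, so field indices
+// are shifted by one).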
+void LiteCGIRBuilder::HandleMulWithOverflow(GateRef gate)
+{
+    auto in0 = acc_.GetIn(gate, 0);
+    auto in1 = acc_.GetIn(gate, 1);
+    ASSERT(acc_.GetMachineType(in0) == MachineType::I32);
+    ASSERT(acc_.GetMachineType(in1) == MachineType::I32);
+    VisitMulWithOverflow(gate, in0, in1);
+}
+
+void LiteCGIRBuilder::VisitMulWithOverflow(GateRef gate, GateRef e1, GateRef e2)
+{
+    // FIXME: need to use a different symbol name?
+    // get return type {i32 res, u1 carry}
+    auto *retType = __ GetStructType("overflow_internal@i32");
+    retType = retType ? retType :
+        __ CreateStructType("overflow_internal@i32").Field("res", __ i32Type).Field("carry", __ u1Type).Done();
+    Var &retVar = __ CreateLocalVar(retType, "overflow_ret@i32");
+
+    // generate function call
+    Expr e1Value = gate2Expr_[e1];
+    Expr e2Value = gate2Expr_[e2];
+    std::vector<Expr> args = {e1Value, e2Value};
+    auto &call = __ IntrinsicCall(IntrinsicId::INTRN_MUL_WITH_OVERFLOW, args, &retVar);
+    gate2Expr_[gate] = __ Dread(retVar);
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), call);
+}
+
+void LiteCGIRBuilder::HandleSExtInt(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitSExtInt(gate, ins[0]);
+}
+
+void LiteCGIRBuilder::VisitSExtInt(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    LiteCGType *fromType = ConvertLiteCGTypeFromGate(e1);
+    LiteCGType *toType = ConvertLiteCGTypeFromGate(gate);
+    // note: the LLVM API inferred the source type; LiteCG takes both types explicitly
+    Expr result = __ SExt(fromType, toType, e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleSqrt(GateRef gate)
+{
+    GateRef param = acc_.GetIn(gate, 0);
+    VisitSqrt(gate, param);
+}
+
+void LiteCGIRBuilder::VisitSqrt(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    LiteCGType *type = ConvertLiteCGTypeFromGate(e1);
+    Expr result;
+    if (type == __ f32Type || type == __ f64Type) {
+        result = __ Sqrt(type, e1Value);
+    } else {
+        result = __ Sqrt(__ f64Type, __ Cvt(type, __ f64Type, e1Value));
+    }
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleReadSp(GateRef gate)
+{
+    ASSERT(acc_.GetOpCode(gate) == OpCode::READSP);
+    VisitReadSp(gate);
+}
+
+void LiteCGIRBuilder::VisitReadSp(GateRef gate)
+{
+    Expr result = __ LiteCGGetPregSP();
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleFPTrunc(GateRef gate)
+{
+    VisitFPTrunc(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitFPTrunc(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    ASSERT(GetBitWidthFromMachineType(acc_.GetMachineType(e1)) >=
+           GetBitWidthFromMachineType(acc_.GetMachineType(gate)));
+    Expr result = __ Cvt(ConvertLiteCGTypeFromGate(e1), ConvertLiteCGTypeFromGate(gate), e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleFPExt(GateRef gate)
+{
+    VisitFPExt(gate, acc_.GetIn(gate, 0));
+}
+
+void LiteCGIRBuilder::VisitFPExt(GateRef gate, GateRef e1)
+{
+    Expr e1Value = gate2Expr_[e1];
+    ASSERT(GetBitWidthFromMachineType(acc_.GetMachineType(e1)) <=
+           GetBitWidthFromMachineType(acc_.GetMachineType(gate)));
+    Expr result = __ Cvt(ConvertLiteCGTypeFromGate(e1), ConvertLiteCGTypeFromGate(gate), e1Value);
+    gate2Expr_[gate] = result;
+}
+
+void LiteCGIRBuilder::HandleExtractValue(GateRef gate)
+{
+    GateRef pointer = acc_.GetIn(gate, 0);
+    GateRef index = acc_.GetIn(gate, 1);
+    VisitExtractValue(gate, pointer, index);
+}
+
+void LiteCGIRBuilder::VisitExtractValue(GateRef gate, GateRef e1, GateRef e2)
+{
+    Expr e1Value = gate2Expr_[e1];
+    ASSERT((acc_.GetOpCode(e2) == OpCode::CONSTANT) && acc_.GetMachineType(e2) == MachineType::I32);
+    uint32_t index = static_cast<uint32_t>(acc_.GetConstantValue(e2));
+    Var *baseVar = __ GetLocalVarFromExpr(e1Value);
+    ASSERT(baseVar != nullptr);
+    // in the maple type system, field 0 means the agg itself and field indices start from 1
+    Expr rhs = __ DreadWithField(*baseVar, index + 1);
+    PregIdx pregIdx = __ CreatePreg(rhs.GetType());
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), __ Regassign(rhs, pregIdx));
+    gate2Expr_[gate] = __ Regread(pregIdx);
+}
+
+void LiteCGIRBuilder::HandleStore(GateRef gate)
+{
+    VisitStore(gate, acc_.GetIn(gate, 2), acc_.GetIn(gate, 1));  // 2: baseAddr gate, 1: data gate
+}
+
+void LiteCGIRBuilder::VisitStore(GateRef gate, GateRef base, GateRef value)
+{
+    Expr baseAddr = gate2Expr_[base];
+    Expr data = gate2Expr_[value];
+
+    LiteCGType *returnType = ConvertLiteCGTypeFromGate(value);
+    LiteCGType *memType =
+        (__ IsHeapPointerType(baseAddr.GetType())) ? __ CreateRefType(returnType) : __ CreatePtrType(returnType);
+    baseAddr = CanonicalizeToPtr(baseAddr, memType);
+
+    Stmt &store = __ Iassign(data, baseAddr, memType);
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), store);
+}
+
+void LiteCGIRBuilder::HandlePhi(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitPhi(gate, ins);
+}
+
+void LiteCGIRBuilder::AddPhiDesc(int bbID, PhiDesc &desc)
+{
+    auto it = bbID2unmergedPhis_.find(bbID);
+    if (it == bbID2unmergedPhis_.end()) {
+        std::vector<PhiDesc> vec;
+        vec.push_back(std::move(desc));
+        bbID2unmergedPhis_.insert(std::make_pair(bbID, vec));
+    } else {
+        it->second.push_back(std::move(desc));
+    }
+}
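+// Phis are lowered to plain local variables: each VALUE_SELECTOR gets a fresh var, reads of
+// the phi become Dreads, and a Dassign is placed at the end of every predecessor BB. When a
+// predecessor has not been materialized yet, the assignment is queued via AddPhiDesc in
+// bbID2unmergedPhis_ and flushed at the end of Build().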
+void LiteCGIRBuilder::VisitPhi(GateRef gate, const std::vector<GateRef> &phiIns)
+{
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+
+    static uint32_t phiNo = 0;
+    std::string phiVarName = "phi" + std::to_string(phiNo++);
+    Var &phi = __ CreateLocalVar(type, phiVarName);
+
+    if (phiIns.size() > 1) {
+        gate2Expr_[gate] = __ Dread(phi);
+    }
+    // Collect the state merges of this phi; note that the first in-edge is the merged state.
+    std::vector<GateRef> phiStates;
+    acc_.GetIns(phiIns[0], phiStates);
+    ASSERT(phiStates.size() + 1 == phiIns.size());
+    int curBBId = instID2bbID_[acc_.GetId(gate)];
+    for (int i = 1; i < static_cast<int>(phiIns.size()); i++) {
+        int preBBId = LookupPredBB(phiStates[i - 1], curBBId);
+
+        // bbID2BB_.count(preBBId) == 0 means the BB with this id has not been created yet
+        if (bbID2BB_.count(preBBId) != 0) {
+            BB *preBB = bbID2BB_[preBBId];
+            if (preBB == nullptr) {
+                OPTIONAL_LOG_COMPILER(ERROR) << "VisitPhi failed BasicBlock nullptr";
+                return;
+            }
+            if (!__ IsEmptyBB(*preBB)) {
+                Expr value = gate2Expr_[phiIns[i]];
+                Stmt &phiAssign = __ Dassign(value, phi);
+                __ AppendStmtBeforeBranch(*preBB, phiAssign);
+            } else {
+                PhiDesc desc = {preBBId, phiIns[i], phi};
+                AddPhiDesc(curBBId, desc);
+            }
+        } else {
+            PhiDesc desc = {preBBId, phiIns[i], phi};
+            AddPhiDesc(curBBId, desc);
+        }
+    }
+}
+
+void LiteCGIRBuilder::HandleSwitch(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    std::vector<GateRef> outs;
+    acc_.GetOutStates(gate, outs);
+    VisitSwitch(gate, ins[1], outs);
+}
+
+void LiteCGIRBuilder::VisitSwitch(GateRef gate, GateRef input, const std::vector<GateRef> &outList)
+{
+    Expr cond = gate2Expr_[input];
+    int caseNum = static_cast<int>(outList.size());
+    BB *defaultOutBB = nullptr;
+    for (int i = 0; i < caseNum; i++) {
+        if (acc_.GetOpCode(outList[i]) == OpCode::DEFAULT_CASE) {
+            defaultOutBB = &GetOrCreateBB(instID2bbID_[acc_.GetId(outList[i])]);
+        }
+    }
+
+    LiteCGType *type = ConvertLiteCGTypeFromGate(gate);
+    SwitchBuilder builder = __ Switch(type, cond, *defaultOutBB);
+    for (int i = 0; i < caseNum; i++) {
+        if (acc_.GetOpCode(outList[i]) == OpCode::DEFAULT_CASE) {
+            continue;
+        }
+        BB &curOutBB = GetOrCreateBB(instID2bbID_[acc_.GetId(outList[i])]);
+        builder.Case(i, curOutBB);
+    }
+    Stmt &switchStmt = builder.Done();
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), switchStmt);
+    // TODO: to be confirmed
+    __ AppendBB(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]));
+}
+
+void LiteCGIRBuilder::HandleBytecodeCall(GateRef gate)
+{
+    std::vector<GateRef> ins;
+    acc_.GetIns(gate, ins);
+    VisitBytecodeCall(gate, ins);
+}
+
+Expr LiteCGIRBuilder::GetBaseOffset(GateRef gate, Expr glue)
+{
+    switch (acc_.GetOpCode(gate)) {
+        case OpCode::BYTECODE_CALL:
+            return GetBCStubOffset(glue);
+        case OpCode::DEBUGGER_BYTECODE_CALL:
+            return GetBCDebugStubOffset(glue);
+        default:
+            LOG_ECMA(FATAL) << "this branch is unreachable";
+            UNREACHABLE();
+    }
+}
+
+Expr LiteCGIRBuilder::GetBCStubOffset(Expr glue)
+{
+    return __ ConstVal(__ CreateIntConst(glue.GetType(),
+                                         JSThread::GlueData::GetBCStubEntriesOffset(compCfg_->Is32Bit())));
+}
+
+Expr LiteCGIRBuilder::GetBCDebugStubOffset(Expr glue)
+{
+    return __ ConstVal(__ CreateIntConst(glue.GetType(),
+                                         JSThread::GlueData::GetBCDebuggerStubEntriesOffset(compCfg_->Is32Bit())));
+}
+
+void LiteCGIRBuilder::VisitBytecodeCall(GateRef gate, const std::vector<GateRef> &inList)
+{
+    size_t paraStartIndex = static_cast<size_t>(CallInputs::FIRST_PARAMETER);
+    size_t targetIndex = static_cast<size_t>(CallInputs::TARGET);
+    size_t glueIndex = static_cast<size_t>(CallInputs::GLUE);
+    Expr opcodeOffset = gate2Expr_[inList[targetIndex]];
+
+    // start index of the bytecode handler csigns
+    Expr glue = gate2Expr_[inList[glueIndex]];
+    Expr baseOffset = GetBaseOffset(gate, glue);
+    Expr offset = __ Add(baseOffset.GetType(), baseOffset, opcodeOffset);
+    Expr rtbaseoffset = __ Add(glue.GetType(), glue, offset);
+    const CallSignature *signature = BytecodeStubCSigns::BCHandler();
+    BB &bb = GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]);
+    Expr callee = GetFunction(bb, glue, signature, rtbaseoffset);
+
+    std::vector<Expr> params;
+    for (size_t paraIdx = paraStartIndex; paraIdx < inList.size(); ++paraIdx) {
+        GateRef gateTmp = inList[paraIdx];
+        params.push_back(gate2Expr_[gateTmp]);
+    }
+
+    LiteCGType *funcType = GenerateFuncType(params, signature);
+    LiteCGType *returnType = __ LiteCGGetFuncReturnType(funcType);
+    static uint32_t retNo = 0;
+    std::string retName = signature->GetName() + "Ret" + std::to_string(retNo++);
+    Var *returnVar = (returnType == __ voidType) ? nullptr : &(__ CreateLocalVar(returnType, retName));
+    Stmt &callNode = __ ICall(callee, params, returnVar);
+    __ AppendStmt(bb, callNode);
+    if (returnVar != nullptr) {
+        gate2Expr_[gate] = __ Dread(*returnVar);
+    }
+
+    // TODO: mark the call as a GC leaf function and as a tail call
+    __ SetStmtCallConv(callNode, maple::litecg::GHC_Call);
+
+    // TODO: SetDebugInfo(gate, ...) once IsLogEnabled() is supported
+}
+
+void LiteCGIRBuilder::HandleDeoptCheck(GateRef gate)
+{
+    int block = instID2bbID_[acc_.GetId(gate)];
+    std::vector<GateRef> outs;
+    acc_.GetOutStates(gate, outs);
+    int bbOut = instID2bbID_[acc_.GetId(outs[0])];  // 0: output
+
+    BB &trueBB = GetOrCreateBB(bbOut);
+    BB &falseBB = CreateBB();
+    GateRef cmp = acc_.GetValueIn(gate, 0);  // 0: cond
+    Expr cond = gate2Expr_[cmp];
+    BB &curBB = GetOrCreateBB(block);
+    __ AppendStmt(curBB, __ CondGoto(cond, trueBB, true));
+    __ AppendBB(curBB);
+    __ AppendBB(falseBB);
+
+    VisitDeoptCheck(gate);
+    Expr returnValue = gate2Expr_[gate];
+    __ AppendStmt(falseBB, __ Return(returnValue));
+}
+
+LiteCGType *LiteCGIRBuilder::GetExperimentalDeoptTy()
+{
+    // TODO: confirm that GetTaggedHPtrT() maps to __ i64RefType
+    std::vector<LiteCGType *> paramTys = {__ i64Type, __ i64RefType, __ i64RefType};
+    LiteCGType *functionType = __ CreateFuncType(paramTys, __ i64RefType, false);
+    return functionType;
+}
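+// Deopt support: when a DEOPT_CHECK fails, control falls into a lazily created helper
+// function ("litecg.experimental.deoptimize.p1i64") that forwards (glue, deopt type, max
+// inline depth) to the DeoptHandlerAsm runtime stub. The deopt bundle assembled in
+// VisitDeoptCheck further below pins every live vreg, acc, env, pc offset and callee info
+// to pseudo-registers so the deoptimizer can reconstruct interpreter frames.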
+void LiteCGIRBuilder::SaveFrameTypeOnFrame(BB &bb, FrameType frameType)
+{
+    Expr fpAddr = CallingFp(false);
+    Expr frameAddr = __ Cvt(fpAddr.GetType(), slotType_, fpAddr);
+    Expr frameTypeSlotAddr = __ Sub(slotType_, frameAddr, __ ConstVal(__ CreateIntConst(slotType_, slotSize_)));
+    LiteCGType *slotTypePtr = __ CreatePtrType(slotType_);
+    Expr addr = __ Cvt(frameTypeSlotAddr.GetType(), slotTypePtr, frameTypeSlotAddr);
+    Expr frameTypeValue = __ ConstVal(__ CreateIntConst(slotType_, static_cast<int64_t>(frameType)));
+    Stmt &stmt = __ Iassign(frameTypeValue, addr, slotTypePtr);
+    __ AppendStmt(bb, stmt);
+}
+
+void LiteCGIRBuilder::GenDeoptEntry(std::string funcName)
+{
+    BB &bb = CreateBB();
+    // TODO: function attributes are not implemented yet; reserving frame slots would be:
+    // auto reservedSlotsSize = OptimizedFrame::ComputeReservedSize(slotSize_);
+    // add the "frame-reserved-slots" attribute with reservedSlotsSize
+    SaveFrameTypeOnFrame(bb, FrameType::OPTIMIZED_FRAME);
+    Function &func = __ GetCurFunction();
+    lmirModule_->SetFunction(LMIRModule::kDeoptEntryOffset, funcName, false);
+
+    Expr glue = __ Dread(__ GetParam(func, 0));
+    Expr check = __ Dread(__ GetParam(func, 1));
+    Expr depth = __ Dread(__ GetParam(func, 2));
+
+    StubIdType stubId = RTSTUB_ID(DeoptHandlerAsm);
+    int stubIndex = static_cast<int>(std::get<RuntimeStubCSigns::ID>(stubId));
+    Expr rtoffset = __ Add(glue.GetType(), glue, GetRTStubOffset(glue, stubIndex));
+    Expr patchAddr = __ Cvt(glue.GetType(), __ i64PtrType, rtoffset);
+    Expr funcAddr = __ Iread(rtoffset.GetType(), patchAddr, __ i64PtrType);
+
+    LiteCGType *funcType = GetExperimentalDeoptTy();
+    LiteCGType *funcTypePtr = __ CreatePtrType(funcType);
+    LiteCGType *funcTypePtrPtr = __ CreatePtrType(funcTypePtr);
+    Expr callee = __ Cvt(glue.GetType(), funcTypePtrPtr, funcAddr);
+
+    Var &funcVar = __ CreateLocalVar(callee.GetType(), "DeoptimizeSubFunc");
+    Stmt &funcAddrNode = __ Dassign(callee, funcVar);
+    __ AppendStmt(bb, funcAddrNode);
+
+    LiteCGType *returnType = __ LiteCGGetFuncReturnType(funcType);
+    static uint32_t retNo = 0;
+    std::string retName = "Deoptimize.P1i64.Ret" + std::to_string(retNo++);
+    Var *returnVar = (returnType == __ voidType) ? nullptr : &(__ CreateLocalVar(returnType, retName));
+    std::vector<Expr> params = {glue, check, depth};
+    Stmt &callNode = __ ICall(__ Dread(funcVar), params, returnVar);
+    __ AppendStmt(bb, callNode);
+    __ AppendStmt(bb, __ Return(__ Dread(*returnVar)));
+    __ AppendBB(bb);
+}
+
+Function *LiteCGIRBuilder::GetExperimentalDeopt()
+{
+    std::string funcName = "litecg.experimental.deoptimize.p1i64";
+    auto fn = __ GetFunc(funcName);
+    if (!fn) {
+        // save the previous function so the builder environment can be restored afterwards
+        Function &preFunc = __ GetCurFunction();
+        auto fnTy = GetExperimentalDeoptTy();
+        FunctionBuilder funcBuilder = __ DefineFunction(funcName);
+        // params: glue, deopt type, max depth
+        funcBuilder.Param(__ i64Type, "glue")
+            .Param(__ i64RefType, "deopt_type")
+            .Param(__ i64RefType, "max_depth");
+        funcBuilder.CallConvAttribute(maple::litecg::CCall);
+        Function &curFunc = funcBuilder.Return(__ LiteCGGetFuncReturnType(fnTy)).Done();
+        __ SetCurFunc(curFunc);
+        GenDeoptEntry(funcName);
+        fn = &curFunc;
+
+        __ SetCurFunc(preFunc);
+    }
+    return fn;
+}
+
+Expr LiteCGIRBuilder::ConvertToTagged(GateRef gate)
+{
+    auto machineType = acc_.GetMachineType(gate);
+    switch (machineType) {
+        case MachineType::I1:
+            return ConvertBoolToTaggedBoolean(gate);
+        case MachineType::I32:
+            return ConvertInt32ToTaggedInt(gate2Expr_[gate]);
+        case MachineType::F64:
+            return ConvertFloat64ToTaggedDouble(gate);
+        case MachineType::I64:
+            break;
+        default:
+            LOG_COMPILER(FATAL) << "unexpected machineType!";
+            UNREACHABLE();
+            break;
+    }
+    return gate2Expr_.at(gate);
+}
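+// Tagged value encodings used by the helpers below (mirroring JSTaggedValue): ints are
+// sign-extended to 64 bits and OR-ed with TAG_INT, booleans are zero-extended and OR-ed
+// with TAG_BOOLEAN_MASK, and doubles are bit-cast to i64 plus DOUBLE_ENCODE_OFFSET. For
+// example, 42 encodes as (TAG_INT | 42) and true as (TAG_BOOLEAN_MASK | 1).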
+void LiteCGIRBuilder::SaveDeoptVregInfo(std::map<int32_t, PregIdx> &deoptBundleInfo, BB &bb,
+                                        int32_t index, size_t curDepth, size_t shift, GateRef gate)
+{
+    int32_t encodeIndex = Deoptimizier::EncodeDeoptVregIndex(index, curDepth, shift);
+    Expr value = ConvertToTagged(gate);
+    PregIdx pregIdx = __ CreatePreg(value.GetType());
+    __ AppendStmt(bb, __ Regassign(value, pregIdx));
+    deoptBundleInfo.insert(std::pair<int32_t, PregIdx>(encodeIndex, pregIdx));
+}
+
+void LiteCGIRBuilder::SaveDeoptVregInfoWithI64(std::map<int32_t, PregIdx> &deoptBundleInfo, BB &bb,
+                                               int32_t index, size_t curDepth, size_t shift, GateRef gate)
+{
+    int32_t encodeIndex = Deoptimizier::EncodeDeoptVregIndex(index, curDepth, shift);
+    Expr expr = gate2Expr_.at(gate);
+    Expr value = ConvertInt32ToTaggedInt(__ Cvt(expr.GetType(), __ i32Type, expr));
+    PregIdx pregIdx = __ CreatePreg(value.GetType());
+    __ AppendStmt(bb, __ Regassign(value, pregIdx));
+    deoptBundleInfo.insert(std::pair<int32_t, PregIdx>(encodeIndex, pregIdx));
+}
+
+void LiteCGIRBuilder::VisitDeoptCheck(GateRef gate)
+{
+    BB &bb = __ GetLastAppendedBB(); // falseBB of deopt check
+    Expr glue = gate2Expr_.at(acc_.GetGlueFromArgList());
+    GateRef deoptFrameState = acc_.GetValueIn(gate, 1); // 1: frame state
+    ASSERT(acc_.GetOpCode(deoptFrameState) == OpCode::FRAME_STATE);
+    std::vector<Expr> params;
+    params.push_back(glue); // glue
+    GateRef deoptType = acc_.GetValueIn(gate, 2); // 2: deopt type
+    uint64_t v = acc_.GetConstantValue(deoptType);
+    Expr constV = __ ConstVal(__ CreateIntConst(__ u32Type, static_cast<uint32_t>(v)));
+    params.push_back(ConvertInt32ToTaggedInt(constV)); // deoptType
+    Function *callee = GetExperimentalDeopt();
+    LiteCGType *funcType = GetExperimentalDeoptTy();
+
+    std::map<int32_t, PregIdx> deoptBundleInfo;
+    size_t maxDepth = 0;
+    GateRef frameState = acc_.GetFrameState(deoptFrameState);
+    while (acc_.GetOpCode(frameState) == OpCode::FRAME_STATE) {
+        maxDepth++;
+        frameState = acc_.GetFrameState(frameState);
+    }
+    Expr constMaxDepth = __ ConstVal(__ CreateIntConst(__ u32Type, static_cast<uint32_t>(maxDepth)));
+    params.push_back(ConvertInt32ToTaggedInt(constMaxDepth));
+    size_t shift = Deoptimizier::ComputeShift(maxDepth);
+    frameState = deoptFrameState;
+    ArgumentAccessor argAcc(const_cast<Circuit *>(circuit_));
+    for (int32_t curDepth = static_cast<int32_t>(maxDepth); curDepth >= 0; curDepth--) {
+        ASSERT(acc_.GetOpCode(frameState) == OpCode::FRAME_STATE);
+        GateRef frameValues = acc_.GetValueIn(frameState, 1); // 1: frame values
+        const size_t numValueIn = acc_.GetNumValueIn(frameValues);
+        const size_t envIndex = numValueIn - 2; // 2: env valueIn index
+        const size_t accIndex = numValueIn - 1; // 1: acc valueIn index
+        GateRef env = acc_.GetValueIn(frameValues, envIndex);
+        GateRef acc = acc_.GetValueIn(frameValues, accIndex);
+        auto pc = acc_.TryGetPcOffset(frameState);
+        GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC);
+        GateRef newTarget = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::NEW_TARGET);
+        GateRef thisObj = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::THIS_OBJECT);
+        GateRef actualArgc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::ACTUAL_ARGC);
+        // vreg
+        for (size_t i = 0; i < envIndex; i++) {
+            GateRef vregValue = acc_.GetValueIn(frameValues, i);
+            if (acc_.IsConstantValue(vregValue, JSTaggedValue::VALUE_OPTIMIZED_OUT)) {
+                continue;
+            }
+            SaveDeoptVregInfo(deoptBundleInfo, bb, i, curDepth, shift, vregValue);
+        }
+        // env
+        if (!acc_.IsConstantValue(env, JSTaggedValue::VALUE_OPTIMIZED_OUT)) {
+            int32_t specEnvVregIndex = static_cast<int32_t>(SpecVregIndex::ENV_INDEX);
+            SaveDeoptVregInfo(deoptBundleInfo, bb, specEnvVregIndex, curDepth, shift, env);
+        }
+        // acc
+        if (!acc_.IsConstantValue(acc, JSTaggedValue::VALUE_OPTIMIZED_OUT)) {
+            int32_t specAccVregIndex = static_cast<int32_t>(SpecVregIndex::ACC_INDEX);
+            SaveDeoptVregInfo(deoptBundleInfo, bb, specAccVregIndex, curDepth, shift, acc);
+        }
+        // pc offset
+        int32_t specPcOffsetIndex = static_cast<int32_t>(SpecVregIndex::PC_OFFSET_INDEX);
+        int32_t encodeIndex = Deoptimizier::EncodeDeoptVregIndex(specPcOffsetIndex, curDepth, shift);
+        Expr pcOffset = __ ConstVal(__ CreateIntConst(__ u32Type, pc));
+        PregIdx pregIdx = __ CreatePreg(pcOffset.GetType());
+        __ AppendStmt(bb, __ Regassign(pcOffset, pregIdx));
+        deoptBundleInfo.insert(std::pair<int32_t, PregIdx>(encodeIndex, pregIdx));
+
+        // func
+        int32_t specCallTargetIndex = static_cast<int32_t>(SpecVregIndex::FUNC_INDEX);
+        SaveDeoptVregInfo(deoptBundleInfo, bb, specCallTargetIndex, curDepth, shift, jsFunc);
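+        // The remaining special slots below (new.target, this, actual argc)
+        // complete the snapshot the deoptimizer uses to rebuild the
+        // interpreter frame at this inline depth.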
+        // newTarget
+        int32_t specNewTargetIndex = static_cast<int32_t>(SpecVregIndex::NEWTARGET_INDEX);
+        SaveDeoptVregInfo(deoptBundleInfo, bb, specNewTargetIndex, curDepth, shift, newTarget);
+        // this object
+        int32_t specThisIndex = static_cast<int32_t>(SpecVregIndex::THIS_OBJECT_INDEX);
+        SaveDeoptVregInfo(deoptBundleInfo, bb, specThisIndex, curDepth, shift, thisObj);
+        // actual argc
+        int32_t specArgcIndex = static_cast<int32_t>(SpecVregIndex::ACTUAL_ARGC_INDEX);
+        SaveDeoptVregInfoWithI64(deoptBundleInfo, bb, specArgcIndex, curDepth, shift, actualArgc);
+        frameState = acc_.GetFrameState(frameState);
+    }
+
+    static uint32_t retNo = 0;
+    std::string retName = "DeoptCheckRet" + std::to_string(retNo++);
+    LiteCGType *returnType = __ LiteCGGetFuncReturnType(funcType);
+    Var *returnVar = (returnType == __ voidType) ? nullptr : &(__ CreateLocalVar(returnType, retName));
+    Stmt &callNode = __ Call(*callee, params, returnVar);
+    maple::litecg::LiteCGSetDeoptBundleInfo(callNode, deoptBundleInfo);
+
+    __ AppendStmt(bb, callNode);
+    if (returnVar != nullptr) {
+        gate2Expr_[gate] = __ Dread(returnVar);
+    }
+}
+
+void LiteCGIRBuilder::HandleConstString(GateRef gate)
+{
+    const ChunkVector<char> &str = acc_.GetConstantString(gate);
+    VisitConstString(gate, str);
+}
+
+void LiteCGIRBuilder::VisitConstString(GateRef gate, const ChunkVector<char> &str)
+{
+    ASSERT(acc_.GetMachineType(gate) == MachineType::ARCH);
+
+    Expr value = __ ConstVal(__ CreateStrConst(std::string(str.data(), str.size())));
+    static uint32_t val = 0;
+    std::string name = "ConstStringVar" + std::to_string(val++);
+    Var &var = __ CreateLocalVar(value.GetType(), name);
+    Stmt &stmt = __ Dassign(value, var);
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), stmt);
+    gate2Expr_[gate] = __ Addrof(var);
+}
+
+void LiteCGIRBuilder::HandleRelocatableData(GateRef gate)
+{
+    uint64_t value = acc_.TryGetValue(gate);
+    VisitRelocatableData(gate, value);
+}
+
+void LiteCGIRBuilder::VisitRelocatableData(GateRef gate, uint64_t value)
+{
+    Var &var = __ CreateGlobalVar(__ i64Type, "G", maple::litecg::GlobalVarAttr::VAR_internal);
+    Expr constVal = __ ConstVal(__ CreateIntConst(__ i64Type, value));
+    Stmt &stmt = __ Dassign(constVal, var);
+    __ AppendStmt(GetOrCreateBB(instID2bbID_[acc_.GetId(gate)]), stmt);
+    gate2Expr_[gate] = __ Dread(var);
+}
+
+void LiteCGIRBuilder::HandleAlloca(GateRef gate)
+{
+    return VisitAlloca(gate);
+}
+
+void LiteCGIRBuilder::VisitAlloca(GateRef gate)
+{
+    uint64_t machineRep = acc_.TryGetValue(gate);
+    LiteCGType *dataType = GetMachineRepType(static_cast<MachineRep>(machineRep));
+
+    static uint32_t val = 0;
+    std::string name = "AllocaVar" + std::to_string(val++);
+    Var &var = __ CreateLocalVar(dataType, name);
+    Expr addr = __ Addrof(var);
+    __ Cvt(addr.GetType(), ConvertLiteCGTypeFromGate(gate), addr);
+
+    gate2Expr_[gate] = addr;
+}
+
+LiteCGType *LiteCGIRBuilder::GetMachineRepType(MachineRep rep) const
+{
+    LiteCGType *dstType;
+    switch (rep) {
+        case MachineRep::K_BIT:
+            dstType = __ u1Type;
+            break;
+        case MachineRep::K_WORD8:
+            dstType = __ i8Type;
+            break;
+        case MachineRep::K_WORD16:
+            dstType = __ i16Type;
+            break;
+        case MachineRep::K_WORD32:
+            dstType = __ i32Type;
+            break;
+        case MachineRep::K_FLOAT64:
+            dstType = __ f64Type;
+            break;
+        case MachineRep::K_WORD64:
+            dstType = __ i64Type;
+            break;
+        case MachineRep::K_PTR_1:
+            dstType = __ i64RefType;
+            break;
+        case MachineRep::K_META:
+            // TODO: not yet implemented
+            // dstType = LLVMMetadataTypeInContext(context_);
+            LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE(); + break; + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + break; + } + return dstType; +} + +int64_t LiteCGIRBuilder::GetBitWidthFromMachineType(MachineType machineType) const +{ + switch (machineType) { + case NOVALUE: + return 0; + case ARCH: + return 48; // 48: Pointer representation in different architectures + case I1: + return 1; + case I8: + return 8; // 8: bit width + case I16: + return 16; // 16: bit width + case I32: + return 32; // 32: bit width + case I64: + return 64; // 64: bit width + case F32: + return 32; // 32: bit width + case F64: + return 64; // 64: bit width + case FLEX: + case ANYVALUE: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } +} + +int LiteCGIRBuilder::LookupPredBB(GateRef start, int bbID) +{ + GateId gateId = acc_.GetId(start); + int owner = instID2bbID_[gateId]; + if (owner != bbID) { + return owner; + } + GateRef pred = start; + while (owner == bbID) { + pred = acc_.GetState(pred); + auto id = acc_.GetId(pred); + owner = instID2bbID_[id]; + } + return owner; +} + +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/litecg_ir_builder.h b/ecmascript/compiler/litecg_ir_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..7cbdd1fb8c77a3248e195f6af6a4afd3096e9968 --- /dev/null +++ b/ecmascript/compiler/litecg_ir_builder.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ECMASCRIPT_COMPILER_LITECG_IR_BUILDER_H +#define ECMASCRIPT_COMPILER_LITECG_IR_BUILDER_H + +#include +#include + +#include "ecmascript/compiler/circuit.h" +#include "ecmascript/compiler/gate.h" +#include "ecmascript/compiler/stub_builder.h" +#include "ecmascript/compiler/call_signature.h" +#include "ecmascript/compiler/common_stubs.h" +#include "ecmascript/compiler/interpreter_stub.h" +#include "ecmascript/compiler/rt_call_signature.h" +#include "ecmascript/compiler/ir_builder.h" +#include "ecmascript/compiler/ir_module.h" +#include "ecmascript/jspandafile/method_literal.h" +#include "lmir_builder.h" + +namespace panda::ecmascript::kungfu { + +struct PhiDesc { + int predBBId; + GateRef operand; + maple::litecg::Var& phi; +}; + +class LMIRModule : public IRModule +{ +public: + static constexpr int kDeoptEntryOffset = 0; + LMIRModule(NativeAreaAllocator* allocator, const std::string &name, bool logDbg, const std::string &triple) + : IRModule(allocator, logDbg, triple) + { + module = maple::litecg::CreateModuleWithName(name); + } + + ~LMIRModule() + { + if (module != nullptr) { + maple::litecg::ReleaseModule(module); + } + } + + maple::litecg::Module *GetModule() + { + return module; + } + + ModuleKind GetModuleKind() const override + { + return MODULE_LITECG; + } + + void SetFunction(size_t index, std::string funcName, bool isFastCall) + { + funcIndexMap_.emplace_back(std::make_tuple(index, funcName, isFastCall)); + } + + template + void IteratefuncIndexMap(const Callback &cb) const + { + for (auto record : funcIndexMap_) { + // 2: 3nd param + cb(std::get<0>(record), std::get<1>(record), std::get<2>(record)); + } + } + +private: + maple::litecg::Module *module; + std::vector> funcIndexMap_; +}; + +class LiteCGIRBuilder +{ +public: + LiteCGIRBuilder(const std::vector> *schedule, Circuit *circuit, + LMIRModule *module, const CompilationConfig *cfg, + CallSignature::CallConv callConv, bool enableLog, + const panda::ecmascript::MethodLiteral *methodLiteral, + const JSPandaFile *jsPandaFile, + const std::string &funcName); + ~LiteCGIRBuilder(); + void Build(); + +private: + const std::vector> *scheduledGates_ {nullptr}; + const Circuit *circuit_ {nullptr}; + LMIRModule *lmirModule_ {nullptr}; + const CompilationConfig *compCfg_ {nullptr}; + CallSignature::CallConv callConv_ = CallSignature::CallConv::CCallConv; + bool enableLog_ {false}; + const panda::ecmascript::MethodLiteral *methodLiteral_ {nullptr}; + const JSPandaFile *jsPandaFile_ {nullptr}; + std::string funcName_; + GateAccessor acc_; + maple::litecg::LMIRBuilder *lmirBuilder_ {nullptr}; + std::unordered_map gate2Expr_; + std::unordered_map opHandlers_; + std::set illegalOpHandlers_; + std::map instID2bbID_; + std::map bbID2BB_; + int slotSize_ {-1}; + maple::litecg::Type *slotType_ {nullptr}; + std::map> bbID2unmergedPhis_; + + #define DECLAREVISITLOWEROPCODE(name, signature) void Visit##name signature; + OPCODES(DECLAREVISITLOWEROPCODE) + #undef DECLAREVISITLOWEROPCODE + #define DECLAREHANDLELOWEROPCODE(name, ignore) void Handle##name(GateRef gate); + OPCODES(DECLAREHANDLELOWEROPCODE) + #undef DECLAREHANDLELOWEROPCODE + void BuildInstID2BBIDMap(); + maple::litecg::BB &GetOrCreateBB(int bbID); + maple::litecg::BB &GetFirstBB(); + maple::litecg::BB &CreateBB(); + void AddPhiDesc(int bbID, PhiDesc &desc); + maple::litecg::Type *ConvertLiteCGTypeFromGate(GateRef gate) const; + maple::litecg::IntCmpCondition ConvertLiteCGPredicateFromICMP(ICmpCondition cond) const; + void InitializeHandlers(); + maple::litecg::Expr 
GetGlue(const std::vector &inList); + maple::litecg::Expr GetRTStubOffset(maple::litecg::Expr glue, int index); + maple::litecg::Type *ConvertLiteCGTypeFromVariableType(VariableType type) const; + maple::litecg::Type *GenerateFuncType(const std::vector ¶ms, + const CallSignature *stubDescriptor); + maple::litecg::Type *GetFuncType(const CallSignature *stubDescriptor) const; + maple::litecg::Expr GetFunction(maple::litecg::BB &bb, maple::litecg::Expr glue, const CallSignature *signature, + maple::litecg::Expr rtbaseoffset, const std::string &realName = "") const; + bool IsOptimizedJSFunction() const; + bool IsOptimized() const; + CallExceptionKind GetCallExceptionKind(size_t index, OpCode op) const; + maple::litecg::Expr GetRTStubOffset(maple::litecg::Expr glue, int index) const; + maple::litecg::Expr GetCoStubOffset(maple::litecg::Expr glue, int index) const; + maple::litecg::Expr GetCallee(maple::litecg::BB &bb, const std::vector &inList, + const CallSignature *signature, const std::string &realName); + maple::litecg::Expr CanonicalizeToPtr(maple::litecg::Expr expr, maple::litecg::Type *type); + // maple::litecg::Expr PointerAdd(maple::litecg::Expr baseAddr, + // maple::litecg::Expr offset, LiteCGType *type); + maple::litecg::Expr CanonicalizeToInt(GateRef gate); + int64_t GetBitWidthFromMachineType(MachineType machineType) const; + int LookupPredBB(GateRef start, int bbID); + maple::litecg::Expr GetBuiltinsStubOffset(maple::litecg::Expr glue); + void UpdateLeaveFrame(maple::litecg::Expr glue); + maple::litecg::Expr GetLeaveFrameOffset(maple::litecg::Expr glue); + maple::litecg::Expr CallingFp(bool isCaller); + maple::litecg::Expr GetBaseOffset(GateRef gate, maple::litecg::Expr glue); + maple::litecg::Expr GetBCDebugStubOffset(maple::litecg::Expr glue); + maple::litecg::Expr GetBCStubOffset(maple::litecg::Expr glue); + maple::litecg::Type *GetExperimentalDeoptTy(); + maple::litecg::Function *GetExperimentalDeopt(); + void GenDeoptEntry(std::string funcName); + void SaveFrameTypeOnFrame(maple::litecg::BB &bb, FrameType frameType); + maple::litecg::Expr ConvertToTagged(GateRef gate); + maple::litecg::Expr ConvertInt32ToTaggedInt(maple::litecg::Expr value); + maple::litecg::Expr ConvertBoolToTaggedBoolean(GateRef gate); + maple::litecg::Expr ConvertFloat64ToTaggedDouble(GateRef gate); + void SaveDeoptVregInfo(std::map &deoptBundleInfo, maple::litecg::BB &bb, + int32_t index, size_t curDepth, size_t shift, GateRef gate); + void SaveDeoptVregInfoWithI64(std::map &deoptBundleInfo, maple::litecg::BB &bb, + int32_t index, size_t curDepth, size_t shift, GateRef gate); + maple::litecg::Type *GetMachineRepType(MachineRep rep) const; + + maple::litecg::ConvAttr ConvertCallAttr(const CallSignature::CallConv callConv); + void GenPrologue(maple::litecg::Function &function); + void SaveJSFuncOnOptJSFuncFrame(maple::litecg::Var &value); + void SaveFrameTypeOnFrame(FrameType frameType); + bool IsInterpreted() const; + void AddFunc(); + bool IsLogEnabled() const + { + return enableLog_; + } +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_LITECG_IR_BUILDER_H diff --git a/ecmascript/compiler/llvm_codegen.cpp b/ecmascript/compiler/llvm_codegen.cpp index bc80d7798eb4805caec035272800ac40d16a17ed..dd14d922576895014004cb9e16a6c4e53fd6c174 100644 --- a/ecmascript/compiler/llvm_codegen.cpp +++ b/ecmascript/compiler/llvm_codegen.cpp @@ -39,7 +39,11 @@ #include "llvm-c/DisassemblerTypes.h" #include "llvm-c/Target.h" #include "llvm-c/Transforms/PassManagerBuilder.h" +#if 
defined(PANDA_TARGET_MACOS) #include "llvm/CodeGen/BuiltinGCs.h" +#else +#include "llvm/IR/BuiltinGCs.h" +#endif #include "llvm/DebugInfo/DWARF/DWARFContext.h" #include "llvm/DebugInfo/DIContext.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" @@ -112,12 +116,8 @@ CodeInfo::CodeSpace::~CodeSpace() unreqSecs_ = nullptr; } -uint8_t *CodeInfo::CodeSpace::Alloca(uintptr_t size, bool isReq, size_t alignSize, bool alignFlag) +uint8_t *CodeInfo::CodeSpace::Alloca(uintptr_t size, bool isReq, size_t alignSize) { - // align up for rodata section - if (alignFlag) { - size = AlignUp(size, static_cast(MemAlignment::MEM_ALIGN_REGION)); - } uint8_t *addr = nullptr; auto bufBegin = isReq ? reqSecs_ : unreqSecs_; auto &curPos = isReq ? reqBufPos_ : unreqBufPos_; @@ -135,9 +135,9 @@ uint8_t *CodeInfo::CodeSpace::Alloca(uintptr_t size, bool isReq, size_t alignSiz return addr; } -uint8_t *CodeInfo::AllocaInReqSecBuffer(uintptr_t size, size_t alignSize, bool alignFlag) +uint8_t *CodeInfo::AllocaInReqSecBuffer(uintptr_t size, size_t alignSize) { - return CodeSpace::GetInstance()->Alloca(size, true, alignSize, alignFlag); + return CodeSpace::GetInstance()->Alloca(size, true, alignSize); } uint8_t *CodeInfo::AllocaInNotReqSecBuffer(uintptr_t size, size_t alignSize) @@ -147,9 +147,18 @@ uint8_t *CodeInfo::AllocaInNotReqSecBuffer(uintptr_t size, size_t alignSize) uint8_t *CodeInfo::AllocaCodeSection(uintptr_t size, const char *sectionName) { - // if have got section, don't use align. - uint8_t *addr = AllocaInReqSecBuffer(size, false); + uint8_t *addr = nullptr; auto curSec = ElfSection(sectionName); + if (curSec.isValidAOTSec()) { + if (!alreadyPageAlign_) { + addr = AllocaInReqSecBuffer(size, AOTFileInfo::PAGE_ALIGN); + alreadyPageAlign_ = true; + } else { + addr = AllocaInReqSecBuffer(size, AOTFileInfo::TEXT_SEC_ALIGN); + } + } else { + addr = AllocaInReqSecBuffer(size); + } codeInfo_.push_back({addr, size}); if (curSec.isValidAOTSec()) { secInfos_[curSec.GetIntIndex()] = std::make_pair(addr, size); @@ -164,8 +173,14 @@ uint8_t *CodeInfo::AllocaDataSection(uintptr_t size, const char *sectionName) // rodata section needs 16 bytes alignment if (curSec.InRodataSection()) { size = AlignUp(size, static_cast(MemAlignment::MEM_ALIGN_REGION)); - addr = curSec.isSequentialAOTSec() ? AllocaInReqSecBuffer(size, AOTFileInfo::TEXT_SEC_ALIGN) - : AllocaInNotReqSecBuffer(size, AOTFileInfo::TEXT_SEC_ALIGN); + if (!alreadyPageAlign_) { + addr = curSec.isSequentialAOTSec() ? AllocaInReqSecBuffer(size, AOTFileInfo::PAGE_ALIGN) + : AllocaInNotReqSecBuffer(size, AOTFileInfo::PAGE_ALIGN); + alreadyPageAlign_ = true; + } else { + addr = curSec.isSequentialAOTSec() ? AllocaInReqSecBuffer(size, AOTFileInfo::DATA_SEC_ALIGN) + : AllocaInNotReqSecBuffer(size, AOTFileInfo::DATA_SEC_ALIGN); + } } else { addr = curSec.isSequentialAOTSec() ? 
AllocaInReqSecBuffer(size) : AllocaInNotReqSecBuffer(size); } @@ -175,6 +190,10 @@ uint8_t *CodeInfo::AllocaDataSection(uintptr_t size, const char *sectionName) return addr; } +void CodeInfo::SaveFunc2Addr(std::string funcName, uint32_t address) { + func2Addr.insert(std::pair(funcName, address)); +} + void CodeInfo::Reset() { codeInfo_.clear(); @@ -300,8 +319,35 @@ void LLVMAssembler::BuildAndRunPasses() LLVMDisposePassManager(modPass1); } +void LLVMAssembler::BuildAndRunPassesFastMode() +{ + LLVMPassManagerBuilderRef pmBuilder = LLVMPassManagerBuilderCreate(); + LLVMPassManagerBuilderSetOptLevel(pmBuilder, options_.OptLevel); // using O3 optimization level + LLVMPassManagerBuilderSetSizeLevel(pmBuilder, 0); + + // pass manager creation:rs4gc pass is the only pass in modPass, other opt module-based pass are in modPass1 + LLVMPassManagerRef funcPass = LLVMCreateFunctionPassManagerForModule(module_); + LLVMPassManagerRef modPass = LLVMCreatePassManager(); + + // add pass into pass managers + LLVMPassManagerBuilderPopulateFunctionPassManager(pmBuilder, funcPass); + llvm::unwrap(modPass)->add(llvm::createRewriteStatepointsForGCLegacyPass()); // rs4gc pass added + + LLVMInitializeFunctionPassManager(funcPass); + for (LLVMValueRef fn = LLVMGetFirstFunction(module_); fn; fn = LLVMGetNextFunction(fn)) { + LLVMRunFunctionPassManager(funcPass, fn); + } + LLVMFinalizeFunctionPassManager(funcPass); + LLVMRunPassManager(modPass, module_); + + LLVMPassManagerBuilderDispose(pmBuilder); + LLVMDisposePassManager(funcPass); + LLVMDisposePassManager(modPass); +} + LLVMAssembler::LLVMAssembler(LLVMModule *lm, LOptions option) - : llvmModule_(lm), + : Assembler(), + llvmModule_(lm), module_(llvmModule_->GetModule()), listener_(this) { @@ -325,7 +371,7 @@ LLVMAssembler::~LLVMAssembler() error_ = nullptr; } -void LLVMAssembler::Run(const CompilerLog &log) +void LLVMAssembler::Run(const CompilerLog &log, bool fastCompileMode) { char *error = nullptr; std::string originName = llvm::unwrap(module_)->getModuleIdentifier() + ".ll"; @@ -342,7 +388,11 @@ void LLVMAssembler::Run(const CompilerLog &log) return; } llvm::unwrap(engine_)->setProcessAllSections(true); - BuildAndRunPasses(); + if (fastCompileMode) { + BuildAndRunPassesFastMode(); + } else { + BuildAndRunPasses(); + } if (log.OutputLLIR()) { error = nullptr; LLVMPrintModuleToFile(module_, optName.c_str(), &error); @@ -371,15 +421,6 @@ void LLVMAssembler::Initialize(LOptions option) LLVMInitializeAArch64AsmPrinter(); LLVMInitializeAArch64AsmParser(); LLVMInitializeAArch64Target(); - } else if (triple.compare(TARGET_ARM32) == 0) { -#if defined(PANDA_TARGET_MACOS) || !defined(PANDA_TARGET_ARM64) - LLVMInitializeARMTargetInfo(); - LLVMInitializeARMTargetMC(); - LLVMInitializeARMDisassembler(); - LLVMInitializeARMAsmPrinter(); - LLVMInitializeARMAsmParser(); - LLVMInitializeARMTarget(); -#endif } else { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -406,7 +447,11 @@ kungfu::CalleeRegAndOffsetVec LLVMAssembler::GetCalleeReg2Offset(LLVMValueRef fn { kungfu::CalleeRegAndOffsetVec info; llvm::Function* func = llvm::unwrap(fn); +#if defined(PANDA_TARGET_MACOS) for (const auto &Attr : func->getAttributes().getFnAttributes()) { +#else + for (const auto &Attr : func->getAttributes().getFnAttrs()) { +#endif if (Attr.isStringAttribute()) { std::string str = std::string(Attr.getKindAsString().data()); std::string expectedKey = "DwarfReg"; diff --git a/ecmascript/compiler/llvm_codegen.h b/ecmascript/compiler/llvm_codegen.h index 
af4905a9bf05b735eb45161031c654f830361c83..57345c586028f3aba46c3aa83a95e500f3cf6f6a 100644 --- a/ecmascript/compiler/llvm_codegen.h +++ b/ecmascript/compiler/llvm_codegen.h @@ -46,65 +46,6 @@ class CompilerLog; class MethodLogList; class LLVMModule; -struct CodeInfo { - using sectionInfo = std::pair; - CodeInfo(); - - ~CodeInfo(); - - class CodeSpace { - public: - static CodeSpace *GetInstance(); - - uint8_t *Alloca(uintptr_t size, bool isReq, size_t alignSize, bool alignFlag = true); - - private: - CodeSpace(); - ~CodeSpace(); - - static constexpr size_t REQUIRED_SECS_LIMIT = (1 << 29); // 512M - static constexpr size_t UNREQUIRED_SECS_LIMIT = (1 << 28); // 256M - - // start point of the buffer reserved for sections required in executing phase - uint8_t *reqSecs_ {nullptr}; - size_t reqBufPos_ {0}; - // start point of the buffer reserved for sections not required in executing phase - uint8_t *unreqSecs_ {nullptr}; - size_t unreqBufPos_ {0}; - }; - - uint8_t *AllocaInReqSecBuffer(uintptr_t size, size_t alignSize = 0, bool alignFlag = true); - - uint8_t *AllocaInNotReqSecBuffer(uintptr_t size, size_t alignSize = 0); - - uint8_t *AllocaCodeSection(uintptr_t size, const char *sectionName); - - uint8_t *AllocaDataSection(uintptr_t size, const char *sectionName); - - void Reset(); - - uint8_t *GetSectionAddr(ElfSecName sec) const; - - size_t GetSectionSize(ElfSecName sec) const; - - std::vector> GetCodeInfo() const; - - template - void IterateSecInfos(const Callback &cb) const - { - for (size_t i = 0; i < secInfos_.size(); i++) { - if (secInfos_[i].second == 0) { - continue; - } - cb(i, secInfos_[i]); - } - } - -private: - std::array(ElfSecName::SIZE)> secInfos_; - std::vector> codeInfo_ {}; // info for disasssembler, planed to be deprecated -}; - enum class FPFlag : uint32_t { ELIM_FP = 0, RESERVE_FP = 1 @@ -120,11 +61,11 @@ struct LOptions { : optLevel(level), genFp(static_cast(flag)), relocMode(relocMode) {}; }; -class LLVMAssembler { +class LLVMAssembler :public Assembler { public: explicit LLVMAssembler(LLVMModule *lm, LOptions option = LOptions()); virtual ~LLVMAssembler(); - void Run(const CompilerLog &log); + void Run(const CompilerLog &log, bool fastCompileMode) override; const LLVMExecutionEngineRef &GetEngine() { return engine_; @@ -136,27 +77,11 @@ public: static int GetFpDeltaPrevFramSp(LLVMValueRef fn, const CompilerLog &log); static kungfu::CalleeRegAndOffsetVec GetCalleeReg2Offset(LLVMValueRef fn, const CompilerLog &log); - uintptr_t GetSectionAddr(ElfSecName sec) const - { - return reinterpret_cast(codeInfo_.GetSectionAddr(sec)); - } - - uint32_t GetSectionSize(ElfSecName sec) const - { - return static_cast(codeInfo_.GetSectionSize(sec)); - } - void *GetFuncPtrFromCompiledModule(LLVMValueRef function) { return LLVMGetPointerToGlobal(engine_, function); } - template - void IterateSecInfos(const Callback &cb) const - { - codeInfo_.IterateSecInfos(cb); - } - void SetObjFile(const llvm::object::ObjectFile *obj) { objFile_ = obj; @@ -184,6 +109,7 @@ private: void UseRoundTripSectionMemoryManager(); bool BuildMCJITEngine(); void BuildAndRunPasses(); + void BuildAndRunPassesFastMode(); void Initialize(LOptions option); static void PrintInstAndStep(uint64_t &pc, uint8_t **byteSp, uintptr_t &numBytes, size_t instSize, uint64_t textOffset, char *outString, std::ostringstream &codeStream, @@ -197,7 +123,6 @@ private: LLVMExecutionEngineRef engine_ {nullptr}; AOTEventListener listener_; char *error_ {nullptr}; - CodeInfo codeInfo_ {}; }; class LLVMIRGeneratorImpl : public CodeGeneratorImpl 
{ diff --git a/ecmascript/compiler/llvm_ir_builder.cpp b/ecmascript/compiler/llvm_ir_builder.cpp index 0c10672336a63dd5530ce7106745aadbc6df3760..2d98162475bcb6a4ae769f3ae2710c5e2180b3e6 100644 --- a/ecmascript/compiler/llvm_ir_builder.cpp +++ b/ecmascript/compiler/llvm_ir_builder.cpp @@ -24,6 +24,7 @@ #include "ecmascript/compiler/common_stubs.h" #include "ecmascript/compiler/debug_info.h" #include "ecmascript/compiler/gate.h" +#include "ecmascript/compiler/gate_meta_data.h" #include "ecmascript/compiler/rt_call_signature.h" #include "ecmascript/deoptimizer/deoptimizer.h" #include "ecmascript/frames.h" @@ -63,6 +64,7 @@ LLVMIRBuilder::LLVMIRBuilder(const std::vector> *schedule, function_(function), llvmModule_(module), callConv_(callConv), enableLog_(enableLog), isFastCallAot_(isFastCallAot) { + ASSERT(compCfg_->Is64Bit()); context_ = module->GetContext(); builder_ = LLVMCreateBuilderInContext(context_); bbID2BB_.clear(); @@ -70,17 +72,8 @@ LLVMIRBuilder::LLVMIRBuilder(const std::vector> *schedule, InitializeHandlers(); LLVMSetGC(function_, "statepoint-example"); - if (compCfg_->Is32Bit()) { - slotSize_ = sizeof(uint32_t); - slotType_ = LLVMInt32TypeInContext(context_); - } else { - slotSize_ = sizeof(uint64_t); - slotType_ = LLVMInt64TypeInContext(context_); - } - if (compCfg_->Is32Bit()) { - // hard float instruction - LLVMAddTargetDependentFunctionAttr(function_, "target-features", "+armv8-a"); - } + slotSize_ = sizeof(uint64_t); + slotType_ = GetInt64T(); LLVMMetadataRef dFile = llvmModule_->GetDFileMD(); LLVMMetadataRef funcTyMD = GetFunctionTypeMD(dFile); @@ -115,11 +108,7 @@ void LLVMIRBuilder::SetFunctionCallConv() LLVMSetFunctionCallConv(function_, LLVMGHCCallConv); break; case CallSignature::CallConv::WebKitJSCallConv: { - if (!compCfg_->Is32Bit()) { - LLVMSetFunctionCallConv(function_, LLVMWebKitJSCallConv); - } else { - LLVMSetFunctionCallConv(function_, LLVMCCallConv); - } + LLVMSetFunctionCallConv(function_, LLVMWebKitJSCallConv); break; } default: { @@ -214,6 +203,7 @@ void LLVMIRBuilder::InitializeHandlers() {OpCode::MUL_WITH_OVERFLOW, &LLVMIRBuilder::HandleMulWithOverflow}, {OpCode::EXTRACT_VALUE, &LLVMIRBuilder::HandleExtractValue}, {OpCode::SQRT, &LLVMIRBuilder::HandleSqrt}, + {OpCode::READSP, &LLVMIRBuilder::HandleReadSp}, }; illegalOpHandlers_ = { OpCode::NOP, OpCode::CIRCUIT_ROOT, OpCode::DEPEND_ENTRY, @@ -305,8 +295,9 @@ void LLVMIRBuilder::ProcessPhiWorkList() for (BasicBlock *bb : phiRebuildWorklist_) { auto impl = bb->GetImpl(); for (auto &e : impl->unmergedPhis_) { - BasicBlock *pred = e.pred; - if (impl->started == 0) { + ASSERT(bbID2BB_.count(e.predBBId) > 0); + BasicBlock *pred = bbID2BB_[e.predBBId].get(); + if (!impl->started) { OPTIONAL_LOG_COMPILER(ERROR) << " ProcessPhiWorkList error hav't start "; return; } @@ -351,9 +342,6 @@ BasicBlockImpl *LLVMIRBuilder::EnsureBBImpl(BasicBlock *bb) const void LLVMIRBuilder::GenPrologue() { - if (compCfg_->Is32Bit()) { - return; - } auto frameType = circuit_->GetFrameType(); if (IsInterpreted()) { return; @@ -410,14 +398,12 @@ LLVMValueRef LLVMIRBuilder::CallingFp(LLVMModuleRef &module, LLVMBuilderRef &bui return LLVMGetParam(function_, static_cast(InterpreterHandlerInputs::SP)); } /* 0:calling 1:its caller */ - std::vector args = {LLVMConstInt(LLVMInt32TypeInContext(context_), 0, isCaller)}; + std::vector args = {LLVMConstInt(GetInt32T(), 0, isCaller)}; auto fn = LLVMGetNamedFunction(module, "llvm.frameaddress.p0i8"); if (!fn) { /* init instrinsic function declare */ - LLVMTypeRef paramTys1[] = { - 
LLVMInt32TypeInContext(context_), - }; - auto fnTy = LLVMFunctionType(LLVMPointerType(LLVMInt8TypeInContext(context_), 0), paramTys1, 1, 0); + LLVMTypeRef paramTys1[] = { GetInt32T() }; + auto fnTy = LLVMFunctionType(GetRawPtrT(), paramTys1, 1, 0); fn = LLVMAddFunction(module, "llvm.frameaddress.p0i8", fnTy); } LLVMValueRef fAddrRet = LLVMBuildCall(builder, fn, args.data(), 1, ""); @@ -434,7 +420,7 @@ LLVMValueRef LLVMIRBuilder::ReadRegister(LLVMModuleRef &module, [[maybe_unused]] LLVMTypeRef paramTys1[] = { GetMachineRepType(MachineRep::K_META), }; - auto fnTy = LLVMFunctionType(LLVMInt64TypeInContext(context_), paramTys1, 1, 0); + auto fnTy = LLVMFunctionType(GetInt64T(), paramTys1, 1, 0); fn = LLVMAddFunction(module, "llvm.read_register.i64", fnTy); } LLVMValueRef fAddrRet = LLVMBuildCall(builder_, fn, args.data(), 1, ""); @@ -461,30 +447,25 @@ LLVMTypeRef LLVMIRBuilder::GetMachineRepType(MachineRep rep) const LLVMTypeRef dstType; switch (rep) { case MachineRep::K_BIT: - dstType = LLVMInt1TypeInContext(context_); + dstType = GetInt1T(); break; case MachineRep::K_WORD8: - dstType = LLVMInt8TypeInContext(context_); + dstType = GetInt8T(); break; case MachineRep::K_WORD16: - dstType = LLVMInt16TypeInContext(context_); + dstType = GetInt16T(); break; case MachineRep::K_WORD32: - dstType = LLVMInt32TypeInContext(context_); + dstType = GetInt32T(); break; case MachineRep::K_FLOAT64: - dstType = LLVMDoubleTypeInContext(context_); + dstType = GetDoubleT(); break; case MachineRep::K_WORD64: - dstType = LLVMInt64TypeInContext(context_); + dstType = GetInt64T(); break; case MachineRep::K_PTR_1: - if (compCfg_->Is32Bit()) { - dstType = - LLVMVectorType(LLVMPointerType(LLVMInt8TypeInContext(context_), 1), 2); // 2: packed vector type - } else { - dstType = LLVMPointerType(LLVMInt64TypeInContext(context_), 1); - } + dstType = GetTaggedHPtrT(); break; case MachineRep::K_META: dstType = LLVMMetadataTypeInContext(context_); @@ -497,6 +478,12 @@ LLVMTypeRef LLVMIRBuilder::GetMachineRepType(MachineRep rep) const return dstType; } +void LLVMIRBuilder::HandleReadSp(GateRef gate) +{ + ASSERT(acc_.GetOpCode(gate) == OpCode::READSP); + VisitReadSp(gate); +} + void LLVMIRBuilder::HandleCall(GateRef gate) { std::vector ins; @@ -580,14 +567,14 @@ void LLVMIRBuilder::VisitRuntimeCall(GateRef gate, const std::vector &i auto kind = GetCallExceptionKind(stubIndex, OpCode::RUNTIME_CALL); size_t actualNumArgs = 0; - LLVMValueRef pcOffset = LLVMConstInt(LLVMInt32TypeInContext(context_), 0, 0); + LLVMValueRef pcOffset = LLVMConstInt(GetInt32T(), 0, 0); ComputeArgCountAndPCOffset(actualNumArgs, pcOffset, inList, kind); std::vector params; params.push_back(glue); // glue const int index = static_cast(acc_.GetConstantValue(inList[static_cast(CallInputs::TARGET)])); - params.push_back(LLVMConstInt(LLVMInt64TypeInContext(context_), index, 0)); // target - params.push_back(LLVMConstInt(LLVMInt64TypeInContext(context_), + params.push_back(LLVMConstInt(GetInt64T(), index, 0)); // target + params.push_back(LLVMConstInt(GetInt64T(), actualNumArgs - static_cast(CallInputs::FIRST_PARAMETER), 0)); // argc for (size_t paraIdx = static_cast(CallInputs::FIRST_PARAMETER); paraIdx < actualNumArgs; ++paraIdx) { GateRef gateTmp = inList[paraIdx]; @@ -601,8 +588,7 @@ void LLVMIRBuilder::VisitRuntimeCall(GateRef gate, const std::vector &i LLVMValueRef runtimeCall = nullptr; if (kind == CallExceptionKind::HAS_PC_OFFSET) { std::vector values; - auto pcIndex = - LLVMConstInt(LLVMInt64TypeInContext(context_), 
static_cast(SpecVregIndex::PC_OFFSET_INDEX), 1); + auto pcIndex = LLVMConstInt(GetInt64T(), static_cast(SpecVregIndex::PC_OFFSET_INDEX), 1); values.push_back(pcIndex); values.push_back(pcOffset); runtimeCall = LLVMBuildCall3(builder_, funcType, callee, params.data(), actualNumArgs, @@ -610,10 +596,7 @@ void LLVMIRBuilder::VisitRuntimeCall(GateRef gate, const std::vector &i } else { runtimeCall = LLVMBuildCall2(builder_, funcType, callee, params.data(), actualNumArgs, ""); } - - if (!compCfg_->Is32Bit()) { // Arm32 not support webkit jscc calling convention - LLVMSetInstructionCallConv(runtimeCall, LLVMWebKitJSCallConv); - } + LLVMSetInstructionCallConv(runtimeCall, LLVMWebKitJSCallConv); gate2LValue_[gate] = runtimeCall; if (IsLogEnabled()) { @@ -655,7 +638,7 @@ void LLVMIRBuilder::VisitRuntimeCallWithArgv(GateRef gate, const std::vector(CallInputs::TARGET)]); - auto targetId = LLVMConstInt(LLVMInt64TypeInContext(context_), index, 0); + auto targetId = LLVMConstInt(GetInt64T(), index, 0); params.push_back(targetId); // target for (size_t paraIdx = static_cast(CallInputs::FIRST_PARAMETER); paraIdx < inList.size(); ++paraIdx) { GateRef gateTmp = inList[paraIdx]; @@ -689,7 +672,7 @@ LLVMValueRef LLVMIRBuilder::GetCurrentFrameType(LLVMValueRef currentSpFrameAddr) { LLVMValueRef tmp = LLVMBuildSub(builder_, currentSpFrameAddr, LLVMConstInt(slotType_, slotSize_, 1), ""); LLVMValueRef frameTypeAddr = - LLVMBuildIntToPtr(builder_, tmp, LLVMPointerType(LLVMInt64TypeInContext(context_), 0), ""); + LLVMBuildIntToPtr(builder_, tmp, LLVMPointerType(GetInt64T(), 0), ""); LLVMValueRef frameType = LLVMBuildLoad(builder_, frameTypeAddr, ""); return frameType; } @@ -782,7 +765,7 @@ void LLVMIRBuilder::ComputeArgCountAndPCOffset(size_t &actualNumArgs, LLVMValueR } } -LLVMIRBuilder::CallExceptionKind LLVMIRBuilder::GetCallExceptionKind(size_t index, OpCode op) const +CallExceptionKind LLVMIRBuilder::GetCallExceptionKind(size_t index, OpCode op) const { bool hasPcOffset = IsOptimizedJSFunction() && ((op == OpCode::NOGC_RUNTIME_CALL && (kungfu::RuntimeStubCSigns::IsAsmStub(index))) || @@ -798,7 +781,7 @@ void LLVMIRBuilder::UpdateLeaveFrame(LLVMValueRef glue) LLVMTypeRef glueType = LLVMTypeOf(glue); LLVMValueRef leaveFrameAddr = LLVMBuildIntToPtr(builder_, leaveFrameValue, LLVMPointerType(glueType, 0), ""); LLVMValueRef llvmFpAddr = CallingFp(module_, builder_, true); - LLVMValueRef fp = LLVMBuildPtrToInt(builder_, llvmFpAddr, LLVMInt64TypeInContext(context_), "cast_int64_t"); + LLVMValueRef fp = LLVMBuildPtrToInt(builder_, llvmFpAddr, GetInt64T(), "cast_int64_t"); LLVMBuildStore(builder_, fp, leaveFrameAddr); } @@ -817,6 +800,12 @@ LLVMValueRef LLVMIRBuilder::GetCallee(const std::vector &inList, const return callee; } +void LLVMIRBuilder::VisitReadSp(GateRef gate) +{ + LLVMValueRef spValue = GetCurrentSP(); + gate2LValue_[gate] = spValue; +} + void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, OpCode op) { size_t targetIndex = static_cast(CallInputs::TARGET); @@ -827,6 +816,7 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, LLVMValueRef rtbaseoffset; LLVMValueRef callee; CallExceptionKind kind = CallExceptionKind::NO_PC_OFFSET; + bool isNoGC = false; if (op == OpCode::CALL) { const size_t index = acc_.GetConstantValue(inList[targetIndex]); calleeDescriptor = CommonStubCSigns::Get(index); @@ -850,6 +840,7 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, } else { kind = CallExceptionKind::NO_PC_OFFSET; } + isNoGC = acc_.IsNoGC(gate); } else if 
(op == OpCode::FAST_CALL_OPTIMIZED) { calleeDescriptor = RuntimeStubCSigns::GetOptimizedFastCallSign(); callee = GetCallee(inList, calleeDescriptor); @@ -858,6 +849,7 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, } else { kind = CallExceptionKind::NO_PC_OFFSET; } + isNoGC = acc_.IsNoGC(gate); } else { ASSERT(op == OpCode::BUILTINS_CALL || op == OpCode::BUILTINS_CALL_WITH_ARGV); LLVMValueRef opcodeOffset = gate2LValue_[inList[targetIndex]]; @@ -884,7 +876,7 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, int extraParameterCnt = 0; size_t actualNumArgs = 0; - LLVMValueRef pcOffset = LLVMConstInt(LLVMInt32TypeInContext(context_), 0, 0); + LLVMValueRef pcOffset = LLVMConstInt(GetInt32T(), 0, 0); ComputeArgCountAndPCOffset(actualNumArgs, pcOffset, inList, kind); // then push the actual parameter for js function call @@ -896,8 +888,8 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, // match parameter types and function signature types if (IsHeapPointerType(paramType) && !IsHeapPointerType(gateTmpType)) { params.push_back(LLVMBuildIntToPtr(builder_, - LLVMBuildBitCast(builder_, gate2LValue_[gateTmp], LLVMInt64TypeInContext(context_), ""), - paramType, "")); + LLVMBuildBitCast(builder_, gate2LValue_[gateTmp], GetInt64T(), ""), + paramType, "")); } else { params.push_back(LLVMBuildBitCast(builder_, gate2LValue_[gateTmp], paramType, "")); } @@ -911,8 +903,7 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, callee = LLVMBuildPointerCast(builder_, callee, LLVMPointerType(funcType, 0), ""); if (kind == CallExceptionKind::HAS_PC_OFFSET) { std::vector values; - auto pcIndex = - LLVMConstInt(LLVMInt64TypeInContext(context_), static_cast(SpecVregIndex::PC_OFFSET_INDEX), 1); + auto pcIndex = LLVMConstInt(GetInt64T(), static_cast(SpecVregIndex::PC_OFFSET_INDEX), 1); values.push_back(pcIndex); values.push_back(pcOffset); call = LLVMBuildCall3(builder_, funcType, callee, params.data(), actualNumArgs - firstArg + extraParameterCnt, @@ -922,6 +913,9 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector &inList, ""); } SetCallConvAttr(calleeDescriptor, call); + if (isNoGC) { + SetGCLeafFunction(call); + } gate2LValue_[gate] = call; if (IsLogEnabled()) { @@ -998,16 +992,36 @@ void LLVMIRBuilder::HandlePhi(GateRef gate) VisitPhi(gate, ins); } -void LLVMIRBuilder::VisitPhi(GateRef gate, const std::vector &srcGates) +int LLVMIRBuilder::LookupPredBB(GateRef start, int bbID) +{ + GateId gateId = acc_.GetId(start); + int owner = instID2bbID_[gateId]; + if (owner != bbID) { + return owner; + } + GateRef pred = start; + while (owner == bbID) { + pred = acc_.GetState(pred); + auto id = acc_.GetId(pred); + owner = instID2bbID_[id]; + } + return owner; +} + +void LLVMIRBuilder::VisitPhi(GateRef gate, const std::vector &phiIns) { LLVMTypeRef type = ConvertLLVMTypeFromGate(gate); LLVMValueRef phi = LLVMBuildPhi(builder_, type, ""); - std::vector relMergeIns; - acc_.GetIns(srcGates[0], relMergeIns); - bool addToPhiRebuildList = false; - for (int i = 1; i < static_cast(srcGates.size()); i++) { - GateId gateId = acc_.GetId(relMergeIns[i - 1]); - int bbIdx = instID2bbID_[gateId]; + if (phiIns.size() > 1) { + gate2LValue_[gate] = phi; + } + // Collect the states merges of this phi and note the 1-in is the merged states. 
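+    // Each phi value input i (i >= 1) pairs with state input (i - 1) of that
+    // merge; LookupPredBB then walks the state chain to find the basic block
+    // that actually owns the incoming edge.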
+ std::vector phiStates; + acc_.GetIns(phiIns[0], phiStates); + ASSERT(phiStates.size() + 1 == phiIns.size()); + for (int i = 1; i < static_cast(phiIns.size()); i++) { + int bbIdx = LookupPredBB(phiStates[i - 1], currentBb_->GetId()); + int cnt = static_cast(bbID2BB_.count(bbIdx)); // if cnt = 0 means bb with current bbIdx hasn't been created if (cnt > 0) { @@ -1022,26 +1036,22 @@ void LLVMIRBuilder::VisitPhi(GateRef gate, const std::vector &srcGates) return; } LLVMBasicBlockRef llvmBB = EnsureLBB(bb); // The llvm bb - LLVMValueRef value = gate2LValue_[srcGates[i]]; + LLVMValueRef value = gate2LValue_[phiIns[i]]; if (impl->started) { LLVMAddIncoming(phi, &value, &llvmBB, 1); } else { - addToPhiRebuildList = true; impl = currentBb_->GetImpl(); - impl->unmergedPhis_.emplace_back(); - auto ¬_merged_phi = impl->unmergedPhis_.back(); - not_merged_phi.phi = phi; - not_merged_phi.pred = bb; - not_merged_phi.operand = srcGates[i]; + NotMergedPhiDesc d = { bbIdx, phiIns[i], phi }; + impl->unmergedPhis_.emplace_back(d); + phiRebuildWorklist_.push_back(currentBb_); } } else { - addToPhiRebuildList = true; - } - if (addToPhiRebuildList) { + BasicBlockImpl* impl = currentBb_->GetImpl(); + NotMergedPhiDesc d = { bbIdx, phiIns[i], phi }; + impl->unmergedPhis_.emplace_back(d); phiRebuildWorklist_.push_back(currentBb_); } - gate2LValue_[gate] = phi; } } @@ -1091,7 +1101,7 @@ void LLVMIRBuilder::LinkToLLVMCfg(int bbId, const OperandsVector &predecessors) LLVMBasicBlockRef preLBB = EnsureLBB(pre); LLVMMoveBasicBlockBefore(preLBB, lBB); } - if (isPrologue(bbId)) { + if (IsPrologue(bbId)) { GenPrologue(); } } @@ -1145,26 +1155,16 @@ void LLVMIRBuilder::VisitConstant(GateRef gate, std::bitset<64> value) // 64: bi LLVMValueRef llvmValue = nullptr; auto machineType = acc_.GetMachineType(gate); if (machineType == MachineType::ARCH) { - machineType = compCfg_->Is32Bit() ? 
MachineType::I32 : MachineType::I64; + ASSERT(compCfg_->Is64Bit()); + machineType = MachineType::I64; } if (machineType == MachineType::I32) { - llvmValue = LLVMConstInt(LLVMInt32TypeInContext(context_), value.to_ulong(), 0); + llvmValue = LLVMConstInt(GetInt32T(), value.to_ulong(), 0); } else if (machineType == MachineType::I64) { - llvmValue = LLVMConstInt(LLVMInt64TypeInContext(context_), value.to_ullong(), 0); + llvmValue = LLVMConstInt(GetInt64T(), value.to_ullong(), 0); LLVMTypeRef type = ConvertLLVMTypeFromGate(gate); if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) { llvmValue = LLVMBuildIntToPtr(builder_, llvmValue, type, ""); - } else if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) { - LLVMValueRef tmp1Value = LLVMBuildLShr( - builder_, llvmValue, LLVMConstInt(LLVMInt64TypeInContext(context_), 32, 0), ""); // 32: offset - LLVMValueRef tmp2Value = LLVMBuildIntCast(builder_, llvmValue, LLVMInt32TypeInContext(context_), ""); // low - LLVMValueRef emptyValue = LLVMGetUndef(type); - tmp1Value = LLVMBuildIntToPtr(builder_, tmp1Value, LLVMPointerType(LLVMInt8TypeInContext(context_), 1), ""); - tmp2Value = LLVMBuildIntToPtr(builder_, tmp2Value, LLVMPointerType(LLVMInt8TypeInContext(context_), 1), ""); - llvmValue = LLVMBuildInsertElement( - builder_, emptyValue, tmp2Value, LLVMConstInt(LLVMInt32TypeInContext(context_), 0, 0), ""); - llvmValue = LLVMBuildInsertElement( - builder_, llvmValue, tmp1Value, LLVMConstInt(LLVMInt32TypeInContext(context_), 1, 0), ""); } else if (LLVMGetTypeKind(type) == LLVMIntegerTypeKind) { // do nothing } else { @@ -1173,13 +1173,13 @@ void LLVMIRBuilder::VisitConstant(GateRef gate, std::bitset<64> value) // 64: bi } } else if (machineType == MachineType::F64) { auto doubleValue = base::bit_cast(value.to_ullong()); // actual double value - llvmValue = LLVMConstReal(LLVMDoubleTypeInContext(context_), doubleValue); + llvmValue = LLVMConstReal(GetDoubleT(), doubleValue); } else if (machineType == MachineType::I8) { - llvmValue = LLVMConstInt(LLVMInt8TypeInContext(context_), value.to_ulong(), 0); + llvmValue = LLVMConstInt(GetInt8T(), value.to_ulong(), 0); } else if (machineType == MachineType::I16) { - llvmValue = LLVMConstInt(LLVMInt16TypeInContext(context_), value.to_ulong(), 0); + llvmValue = LLVMConstInt(GetInt16T(), value.to_ulong(), 0); } else if (machineType == MachineType::I1) { - llvmValue = LLVMConstInt(LLVMInt1TypeInContext(context_), value.to_ulong(), 0); + llvmValue = LLVMConstInt(GetInt1T(), value.to_ulong(), 0); } else { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -1210,8 +1210,8 @@ void LLVMIRBuilder::HandleRelocatableData(GateRef gate) void LLVMIRBuilder::VisitRelocatableData(GateRef gate, uint64_t value) { - LLVMValueRef globalValue = LLVMAddGlobal(module_, LLVMInt64TypeInContext(context_), "G"); - LLVMSetInitializer(globalValue, LLVMConstInt(LLVMInt64TypeInContext(context_), value, 0)); + LLVMValueRef globalValue = LLVMAddGlobal(module_, GetInt64T(), "G"); + LLVMSetInitializer(globalValue, LLVMConstInt(GetInt64T(), value, 0)); gate2LValue_[gate] = globalValue; } @@ -1314,6 +1314,18 @@ void LLVMIRBuilder::VisitBranch(GateRef gate, GateRef cmp, int btrue, int bfalse LLVMBasicBlockRef llvmFalseBB = falseBB->GetImpl()->lBB_; LLVMValueRef result = LLVMBuildCondBr(builder_, cond, llvmTrueBB, llvmFalseBB); EndCurrentBlock(); + + if (acc_.HasBranchWeight(gate)) { + auto trueWeight = acc_.GetTrueWeight(gate); + auto falseWeight = acc_.GetFalseWeight(gate); + LLVMMetadataRef branch_weights = LLVMMDStringInContext2(context_, 
"branch_weights", 14); + LLVMMetadataRef weight1 = LLVMValueAsMetadata(LLVMConstInt(LLVMIntType(32), trueWeight, 0)); + LLVMMetadataRef weight2 = LLVMValueAsMetadata(LLVMConstInt(LLVMIntType(32), falseWeight, 0)); + LLVMMetadataRef mds[] = {branch_weights, weight1, weight2}; + LLVMMetadataRef metadata = LLVMMDNodeInContext2(context_, mds, 3); + LLVMValueRef metadata_value = LLVMMetadataAsValue(context_, metadata); + LLVMSetMetadata(result, LLVMGetMDKindID("prof", 4), metadata_value); + } gate2LValue_[gate] = result; } @@ -1354,44 +1366,40 @@ void LLVMIRBuilder::VisitSwitch(GateRef gate, GateRef input, const std::vectorIs32Bit()) { - return LLVMVectorType(LLVMPointerType(LLVMInt8TypeInContext(context_), 1), 2); // 2: packed vector type - } else { - return LLVMPointerType(LLVMInt64TypeInContext(context_), 1); - } + return GetTaggedHPtrT(); } MachineType t = acc_.GetMachineType(gate); switch (t) { case MachineType::NOVALUE: - return LLVMVoidTypeInContext(context_); + return GetVoidT(); case MachineType::I1: - return LLVMInt1TypeInContext(context_); + return GetInt1T(); case MachineType::I8: - return LLVMInt8TypeInContext(context_); + return GetInt8T(); case MachineType::I16: - return LLVMInt16TypeInContext(context_); + return GetInt16T(); case MachineType::I32: - return LLVMInt32TypeInContext(context_); + return GetInt32T(); case MachineType::I64: - return LLVMInt64TypeInContext(context_); + return GetInt64T(); case MachineType::F32: - return LLVMFloatTypeInContext(context_); + return GetFloatT(); case MachineType::F64: - return LLVMDoubleTypeInContext(context_); + return GetDoubleT(); case MachineType::ARCH: { - if (compCfg_->Is32Bit()) { - return LLVMInt32TypeInContext(context_); - } else { - return LLVMInt64TypeInContext(context_); - } + return GetInt64T(); } default: LOG_ECMA(FATAL) << "this branch is unreachable"; @@ -1555,38 +1572,17 @@ void LLVMIRBuilder::VisitTruncFloatToInt(GateRef gate, GateRef e1) gate2LValue_[gate] = result; } -bool IsAddIntergerType(MachineType machineType) -{ - switch (machineType) { - case MachineType::I8: - case MachineType::I16: - case MachineType::I32: - case MachineType::I64: - case MachineType::ARCH: - return true; - default: - return false; - } -} - void LLVMIRBuilder::VisitAdd(GateRef gate, GateRef e1, GateRef e2) { LLVMValueRef e1Value = gate2LValue_[e1]; LLVMValueRef e2Value = gate2LValue_[e2]; LLVMValueRef result = nullptr; - /* - * If the first operand is pointer, special treatment is needed - * 1) add, pointer, int - * 2) add, vector{i8* x 2}, int - */ - LLVMTypeRef returnType = ConvertLLVMTypeFromGate(gate); + LLVMTypeRef returnType = ConvertLLVMTypeFromGate(gate); auto machineType = acc_.GetMachineType(gate); if (IsAddIntergerType(machineType)) { auto e1Type = LLVMGetTypeKind(ConvertLLVMTypeFromGate(e1)); - if (e1Type == LLVMVectorTypeKind) { - result = VectorAdd(e1Value, e2Value, returnType); - } else if (e1Type == LLVMPointerTypeKind) { + if (e1Type == LLVMPointerTypeKind) { result = PointerAdd(e1Value, e2Value, returnType); } else { LLVMValueRef tmp1Value = LLVMBuildIntCast2(builder_, e1Value, returnType, 0, ""); @@ -1637,18 +1633,6 @@ void LLVMIRBuilder::HandleMul(GateRef gate) VisitMul(gate, g0, g1); } -bool IsMulIntergerType(MachineType machineType) -{ - switch (machineType) { - case MachineType::I32: - case MachineType::I64: - case MachineType::ARCH: - return true; - default: - return false; - } -} - void LLVMIRBuilder::VisitMul(GateRef gate, GateRef e1, GateRef e2) { LLVMValueRef e1Value = gate2LValue_[e1]; @@ -1739,14 +1723,8 @@ void 
LLVMIRBuilder::VisitAddWithOverflow(GateRef gate, GateRef e1, GateRef e2) auto fn = LLVMGetNamedFunction(module_, "llvm.sadd.with.overflow.i32"); if (!fn) { /* init instrinsic function declare */ - LLVMTypeRef paramTys1[] = { - LLVMInt32TypeInContext(context_), - LLVMInt32TypeInContext(context_), - }; - LLVMTypeRef structTys[] = { - LLVMInt32TypeInContext(context_), - LLVMInt1TypeInContext(context_), - }; + LLVMTypeRef paramTys1[] = { GetInt32T(), GetInt32T() }; + LLVMTypeRef structTys[] = { GetInt32T(), GetInt1T() }; LLVMTypeRef returnType = LLVMStructTypeInContext(context_, structTys, 2, 0); auto fnTy = LLVMFunctionType(returnType, paramTys1, 2, 0); fn = LLVMAddFunction(module_, "llvm.sadd.with.overflow.i32", fnTy); @@ -1772,14 +1750,8 @@ void LLVMIRBuilder::VisitSubWithOverflow(GateRef gate, GateRef e1, GateRef e2) auto fn = LLVMGetNamedFunction(module_, "llvm.ssub.with.overflow.i32"); if (!fn) { /* init instrinsic function declare */ - LLVMTypeRef paramTys1[] = { - LLVMInt32TypeInContext(context_), - LLVMInt32TypeInContext(context_), - }; - LLVMTypeRef structTys[] = { - LLVMInt32TypeInContext(context_), - LLVMInt1TypeInContext(context_), - }; + LLVMTypeRef paramTys1[] = { GetInt32T(), GetInt32T() }; + LLVMTypeRef structTys[] = { GetInt32T(), GetInt1T() }; LLVMTypeRef returnType = LLVMStructTypeInContext(context_, structTys, 2, 0); auto fnTy = LLVMFunctionType(returnType, paramTys1, 2, 0); fn = LLVMAddFunction(module_, "llvm.ssub.with.overflow.i32", fnTy); @@ -1805,14 +1777,8 @@ void LLVMIRBuilder::VisitMulWithOverflow(GateRef gate, GateRef e1, GateRef e2) auto fn = LLVMGetNamedFunction(module_, "llvm.smul.with.overflow.i32"); if (!fn) { /* init instrinsic function declare */ - LLVMTypeRef paramTys1[] = { - LLVMInt32TypeInContext(context_), - LLVMInt32TypeInContext(context_), - }; - LLVMTypeRef structTys[] = { - LLVMInt32TypeInContext(context_), - LLVMInt1TypeInContext(context_), - }; + LLVMTypeRef paramTys1[] = { GetInt32T(), GetInt32T() }; + LLVMTypeRef structTys[] = { GetInt32T(), GetInt1T() }; LLVMTypeRef returnType = LLVMStructTypeInContext(context_, structTys, 2, 0); auto fnTy = LLVMFunctionType(returnType, paramTys1, 2, 0); fn = LLVMAddFunction(module_, "llvm.smul.with.overflow.i32", fnTy); @@ -1850,10 +1816,8 @@ void LLVMIRBuilder::VisitSqrt(GateRef gate, GateRef e1) auto fn = LLVMGetNamedFunction(module_, "llvm.sqrt.f64"); if (!fn) { /* init instrinsic function declare */ - LLVMTypeRef paramTys1[] = { - LLVMDoubleTypeInContext(context_), - }; - auto fnTy = LLVMFunctionType(LLVMDoubleTypeInContext(context_), paramTys1, 1, 0); + LLVMTypeRef paramTys1[] = { GetDoubleT() }; + auto fnTy = LLVMFunctionType(GetDoubleT(), paramTys1, 1, 0); fn = LLVMAddFunction(module_, "llvm.sqrt.f64", fnTy); } LLVMValueRef result = LLVMBuildCall(builder_, fn, args.data(), 1, ""); @@ -1920,9 +1884,7 @@ void LLVMIRBuilder::VisitCmp(GateRef gate, GateRef e1, GateRef e2) [[maybe_unused]] auto e1ValCode = acc_.GetMachineType(e1); [[maybe_unused]] auto e2ValCode = acc_.GetMachineType(e2); ASSERT((e1ValCode == e2ValCode) || - (compCfg_->Is32Bit() && (e1ValCode == MachineType::ARCH) && (e2ValCode == MachineType::I32)) || (compCfg_->Is64Bit() && (e1ValCode == MachineType::ARCH) && (e2ValCode == MachineType::I64)) || - (compCfg_->Is32Bit() && (e2ValCode == MachineType::ARCH) && (e1ValCode == MachineType::I32)) || (compCfg_->Is64Bit() && (e2ValCode == MachineType::ARCH) && (e1ValCode == MachineType::I64))); LLVMIntPredicate intOpcode = LLVMIntEQ; LLVMRealPredicate realOpcode = LLVMRealPredicateFalse; @@ -2133,14 
+2095,14 @@ void LLVMIRBuilder::VisitChangeInt32ToDouble(GateRef gate, GateRef e1) void LLVMIRBuilder::VisitChangeUInt32ToDouble(GateRef gate, GateRef e1) { LLVMValueRef e1Value = gate2LValue_[e1]; - LLVMValueRef result = LLVMBuildUIToFP(builder_, e1Value, LLVMDoubleTypeInContext(context_), ""); + LLVMValueRef result = LLVMBuildUIToFP(builder_, e1Value, GetDoubleT(), ""); gate2LValue_[gate] = result; } void LLVMIRBuilder::VisitChangeDoubleToInt32(GateRef gate, GateRef e1) { LLVMValueRef e1Value = gate2LValue_[e1]; - LLVMValueRef result = LLVMBuildFPToSI(builder_, e1Value, LLVMInt32TypeInContext(context_), ""); + LLVMValueRef result = LLVMBuildFPToSI(builder_, e1Value, GetInt32T(), ""); gate2LValue_[gate] = result; } @@ -2155,23 +2117,7 @@ void LLVMIRBuilder::VisitChangeInt64ToTagged(GateRef gate, GateRef e1) { LLVMValueRef e1Value = gate2LValue_[e1]; ASSERT(LLVMGetTypeKind(LLVMTypeOf(e1Value)) == LLVMIntegerTypeKind); - LLVMValueRef result; - if (compCfg_->Is32Bit()) { - LLVMValueRef tmp1Value = - LLVMBuildLShr(builder_, e1Value, LLVMConstInt(LLVMInt64TypeInContext(context_), 32, 0), ""); // 32: offset - LLVMValueRef tmp2Value = LLVMBuildIntCast(builder_, e1Value, LLVMInt32TypeInContext(context_), ""); // low - LLVMTypeRef vectorType = LLVMVectorType( - LLVMPointerType(LLVMInt8TypeInContext(context_), 1), 2); // 2: packed vector type - LLVMValueRef emptyValue = LLVMGetUndef(vectorType); - tmp1Value = LLVMBuildIntToPtr(builder_, tmp1Value, LLVMPointerType(LLVMInt8TypeInContext(context_), 1), ""); - tmp2Value = LLVMBuildIntToPtr(builder_, tmp2Value, LLVMPointerType(LLVMInt8TypeInContext(context_), 1), ""); - result = LLVMBuildInsertElement( - builder_, emptyValue, tmp2Value, LLVMConstInt(LLVMInt32TypeInContext(context_), 0, 0), ""); - result = LLVMBuildInsertElement( - builder_, result, tmp1Value, LLVMConstInt(LLVMInt32TypeInContext(context_), 1, 0), ""); - } else { - result = LLVMBuildIntToPtr(builder_, e1Value, LLVMPointerType(LLVMInt64TypeInContext(context_), 1), ""); - } + LLVMValueRef result = LLVMBuildIntToPtr(builder_, e1Value, GetTaggedHPtrT(), ""); gate2LValue_[gate] = result; } @@ -2204,6 +2150,15 @@ void LLVMIRBuilder::HandleDeoptCheck(GateRef gate) GateRef cmp = acc_.GetValueIn(gate, 0); // 0: cond LLVMValueRef cond = gate2LValue_[cmp]; LLVMValueRef result = LLVMBuildCondBr(builder_, cond, llvmTrueBB, llvmFalseBB); + + LLVMMetadataRef branch_weights = LLVMMDStringInContext2(context_, "branch_weights", 14); + LLVMMetadataRef weight1 = LLVMValueAsMetadata(LLVMConstInt(LLVMIntType(32), BranchWeight::DEOPT_WEIGHT, 0)); + LLVMMetadataRef weight2 = LLVMValueAsMetadata(LLVMConstInt(LLVMIntType(32), BranchWeight::ONE_WEIGHT, 0)); + LLVMMetadataRef mds[] = {branch_weights, weight1, weight2}; + LLVMMetadataRef metadata = LLVMMDNodeInContext2(context_, mds, 3); // 3: size of mds + LLVMValueRef metadata_value = LLVMMetadataAsValue(context_, metadata); + LLVMSetMetadata(result, LLVMGetMDKindID("prof", 4), metadata_value); // 4: length of "prof" + EndCurrentBlock(); LLVMPositionBuilderAtEnd(builder_, llvmFalseBB); @@ -2221,7 +2176,7 @@ void LLVMIRBuilder::HandleDeoptCheck(GateRef gate) LLVMTypeRef LLVMIRBuilder::GetExperimentalDeoptTy() { - auto fnTy = LLVMFunctionType(LLVMPointerType(LLVMInt64TypeInContext(context_), 1), nullptr, 0, 1); + auto fnTy = LLVMFunctionType(GetTaggedHPtrT(), nullptr, 0, 1); return fnTy; } @@ -2234,9 +2189,8 @@ LLVMValueRef LLVMModule::GetDeoptFunction() void LLVMIRBuilder::GenDeoptEntry(LLVMModuleRef &module) { // glue type depth - std::vector paramTys = { - 
LLVMInt64TypeInContext(context_), LLVMInt64TypeInContext(context_), LLVMInt64TypeInContext(context_)}; - auto funcType = LLVMFunctionType(LLVMInt64TypeInContext(context_), paramTys.data(), paramTys.size(), 0); + std::vector paramTys = { GetInt64T(), GetInt64T(), GetInt64T() }; + auto funcType = LLVMFunctionType(GetInt64T(), paramTys.data(), paramTys.size(), 0); auto function = LLVMAddFunction(module, Deoptimizier::GetLLVMDeoptRelocateSymbol(), funcType); LLVMSetFunctionCallConv(function, LLVMCCallConv); llvmModule_->SetFunction(LLVMModule::kDeoptEntryOffset, function, false); @@ -2256,8 +2210,7 @@ void LLVMIRBuilder::GenDeoptEntry(LLVMModuleRef &module) StubIdType stubId = RTSTUB_ID(DeoptHandlerAsm); int stubIndex = static_cast(std::get(stubId)); LLVMValueRef rtoffset = LLVMBuildAdd(builder, glue, GetRTStubOffset(glue, stubIndex), ""); - LLVMValueRef patchAddr = LLVMBuildIntToPtr( - builder, rtoffset, LLVMPointerType(LLVMInt64TypeInContext(context_), 0), ""); + LLVMValueRef patchAddr = LLVMBuildIntToPtr(builder, rtoffset, GetTaggedPtrT(), ""); LLVMValueRef llvmAddr = LLVMBuildLoad(builder, patchAddr, ""); LLVMTypeRef rtfuncTypePtr = LLVMPointerType(funcType, 0); LLVMValueRef callee = LLVMBuildIntToPtr(builder, llvmAddr, rtfuncTypePtr, ""); @@ -2283,10 +2236,10 @@ LLVMValueRef LLVMIRBuilder::GetExperimentalDeopt(LLVMModuleRef &module) LLVMValueRef LLVMIRBuilder::ConvertBoolToTaggedBoolean(GateRef gate) { LLVMValueRef value = gate2LValue_[gate]; - LLVMValueRef e1Value = LLVMBuildZExt(builder_, value, LLVMInt64TypeInContext(context_), ""); - auto tagMask = LLVMConstInt(LLVMInt64TypeInContext(context_), JSTaggedValue::TAG_BOOLEAN_MASK, 0); + LLVMValueRef e1Value = LLVMBuildZExt(builder_, value, GetInt64T(), ""); + auto tagMask = LLVMConstInt(GetInt64T(), JSTaggedValue::TAG_BOOLEAN_MASK, 0); LLVMValueRef result = LLVMBuildOr(builder_, e1Value, tagMask, ""); - return LLVMBuildIntToPtr(builder_, result, LLVMPointerType(LLVMInt64TypeInContext(context_), 1), ""); + return LLVMBuildIntToPtr(builder_, result, GetTaggedHPtrT(), ""); } LLVMValueRef LLVMIRBuilder::ConvertInt32ToTaggedInt(GateRef gate) @@ -2297,19 +2250,19 @@ LLVMValueRef LLVMIRBuilder::ConvertInt32ToTaggedInt(GateRef gate) LLVMValueRef LLVMIRBuilder::ConvertInt32ToTaggedInt(LLVMValueRef value) { - LLVMValueRef e1Value = LLVMBuildSExt(builder_, value, LLVMInt64TypeInContext(context_), ""); - auto tagMask = LLVMConstInt(LLVMInt64TypeInContext(context_), JSTaggedValue::TAG_INT, 0); + LLVMValueRef e1Value = LLVMBuildSExt(builder_, value, GetInt64T(), ""); + auto tagMask = LLVMConstInt(GetInt64T(), JSTaggedValue::TAG_INT, 0); LLVMValueRef result = LLVMBuildOr(builder_, e1Value, tagMask, ""); - return LLVMBuildIntToPtr(builder_, result, LLVMPointerType(LLVMInt64TypeInContext(context_), 1), ""); + return LLVMBuildIntToPtr(builder_, result, GetTaggedHPtrT(), ""); } LLVMValueRef LLVMIRBuilder::ConvertFloat64ToTaggedDouble(GateRef gate) { LLVMValueRef value = gate2LValue_[gate]; - LLVMValueRef e1Value = LLVMBuildBitCast(builder_, value, LLVMInt64TypeInContext(context_), ""); - auto offset = LLVMConstInt(LLVMInt64TypeInContext(context_), JSTaggedValue::DOUBLE_ENCODE_OFFSET, 0); + LLVMValueRef e1Value = LLVMBuildBitCast(builder_, value, GetInt64T(), ""); + auto offset = LLVMConstInt(GetInt64T(), JSTaggedValue::DOUBLE_ENCODE_OFFSET, 0); LLVMValueRef result = LLVMBuildAdd(builder_, e1Value, offset, ""); - return LLVMBuildIntToPtr(builder_, result, LLVMPointerType(LLVMInt64TypeInContext(context_), 1), ""); + return LLVMBuildIntToPtr(builder_, result, 
GetTaggedHPtrT(), ""); } LLVMValueRef LLVMIRBuilder::ConvertToTagged(GateRef gate) @@ -2336,17 +2289,16 @@ void LLVMIRBuilder::SaveDeoptVregInfo(std::vector &values, int32_t GateRef gate) { int32_t encodeIndex = Deoptimizier::EncodeDeoptVregIndex(index, curDepth, shift); - values.emplace_back(LLVMConstInt(LLVMInt32TypeInContext(context_), encodeIndex, false)); + values.emplace_back(LLVMConstInt(GetInt32T(), encodeIndex, false)); values.emplace_back(ConvertToTagged(gate)); } void LLVMIRBuilder::SaveDeoptVregInfoWithI64(std::vector &values, int32_t index, size_t curDepth, size_t shift, GateRef gate) { - LLVMValueRef value = LLVMBuildIntCast2(builder_, gate2LValue_.at(gate), - LLVMInt32TypeInContext(context_), 1, ""); + LLVMValueRef value = LLVMBuildIntCast2(builder_, gate2LValue_.at(gate), GetInt32T(), 1, ""); int32_t encodeIndex = Deoptimizier::EncodeDeoptVregIndex(index, curDepth, shift); - values.emplace_back(LLVMConstInt(LLVMInt32TypeInContext(context_), encodeIndex, false)); + values.emplace_back(LLVMConstInt(GetInt32T(), encodeIndex, false)); values.emplace_back(ConvertInt32ToTaggedInt(value)); } @@ -2359,8 +2311,7 @@ void LLVMIRBuilder::VisitDeoptCheck(GateRef gate) params.push_back(glue); // glue GateRef deoptType = acc_.GetValueIn(gate, 2); // 2: deopt type uint64_t v = acc_.GetConstantValue(deoptType); - params.push_back(ConvertInt32ToTaggedInt(LLVMConstInt(LLVMInt32TypeInContext(context_), - static_cast(v), false))); // deoptType + params.push_back(ConvertInt32ToTaggedInt(LLVMConstInt(GetInt32T(), static_cast(v), false))); // deoptType LLVMValueRef callee = GetExperimentalDeopt(module_); LLVMTypeRef funcType = GetExperimentalDeoptTy(); @@ -2371,12 +2322,11 @@ void LLVMIRBuilder::VisitDeoptCheck(GateRef gate) maxDepth++; frameState = acc_.GetFrameState(frameState); } - params.push_back(ConvertInt32ToTaggedInt(LLVMConstInt(LLVMInt32TypeInContext(context_), - static_cast(maxDepth), false))); + params.push_back(ConvertInt32ToTaggedInt(LLVMConstInt(GetInt32T(), static_cast(maxDepth), false))); size_t shift = Deoptimizier::ComputeShift(maxDepth); frameState = deoptFrameState; ArgumentAccessor argAcc(const_cast(circuit_)); - for (size_t curDepth = 0; curDepth <= maxDepth; curDepth++) { + for (int32_t curDepth = static_cast(maxDepth); curDepth >= 0; curDepth--) { ASSERT(acc_.GetOpCode(frameState) == OpCode::FRAME_STATE); GateRef frameValues = acc_.GetValueIn(frameState, 1); // 1: frame values const size_t numValueIn = acc_.GetNumValueIn(frameValues); @@ -2410,8 +2360,8 @@ void LLVMIRBuilder::VisitDeoptCheck(GateRef gate) // pc offset int32_t specPcOffsetIndex = static_cast(SpecVregIndex::PC_OFFSET_INDEX); int32_t encodeIndex = Deoptimizier::EncodeDeoptVregIndex(specPcOffsetIndex, curDepth, shift); - values.emplace_back(LLVMConstInt(LLVMInt32TypeInContext(context_), encodeIndex, false)); - values.emplace_back(LLVMConstInt(LLVMInt32TypeInContext(context_), pc, false)); + values.emplace_back(LLVMConstInt(GetInt32T(), encodeIndex, false)); + values.emplace_back(LLVMConstInt(GetInt32T(), pc, false)); // func int32_t specCallTargetIndex = static_cast(SpecVregIndex::FUNC_INDEX); SaveDeoptVregInfo(values, specCallTargetIndex, curDepth, shift, jsFunc); @@ -2431,11 +2381,8 @@ void LLVMIRBuilder::VisitDeoptCheck(GateRef gate) } LLVMModule::LLVMModule(NativeAreaAllocator* allocator, const std::string &name, bool logDbg, const std::string &triple) + : IRModule(allocator, logDbg, triple) { - tripleStr_ = triple; - CompilationConfig cfg(tripleStr_); - is64Bit_ = cfg.Is64Bit(); - triple_ = 
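Every vreg snapshot above is keyed by EncodeDeoptVregIndex(index, curDepth, shift), with shift derived once from ComputeShift(maxDepth), so one flat key space covers all inline depths; note that the loop now walks curDepth from maxDepth down to 0 instead of upward. A hypothetical sketch of such a packing, assuming depth occupies the low `shift` bits (the real bit layout lives in deoptimizer.h and may differ):

```cpp
// Hypothetical packing that matches the call shape above, not necessarily
// the engine's exact layout: depth in the low bits, vreg index above it.
#include <cstddef>
#include <cstdint>

size_t ComputeShiftSketch(size_t maxDepth) {
    size_t shift = 0;
    while ((static_cast<size_t>(1) << shift) <= maxDepth) {
        ++shift;  // enough low bits to represent any depth in [0, maxDepth]
    }
    return shift;
}

int32_t EncodeDeoptVregIndexSketch(int32_t index, size_t curDepth, size_t shift) {
    return static_cast<int32_t>((static_cast<uint32_t>(index) << shift) |
                                static_cast<uint32_t>(curDepth));
}
```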
cfg.GetTriple(); context_ = LLVMContextCreate(); module_ = LLVMModuleCreateWithNameInContext(name.c_str(), context_); LLVMSetTarget(module_, triple.c_str()); @@ -2444,7 +2391,18 @@ LLVMModule::LLVMModule(NativeAreaAllocator* allocator, const std::string &name, dUnitMD_ = LLVMDIBuilderCreateCompileUnit(dBuilder_, LLVMDWARFSourceLanguageC_plus_plus, dFileMD_, "ArkCompiler", 0, 0, NULL, 0, 0, NULL, 0, LLVMDWARFEmissionFull, 0, 0, 0, "/", 1, "", 0); - debugInfo_ = new DebugInfo(allocator, logDbg); + + voidT_ = LLVMVoidTypeInContext(context_); + int1T_ = LLVMInt1TypeInContext(context_); + int8T_ = LLVMInt8TypeInContext(context_); + int16T_ = LLVMInt16TypeInContext(context_); + int32T_ = LLVMInt32TypeInContext(context_); + int64T_ = LLVMInt64TypeInContext(context_); + floatT_ = LLVMFloatTypeInContext(context_); + doubleT_ = LLVMDoubleTypeInContext(context_); + taggedHPtrT_ = LLVMPointerType(LLVMInt64TypeInContext(context_), 1); + taggedPtrT_ = LLVMPointerType(LLVMInt64TypeInContext(context_), 0); + rawPtrT_ = LLVMPointerType(LLVMInt8TypeInContext(context_), 0); } LLVMModule::~LLVMModule() @@ -2461,10 +2419,6 @@ LLVMModule::~LLVMModule() LLVMDisposeDIBuilder(dBuilder_); dBuilder_ = nullptr; } - if (debugInfo_ != nullptr) { - delete debugInfo_; - debugInfo_ = nullptr; - } } void LLVMModule::InitialLLVMFuncTypeAndFuncByModuleCSigns() @@ -2535,42 +2489,25 @@ LLVMTypeRef LLVMModule::GenerateFuncType(const std::vector ¶ms LLVMTypeRef LLVMModule::ConvertLLVMTypeFromVariableType(VariableType type) { std::map machineTypeMap = { - {VariableType::VOID(), LLVMVoidTypeInContext(context_)}, - {VariableType::BOOL(), LLVMInt1TypeInContext(context_)}, - {VariableType::INT8(), LLVMInt8TypeInContext(context_)}, - {VariableType::INT16(), LLVMInt16TypeInContext(context_)}, - {VariableType::INT32(), LLVMInt32TypeInContext(context_)}, - {VariableType::INT64(), LLVMInt64TypeInContext(context_)}, - {VariableType::INT8(), LLVMInt8TypeInContext(context_)}, - {VariableType::INT16(), LLVMInt16TypeInContext(context_)}, - {VariableType::INT32(), LLVMInt32TypeInContext(context_)}, - {VariableType::INT64(), LLVMInt64TypeInContext(context_)}, - {VariableType::FLOAT32(), LLVMFloatTypeInContext(context_)}, - {VariableType::FLOAT64(), LLVMDoubleTypeInContext(context_)}, - {VariableType::NATIVE_POINTER(), LLVMInt64TypeInContext(context_)}, - {VariableType::JS_POINTER(), LLVMPointerType(LLVMInt64TypeInContext(context_), 1)}, - {VariableType::JS_ANY(), LLVMPointerType(LLVMInt64TypeInContext(context_), 1)}, + {VariableType::VOID(), GetVoidT() }, + {VariableType::BOOL(), GetInt1T() }, + {VariableType::INT8(), GetInt8T() }, + {VariableType::INT16(), GetInt16T() }, + {VariableType::INT32(), GetInt32T() }, + {VariableType::INT64(), GetInt64T() }, + {VariableType::INT8(), GetInt8T() }, + {VariableType::INT16(), GetInt16T() }, + {VariableType::INT32(), GetInt32T() }, + {VariableType::INT64(), GetInt64T() }, + {VariableType::FLOAT32(), GetFloatT() }, + {VariableType::FLOAT64(), GetDoubleT() }, + {VariableType::NATIVE_POINTER(), GetInt64T() }, + {VariableType::JS_POINTER(), GetTaggedHPtrT() }, + {VariableType::JS_ANY(), GetTaggedHPtrT()}, }; - if (Is32Bit()) { - machineTypeMap[VariableType::NATIVE_POINTER()] = LLVMInt32TypeInContext(context_); - LLVMTypeRef vectorType = LLVMVectorType( - LLVMPointerType(LLVMInt8TypeInContext(context_), 1), 2); // 2: packed vector type - machineTypeMap[VariableType::JS_POINTER()] = vectorType; - machineTypeMap[VariableType::JS_ANY()] = vectorType; - } return machineTypeMap[type]; } -std::string 
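Caching every LLVMTypeRef once per context is what lets the rest of this patch delete the repeated LLVMInt64TypeInContext(context_) calls, and it gives the two tagged pointer flavors one point of truth: address space 1 marks GC-visible heap references, address space 0 raw tagged slots. The same pattern in miniature against the LLVM-C API:

```cpp
// Minimal sketch of the per-context type cache the constructor above builds.
#include <llvm-c/Core.h>

struct TypeCache {
    LLVMTypeRef int64T;
    LLVMTypeRef taggedPtrT;   // i64* (addrspace 0): raw tagged slot access
    LLVMTypeRef taggedHPtrT;  // i64 addrspace(1)*: GC-tracked heap pointer
    LLVMTypeRef rawPtrT;      // i8* (addrspace 0): untyped native pointer
};

TypeCache BuildTypeCache(LLVMContextRef ctx) {
    TypeCache t;
    t.int64T = LLVMInt64TypeInContext(ctx);
    t.taggedPtrT = LLVMPointerType(t.int64T, 0);
    t.taggedHPtrT = LLVMPointerType(t.int64T, 1);  // address space 1 = GC reference
    t.rawPtrT = LLVMPointerType(LLVMInt8TypeInContext(ctx), 0);
    return t;
}
```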
LLVMModule::GetFuncName(const panda::ecmascript::MethodLiteral *methodLiteral, - const JSPandaFile *jsPandaFile) -{ - auto offset = methodLiteral->GetMethodId().GetOffset(); - std::string fileName = jsPandaFile->GetFileName(); - std::string name = MethodLiteral::GetMethodName(jsPandaFile, methodLiteral->GetMethodId()); - name += std::string("@") + std::to_string(offset) + std::string("@") + fileName; - return name; -} - LLVMValueRef LLVMModule::AddFunc(const panda::ecmascript::MethodLiteral *methodLiteral, const JSPandaFile *jsPandaFile) { LLVMTypeRef returnType = NewLType(MachineType::I64, GateType::TaggedValue()); // possibly get it for circuit @@ -2582,13 +2519,13 @@ LLVMValueRef LLVMModule::AddFunc(const panda::ecmascript::MethodLiteral *methodL paramTys.emplace_back(actualArgc); auto funcIndex = static_cast(CommonArgIdx::FUNC); auto numOfComArgs = static_cast(CommonArgIdx::NUM_OF_ARGS); - paramCount = methodLiteral->GetNumArgs() + numOfComArgs; + paramCount = methodLiteral->GetNumArgsWithCallField() + numOfComArgs; auto numOfRestArgs = paramCount - funcIndex; paramTys.insert(paramTys.end(), numOfRestArgs, NewLType(MachineType::I64, GateType::TaggedValue())); } else { auto funcIndex = static_cast(FastCallArgIdx::FUNC); auto numOfComArgs = static_cast(FastCallArgIdx::NUM_OF_ARGS); - paramCount = methodLiteral->GetNumArgs() + numOfComArgs; + paramCount = methodLiteral->GetNumArgsWithCallField() + numOfComArgs; auto numOfRestArgs = paramCount - funcIndex; paramTys.insert(paramTys.end(), numOfRestArgs, NewLType(MachineType::I64, GateType::TaggedValue())); } diff --git a/ecmascript/compiler/llvm_ir_builder.h b/ecmascript/compiler/llvm_ir_builder.h index cb1ae22e5cd67d65a6dc8fe31b7f0e586d541848..05b7b267663c499fce083e964fc88de9a337ce0c 100644 --- a/ecmascript/compiler/llvm_ir_builder.h +++ b/ecmascript/compiler/llvm_ir_builder.h @@ -28,33 +28,19 @@ #include "ecmascript/compiler/common_stubs.h" #include "ecmascript/compiler/interpreter_stub.h" #include "ecmascript/compiler/rt_call_signature.h" +#include "ecmascript/compiler/ir_module.h" +#include "ecmascript/compiler/ir_builder.h" #include "ecmascript/jspandafile/method_literal.h" #include "llvm-c/DebugInfo.h" #include "llvm-c/Core.h" namespace panda::ecmascript::kungfu { -using OperandsVector = std::set; class BasicBlock; class DebugInfo; using BasicBlockMap = std::map>; class LLVMIRBuilder; using HandleType = void(LLVMIRBuilder::*)(GateRef gate); -enum class MachineRep { - K_NONE, - K_BIT, - K_WORD8, - K_WORD16, - K_WORD32, - K_WORD64, - // FP representations must be last, and in order of increasing size. 
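The GetFuncName removed at the top of this hunk documents the AOT symbol scheme: methodName@methodIdOffset@fileName, which keeps symbols unique across files even when method names collide; the helper presumably travels with the IRModule refactor rather than disappearing. Restated with simplified types:

```cpp
// Sketch of the symbol mangling the removed helper performed; inputs are
// simplified stand-ins for MethodLiteral/JSPandaFile accessors.
#include <cstdint>
#include <string>

std::string MakeAotFuncName(const std::string& methodName, uint32_t methodIdOffset,
                            const std::string& fileName) {
    // e.g. "foo@1234@lib.abc": offset and file disambiguate same-named methods
    return methodName + "@" + std::to_string(methodIdOffset) + "@" + fileName;
}
```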
- K_FLOAT32, - K_FLOAT64, - K_SIMD128, - K_PTR_1, // Tagged Pointer - K_META, -}; - class BasicBlock { public: explicit BasicBlock(int id) : id_(id) @@ -98,7 +84,7 @@ private: }; struct NotMergedPhiDesc { - BasicBlock *pred; + int predBBId; GateRef operand; LLVMValueRef phi; }; @@ -111,7 +97,7 @@ struct BasicBlockImpl { std::vector unmergedPhis_; }; -class LLVMModule { +class LLVMModule : public IRModule { public: LLVMModule(NativeAreaAllocator* allocator, const std::string &name, bool logDbg, const std::string &triple); ~LLVMModule(); @@ -132,6 +118,11 @@ public: funcIndexMap_.emplace_back(std::make_tuple(index, func, isFastCall)); } + ModuleKind GetModuleKind() const override + { + return MODULE_LLVM; + } + LLVMValueRef GetFunction(size_t index) { // next optimization can be performed @@ -143,16 +134,6 @@ public: return nullptr; } - bool Is64Bit() const - { - return is64Bit_; - } - - bool Is32Bit() const - { - return !is64Bit_; - } - size_t GetFuncCount() const { return funcIndexMap_.size(); @@ -172,11 +153,6 @@ public: return callSigns_[index]; } - const std::string &GetTripleStr() const - { - return tripleStr_; - } - const std::vector &GetCSigns() const { return callSigns_; @@ -199,20 +175,62 @@ public: LLVMValueRef GetDeoptFunction(); - DebugInfo* GetDebugInfo() const + static constexpr int kDeoptEntryOffset = 0; + + LLVMTypeRef GetVoidT() const { - return debugInfo_; + return voidT_; } - Triple GetTriple() const + LLVMTypeRef GetInt1T() const { - return triple_; + return int1T_; } - std::string GetFuncName(const panda::ecmascript::MethodLiteral *methodLiteral, const JSPandaFile *jsPandaFile); + LLVMTypeRef GetInt8T() const + { + return int8T_; + } - static constexpr int kDeoptEntryOffset = 0; + LLVMTypeRef GetInt16T() const + { + return int16T_; + } + LLVMTypeRef GetInt32T() const + { + return int32T_; + } + + LLVMTypeRef GetInt64T() const + { + return int64T_; + } + + LLVMTypeRef GetFloatT() const + { + return floatT_; + } + + LLVMTypeRef GetDoubleT() const + { + return doubleT_; + } + + LLVMTypeRef GetTaggedPtrT() const + { + return taggedPtrT_; + } + + LLVMTypeRef GetTaggedHPtrT() const + { + return taggedHPtrT_; + } + + LLVMTypeRef GetRawPtrT() const + { + return rawPtrT_; + } private: LLVMValueRef AddAndGetFunc(const CallSignature *stubDescriptor); void InitialLLVMFuncTypeAndFuncByModuleCSigns(); @@ -228,68 +246,19 @@ private: LLVMMetadataRef dFileMD_ {nullptr}; LLVMMetadataRef dUnitMD_ {nullptr}; LLVMDIBuilderRef dBuilder_ {nullptr}; - DebugInfo* debugInfo_ {nullptr}; - std::string tripleStr_; - bool is64Bit_ {false}; - Triple triple_; -}; - -#define OPCODES(V) \ - V(Call, (GateRef gate, const std::vector &inList, OpCode op)) \ - V(RuntimeCall, (GateRef gate, const std::vector &inList)) \ - V(RuntimeCallWithArgv, (GateRef gate, const std::vector &inList)) \ - V(NoGcRuntimeCall, (GateRef gate, const std::vector &inList)) \ - V(BytecodeCall, (GateRef gate, const std::vector &inList)) \ - V(Alloca, (GateRef gate)) \ - V(Block, (int id, const OperandsVector &predecessors)) \ - V(Goto, (int block, int bbout)) \ - V(Parameter, (GateRef gate)) \ - V(Constant, (GateRef gate, std::bitset<64> value)) \ - V(ConstString, (GateRef gate, const ChunkVector &str)) \ - V(RelocatableData, (GateRef gate, uint64_t value)) \ - V(ZExtInt, (GateRef gate, GateRef e1)) \ - V(SExtInt, (GateRef gate, GateRef e1)) \ - V(FPExt, (GateRef gate, GateRef e1)) \ - V(FPTrunc, (GateRef gate, GateRef e1)) \ - V(Load, (GateRef gate, GateRef base)) \ - V(Store, (GateRef gate, GateRef base, GateRef value)) \ - 
V(IntRev, (GateRef gate, GateRef e1)) \ - V(Add, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Sub, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Mul, (GateRef gate, GateRef e1, GateRef e2)) \ - V(FloatDiv, (GateRef gate, GateRef e1, GateRef e2)) \ - V(IntDiv, (GateRef gate, GateRef e1, GateRef e2)) \ - V(UDiv, (GateRef gate, GateRef e1, GateRef e2)) \ - V(IntOr, (GateRef gate, GateRef e1, GateRef e2)) \ - V(IntAnd, (GateRef gate, GateRef e1, GateRef e2)) \ - V(IntXor, (GateRef gate, GateRef e1, GateRef e2)) \ - V(IntLsr, (GateRef gate, GateRef e1, GateRef e2)) \ - V(IntAsr, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Int32LessThanOrEqual, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Cmp, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Branch, (GateRef gate, GateRef cmp, GateRef btrue, GateRef bfalse)) \ - V(Switch, (GateRef gate, GateRef input, const std::vector &outList)) \ - V(SwitchCase, (GateRef gate, GateRef switchBranch, GateRef out)) \ - V(Phi, (GateRef gate, const std::vector &srcGates)) \ - V(Return, (GateRef gate, GateRef popCount, const std::vector &operands)) \ - V(ReturnVoid, (GateRef gate)) \ - V(CastIntXToIntY, (GateRef gate, GateRef e1)) \ - V(ChangeInt32ToDouble, (GateRef gate, GateRef e1)) \ - V(ChangeUInt32ToDouble, (GateRef gate, GateRef e1)) \ - V(ChangeDoubleToInt32, (GateRef gate, GateRef e1)) \ - V(BitCast, (GateRef gate, GateRef e1)) \ - V(IntLsl, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Mod, (GateRef gate, GateRef e1, GateRef e2)) \ - V(ChangeTaggedPointerToInt64, (GateRef gate, GateRef e1)) \ - V(ChangeInt64ToTagged, (GateRef gate, GateRef e1)) \ - V(DeoptCheck, (GateRef gate)) \ - V(TruncFloatToInt, (GateRef gate, GateRef e1)) \ - V(AddWithOverflow, (GateRef gate, GateRef e1, GateRef e2)) \ - V(SubWithOverflow, (GateRef gate, GateRef e1, GateRef e2)) \ - V(MulWithOverflow, (GateRef gate, GateRef e1, GateRef e2)) \ - V(ExtractValue, (GateRef gate, GateRef e1, GateRef e2)) \ - V(Sqrt, (GateRef gate, GateRef e1)) + LLVMTypeRef voidT_ {nullptr}; + LLVMTypeRef int1T_ {nullptr}; + LLVMTypeRef int8T_ {nullptr}; + LLVMTypeRef int16T_ {nullptr}; + LLVMTypeRef int32T_ {nullptr}; + LLVMTypeRef int64T_ {nullptr}; + LLVMTypeRef floatT_ {nullptr}; + LLVMTypeRef doubleT_ {nullptr}; + LLVMTypeRef taggedHPtrT_ {nullptr}; + LLVMTypeRef taggedPtrT_ {nullptr}; + LLVMTypeRef rawPtrT_ {nullptr}; +}; // runtime/common stub ID, opcodeOffset for bc stub using StubIdType = std::variant; @@ -310,7 +279,7 @@ private: OPCODES(DECLAREHANDLEOPCODE) #undef DECLAREHANDLEOPCODE - bool isPrologue(int bbId) const + bool IsPrologue(int bbId) const { return bbId == 0; } @@ -333,19 +302,12 @@ private: void InitializeHandlers(); std::string LLVMValueToString(LLVMValueRef val) const; - LLVMTypeRef GetIntPtr() const - { - if (compCfg_->Is32Bit()) { - return LLVMInt32TypeInContext(context_); - } - return LLVMInt64TypeInContext(context_); - } LLVMTypeRef ConvertLLVMTypeFromGate(GateRef gate) const; int64_t GetBitWidthFromMachineType(MachineType machineType) const; - LLVMValueRef PointerAdd(LLVMValueRef baseAddr, LLVMValueRef offset, LLVMTypeRef rep); - LLVMValueRef VectorAdd(LLVMValueRef e1Value, LLVMValueRef e2Value, LLVMTypeRef rep); - LLVMValueRef CanonicalizeToInt(LLVMValueRef value); - LLVMValueRef CanonicalizeToPtr(LLVMValueRef value); + LLVMValueRef PointerAdd(LLVMValueRef baseAddr, LLVMValueRef offsetInByte, LLVMTypeRef rep); + LLVMValueRef CanonicalizeToInt(LLVMValueRef value) const; + LLVMValueRef CanonicalizeToPtr(LLVMValueRef value) const; + LLVMValueRef CanonicalizeToPtr(LLVMValueRef 
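The reworked CanonicalizeToPtr gains an overload with an explicit target pointer type, and the new GetPtrAddressSpace/IsLPointer helpers point at the underlying rule: moving a value between address space 0 and the GC's address space 1 requires addrspacecast, never a plain bitcast. A sketch of that decision tree using only real LLVM-C calls (this hunk declares the helpers but does not show their bodies):

```cpp
// Sketch under the stated assumption; not the engine's actual implementation.
#include <llvm-c/Core.h>

LLVMValueRef CanonicalizeToPtrSketch(LLVMBuilderRef b, LLVMValueRef v, LLVMTypeRef ptrType) {
    LLVMTypeRef srcType = LLVMTypeOf(v);
    if (LLVMGetTypeKind(srcType) != LLVMPointerTypeKind) {
        return LLVMBuildIntToPtr(b, v, ptrType, "");       // integers: inttoptr
    }
    if (LLVMGetPointerAddressSpace(srcType) != LLVMGetPointerAddressSpace(ptrType)) {
        return LLVMBuildAddrSpaceCast(b, v, ptrType, "");  // crossing address spaces
    }
    return LLVMBuildPointerCast(b, v, ptrType, "");        // same address space
}
```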
value, LLVMTypeRef ptrType) const; LLVMValueRef GetCurrentFrameType(LLVMValueRef currentSpFrameAddr); void SetFunctionCallConv(); @@ -366,23 +328,70 @@ private: void SetCallConvAttr(const CallSignature *calleeDescriptor, LLVMValueRef call); bool IsHeapPointerType(LLVMTypeRef valueType); -private: - enum class CallInputs : size_t { - DEPEND = 0, - TARGET, - GLUE, - FIRST_PARAMETER - }; - enum class CallExceptionKind : bool { - HAS_PC_OFFSET = true, - NO_PC_OFFSET = false - }; + LLVMTypeRef GetVoidT() const + { + return llvmModule_->GetVoidT(); + } + + LLVMTypeRef GetInt1T() const + { + return llvmModule_->GetInt1T(); + } + + LLVMTypeRef GetInt8T() const + { + return llvmModule_->GetInt8T(); + } + + LLVMTypeRef GetInt16T() const + { + return llvmModule_->GetInt16T(); + } + + LLVMTypeRef GetInt32T() const + { + return llvmModule_->GetInt32T(); + } + + LLVMTypeRef GetInt64T() const + { + return llvmModule_->GetInt64T(); + } + + LLVMTypeRef GetFloatT() const + { + return llvmModule_->GetFloatT(); + } + + LLVMTypeRef GetDoubleT() const + { + return llvmModule_->GetDoubleT(); + } + LLVMTypeRef GetTaggedPtrT() const + { + return llvmModule_->GetTaggedPtrT(); + } + + LLVMTypeRef GetTaggedHPtrT() const + { + return llvmModule_->GetTaggedHPtrT(); + } + + LLVMTypeRef GetRawPtrT() const + { + return llvmModule_->GetRawPtrT(); + } + +private: LLVMDIBuilderRef GetDIBuilder() const { return llvmModule_ == nullptr ? nullptr : llvmModule_->GetDIBuilder(); } + unsigned GetPtrAddressSpace(LLVMValueRef v) const; + bool IsLInteger(LLVMValueRef v) const; + bool IsLPointer(LLVMValueRef v) const; LLVMRealPredicate ConvertLLVMPredicateFromFCMP(FCmpCondition cond); LLVMIntPredicate ConvertLLVMPredicateFromICMP(ICmpCondition cond); LLVMValueRef GetGlue(const std::vector &inList); @@ -414,6 +423,7 @@ private: GateRef gate); void SaveDeoptVregInfoWithI64(std::vector &values, int32_t index, size_t curDepth, size_t shift, GateRef gate); + int LookupPredBB(GateRef start, int bbID); const CompilationConfig *compCfg_ {nullptr}; const std::vector> *scheduledGates_ {nullptr}; diff --git a/ecmascript/compiler/loop_analysis.cpp b/ecmascript/compiler/loop_analysis.cpp index 814f1c921636721204496604b88a1c75c501f4dd..b66f90fd5b97d6b5c1308c71003f1871294d9279 100644 --- a/ecmascript/compiler/loop_analysis.cpp +++ b/ecmascript/compiler/loop_analysis.cpp @@ -79,8 +79,12 @@ void LoopAnalysis::CollectLoopBody(LoopInfo* loopInfo) // only calculate loop depth for state & depend edges, // since there is no phi of each value and each loop head. gateToDepth[nex] = ComputeLoopDepth(cur, nex, gateToDepth[cur]); - if (acc_.GetOpCode(nex) == OpCode::STATE_SPLIT) { - gateToDepth[acc_.GetFrameState(nex)] = gateToDepth[nex]; + if (acc_.HasFrameState(nex)) { + auto frameState = acc_.GetFrameState(nex); + if (acc_.GetOpCode(frameState) == OpCode::FRAME_STATE) { + gateToDepth[frameState] = gateToDepth[nex]; + gateToDepth[acc_.GetValueIn(frameState, 1)] = gateToDepth[nex]; + } } // state and depend edge should be visited first. 
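The loop-analysis hunk generalizes the old STATE_SPLIT special case: any gate that carries a frame state now propagates its loop depth onto the FRAME_STATE gate and onto that gate's frame-values input (value input 1), so deopt metadata stays attached to the loop body that owns it. Reduced to its data-flow shape:

```cpp
// Data-flow shape of the propagation above; GateRef and the map are
// stand-ins for the circuit types.
#include <cstddef>
#include <unordered_map>

using GateRef = int;

void PropagateDepthToFrameState(std::unordered_map<GateRef, size_t>& gateToDepth,
                                GateRef gate, GateRef frameState, GateRef frameValues) {
    const size_t depth = gateToDepth.at(gate);
    gateToDepth[frameState] = depth;   // FRAME_STATE inherits the owning gate's depth
    gateToDepth[frameValues] = depth;  // its frame values (value input 1) follow suit
}
```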
firstList.push(nex); @@ -120,14 +124,17 @@ void LoopAnalysis::UpdateLoopInfo(LoopInfo* loopInfo, GateRef gate, size_t dep) } break; } - case OpCode::STATE_SPLIT: { - loopInfo->size++; - loopInfo->loopBodys.emplace_back(acc_.GetFrameState(gate)); - break; - } default: break; } + if (acc_.HasFrameState(gate)) { + auto frameState = acc_.GetFrameState(gate); + if (acc_.GetOpCode(frameState) == OpCode::FRAME_STATE) { + loopInfo->size += 2; // 2: framestate and framevalues + loopInfo->loopBodys.emplace_back(frameState); + loopInfo->loopBodys.emplace_back(acc_.GetValueIn(frameState, 1)); + } + } loopInfo->loopBodys.emplace_back(gate); } diff --git a/ecmascript/compiler/loop_peeling.cpp b/ecmascript/compiler/loop_peeling.cpp index d7625444dd30dcf7f204264925d344d3002484dd..11d02b6bb8c93f776abb0bcdd84e8626d1385f34 100644 --- a/ecmascript/compiler/loop_peeling.cpp +++ b/ecmascript/compiler/loop_peeling.cpp @@ -102,6 +102,8 @@ void LoopPeeling::Peel() for (auto gate : asyncList) { bcBuilder_->UpdateAsyncRelatedGate(gate); } + + Print(); } void LoopPeeling::SetCopy(GateRef gate) @@ -130,7 +132,10 @@ void LoopPeeling::SetCopy(GateRef gate) GateRef LoopPeeling::GetCopy(GateRef gate) const { - return copies_.at(gate); + if (copies_.count(gate)) { + return copies_.at(gate); + } + return gate; } GateRef LoopPeeling::TryGetCopy(GateRef gate) const @@ -140,4 +145,19 @@ GateRef LoopPeeling::TryGetCopy(GateRef gate) const } return Circuit::NullGate(); } + +void LoopPeeling::Print() const +{ + if (IsLogEnabled()) { + LOG_COMPILER(INFO) << ""; + LOG_COMPILER(INFO) << "\033[34m" + << "====================" + << " After loop peeling " + << "[" << GetMethodName() << "]" + << "====================" + << "\033[0m"; + circuit_->PrintAllGatesWithBytecode(); + LOG_COMPILER(INFO) << "\033[34m" << "========================= End ==========================" << "\033[0m"; + } +} } // namespace panda::ecmascript::kungfu \ No newline at end of file diff --git a/ecmascript/compiler/loop_peeling.h b/ecmascript/compiler/loop_peeling.h index bd886928c6ef338e08d6c651f071dc8fb890d9ce..cc2ba53396969031973aea41eead022b5dff3958 100644 --- a/ecmascript/compiler/loop_peeling.h +++ b/ecmascript/compiler/loop_peeling.h @@ -27,9 +27,10 @@ namespace panda::ecmascript::kungfu { class LoopPeeling { public: - LoopPeeling(BytecodeCircuitBuilder* bcBuilder, Circuit *circuit, Chunk* chunk, LoopInfo* loopInfo) - : bcBuilder_(bcBuilder), circuit_(circuit), acc_(circuit), - chunk_(chunk), loopInfo_(loopInfo), copies_(chunk_) {} + LoopPeeling(BytecodeCircuitBuilder* bcBuilder, Circuit *circuit, bool enableLog, + const std::string& name, Chunk* chunk, LoopInfo* loopInfo) + : bcBuilder_(bcBuilder), circuit_(circuit), acc_(circuit), enableLog_(enableLog), + methodName_(name), chunk_(chunk), loopInfo_(loopInfo), copies_(chunk_) {} ~LoopPeeling() = default; void Peel(); @@ -37,9 +38,20 @@ private: void SetCopy(GateRef gate); GateRef GetCopy(GateRef gate) const; GateRef TryGetCopy(GateRef gate) const; + void Print() const; + bool IsLogEnabled() const + { + return enableLog_; + } + std::string GetMethodName() const + { + return methodName_; + } BytecodeCircuitBuilder* bcBuilder_{nullptr}; Circuit* circuit_; GateAccessor acc_; + bool enableLog_{false}; + std::string methodName_; Chunk* chunk_{nullptr}; LoopInfo* loopInfo_{nullptr}; ChunkMap copies_; diff --git a/ecmascript/compiler/new_object_stub_builder.cpp b/ecmascript/compiler/new_object_stub_builder.cpp index 907a55d524fe803639e31a246cc342e6b6892eba..dbbcec7fbdff2e725ba8b626c660a2a44142a8e4 100644 --- 
a/ecmascript/compiler/new_object_stub_builder.cpp +++ b/ecmascript/compiler/new_object_stub_builder.cpp @@ -16,6 +16,7 @@ #include "ecmascript/compiler/new_object_stub_builder.h" #include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/ecma_string.h" #include "ecmascript/global_env.h" #include "ecmascript/global_env_constants.h" #include "ecmascript/js_arguments.h" @@ -23,6 +24,10 @@ #include "ecmascript/js_thread.h" #include "ecmascript/lexical_env.h" #include "ecmascript/mem/mem.h" +#include "ecmascript/js_map_iterator.h" +#include "ecmascript/js_set_iterator.h" +#include "ecmascript/js_set.h" +#include "ecmascript/js_map.h" namespace panda::ecmascript::kungfu { void NewObjectStubBuilder::NewLexicalEnv(Variable *result, Label *exit, GateRef numSlots, GateRef parent) @@ -289,7 +294,7 @@ void NewObjectStubBuilder::NewJSArrayLiteral(Variable *result, Label *exit, Regi if (isEmptyArray) { Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), propertiesOffset, obj); Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), elementsOffset, obj); - Store(VariableType::JS_ANY(), glue_, result->ReadVariable(), lengthOffset, IntToTaggedInt(Int32(0))); + Store(VariableType::INT32(), glue_, result->ReadVariable(), lengthOffset, Int32(0)); } else { auto newProperties = Load(VariableType::JS_POINTER(), obj, propertiesOffset); Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), propertiesOffset, newProperties); @@ -297,8 +302,8 @@ void NewObjectStubBuilder::NewJSArrayLiteral(Variable *result, Label *exit, Regi auto newElements = Load(VariableType::JS_POINTER(), obj, elementsOffset); Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), elementsOffset, newElements); - GateRef arrayLength = Load(VariableType::JS_ANY(), obj, lengthOffset); - Store(VariableType::JS_ANY(), glue_, result->ReadVariable(), lengthOffset, arrayLength); + GateRef arrayLength = Load(VariableType::INT32(), obj, lengthOffset); + Store(VariableType::INT32(), glue_, result->ReadVariable(), lengthOffset, arrayLength); } auto accessor = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, ConstantIndex::ARRAY_LENGTH_ACCESSOR); @@ -421,6 +426,29 @@ void NewObjectStubBuilder::AllocLineStringObject(Variable *result, Label *exit, Jump(exit); } +void NewObjectStubBuilder::AllocSlicedStringObject(Variable *result, Label *exit, GateRef from, GateRef length, + FlatStringStubBuilder *flatString) +{ + auto env = GetEnvironment(); + + size_ = AlignUp(IntPtr(SlicedString::SIZE), IntPtr(static_cast(MemAlignment::MEM_ALIGN_OBJECT))); + Label afterAllocate(env); + AllocateInYoung(result, &afterAllocate); + + Bind(&afterAllocate); + GateRef stringClass = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, + ConstantIndex::SLICED_STRING_CLASS_INDEX); + StoreHClass(glue_, result->ReadVariable(), stringClass); + GateRef mixLength = Load(VariableType::INT32(), flatString->GetFlatString(), IntPtr(EcmaString::MIX_LENGTH_OFFSET)); + GateRef isCompressed = Int32And(Int32(EcmaString::STRING_COMPRESSED_BIT), mixLength); + SetLength(glue_, result->ReadVariable(), length, isCompressed); + SetRawHashcode(glue_, result->ReadVariable(), Int32(0)); + BuiltinsStringStubBuilder builtinsStringStubBuilder(this); + builtinsStringStubBuilder.StoreParent(glue_, result->ReadVariable(), flatString->GetFlatString()); + builtinsStringStubBuilder.StoreStartIndex(glue_, result->ReadVariable(), + Int32Add(from, flatString->GetStartIndex())); + Jump(exit); +} GateRef NewObjectStubBuilder::FastNewThisObject(GateRef 
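AllocSlicedStringObject above materializes a substring as a view rather than a copy: it allocates a fixed-size SlicedString header, copies the compression bit out of the flattened parent's mixLength field, and stores the parent plus an accumulated start index, making substring creation O(1). The implied payload, conceptually (real field offsets live in ecma_string.h):

```cpp
// Conceptual SlicedString payload implied by the stores above; names and
// layout here are illustrative only.
#include <cstdint>

struct SlicedStringSketch {
    uint32_t mixLength;    // slice length plus the compressed-string bit from the parent
    uint32_t rawHashcode;  // written as 0; computed lazily on first use
    void* parent;          // the flat backing string
    uint32_t startIndex;   // from + parent's own start index, so views compose
};
```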
glue, GateRef ctor) { @@ -499,7 +527,74 @@ GateRef NewObjectStubBuilder::NewThisObjectChecked(GateRef glue, GateRef ctor) return ret; } -GateRef NewObjectStubBuilder::CreateEmptyArray(GateRef glue) +void NewObjectStubBuilder::LoadArrayHClass(Variable *hclass, Label *exit, GateRef glue, + GateRef jsFunc, GateRef pc, GateRef profileTypeInfo, GateRef slotId, GateRef arrayLiteral) +{ + auto env = GetEnvironment(); + Label profiler(env); + Label slowpath(env); + DEFVARIABLE(ret, VariableType::JS_POINTER(), Undefined()); + + Branch(TaggedIsUndefined(profileTypeInfo), &slowpath, &profiler); + Bind(&profiler); + { + Label uninitialize(env); + Label compareLabel(env); + + GateRef slotValue = GetValueFromTaggedArray(profileTypeInfo, slotId); + Branch(TaggedIsInt(slotValue), &compareLabel, &uninitialize); + Bind(&compareLabel); + { + GateRef hclassIndex = TaggedGetInt(slotValue); + GateRef gConstAddr = Load(VariableType::JS_ANY(), glue, + IntPtr(JSThread::GlueData::GetGlobalConstOffset(env->Is32Bit()))); + ret = Load(VariableType::JS_POINTER(), gConstAddr, hclassIndex); + hclass->WriteVariable(*ret); + Jump(exit); + } + Bind(&uninitialize); + { + Label fastpath(env); + + auto pfAddr = LoadPfHeaderFromConstPool(jsFunc); + GateRef traceId = TruncPtrToInt32(PtrSub(IntPtr(pc), pfAddr)); + GateRef hclassIndex = LoadHCIndexFromConstPool(jsFunc, traceId); + Branch(Int32NotEqual(hclassIndex, + Int32(static_cast(ConstantIndex::ELEMENT_HOLE_TAGGED_HCLASS_INDEX))), + &fastpath, &slowpath); + Bind(&fastpath); + { + Label updateSlot(env); + + GateRef gConstAddr = Load(VariableType::JS_ANY(), glue, + IntPtr(JSThread::GlueData::GetGlobalConstOffset(env->Is32Bit()))); + ret = Load(VariableType::JS_POINTER(), gConstAddr, hclassIndex); + hclass->WriteVariable(*ret); + Branch(TaggedIsUndefined(slotValue), &updateSlot, exit); + Bind(&updateSlot); + SetValueToTaggedArray(VariableType::JS_ANY(), glue, profileTypeInfo, + slotId, IntToTaggedInt(hclassIndex)); + Jump(exit); + } + } + } + Bind(&slowpath); + { + // emptyarray + if (arrayLiteral == Circuit::NullGate()) { + GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit())); + GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue, glueGlobalEnvOffset); + auto arrayFunc = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv, GlobalEnv::ARRAY_FUNCTION_INDEX); + ret = Load(VariableType::JS_POINTER(), arrayFunc, IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); + } else { + ret = LoadHClass(arrayLiteral); + } + hclass->WriteVariable(*ret); + Jump(exit); + } +} + +GateRef NewObjectStubBuilder::CreateEmptyArrayCommon(GateRef glue, GateRef hclass, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -508,44 +603,134 @@ GateRef NewObjectStubBuilder::CreateEmptyArray(GateRef glue) DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); - GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit())); - GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue, glueGlobalEnvOffset); - auto arrayFunc = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv, GlobalEnv::ARRAY_FUNCTION_INDEX); - auto hclass = Load(VariableType::JS_POINTER(), arrayFunc, IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); GateRef size = GetObjectSizeFromHClass(hclass); - auto emptyArray = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); - + GateRef emptyArray = GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, 
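LoadArrayHClass above resolves the array hclass in three tiers: a profile-type-info slot holding a cached global-constant index, then a constant-pool lookup keyed by the bytecode trace id (updating the slot on a hit), and finally the generic Array.prototype hclass, or the literal's own hclass when one is supplied. The tiering as straight-line pseudologic, every name a stand-in for the corresponding stub-builder operation:

```cpp
// Pseudologic only: the three-tier resolution LoadArrayHClass wires up in IR.
// -1 stands for "fall through to the generic slow path".
int ResolveArrayHClassIndex(bool slotHoldsInt, int slotValue,
                            int tracedHClassIndex, int holeHClassIndex,
                            int* profileSlot) {
    if (slotHoldsInt) {
        return slotValue;                  // tier 1: index cached in the profile slot
    }
    if (tracedHClassIndex != holeHClassIndex) {
        *profileSlot = tracedHClassIndex;  // tier 2: constant-pool hit; cache it
        return tracedHClassIndex;
    }
    return -1;                             // tier 3: generic / literal hclass
}
```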
ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); SetParameters(glue, size); NewJSArrayLiteral(&result, &exit, RegionSpaceFlag::IN_YOUNG_SPACE, emptyArray, hclass, true); - Bind(&exit); - auto ret = *result; + GateRef ret = *result; + if (!callback.IsEmpty()) { + GateRef noneHClass = GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, ConstantIndex::ELEMENT_NONE_HCLASS_INDEX); + StoreHClass(glue, ret, noneHClass); + callback.ProfileCreateObject(ret); + } env->SubCfgExit(); return ret; } -GateRef NewObjectStubBuilder::CreateArrayWithBuffer(GateRef glue, GateRef index, GateRef jsFunc) +GateRef NewObjectStubBuilder::CreateEmptyArray(GateRef glue, ProfileOperation callback) +{ + auto env = GetEnvironment(); + GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit())); + GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue, glueGlobalEnvOffset); + GateRef arrayFunc = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv, GlobalEnv::ARRAY_FUNCTION_INDEX); + GateRef hclass = Load(VariableType::JS_POINTER(), arrayFunc, IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); + return CreateEmptyArrayCommon(glue, hclass, callback); +} + +GateRef NewObjectStubBuilder::CreateEmptyArray( + GateRef glue, GateRef jsFunc, GateRef pc, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + Label afterHClassInit(env); + + DEFVARIABLE(hclass, VariableType::JS_POINTER(), Undefined()); + LoadArrayHClass(&hclass, &afterHClassInit, glue, jsFunc, pc, profileTypeInfo, slotId); + Bind(&afterHClassInit); + GateRef result = CreateEmptyArrayCommon(glue, *hclass, callback); + env->SubCfgExit(); + return result; +} + +GateRef NewObjectStubBuilder::CreateArrayWithBuffer(GateRef glue, + GateRef index, GateRef jsFunc, GateRef pc, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); env->SubCfgEntry(&entry); Label exit(env); + Label afterHClassInit(env); DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); + DEFVARIABLE(hclass, VariableType::JS_POINTER(), Undefined()); GateRef method = GetMethodFromFunction(jsFunc); GateRef constPool = Load(VariableType::JS_ANY(), method, IntPtr(Method::CONSTANT_POOL_OFFSET)); GateRef module = GetModuleFromFunction(jsFunc); auto obj = GetArrayLiteralFromConstPool(glue, constPool, index, module); - auto hclass = LoadHClass(obj); - GateRef size = GetObjectSizeFromHClass(hclass); + LoadArrayHClass(&hclass, &afterHClassInit, glue, jsFunc, pc, profileTypeInfo, slotId, obj); + Bind(&afterHClassInit); + GateRef size = GetObjectSizeFromHClass(*hclass); SetParameters(glue, size); - NewJSArrayLiteral(&result, &exit, RegionSpaceFlag::IN_YOUNG_SPACE, obj, hclass, false); + NewJSArrayLiteral(&result, &exit, RegionSpaceFlag::IN_YOUNG_SPACE, obj, *hclass, false); Bind(&exit); auto ret = *result; + callback.ProfileCreateObject(ret); env->SubCfgExit(); return ret; } + +template +void NewObjectStubBuilder::CreateJSCollectionIterator( + Variable *result, Label *exit, GateRef thisValue, GateRef kind) +{ + ASSERT_PRINT((std::is_same_v || std::is_same_v), + "IteratorType must be JSSetIterator or JSMapIterator type"); + auto env = GetEnvironment(); + ConstantIndex iterClassIdx = static_cast(0); + int32_t iterOffset = 0; // ITERATED_SET_OFFSET + int32_t iterPrototypeIdx = 0; // ITERATOR_PROTOTYPE_INDEX + size_t linkedOffset = 0; // LINKED_MAP_OFFSET + if constexpr (std::is_same_v) { + iterClassIdx = 
ConstantIndex::JS_SET_ITERATOR_CLASS_INDEX; + iterOffset = IteratorType::ITERATED_SET_OFFSET; + iterPrototypeIdx = GlobalEnv::SET_ITERATOR_PROTOTYPE_INDEX; + linkedOffset = CollectionType::LINKED_SET_OFFSET; + } else { + iterClassIdx = ConstantIndex::JS_MAP_ITERATOR_CLASS_INDEX; + iterOffset = IteratorType::ITERATED_MAP_OFFSET; + iterPrototypeIdx = GlobalEnv::MAP_ITERATOR_PROTOTYPE_INDEX; + linkedOffset = CollectionType::LINKED_MAP_OFFSET; + } + GateRef iteratorHClass = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, iterClassIdx); + GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit())); + GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue_, glueGlobalEnvOffset); + GateRef prototype = GetGlobalEnvValue(VariableType::JS_POINTER(), glueGlobalEnv, iterPrototypeIdx); + SetPrototypeToHClass(VariableType::JS_POINTER(), glue_, iteratorHClass, prototype); + + Label afterAllocate(env); + NewJSObject(result, &afterAllocate, iteratorHClass); + Bind(&afterAllocate); + Label setProperties(env); + Branch(TaggedIsException(result->ReadVariable()), exit, &setProperties); + Bind(&setProperties); + + SetExtensibleToBitfield(glue_, result->ReadVariable(), true); + // GetLinked + GateRef linked = Load(VariableType::JS_ANY(), thisValue, IntPtr(linkedOffset)); + + // SetIterated + GateRef iteratorOffset = IntPtr(iterOffset); + Store(VariableType::JS_POINTER(), glue_, result->ReadVariable(), iteratorOffset, linked); + + // SetIteratorNextIndex + GateRef nextIndexOffset = IntPtr(IteratorType::NEXT_INDEX_OFFSET); + Store(VariableType::INT32(), glue_, result->ReadVariable(), nextIndexOffset, Int32(0)); + + // SetIterationKind + GateRef kindBitfieldOffset = IntPtr(IteratorType::BIT_FIELD_OFFSET); + Store(VariableType::INT32(), glue_, result->ReadVariable(), kindBitfieldOffset, kind); + Jump(exit); +} + +template void NewObjectStubBuilder::CreateJSCollectionIterator( + Variable *result, Label *exit, GateRef set, GateRef kind); +template void NewObjectStubBuilder::CreateJSCollectionIterator( + Variable *result, Label *exit, GateRef set, GateRef kind); } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/new_object_stub_builder.h b/ecmascript/compiler/new_object_stub_builder.h index 93752935dfdadf148b50f00d52f3c8e74268962b..677d44cd9d53740b72a07a72877fb8e51d990cd4 100644 --- a/ecmascript/compiler/new_object_stub_builder.h +++ b/ecmascript/compiler/new_object_stub_builder.h @@ -16,6 +16,8 @@ #ifndef ECMASCRIPT_COMPILER_NEW_OBJECT_STUB_BUILDER_H #define ECMASCRIPT_COMPILER_NEW_OBJECT_STUB_BUILDER_H +#include "ecmascript/compiler/builtins/builtins_string_stub_builder.h" +#include "ecmascript/compiler/profiler_operation.h" #include "ecmascript/compiler/stub_builder.h" namespace panda::ecmascript::kungfu { @@ -50,18 +52,28 @@ public: void NewArgumentsList(Variable *result, Label *exit, GateRef sp, GateRef startIdx, GateRef numArgs); void NewArgumentsObj(Variable *result, Label *exit, GateRef argumentsList, GateRef numArgs); void AllocLineStringObject(Variable *result, Label *exit, GateRef length, bool compressed); + void AllocSlicedStringObject(Variable *result, Label *exit, GateRef from, GateRef length, + FlatStringStubBuilder *flatString); void HeapAlloc(Variable *result, Label *exit, RegionSpaceFlag spaceType); void NewJSArrayLiteral(Variable *result, Label *exit, RegionSpaceFlag spaceType, GateRef obj, GateRef hclass, bool isEmptyArray); void InitializeWithSpeicalValue(Label *exit, GateRef object, GateRef value, GateRef start, 
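The template above selects per-collection constants with `if constexpr`, so set and map iterators share one allocation path with zero runtime dispatch. The same pattern in miniature (types and offsets are placeholders):

```cpp
// Compile-time dispatch in the style of CreateJSCollectionIterator above.
#include <type_traits>

struct JSSetIteratorSketch { static constexpr int ITERATED_OFFSET = 8; };
struct JSMapIteratorSketch { static constexpr int ITERATED_OFFSET = 16; };

template <typename IteratorType>
int IteratedFieldOffset() {
    static_assert(std::is_same_v<IteratorType, JSSetIteratorSketch> ||
                  std::is_same_v<IteratorType, JSMapIteratorSketch>,
                  "IteratorType must be a set or map iterator");
    if constexpr (std::is_same_v<IteratorType, JSSetIteratorSketch>) {
        return JSSetIteratorSketch::ITERATED_OFFSET;   // set-specific constant
    } else {
        return JSMapIteratorSketch::ITERATED_OFFSET;   // map-specific constant
    }
}
```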
GateRef end); GateRef FastNewThisObject(GateRef glue, GateRef ctor); GateRef NewThisObjectChecked(GateRef glue, GateRef ctor); - GateRef CreateEmptyArray(GateRef glue); - GateRef CreateArrayWithBuffer(GateRef glue, GateRef index, GateRef jsFunc); + GateRef CreateEmptyArray(GateRef glue, ProfileOperation callback); + GateRef CreateEmptyArray(GateRef glue, GateRef jsFunc, GateRef pc, GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback); + GateRef CreateArrayWithBuffer(GateRef glue, GateRef index, GateRef jsFunc, GateRef pc, + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); + void LoadArrayHClass(Variable *hclass, Label *exit, GateRef glue, GateRef jsFunc, + GateRef pc, GateRef profileTypeInfo, GateRef slotId, GateRef arrayLiteral = Circuit::NullGate()); void NewTaggedArrayChecked(Variable *result, GateRef len, Label *exit); + template + void CreateJSCollectionIterator(Variable *result, Label *exit, GateRef set, GateRef kind); private: static constexpr int MAX_TAGGED_ARRAY_LENGTH = 50; + GateRef CreateEmptyArrayCommon(GateRef glue, GateRef hclass, ProfileOperation callback); void AllocateInYoung(Variable *result, Label *exit); void InitializeTaggedArrayWithSpeicalValue(Label *exit, GateRef array, GateRef value, GateRef start, GateRef length); diff --git a/ecmascript/compiler/ntype_hcr_lowering.cpp b/ecmascript/compiler/ntype_hcr_lowering.cpp index 3061399875314aeba81f65acf0c150b7dd65adac..1ce2b152dbb87fe252b3984aa398941008637f31 100644 --- a/ecmascript/compiler/ntype_hcr_lowering.cpp +++ b/ecmascript/compiler/ntype_hcr_lowering.cpp @@ -14,6 +14,7 @@ */ #include "ecmascript/compiler/ntype_hcr_lowering.h" +#include "ecmascript/compiler/circuit_builder-inl.h" #include "ecmascript/dfx/vmstat/opt_code_profiler.h" namespace panda::ecmascript::kungfu { @@ -49,52 +50,169 @@ void NTypeHCRLowering::Lower(GateRef gate) switch (ecmaOpcode) { case EcmaOpcode::CREATEEMPTYARRAY_IMM8: case EcmaOpcode::CREATEEMPTYARRAY_IMM16: - LowerTypedCreateEmptyArray(gate); + LowerNTypedCreateEmptyArray(gate); + break; + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: + LowerNTypedCreateArrayWithBuffer(gate); break; case EcmaOpcode::STOWNBYINDEX_IMM8_V8_IMM16: case EcmaOpcode::STOWNBYINDEX_IMM16_V8_IMM16: case EcmaOpcode::WIDE_STOWNBYINDEX_PREF_V8_IMM32: - LowerTypedStownByIndex(gate); + LowerNTypedStownByIndex(gate); break; case EcmaOpcode::STOWNBYNAME_IMM8_ID16_V8: case EcmaOpcode::STOWNBYNAME_IMM16_ID16_V8: - LowerTypedStOwnByName(gate); + LowerNTypedStOwnByName(gate); + break; + case EcmaOpcode::THROW_UNDEFINEDIFHOLEWITHNAME_PREF_ID16: + LowerThrowUndefinedIfHoleWithName(gate); + break; + case EcmaOpcode::LDLEXVAR_IMM4_IMM4: + case EcmaOpcode::LDLEXVAR_IMM8_IMM8: + case EcmaOpcode::WIDE_LDLEXVAR_PREF_IMM16_IMM16: + LowerLdLexVar(gate); + break; + case EcmaOpcode::STLEXVAR_IMM4_IMM4: + case EcmaOpcode::STLEXVAR_IMM8_IMM8: + case EcmaOpcode::WIDE_STLEXVAR_PREF_IMM16_IMM16: + LowerStLexVar(gate); break; default: break; } } -void NTypeHCRLowering::LowerTypedCreateEmptyArray(GateRef gate) +void NTypeHCRLowering::LowerThrowUndefinedIfHoleWithName(GateRef gate) +{ + GateRef value = acc_.GetValueIn(gate, 1); // 1: the second parameter + builder_.LexVarIsHoleCheck(value); + acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), Circuit::NullGate()); +} + +void NTypeHCRLowering::LowerLdLexVar(GateRef gate) +{ + AddProfiling(gate); + GateRef level = acc_.GetValueIn(gate, 0); // 0: first parameter + GateRef index = 
acc_.GetValueIn(gate, 1); // 1: the second parameter + GateRef currentEnv = acc_.GetValueIn(gate, 2); // 2: the third parameter + + uint32_t levelValue = static_cast(acc_.GetConstantValue(level)); + uint32_t indexValue = static_cast(acc_.GetConstantValue(index)); + indexValue += LexicalEnv::RESERVED_ENV_LENGTH; + GateRef result = Circuit::NullGate(); + if (levelValue == 0) { + result = builder_.LoadFromTaggedArray(currentEnv, indexValue); + } else if (levelValue == 1) { // 1: level 1 + auto parentEnv = builder_.LoadFromTaggedArray(currentEnv, LexicalEnv::PARENT_ENV_INDEX); + result = builder_.LoadFromTaggedArray(parentEnv, indexValue); + } else { + // level > 1, go slowpath + return; + } + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); +} + +void NTypeHCRLowering::LowerStLexVar(GateRef gate) +{ + AddProfiling(gate); + GateRef level = acc_.GetValueIn(gate, 0); // 0: first parameter + GateRef index = acc_.GetValueIn(gate, 1); // 1: the second parameter + GateRef currentEnv = acc_.GetValueIn(gate, 2); // 2: the third parameter + GateRef value = acc_.GetValueIn(gate, 3); // 3: the fourth parameter + + uint32_t levelValue = static_cast(acc_.GetConstantValue(level)); + uint32_t indexValue = static_cast(acc_.GetConstantValue(index)); + indexValue += LexicalEnv::RESERVED_ENV_LENGTH; + GateRef result = Circuit::NullGate(); + if (levelValue == 0) { + result = builder_.StoreToTaggedArray(currentEnv, indexValue, value); + } else if (levelValue == 1) { // 1: level 1 + auto parentEnv = builder_.LoadFromTaggedArray(currentEnv, LexicalEnv::PARENT_ENV_INDEX); + result = builder_.StoreToTaggedArray(parentEnv, indexValue, value); + } else { + // level > 1, go slowpath + return; + } + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); +} + +void NTypeHCRLowering::LowerNTypedCreateEmptyArray(GateRef gate) { // in the future, the type of the elements in the array will be obtained through pgo, // and the type will be used to determine whether to create a typed-array. 
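LowerLdLexVar and LowerStLexVar above fast-path only levels 0 and 1: the slot index is biased by LexicalEnv::RESERVED_ENV_LENGTH, each level costs one load of PARENT_ENV_INDEX, and anything deeper stays on the runtime slow path. The same walk over a plain-data model of the environment (the reserved-slot count here is an assumption):

```cpp
// Plain-data model of the lexical env walk in the two lowerings above.
#include <cstddef>
#include <cstdint>
#include <vector>

struct LexEnvSketch {
    LexEnvSketch* parent = nullptr;   // stored at PARENT_ENV_INDEX in the real env
    std::vector<uint64_t> slots;      // reserved slots first, then user slots
};

constexpr size_t kReservedEnvLength = 2;  // assumption; the engine uses RESERVED_ENV_LENGTH

uint64_t* FastLexVarSlot(LexEnvSketch* env, uint32_t level, uint32_t index) {
    if (level > 1) {
        return nullptr;               // level > 1: defer to the runtime slow path
    }
    if (level == 1) {
        env = env->parent;            // exactly one parent hop, as the lowering emits
    }
    return &env->slots[kReservedEnvLength + index];  // caller guarantees the slot exists
}
```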
AddProfiling(gate); - GateRef array = builder_.CreateArray(0); + auto thread = tsManager_->GetEcmaVM()->GetJSThread(); + uint64_t bcAbsoluteOffset = GetBcAbsoluteOffset(gate); + ElementsKind kind = acc_.TryGetElementsKind(gate); + auto hclassIdx = thread->GetArrayHClassIndexMap().at(kind); + tsManager_->AddArrayTSConstantIndex(bcAbsoluteOffset, JSTaggedValue(static_cast(hclassIdx))); + GateRef array = builder_.CreateArray(kind, 0); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), array); } -void NTypeHCRLowering::LowerTypedStownByIndex(GateRef gate) +void NTypeHCRLowering::LowerNTypedCreateArrayWithBuffer(GateRef gate) +{ + // 1: number of value inputs + ASSERT(acc_.GetNumValueIn(gate) == 1); + GateRef index = acc_.GetValueIn(gate, 0); + auto thread = tsManager_->GetEcmaVM()->GetJSThread(); + uint64_t bcAbsoluteOffset = GetBcAbsoluteOffset(gate); + uint32_t cpIdx = static_cast(acc_.GetConstantValue(index)); + JSHandle constpoolHandle(tsManager_->GetConstantPool()); + JSTaggedValue arr = ConstantPool::GetLiteralFromCache( + thread, constpoolHandle.GetTaggedValue(), cpIdx, recordName_); + JSHandle arrayHandle(thread, arr); + + ElementsKind kind = acc_.TryGetElementsKind(gate); + auto hclassIdx = thread->GetArrayHClassIndexMap().at(kind); + GateType gateType = acc_.GetGateType(gate); + panda_file::File::EntityId id = ConstantPool::GetIdFromCache(constpoolHandle.GetTaggedValue(), cpIdx); + tsManager_->AddArrayTSConstantIndex(bcAbsoluteOffset, JSTaggedValue(static_cast(hclassIdx))); + tsManager_->AddArrayTSElements(id, arrayHandle->GetElements()); + tsManager_->AddArrayTSElementsKind(id, JSTaggedValue(static_cast(kind))); + gateType = tsManager_->TryNarrowUnionType(gateType); + + int elementIndex = -1; + if (tsManager_->IsArrayTypeKind(gateType)) { + elementIndex = tsManager_->GetElementsIndexByArrayType(gateType, id); + } + if (elementIndex == -1) { // slowpath + return; + } + + AddProfiling(gate); + GateRef elementIndexGate = builder_.IntPtr(elementIndex); + GateRef array = builder_.CreateArrayWithBuffer(kind, ArrayMetaDataAccessor::Mode::CREATE, index, elementIndexGate); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), array); +} + +void NTypeHCRLowering::LowerNTypedStownByIndex(GateRef gate) { // 3: number of value inputs ASSERT(acc_.GetNumValueIn(gate) == 3); GateRef receiver = acc_.GetValueIn(gate, 0); GateRef index = acc_.GetValueIn(gate, 1); GateRef value = acc_.GetValueIn(gate, 2); - if (acc_.GetOpCode(receiver) != OpCode::CREATE_ARRAY) { + if (acc_.GetOpCode(receiver) != OpCode::CREATE_ARRAY && + acc_.GetOpCode(receiver) != OpCode::CREATE_ARRAY_WITH_BUFFER) { return; } + builder_.COWArrayCheck(receiver); AddProfiling(gate); - uint32_t arraySize = acc_.GetArraySize(receiver); uint32_t indexValue = static_cast(acc_.GetConstantValue(index)); + uint32_t arraySize = acc_.GetArraySize(receiver); + if (indexValue > arraySize) { + acc_.TrySetElementsKind(receiver, ElementsKind::HOLE); + } acc_.SetArraySize(receiver, std::max(arraySize, indexValue + 1)); index = builder_.Int32(indexValue); builder_.StoreElement(receiver, index, value); acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), Circuit::NullGate()); } -void NTypeHCRLowering::LowerTypedStOwnByName(GateRef gate) +void NTypeHCRLowering::LowerNTypedStOwnByName(GateRef gate) { // 3: number of value inputs ASSERT(acc_.GetNumValueIn(gate) == 3); @@ -116,7 +234,7 @@ void NTypeHCRLowering::LowerTypedStOwnByName(GateRef gate) if (hclassIndex == -1) { // slowpath return; } - JSHClass *hclass = 
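Both array creations above consult thread->GetArrayHClassIndexMap().at(kind): the profiled ElementsKind selects a preinitialized array hclass index, which is also recorded via AddArrayTSConstantIndex for the AOT snapshot; a store past the current length demotes the kind to HOLE. The dispatch in miniature (enum values and indices are placeholders, only the shape matches):

```cpp
// Miniature of the ElementsKind -> hclass-index lookup used above.
#include <cstdint>
#include <map>

enum class ElementsKindSketch : uint8_t { NONE, INT, NUMBER, TAGGED, HOLE, GENERIC };

uint32_t ArrayHClassIndexFor(ElementsKindSketch kind,
                             const std::map<ElementsKindSketch, uint32_t>& indexMap) {
    return indexMap.at(kind);  // .at() mirrors the engine's map: unknown kind throws
}
```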
JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); + JSHClass *hclass = JSHClass::Cast(tsManager_->GetValueFromCache(hclassIndex).GetTaggedObject()); PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread, hclass, propKey); if (!plr.IsFound() || !plr.IsLocal() || plr.IsAccessor() || !plr.IsWritable()) { // slowpath @@ -130,6 +248,15 @@ void NTypeHCRLowering::LowerTypedStOwnByName(GateRef gate) acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), Circuit::NullGate()); } +uint64_t NTypeHCRLowering::GetBcAbsoluteOffset(GateRef gate) const +{ + uint64_t pcOffset = acc_.TryGetPcOffset(gate); + uint64_t pfOffset = reinterpret_cast(jsPandaFile_->GetHeader()); + uint64_t methodOffset = reinterpret_cast(methodLiteral_->GetBytecodeArray()); + uint64_t bcAbsoluteOffset = methodOffset - pfOffset + pcOffset; + return bcAbsoluteOffset; +} + void NTypeHCRLowering::AddProfiling(GateRef gate) { if (IsTraceBC()) { diff --git a/ecmascript/compiler/ntype_hcr_lowering.h b/ecmascript/compiler/ntype_hcr_lowering.h index 5e32b963987fca764b8033aeb56ca12cb7bd4183..b2256401f0fcfcc1e55661ee07ea8af999318cbf 100644 --- a/ecmascript/compiler/ntype_hcr_lowering.h +++ b/ecmascript/compiler/ntype_hcr_lowering.h @@ -25,12 +25,15 @@ namespace panda::ecmascript::kungfu { class NTypeHCRLowering { public: - NTypeHCRLowering(Circuit *circuit, PassContext *ctx, TSManager *tsManager, - bool enableLog, const std::string& name) + NTypeHCRLowering(Circuit *circuit, PassContext *ctx, TSManager *tsManager, const MethodLiteral *methodLiteral, + const CString &recordName, bool enableLog, const std::string& name) : circuit_(circuit), acc_(circuit), builder_(circuit, ctx->GetCompilerConfig()), + recordName_(recordName), tsManager_(tsManager), + jsPandaFile_(ctx->GetJSPandaFile()), + methodLiteral_(methodLiteral), enableLog_(enableLog), profiling_(ctx->GetCompilerConfig()->IsProfiling()), traceBc_(ctx->GetCompilerConfig()->IsTraceBC()), @@ -42,9 +45,14 @@ public: void RunNTypeHCRLowering(); private: void Lower(GateRef gate); - void LowerTypedCreateEmptyArray(GateRef gate); - void LowerTypedStownByIndex(GateRef gate); - void LowerTypedStOwnByName(GateRef gate); + void LowerNTypedCreateEmptyArray(GateRef gate); + void LowerNTypedCreateArrayWithBuffer(GateRef gate); + void LowerNTypedStownByIndex(GateRef gate); + void LowerNTypedStOwnByName(GateRef gate); + void LowerLdLexVar(GateRef gate); + void LowerStLexVar(GateRef gate); + void LowerThrowUndefinedIfHoleWithName(GateRef gate); + uint64_t GetBcAbsoluteOffset(GateRef gate) const; bool IsLogEnabled() const { @@ -70,7 +78,10 @@ private: Circuit *circuit_ {nullptr}; GateAccessor acc_; CircuitBuilder builder_; - [[maybe_unused]] TSManager *tsManager_ {nullptr}; + const CString &recordName_; + TSManager *tsManager_ {nullptr}; + const JSPandaFile *jsPandaFile_ {nullptr}; + const MethodLiteral *methodLiteral_ {nullptr}; bool enableLog_ {false}; bool profiling_ {false}; bool traceBc_ {false}; diff --git a/ecmascript/compiler/ntype_mcr_lowering.cpp b/ecmascript/compiler/ntype_mcr_lowering.cpp index e50acd65533f67cddfd77bd5aceca8ba3b73b8e6..e26423605aa6d0cdf777418ddfcac853ea3d4f3e 100644 --- a/ecmascript/compiler/ntype_mcr_lowering.cpp +++ b/ecmascript/compiler/ntype_mcr_lowering.cpp @@ -46,6 +46,9 @@ void NTypeMCRLowering::Lower(GateRef gate) case OpCode::CREATE_ARRAY: LowerCreateArray(gate, glue); break; + case OpCode::CREATE_ARRAY_WITH_BUFFER: + LowerCreateArrayWithBuffer(gate, glue); + break; default: break; } @@ -63,46 +66,92 @@ void 
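GetBcAbsoluteOffset above keys PGO data on a file-stable coordinate: the method's bytecode start relative to the panda file header, plus the pc offset within the method. Freed of the reinterpret_casts:

```cpp
// The offset computation above as plain arithmetic over base addresses.
#include <cstdint>

uint64_t BcAbsoluteOffset(uint64_t pandaFileBase, uint64_t bytecodeArrayBase,
                          uint64_t pcOffset) {
    // method start within the file + pc within the method: stable across runs
    return (bytecodeArrayBase - pandaFileBase) + pcOffset;
}
```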
NTypeMCRLowering::LowerCreateArray(GateRef gate, GateRef glue) void NTypeMCRLowering::LowerCreateEmptyArray(GateRef gate) { - JSHandle arrayFunc(tsManager_->GetEcmaVM()->GetGlobalEnv()->GetArrayFunction()); - JSTaggedValue protoOrHClass = arrayFunc->GetProtoOrHClass(); - JSHClass *arrayHC = JSHClass::Cast(protoOrHClass.GetTaggedObject()); - size_t arraySize = arrayHC->GetObjectSize(); - size_t lengthAccessorOffset = arrayHC->GetInlinedPropertiesOffset(JSArray::LENGTH_INLINE_PROPERTY_INDEX); + GateRef length = builder_.Int32(0); + GateRef elements = builder_.GetGlobalConstantValue(ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); - GateRef obj = builder_.GetGlobalConstantValue(ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); - GateRef globalEnv = builder_.GetGlobalEnv(); - GateRef accessor = builder_.GetGlobalConstantValue(ConstantIndex::ARRAY_LENGTH_ACCESSOR); - GateRef hclass = builder_.GetGlobalEnvObjHClass(globalEnv, GlobalEnv::ARRAY_FUNCTION_INDEX); - GateRef size = builder_.IntPtr(arrayHC->GetObjectSize()); - - builder_.StartAllocate(); - GateRef array = builder_.HeapAlloc(size, GateType::TaggedValue(), RegionSpaceFlag::IN_YOUNG_SPACE); - // initialization - for (size_t offset = JSArray::SIZE; offset < arraySize; offset += JSTaggedValue::TaggedTypeSize()) { - builder_.StoreConstOffset(VariableType::INT64(), array, offset, builder_.Undefined()); - } - builder_.StoreConstOffset(VariableType::JS_POINTER(), array, 0, hclass); - builder_.StoreConstOffset(VariableType::INT64(), array, ECMAObject::HASH_OFFSET, - builder_.Int64(JSTaggedValue(0).GetRawData())); - builder_.StoreConstOffset(VariableType::JS_POINTER(), array, JSObject::PROPERTIES_OFFSET, obj); - builder_.StoreConstOffset(VariableType::JS_POINTER(), array, JSObject::ELEMENTS_OFFSET, obj); - builder_.StoreConstOffset(VariableType::JS_ANY(), array, JSArray::LENGTH_OFFSET, - builder_.Int32ToTaggedInt(builder_.Int32(0))); - builder_.StoreConstOffset(VariableType::JS_POINTER(), array, lengthAccessorOffset, accessor); - builder_.FinishAllocate(); + auto array = NewJSArrayLiteral(gate, elements, length); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), array); } void NTypeMCRLowering::LowerCreateArrayWithOwn(GateRef gate, GateRef glue) { - size_t elementsLength = acc_.GetArraySize(gate); + uint32_t elementsLength = acc_.GetArraySize(gate); GateRef length = builder_.IntPtr(elementsLength); + GateRef elements = CreateElementsWithLength(gate, glue, elementsLength); + + auto array = NewJSArrayLiteral(gate, elements, length); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), array); +} + +void NTypeMCRLowering::LowerCreateArrayWithBuffer(GateRef gate, GateRef glue) +{ + Environment env(gate, circuit_, &builder_); + // 2: number of value inputs + ASSERT(acc_.GetNumValueIn(gate) == 2); + GateRef index = acc_.GetValueIn(gate, 0); + GateRef aotElmIndex = acc_.GetValueIn(gate, 1); + auto elementIndex = acc_.GetConstantValue(aotElmIndex); + uint32_t constPoolIndex = static_cast(acc_.GetConstantValue(index)); + ArgumentAccessor argAcc(circuit_); + GateRef frameState = GetFrameState(gate); + GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC); + GateRef literialElements = LoadFromConstPool(jsFunc, elementIndex); + auto thread = tsManager_->GetEcmaVM()->GetJSThread(); + JSHandle constpoolHandle(tsManager_->GetConstantPool()); + JSTaggedValue arr = ConstantPool::GetLiteralFromCache( + thread, constpoolHandle.GetTaggedValue(), constPoolIndex, recordName_); + JSHandle arrayHandle(thread, arr); + TaggedArray 
*arrayLiteral = TaggedArray::Cast(arrayHandle->GetElements()); + uint32_t literialLength = arrayLiteral->GetLength(); + uint32_t arrayLength = acc_.GetArraySize(gate); + GateRef elements = Circuit::NullGate(); + GateRef length = Circuit::NullGate(); + if (arrayLength > literialLength) { + elements = CreateElementsWithLength(gate, glue, arrayLength); + for (uint32_t i = 0; i < literialLength; i++) { + GateRef value = builder_.LoadFromTaggedArray(literialElements, i); + builder_.StoreToTaggedArray(elements, i, value); + } + length = builder_.IntPtr(arrayLength); + } else { + elements = literialElements; + length = builder_.IntPtr(literialLength); + } + + auto array = NewJSArrayLiteral(gate, elements, length); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), array); +} + +GateRef NTypeMCRLowering::LoadFromConstPool(GateRef jsFunc, size_t index) +{ + GateRef constPool = builder_.GetConstPool(jsFunc); + return builder_.LoadFromTaggedArray(constPool, index); +} + +GateRef NTypeMCRLowering::CreateElementsWithLength(GateRef gate, GateRef glue, size_t arrayLength) +{ GateRef elements = Circuit::NullGate(); - if (elementsLength < MAX_TAGGED_ARRAY_LENGTH) { - elements = NewTaggedArray(elementsLength); + GateRef length = builder_.IntPtr(arrayLength); + if (arrayLength < MAX_TAGGED_ARRAY_LENGTH) { + elements = NewTaggedArray(arrayLength); } else { elements = LowerCallRuntime(glue, gate, RTSTUB_ID(NewTaggedArray), { builder_.Int32ToTaggedInt(length) }, true); } + return elements; +} + +GateRef NTypeMCRLowering::NewJSArrayLiteral(GateRef gate, GateRef elements, GateRef length) +{ + ElementsKind kind = acc_.GetArrayMetaDataAccessor(gate).GetElementsKind(); + GateRef hclass = Circuit::NullGate(); + if (!Elements::IsGeneric(kind)) { + auto thread = tsManager_->GetEcmaVM()->GetJSThread(); + auto hclassIndex = thread->GetArrayHClassIndexMap().at(kind); + hclass = builder_.GetGlobalConstantValue(hclassIndex); + } else { + GateRef globalEnv = builder_.GetGlobalEnv(); + hclass = builder_.GetGlobalEnvObjHClass(globalEnv, GlobalEnv::ARRAY_FUNCTION_INDEX); + } JSHandle arrayFunc(tsManager_->GetEcmaVM()->GetGlobalEnv()->GetArrayFunction()); JSTaggedValue protoOrHClass = arrayFunc->GetProtoOrHClass(); @@ -110,10 +159,8 @@ void NTypeMCRLowering::LowerCreateArrayWithOwn(GateRef gate, GateRef glue) size_t arraySize = arrayHC->GetObjectSize(); size_t lengthAccessorOffset = arrayHC->GetInlinedPropertiesOffset(JSArray::LENGTH_INLINE_PROPERTY_INDEX); - GateRef obj = builder_.GetGlobalConstantValue(ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); - GateRef globalEnv = builder_.GetGlobalEnv(); + GateRef emptyArray = builder_.GetGlobalConstantValue(ConstantIndex::EMPTY_ARRAY_OBJECT_INDEX); GateRef accessor = builder_.GetGlobalConstantValue(ConstantIndex::ARRAY_LENGTH_ACCESSOR); - GateRef hclass = builder_.GetGlobalEnvObjHClass(globalEnv, GlobalEnv::ARRAY_FUNCTION_INDEX); GateRef size = builder_.IntPtr(arrayHC->GetObjectSize()); builder_.StartAllocate(); @@ -125,12 +172,12 @@ void NTypeMCRLowering::LowerCreateArrayWithOwn(GateRef gate, GateRef glue) builder_.StoreConstOffset(VariableType::JS_POINTER(), array, 0, hclass); builder_.StoreConstOffset(VariableType::INT64(), array, ECMAObject::HASH_OFFSET, builder_.Int64(JSTaggedValue(0).GetRawData())); - builder_.StoreConstOffset(VariableType::JS_POINTER(), array, JSObject::PROPERTIES_OFFSET, obj); + builder_.StoreConstOffset(VariableType::JS_POINTER(), array, JSObject::PROPERTIES_OFFSET, emptyArray); builder_.StoreConstOffset(VariableType::JS_POINTER(), array, 
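LowerCreateArrayWithBuffer above reconciles two lengths: when compile-time-observed stores extend past the literal, it allocates the larger backing store and copies the literal prefix element by element; otherwise it reuses the literal's elements directly. A scalar sketch of that reconciliation (names are stand-ins):

```cpp
// Scalar sketch of the length reconciliation above; uint64_t stands in for
// a tagged element slot.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<uint64_t> BuildElements(const std::vector<uint64_t>& literal,
                                    uint32_t arrayLength) {
    if (arrayLength <= literal.size()) {
        return literal;                               // reuse the literal elements as-is
    }
    std::vector<uint64_t> elements(arrayLength);      // grown store; tail stays hole-like
    std::copy(literal.begin(), literal.end(), elements.begin());
    return elements;
}
```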
JSObject::ELEMENTS_OFFSET, elements); - builder_.StoreConstOffset(VariableType::JS_ANY(), array, JSArray::LENGTH_OFFSET, builder_.Int32ToTaggedInt(length)); + builder_.StoreConstOffset(VariableType::INT32(), array, JSArray::LENGTH_OFFSET, length); builder_.StoreConstOffset(VariableType::JS_POINTER(), array, lengthAccessorOffset, accessor); builder_.FinishAllocate(); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), array); + return array; } GateRef NTypeMCRLowering::NewTaggedArray(size_t length) diff --git a/ecmascript/compiler/ntype_mcr_lowering.h b/ecmascript/compiler/ntype_mcr_lowering.h index 9e9764352e2fb1dfc088bb4b9305231054295034..079db5cab1660e9ab3560d24b74174056d5b2c89 100644 --- a/ecmascript/compiler/ntype_mcr_lowering.h +++ b/ecmascript/compiler/ntype_mcr_lowering.h @@ -24,13 +24,14 @@ namespace panda::ecmascript::kungfu { class NTypeMCRLowering { public: - NTypeMCRLowering(Circuit *circuit, PassContext *ctx, TSManager *tsManager, + NTypeMCRLowering(Circuit *circuit, PassContext *ctx, const CString &recordName, bool enableLog, const std::string& name) : circuit_(circuit), acc_(circuit), builder_(circuit, ctx->GetCompilerConfig()), dependEntry_(circuit->GetDependRoot()), - tsManager_(tsManager), + tsManager_(ctx->GetTSManager()), + recordName_(recordName), enableLog_(enableLog), profiling_(ctx->GetCompilerConfig()->IsProfiling()), traceBc_(ctx->GetCompilerConfig()->IsTraceBC()), @@ -44,13 +45,24 @@ private: static constexpr int MAX_TAGGED_ARRAY_LENGTH = 50; void Lower(GateRef gate); void LowerCreateArray(GateRef gate, GateRef glue); + void LowerCreateArrayWithBuffer(GateRef gate, GateRef glue); void LowerCreateEmptyArray(GateRef gate); void LowerCreateArrayWithOwn(GateRef gate, GateRef glue); + void LowerStLexVar(GateRef gate); + void LowerLdLexVar(GateRef gate); + GateRef LoadFromConstPool(GateRef jsFunc, size_t index); + GateRef NewJSArrayLiteral(GateRef gate, GateRef elements, GateRef length); GateRef NewTaggedArray(size_t length); + GateRef CreateElementsWithLength(GateRef gate, GateRef glue, size_t arrayLength); GateRef LowerCallRuntime(GateRef glue, GateRef hirGate, int index, const std::vector &args, bool useLabel = false); + GateRef GetFrameState(GateRef gate) const + { + return acc_.GetFrameState(gate); + } + bool IsLogEnabled() const { return enableLog_; @@ -66,6 +78,8 @@ private: CircuitBuilder builder_; GateRef dependEntry_; TSManager *tsManager_ {nullptr}; + const CString &recordName_; + panda_file::File::EntityId methodId_ {0}; bool enableLog_ {false}; bool profiling_ {false}; bool traceBc_ {false}; diff --git a/ecmascript/compiler/number_gate_info.h b/ecmascript/compiler/number_gate_info.h index caf0507d8640e7e63f71bed9c16ed9116ea9e763..d7e0d0936adeea6db81b6d518270d6adc1f6855a 100644 --- a/ecmascript/compiler/number_gate_info.h +++ b/ecmascript/compiler/number_gate_info.h @@ -17,6 +17,7 @@ #define ECMASCRIPT_NUMBER_GATE_INFO_H #include "ecmascript/compiler/gate_accessor.h" #include "ecmascript/js_hclass.h" +#include "ecmascript/js_typed_array.h" namespace panda::ecmascript::kungfu { @@ -24,6 +25,7 @@ enum class TypeInfo { NONE, INT1, INT32, + UINT32, FLOAT64, TAGGED, }; @@ -98,17 +100,19 @@ public: max_ = *std::lower_bound(rangeBounds_.begin(), rangeBounds_.end(), max); } } - + static constexpr int32_t UINT30_MAX = 0x3fffffff; - static const inline std::vector rangeBounds_ = { INT32_MIN, INT32_MIN + 1, - -1, 0, 1, UINT30_MAX, UINT30_MAX + 1, - INT32_MAX - 1, INT32_MAX }; + static constexpr int32_t TYPED_ARRAY_ONHEAP_MAX = 
JSTypedArray::MAX_ONHEAP_LENGTH; + static constexpr int32_t UINT18_MAX = (1 << 18) - 1; + static const inline std::vector rangeBounds_ = { INT32_MIN, INT32_MIN + 1, -UINT18_MAX, -TYPED_ARRAY_ONHEAP_MAX, + -1, 0, 1, TYPED_ARRAY_ONHEAP_MAX - 1, TYPED_ARRAY_ONHEAP_MAX, TYPED_ARRAY_ONHEAP_MAX + 1, + TYPED_ARRAY_ONHEAP_MAX * 3, UINT18_MAX, UINT30_MAX, UINT30_MAX + 1, INT32_MAX - 1, INT32_MAX }; static RangeInfo NONE() { return RangeInfo(INT32_MAX, INT32_MIN); } - + static RangeInfo ANY() { return RangeInfo(INT32_MIN, INT32_MAX); @@ -151,11 +155,81 @@ public: RangeInfo operator+ (const RangeInfo &rhs) const { + ASSERT(min_ <= max_ && rhs.min_ <= rhs.max_); int32_t nmax = MaybeAddOverflow(rhs) ? INT32_MAX : max_ + rhs.max_; int32_t nmin = MaybeAddUnderflow(rhs) ? INT32_MIN : min_ + rhs.min_; return RangeInfo(nmin, nmax); } + RangeInfo operator% (const RangeInfo &rhs) const + { + ASSERT(min_ <= max_ && rhs.min_ <= rhs.max_); + RangeInfo result = RangeInfo(0, 0); + int32_t nmax = std::max(std::abs(rhs.min_), std::abs(rhs.max_)); + if (max_ > 0) result = result.Union(RangeInfo(0, nmax - 1)); + if (min_ < 0) result = result.Union(RangeInfo(-nmax + 1, 0)); + return result; + } + + bool MaybeZero() const + { + return min_ <= 0 && max_ >= 0; + } + + RangeInfo operator* (const RangeInfo &rhs) const + { + ASSERT(min_ <= max_ && rhs.min_ <= rhs.max_); + int32_t nmax = GetMaxMulResult(rhs); + int32_t nmin = GetMinMulResult(rhs); + return RangeInfo(nmin, nmax); + } + + int32_t GetMaxMulResult(const RangeInfo &rhs) const + { + return std::max({ TryMul(min_, rhs.min_), TryMul(min_, rhs.max_), TryMul(max_, rhs.min_), TryMul(max_, rhs.max_) }); + } + + int32_t GetMinMulResult(const RangeInfo &rhs) const + { + return std::min({ TryMul(min_, rhs.min_), TryMul(min_, rhs.max_), TryMul(max_, rhs.min_), TryMul(max_, rhs.max_) }); + } + + int32_t TryMul(int32_t lhs, int32_t rhs) const + { + if (MaybeMulOverflow(lhs, rhs)){ + return INT32_MAX; + } + if (MaybeMulUnderflow(lhs, rhs)){ + return INT32_MIN; + } + return lhs * rhs; + } + + bool MaybeMulOverflowOrUnderflow(const RangeInfo &rhs) const + { + return MaybeMulOverflow(rhs) || MaybeMulUnderflow(rhs); + } + + bool MaybeMulUnderflow(const RangeInfo &rhs) const + { + return MaybeMulUnderflow(min_, rhs.max_) || MaybeMulUnderflow(max_, rhs.min_); + } + + bool MaybeMulOverflow(const RangeInfo &rhs) const + { + return MaybeMulOverflow(max_, rhs.max_) || MaybeMulOverflow(min_, rhs.min_); + } + + bool MaybeMulUnderflow(int32_t lhs, int32_t rhs) const + { + return (lhs > 0 && rhs < 0 && rhs < INT32_MIN / lhs) || (lhs < 0 && rhs > 0 && lhs < INT32_MIN / rhs); + } + + bool MaybeMulOverflow(int32_t lhs, int32_t rhs) const + { + return (lhs > 0 && rhs > 0 && lhs > INT32_MAX / rhs) || (lhs < 0 && rhs < 0 && lhs < INT32_MAX / rhs); + } + bool MaybeSubOverflow(const RangeInfo &rhs) const { return (rhs.min_ < 0) && (max_ > INT32_MAX + rhs.min_); @@ -173,6 +247,7 @@ public: RangeInfo operator- (const RangeInfo &rhs) const { + ASSERT(min_ <= max_ && rhs.min_ <= rhs.max_); int32_t nmax = MaybeSubOverflow(rhs) ? INT32_MAX : max_ - rhs.min_; int32_t nmin = MaybeSubUnderflow(rhs) ? 
INT32_MIN : min_ - rhs.max_; return RangeInfo(nmin, nmax); @@ -188,6 +263,8 @@ public: RangeInfo SHR(const RangeInfo &rhs) const { + ASSERT(min_ <= max_); + ASSERT(rhs.max_ == rhs.min_); if (MaybeShrOverflow(rhs)) { // assume no overflow occurs since overflow will lead to deopt return RangeInfo(0, std::max(0, GetMax())); @@ -202,6 +279,7 @@ public: RangeInfo ASHR(const RangeInfo &rhs) const { + ASSERT(min_ <= max_); ASSERT(rhs.max_ == rhs.min_); int32_t shift = rhs.max_ & 0x1f; // 0x1f : shift bits int32_t nmin = min_ >> shift; @@ -223,7 +301,7 @@ public: { return (min_ == INT32_MAX) && (max_ == INT32_MIN); } - + private: int32_t min_ {INT32_MIN}; int32_t max_ {INT32_MAX}; diff --git a/ecmascript/compiler/number_speculative_lowering.cpp b/ecmascript/compiler/number_speculative_lowering.cpp index ac87c3642089f494b0184a5c196780b8b3a960d7..ff4c5c9a10dea960e9f3c407d2b7efd41f791d69 100644 --- a/ecmascript/compiler/number_speculative_lowering.cpp +++ b/ecmascript/compiler/number_speculative_lowering.cpp @@ -32,14 +32,19 @@ void NumberSpeculativeLowering::Run() std::vector gateList; acc_.GetAllGates(gateList); for (auto gate : gateList) { - if (acc_.GetOpCode(gate) != OpCode::INDEX_CHECK) { - VisitGate(gate); - } else { - checkedGates_.push_back(gate); + auto op = acc_.GetOpCode(gate); + switch (op) { + case OpCode::RANGE_GUARD: { + rangeGuardGates_.push_back(gate); + break; + } + default: { + VisitGate(gate); + } } } - for (auto check : checkedGates_) { - VisitIndexCheck(check); + for (auto rangeGuard : rangeGuardGates_) { + VisitRangeGuard(rangeGuard); } } @@ -75,11 +80,27 @@ void NumberSpeculativeLowering::VisitGate(GateRef gate) VisitLoadElement(gate); break; } + case OpCode::INDEX_CHECK: { + VisitIndexCheck(gate); + break; + } + case OpCode::RANGE_CHECK_PREDICATE: { + VisitRangeCheckPredicate(gate); + break; + } case OpCode::LOAD_ARRAY_LENGTH: case OpCode::LOAD_TYPED_ARRAY_LENGTH: { VisitLoadArrayLength(gate); break; } + case OpCode::LOAD_STRING_LENGTH: { + VisitLoadStringLength(gate); + break; + } + case OpCode::LOAD_PROPERTY: { + VisitLoadProperty(gate); + break; + } default: break; } @@ -322,8 +343,8 @@ void NumberSpeculativeLowering::VisitNumberDiv(GateRef gate) result = builder_.Int32DivWithCheck(left, right); acc_.SetMachineType(gate, MachineType::I32); } else { - builder_.Float64CheckRightIsZero(right); - result = builder_.BinaryArithmetic(circuit_->Fdiv(), MachineType::F64, left, right); + result = builder_.BinaryArithmetic(circuit_->Fdiv(), + MachineType::F64, left, right, GateType::NJSValue()); acc_.SetMachineType(gate, MachineType::F64); } acc_.SetGateType(gate, GateType::NJSValue()); @@ -346,7 +367,9 @@ void NumberSpeculativeLowering::VisitNumberMod(GateRef gate) } GateRef result = Circuit::NullGate(); if (gateType.IsIntType()) { - builder_.Int32CheckRightIsZero(right); + if(GetRange(right).MaybeZero()) { + builder_.Int32CheckRightIsZero(right); + } result = CalculateInts(left, right); UpdateRange(result, GetRange(gate)); acc_.SetMachineType(gate, MachineType::I32); @@ -408,13 +431,34 @@ void NumberSpeculativeLowering::VisitIsTrueOrFalse(GateRef gate, bool flag) void NumberSpeculativeLowering::VisitBooleanJump(GateRef gate) { - TypedJumpOp jumpOp = acc_.GetTypedJumpAccessor(gate).GetTypedJumpOp(); + TypedJumpAccessor jumpAcc = acc_.GetTypedJumpAccessor(gate); + TypedJumpOp jumpOp = jumpAcc.GetTypedJumpOp(); ASSERT((jumpOp == TypedJumpOp::TYPED_JEQZ) || (jumpOp == TypedJumpOp::TYPED_JNEZ)); GateRef condition = acc_.GetValueIn(gate, 0); + uint32_t trueWeight = 
BranchWeight::ONE_WEIGHT; + uint32_t falseWeight = BranchWeight::ONE_WEIGHT; + BranchKind kind = jumpAcc.GetBranchKind(); + switch (kind) { + case BranchKind::TRUE_BRANCH: + trueWeight = BranchWeight::WEAK_WEIGHT; + break; + case BranchKind::FALSE_BRANCH: + falseWeight = BranchWeight::WEAK_WEIGHT; + break; + case BranchKind::STRONG_TRUE_BRANCH: + trueWeight = BranchWeight::STRONG_WEIGHT; + break; + case BranchKind::STRONG_FALSE_BRANCH: + falseWeight = BranchWeight::STRONG_WEIGHT; + break; + default: + break; + } if (jumpOp == TypedJumpOp::TYPED_JEQZ) { + std::swap(trueWeight, falseWeight); condition = builder_.BoolNot(condition); } - GateRef ifBranch = builder_.Branch(acc_.GetState(gate), condition); + GateRef ifBranch = builder_.Branch(acc_.GetState(gate), condition, trueWeight, falseWeight); acc_.ReplaceGate(gate, ifBranch, acc_.GetDep(gate), Circuit::NullGate()); } @@ -485,22 +529,43 @@ void NumberSpeculativeLowering::VisitPhi(GateRef gate) } } +void NumberSpeculativeLowering::VisitRangeCheckPredicate(GateRef gate) +{ + acc_.SetGateType(gate, GateType::NJSValue()); + acc_.SetMachineType(gate, MachineType::I32); +} + +void NumberSpeculativeLowering::VisitIndexCheck(GateRef gate) +{ + acc_.SetGateType(gate, GateType::NJSValue()); + acc_.SetMachineType(gate, MachineType::I32); +} + void NumberSpeculativeLowering::VisitLoadArrayLength(GateRef gate) { acc_.SetGateType(gate, GateType::NJSValue()); acc_.SetMachineType(gate, MachineType::I32); } +void NumberSpeculativeLowering::VisitLoadStringLength(GateRef gate) +{ + acc_.SetGateType(gate, GateType::NJSValue()); + acc_.SetMachineType(gate, MachineType::I32); +} + void NumberSpeculativeLowering::VisitLoadElement(GateRef gate) { auto op = acc_.GetTypedLoadOp(gate); switch (op) { + case TypedLoadOp::INT8ARRAY_LOAD_ELEMENT: + case TypedLoadOp::UINT8ARRAY_LOAD_ELEMENT: + case TypedLoadOp::UINT8CLAMPEDARRAY_LOAD_ELEMENT: + case TypedLoadOp::INT16ARRAY_LOAD_ELEMENT: + case TypedLoadOp::UINT16ARRAY_LOAD_ELEMENT: case TypedLoadOp::INT32ARRAY_LOAD_ELEMENT: acc_.SetMachineType(gate, MachineType::I32); break; case TypedLoadOp::FLOAT32ARRAY_LOAD_ELEMENT: - acc_.SetMachineType(gate, MachineType::F64); - break; case TypedLoadOp::FLOAT64ARRAY_LOAD_ELEMENT: acc_.SetMachineType(gate, MachineType::F64); break; @@ -510,29 +575,36 @@ void NumberSpeculativeLowering::VisitLoadElement(GateRef gate) acc_.SetGateType(gate, GateType::NJSValue()); } -void NumberSpeculativeLowering::VisitIndexCheck(GateRef gate) +void NumberSpeculativeLowering::VisitLoadProperty(GateRef gate) { - auto type = acc_.GetParamGateType(gate); - if (!tsManager_->IsArrayTypeKind(type)) { - // return checked index value + TypeInfo output = GetOutputType(gate); + if (output == TypeInfo::INT32 || output == TypeInfo::FLOAT64) { + Environment env(gate, circuit_, &builder_); + ASSERT(acc_.GetNumValueIn(gate) == 2); // 2: receiver, plr + GateRef receiver = acc_.GetValueIn(gate, 0); + GateRef propertyLookupResult = acc_.GetValueIn(gate, 1); + PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult)); + ASSERT(plr.IsLocal() || plr.IsFunction()); + + // Hole check? 
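+        // Editorial note, not part of the original patch: plr was resolved at compile
+        // time, so the lowered load below is a raw in-object read at plr.GetOffset()
+        // rather than a generic runtime property lookup.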
+        GateRef result = Circuit::NullGate();
+        if (output == TypeInfo::FLOAT64) {
+            result = builder_.LoadConstOffset(VariableType::FLOAT64(), receiver, plr.GetOffset());
+            acc_.SetMachineType(gate, MachineType::F64);
+        } else {
+            result = builder_.LoadConstOffset(VariableType::INT32(), receiver, plr.GetOffset());
+            acc_.SetMachineType(gate, MachineType::I32);
+        }
         acc_.SetGateType(gate, GateType::NJSValue());
-        acc_.SetMachineType(gate, MachineType::I32);
-        return;
+        acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
     }
+}
+
+void NumberSpeculativeLowering::VisitRangeGuard(GateRef gate)
+{
     Environment env(gate, circuit_, &builder_);
-    GateRef index = acc_.GetValueIn(gate, 1);
-    if (!noCheck_) {
-        GateRef length = acc_.GetValueIn(gate, 0);
-        RangeInfo indexRange = GetRange(index);
-        if (indexRange.GetMin() < 0) {
-            builder_.NegativeIndexCheck(index);
-        }
-        builder_.LargeIndexCheck(index, length);
-        // return checked index value
-        acc_.SetGateType(gate, GateType::NJSValue());
-        acc_.SetMachineType(gate, MachineType::I32);
-    }
-    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), index);
+    GateRef inputLength = acc_.GetValueIn(gate, 0);
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), inputLength);
 }
 
 template<TypedBinOp Op>
@@ -557,10 +629,14 @@ GateRef NumberSpeculativeLowering::CalculateInts(GateRef left, GateRef right)
             break;
         }
         case TypedBinOp::TYPED_MUL:
+            if (!leftRange.MaybeMulOverflowOrUnderflow(rightRange)) {
+                return builder_.Int32Mul(left, right);
+            }
             res = builder_.MulWithOverflow(left, right);
             break;
         case TypedBinOp::TYPED_MOD: {
-            return builder_.BinaryArithmetic(circuit_->Smod(), MachineType::I32, left, right);
+            return builder_.BinaryArithmetic(circuit_->Smod(),
+                                             MachineType::I32, left, right, GateType::NJSValue());
             break;
         }
         default:
@@ -747,7 +823,7 @@ GateRef NumberSpeculativeLowering::MonocularDouble(GateRef value)
             res = builder_.DoubleSub(value, builder_.Double(1));
             break;
         case TypedUnOp::TYPED_NEG:
-            res = builder_.DoubleSub(builder_.Double(0), value);
+            res = builder_.DoubleMul(builder_.Double(-1), value);
             break;
         default:
             break;
@@ -758,7 +834,9 @@ void NumberSpeculativeLowering::UpdateRange(GateRef gate, const RangeInfo& range)
 {
     auto id = acc_.GetId(gate);
-    rangeInfos_.resize(id + 1, RangeInfo::ANY());
+    if (id >= rangeInfos_.size()) {
+        rangeInfos_.resize(id + 1, RangeInfo::ANY());
+    }
     rangeInfos_[id] = range;
 }
diff --git a/ecmascript/compiler/number_speculative_lowering.h b/ecmascript/compiler/number_speculative_lowering.h
index b8613a5c4041f3343c242da43230859a80f8436c..bf9a7b4752214620c550581a23c84340c69b9613 100644
--- a/ecmascript/compiler/number_speculative_lowering.h
+++ b/ecmascript/compiler/number_speculative_lowering.h
@@ -27,10 +27,10 @@ namespace panda::ecmascript::kungfu {
 class NumberSpeculativeLowering {
 public:
-    NumberSpeculativeLowering(Circuit* circuit, Chunk* chunk, TSManager* tsManager,
-                              ChunkVector<TypeInfo>& typeInfos, ChunkVector<RangeInfo>& rangeInfos, bool noCheck)
-        : circuit_(circuit), acc_(circuit), builder_(circuit), tsManager_(tsManager),
-          typeInfos_(typeInfos), rangeInfos_(rangeInfos), checkedGates_(chunk), noCheck_(noCheck) {}
+    NumberSpeculativeLowering(Circuit* circuit, Chunk* chunk, ChunkVector<TypeInfo>& typeInfos,
+                              ChunkVector<RangeInfo>& rangeInfos)
+        : circuit_(circuit), acc_(circuit), builder_(circuit), typeInfos_(typeInfos),
+          rangeInfos_(rangeInfos), rangeGuardGates_(chunk) {}
     void Run();
 
 private:
@@ -44,9 +44,13 @@ private:
     void VisitPhi(GateRef gate);
     void VisitUndefinedStrictEq(GateRef gate);
     void VisitCallBuiltins(GateRef gate);
+    void VisitRangeGuard(GateRef gate);
+    void VisitRangeCheckPredicate(GateRef gate);
     void VisitIndexCheck(GateRef gate);
     void VisitLoadArrayLength(GateRef gate);
+    void VisitLoadStringLength(GateRef gate);
     void VisitLoadElement(GateRef gate);
+    void VisitLoadProperty(GateRef gate);
 
     template<TypedBinOp Op>
     void VisitNumberCalculate(GateRef gate);
@@ -95,11 +99,9 @@ private:
     Circuit* circuit_;
     GateAccessor acc_;
     CircuitBuilder builder_;
-    TSManager* tsManager_ {nullptr};
     ChunkVector<TypeInfo>& typeInfos_;
     ChunkVector<RangeInfo>& rangeInfos_;
-    ChunkVector<GateRef> checkedGates_;
-    bool noCheck_;
+    ChunkVector<GateRef> rangeGuardGates_;
 };
 }  // panda::ecmascript::kungfu
 #endif  // ECMASCRIPT_COMPILER_NUMBER_SPECULATIVE_LOWERING_H
diff --git a/ecmascript/compiler/number_speculative_retype.cpp b/ecmascript/compiler/number_speculative_retype.cpp
index 6c3abbc48697ddb34a577f445af997873aea4366..7f1b0dd012be74e9dc04e6e03a879abcf255769b 100644
--- a/ecmascript/compiler/number_speculative_retype.cpp
+++ b/ecmascript/compiler/number_speculative_retype.cpp
@@ -51,6 +51,28 @@ GateRef NumberSpeculativeRetype::SetOutputType(GateRef gate, PGOSampleType pgoType)
     return old == type ? Circuit::NullGate() : gate;
 }
 
+GateRef NumberSpeculativeRetype::SetOutputType(GateRef gate, Representation rep)
+{
+    TypeInfo type = GetOutputTypeInfo(gate);
+    TypeInfo old = type;
+    if (rep == Representation::INT) {
+        type = TypeInfo::INT32;
+    } else if (rep == Representation::DOUBLE) {
+        type = TypeInfo::FLOAT64;
+    } else {
+        type = TypeInfo::TAGGED;
+    }
+    SetOutputTypeInfo(gate, type);
+    return old == type ? Circuit::NullGate() : gate;
+}
+
+GateRef NumberSpeculativeRetype::SetOutputType(GateRef gate, TypeInfo type)
+{
+    TypeInfo old = GetOutputTypeInfo(gate);
+    SetOutputTypeInfo(gate, type);
+    return old == type ? Circuit::NullGate() : gate;
+}
+
 GateRef NumberSpeculativeRetype::VisitGate(GateRef gate)
 {
     OpCode op = acc_.GetOpCode(gate);
@@ -61,17 +83,23 @@ GateRef NumberSpeculativeRetype::VisitGate(GateRef gate)
             return VisitTypedUnaryOp(gate);
         case OpCode::TYPED_CONDITION_JUMP:
             return VisitTypedConditionJump(gate);
+        case OpCode::RANGE_CHECK_PREDICATE:
+            return VisitRangeCheckPredicate(gate);
         case OpCode::INDEX_CHECK:
             return VisitIndexCheck(gate);
         case OpCode::LOAD_ARRAY_LENGTH:
         case OpCode::LOAD_TYPED_ARRAY_LENGTH:
             return VisitLoadArrayLength(gate);
+        case OpCode::LOAD_STRING_LENGTH:
+            return VisitLoadStringLength(gate);
         case OpCode::LOAD_ELEMENT:
             return VisitLoadElement(gate);
         case OpCode::STORE_ELEMENT:
             return VisitStoreElement(gate);
         case OpCode::STORE_PROPERTY:
             return VisitStoreProperty(gate);
+        case OpCode::LOAD_PROPERTY:
+            return VisitLoadProperty(gate);
         case OpCode::VALUE_SELECTOR:
             return VisitPhi(gate);
         case OpCode::CONSTANT:
@@ -84,22 +112,19 @@
             return VisitFrameState(gate);
         case OpCode::CALL_GETTER:
         case OpCode::CALL_SETTER:
-        case OpCode::LOAD_PROPERTY:
         case OpCode::CONSTRUCT:
         case OpCode::TYPEDCALL:
         case OpCode::TYPEDFASTCALL:
         case OpCode::OBJECT_TYPE_CHECK:
-            return VisitWithConstantValue(gate, 1); // ignoreIndex
+            return VisitWithConstantValue(gate, PROPERTY_LOOKUP_RESULT_INDEX);
         case OpCode::LOOP_EXIT_VALUE:
-            return VisitLoopExitValue(gate);
+        case OpCode::RANGE_GUARD:
+            return VisitIntermediateValue(gate);
         case OpCode::JS_BYTECODE:
         case OpCode::PRIMITIVE_TYPE_CHECK:
         case OpCode::STABLE_ARRAY_CHECK:
         case OpCode::TYPED_ARRAY_CHECK:
-        case OpCode::JSCALLTARGET_TYPE_CHECK:
-        case OpCode::JSFASTCALLTARGET_TYPE_CHECK:
-        case OpCode::JSCALLTHISTARGET_TYPE_CHECK:
-        case OpCode::JSFASTCALLTHISTARGET_TYPE_CHECK:
+        case OpCode::TYPED_CALLTARGETCHECK_OP:
         case OpCode::TYPED_CALL_CHECK:
         case OpCode::HEAP_ALLOC:
         case OpCode::TYPED_NEW_ALLOCATE_THIS:
@@ -110,6 +135,9 @@
         case OpCode::FRAME_ARGS:
         case OpCode::SAVE_REGISTER:
         case OpCode::RESTORE_REGISTER:
+        case OpCode::LOAD_CONST_OFFSET:
+        case OpCode::STORE_CONST_OFFSET:
+        case OpCode::LEX_VAR_IS_HOLE_CHECK:
             return VisitOthers(gate);
         default:
             return Circuit::NullGate();
@@ -169,7 +197,7 @@ GateRef NumberSpeculativeRetype::VisitConstant(GateRef gate)
     return Circuit::NullGate();
 }
 
-GateRef NumberSpeculativeRetype::VisitLoopExitValue(GateRef gate)
+GateRef NumberSpeculativeRetype::VisitIntermediateValue(GateRef gate)
 {
     GateRef value = acc_.GetValueIn(gate, 0);
     TypeInfo valueInfo = GetOutputTypeInfo(value);
@@ -192,12 +220,12 @@ TypeInfo NumberSpeculativeRetype::GetOuputForPhi(GateRef gate, bool ignoreConsta
         if (inputInfo == TypeInfo::NONE) {
             continue;
         }
-        if (ignoreConstant && acc_.GetOpCode(input) == OpCode::CONSTANT) {
+        if (ignoreConstant && acc_.IsConstantNumber(input)) {
             hasConstantInput = true;
             continue;
         }
         // use less general input as phi output
-        if (tempType == TypeInfo::NONE || tempType == TypeInfo::TAGGED) {
+        if (tempType == TypeInfo::NONE) {
             tempType = inputInfo;
         } else if (tempType != inputInfo) {
             tempType = TypeInfo::TAGGED;
@@ -224,17 +252,20 @@ GateRef NumberSpeculativeRetype::VisitPhi(GateRef gate)
     }
     ASSERT(IsConvert());
     size_t valueNum = acc_.GetNumValueIn(gate);
-    auto state = acc_.GetState(gate);
-    auto dependSelector = acc_.GetDependSelectorFromMerge(state);
+    auto merge = acc_.GetState(gate);
+    auto dependSelector = acc_.GetDependSelectorFromMerge(merge);
     TypeInfo output = GetOutputTypeInfo(gate);
     for (size_t i = 0; i < valueNum; ++i) {
         GateRef input = acc_.GetValueIn(gate, i);
         if (output == TypeInfo::TAGGED || output == TypeInfo::NONE) {
             input = ConvertToTagged(input);
         } else {
+            auto state = acc_.GetState(merge, i);
             auto depend = acc_.GetDep(dependSelector, i);
             Environment env(state, depend, {}, circuit_, &builder_);
             input = ConvertTaggedToNJSValue(input, output);
+            acc_.ReplaceStateIn(merge, builder_.GetState(), i);
+            acc_.ReplaceDependIn(dependSelector, builder_.GetDepend(), i);
         }
         acc_.ReplaceValueIn(gate, input, i);
     }
@@ -308,7 +339,6 @@ GateRef NumberSpeculativeRetype::VisitTypedUnaryOp(GateRef gate)
         case TypedUnOp::TYPED_NOT:
             return VisitNumberNot(gate);
         case TypedUnOp::TYPED_ISFALSE:
-            return VisitIsTrueOrFalse(gate);
         case TypedUnOp::TYPED_ISTRUE:
             return VisitIsTrueOrFalse(gate);
         default:
@@ -378,7 +408,7 @@ GateRef NumberSpeculativeRetype::VisitNumberShiftAndLogical(GateRef gate)
                 rightType = GateType::NumberType();
             }
         }
-        ConvertForIntOperator(gate, leftType, rightType);
+        ConvertForShiftAndLogicalOperator(gate, leftType, rightType);
     }
     return Circuit::NullGate();
 }
@@ -404,6 +434,8 @@ GateRef NumberSpeculativeRetype::VisitIntMonocular(GateRef gate)
     if (IsConvert()) {
         GateRef value = acc_.GetValueIn(gate, 0);
         acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(value, GateType::IntType()), 0);
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
     return Circuit::NullGate();
 }
@@ -418,6 +450,8 @@ GateRef NumberSpeculativeRetype::VisitDoubleMonocular(GateRef gate)
         TypedUnaryAccessor accessor(acc_.TryGetValue(gate));
         GateRef value = acc_.GetValueIn(gate, 0);
         acc_.ReplaceValueIn(gate, CheckAndConvertToFloat64(value, accessor.GetTypeValue()), 0);
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
     return Circuit::NullGate();
 }
@@ -436,6 +470,8 @@ GateRef NumberSpeculativeRetype::VisitIsTrueOrFalse(GateRef gate)
         auto input = CheckAndConvertToBool(value, valueType);
         ResizeAndSetTypeInfo(input, TypeInfo::INT1);
         acc_.ReplaceValueIn(gate, input, 0);
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
     return Circuit::NullGate();
 }
@@ -452,6 +488,8 @@ GateRef NumberSpeculativeRetype::VisitNumberNot(GateRef gate)
         Environment env(gate, circuit_, &builder_);
         GateRef value = acc_.GetValueIn(gate, 0);
         acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(value, valueType), 0);
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
     return Circuit::NullGate();
 }
@@ -473,6 +511,8 @@ GateRef NumberSpeculativeRetype::VisitBooleanJump(GateRef gate)
         }
         ResizeAndSetTypeInfo(input, TypeInfo::INT1);
         acc_.ReplaceValueIn(gate, input, 0);
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
     return Circuit::NullGate();
 }
@@ -518,6 +558,8 @@ GateRef NumberSpeculativeRetype::VisitCallBuiltins(GateRef gate)
             GateRef input = acc_.GetValueIn(gate, i);
             acc_.ReplaceValueIn(gate, CheckAndConvertToFloat64(input, GateType::NumberType()), i);
         }
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
     return Circuit::NullGate();
 }
@@ -571,6 +613,8 @@ GateRef NumberSpeculativeRetype::CheckAndConvertToBool(GateRef gate, GateType gateType)
             return gate;
         case TypeInfo::INT32:
             return builder_.ConvertInt32ToBool(gate);
+        case TypeInfo::UINT32:
+            return builder_.ConvertUInt32ToBool(gate);
         case TypeInfo::FLOAT64:
             return builder_.ConvertFloat64ToBool(gate);
         case TypeInfo::TAGGED: {
@@ -651,6 +695,23 @@ void NumberSpeculativeRetype::ConvertForIntOperator(GateRef gate, GateType leftT
     acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(left, leftType), 0);
     acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(right, rightType), 1);
+
+    acc_.ReplaceStateIn(gate, builder_.GetState());
+    acc_.ReplaceDependIn(gate, builder_.GetDepend());
+}
+
+void NumberSpeculativeRetype::ConvertForShiftAndLogicalOperator(GateRef gate, GateType leftType, GateType rightType)
+{
+    GateRef left = acc_.GetValueIn(gate, 0);
+    GateRef right = acc_.GetValueIn(gate, 1);
+    GateRef cLeft = CheckAndConvertToInt32(left, leftType, ConvertSupport::ENABLE, OpType::SHIFT_AND_LOGICAL);
+    GateRef cRight = CheckAndConvertToInt32(right, rightType, ConvertSupport::ENABLE, OpType::SHIFT_AND_LOGICAL);
+
+    acc_.ReplaceValueIn(gate, cLeft, 0);
+    acc_.ReplaceValueIn(gate, cRight, 1);
+
+    acc_.ReplaceStateIn(gate, builder_.GetState());
+    acc_.ReplaceDependIn(gate, builder_.GetDepend());
 }
 
 void NumberSpeculativeRetype::ConvertForDoubleOperator(GateRef gate, GateType leftType, GateType rightType)
@@ -660,6 +721,9 @@ void NumberSpeculativeRetype::ConvertForDoubleOperator(GateRef gate, GateType le
     acc_.ReplaceValueIn(gate, CheckAndConvertToFloat64(left, leftType), 0);
     acc_.ReplaceValueIn(gate, CheckAndConvertToFloat64(right, rightType), 1);
+
+    acc_.ReplaceStateIn(gate, builder_.GetState());
+    acc_.ReplaceDependIn(gate, builder_.GetDepend());
 }
 
 GateRef NumberSpeculativeRetype::TryConvertConstant(GateRef gate, bool needInt32)
@@ -667,19 +731,35 @@ GateRef NumberSpeculativeRetype::TryConvertConstant(GateRef gate, bool needInt32
     if (acc_.GetOpCode(gate) != OpCode::CONSTANT) {
         return Circuit::NullGate();
     }
-    GateType gateType = acc_.GetGateType(gate);
-    if (gateType.IsIntType()) {
-        int32_t rawValue = acc_.GetInt32FromConstant(gate);
-        double value = static_cast<double>(rawValue);
-        return needInt32 ? builder_.Int32(rawValue) : builder_.Double(value);
-    } else if (gateType.IsDoubleType() && !needInt32) {
-        double rawValue = acc_.GetFloat64FromConstant(gate);
+
+    if (acc_.GetGateType(gate).IsNJSValueType()) {
+        MachineType mType = acc_.GetMachineType(gate);
+        if (mType == MachineType::I32) {
+            int32_t rawValue = acc_.GetInt32FromConstant(gate);
+            double value = static_cast<double>(rawValue);
+            return needInt32 ? builder_.Int32(rawValue) : builder_.Double(value);
+        } else if (mType == MachineType::F64 && !needInt32) {
+            double rawValue = acc_.GetFloat64FromConstant(gate);
+            return builder_.Double(rawValue);
+        } else {
+            return Circuit::NullGate();
+        }
+    }
+
+    JSTaggedValue value(acc_.GetConstantValue(gate));
+    if (value.IsInt()) {
+        int32_t rawValue = value.GetInt();
+        double doubleValue = static_cast<double>(rawValue);
+        return needInt32 ? builder_.Int32(rawValue) : builder_.Double(doubleValue);
+    } else if (value.IsDouble() && !needInt32) {
+        double rawValue = value.GetDouble();
         return builder_.Double(rawValue);
     }
     return Circuit::NullGate();
 }
 
-GateRef NumberSpeculativeRetype::CheckAndConvertToInt32(GateRef gate, GateType gateType)
+GateRef NumberSpeculativeRetype::CheckAndConvertToInt32(GateRef gate, GateType gateType, ConvertSupport support,
+                                                        OpType type)
 {
     auto result = TryConvertConstant(gate, true);
     if (result != Circuit::NullGate()) {
@@ -689,13 +769,23 @@ GateRef NumberSpeculativeRetype::CheckAndConvertToInt32(GateRef gate, GateType g
     }
     TypeInfo output = GetOutputTypeInfo(gate);
     switch (output) {
+        case TypeInfo::INT1:
+            result = builder_.ConvertBoolToInt32(gate, support);
+            break;
         case TypeInfo::INT32:
             return gate;
+        case TypeInfo::UINT32: {
+            if (type != OpType::SHIFT_AND_LOGICAL) {
+                result = builder_.CheckUInt32AndConvertToInt32(gate);
+            } else {
+                result = gate;
+            }
+            break;
+        }
         case TypeInfo::FLOAT64:
             result = builder_.ConvertFloat64ToInt32(gate);
             break;
         case TypeInfo::TAGGED: {
-            ASSERT(gateType.IsNumberType());
             if (gateType.IsIntType()) {
                 result = builder_.CheckTaggedIntAndConvertToInt32(gate);
             } else if (gateType.IsDoubleType()) {
@@ -715,7 +805,7 @@ GateRef NumberSpeculativeRetype::CheckAndConvertToInt32(GateRef gate, GateType g
     return result;
 }
 
-GateRef NumberSpeculativeRetype::CheckAndConvertToFloat64(GateRef gate, GateType gateType)
+GateRef NumberSpeculativeRetype::CheckAndConvertToFloat64(GateRef gate, GateType gateType, ConvertSupport support)
 {
     auto result = TryConvertConstant(gate, false);
     if (result != Circuit::NullGate()) {
@@ -725,9 +815,15 @@ GateRef NumberSpeculativeRetype::CheckAndConvertToFloat64(GateRef gate, GateType
     }
     TypeInfo output = GetOutputTypeInfo(gate);
     switch (output) {
+        case TypeInfo::INT1:
+            result = builder_.ConvertBoolToFloat64(gate, support);
+            break;
         case TypeInfo::INT32:
             result = builder_.ConvertInt32ToFloat64(gate);
             break;
+        case TypeInfo::UINT32:
+            result = builder_.ConvertUInt32ToFloat64(gate);
+            break;
         case TypeInfo::FLOAT64:
             return gate;
         case TypeInfo::TAGGED: {
@@ -758,6 +854,8 @@ GateRef NumberSpeculativeRetype::CheckAndConvertToTagged(GateRef gate, GateType
             return builder_.ConvertBoolToTaggedBoolean(gate);
         case TypeInfo::INT32:
             return builder_.ConvertInt32ToTaggedInt(gate);
+        case TypeInfo::UINT32:
+            return builder_.ConvertUInt32ToTaggedNumber(gate);
         case TypeInfo::FLOAT64:
             return builder_.ConvertFloat64ToTaggedDouble(gate);
         case TypeInfo::TAGGED: {
@@ -780,6 +878,8 @@ GateRef NumberSpeculativeRetype::ConvertToTagged(GateRef gate)
             return builder_.ConvertBoolToTaggedBoolean(gate);
         case TypeInfo::INT32:
             return builder_.ConvertInt32ToTaggedInt(gate);
+        case TypeInfo::UINT32:
+            return builder_.ConvertUInt32ToTaggedNumber(gate);
         case TypeInfo::FLOAT64:
             return builder_.ConvertFloat64ToTaggedDouble(gate);
         case TypeInfo::NONE:
@@ -793,6 +893,28 @@
     }
 }
 
+GateRef NumberSpeculativeRetype::VisitRangeCheckPredicate(GateRef gate)
+{
+    if (IsRetype()) {
+        return SetOutputType(gate, GateType::IntType());
+    }
+
+    if (IsConvert()) {
+        Environment env(gate, circuit_, &builder_);
+        GateRef value0 = acc_.GetValueIn(gate, 0);
+        GateRef value1 = acc_.GetValueIn(gate, 1);
+        GateType value0Type = acc_.GetGateType(value0);
+        GateType value1Type = acc_.GetGateType(value1);
+        acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(value0, value0Type), 0);
+        acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(value1, value1Type), 1);
+
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
+    }
+
+    return Circuit::NullGate();
+}
+
 GateRef NumberSpeculativeRetype::VisitIndexCheck(GateRef gate)
 {
     if (IsRetype()) {
@@ -803,15 +925,13 @@ GateRef NumberSpeculativeRetype::VisitIndexCheck(GateRef gate)
         Environment env(gate, circuit_, &builder_);
         GateRef receiver = acc_.GetValueIn(gate, 0);
         GateRef index = acc_.GetValueIn(gate, 1);
-        GateType gateType = acc_.GetParamGateType(gate);
         GateType receiverType = acc_.GetGateType(receiver);
         GateType indexType = acc_.GetGateType(index);
-        if (tsManager_->IsArrayTypeKind(gateType)) {
-            // IndexCheck receive length at first value input.
-            ASSERT(receiverType.IsNumberType());
-            acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(receiver, receiverType), 0);
-        }
+        acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(receiver, receiverType), 0);
         acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(index, indexType), 1);
+
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
 
     return Circuit::NullGate();
@@ -826,13 +946,29 @@ GateRef NumberSpeculativeRetype::VisitLoadArrayLength(GateRef gate)
     return Circuit::NullGate();
 }
 
+GateRef NumberSpeculativeRetype::VisitLoadStringLength(GateRef gate)
+{
+    if (IsRetype()) {
+        return SetOutputType(gate, GateType::IntType());
+    }
+
+    return Circuit::NullGate();
+}
+
 GateRef NumberSpeculativeRetype::VisitLoadElement(GateRef gate)
 {
     if (IsRetype()) {
         auto op = acc_.GetTypedLoadOp(gate);
         switch (op) {
+            case TypedLoadOp::INT8ARRAY_LOAD_ELEMENT:
+            case TypedLoadOp::UINT8ARRAY_LOAD_ELEMENT:
+            case TypedLoadOp::UINT8CLAMPEDARRAY_LOAD_ELEMENT:
+            case TypedLoadOp::INT16ARRAY_LOAD_ELEMENT:
+            case TypedLoadOp::UINT16ARRAY_LOAD_ELEMENT:
             case TypedLoadOp::INT32ARRAY_LOAD_ELEMENT:
                 return SetOutputType(gate, GateType::IntType());
+            case TypedLoadOp::UINT32ARRAY_LOAD_ELEMENT:
+                return SetOutputType(gate, TypeInfo::UINT32);
             case TypedLoadOp::FLOAT32ARRAY_LOAD_ELEMENT:
             case TypedLoadOp::FLOAT64ARRAY_LOAD_ELEMENT:
                 return SetOutputType(gate, GateType::DoubleType());
@@ -846,6 +982,8 @@
         GateRef index = acc_.GetValueIn(gate, 1);
         GateType indexType = acc_.GetGateType(index);
         acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(index, indexType), 1);
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
 
     return Circuit::NullGate();
@@ -865,17 +1003,25 @@ GateRef NumberSpeculativeRetype::VisitStoreElement(GateRef gate)
         acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(index, indexType), 1);
         auto op = acc_.GetTypedStoreOp(gate);
         switch (op) {
+            case TypedStoreOp::INT8ARRAY_STORE_ELEMENT:
+            case TypedStoreOp::UINT8ARRAY_STORE_ELEMENT:
+            case TypedStoreOp::UINT8CLAMPEDARRAY_STORE_ELEMENT:
+            case TypedStoreOp::INT16ARRAY_STORE_ELEMENT:
+            case TypedStoreOp::UINT16ARRAY_STORE_ELEMENT:
             case TypedStoreOp::INT32ARRAY_STORE_ELEMENT:
-                acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(value, GateType::IntType()), 2); // 2: value idx
+            case TypedStoreOp::UINT32ARRAY_STORE_ELEMENT:
+                acc_.ReplaceValueIn(gate, CheckAndConvertToInt32(value, GateType::NumberType()), 2); // 2: value idx
                 break;
             case TypedStoreOp::FLOAT32ARRAY_STORE_ELEMENT:
             case TypedStoreOp::FLOAT64ARRAY_STORE_ELEMENT:
-                acc_.ReplaceValueIn(gate, CheckAndConvertToFloat64(value, GateType::DoubleType()), 2); // 2: value idx
+                acc_.ReplaceValueIn(gate, CheckAndConvertToFloat64(value, GateType::NumberType()), 2); // 2: value idx
                 break;
             default:
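+                // Editorial note, not part of the original patch: every remaining
+                // element kind keeps its value tagged, hence ConvertToTagged below.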
                acc_.ReplaceValueIn(gate, ConvertToTagged(value), 2); // 2: value idx
                break;
         }
+        acc_.ReplaceStateIn(gate, builder_.GetState());
+        acc_.ReplaceDependIn(gate, builder_.GetDepend());
     }
 
     return Circuit::NullGate();
@@ -888,22 +1034,46 @@ GateRef NumberSpeculativeRetype::VisitStoreProperty(GateRef gate)
     }
     ASSERT(IsConvert());
     GateRef value = acc_.GetValueIn(gate, 2); // 2: value
-    TypeInfo output = GetOutputTypeInfo(value);
-    switch (output) {
-        case TypeInfo::INT1:
-        case TypeInfo::INT32:
-        case TypeInfo::FLOAT64:
+
+    Environment env(gate, circuit_, &builder_);
+    GateRef propertyLookupResult = acc_.GetValueIn(gate, 1);
+    PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult));
+    if (plr.GetRepresentation() == Representation::DOUBLE) {
+        acc_.SetMetaData(gate, circuit_->StorePropertyNoBarrier());
+        acc_.ReplaceValueIn(
+            gate, CheckAndConvertToFloat64(value, acc_.GetGateType(value), ConvertSupport::DISABLE), 2); // 2: value
+    } else if (plr.GetRepresentation() == Representation::INT) {
+        acc_.SetMetaData(gate, circuit_->StorePropertyNoBarrier());
+        acc_.ReplaceValueIn(
+            gate, CheckAndConvertToInt32(value, acc_.GetGateType(value), ConvertSupport::DISABLE), 2); // 2: value
+    } else {
+        TypeInfo valueType = GetOutputTypeInfo(value);
+        if (valueType == TypeInfo::INT1 || valueType == TypeInfo::INT32 || valueType == TypeInfo::FLOAT64) {
             acc_.SetMetaData(gate, circuit_->StorePropertyNoBarrier());
-            break;
-        default:
-            break;
+        }
+        acc_.ReplaceValueIn(gate, ConvertToTagged(value), 2); // 2: value
     }
+
     GateRef receiver = acc_.GetValueIn(gate, 0); // receiver
     acc_.ReplaceValueIn(gate, ConvertToTagged(receiver), 0);
-    acc_.ReplaceValueIn(gate, ConvertToTagged(value), 2); // 2: value
+    acc_.ReplaceStateIn(gate, builder_.GetState());
+    acc_.ReplaceDependIn(gate, builder_.GetDepend());
     return Circuit::NullGate();
 }
 
+GateRef NumberSpeculativeRetype::VisitLoadProperty(GateRef gate)
+{
+    if (IsRetype()) {
+        GateRef propertyLookupResult = acc_.GetValueIn(gate, 1);
+        PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult));
+        return SetOutputType(gate, plr.GetRepresentation());
+    }
+
+    ASSERT(IsConvert());
+
+    return VisitWithConstantValue(gate, PROPERTY_LOOKUP_RESULT_INDEX); // ignoreIndex
+}
+
 GateRef NumberSpeculativeRetype::VisitTypeConvert(GateRef gate)
 {
     GateRef input = acc_.GetValueIn(gate, 0);
diff --git a/ecmascript/compiler/number_speculative_retype.h b/ecmascript/compiler/number_speculative_retype.h
index 6e727146b8eb79c74067b8f43f96877eb8d6c5ad..dde4e75b2d15daa0ff1e104f135f7264da9bec8d 100644
--- a/ecmascript/compiler/number_speculative_retype.h
+++ b/ecmascript/compiler/number_speculative_retype.h
@@ -28,9 +28,9 @@ namespace panda::ecmascript::kungfu {
 
 class NumberSpeculativeRetype : public GraphVisitor {
 public:
-    NumberSpeculativeRetype(Circuit *circuit, Chunk* chunk, TSManager* tsManager, ChunkVector<TypeInfo>& typeInfos)
+    NumberSpeculativeRetype(Circuit *circuit, Chunk* chunk, ChunkVector<TypeInfo>& typeInfos)
         : GraphVisitor(circuit, chunk), acc_(circuit), builder_(circuit),
-          tsManager_(tsManager), typeInfos_(typeInfos) {}
+          typeInfos_(typeInfos) {}
     void Run();
 
     GateRef VisitGate(GateRef gate);
@@ -40,6 +40,11 @@ private:
         Convert,
     };
 
+    enum class OpType {
+        NORMAL,
+        SHIFT_AND_LOGICAL,
+    };
+
     bool IsRetype() const
     {
         return state_ == State::Retype;
@@ -52,6 +57,8 @@ private:
 
     GateRef SetOutputType(GateRef gate, PGOSampleType type);
     GateRef SetOutputType(GateRef gate, GateType type);
+    GateRef SetOutputType(GateRef gate, Representation rep);
+    GateRef SetOutputType(GateRef gate, TypeInfo type);
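+    // Editorial note, not part of the original patch: the Representation overload
+    // maps INT -> TypeInfo::INT32, DOUBLE -> TypeInfo::FLOAT64, others -> TAGGED.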
     GateRef VisitPhi(GateRef gate);
     GateRef VisitConstant(GateRef gate);
     GateRef VisitTypedBinaryOp(GateRef gate);
@@ -71,11 +78,14 @@ private:
     GateRef VisitNumberShiftAndLogical(GateRef gate);
     GateRef VisitNumberMod(GateRef gate);
     GateRef VisitBooleanJump(GateRef gate);
+    GateRef VisitRangeCheckPredicate(GateRef gate);
     GateRef VisitIndexCheck(GateRef gate);
     GateRef VisitLoadArrayLength(GateRef gate);
+    GateRef VisitLoadStringLength(GateRef gate);
     GateRef VisitLoadElement(GateRef gate);
     GateRef VisitStoreElement(GateRef gate);
     GateRef VisitStoreProperty(GateRef gate);
+    GateRef VisitLoadProperty(GateRef gate);
     GateRef VisitNumberRelated(GateRef gate);
     GateRef VisitCallBuiltins(GateRef gate);
     GateRef VisitOthers(GateRef gate);
@@ -83,15 +93,17 @@ private:
     GateRef VisitFrameState(GateRef gate);
     GateRef VisitIsTrueOrFalse(GateRef gate);
     GateRef VisitWithConstantValue(GateRef gate, size_t ignoreIndex);
-    GateRef VisitLoopExitValue(GateRef gate);
+    GateRef VisitIntermediateValue(GateRef gate);
 
     void ConvertForBinaryOp(GateRef gate);
     void ConvertForCompareOp(GateRef gate);
     void ConvertForIntOperator(GateRef gate, GateType leftType, GateType rightType);
+    void ConvertForShiftAndLogicalOperator(GateRef gate, GateType leftType, GateType rightType);
     void ConvertForDoubleOperator(GateRef gate, GateType leftType, GateType rightType);
 
-    GateRef CheckAndConvertToInt32(GateRef gate, GateType gateType);
-    GateRef CheckAndConvertToFloat64(GateRef gate, GateType gateType);
+    GateRef CheckAndConvertToInt32(GateRef gate, GateType gateType, ConvertSupport support = ConvertSupport::ENABLE,
+                                   OpType type = OpType::NORMAL);
+    GateRef CheckAndConvertToFloat64(GateRef gate, GateType gateType, ConvertSupport support = ConvertSupport::ENABLE);
     GateRef CheckAndConvertToTagged(GateRef gate, GateType gateType);
     GateRef CheckAndConvertToBool(GateRef gate, GateType gateType);
     GateRef ConvertToTagged(GateRef gate);
@@ -123,9 +135,9 @@ private:
         typeInfos_[index] = info;
     }
 
+    static constexpr size_t PROPERTY_LOOKUP_RESULT_INDEX = 1;
     GateAccessor acc_;
     CircuitBuilder builder_;
-    TSManager* tsManager_ {nullptr};
     ChunkVector<TypeInfo>& typeInfos_;
     State state_ {0};
 };
diff --git a/ecmascript/compiler/number_speculative_runner.cpp b/ecmascript/compiler/number_speculative_runner.cpp
index 7a8001a0821dc485e23cd3347fd2b65d28dca31e..65ba38db355756992961feb659d585f58375fd10 100644
--- a/ecmascript/compiler/number_speculative_runner.cpp
+++ b/ecmascript/compiler/number_speculative_runner.cpp
@@ -21,9 +21,24 @@ namespace panda::ecmascript::kungfu {
 void NumberSpeculativeRunner::Run()
 {
+    RangeGuard rangeGuard(circuit_, chunk_);
+    rangeGuard.Run();
+
+    if (IsLogEnabled()) {
+        LOG_COMPILER(INFO) << "";
+        LOG_COMPILER(INFO) << "\033[34m"
+                           << "===================="
+                           << " After range guard "
+                           << "[" << GetMethodName() << "]"
+                           << "===================="
+                           << "\033[0m";
+        circuit_->PrintAllGatesWithBytecode();
+        LOG_COMPILER(INFO) << "\033[34m" << "========================= End ==========================" << "\033[0m";
+    }
+
     auto maxId = circuit_->GetMaxGateId();
     typeInfos_.resize(maxId + 1, TypeInfo::NONE);
-    NumberSpeculativeRetype retype(circuit_, chunk_, tsManager_, typeInfos_);
+    NumberSpeculativeRetype retype(circuit_, chunk_, typeInfos_);
     retype.Run();
 
     if (IsLogEnabled()) {
@@ -40,7 +55,7 @@ void NumberSpeculativeRunner::Run()
 
     maxId = circuit_->GetMaxGateId();
     rangeInfos_.resize(maxId + 1, RangeInfo::NONE());
-    RangeAnalysis rangeAnalysis(circuit_, chunk_, typeInfos_, rangeInfos_);
+    RangeAnalysis rangeAnalysis(circuit_, chunk_, typeInfos_, rangeInfos_, IsOnHeap());
     rangeAnalysis.Run();
     if (IsLogEnabled()) {
         LOG_COMPILER(INFO) << "";
@@ -54,7 +69,7 @@ void NumberSpeculativeRunner::Run()
         LOG_COMPILER(INFO) << "\033[34m" << "========================= End ==========================" << "\033[0m";
     }
 
-    NumberSpeculativeLowering lowering(circuit_, chunk_, tsManager_, typeInfos_, rangeInfos_, noCheck_);
+    NumberSpeculativeLowering lowering(circuit_, chunk_, typeInfos_, rangeInfos_);
     lowering.Run();
     if (IsLogEnabled()) {
         LOG_COMPILER(INFO) << "";
diff --git a/ecmascript/compiler/number_speculative_runner.h b/ecmascript/compiler/number_speculative_runner.h
index 26384f1ab2f18838e4afe960fb826c4d541e9fc0..27b6c624328475ef3cb98d64bfb8e98803f8e0f0 100644
--- a/ecmascript/compiler/number_speculative_runner.h
+++ b/ecmascript/compiler/number_speculative_runner.h
@@ -19,16 +19,15 @@
 #include "ecmascript/compiler/number_speculative_lowering.h"
 #include "ecmascript/compiler/pass_manager.h"
 #include "ecmascript/compiler/number_speculative_retype.h"
+#include "ecmascript/compiler/range_guard.h"
 #include "ecmascript/ts_types/ts_manager.h"
 
 namespace panda::ecmascript::kungfu {
 class NumberSpeculativeRunner {
 public:
-    NumberSpeculativeRunner(Circuit *circuit, TSManager* tsManager,
-                            bool enableLog, const std::string& name, Chunk* chunk, PassContext *ctx)
-        : circuit_(circuit), acc_(circuit), tsManager_(tsManager), enableLog_(enableLog),
-          methodName_(name), chunk_(chunk), typeInfos_(chunk), rangeInfos_(chunk),
-          noCheck_(ctx->GetEcmaVM()->GetJSOptions().IsCompilerNoCheck()) {}
+    NumberSpeculativeRunner(Circuit *circuit, bool enableLog, const std::string& name, Chunk* chunk, bool onHeapCheck)
+        : circuit_(circuit), acc_(circuit), enableLog_(enableLog), methodName_(name),
+          chunk_(chunk), typeInfos_(chunk), rangeInfos_(chunk), onHeapCheck_(onHeapCheck) {}
 
     ~NumberSpeculativeRunner() = default;
     void Run();
@@ -38,6 +37,11 @@ private:
         return enableLog_;
     }
 
+    bool IsOnHeap() const
+    {
+        return onHeapCheck_;
+    }
+
     const std::string& GetMethodName() const
     {
         return methodName_;
@@ -45,13 +49,12 @@ private:
 
     Circuit *circuit_ {nullptr};
     GateAccessor acc_;
-    TSManager* tsManager_ {nullptr};
     bool enableLog_ {false};
     std::string methodName_;
     Chunk *chunk_ {nullptr};
     ChunkVector<TypeInfo> typeInfos_;
     ChunkVector<RangeInfo> rangeInfos_;
-    bool noCheck_ {false};
+    bool onHeapCheck_ {false};
 };
 }  // panda::ecmascript::kungfu
 #endif  // ECMASCRIPT_COMPILER_NUMBER_SPECULATIVE_RUNNER_H
diff --git a/ecmascript/compiler/object_access_helper.cpp b/ecmascript/compiler/object_access_helper.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c83c3d53ab1bc8f650fff54ad7f84dbe1b7e10c5
--- /dev/null
+++ b/ecmascript/compiler/object_access_helper.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ecmascript/compiler/object_access_helper.h"
+#include "ecmascript/ts_types/ts_type.h"
+
+namespace panda::ecmascript::kungfu {
+bool ObjectAccessHelper::Compute(ChunkVector<ObjectAccessInfo> &infos)
+{
+    ASSERT(infos.empty());
+    bool result = false;
+    ObjectAccessInfo info(type_);
+    TSTypeKind kind = tsManager_->GetTypeKind(type_.GetGTRef());
+    switch (kind) {
+        case TSTypeKind::CLASS_INSTANCE:
+            result = ComputeForClassInstance(info);
+            break;
+        case TSTypeKind::CLASS:
+        case TSTypeKind::OBJECT:
+            result = ComputeForClassOrObject(info);
+            break;
+        case TSTypeKind::UNION:
+            return ComputePolymorphism(infos);
+        default:
+            return false;
+    }
+
+    infos.emplace_back(info);
+    return result;
+}
+
+bool ObjectAccessHelper::ComputeForClassInstance(ObjectAccessInfo &info)
+{
+    int hclassIndex = tsManager_->GetHClassIndexByInstanceGateType(info.Type());
+    if (hclassIndex == -1) {
+        return false;
+    }
+
+    JSHClass *hclass = JSHClass::Cast(tsManager_->GetValueFromCache(hclassIndex).GetTaggedObject());
+    if (!hclass->HasTSSubtyping()) {
+        return false;
+    }
+
+    PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread_, hclass, key_);
+    info.Set(hclassIndex, plr);
+
+    if (IsLoading()) {
+        return plr.IsFound();
+    }
+
+    return (plr.IsFound() && !plr.IsFunction());
+}
+
+bool ObjectAccessHelper::ComputeForClassOrObject(ObjectAccessInfo &info)
+{
+    GateType type = info.Type();
+    int hclassIndex = -1;
+    if (tsManager_->IsClassTypeKind(type)) {
+        hclassIndex = tsManager_->GetConstructorHClassIndexByClassGateType(type);
+    } else if (tsManager_->IsObjectTypeKind(type)) {
+        hclassIndex = tsManager_->GetHClassIndexByObjectType(type);
+    }
+
+    if (hclassIndex == -1) {
+        return false;
+    }
+
+    JSHClass *hclass = JSHClass::Cast(tsManager_->GetValueFromCache(hclassIndex).GetTaggedObject());
+    PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread_, hclass, key_);
+    info.Set(hclassIndex, plr);
+
+    if (IsLoading()) {
+        return (plr.IsFound() && plr.IsLocal() && !plr.IsAccessor());
+    }
+
+    return (plr.IsFound() && plr.IsLocal() && !plr.IsAccessor() && plr.IsWritable());
+}
+
+bool ObjectAccessHelper::ComputePolymorphism(ChunkVector<ObjectAccessInfo> &infos)
+{
+    DISALLOW_GARBAGE_COLLECTION;
+    ASSERT(tsManager_->IsUnionTypeKind(type_));
+    JSHandle<TSUnionType> unionType(tsManager_->GetTSType(type_.GetGTRef()));
+    TaggedArray *components = TaggedArray::Cast(unionType->GetComponents().GetTaggedObject());
+    uint32_t length = components->GetLength();
+    for (uint32_t i = 0; i < length; ++i) {
+        GlobalTSTypeRef gt(components->Get(i).GetInt());
+        GateType type = GateType(gt);
+        ObjectAccessInfo info(type);
+        TSTypeKind kind = tsManager_->GetTypeKind(gt);
+        switch (kind) {
+            case TSTypeKind::CLASS_INSTANCE: {
+                if (!ComputeForClassInstance(info)) {
+                    return false;
+                }
+                break;
+            }
+            case TSTypeKind::CLASS:
+            case TSTypeKind::OBJECT: {
+                if (!ComputeForClassOrObject(info)) {
+                    return false;
+                }
+                break;
+            }
+            default:
+                return false;
+        }
+        infos.emplace_back(info);
+    }
+
+    return infos.size() <= POLYMORPHIC_MAX_SIZE;
+}
+}  // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/object_access_helper.h b/ecmascript/compiler/object_access_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef451ef541b260b595a6d76bcd3154913863f3f1
--- /dev/null
+++ b/ecmascript/compiler/object_access_helper.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_OBJECT_ACCESS_HELPER_H
+#define ECMASCRIPT_COMPILER_OBJECT_ACCESS_HELPER_H
+
+#include "ecmascript/ts_types/ts_manager.h"
+
+namespace panda::ecmascript::kungfu {
+class ObjectAccessInfo final {
+public:
+    explicit ObjectAccessInfo(GateType type, int hclassIndex = -1, PropertyLookupResult plr = PropertyLookupResult())
+        : type_(type), hclassIndex_(hclassIndex), plr_(plr) {}
+    ~ObjectAccessInfo() = default;
+
+    void Set(int hclassIndex, PropertyLookupResult plr)
+    {
+        hclassIndex_ = hclassIndex;
+        plr_ = plr;
+    }
+
+    GateType Type() const
+    {
+        return type_;
+    }
+
+    int HClassIndex() const
+    {
+        return hclassIndex_;
+    }
+
+    PropertyLookupResult Plr() const
+    {
+        return plr_;
+    }
+
+private:
+    GateType type_ {GateType::AnyType()};
+    int hclassIndex_ {-1};
+    PropertyLookupResult plr_ {};
+};
+
+// An auxiliary class serving TSHCRLowering, used for named object property access,
+// invoking TSManager and HClass.
+class ObjectAccessHelper final {
+public:
+    static constexpr size_t POLYMORPHIC_MAX_SIZE = 4;
+
+    enum AccessMode : uint8_t {
+        LOAD = 0,
+        STORE
+    };
+
+    explicit ObjectAccessHelper(TSManager *tsManager, AccessMode mode, GateRef receiver, GateType type,
+                                JSTaggedValue key, GateRef value)
+        : tsManager_(tsManager),
+          thread_(tsManager_->GetThread()),
+          mode_(mode),
+          receiver_(receiver),
+          type_(type),
+          key_(key),
+          value_(value) {}
+
+    ~ObjectAccessHelper() = default;
+
+    bool Compute(ChunkVector<ObjectAccessInfo> &infos);
+
+    AccessMode GetAccessMode() const
+    {
+        return mode_;
+    }
+
+    bool IsLoading() const
+    {
+        return mode_ == AccessMode::LOAD;
+    }
+
+    GateRef GetReceiver() const
+    {
+        return receiver_;
+    }
+
+    GateRef GetValue() const
+    {
+        return value_;
+    }
+
+private:
+    bool ComputeForClassInstance(ObjectAccessInfo &info);
+    bool ComputeForClassOrObject(ObjectAccessInfo &info);
+    bool ComputePolymorphism(ChunkVector<ObjectAccessInfo> &infos);
+
+    TSManager *tsManager_ {nullptr};
+    const JSThread *thread_ {nullptr};
+    AccessMode mode_ {};
+    GateRef receiver_ {Circuit::NullGate()};
+    GateType type_ {GateType::AnyType()};
+    JSTaggedValue key_ {JSTaggedValue::Hole()};
+    GateRef value_ {Circuit::NullGate()};
+};
+}  // panda::ecmascript::kungfu
+#endif  // ECMASCRIPT_COMPILER_OBJECT_ACCESS_HELPER_H
diff --git a/ecmascript/compiler/ohos_pkg_args.h b/ecmascript/compiler/ohos_pkg_args.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a402a498e48e3ef06368ba8bd61e07cfec3cc5b
--- /dev/null
+++ b/ecmascript/compiler/ohos_pkg_args.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_OHOS_PKG_ARGS_H
+#define ECMASCRIPT_COMPILER_OHOS_PKG_ARGS_H
+
+#include <list>
+
+#include "ecmascript/ecma_vm.h"
+#include "ecmascript/base/json_parser.h"
+#include "ecmascript/js_array.h"
+#include "ecmascript/js_handle.h"
+#include "ecmascript/js_tagged_value.h"
+#include "ecmascript/log_wrapper.h"
+#include "ecmascript/mem/c_string.h"
+#include "ecmascript/platform/file.h"
+
+namespace panda::ecmascript::kungfu {
+class OhosPkgArgs {
+public:
+    constexpr static const char *const KEY_BUNDLE_NAME = "bundleName";
+    constexpr static const char *const KEY_MODULE_NAME = "moduleName";
+    constexpr static const char *const KEY_PKG_PATH = "pkgPath";
+    constexpr static const char *const KEY_FILE_NAME = "abcName";
+    constexpr static const char *const KEY_ABC_OFFSET = "abcOffset";
+    constexpr static const char *const KEY_ABC_SIZE = "abcSize";
+
+    OhosPkgArgs() = default;
+
+    static bool ParseListFromJson(EcmaVM *vm, const std::string &jsonInfo, std::list<OhosPkgArgs> &infoList)
+    {
+        LocalScope scope(vm);
+        ObjectFactory *factory = vm->GetFactory();
+        ecmascript::base::JsonParser parser(vm->GetJSThread());
+
+        JSHandle<JSTaggedValue> handleMsg(factory->NewFromASCII(jsonInfo.c_str()));
+        JSHandle<EcmaString> handleStr(JSTaggedValue::ToString(vm->GetAssociatedJSThread(), handleMsg)); // JSON Object
+        JSHandle<JSTaggedValue> result = parser.ParseUtf8(*handleStr);
+        JSTaggedValue resultValue(static_cast<JSTaggedType>(result->GetRawData()));
+        if (!resultValue.IsArray(vm->GetJSThread())) {
+            LOG_COMPILER(ERROR) << "Pkg list info parse failed. result is not an array. jsonData: " << jsonInfo.c_str();
+            return false;
+        }
+        JSHandle<JSArray> valueHandle(vm->GetJSThread(), resultValue);
+        JSHandle<TaggedArray> elements(vm->GetJSThread(), valueHandle->GetElements());
+        for (uint32_t i = 0; i < elements->GetLength(); i++) {
+            OhosPkgArgs pkgInfo;
+            JSHandle<JSTaggedValue> entry(vm->GetJSThread(), elements->Get(i));
+            if (entry->IsHole()) {
+                continue;
+            }
+            JSTaggedValue entryValue(static_cast<JSTaggedType>(entry->GetRawData()));
+            JSHandle<JSObject> entryHandle(vm->GetJSThread(), entryValue);
+            if (!pkgInfo.ParseFromJsObject(vm, entryHandle)) {
+                LOG_COMPILER(ERROR) << "Pkg list entry info parse failed. jsonData: " << jsonInfo.c_str();
+                return false;
+            }
+            infoList.emplace_back(pkgInfo);
+        }
+        return true;
+    }
+
+    bool ParseFromJson(EcmaVM *vm, const std::string &jsonInfo)
+    {
+        LocalScope scope(vm);
+        ObjectFactory *factory = vm->GetFactory();
+        ecmascript::base::JsonParser parser(vm->GetJSThread());
+
+        JSHandle<JSTaggedValue> handleMsg(factory->NewFromASCII(jsonInfo.c_str()));
+        JSHandle<EcmaString> handleStr(JSTaggedValue::ToString(vm->GetAssociatedJSThread(), handleMsg)); // JSON Object
+        JSHandle<JSTaggedValue> result = parser.ParseUtf8(*handleStr);
+        JSTaggedValue resultValue(static_cast<JSTaggedType>(result->GetRawData()));
+        if (!resultValue.IsECMAObject()) {
+            LOG_COMPILER(ERROR) << "Pkg info parse failed. result is not an object. jsonData: " << jsonInfo.c_str();
+            return false;
+        }
+        JSHandle<JSObject> valueHandle(vm->GetJSThread(), resultValue);
+        return ParseFromJsObject(vm, valueHandle);
+    }
+
+    bool ParseFromJsObject(EcmaVM *vm, JSHandle<JSObject> &valueHandle)
+    {
+        LocalScope scope(vm);
+        JSHandle<TaggedArray> nameList(JSObject::EnumerableOwnNames(vm->GetJSThread(), valueHandle));
+        for (uint32_t i = 0; i < nameList->GetLength(); i++) {
+            JSHandle<JSTaggedValue> key(vm->GetJSThread(), nameList->Get(i));
+            JSHandle<JSTaggedValue> value = JSObject::GetProperty(vm->GetJSThread(), valueHandle, key).GetValue();
+            if (!key->IsString() || !value->IsString()) {
+                LOG_COMPILER(ERROR) << "Pkg info parse from js object failed. key and value must be string type.";
+                return false;
+            }
+            UpdateProperty(ConvertToString(*JSTaggedValue::ToString(vm->GetJSThread(), key)).c_str(),
+                           ConvertToString(*JSTaggedValue::ToString(vm->GetJSThread(), value)).c_str());
+        }
+        return Valid();
+    }
+
+    void UpdateProperty(const char *key, const char *value)
+    {
+        if (strcmp(key, KEY_BUNDLE_NAME) == 0) {
+            bundleName_ = value;
+        } else if (strcmp(key, KEY_MODULE_NAME) == 0) {
+            moduleName_ = value;
+        } else if (strcmp(key, KEY_PKG_PATH) == 0) {
+            pkgPath_ = value;
+        } else if (strcmp(key, KEY_FILE_NAME) == 0) {
+            abcName_ = value;
+        } else if (strcmp(key, KEY_ABC_OFFSET) == 0) {
+            char *str = nullptr;
+            abcOffset_ = strtol(value, &str, 0);
+        } else if (strcmp(key, KEY_ABC_SIZE) == 0) {
+            char *str = nullptr;
+            abcSize_ = strtol(value, &str, 0);
+        } else {
+            LOG_COMPILER(ERROR) << "Unknown keyword when parse pkg info. key: " << key << ", value: " << value;
+        }
+    }
+
+    bool Valid() const
+    {
+        if (!base::StringHelper::EndsWith(abcName_, ".abc")) {
+            LOG_COMPILER(ERROR) << "The last argument must be abc file, but now is: " << abcName_ << std::endl;
+            return false;
+        }
+        return !bundleName_.empty() && !moduleName_.empty() && !pkgPath_.empty() && (abcOffset_ != INVALID_VALUE) &&
+               (abcSize_ != INVALID_VALUE);
+    }
+
+    void Dump() const
+    {
+        LOG_ECMA(INFO) << "PkgInfo: " << KEY_BUNDLE_NAME << ": " << bundleName_ << ", " << KEY_MODULE_NAME << ": "
+                       << moduleName_ << ", " << KEY_PKG_PATH << ": " << pkgPath_ << ", " << KEY_ABC_OFFSET << ": "
+                       << std::hex << abcOffset_ << ", " << KEY_ABC_SIZE << ": " << abcSize_;
+    }
+
+    const std::string &GetBundleName() const
+    {
+        return bundleName_;
+    }
+
+    const std::string &GetModuleName() const
+    {
+        return moduleName_;
+    }
+
+    const std::string &GetPath() const
+    {
+        return pkgPath_;
+    }
+
+    std::string GetFullName() const
+    {
+        return pkgPath_ + GetPathSeparator() + abcName_;
+    }
+
+    uint32_t GetOffset() const
+    {
+        return abcOffset_;
+    }
+
+    uint32_t GetSize() const
+    {
+        return abcSize_;
+    }
+
+private:
+    static constexpr uint32_t INVALID_VALUE = std::numeric_limits<uint32_t>::max();
+    std::string bundleName_;
+    std::string moduleName_;
+    std::string pkgPath_;
+    std::string abcName_;
+    uint32_t abcOffset_ {INVALID_VALUE};
+    uint32_t abcSize_ {INVALID_VALUE};
+};
+}  // namespace panda::ecmascript::kungfu
+#endif
\ No newline at end of file
diff --git a/ecmascript/compiler/operations_stub_builder.cpp b/ecmascript/compiler/operations_stub_builder.cpp
index 4c9138899189fed93ef415f6f3cad6ec4f2eafbb..4715d5c53cf11012213458987d9c5e7a8d8178c8 100644
--- a/ecmascript/compiler/operations_stub_builder.cpp
+++ b/ecmascript/compiler/operations_stub_builder.cpp
@@ -26,15 +26,32 @@ GateRef OperationsStubBuilder::Equal(GateRef glue, GateRef left, GateRef right,
     env->SubCfgEntry(&entry);
     Label exit(env);
     Label isHole(env);
+    Label notHole(env);
     DEFVARIABLE(result, VariableType::JS_ANY(), Hole());
-    result = FastEqual(left, right, callback);
-    Branch(TaggedIsHole(*result), &isHole, &exit);
+    result = FastEqual(glue, left, right, callback);
+    Branch(TaggedIsHole(*result), &isHole, &notHole);
     Bind(&isHole);
     {
         // slow path
         result = CallRuntime(glue, RTSTUB_ID(Eq), { left, right });
         Jump(&exit);
     }
+    Bind(&notHole);
+    {
+        Label resultIsTrue(env);
+        Label resultNotTrue(env);
+        Branch(TaggedIsTrue(*result), &resultIsTrue, &resultNotTrue);
+        Bind(&resultIsTrue);
+        {
+            callback.ProfileBranch(true);
+            Jump(&exit);
+        }
+        Bind(&resultNotTrue);
+        {
+            callback.ProfileBranch(false);
+            Jump(&exit);
+        }
+    }
     Bind(&exit);
     auto ret = *result;
     env->SubCfgExit();
@@ -49,7 +66,7 @@ GateRef OperationsStubBuilder::NotEqual(GateRef glue, GateRef left, GateRef righ
     Label exit(env);
     DEFVARIABLE(result, VariableType::JS_ANY(), Hole());
-    result = FastEqual(left, right, callback);
+    result = FastEqual(glue, left, right, callback);
     Label isHole(env);
     Label notHole(env);
     Branch(TaggedIsHole(*result), &isHole, &notHole);
@@ -67,10 +84,12 @@
     Bind(&resultIsTrue);
     {
         result = TaggedFalse();
+        callback.ProfileBranch(false);
         Jump(&exit);
     }
     Bind(&resultNotTrue);
     {
+        callback.ProfileBranch(true);
         result = TaggedTrue();
         Jump(&exit);
     }
@@ -87,12 +106,19 @@ GateRef OperationsStubBuilder::StrictEqual(GateRef glue, GateRef left, GateRef r
     Label entry(env);
     env->SubCfgEntry(&entry);
     Label exit(env);
+    Label strictEqual(env);
     Label notStrictEqual(env);
     DEFVARIABLE(result, VariableType::JS_ANY(), TaggedTrue());
-    Branch(FastStrictEqual(glue, left, right, callback), &exit, &notStrictEqual);
+    Branch(FastStrictEqual(glue, left, right, callback), &strictEqual, &notStrictEqual);
+    Bind(&strictEqual);
+    {
+        callback.ProfileBranch(true);
+        Jump(&exit);
+    }
     Bind(&notStrictEqual);
     {
         result = TaggedFalse();
+        callback.ProfileBranch(false);
         Jump(&exit);
     }
     Bind(&exit);
@@ -108,11 +134,18 @@
     env->SubCfgEntry(&entry);
     Label exit(env);
     Label strictEqual(env);
+    Label notStrictEqual(env);
     DEFVARIABLE(result, VariableType::JS_ANY(), TaggedTrue());
-    Branch(FastStrictEqual(glue, left, right, callback), &strictEqual, &exit);
+    Branch(FastStrictEqual(glue, left, right, callback), &strictEqual, &notStrictEqual);
     Bind(&strictEqual);
     {
         result = TaggedFalse();
+        callback.ProfileBranch(false);
+        Jump(&exit);
+    }
+    Bind(&notStrictEqual);
+    {
+        callback.ProfileBranch(true);
         Jump(&exit);
     }
     Bind(&exit);
@@ -206,11 +239,13 @@ GateRef OperationsStubBuilder::Less(GateRef glue, GateRef left, GateRef right, P
     }
     Bind(&leftLessRight);
     {
+        callback.ProfileBranch(true);
         result = TaggedTrue();
         Jump(&exit);
     }
     Bind(&leftNotLessRight);
     {
+        callback.ProfileBranch(false);
         result = TaggedFalse();
         Jump(&exit);
     }
@@ -311,11 +346,13 @@ GateRef OperationsStubBuilder::LessEq(GateRef glue, GateRef left, GateRef right,
     }
     Bind(&leftLessEqRight);
     {
+        callback.ProfileBranch(true);
         result = TaggedTrue();
         Jump(&exit);
     }
     Bind(&leftNotLessEqRight);
     {
+        callback.ProfileBranch(false);
         result = TaggedFalse();
         Jump(&exit);
     }
@@ -394,8 +431,8 @@ GateRef OperationsStubBuilder::Greater(GateRef glue, GateRef left, GateRef right
     }
     Bind(&rightIsInt1);
     {
-        int32_t type = Int32(PGOSampleType::IntType());
-        callback.ProfileOpType(Int32(type));
+        GateRef type = Int32(PGOSampleType::IntType());
+        callback.ProfileOpType(type);
         doubleRight = ChangeInt32ToFloat64(TaggedGetInt(right));
         Jump(&exit2);
     }
@@ -415,11 +452,13 @@ GateRef OperationsStubBuilder::Greater(GateRef glue, GateRef left, GateRef right
     }
     Bind(&leftGreaterRight);
     {
+        callback.ProfileBranch(true);
         result = TaggedTrue();
         Jump(&exit);
     }
     Bind(&leftNotGreaterRight);
     {
+        callback.ProfileBranch(false);
         result = TaggedFalse();
         Jump(&exit);
     }
@@ -520,11 +559,13 @@ GateRef OperationsStubBuilder::GreaterEq(GateRef glue, GateRef left, GateRef rig
     Bind(&leftGreaterEqRight);
     {
+        callback.ProfileBranch(true);
         result = TaggedTrue();
         Jump(&exit);
     }
     Bind(&leftNotGreaterEQRight);
     {
+        callback.ProfileBranch(false);
         result = TaggedFalse();
         Jump(&exit);
     }
@@ -1345,7 +1386,7 @@ GateRef OperationsStubBuilder::Neg(GateRef glue, GateRef value, ProfileOperation
     {
         callback.ProfileOpType(Int32(PGOSampleType::DoubleType()));
         GateRef valueDouble = GetDoubleOfTDouble(value);
-        result = DoubleToTaggedDoublePtr(DoubleSub(Double(0), valueDouble));
+        result = DoubleToTaggedDoublePtr(DoubleMul(Double(-1), valueDouble));
         Jump(&exit);
     }
     Bind(&valueNotDouble);
diff --git a/ecmascript/compiler/pass.h b/ecmascript/compiler/pass.h
index 45f425e699d8a0502a51c80fbcde3ffccee9ffb8..ea019347c58b975e8ded4ebcdf77dc1ec6011a6a 100644
--- a/ecmascript/compiler/pass.h
+++ b/ecmascript/compiler/pass.h
@@ -21,11 +21,14 @@
 #include "ecmascript/compiler/common_stubs.h"
 #include "ecmascript/compiler/compiler_log.h"
 #include "ecmascript/compiler/early_elimination.h"
+#include "ecmascript/compiler/array_bounds_check_elimination.h"
 #include "ecmascript/compiler/graph_linearizer.h"
 #include "ecmascript/compiler/later_elimination.h"
 #include "ecmascript/compiler/lcr_lowering.h"
 #include "ecmascript/compiler/llvm_codegen.h"
+#include "ecmascript/compiler/litecg_codegen.h"
 #include "ecmascript/compiler/loop_analysis.h"
+#include "ecmascript/compiler/loop_peeling.h"
 #include "ecmascript/compiler/ntype_hcr_lowering.h"
 #include "ecmascript/compiler/ntype_mcr_lowering.h"
 #include "ecmascript/compiler/number_speculative_runner.h"
@@ -41,6 +44,8 @@
 #include "ecmascript/compiler/type_mcr_lowering.h"
 #include "ecmascript/compiler/value_numbering.h"
 #include "ecmascript/compiler/verifier.h"
+#include "litecg.h"
+#include "lmir_builder.h"
 
 namespace panda::ecmascript::kungfu {
 class PassContext;
@@ -100,7 +105,7 @@ public:
         return ctx_->GetJSPandaFile();
     }
 
-    LLVMModule* GetAotModule() const
+    IRModule* GetAotModule() const
     {
         return ctx_->GetAOTModule();
     }
@@ -237,16 +242,15 @@ public:
             return false;
         }
         TimeScope timescope("TypeInferPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog());
-        if (data->HasTypes()) {
-            bool enableLog = data->GetLog()->GetEnableMethodLog() && data->GetLog()->OutputType();
-            GlobalTypeInfer globalTypeInfer(data->GetPassContext(), data->GetMethodOffset(), data->GetRecordName(),
-                                            data->GetPGOProfilerDecoder(), enableLog);
-            globalTypeInfer.ProcessTypeInference(data->GetBuilder(), data->GetCircuit());
-            if (data->GetMethodLiteral()->IsClassConstructor()) {
-                InitializationAnalysis initAnalysis(data->GetCircuit(), data->GetTSManager(), data->GetRecordName(),
-                                                    data->GetMethodName(), enableLog);
-                initAnalysis.Run();
-            }
+        bool enableLog = data->GetLog()->GetEnableMethodLog() && data->GetLog()->OutputType();
+        GlobalTypeInfer globalTypeInfer(data->GetPassContext(), data->GetMethodOffset(), data->GetRecordName(),
+                                        data->GetPGOProfilerDecoder(), passOptions->EnableOptTrackField(),
+                                        enableLog);
+        globalTypeInfer.ProcessTypeInference(data->GetBuilder(), data->GetCircuit());
+        if (data->HasTypes() && data->GetMethodLiteral()->IsClassConstructor()) {
+            InitializationAnalysis
initAnalysis(data->GetCircuit(), data->GetTSManager(), data->GetRecordName(), + data->GetMethodName(), enableLog); + initAnalysis.Run(); } return true; } @@ -308,8 +312,8 @@ public: } TimeScope timescope("NTypeHCRLoweringPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); bool enableLog = data->GetLog()->EnableMethodCIRLog(); - NTypeHCRLowering lowering(data->GetCircuit(), data->GetPassContext(), - data->GetTSManager(), enableLog, data->GetMethodName()); + NTypeHCRLowering lowering(data->GetCircuit(), data->GetPassContext(), data->GetTSManager(), + data->GetMethodLiteral(), data->GetRecordName(), enableLog, data->GetMethodName()); lowering.RunNTypeHCRLowering(); return true; } @@ -326,7 +330,7 @@ public: TimeScope timescope("TypeMCRLoweringPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); bool enableLog = data->GetLog()->EnableMethodCIRLog(); TypeMCRLowering lowering(data->GetCircuit(), data->GetCompilerConfig(), data->GetTSManager(), - enableLog, data->GetMethodName()); + enableLog, data->GetMethodName(), data->GetPassOptions()->EnableOptOnHeapCheck()); lowering.RunTypeMCRLowering(); return true; } @@ -342,8 +346,8 @@ public: } TimeScope timescope("NTypeMCRLoweringPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); bool enableLog = data->GetLog()->EnableMethodCIRLog(); - NTypeMCRLowering lowering(data->GetCircuit(), data->GetPassContext(), data->GetTSManager(), - enableLog, data->GetMethodName()); + NTypeMCRLowering lowering(data->GetCircuit(), data->GetPassContext(), + data->GetRecordName(), enableLog, data->GetMethodName()); lowering.RunNTypeMCRLowering(); return true; } @@ -376,7 +380,7 @@ public: TimeScope timescope("TSInlineLoweringPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); bool enableLog = data->GetLog()->EnableMethodCIRLog(); TSInlineLowering inlining(data->GetCircuit(), data->GetPassContext(), enableLog, data->GetMethodName(), - data->GetNativeAreaAllocator()); + data->GetNativeAreaAllocator(), passOptions, data->GetMethodOffset()); inlining.RunTSInlineLowering(); return true; } @@ -395,6 +399,20 @@ public: } }; +class RunFlowCyclesVerifierPass { +public: + bool Run(PassData* data) + { + TimeScope timescope("FlowCyclesVerifierPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); + bool hasFlowCycle = Verifier::RunFlowCyclesFind(data->GetCircuit()); + if (hasFlowCycle) { + LOG_FULL(FATAL) << "FlowCyclesVerifierPass fail"; + UNREACHABLE(); + } + return !hasFlowCycle; + } +}; + class VerifierPass { public: bool Run(PassData* data) @@ -421,8 +439,8 @@ public: TimeScope timescope("NumberSpeculativePass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); Chunk chunk(data->GetNativeAreaAllocator()); bool enableLog = data->GetLog()->EnableMethodCIRLog(); - NumberSpeculativeRunner(data->GetCircuit(), data->GetTSManager(), - enableLog, data->GetMethodName(), &chunk, data->GetPassContext()).Run(); + bool onHeapCheck = data->GetPassOptions()->EnableOptOnHeapCheck(); + NumberSpeculativeRunner(data->GetCircuit(), enableLog, data->GetMethodName(), &chunk, onHeapCheck).Run(); return true; } }; @@ -439,6 +457,14 @@ public: auto bb = data->GetBuilder()->GetBasicBlockById(head.second); auto loopInfo = new LoopInfo(&chunk, bb.stateCurrent); loopAnalysis_.CollectLoopBody(loopInfo); + bool enableLog = data->GetLog()->EnableMethodCIRLog(); + if (enableLog) { + loopAnalysis_.PrintLoop(loopInfo); + } + if (data->GetPassOptions()->EnableOptLoopPeeling()) { + 
LoopPeeling(data->GetBuilder(), data->GetCircuit(), enableLog, + data->GetMethodName(), &chunk, loopInfo).Peel(); + } } loopAnalysis_.LoopExitElimination(); return true; @@ -461,6 +487,23 @@ public: } }; +class ArrayBoundsCheckEliminationPass { +public: + bool Run(PassData* data) + { + PassOptions *passOptions = data->GetPassOptions(); + if (!passOptions->EnableTypeLowering() || !passOptions->EnableArrayBoundsCheckElimination()) { + return false; + } + TimeScope timescope("ArrayBoundsCheckEliminationPass", + data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); + Chunk chunk(data->GetNativeAreaAllocator()); + bool enableLog = data->GetLog()->EnableMethodCIRLog(); + ArrayBoundsCheckElimination(data->GetCircuit(), enableLog, data->GetMethodName(), &chunk).Run(); + return true; + } +}; + class LaterEliminationPass { public: bool Run(PassData* data) @@ -533,26 +576,29 @@ public: } }; -class LLVMIRGenPass { +class CGIRGenPass { public: - void CreateCodeGen(LLVMModule *module, bool enableLog) + void CreateCodeGen(IRModule *module, bool enableLog) { - llvmImpl_ = std::make_unique(module, enableLog); + if (module->GetModuleKind() == MODULE_LLVM) { + cgImpl_ = std::make_unique(static_cast(module), enableLog); + } else { + cgImpl_ = std::make_unique(static_cast(module), enableLog); + } } bool Run(PassData *data) { - auto module = data->GetAotModule(); - TimeScope timescope("LLVMIRGenPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); + TimeScope timescope("CGIRGenPass", data->GetMethodName(), data->GetMethodOffset(), data->GetLog()); bool enableLog = data->GetLog()->EnableMethodCIRLog() || data->GetLog()->OutputASM(); - CreateCodeGen(module, enableLog); - CodeGenerator codegen(llvmImpl_, data->GetMethodName()); + CreateCodeGen(data->GetAotModule(), enableLog); + CodeGenerator codegen(cgImpl_, data->GetMethodName()); codegen.Run(data->GetCircuit(), data->GetConstScheduleResult(), data->GetCompilerConfig(), data->GetMethodLiteral(), data->GetJSPandaFile()); return true; } private: - std::unique_ptr llvmImpl_ {nullptr}; + std::unique_ptr cgImpl_ {nullptr}; }; class AsyncFunctionLoweringPass { @@ -565,10 +611,25 @@ public: AsyncFunctionLowering lowering(data->GetBuilder(), data->GetCircuit(), data->GetCompilerConfig(), enableLog, data->GetMethodName()); if (lowering.IsAsyncRelated()) { - lowering.ProcessAll(); + if (IsFunctionMain(data)) { + lowering.ProcessAll(); + } else { + data->MarkAsTypeAbort(); + } } return true; } + +private: + bool IsFunctionMain(PassData* data) + { + auto methodName = data->GetMethodName(); + auto pos = methodName.find(JSPandaFile::ENTRY_FUNCTION_NAME); + if (pos != std::string::npos) { + return true; + } + return false; + } }; } // namespace panda::ecmascript::kungfu #endif diff --git a/ecmascript/compiler/pass_manager.cpp b/ecmascript/compiler/pass_manager.cpp index 1df70e63e28ec8de3409f4e475b69c9d991b155f..8310ed9bb7a976a56159300635c4581d6037b10f 100644 --- a/ecmascript/compiler/pass_manager.cpp +++ b/ecmascript/compiler/pass_manager.cpp @@ -19,10 +19,12 @@ #include "ecmascript/ecma_handle_scope.h" #include "ecmascript/jspandafile/js_pandafile_manager.h" #include "ecmascript/jspandafile/panda_file_translator.h" +#include "ecmascript/pgo_profiler/pgo_profiler_manager.h" #include "ecmascript/snapshot/mem/snapshot.h" #include "ecmascript/ts_types/ts_manager.h" namespace panda::ecmascript::kungfu { +using PGOProfilerManager = pgo::PGOProfilerManager; bool PassManager::ShouldCollect() const { return passOptions_->EnableTypeInfer() && @@ -39,7 +41,7 
@@ bool PassManager::Compile(JSPandaFile *jsPandaFile, const std::string &fileName, return false; } - if (!profilerDecoder_.LoadAndVerify(jsPandaFile->GetChecksum())) { + if (!PGOProfilerManager::MergeApFiles(jsPandaFile->GetChecksum(), profilerDecoder_)) { LOG_COMPILER(ERROR) << "Load and verify profiler failure"; return false; } @@ -95,14 +97,14 @@ bool PassManager::Compile(JSPandaFile *jsPandaFile, const std::string &fileName, } Circuit circuit(vm_->GetNativeAreaAllocator(), ctx.GetAOTModule()->GetDebugInfo(), - fullName.c_str(), cmpCfg->Is64Bit()); - circuit.SetFrameType(FrameType::OPTIMIZED_JS_FUNCTION_FRAME); + fullName.c_str(), cmpCfg->Is64Bit(), FrameType::OPTIMIZED_JS_FUNCTION_FRAME); PGOProfilerDecoder *decoder = passOptions_->EnableOptPGOType() ? &profilerDecoder_ : nullptr; BytecodeCircuitBuilder builder(jsPandaFile, methodLiteral, methodPCInfo, tsManager, &circuit, ctx.GetByteCodes(), hasTypes, enableMethodLog && log_->OutputCIR(), - passOptions_->EnableTypeLowering(), fullName, recordName, decoder, false); + passOptions_->EnableTypeLowering(), fullName, recordName, decoder, false, + passOptions_->EnableOptTrackField()); { TimeScope timeScope("BytecodeToCircuit", methodName, methodOffset, log_); builder.BytecodeToCircuit(); @@ -110,8 +112,8 @@ bool PassManager::Compile(JSPandaFile *jsPandaFile, const std::string &fileName, PassData data(&builder, &circuit, &ctx, log_, fullName, &methodInfo, hasTypes, recordName, methodLiteral, methodOffset, vm_->GetNativeAreaAllocator(), decoder, passOptions_); - PassRunner pipeline(&data); + pipeline.RunPass(); if (builder.EnableLoopOptimization()) { pipeline.RunPass(); } @@ -124,6 +126,11 @@ bool PassManager::Compile(JSPandaFile *jsPandaFile, const std::string &fileName, pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); + // Skip async functions, because some applications run with errors when they are compiled.
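The abort check that follows ties into AsyncFunctionLoweringPass above: that pass marks an async-related method as a type-infer abort unless it is the file's entry function, and the pipeline then abandons the method instead of AOT-compiling it. A minimal standalone sketch of that control flow, assuming the usual "func_main_0" entry name; `MethodInfo` and the helper below are simplified stand-ins, not the real PassData API:

```
#include <string>

// Hypothetical, simplified stand-in for the per-method compilation state.
struct MethodInfo {
    std::string fullName;
    bool asyncRelated = false;
    bool typeInferAbort = false;
};

// JSPandaFile::ENTRY_FUNCTION_NAME is "func_main_0" in this codebase.
constexpr const char *ENTRY_FUNCTION_NAME = "func_main_0";

static bool IsFunctionMain(const MethodInfo &method)
{
    return method.fullName.find(ENTRY_FUNCTION_NAME) != std::string::npos;
}

// Mirrors the intent of AsyncFunctionLoweringPass::Run: async-related methods
// other than the entry function are tagged so the pipeline can abort them.
static void RunAsyncFunctionLowering(MethodInfo &method)
{
    if (!method.asyncRelated) {
        return;                        // nothing async to lower
    }
    if (IsFunctionMain(method)) {
        // lowering.ProcessAll() would rewrite the coroutine states here
    } else {
        method.typeInferAbort = true;  // checked right after this pass runs
    }
}
```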
+ if (methodInfo.IsTypeInferAbort()) { + data.AbortCompilation(); + return; + } pipeline.RunPass(); pipeline.RunPass(); if (data.IsTypeAbort()) { @@ -131,19 +138,20 @@ bool PassManager::Compile(JSPandaFile *jsPandaFile, const std::string &fileName, return; } pipeline.RunPass(); + pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); - pipeline.RunPass(); pipeline.RunPass(); + pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); pipeline.RunPass(); - pipeline.RunPass(); + pipeline.RunPass(); }); ProcessConstantPool(&collector); @@ -179,9 +187,8 @@ void PassManager::ResolveModule(const JSPandaFile *jsPandaFile, const std::strin ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); [[maybe_unused]] EcmaHandleScope scope(thread); for (auto info: recordInfo) { - auto recordName = info.first; - if (jsPandaFile->IsModule(thread, recordName)) { - ASSERT(!thread->HasPendingException()); + if (jsPandaFile->IsModule(info.second)) { + auto recordName = info.first; JSHandle moduleRecord = moduleManager->HostResolveImportedModuleWithMerge(fileName.c_str(), recordName); SourceTextModule::Instantiate(thread, moduleRecord); diff --git a/ecmascript/compiler/pass_manager.h b/ecmascript/compiler/pass_manager.h index b5e9c1d004130f49700de261f0d60edd453a6f0b..41706f2e25e502455a25af01eb0c8710169dc662 100644 --- a/ecmascript/compiler/pass_manager.h +++ b/ecmascript/compiler/pass_manager.h @@ -19,8 +19,10 @@ #include "ecmascript/compiler/bytecode_info_collector.h" #include "ecmascript/compiler/compiler_log.h" #include "ecmascript/compiler/file_generators.h" +#include "ecmascript/compiler/ir_module.h" #include "ecmascript/ecma_vm.h" #include "ecmascript/pgo_profiler/pgo_profiler_decoder.h" +#include "ecmascript/pgo_profiler/pgo_profiler_manager.h" #include "ecmascript/ts_types/ts_manager.h" namespace panda::ecmascript::kungfu { @@ -30,11 +32,12 @@ class CompilationConfig; class PassContext { public: - PassContext(const std::string &triple, CompilerLog *log, BytecodeInfoCollector* collector, LLVMModule *aotModule, + PassContext(const std::string &triple, CompilerLog *log, BytecodeInfoCollector* collector, IRModule *aotModule, PGOProfilerDecoder *decoder) : vm_(collector->GetVM()), bcInfoCollector_(collector), tsManager_(vm_->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()), + bytecodes_(collector->GetByteCodes()), lexEnvManager_(bcInfoCollector_->GetEnvManager()), cmpCfg_(triple, &vm_->GetJSOptions()), log_(log), @@ -51,7 +54,7 @@ public: Bytecodes* GetByteCodes() { - return &bytecodes_; + return bytecodes_; } LexEnvManager* GetLexEnvManager() const @@ -79,7 +82,7 @@ public: return bcInfoCollector_; } - LLVMModule* GetAOTModule() const + IRModule* GetAOTModule() const { return aotModule_; } @@ -113,38 +116,49 @@ private: EcmaVM *vm_ {nullptr}; BytecodeInfoCollector *bcInfoCollector_ {nullptr}; TSManager *tsManager_ {nullptr}; - Bytecodes bytecodes_; + Bytecodes *bytecodes_ {nullptr}; LexEnvManager *lexEnvManager_ {nullptr}; CompilationConfig cmpCfg_; CompilerLog *log_ {nullptr}; const JSPandaFile *jsPandaFile_ {nullptr}; - LLVMModule *aotModule_ {nullptr}; + IRModule *aotModule_ {nullptr}; PGOProfilerDecoder *decoder_ {nullptr}; }; class PassOptions { public: - PassOptions(bool enableTypeLowering, bool enableEarlyElimination, bool enableLaterElimination, - bool enableValueNumbering, bool enableTypeInfer, bool enableOptInlining, bool enableOptPGOType) - : 
enableTypeLowering_(enableTypeLowering), + PassOptions(bool enableArrayBoundsCheckElimination, bool enableTypeLowering, bool enableEarlyElimination, + bool enableLaterElimination, bool enableValueNumbering, bool enableTypeInfer, + bool enableOptInlining, bool enableOptPGOType, bool enableOptTrackField, bool enableOptLoopPeeling, + bool enableOptOnHeapCheck) + : enableArrayBoundsCheckElimination_(enableArrayBoundsCheckElimination), + enableTypeLowering_(enableTypeLowering), enableEarlyElimination_(enableEarlyElimination), enableLaterElimination_(enableLaterElimination), enableValueNumbering_(enableValueNumbering), enableTypeInfer_(enableTypeInfer), enableOptInlining_(enableOptInlining), - enableOptPGOType_(enableOptPGOType) + enableOptPGOType_(enableOptPGOType), + enableOptTrackField_(enableOptTrackField), + enableOptLoopPeeling_(enableOptLoopPeeling), + enableOptOnHeapCheck_(enableOptOnHeapCheck) { } #define OPTION_LIST(V) \ + V(ArrayBoundsCheckElimination, true) \ V(TypeLowering, true) \ V(EarlyElimination, true) \ V(LaterElimination, true) \ V(ValueNumbering, false) \ V(TypeInfer, false) \ V(OptInlining, false) \ + V(OptNoGCCall, false) \ V(OptPGOType, false) \ - V(NoCheck, false) + V(NoCheck, false) \ + V(OptTrackField, false) \ + V(OptLoopPeeling, false) \ + V(OptOnHeapCheck, false) #define DECL_OPTION(NAME, DEFAULT) \ public: \ @@ -163,7 +177,7 @@ private: \ class PassManager { public: - PassManager(EcmaVM* vm, std::string entry, std::string &triple, size_t optLevel, size_t relocMode, + PassManager(EcmaVM* vm, std::string &entry, std::string &triple, size_t optLevel, size_t relocMode, CompilerLog *log, AotMethodLogList *logList, size_t maxAotMethodSize, size_t maxMethodsInModule, const std::string &profIn, uint32_t hotnessThreshold, PassOptions *passOptions) : vm_(vm), entry_(entry), triple_(triple), optLevel_(optLevel), relocMode_(relocMode), log_(log), diff --git a/ecmascript/compiler/pgo_bc_info.cpp b/ecmascript/compiler/pgo_bc_info.cpp index f1229866775582e674571d572db3b6b4fa8f7c30..4072b9db140f84b20e58ff02e39f36790dc57e93 100644 --- a/ecmascript/compiler/pgo_bc_info.cpp +++ b/ecmascript/compiler/pgo_bc_info.cpp @@ -20,9 +20,10 @@ void PGOBCInfo::Info::Record(const InfoDetail &detail) { auto it = methodOffsetToValVec_.find(detail.methodOffset); if (it == methodOffsetToValVec_.end()) { - methodOffsetToValVec_[detail.methodOffset] = ValVec { Val { detail.bcIndex, detail.cpIndex} }; + methodOffsetToValVec_[detail.methodOffset] = + ValVec { Val { detail.bcIndex, detail.bcOffset, detail.cpIndex} }; } else { - it->second.emplace_back(Val{ detail.bcIndex, detail.cpIndex }); + it->second.emplace_back(Val{ detail.bcIndex, detail.bcOffset, detail.cpIndex }); } recordNameToValCount_[detail.recordName]++; } @@ -59,19 +60,19 @@ void PGOBCInfo::Record(const InfoDetail &detail, Type type) } void PGOBCInfo::Record(const BytecodeInstruction &bcIns, int32_t bcIndex, - const CString &recordName, const MethodLiteral *method) + const CString &recordName, const MethodLiteral *method) { BytecodeInstruction::Opcode opcode = static_cast(bcIns.GetOpcode()); uint32_t methodOffset = method->GetMethodId().GetOffset(); - switch (opcode) { - case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM8_ID16: - case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: { - auto cpIndex = bcIns.GetId().AsRawValue(); - Record(InfoDetail {recordName, methodOffset, bcIndex, cpIndex}, Type::OBJ_LITERAL); - break; - } - default: - break; + uint32_t bcOffset = bcIns.GetAddress() - method->GetBytecodeArray(); 
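The record now carries both positions, and the dispatch that follows classifies the instruction into OBJ_LITERAL, ARRAY_LITERAL, or CALL_TARGET: `bcIndex` is the ordinal index of the instruction, while `bcOffset` is its byte distance from the method's first bytecode, computed from the raw instruction address as above. A self-contained sketch of that bookkeeping; `Val` mirrors the struct added to pgo_bc_info.h below:

```
#include <cstdint>

// Mirrors PGOBCInfo::Val from pgo_bc_info.h below.
struct Val {
    uint32_t bcIndex;   // ordinal index of the instruction in the method
    uint32_t bcOffset;  // byte offset from the start of the bytecode array
    uint32_t cpIndex;   // constant-pool index (0 for CALL_TARGET records)
};

// Byte distance of an instruction from the method's first bytecode, as in
// bcIns.GetAddress() - method->GetBytecodeArray() above.
static uint32_t ComputeBcOffset(const uint8_t *insAddress, const uint8_t *bytecodeArray)
{
    return static_cast<uint32_t>(insAddress - bytecodeArray);
}
```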
+ if (Bytecodes::IsCreateObjectWithBufferOp(opcode)) { + auto cpIndex = bcIns.GetId().AsRawValue(); + Record(InfoDetail {recordName, methodOffset, bcIndex, bcOffset, cpIndex}, Type::OBJ_LITERAL); + } else if (Bytecodes::IsCreateArrayWithBufferOp(opcode)) { + auto cpIndex = bcIns.GetId().AsRawValue(); + Record(InfoDetail {recordName, methodOffset, bcIndex, bcOffset, cpIndex}, Type::ARRAY_LITERAL); + } else if (Bytecodes::IsCallOp(opcode)) { + Record(InfoDetail {recordName, methodOffset, bcIndex, bcOffset, 0}, Type::CALL_TARGET); } } } // namespace panda::ecmascript diff --git a/ecmascript/compiler/pgo_bc_info.h b/ecmascript/compiler/pgo_bc_info.h index aea3b8457d735c4ca344bbd811f595379a30e4b9..79628d30eaff613b5778bdf4da85fb47bd63043e 100644 --- a/ecmascript/compiler/pgo_bc_info.h +++ b/ecmascript/compiler/pgo_bc_info.h @@ -24,16 +24,18 @@ class PGOBCInfo { public: enum Type { OBJ_LITERAL = 0, - + ARRAY_LITERAL, + CALL_TARGET, TYPE_NUM, TYPE_FIRST = OBJ_LITERAL, - TYPE_LAST = OBJ_LITERAL, + TYPE_LAST = CALL_TARGET, }; struct InfoDetail { const CString &recordName; uint32_t methodOffset; uint32_t bcIndex; + uint32_t bcOffset; uint32_t cpIndex; }; @@ -49,13 +51,14 @@ public: if (methodOffsetToValVec_.find(methodOffset) != methodOffsetToValVec_.end()) { const ValVec &valVec = methodOffsetToValVec_.at(methodOffset); for (auto val : valVec) { - cb(type, val.bcIndex, val.cpIndex); + cb(type, val.bcIndex, val.bcOffset, val.cpIndex); } } } private: struct Val { uint32_t bcIndex; + uint32_t bcOffset; uint32_t cpIndex; }; using ValVec = CVector; diff --git a/ecmascript/compiler/profiler_operation.h b/ecmascript/compiler/profiler_operation.h index 385a4de60f4496cbb52853b8acd3adf6160453ac..57b1470673664c47187f68cf2dd8b60fdfd5ed5b 100644 --- a/ecmascript/compiler/profiler_operation.h +++ b/ecmascript/compiler/profiler_operation.h @@ -24,12 +24,14 @@ namespace panda::ecmascript::kungfu { enum class OperationType : uint8_t { CALL, - CALL_WIDE, OPERATION_TYPE, DEFINE_CLASS, CREATE_OBJECT, STORE_LAYOUT, LOAD_LAYOUT, + INDEX, + TRUE_BRANCH, + FALSE_BRANCH, }; #define COMBINE_TYPE_CALL_BACK(curType, type) \ @@ -47,11 +49,10 @@ public: return callback_ == nullptr; } - inline void ProfileCall(GateRef func, BytecodeInstruction::Format format) const + inline void ProfileCall(GateRef func) const { if (callback_) { - callback_({ func }, - format == BytecodeInstruction::Format::IMM16 ? OperationType::CALL_WIDE : OperationType::CALL); + callback_({ func }, OperationType::CALL); } } @@ -78,10 +79,10 @@ public: } } - inline void ProfileCreateObject(GateRef originObj, GateRef newObj) const + inline void ProfileCreateObject(GateRef newObj) const { if (callback_) { - callback_({ originObj, newObj }, OperationType::CREATE_OBJECT); + callback_({ newObj }, OperationType::CREATE_OBJECT); } } @@ -99,6 +100,20 @@ public: } } + inline void ProfileObjIndex(GateRef object) const + { + if (callback_) { + callback_({ object }, OperationType::INDEX); + } + } + + inline void ProfileBranch(bool isTrue) const + { + if (callback_) { + callback_({}, isTrue ? 
OperationType::TRUE_BRANCH : OperationType::FALSE_BRANCH); + } + } + private: Callback callback_; }; diff --git a/ecmascript/compiler/profiler_stub_builder.cpp b/ecmascript/compiler/profiler_stub_builder.cpp index f85ceedd00eba6ed624fa4bae413889e18087b8e..6781159cb6804db60fe490b869c6e0623a4cfd4f 100644 --- a/ecmascript/compiler/profiler_stub_builder.cpp +++ b/ecmascript/compiler/profiler_stub_builder.cpp @@ -15,6 +15,7 @@ #include "ecmascript/compiler/profiler_stub_builder.h" +#include "ecmascript/compiler/gate_meta_data.h" #include "ecmascript/compiler/interpreter_stub-inl.h" #include "ecmascript/compiler/stub_builder-inl.h" #include "ecmascript/ic/profile_type_info.h" @@ -25,8 +26,7 @@ void ProfilerStubBuilder::PGOProfiler(GateRef glue, GateRef pc, GateRef func, Ga { switch (type) { case OperationType::CALL: - case OperationType::CALL_WIDE: - ProfileCall(glue, pc, profileTypeInfo, values[0], type); + ProfileCall(glue, pc, func, values[0]); break; case OperationType::OPERATION_TYPE: ProfileOpType(glue, pc, func, profileTypeInfo, values[0]); @@ -35,7 +35,7 @@ void ProfilerStubBuilder::PGOProfiler(GateRef glue, GateRef pc, GateRef func, Ga ProfileDefineClass(glue, pc, func, values[0]); break; case OperationType::CREATE_OBJECT: - ProfileCreateObject(glue, pc, func, values[0], values[1]); + ProfileCreateObject(glue, pc, func, values[0]); break; case OperationType::STORE_LAYOUT: ProfileObjLayout(glue, pc, func, values[0], Int32(1)); @@ -43,6 +43,15 @@ void ProfilerStubBuilder::PGOProfiler(GateRef glue, GateRef pc, GateRef func, Ga case OperationType::LOAD_LAYOUT: ProfileObjLayout(glue, pc, func, values[0], Int32(0)); break; + case OperationType::INDEX: + ProfileObjIndex(glue, pc, func, values[0]); + break; + case OperationType::TRUE_BRANCH: + ProfileBranch(glue, pc, func, profileTypeInfo, true); + break; + case OperationType::FALSE_BRANCH: + ProfileBranch(glue, pc, func, profileTypeInfo, false); + break; default: break; } @@ -69,22 +78,36 @@ void ProfilerStubBuilder::ProfileOpType(GateRef glue, GateRef pc, GateRef func, } Bind(&profiler); { - Label pushLabel(env); + Label uninitialize(env); Label compareLabel(env); + Label updateSlot(env); + Label updateProfile(env); GateRef slotId = ZExtInt8ToInt32(Load(VariableType::INT8(), pc, IntPtr(1))); GateRef slotValue = GetValueFromTaggedArray(profileTypeInfo, slotId); DEFVARIABLE(curType, VariableType::INT32(), type); - Branch(TaggedIsInt(slotValue), &compareLabel, &pushLabel); + DEFVARIABLE(curCount, VariableType::INT32(), Int32(0)); + Branch(TaggedIsInt(slotValue), &compareLabel, &uninitialize); Bind(&compareLabel); { GateRef oldSlotValue = TaggedGetInt(slotValue); - curType = Int32Or(oldSlotValue, type); - Branch(Int32Equal(oldSlotValue, *curType), &exit, &pushLabel); + GateRef oldType = Int32And(oldSlotValue, Int32(PGOSampleType::AnyType())); + curType = Int32Or(oldType, type); + curCount = Int32And(oldSlotValue, Int32(0xfffffc00)); // 0xfffffc00: count bits + Branch(Int32Equal(oldType, *curType), &exit, &updateSlot); + } + Bind(&uninitialize); + { + Branch(TaggedIsUndefined(slotValue), &updateSlot, &updateProfile); } - Bind(&pushLabel); + Bind(&updateSlot); + { + GateRef newSlotValue = Int32Or(*curCount, *curType); + SetValueToTaggedArray(VariableType::JS_ANY(), glue, profileTypeInfo, slotId, IntToTaggedInt(newSlotValue)); + Jump(&updateProfile); + } + Bind(&updateProfile); { - SetValueToTaggedArray(VariableType::JS_ANY(), glue, profileTypeInfo, slotId, IntToTaggedInt(*curType)); GateRef method = Load(VariableType::JS_ANY(), func, 
IntPtr(JSFunctionBase::METHOD_OFFSET)); GateRef firstPC = Load(VariableType::NATIVE_POINTER(), method, IntPtr(Method::NATIVE_POINTER_OR_BYTECODE_ARRAY_OFFSET)); @@ -112,18 +135,48 @@ void ProfilerStubBuilder::ProfileDefineClass(GateRef glue, GateRef pc, GateRef f env->SubCfgExit(); } -void ProfilerStubBuilder::ProfileCreateObject(GateRef glue, GateRef pc, GateRef func, GateRef originObj, GateRef newObj) +void ProfilerStubBuilder::ProfileCreateObject(GateRef glue, GateRef pc, GateRef func, GateRef newObj) { auto env = GetEnvironment(); Label subEntry(env); env->SubCfgEntry(&subEntry); + Label exit(env); - GateRef method = Load(VariableType::JS_ANY(), func, IntPtr(JSFunctionBase::METHOD_OFFSET)); - GateRef firstPC = - Load(VariableType::NATIVE_POINTER(), method, IntPtr(Method::NATIVE_POINTER_OR_BYTECODE_ARRAY_OFFSET)); - GateRef offset = TruncPtrToInt32(PtrSub(pc, firstPC)); - CallNGCRuntime(glue, RTSTUB_ID(ProfileCreateObject), { glue, func, offset, originObj, newObj }); - + DEFVARIABLE(traceId, VariableType::INT32(), Int32(0)); + Label isArray(env); + Label profile(env); + Label calculateTraceId(env); + Branch(TaggedIsJSArray(newObj), &isArray, &calculateTraceId); + Bind(&isArray); + { + GateRef traceIdOffset = IntPtr(JSArray::TRACE_INDEX_OFFSET); + traceId = Load(VariableType::INT32(), newObj, traceIdOffset); + Label uninitialize(env); + Branch(Int32GreaterThan(*traceId, Int32(0)), &exit, &uninitialize); + Bind(&uninitialize); + { + auto pfAddr = LoadPfHeaderFromConstPool(func); + traceId = TruncPtrToInt32(PtrSub(pc, pfAddr)); + Store(VariableType::INT32(), glue, newObj, traceIdOffset, *traceId); + Jump(&profile); + } + } + Bind(&calculateTraceId); + { + auto pfAddr = LoadPfHeaderFromConstPool(func); + traceId = TruncPtrToInt32(PtrSub(pc, pfAddr)); + Jump(&profile); + } + Bind(&profile); + { + GateRef method = Load(VariableType::JS_ANY(), func, IntPtr(JSFunctionBase::METHOD_OFFSET)); + GateRef firstPC = + Load(VariableType::NATIVE_POINTER(), method, IntPtr(Method::NATIVE_POINTER_OR_BYTECODE_ARRAY_OFFSET)); + GateRef offset = TruncPtrToInt32(PtrSub(pc, firstPC)); + CallNGCRuntime(glue, RTSTUB_ID(ProfileCreateObject), { glue, func, offset, newObj, *traceId }); + Jump(&exit); + } + Bind(&exit); env->SubCfgExit(); } @@ -148,60 +201,54 @@ void ProfilerStubBuilder::ProfileObjLayout(GateRef glue, GateRef pc, GateRef fun env->SubCfgExit(); } -void ProfilerStubBuilder::ProfileCall( - GateRef glue, GateRef pc, GateRef profileTypeInfo, GateRef target, OperationType type) +void ProfilerStubBuilder::ProfileObjIndex(GateRef glue, GateRef pc, GateRef func, GateRef object) +{ + auto env = GetEnvironment(); + Label subEntry(env); + env->SubCfgEntry(&subEntry); + Label isHeap(env); + Label exit(env); + Branch(TaggedIsHeapObject(object), &isHeap, &exit); + Bind(&isHeap); + { + GateRef method = Load(VariableType::JS_ANY(), func, IntPtr(JSFunctionBase::METHOD_OFFSET)); + GateRef firstPC = + Load(VariableType::NATIVE_POINTER(), method, IntPtr(Method::NATIVE_POINTER_OR_BYTECODE_ARRAY_OFFSET)); + GateRef offset = TruncPtrToInt32(PtrSub(pc, firstPC)); + CallNGCRuntime(glue, RTSTUB_ID(ProfileObjIndex), { glue, func, offset, object }); + Jump(&exit); + } + Bind(&exit); + env->SubCfgExit(); +} + +void ProfilerStubBuilder::ProfileCall(GateRef glue, GateRef pc, GateRef func, GateRef target) { auto env = GetEnvironment(); Label subEntry(env); env->SubCfgEntry(&subEntry); Label exit(env); - Label fastpath(env); Label slowpath(env); + Label fastpath(env); - DEFVARIABLE(inc, VariableType::INT32(), Int32(1)); - 
Branch(TaggedIsUndefined(profileTypeInfo), &slowpath, &fastpath); - Bind(&fastpath); + Label targetIsFunction(env); + Branch(IsJSFunction(target), &targetIsFunction, &exit); + Bind(&targetIsFunction); { - Label initialLabel(env); - Label uninitialLabel(env); - GateRef slotId = Int32(panda::ecmascript::ProfileTypeInfo::INVALID_SLOT_INDEX); - if (type == OperationType::CALL_WIDE) { - GateRef currentInstHigh = ZExtInt8ToInt16(Load(VariableType::INT8(), pc, IntPtr(HIGH_WORD_OFFSET))); - GateRef currentInstHighLsl = Int16LSL(currentInstHigh, Int16(BITS_OF_WORD)); - GateRef currentInstLow = ZExtInt8ToInt16(Load(VariableType::INT8(), pc, IntPtr(LOW_WORD_OFFSET))); - slotId = ZExtInt16ToInt32(Int16Add(currentInstHighLsl, currentInstLow)); - } else { - slotId = ZExtInt8ToInt32(Load(VariableType::INT8(), pc, IntPtr(1))); - } - GateRef slotValue = GetValueFromTaggedArray(profileTypeInfo, slotId); - Branch(TaggedIsInt(slotValue), &initialLabel, &uninitialLabel); - Bind(&initialLabel); + GateRef targetProfileInfo = GetProfileTypeInfo(target); + Label nonHotness(env); + Branch(TaggedIsUndefined(targetProfileInfo), &nonHotness, &exit); + Bind(&nonHotness); { - Label fastLabel(env); - GateRef oldInc = TaggedGetInt(slotValue); - Branch(Int32GreaterThan(oldInc, Int32(MAX_PROFILE_CALL_COUNT)), &exit, &fastLabel); - Bind(&fastLabel); - GateRef count = Int32Add(oldInc, *inc); - SetValueToTaggedArray(VariableType::JS_ANY(), glue, profileTypeInfo, slotId, IntToTaggedInt(count)); - inc = Int32(MIN_PROFILE_CALL_INTERVAL); - GateRef mod = Int32Mod(oldInc, *inc); - Branch(Int32Equal(mod, Int32(0)), &slowpath, &exit); - } - Bind(&uninitialLabel); - { - Label fastLabel(env); - Branch(TaggedIsUndefined(slotValue), &fastLabel, &slowpath); - Bind(&fastLabel); - SetValueToTaggedArray(VariableType::JS_ANY(), glue, profileTypeInfo, slotId, IntToTaggedInt(*inc)); - Jump(&slowpath); + GateRef method = Load(VariableType::JS_ANY(), func, IntPtr(JSFunctionBase::METHOD_OFFSET)); + GateRef firstPC = + Load(VariableType::NATIVE_POINTER(), method, IntPtr(Method::NATIVE_POINTER_OR_BYTECODE_ARRAY_OFFSET)); + GateRef offset = TruncPtrToInt32(PtrSub(pc, firstPC)); + CallNGCRuntime(glue, RTSTUB_ID(ProfileCall), { glue, func, target, offset, Int32(1)}); + Jump(&exit); } } - Bind(&slowpath); - { - CallNGCRuntime(glue, RTSTUB_ID(ProfileCall), { glue, target, *inc}); - Jump(&exit); - } Bind(&exit); env->SubCfgExit(); } @@ -226,17 +273,12 @@ GateRef ProfilerStubBuilder::UpdateTrackTypeInPropAttr(GateRef attr, GateRef val { newTrackType = TaggedToTrackType(value); Label update(env); - Label nonFirst(env); - Branch(Equal(oldTrackType, Int32(static_cast(TrackType::NONE))), &update, &nonFirst); - Bind(&nonFirst); + Label merge(env); + Branch(Int32Equal(*newTrackType, Int32(static_cast(TrackType::TAGGED))), &update, &merge); + Bind(&merge); { - Label isNotEqual(env); - Branch(Equal(oldTrackType, *newTrackType), &exit, &isNotEqual); - Bind(&isNotEqual); - { - newTrackType = Int32(static_cast(TrackType::TAGGED)); - Jump(&update); - } + newTrackType = Int32Or(oldTrackType, *newTrackType); + Branch(Int32Equal(oldTrackType, *newTrackType), &exit, &update); } Bind(&update); { @@ -250,6 +292,63 @@ GateRef ProfilerStubBuilder::UpdateTrackTypeInPropAttr(GateRef attr, GateRef val return ret; } +void ProfilerStubBuilder::ProfileObjLayoutOrIndex(GateRef glue, GateRef receiver, GateRef key, GateRef isStore, + ProfileOperation callback) +{ + if (callback.IsEmpty()) { + return; + } + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + 
Label exit(env); + Label isHeap(env); + Branch(TaggedIsHeapObject(receiver), &isHeap, &exit); + Bind(&isHeap); + GateRef index64 = TryToElementsIndex(glue, key); + Label validIndex(env); + Label profileIndex(env); + Label profileLayout(env); + Label greaterThanInt32Max(env); + Label notGreaterThanInt32Max(env); + Branch(Int64GreaterThanOrEqual(index64, Int64(INT32_MAX)), &greaterThanInt32Max, &notGreaterThanInt32Max); + Bind(&greaterThanInt32Max); + { + Jump(&exit); + } + Bind(&notGreaterThanInt32Max); + GateRef index = TruncInt64ToInt32(index64); + Branch(Int32GreaterThanOrEqual(index, Int32(0)), &validIndex, &profileLayout); + Bind(&validIndex); + { + Branch(IsTypedArray(receiver), &profileIndex, &profileLayout); + Bind(&profileIndex); + { + callback.ProfileObjIndex(receiver); + Jump(&exit); + } + } + Bind(&profileLayout); + { + Label store(env); + Label load(env); + Branch(isStore, &store, &load); + Bind(&store); + { + callback.ProfileObjLayoutByStore(receiver); + Jump(&exit); + } + Bind(&load); + { + callback.ProfileObjLayoutByLoad(receiver); + Jump(&exit); + } + } + + Bind(&exit); + env->SubCfgExit(); +} + void ProfilerStubBuilder::UpdatePropAttrIC( GateRef glue, GateRef receiver, GateRef value, GateRef handler, ProfileOperation callback) { @@ -324,4 +423,134 @@ GateRef ProfilerStubBuilder::TaggedToTrackType(GateRef value) env->SubCfgExit(); return ret; } + +void ProfilerStubBuilder::ProfileBranch(GateRef glue, GateRef pc, GateRef func, GateRef profileTypeInfo, bool isTrue) +{ + auto env = GetEnvironment(); + Label subEntry(env); + env->SubCfgEntry(&subEntry); + + Label profiler(env); + Label hasSlot(env); + Label currentIsTrue(env); + Label currentIsFalse(env); + Label genCurrentWeight(env); + Label compareLabel(env); + Label updateSlot(env); + Label updateProfile(env); + Label needUpdate(env); + Label exit(env); + DEFVARIABLE(oldType, VariableType::INT32(), Int32(PGOSampleType::None())); + DEFVARIABLE(newType, VariableType::INT32(), Int32(PGOSampleType::NormalBranch())); + DEFVARIABLE(oldPrama, VariableType::INT32(), Int32(PGOSampleType::None())); + DEFVARIABLE(newTrue, VariableType::INT32(), isTrue ? Int32(1) : Int32(0)); + DEFVARIABLE(newFalse, VariableType::INT32(), isTrue ? Int32(0) : Int32(1)); + + Branch(TaggedIsUndefined(profileTypeInfo), &exit, &profiler); + Bind(&profiler); + { + GateRef slotId = ZExtInt8ToInt32(Load(VariableType::INT8(), pc, IntPtr(1))); + GateRef slotValue = GetValueFromTaggedArray(profileTypeInfo, slotId); + Branch(TaggedIsHole(slotValue), &exit, &hasSlot); // hole: skip profiling; undefined: first-time update + Bind(&hasSlot); + { + Branch(TaggedIsInt(slotValue), &compareLabel, &updateSlot); + Bind(&compareLabel); + { + GateRef oldSlotValue = TaggedGetInt(slotValue); + GateRef oldTrue = Int32LSR(oldSlotValue, Int32(21)); // 21: trueWeight shift bit + GateRef oldFalse = Int32LSR(oldSlotValue, Int32(10)); // 10: falseWeight shift bit + oldFalse = Int32And(oldFalse, Int32(0x7ff)); // 0x7ff: weight bits + oldPrama = Int32And(oldSlotValue, Int32(PGOSampleType::AnyType())); + Branch(Int32LessThan(Int32Add(oldTrue, oldFalse), Int32(2000)), &needUpdate, &exit); // 2000: limit + Bind(&needUpdate); + { + oldType = GetBranchTypeFromWeight(oldTrue, oldFalse); + newTrue = Int32Add(*newTrue, oldTrue); + newFalse = Int32Add(*newFalse, oldFalse); + newType = GetBranchTypeFromWeight(*newTrue, *newFalse); + Jump(&updateSlot); + } + } + Bind(&updateSlot); + { + GateRef newSlotValue = Int32Or(*oldPrama, Int32LSL(*newTrue, Int32(21))); // 21: trueWeight shift bit + newSlotValue = Int32Or(newSlotValue, Int32LSL(*newFalse, Int32(10))); // 10: falseWeight shift bit + + SetValueToTaggedArray(VariableType::JS_ANY(), glue, profileTypeInfo, + slotId, IntToTaggedInt(newSlotValue)); + Branch(Int32Equal(*oldType, *newType), &exit, &updateProfile); + } + Bind(&updateProfile); + { + GateRef method = Load(VariableType::JS_ANY(), func, IntPtr(JSFunctionBase::METHOD_OFFSET)); + GateRef firstPC = Load(VariableType::NATIVE_POINTER(), method, + IntPtr(Method::NATIVE_POINTER_OR_BYTECODE_ARRAY_OFFSET)); + GateRef offset = TruncPtrToInt32(PtrSub(pc, firstPC)); + CallNGCRuntime(glue, RTSTUB_ID(ProfileOpType), { glue, func, offset, *newType }); + Jump(&exit); + } + } + } + Bind(&exit); + env->SubCfgExit(); +} + +GateRef ProfilerStubBuilder::GetBranchTypeFromWeight(GateRef trueWeight, GateRef falseWeight) +{ + auto env = GetEnvironment(); + Label subEntry(env); + env->SubCfgEntry(&subEntry); + Label exit(env); + + DEFVARIABLE(curType, VariableType::INT32(), Int32(PGOSampleType::NormalBranch())); + Label trueBranch(env); + Label strongTrue(env); + Label notStrongTrue(env); + Label notTrueBranch(env); + Label falseBranch(env); + Label strongFalse(env); + Label notStrongFalse(env); + Branch(Int32GreaterThan(trueWeight, Int32Mul(falseWeight, Int32(BranchWeight::WEAK_WEIGHT))), + &trueBranch, &notTrueBranch); + Bind(&trueBranch); + { + Branch(Int32GreaterThan(trueWeight, Int32Mul(falseWeight, Int32(BranchWeight::STRONG_WEIGHT))), + &strongTrue, &notStrongTrue); + Bind(&strongTrue); + { + curType = Int32(PGOSampleType::StrongLikely()); + Jump(&exit); + } + Bind(&notStrongTrue); + { + curType = Int32(PGOSampleType::Likely()); + Jump(&exit); + } + } + Bind(&notTrueBranch); + { + Branch(Int32GreaterThan(falseWeight, Int32Mul(trueWeight, Int32(BranchWeight::WEAK_WEIGHT))), + &falseBranch, &exit); + Bind(&falseBranch); + { + Branch(Int32GreaterThan(falseWeight, Int32Mul(trueWeight, Int32(BranchWeight::STRONG_WEIGHT))), + &strongFalse, &notStrongFalse); + Bind(&strongFalse); + { + curType = Int32(PGOSampleType::StrongUnLikely()); + Jump(&exit); + } + Bind(&notStrongFalse); + { + curType = Int32(PGOSampleType::Unlikely()); + Jump(&exit); + } + } + } + Bind(&exit); + auto result = *curType; + env->SubCfgExit(); + return result; +} } //
namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/profiler_stub_builder.h b/ecmascript/compiler/profiler_stub_builder.h index 043cf293a3a10c82a978bfb799cfa0ba51210db8..340f81be8d73625c1141e0e4f19ff6d72ffded65 100644 --- a/ecmascript/compiler/profiler_stub_builder.h +++ b/ecmascript/compiler/profiler_stub_builder.h @@ -31,12 +31,17 @@ public: void PGOProfiler(GateRef glue, GateRef pc, GateRef func, GateRef profileTypeInfo, const std::vector &values, OperationType type); - void ProfileCall(GateRef glue, GateRef pc, GateRef profileTypeInfo, GateRef target, OperationType type); + void ProfileCall(GateRef glue, GateRef pc, GateRef func, GateRef target); void ProfileOpType(GateRef glue, GateRef pc, GateRef func, GateRef profileTypeInfo, GateRef type); void ProfileDefineClass(GateRef glue, GateRef pc, GateRef func, GateRef constructor); - void ProfileCreateObject(GateRef glue, GateRef pc, GateRef func, GateRef originObj, GateRef newObj); + void ProfileCreateObject(GateRef glue, GateRef pc, GateRef func, GateRef newObj); void ProfileObjLayout(GateRef glue, GateRef pc, GateRef func, GateRef object, GateRef store); + void ProfileObjIndex(GateRef glue, GateRef pc, GateRef func, GateRef object); + void ProfileBranch(GateRef glue, GateRef pc, GateRef func, GateRef profileTypeInfo, bool isTrue); + GateRef GetBranchTypeFromWeight(GateRef trueWeight, GateRef falseWeight); + void ProfileObjLayoutOrIndex(GateRef glue, GateRef receiver, GateRef key, GateRef isStore, + ProfileOperation callback); GateRef UpdateTrackTypeInPropAttr(GateRef attr, GateRef value, ProfileOperation callback); void UpdatePropAttrIC(GateRef glue, GateRef receiver, GateRef value, GateRef handler, ProfileOperation callback); void UpdatePropAttrWithValue(GateRef glue, GateRef receiver, GateRef layout, GateRef attr, GateRef attrIndex, diff --git a/ecmascript/compiler/range_analysis.cpp b/ecmascript/compiler/range_analysis.cpp index cc0d5ae18e341885a5d319d18dd4d5b9b81d656b..d564a477f90dda04e22ab7c55bb8ca40d5449158 100644 --- a/ecmascript/compiler/range_analysis.cpp +++ b/ecmascript/compiler/range_analysis.cpp @@ -59,6 +59,12 @@ GateRef RangeAnalysis::VisitGate(GateRef gate) return VisitIndexCheck(gate); case OpCode::LOAD_ARRAY_LENGTH: return VisitLoadArrayLength(gate); + case OpCode::LOAD_STRING_LENGTH: + return VisitLoadStringLength(gate); + case OpCode::LOAD_TYPED_ARRAY_LENGTH: + return VisitLoadTypedArrayLength(gate); + case OpCode::RANGE_GUARD: + return VisitRangeGuard(gate); default: return VisitOthers(gate); } @@ -135,6 +141,12 @@ GateRef RangeAnalysis::VisitTypedBinaryOp(GateRef gate) case TypedBinOp::TYPED_SUB: range = GetRangeOfCalculate(gate); break; + case TypedBinOp::TYPED_MOD: + range = GetRangeOfCalculate(gate); + break; + case TypedBinOp::TYPED_MUL: + range = GetRangeOfCalculate(gate); + break; case TypedBinOp::TYPED_SHR: range = GetRangeOfShift(gate); break; @@ -150,7 +162,10 @@ GateRef RangeAnalysis::VisitTypedBinaryOp(GateRef gate) GateRef RangeAnalysis::VisitIndexCheck(GateRef gate) { ASSERT(IsInt32Type(gate)); - return UpdateRange(gate, RangeInfo(0, INT32_MAX - 1)); + auto value = GetRange(acc_.GetValueIn(gate, 0)); + auto largerRange = RangeInfo(0, INT32_MAX - 1); + auto intersected = value.intersection(largerRange); + return UpdateRange(gate, intersected); } GateRef RangeAnalysis::VisitLoadArrayLength(GateRef gate) @@ -159,10 +174,28 @@ GateRef RangeAnalysis::VisitLoadArrayLength(GateRef gate) return UpdateRange(gate, RangeInfo(0, INT32_MAX)); } +GateRef RangeAnalysis::VisitLoadStringLength(GateRef 
gate) +{ + ASSERT(IsInt32Type(gate)); + return UpdateRange(gate, RangeInfo(0, INT32_MAX)); +} + +GateRef RangeAnalysis::VisitLoadTypedArrayLength(GateRef gate) +{ + int32_t max = IsOnHeap() ? RangeInfo::TYPED_ARRAY_ONHEAP_MAX : INT32_MAX; + return UpdateRange(gate, RangeInfo(0, max)); +} + +GateRef RangeAnalysis::VisitRangeGuard(GateRef gate) +{ + auto left = acc_.GetFirstValue(gate); + auto right = acc_.GetSecondValue(gate); + return UpdateRange(gate, RangeInfo(left, right)); +} + template RangeInfo RangeAnalysis::GetRangeOfCalculate(GateRef gate) { - ASSERT((Op == TypedBinOp::TYPED_ADD) || (Op == TypedBinOp::TYPED_SUB)); auto left = GetRange(acc_.GetValueIn(gate, 0)); auto right = GetRange(acc_.GetValueIn(gate, 1)); if (left.IsNone() || right.IsNone()) { @@ -173,6 +206,10 @@ RangeInfo RangeAnalysis::GetRangeOfCalculate(GateRef gate) return left + right; case TypedBinOp::TYPED_SUB: return left - right; + case TypedBinOp::TYPED_MOD: + return left % right; + case TypedBinOp::TYPED_MUL: + return left * right; default: return RangeInfo::ANY(); } @@ -302,6 +339,12 @@ void RangeAnalysis::PrintRangeInfo() const case TypedBinOp::TYPED_ASHR: log += " ashr"; break; + case TypedBinOp::TYPED_MOD: + log += " mod"; + break; + case TypedBinOp::TYPED_MUL: + log += " mul"; + break; default: log += " other"; break; diff --git a/ecmascript/compiler/range_analysis.h b/ecmascript/compiler/range_analysis.h index fdbe79503094a3116d2c33adb45fa481ab5e8473..e1b8c83171f53b8cd4a2e5470c9e7b2a3603f256 100644 --- a/ecmascript/compiler/range_analysis.h +++ b/ecmascript/compiler/range_analysis.h @@ -27,14 +27,19 @@ namespace panda::ecmascript::kungfu { class RangeAnalysis : public GraphVisitor { public: RangeAnalysis(Circuit *circuit, Chunk* chunk, ChunkVector& typeInfos, - ChunkVector& rangeInfos) + ChunkVector& rangeInfos, bool onHeapCheck) : GraphVisitor(circuit, chunk), acc_(circuit), builder_(circuit), - typeInfos_(typeInfos), rangeInfos_(rangeInfos) {} + typeInfos_(typeInfos), rangeInfos_(rangeInfos), onHeapCheck_(onHeapCheck) {} void Run(); GateRef VisitGate(GateRef gate); void PrintRangeInfo() const; private: + bool IsOnHeap() const + { + return onHeapCheck_; + } + GateRef VisitPhi(GateRef gate); GateRef VisitTypedBinaryOp(GateRef gate); GateRef VisitTypedUnaryOp(GateRef gate); @@ -42,6 +47,9 @@ private: GateRef VisitOthers(GateRef gate); GateRef VisitIndexCheck(GateRef gate); GateRef VisitLoadArrayLength(GateRef gate); + GateRef VisitLoadStringLength(GateRef gate); + GateRef VisitLoadTypedArrayLength(GateRef gate); + GateRef VisitRangeGuard(GateRef gate); template RangeInfo GetRangeOfCalculate(GateRef gate); template @@ -55,6 +63,7 @@ private: CircuitBuilder builder_; ChunkVector& typeInfos_; ChunkVector& rangeInfos_; + bool onHeapCheck_ {false}; }; } // panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_RANGE_ANALYSIS_H diff --git a/ecmascript/compiler/range_guard.cpp b/ecmascript/compiler/range_guard.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4d1c7a44db005f0d7564c3f44176b932df620e4d --- /dev/null +++ b/ecmascript/compiler/range_guard.cpp @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ecmascript/compiler/range_guard.h" + +namespace panda::ecmascript::kungfu { +void RangeGuard::Run() +{ + dependChains_.resize(circuit_->GetMaxGateId() + 1, nullptr); // 1: +1 for size + GateRef entry = acc_.GetDependRoot(); + VisitDependEntry(entry); + VisitGraph(); +} + +GateRef RangeGuard::VisitGate(GateRef gate) +{ + auto op = acc_.GetOpCode(gate); + switch (op) { + case OpCode::VALUE_SELECTOR: + case OpCode::TYPED_BINARY_OP: + case OpCode::TYPED_UNARY_OP: + case OpCode::INDEX_CHECK: { + return TryApplyRangeGuardGate(gate); + } + case OpCode::DEPEND_SELECTOR: { + return TraverseDependSelector(gate); + } + default: { + if (acc_.GetDependCount(gate) == 1) { // 1: depend in is 1 + return TraverseOthers(gate); + } + break; + } + } + return Circuit::NullGate(); +} + +GateRef RangeGuard::TraverseOthers(GateRef gate) +{ + ASSERT(acc_.GetDependCount(gate) >= 1); + auto depIn = acc_.GetDep(gate); + auto dependChain = GetDependChain(depIn); + if (dependChain == nullptr) { + return Circuit::NullGate(); + } + + return UpdateDependChain(gate, dependChain); +} + +GateRef RangeGuard::TraverseDependSelector(GateRef gate) +{ + auto state = acc_.GetState(gate); + if (acc_.IsLoopHead(state)) { + return TraverseOthers(gate); + } + + auto dependCount = acc_.GetDependCount(gate); + for (size_t i = 0; i < dependCount; ++i) { + auto depend = acc_.GetDep(gate, i); + auto dependChain = GetDependChain(depend); + if (dependChain == nullptr) { + return Circuit::NullGate(); + } + } + + // all depend inputs are done. + auto depend = acc_.GetDep(gate); + auto dependChain = GetDependChain(depend); + DependChains* copy = new (chunk_) DependChains(chunk_); + copy->CopyFrom(dependChain); + for (size_t i = 1; i < dependCount; ++i) { // 1: second in + auto dependIn = acc_.GetDep(gate, i); + auto tempChain = GetDependChain(dependIn); + copy->Merge(tempChain); + } + return UpdateDependChain(gate, copy); +} + +GateRef RangeGuard::TryApplyRangeGuardForLength(DependChains* dependChain, GateRef gate, GateRef input) +{ + ASSERT(dependChain != nullptr); + uint32_t length = dependChain->FoundIndexCheckedForLength(this, input); + if (length) { // a non-zero length means the IndexCheck was found + Environment env(gate, circuit_, &builder_); + // If an IndexCheck guards this ArrayLength use, the length must be at least 1. + auto rangeGuardGate = builder_.RangeGuard(input, 1, length); + return rangeGuardGate; + } + return Circuit::NullGate(); +} + +GateRef RangeGuard::TryApplyRangeGuardForIndex(DependChains* dependChain, GateRef gate, GateRef input) +{ + ASSERT(dependChain != nullptr); + uint32_t length = dependChain->FoundIndexCheckedForIndex(this, input); + if (length) { // a non-zero length means the IndexCheck was found + Environment env(gate, circuit_, &builder_); + // If an IndexCheck guards this index, it must lie within the array's range.
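These two helpers encode the interval facts that RangeAnalysis later consumes: once an IndexCheck dominates a use, a checked length is at least 1 and a checked index is non-negative and bounded by the length's type-specific maximum. A standalone sketch of that narrowing, where `Interval` is a simplified stand-in for the compiler's RangeInfo type:

```
#include <cstdint>

// Simplified stand-in for the compiler's RangeInfo interval type.
struct Interval {
    int64_t min;
    int64_t max;
};

// A length that passed an IndexCheck can never be zero, so it narrows to
// [1, typeMax]; typeMax is INT32_MAX for JS arrays, or the on-heap maximum
// for typed arrays, matching CheckIndexCheckLengthInput below.
static Interval NarrowCheckedLength(uint32_t typeMax)
{
    return {1, static_cast<int64_t>(typeMax)};
}

// An index that passed an IndexCheck is non-negative and below the length,
// so it narrows to [0, typeMax].
static Interval NarrowCheckedIndex(uint32_t typeMax)
{
    return {0, static_cast<int64_t>(typeMax)};
}
```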
+ auto rangeGuardGate = builder_.RangeGuard(input, 0, length); + return rangeGuardGate; + } + return Circuit::NullGate(); +} + +GateRef RangeGuard::TryApplyRangeGuardGate(GateRef gate) +{ + if (acc_.GetDependCount(gate) < 1) { + return Circuit::NullGate(); + } + + auto depIn = acc_.GetDep(gate); + auto dependChain = GetDependChain(depIn); + // dependChain is null + if (dependChain == nullptr) { + return Circuit::NullGate(); + } + + auto numIns = acc_.GetInValueCount(gate); + for (size_t i = 0; i < numIns; ++i) { + auto originalInput = acc_.GetValueIn(gate, i); + auto originalInputOpcode = acc_.GetOpCode(originalInput); + auto rangeGuardGate = Circuit::NullGate(); + if (originalInputOpcode == OpCode::LOAD_TYPED_ARRAY_LENGTH || + originalInputOpcode == OpCode::LOAD_ARRAY_LENGTH) { + rangeGuardGate = TryApplyRangeGuardForLength(dependChain, gate, originalInput); + } else if(originalInputOpcode != OpCode::CONSTANT && rangeGuardGate == Circuit::NullGate()) { + rangeGuardGate = TryApplyRangeGuardForIndex(dependChain, gate, originalInput); + } + if (rangeGuardGate != Circuit::NullGate()) { + acc_.ReplaceValueIn(gate, rangeGuardGate, i); + } + } + dependChain = dependChain->UpdateNode(gate); + return UpdateDependChain(gate, dependChain); +} + +GateRef RangeGuard::VisitDependEntry(GateRef gate) +{ + auto empty = new (chunk_) DependChains(chunk_); + return UpdateDependChain(gate, empty); +} + +GateRef RangeGuard::UpdateDependChain(GateRef gate, DependChains* dependChain) +{ + ASSERT(dependChain != nullptr); + auto oldDependChain = GetDependChain(gate); + if (dependChain->Equals(oldDependChain)) { + return Circuit::NullGate(); + } + dependChains_[acc_.GetId(gate)] = dependChain; + return gate; +} + +uint32_t RangeGuard::CheckIndexCheckLengthInput(GateRef lhs, GateRef rhs) +{ + auto lhsOpcode = acc_.GetOpCode(lhs); + if (lhsOpcode == OpCode::INDEX_CHECK) { + auto indexCheckLengthInput = acc_.GetValueIn(lhs, 0); // length + auto indexCheckLengthInputOpcode = acc_.GetOpCode(indexCheckLengthInput); + if (indexCheckLengthInput == rhs && indexCheckLengthInputOpcode == OpCode::LOAD_TYPED_ARRAY_LENGTH) { + return RangeInfo::TYPED_ARRAY_ONHEAP_MAX; + } else if (indexCheckLengthInput == rhs && indexCheckLengthInputOpcode == OpCode::LOAD_ARRAY_LENGTH) { + return INT32_MAX; + } + } + return 0; +} + +uint32_t RangeGuard::CheckIndexCheckIndexInput(GateRef lhs, GateRef rhs) +{ + auto lhsOpcode = acc_.GetOpCode(lhs); + if (lhsOpcode == OpCode::INDEX_CHECK) { + auto indexCheckLengthInput = acc_.GetValueIn(lhs, 0); // length + auto indexCheckIndexInput = acc_.GetValueIn(lhs, 1); // index + auto indexCheckLengthInputOpcode = acc_.GetOpCode(indexCheckLengthInput); + // TYPED_ARRAY + if (indexCheckIndexInput == rhs && indexCheckLengthInputOpcode == OpCode::LOAD_TYPED_ARRAY_LENGTH) { + return RangeInfo::TYPED_ARRAY_ONHEAP_MAX; + } else if (indexCheckIndexInput == rhs && indexCheckLengthInputOpcode == OpCode::LOAD_ARRAY_LENGTH) { // ARRAY + return INT32_MAX; + } + } + return 0; +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/range_guard.h b/ecmascript/compiler/range_guard.h new file mode 100644 index 0000000000000000000000000000000000000000..79719660a4b34d6fcceed26cf4bfdd18f25c2b5f --- /dev/null +++ b/ecmascript/compiler/range_guard.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_RANGE_GUARD_H +#define ECMASCRIPT_COMPILER_RANGE_GUARD_H + +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/gate_accessor.h" +#include "ecmascript/compiler/graph_visitor.h" +#include "ecmascript/compiler/pass_manager.h" +#include "ecmascript/compiler/base/depend_chain_helper.h" +#include "ecmascript/mem/chunk_containers.h" +#include "ecmascript/compiler/number_gate_info.h" + +namespace panda::ecmascript::kungfu { +class DependChains; +class RangeGuard : public GraphVisitor { +public: + RangeGuard(Circuit *circuit, Chunk* chunk) + : GraphVisitor(circuit, chunk), circuit_(circuit), + builder_(circuit), dependChains_(chunk) {} + + ~RangeGuard() = default; + + void Run(); + + GateRef VisitGate(GateRef gate) override; + bool CheckInputSource(GateRef lhs, GateRef rhs); + uint32_t CheckIndexCheckLengthInput(GateRef lhs, GateRef rhs); + uint32_t CheckIndexCheckIndexInput(GateRef lhs, GateRef rhs); +private: + + DependChains* GetDependChain(GateRef dependIn) + { + size_t idx = acc_.GetId(dependIn); + ASSERT(idx <= circuit_->GetMaxGateId()); + return dependChains_[idx]; + } + + GateRef VisitDependEntry(GateRef gate); + GateRef UpdateDependChain(GateRef gate, DependChains* dependInfo); + GateRef TryApplyRangeGuardForLength(DependChains* dependInfo, GateRef gate, GateRef input); + GateRef TryApplyRangeGuardForIndex(DependChains* dependInfo, GateRef gate, GateRef input); + GateRef TryApplyRangeGuardGate(GateRef gate); + GateRef TraverseOthers(GateRef gate); + GateRef TraverseDependSelector(GateRef gate); + + Circuit* circuit_; + CircuitBuilder builder_; + ChunkVector dependChains_; + + friend class RangeInfo; +}; +} // panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_RANGE_GUARD_H \ No newline at end of file diff --git a/ecmascript/compiler/slowpath_lowering.cpp b/ecmascript/compiler/slowpath_lowering.cpp index cf14982e4bd7278f86374d34a95d5f00c01a79d6..9de35340907f732419f18223e7589869745ea6cc 100644 --- a/ecmascript/compiler/slowpath_lowering.cpp +++ b/ecmascript/compiler/slowpath_lowering.cpp @@ -14,6 +14,8 @@ */ #include "ecmascript/compiler/slowpath_lowering.h" +#include "ecmascript/compiler/gate_meta_data.h" +#include "ecmascript/dfx/vm_thread_control.h" #include "ecmascript/dfx/vmstat/opt_code_profiler.h" #include "ecmascript/js_thread.h" #include "ecmascript/message_string.h" @@ -63,8 +65,8 @@ void SlowPathLowering::CallRuntimeLowering() case OpCode::TYPEDFASTCALL: LowerTypedFastCall(gate); break; - case OpCode::UPDATE_HOTNESS: - LowerUpdateHotness(gate); + case OpCode::CHECK_SAFEPOINT_AND_STACKOVER: + LowerCheckSafePointAndStackOver(gate); break; case OpCode::GET_ENV: LowerGetEnv(gate); @@ -1450,7 +1452,8 @@ void SlowPathLowering::LowerFastStrictEqual(GateRef gate) void SlowPathLowering::LowerCreateEmptyArray(GateRef gate) { GateRef result = builder_.CallStub(glue_, gate, CommonStubCSigns::CreateEmptyArray, { glue_ }); - ReplaceHirWithValue(gate, result, true); + GateRef newRes = LowerUpdateArrayHClass(gate, result); + ReplaceHirWithValue(gate, newRes, true); } void 
diff --git a/ecmascript/compiler/slowpath_lowering.cpp b/ecmascript/compiler/slowpath_lowering.cpp
index cf14982e4bd7278f86374d34a95d5f00c01a79d6..9de35340907f732419f18223e7589869745ea6cc 100644
--- a/ecmascript/compiler/slowpath_lowering.cpp
+++ b/ecmascript/compiler/slowpath_lowering.cpp
@@ -14,6 +14,8 @@
  */
 
 #include "ecmascript/compiler/slowpath_lowering.h"
+#include "ecmascript/compiler/gate_meta_data.h"
+#include "ecmascript/dfx/vm_thread_control.h"
 #include "ecmascript/dfx/vmstat/opt_code_profiler.h"
 #include "ecmascript/js_thread.h"
 #include "ecmascript/message_string.h"
@@ -63,8 +65,8 @@ void SlowPathLowering::CallRuntimeLowering()
             case OpCode::TYPEDFASTCALL:
                 LowerTypedFastCall(gate);
                 break;
-            case OpCode::UPDATE_HOTNESS:
-                LowerUpdateHotness(gate);
+            case OpCode::CHECK_SAFEPOINT_AND_STACKOVER:
+                LowerCheckSafePointAndStackOver(gate);
                 break;
             case OpCode::GET_ENV:
                 LowerGetEnv(gate);
@@ -1450,7 +1452,8 @@ void SlowPathLowering::LowerFastStrictEqual(GateRef gate)
 void SlowPathLowering::LowerCreateEmptyArray(GateRef gate)
 {
     GateRef result = builder_.CallStub(glue_, gate, CommonStubCSigns::CreateEmptyArray, { glue_ });
-    ReplaceHirWithValue(gate, result, true);
+    GateRef newRes = LowerUpdateArrayHClass(gate, result);
+    ReplaceHirWithValue(gate, newRes, true);
 }
 
 void SlowPathLowering::LowerCreateEmptyObject(GateRef gate)
@@ -1464,7 +1467,23 @@ void SlowPathLowering::LowerCreateArrayWithBuffer(GateRef gate)
     GateRef jsFunc = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::FUNC);
     GateRef index = builder_.TruncInt64ToInt32(acc_.GetValueIn(gate, 0));
     GateRef result = builder_.CallStub(glue_, gate, CommonStubCSigns::CreateArrayWithBuffer, { glue_, index, jsFunc });
-    ReplaceHirWithValue(gate, result, true);
+    GateRef newRes = LowerUpdateArrayHClass(gate, result);
+    ReplaceHirWithValue(gate, newRes, true);
+}
+
+GateRef SlowPathLowering::LowerUpdateArrayHClass(GateRef gate, GateRef array)
+{
+    ElementsKind kind = acc_.TryGetElementsKind(gate);
+    if (!Elements::IsGeneric(kind)) {
+        auto thread = tsManager_->GetEcmaVM()->GetJSThread();
+        size_t hclassIndex = static_cast<size_t>(thread->GetArrayHClassIndexMap().at(kind));
+        GateRef gConstAddr = builder_.Load(VariableType::JS_POINTER(), glue_,
+            builder_.IntPtr(JSThread::GlueData::GetGlobalConstOffset(false)));
+        GateRef constantIndex = builder_.IntPtr(JSTaggedValue::TaggedTypeSize() * hclassIndex);
+        GateRef hclass = builder_.Load(VariableType::JS_POINTER(), gConstAddr, constantIndex);
+        builder_.Store(VariableType::JS_POINTER(), glue_, array, builder_.IntPtr(0), hclass);
+    }
+    return array;
 }
 
 void SlowPathLowering::LowerCreateObjectWithBuffer(GateRef gate)
@@ -2411,8 +2430,6 @@ void SlowPathLowering::LowerStLexVar(GateRef gate)
 
 void SlowPathLowering::LowerDefineClassWithBuffer(GateRef gate)
 {
-    GateType type = acc_.GetGateType(gate);
-
     // 5: number of value inputs
     ASSERT(acc_.GetNumValueIn(gate) == 5);
     GateRef jsFunc = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::FUNC);
@@ -2427,29 +2444,10 @@ void SlowPathLowering::LowerDefineClassWithBuffer(GateRef gate)
     Label isNotException(&builder_);
 
     GateRef result;
-    if (type.IsAnyType()) {
-        auto args = { proto, lexicalEnv, constpool,
-                      builder_.ToTaggedInt(methodId), builder_.ToTaggedInt(literalId), module };
-        result = LowerCallRuntime(gate, RTSTUB_ID(CreateClassWithBuffer), args, true);
-        builder_.Branch(builder_.IsSpecial(result, JSTaggedValue::VALUE_EXCEPTION), &isException, &isNotException);
-    } else {
-        int index = tsManager_->GetHClassIndexByClassGateType(type);
-        ASSERT(index != -1);
-        GateRef ihcIndex = builder_.Int32(index);
-        GateRef ihclass = builder_.GetObjectFromConstPool(glue_, gate, jsFunc, ihcIndex, ConstPoolType::CLASS_LITERAL);
-
-        int constructorIndex = tsManager_->GetConstructorHClassIndexByClassGateType(type);
-        ASSERT(index != -1);
-        GateRef constructorHcIndex = builder_.Int32(constructorIndex);
-        GateRef constructorHclass = builder_.GetObjectFromConstPool(glue_, gate, jsFunc,
-            constructorHcIndex, ConstPoolType::CLASS_LITERAL);
-
-        auto args = { proto, lexicalEnv, constpool,
-                      builder_.ToTaggedInt(methodId),
-                      builder_.ToTaggedInt(literalId), ihclass, constructorHclass, module };
-        result = LowerCallRuntime(gate, RTSTUB_ID(CreateClassWithIHClass), args, true);
-        builder_.Branch(builder_.IsSpecial(result, JSTaggedValue::VALUE_EXCEPTION), &isException, &isNotException);
-    }
+    auto args = { proto, lexicalEnv, constpool,
+                  builder_.ToTaggedInt(methodId), builder_.ToTaggedInt(literalId), module };
+    result = LowerCallRuntime(gate, RTSTUB_ID(CreateClassWithBuffer), args, true);
+    builder_.Branch(builder_.IsSpecial(result, JSTaggedValue::VALUE_EXCEPTION), &isException, &isNotException);
 
     StateDepend successControl;
     StateDepend failControl;
@@ -3160,19 +3158,34 @@ void SlowPathLowering::LowerTypedFastCall(GateRef gate)
     ReplaceHirWithPendingException(gate, state, result,
result); } -void SlowPathLowering::LowerUpdateHotness(GateRef gate) +void SlowPathLowering::LowerCheckSafePointAndStackOver(GateRef gate) { Environment env(gate, circuit_, &builder_); - GateRef interruptsFlag = builder_.Load(VariableType::INT8(), glue_, - builder_.IntPtr(JSThread::GlueData::GetInterruptVectorOffset(builder_.GetCompilationConfig()->Is32Bit()))); Label slowPath(&builder_); Label dispatch(&builder_); - builder_.Branch(builder_.Int8Equal(interruptsFlag, - builder_.Int8(VmThreadControl::VM_NEED_SUSPENSION)), &slowPath, &dispatch); + Label checkStackOver(&builder_); + Label stackOverflow(&builder_); + GateRef stackLimit = builder_.Load(VariableType::INT64(), glue_, + builder_.IntPtr(JSThread::GlueData::GetStackLimitOffset(builder_.GetCompilationConfig()->Is32Bit()))); + GateRef interruptsFlag = builder_.Load(VariableType::INT8(), glue_, + builder_.IntPtr(JSThread::GlueData::GetInterruptVectorOffset(builder_.GetCompilationConfig()->Is32Bit()))); + GateRef spValue = builder_.ReadSp(); + builder_.Branch(builder_.Int8Equal(interruptsFlag, builder_.Int8(VmThreadControl::VM_NEED_SUSPENSION)), + &slowPath, &checkStackOver, BranchWeight::ONE_WEIGHT, BranchWeight::DEOPT_WEIGHT); builder_.Bind(&slowPath); { - LowerCallRuntime(glue_, RTSTUB_ID(CheckSafePoint), { }, true); - builder_.Jump(&dispatch); + LowerCallRuntime(glue_, RTSTUB_ID(CheckSafePoint), {}, true); + builder_.Jump(&checkStackOver); + } + builder_.Bind(&checkStackOver); + { + builder_.Branch(builder_.Int64LessThanOrEqual(spValue, stackLimit), &stackOverflow, &dispatch, + BranchWeight::ONE_WEIGHT, BranchWeight::DEOPT_WEIGHT); + builder_.Bind(&stackOverflow); + { + GateRef res = LowerCallRuntime(glue_, RTSTUB_ID(ThrowStackOverflowException), {}, true); + builder_.Return(res); + } } builder_.Bind(&dispatch); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); diff --git a/ecmascript/compiler/slowpath_lowering.h b/ecmascript/compiler/slowpath_lowering.h index 0f802b463c9b29958a549cce7bc762523e7c784b..9db30322546f00811d2384ded3b4e57458207011 100644 --- a/ecmascript/compiler/slowpath_lowering.h +++ b/ecmascript/compiler/slowpath_lowering.h @@ -291,6 +291,7 @@ private: void LowerAsyncGeneratorReject(GateRef gate); void LowerSetGeneratorState(GateRef gate); GateRef GetValueFromTaggedArray(GateRef arrayGate, GateRef indexOffset); + GateRef LowerUpdateArrayHClass(GateRef gate, GateRef array); void AddProfiling(GateRef gate, bool skipGenerator = true); GateRef FastStrictEqual(GateRef left, GateRef right); void LowerWideLdPatchVar(GateRef gate); @@ -299,7 +300,7 @@ private: void LowerConstruct(GateRef gate); void LowerTypedCall(GateRef gate); void LowerTypedFastCall(GateRef gate); - void LowerUpdateHotness(GateRef gate); + void LowerCheckSafePointAndStackOver(GateRef gate); void LowerNotifyConcurrentResult(GateRef gate); void LowerGetEnv(GateRef gate); void DeleteLoopExit(GateRef gate); diff --git a/ecmascript/compiler/state_split_linearizer.cpp b/ecmascript/compiler/state_split_linearizer.cpp index 4ffd2dded494e6c501a5ab63ac53f518a637549d..f2a5c4c1cf929eafe08684238bf55de25aa36c94 100644 --- a/ecmascript/compiler/state_split_linearizer.cpp +++ b/ecmascript/compiler/state_split_linearizer.cpp @@ -21,17 +21,6 @@ void StateSplitLinearizer::Run() { graphLinearizer_.SetScheduleJSOpcode(); graphLinearizer_.LinearizeGraph(); - if (IsLogEnabled()) { - LOG_COMPILER(INFO) << ""; - LOG_COMPILER(INFO) << "\033[34m" - << "====================" - << " Before state split linearizer " - << "[" << GetMethodName() << "]" 
-                           << "===================="
-                           << "\033[0m";
-        graphLinearizer_.PrintGraph("Build Basic Block");
-        LOG_COMPILER(INFO) << "\033[34m" << "========================= End ==========================" << "\033[0m";
-    }
     LinearizeStateSplit();
     if (IsLogEnabled()) {
         LOG_COMPILER(INFO) << "";
@@ -134,7 +123,7 @@ public:
     void VisitRegion(GateRegion* curRegion)
     {
         replacement_.SetState(curRegion->GetState());
-        currentIndex_ = curRegion->gateList_.size() - 1; // 1: -1 for size
+        currentIndex_ = static_cast<int32_t>(curRegion->gateList_.size() - 1); // 1: -1 for size
         TryLoadDependStart(curRegion);
         // 0: is state
         for (; currentIndex_ > 0; currentIndex_--) {
@@ -191,11 +180,6 @@ public:
                 ASSERT(replacement_.State() != Circuit::NullGate());
                 replacement_ = lowering.LowerConvert(replacement_, gate);
                 break;
-            case OpCode::CHECK_AND_CONVERT:
-                ASSERT(replacement_.State() != Circuit::NullGate());
-                ASSERT(frameState_ != Circuit::NullGate());
-                replacement_ = lowering.LowerCheckAndConvert(replacement_, gate, frameState_);
-                break;
             default:
                 break;
         }
diff --git a/ecmascript/compiler/stub_builder-inl.h b/ecmascript/compiler/stub_builder-inl.h
index 89bc1400c4170aa23ce4cb7604070b5745f4f4c3..68aac2d1eeb87e8ee560920e7d86008ffc6682c7 100644
--- a/ecmascript/compiler/stub_builder-inl.h
+++ b/ecmascript/compiler/stub_builder-inl.h
@@ -32,10 +32,12 @@
 #include "ecmascript/js_generator_object.h"
 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value.h"
+#include "ecmascript/jspandafile/program_object.h"
 #include "ecmascript/layout_info.h"
 #include "ecmascript/message_string.h"
 #include "ecmascript/mem/slots.h"
 #include "ecmascript/mem/visitor.h"
+#include "ecmascript/property_attributes.h"
 
 namespace panda::ecmascript::kungfu {
 using JSFunction = panda::ecmascript::JSFunction;
@@ -550,6 +552,12 @@ inline GateRef StubBuilder::BinaryOp(GateRef x, GateRef y)
     return env_->GetBuilder()->BinaryOp<Op, Type>(x, y);
 }
 
+template<OpCode Op, MachineType Type>
+inline GateRef StubBuilder::BinaryOpWithOverflow(GateRef x, GateRef y)
+{
+    return env_->GetBuilder()->BinaryOpWithOverflow<Op, Type>(x, y);
+}
+
 inline GateRef StubBuilder::TaggedIsInt(GateRef x)
 {
     return env_->GetBuilder()->TaggedIsInt(x);
@@ -575,6 +583,11 @@ inline GateRef StubBuilder::TaggedIsStringOrSymbol(GateRef obj)
     return env_->GetBuilder()->TaggedIsStringOrSymbol(obj);
 }
 
+inline GateRef StubBuilder::TaggedIsSymbol(GateRef obj)
+{
+    return env_->GetBuilder()->TaggedIsSymbol(obj);
+}
+
 inline GateRef StubBuilder::BothAreString(GateRef x, GateRef y)
 {
     auto allHeapObject = BoolAnd(TaggedIsHeapObject(x), TaggedIsHeapObject(y));
@@ -627,6 +640,11 @@ inline GateRef StubBuilder::TaggedIsGeneratorObject(GateRef x)
     return env_->GetBuilder()->TaggedIsGeneratorObject(x);
 }
 
+inline GateRef StubBuilder::TaggedIsJSArray(GateRef x)
+{
+    return env_->GetBuilder()->TaggedIsJSArray(x);
+}
+
 inline GateRef StubBuilder::TaggedIsAsyncGeneratorObject(GateRef x)
 {
     return env_->GetBuilder()->TaggedIsAsyncGeneratorObject(x);
@@ -743,6 +761,16 @@ inline GateRef StubBuilder::DoubleToTaggedDoublePtr(GateRef x)
     return env_->GetBuilder()->DoubleToTaggedDoublePtr(x);
 }
 
+inline GateRef StubBuilder::TaggedPtrToTaggedDoublePtr(GateRef x)
+{
+    return DoubleToTaggedDoublePtr(CastInt64ToFloat64(ChangeTaggedPointerToInt64(x)));
+}
+
+inline GateRef StubBuilder::TaggedPtrToTaggedIntPtr(GateRef x)
+{
+    return IntToTaggedPtr(TruncInt64ToInt32(ChangeTaggedPointerToInt64(x)));
+}
+
 inline GateRef StubBuilder::CastDoubleToInt64(GateRef x)
 {
     return env_->GetBuilder()->CastDoubleToInt64(x);
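`TaggedPtrToTaggedIntPtr` and `TaggedPtrToTaggedDoublePtr` rebuild a boxed value from the raw bits of a field that was stored in a typed representation. A sketch of that re-tagging with a hypothetical tag layout (the engine's actual encoding differs in detail):

```
#include <cstdint>
#include <cstring>
#include <iostream>

constexpr uint64_t TAG_INT = 0xFFFF000000000000ULL; // hypothetical int tag

uint64_t IntToTagged(int32_t v) { return TAG_INT | static_cast<uint32_t>(v); }

uint64_t DoubleToTagged(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits)); // bit-cast of the raw payload
    return bits; // (real engines offset this so it cannot collide with pointers)
}

int main()
{
    // A field stored with Representation::INT holds the raw int32 payload;
    // loads rebuild a tagged value from it, as TaggedPtrToTaggedIntPtr does.
    uint64_t rawField = 42;
    uint64_t tagged = IntToTagged(static_cast<int32_t>(rawField));
    std::cout << std::hex << tagged << "\n" << DoubleToTagged(1.5) << std::endl;
    // ffff00000000002a
    // 3ff8000000000000
    return 0;
}
```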
@@ -758,6 +786,11 @@ inline GateRef StubBuilder::TaggedFalse()
 {
     return env_->GetBuilder()->TaggedFalse();
 }
 
+inline GateRef StubBuilder::TaggedUndefined()
+{
+    return env_->GetBuilder()->UndefineConstant();
+}
+
 // compare operation
 inline GateRef StubBuilder::Int8Equal(GateRef x, GateRef y)
 {
@@ -854,6 +887,11 @@ inline GateRef StubBuilder::Int32UnsignedGreaterThanOrEqual(GateRef x, GateRef y
     return env_->GetBuilder()->Int32UnsignedGreaterThanOrEqual(x, y);
 }
 
+inline GateRef StubBuilder::Int32UnsignedLessThanOrEqual(GateRef x, GateRef y)
+{
+    return env_->GetBuilder()->Int32UnsignedLessThanOrEqual(x, y);
+}
+
 inline GateRef StubBuilder::Int64GreaterThan(GateRef x, GateRef y)
 {
     return env_->GetBuilder()->Int64GreaterThan(x, y);
@@ -1003,10 +1041,7 @@ inline GateRef StubBuilder::IsDictionaryElement(GateRef hClass)
 inline GateRef StubBuilder::IsClassConstructorFromBitField(GateRef bitfield)
 {
     // decode
-    return Int32NotEqual(
-        Int32And(Int32LSR(bitfield, Int32(JSHClass::ClassConstructorBit::START_BIT)),
-        Int32((1LU << JSHClass::ClassConstructorBit::SIZE) - 1)),
-        Int32(0));
+    return env_->GetBuilder()->IsClassConstructorWithBitField(bitfield);
 }
 
 inline GateRef StubBuilder::IsClassConstructor(GateRef object)
@@ -1102,6 +1137,12 @@ inline GateRef StubBuilder::IsLineString(GateRef obj)
     return Int32Equal(objectType, Int32(static_cast<int32_t>(JSType::LINE_STRING)));
 }
 
+inline GateRef StubBuilder::IsSlicedString(GateRef obj)
+{
+    GateRef objectType = GetObjectType(LoadHClass(obj));
+    return Int32Equal(objectType, Int32(static_cast<int32_t>(JSType::SLICED_STRING)));
+}
+
 inline GateRef StubBuilder::IsConstantString(GateRef obj)
 {
     GateRef objectType = GetObjectType(LoadHClass(obj));
@@ -1110,15 +1151,12 @@ inline GateRef StubBuilder::IsConstantString(GateRef obj)
 
 inline GateRef StubBuilder::IsTreeString(GateRef obj)
 {
-    GateRef objectType = GetObjectType(LoadHClass(obj));
-    return Int32Equal(objectType, Int32(static_cast<int32_t>(JSType::TREE_STRING)));
+    return env_->GetBuilder()->IsTreeString(obj);
 }
 
 inline GateRef StubBuilder::TreeStringIsFlat(GateRef string)
 {
-    GateRef second = GetSecondFromTreeString(string);
-    GateRef len = GetLengthFromString(second);
-    return Int32Equal(len, Int32(0));
+    return env_->GetBuilder()->TreeStringIsFlat(string);
 }
 
 inline GateRef StubBuilder::TaggedObjectIsBigInt(GateRef obj)
@@ -1223,6 +1261,26 @@ inline GateRef StubBuilder::IsJSAPIArrayList(GateRef obj)
     return Int32Equal(objectType, Int32(static_cast<int32_t>(JSType::JS_API_ARRAY_LIST)));
 }
 
+inline GateRef StubBuilder::IsJSObjectType(GateRef obj, JSType jsType)
+{
+    auto env = GetEnvironment();
+    Label entryPass(env);
+    env->SubCfgEntry(&entryPass);
+    DEFVARIABLE(result, VariableType::BOOL(), False());
+    Label heapObj(env);
+    Label exit(env);
+    GateRef isHeapObject = TaggedIsHeapObject(obj);
+    Branch(isHeapObject, &heapObj, &exit);
+    Bind(&heapObj);
+    GateRef objectType = GetObjectType(LoadHClass(obj));
+    result = env_->GetBuilder()->LogicAnd(isHeapObject, Int32Equal(objectType, Int32(static_cast<int32_t>(jsType))));
+    Jump(&exit);
+    Bind(&exit);
+    auto ret = *result;
+    env->SubCfgExit();
+    return ret;
+}
+
 inline GateRef StubBuilder::GetTarget(GateRef proxyObj)
 {
     GateRef offset = IntPtr(JSProxy::TARGET_OFFSET);
@@ -1380,6 +1438,12 @@ inline GateRef StubBuilder::HandlerBaseGetAttrIndex(GateRef attr)
         Int32((1LLU << HandlerBase::AttrIndexBit::SIZE) - 1));
 }
 
+inline GateRef StubBuilder::HandlerBaseGetRep(GateRef attr)
+{
+    return Int32And(Int32LSR(attr, Int32(HandlerBase::RepresentationBit::START_BIT)),
+        Int32((1LLU << HandlerBase::RepresentationBit::SIZE) - 1));
+}
+
 inline GateRef StubBuilder::IsInternalAccessor(GateRef
attr) { return Int32NotEqual( @@ -1476,14 +1540,20 @@ inline GateRef StubBuilder::GetLengthFromString(GateRef value) inline GateRef StubBuilder::GetFirstFromTreeString(GateRef string) { - GateRef offset = IntPtr(TreeEcmaString::FIRST_OFFSET); - return Load(VariableType::JS_POINTER(), string, offset); + return env_->GetBuilder()->GetFirstFromTreeString(string); } inline GateRef StubBuilder::GetSecondFromTreeString(GateRef string) { - GateRef offset = IntPtr(TreeEcmaString::SECOND_OFFSET); - return Load(VariableType::JS_POINTER(), string, offset); + return env_->GetBuilder()->GetSecondFromTreeString(string); +} + +inline GateRef StubBuilder::GetIsAllTaggedPropFromHClass(GateRef hclass) +{ + GateRef bitfield = Load(VariableType::INT32(), hclass, IntPtr(JSHClass::BIT_FIELD1_OFFSET)); + return Int32And(Int32LSR(bitfield, + Int32(JSHClass::IsAllTaggedPropBit::START_BIT)), + Int32((1LLU << JSHClass::IsAllTaggedPropBit::SIZE) - 1)); } inline void StubBuilder::SetBitFieldToHClass(GateRef glue, GateRef hClass, GateRef bitfield) @@ -1492,6 +1562,17 @@ inline void StubBuilder::SetBitFieldToHClass(GateRef glue, GateRef hClass, GateR Store(VariableType::INT32(), glue, hClass, offset, bitfield); } +inline void StubBuilder::SetIsAllTaggedProp(GateRef glue, GateRef hclass, GateRef hasRep) +{ + GateRef bitfield1 = Load(VariableType::INT32(), hclass, IntPtr(JSHClass::BIT_FIELD1_OFFSET)); + GateRef mask = Int32LSL( + Int32((1LU << JSHClass::IsAllTaggedPropBit::SIZE) - 1), + Int32(JSHClass::IsAllTaggedPropBit::START_BIT)); + GateRef newVal = Int32Or(Int32And(bitfield1, Int32Not(mask)), + Int32LSL(hasRep, Int32(JSHClass::IsAllTaggedPropBit::START_BIT))); + Store(VariableType::INT32(), glue, hclass, IntPtr(JSHClass::BIT_FIELD1_OFFSET), newVal); +} + inline void StubBuilder::SetPrototypeToHClass(VariableType type, GateRef glue, GateRef hClass, GateRef proto) { GateRef offset = IntPtr(JSHClass::PROTOTYPE_OFFSET); @@ -1568,6 +1649,14 @@ inline GateRef StubBuilder::GetPropertyInlinedProps(GateRef obj, GateRef hClass, return Load(VariableType::JS_ANY(), obj, ZExtInt32ToInt64(propOffset)); } +inline GateRef StubBuilder::GetInlinedPropOffsetFromHClass(GateRef hclass, GateRef index) +{ + GateRef inlinedPropsStart = GetInlinedPropsStartFromHClass(hclass); + GateRef propOffset = Int32Mul( + Int32Add(inlinedPropsStart, index), Int32(JSTaggedValue::TaggedTypeSize())); + return ZExtInt32ToInt64(propOffset); +} + inline void StubBuilder::IncNumberOfProps(GateRef glue, GateRef hClass) { GateRef propNums = GetNumberOfPropsFromHClass(hClass); @@ -1613,6 +1702,11 @@ inline GateRef StubBuilder::GetInlinedPropertiesFromHClass(GateRef hClass) return Int32Sub(objectSizeInWords, inlinedPropsStart); } +inline GateRef StubBuilder::GetElementsKindFromHClass(GateRef hClass) +{ + return env_->GetBuilder()->GetElementsKindByHClass(hClass); +} + inline GateRef StubBuilder::GetObjectSizeFromHClass(GateRef hClass) { return env_->GetBuilder()->GetObjectSizeFromHClass(hClass); @@ -1626,6 +1720,22 @@ inline GateRef StubBuilder::GetInlinedPropsStartFromHClass(GateRef hClass) Int32((1LU << JSHClass::InlinedPropsStartBits::SIZE) - 1)); } +inline void StubBuilder::SetValueToTaggedArrayWithAttr( + GateRef glue, GateRef array, GateRef index, GateRef key, GateRef val, GateRef attr) +{ + GateRef offset = PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())); + GateRef dataOffset = PtrAdd(offset, IntPtr(TaggedArray::DATA_OFFSET)); + SetValueWithAttr(glue, array, dataOffset, key, val, attr); +} + +inline void 
StubBuilder::SetValueToTaggedArrayWithRep(
+    GateRef glue, GateRef array, GateRef index, GateRef val, GateRef rep, Label *repChange)
+{
+    GateRef offset = PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize()));
+    GateRef dataOffset = PtrAdd(offset, IntPtr(TaggedArray::DATA_OFFSET));
+    SetValueWithRep(glue, array, dataOffset, val, rep, repChange);
+}
+
 inline void StubBuilder::SetValueToTaggedArray(VariableType valType, GateRef glue,
                                                GateRef array, GateRef index, GateRef val)
 {
@@ -1715,6 +1825,11 @@ inline GateRef StubBuilder::TaggedCastToIntPtr(GateRef x)
     return env_->Is32Bit() ? TruncInt64ToInt32(GetInt64OfTInt(x)) : GetInt64OfTInt(x);
 }
 
+inline GateRef StubBuilder::GetDoubleOfTInt(GateRef x)
+{
+    return ChangeInt32ToFloat64(GetInt32OfTInt(x));
+}
+
 inline GateRef StubBuilder::GetDoubleOfTDouble(GateRef x)
 {
     return env_->GetBuilder()->GetDoubleOfTDouble(x);
@@ -1950,6 +2065,7 @@ inline GateRef StubBuilder::SetIsInlinePropsFieldInPropAttr(GateRef attr, GateRe
     return newVal;
 }
 
+
 inline GateRef StubBuilder::SetTrackTypeInPropAttr(GateRef attr, GateRef type)
 {
     GateRef mask = Int32LSL(
@@ -1967,6 +2083,34 @@ inline GateRef StubBuilder::GetTrackTypeInPropAttr(GateRef attr)
         Int32((1LLU << PropertyAttributes::TrackTypeField::SIZE) - 1));
 }
 
+inline GateRef StubBuilder::GetRepInPropAttr(GateRef attr)
+{
+    return Int32And(
+        Int32LSR(attr, Int32(PropertyAttributes::RepresentationField::START_BIT)),
+        Int32((1LLU << PropertyAttributes::RepresentationField::SIZE) - 1));
+}
+
+inline GateRef StubBuilder::IsIntRepInPropAttr(GateRef rep)
+{
+    return Int32Equal(rep, Int32(static_cast<int32_t>(Representation::INT)));
+}
+
+inline GateRef StubBuilder::IsDoubleRepInPropAttr(GateRef rep)
+{
+    return Int32Equal(rep, Int32(static_cast<int32_t>(Representation::DOUBLE)));
+}
+
+inline GateRef StubBuilder::SetTaggedRepInPropAttr(GateRef attr)
+{
+    GateRef mask = Int32LSL(
+        Int32((1LU << PropertyAttributes::RepresentationField::SIZE) - 1),
+        Int32(PropertyAttributes::RepresentationField::START_BIT));
+    GateRef targetType = Int32(static_cast<int32_t>(Representation::TAGGED));
+    GateRef newVal = Int32Or(Int32And(attr, Int32Not(mask)),
+        Int32LSL(targetType, Int32(PropertyAttributes::RepresentationField::START_BIT)));
+    return newVal;
+}
+
 inline void StubBuilder::SetHasConstructorToHClass(GateRef glue, GateRef hClass, GateRef value)
 {
     GateRef bitfield = Load(VariableType::INT32(), hClass, IntPtr(JSHClass::BIT_FIELD_OFFSET));
@@ -2260,6 +2404,13 @@ inline void StubBuilder::SetLength(GateRef glue, GateRef str, GateRef length, bo
     Store(VariableType::INT32(), glue, str, IntPtr(EcmaString::MIX_LENGTH_OFFSET), mixLength);
 }
 
+inline void StubBuilder::SetLength(GateRef glue, GateRef str, GateRef length, GateRef isCompressed)
+{
+    GateRef len = Int32LSL(length, Int32(2));
+    GateRef mixLength = Int32Or(len, isCompressed);
+    Store(VariableType::INT32(), glue, str, IntPtr(EcmaString::MIX_LENGTH_OFFSET), mixLength);
+}
+
 inline void StubBuilder::SetRawHashcode(GateRef glue, GateRef str, GateRef rawHashcode)
 {
     Store(VariableType::INT32(), glue, str, IntPtr(EcmaString::HASHCODE_OFFSET), rawHashcode);
@@ -2292,6 +2443,16 @@ inline GateRef StubBuilder::IsStableElements(GateRef hClass)
     return env_->GetBuilder()->IsStableElements(hClass);
 }
 
+inline GateRef StubBuilder::HasConstructorByHClass(GateRef hClass)
+{
+    return env_->GetBuilder()->HasConstructorByHClass(hClass);
+}
+
+inline GateRef StubBuilder::HasConstructor(GateRef object)
+{
+    return env_->GetBuilder()->HasConstructor(object);
+}
+
 inline GateRef
StubBuilder::IsStableArguments(GateRef hClass) { return env_->GetBuilder()->IsStableArguments(hClass); @@ -2320,5 +2481,64 @@ inline GateRef StubBuilder::LoadObjectFromConstPool(GateRef jsFunc, GateRef inde { return env_->GetBuilder()->LoadObjectFromConstPool(jsFunc, index); } + +inline GateRef StubBuilder::LoadPfHeaderFromConstPool(GateRef jsFunc) +{ + GateRef method = Load(VariableType::JS_ANY(), jsFunc, IntPtr(JSFunctionBase::METHOD_OFFSET)); + GateRef constPool = Load(VariableType::JS_ANY(), method, IntPtr(Method::CONSTANT_POOL_OFFSET)); + auto length = GetLengthOfTaggedArray(constPool); + auto index = Int32Sub(length, Int32(ConstantPool::JS_PANDA_FILE_INDEX)); + auto jsPandaFile = GetValueFromTaggedArray(constPool, index); + auto jsPfAddr = ChangeInt64ToIntPtr(ChangeTaggedPointerToInt64(jsPandaFile)); + auto pfAddr = Load(VariableType::NATIVE_POINTER(), jsPfAddr, Int32(JSPandaFile::PF_OFFSET)); + auto pfHeader = Load(VariableType::NATIVE_POINTER(), pfAddr, Int32(0)); + return pfHeader; +} + +inline GateRef StubBuilder::LoadHCIndexFromConstPool(GateRef jsFunc, GateRef traceId) +{ + auto env = GetEnvironment(); + Label subEntry(env); + env->SubCfgEntry(&subEntry); + + GateRef method = Load(VariableType::JS_ANY(), jsFunc, IntPtr(JSFunctionBase::METHOD_OFFSET)); + GateRef constPool = Load(VariableType::JS_ANY(), method, IntPtr(Method::CONSTANT_POOL_OFFSET)); + auto length = GetLengthOfTaggedArray(constPool); + auto index = Int32Sub(length, Int32(ConstantPool::CONSTANT_INDEX_INFO_INDEX)); + auto constantIndexInfo = GetValueFromTaggedArray(constPool, index); + auto indexInfoLength = GetLengthOfTaggedArray(constantIndexInfo); + DEFVARIABLE(bcOffset, VariableType::INT32(), Int32(0)); + DEFVARIABLE(constantIndex, VariableType::INT32(), + Int32(static_cast(ConstantIndex::ELEMENT_HOLE_TAGGED_HCLASS_INDEX))); + DEFVARIABLE(i, VariableType::INT32(), Int32(0)); + + Label loopHead(env); + Label loopEnd(env); + Label afterLoop(env); + Label matchSuccess(env); + Label afterUpdate(env); + Branch(Int32LessThan(*i, indexInfoLength), &loopHead, &afterLoop); + LoopBegin(&loopHead); + bcOffset = GetInt32OfTInt(GetValueFromTaggedArray(constantIndexInfo, *i)); + Branch(Int32Equal(*bcOffset, traceId), &matchSuccess, &afterUpdate); + Bind(&matchSuccess); + constantIndex = GetInt32OfTInt(GetValueFromTaggedArray(constantIndexInfo, Int32Add(*i, Int32(1)))); + Jump(&afterLoop); + Bind(&afterUpdate); + i = Int32Add(*i, Int32(2)); // 2 : skip traceId and constantIndex + Branch(Int32LessThan(*i, indexInfoLength), &loopEnd, &afterLoop); + Bind(&loopEnd); + LoopEnd(&loopHead); + Bind(&afterLoop); + auto ret = *constantIndex; + + env->SubCfgExit(); + return ret; +} + +inline GateRef StubBuilder::RemoveTaggedWeakTag(GateRef weak) +{ + return Int64ToTaggedPtr(IntPtrAnd(ChangeTaggedPointerToInt64(weak), IntPtr(~JSTaggedValue::TAG_WEAK))); +} } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_STUB_INL_H diff --git a/ecmascript/compiler/stub_builder.cpp b/ecmascript/compiler/stub_builder.cpp index e40c3405b6a9e7748e5bb2ec8e32901516e3648d..33f324a13ec9129d2d7fc3c12b153ab51df3d822 100644 --- a/ecmascript/compiler/stub_builder.cpp +++ b/ecmascript/compiler/stub_builder.cpp @@ -17,19 +17,22 @@ #include "ecmascript/compiler/assembler_module.h" #include "ecmascript/compiler/access_object_stub_builder.h" +#include "ecmascript/compiler/builtins/builtins_string_stub_builder.h" #include "ecmascript/compiler/interpreter_stub.h" #include "ecmascript/compiler/llvm_ir_builder.h" #include 
"ecmascript/compiler/new_object_stub_builder.h" #include "ecmascript/compiler/profiler_stub_builder.h" #include "ecmascript/compiler/rt_call_signature.h" #include "ecmascript/compiler/typed_array_stub_builder.h" +#include "ecmascript/global_env_constants.h" #include "ecmascript/js_api/js_api_arraylist.h" #include "ecmascript/js_api/js_api_vector.h" #include "ecmascript/js_object.h" #include "ecmascript/js_arguments.h" #include "ecmascript/mem/remembered_set.h" #include "ecmascript/message_string.h" -#include "ecmascript/pgo_profiler/pgo_profiler_type.h" +#include "ecmascript/pgo_profiler/types/pgo_profiler_type.h" +#include "ecmascript/property_attributes.h" #include "ecmascript/tagged_dictionary.h" #include "ecmascript/tagged_hash_table.h" @@ -453,13 +456,15 @@ GateRef StubBuilder::JSObjectGetProperty(GateRef obj, GateRef hclass, GateRef at DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); Label inlinedProp(env); Label notInlinedProp(env); + Label post(env); GateRef attrOffset = GetOffsetFieldInPropAttr(attr); + GateRef rep = GetRepInPropAttr(attr); Branch(IsInlinedProperty(attr), &inlinedProp, ¬InlinedProp); { Bind(&inlinedProp); { result = GetPropertyInlinedProps(obj, hclass, attrOffset); - Jump(&exit); + Jump(&post); } Bind(¬InlinedProp); { @@ -468,8 +473,29 @@ GateRef StubBuilder::JSObjectGetProperty(GateRef obj, GateRef hclass, GateRef at Load(VariableType::INT64(), obj, IntPtr(JSObject::PROPERTIES_OFFSET)); result = GetValueFromTaggedArray(array, Int32Sub(attrOffset, GetInlinedPropertiesFromHClass(hclass))); + Jump(&post); + } + } + Bind(&post); + { + Label nonDoubleToTagged(env); + Label doubleToTagged(env); + Branch(IsDoubleRepInPropAttr(rep), &doubleToTagged, &nonDoubleToTagged); + Bind(&doubleToTagged); + { + result = TaggedPtrToTaggedDoublePtr(*result); Jump(&exit); } + Bind(&nonDoubleToTagged); + { + Label intToTagged(env); + Branch(IsIntRepInPropAttr(rep), &intToTagged, &exit); + Bind(&intToTagged); + { + result = TaggedPtrToTaggedIntPtr(*result); + Jump(&exit); + } + } } Bind(&exit); auto ret = *result; @@ -477,7 +503,8 @@ GateRef StubBuilder::JSObjectGetProperty(GateRef obj, GateRef hclass, GateRef at return ret; } -void StubBuilder::JSObjectSetProperty(GateRef glue, GateRef obj, GateRef hclass, GateRef attr, GateRef value) +void StubBuilder::JSObjectSetProperty( + GateRef glue, GateRef obj, GateRef hclass, GateRef attr, GateRef key, GateRef value) { auto env = GetEnvironment(); Label subEntry(env); @@ -485,12 +512,13 @@ void StubBuilder::JSObjectSetProperty(GateRef glue, GateRef obj, GateRef hclass, Label exit(env); Label inlinedProp(env); Label notInlinedProp(env); - GateRef attrOffset = GetOffsetFieldInPropAttr(attr); + GateRef attrIndex = GetOffsetFieldInPropAttr(attr); Branch(IsInlinedProperty(attr), &inlinedProp, ¬InlinedProp); { Bind(&inlinedProp); { - SetPropertyInlinedProps(glue, obj, hclass, value, attrOffset); + GateRef offset = GetInlinedPropOffsetFromHClass(hclass, attrIndex); + SetValueWithAttr(glue, obj, offset, key, value, attr); Jump(&exit); } Bind(¬InlinedProp); @@ -498,8 +526,8 @@ void StubBuilder::JSObjectSetProperty(GateRef glue, GateRef obj, GateRef hclass, // compute outOfLineProp offset, get it and return GateRef array = Load(VariableType::JS_POINTER(), obj, IntPtr(JSObject::PROPERTIES_OFFSET)); - SetValueToTaggedArray(VariableType::JS_ANY(), glue, array, Int32Sub(attrOffset, - GetInlinedPropertiesFromHClass(hclass)), value); + GateRef offset = Int32Sub(attrIndex, GetInlinedPropertiesFromHClass(hclass)); + SetValueToTaggedArrayWithAttr(glue, 
array, offset, key, value, attr); Jump(&exit); } } @@ -508,7 +536,7 @@ void StubBuilder::JSObjectSetProperty(GateRef glue, GateRef obj, GateRef hclass, return; } -GateRef StubBuilder::ComputePropertyCapacityInJSObj(GateRef oldLength) +GateRef StubBuilder::ComputeNonInlinedFastPropsCapacity(GateRef oldLength, GateRef maxNonInlinedFastPropsCapacity) { auto env = GetEnvironment(); Label subEntry(env); @@ -518,11 +546,10 @@ GateRef StubBuilder::ComputePropertyCapacityInJSObj(GateRef oldLength) GateRef newL = Int32Add(oldLength, Int32(JSObject::PROPERTIES_GROW_SIZE)); Label reachMax(env); Label notReachMax(env); - Branch(Int32GreaterThan(newL, Int32(JSHClass::MAX_CAPACITY_OF_OUT_OBJECTS)), - &reachMax, ¬ReachMax); + Branch(Int32GreaterThan(newL, maxNonInlinedFastPropsCapacity), &reachMax, ¬ReachMax); { Bind(&reachMax); - result = Int32(JSHClass::MAX_CAPACITY_OF_OUT_OBJECTS); + result = maxNonInlinedFastPropsCapacity; Jump(&exit); Bind(¬ReachMax); result = newL; @@ -534,7 +561,8 @@ GateRef StubBuilder::ComputePropertyCapacityInJSObj(GateRef oldLength) return ret; } -GateRef StubBuilder::CallGetterHelper(GateRef glue, GateRef receiver, GateRef holder, GateRef accessor) +GateRef StubBuilder::CallGetterHelper( + GateRef glue, GateRef receiver, GateRef holder, GateRef accessor, ProfileOperation callback) { auto env = GetEnvironment(); Label subEntry(env); @@ -554,9 +582,23 @@ GateRef StubBuilder::CallGetterHelper(GateRef glue, GateRef receiver, GateRef ho Branch(Equal(accessor, lengthAccessor), &arrayLength, &tryContinue); Bind(&arrayLength); { - result = Load(VariableType::JS_ANY(), holder, - IntPtr(JSArray::LENGTH_OFFSET)); - Jump(&exit); + auto length = Load(VariableType::INT32(), holder, IntPtr(JSArray::LENGTH_OFFSET)); + // TaggedInt supports up to INT32_MAX. + // If length is greater than Int32_MAX, needs to be converted to TaggedDouble. 
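+        // Example: length 3000000000 > INT32_MAX (2147483647), so it becomes
+        // TaggedDouble(3000000000.0); length 100 stays a TaggedInt.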
+ auto condition = Int32UnsignedGreaterThan(length, Int32(INT32_MAX)); + Label overflow(env); + Label notOverflow(env); + Branch(condition, &overflow, ¬Overflow); + Bind(&overflow); + { + result = DoubleToTaggedDoublePtr(ChangeUInt32ToFloat64(length)); + Jump(&exit); + } + Bind(¬Overflow); + { + result = IntToTaggedPtr(length); + Jump(&exit); + } } Bind(&tryContinue); result = CallRuntime(glue, RTSTUB_ID(CallInternalGetter), { accessor, holder }); @@ -578,7 +620,7 @@ GateRef StubBuilder::CallGetterHelper(GateRef glue, GateRef receiver, GateRef ho Bind(&objNotUndefined); { auto retValue = JSCallDispatch(glue, getter, Int32(0), 0, Circuit::NullGate(), - JSCallMode::CALL_GETTER, { receiver }); + JSCallMode::CALL_GETTER, { receiver }, callback); Label noPendingException(env); Branch(HasPendingException(glue), &exit, &noPendingException); Bind(&noPendingException); @@ -594,7 +636,8 @@ GateRef StubBuilder::CallGetterHelper(GateRef glue, GateRef receiver, GateRef ho return ret; } -GateRef StubBuilder::CallSetterHelper(GateRef glue, GateRef receiver, GateRef accessor, GateRef value) +GateRef StubBuilder::CallSetterHelper( + GateRef glue, GateRef receiver, GateRef accessor, GateRef value, ProfileOperation callback) { auto env = GetEnvironment(); Label subEntry(env); @@ -626,7 +669,7 @@ GateRef StubBuilder::CallSetterHelper(GateRef glue, GateRef receiver, GateRef ac Bind(&objNotUndefined); { auto retValue = JSCallDispatch(glue, setter, Int32(1), 0, Circuit::NullGate(), - JSCallMode::CALL_SETTER, { receiver, value }); + JSCallMode::CALL_SETTER, { receiver, value }, callback); Label noPendingException(env); Branch(HasPendingException(glue), &exit, &noPendingException); Bind(&noPendingException); @@ -771,6 +814,7 @@ GateRef StubBuilder::AddPropertyByName(GateRef glue, GateRef receiver, GateRef k SetPropertyInlinedProps(glue, receiver, hclass, value, numberOfProps); attr = SetOffsetFieldInPropAttr(*attr, numberOfProps); attr = SetIsInlinePropsFieldInPropAttr(*attr, Int32(1)); // 1: set inInlineProps true + attr = SetTaggedRepInPropAttr(*attr); attr = ProfilerStubBuilder(env).UpdateTrackTypeInPropAttr(*attr, value, callback); JSHClassAddProperty(glue, receiver, key, *attr); callback.ProfileObjLayoutByStore(receiver); @@ -822,13 +866,15 @@ GateRef StubBuilder::AddPropertyByName(GateRef glue, GateRef receiver, GateRef k Label ChangeToDict(env); Label notChangeToDict(env); Label afterDictChangeCon(env); - Branch(Int32Equal(*length, Int32(JSHClass::MAX_CAPACITY_OF_OUT_OBJECTS)), + GateRef maxNonInlinedFastPropsCapacity = + Int32Sub(Int32(PropertyAttributes::MAX_FAST_PROPS_CAPACITY), inlinedProperties); + Branch(Int32GreaterThanOrEqual(*length, maxNonInlinedFastPropsCapacity), &ChangeToDict, ¬ChangeToDict); { Bind(&ChangeToDict); { attr = SetDictionaryOrderFieldInPropAttr(*attr, - Int32(PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES)); + Int32(PropertyAttributes::MAX_FAST_PROPS_CAPACITY)); GateRef res = CallRuntime(glue, RTSTUB_ID(NameDictPutIfAbsent), { receiver, *array, key, value, IntToTaggedInt(*attr), TaggedTrue() }); SetPropertiesArray(VariableType::JS_POINTER(), glue, receiver, res); @@ -839,7 +885,7 @@ GateRef StubBuilder::AddPropertyByName(GateRef glue, GateRef receiver, GateRef k Jump(&afterDictChangeCon); } Bind(&afterDictChangeCon); - GateRef capacity = ComputePropertyCapacityInJSObj(*length); + GateRef capacity = ComputeNonInlinedFastPropsCapacity(*length, maxNonInlinedFastPropsCapacity); array = CallRuntime(glue, RTSTUB_ID(CopyArray), { *array, IntToTaggedInt(*length), IntToTaggedInt(capacity) 
        });
     SetPropertiesArray(VariableType::JS_POINTER(), glue, receiver, *array);
@@ -851,6 +897,7 @@
     Bind(&afterArrLenCon);
     {
         attr = SetOffsetFieldInPropAttr(*attr, numberOfProps);
+        attr = SetTaggedRepInPropAttr(*attr);
         attr = ProfilerStubBuilder(env).UpdateTrackTypeInPropAttr(*attr, value, callback);
         JSHClassAddProperty(glue, receiver, key, *attr);
         SetValueToTaggedArray(VariableType::JS_ANY(), glue, *array, outProps, value);
@@ -879,7 +926,7 @@ GateRef StubBuilder::TaggedToRepresentation(GateRef value)
     env->SubCfgEntry(&entry);
     Label exit(env);
     DEFVARIABLE(resultRep, VariableType::INT64(),
-                Int64(static_cast<int64_t>(Representation::OBJECT)));
+                Int64(static_cast<int64_t>(Representation::TAGGED)));
     Label isInt(env);
     Label notInt(env);
 
@@ -901,7 +948,7 @@
         }
         Bind(&notDouble);
         {
-            resultRep = Int64(static_cast<int64_t>(Representation::OBJECT));
+            resultRep = Int64(static_cast<int64_t>(Representation::TAGGED));
             Jump(&exit);
         }
     }
@@ -911,6 +958,60 @@
     return ret;
 }
 
+GateRef StubBuilder::TaggedToElementKind(GateRef value)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+
+    DEFVARIABLE(result, VariableType::INT32(), Int32(static_cast<int32_t>(ElementsKind::TAGGED)));
+    Label isInt(env);
+    Label isNotInt(env);
+    Branch(TaggedIsInt(value), &isInt, &isNotInt);
+    Bind(&isInt);
+    {
+        result = Int32(static_cast<int32_t>(ElementsKind::INT));
+        Jump(&exit);
+    }
+    Bind(&isNotInt);
+    {
+        Label isObject(env);
+        Label isDouble(env);
+        Branch(TaggedIsObject(value), &isObject, &isDouble);
+        Bind(&isDouble);
+        {
+            result = Int32(static_cast<int32_t>(ElementsKind::DOUBLE));
+            Jump(&exit);
+        }
+        Bind(&isObject);
+        {
+            Label isHeapObject(env);
+            Branch(TaggedIsHeapObject(value), &isHeapObject, &exit);
+            Bind(&isHeapObject);
+            {
+                Label isString(env);
+                Label isNonString(env);
+                Branch(TaggedIsString(value), &isString, &isNonString);
+                Bind(&isString);
+                {
+                    result = Int32(static_cast<int32_t>(ElementsKind::STRING));
+                    Jump(&exit);
+                }
+                Bind(&isNonString);
+                {
+                    result = Int32(static_cast<int32_t>(ElementsKind::OBJECT));
+                    Jump(&exit);
+                }
+            }
+        }
+    }
+    Bind(&exit);
+    auto ret = *result;
+    env->SubCfgExit();
+    return ret;
+}
+
 void StubBuilder::Store(VariableType type, GateRef glue, GateRef base, GateRef offset, GateRef value)
 {
     if (!env_->IsAsmInterp()) {
@@ -941,6 +1042,102 @@
     }
 }
 
+void StubBuilder::SetValueWithAttr(GateRef glue, GateRef obj, GateRef offset, GateRef key, GateRef value, GateRef attr)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+
+    Label exit(env);
+    Label repChange(env);
+    GateRef rep = GetRepInPropAttr(attr);
+    SetValueWithRep(glue, obj, offset, value, rep, &repChange);
+    Jump(&exit);
+    Bind(&repChange);
+    {
+        attr = SetTaggedRepInPropAttr(attr);
+        TransitionForRepChange(glue, obj, key, attr);
+        Store(VariableType::JS_ANY(), glue, obj, offset, value);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    env->SubCfgExit();
+}
+
+void StubBuilder::SetValueWithRep(
+    GateRef glue, GateRef obj, GateRef offset, GateRef value, GateRef rep, Label *repChange)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+
+    Label exit(env);
+    Label repIsDouble(env);
+    Label repIsNonDouble(env);
+    Branch(IsDoubleRepInPropAttr(rep), &repIsDouble, &repIsNonDouble);
+    Bind(&repIsDouble);
+    {
+        Label valueIsInt(env);
+        Label
valueIsNotInt(env); + Branch(TaggedIsInt(value), &valueIsInt, &valueIsNotInt); + Bind(&valueIsInt); + { + GateRef result = GetDoubleOfTInt(value); + Store(VariableType::FLOAT64(), glue, obj, offset, result); + Jump(&exit); + } + Bind(&valueIsNotInt); + { + Label valueIsObject(env); + Label valueIsDouble(env); + Branch(TaggedIsObject(value), &valueIsObject, &valueIsDouble); + Bind(&valueIsDouble); + { + // TaggedDouble to double + GateRef result = GetDoubleOfTDouble(value); + Store(VariableType::FLOAT64(), glue, obj, offset, result); + Jump(&exit); + } + Bind(&valueIsObject); + { + Jump(repChange); + } + } + } + Bind(&repIsNonDouble); + { + Label repIsInt(env); + Label repIsTagged(env); + Branch(IsIntRepInPropAttr(rep), &repIsInt, &repIsTagged); + Bind(&repIsInt); + { + Label valueIsInt(env); + Label valueIsNotInt(env); + Branch(TaggedIsInt(value), &valueIsInt, &valueIsNotInt); + Bind(&valueIsInt); + { + GateRef result = GetInt32OfTInt(value); + Store(VariableType::INT32(), glue, obj, offset, result); + Jump(&exit); + } + Bind(&valueIsNotInt); + { + Jump(repChange); + } + } + Bind(&repIsTagged); + { + Store(VariableType::JS_ANY(), glue, obj, offset, value); + Jump(&exit); + } + } + + Bind(&exit); + env->SubCfgExit(); + return; +} + + void StubBuilder::SetValueWithBarrier(GateRef glue, GateRef obj, GateRef offset, GateRef value) { auto env = GetEnvironment(); @@ -1117,6 +1314,7 @@ GateRef StubBuilder::StringToElementIndex(GateRef glue, GateRef string) DEFVARIABLE(result, VariableType::INT32(), Int32(-1)); Label greatThanZero(env); Label inRange(env); + Label flattenFastPath(env); auto len = GetLengthFromString(string); Branch(Int32Equal(len, Int32(0)), &exit, &greatThanZero); Bind(&greatThanZero); @@ -1128,8 +1326,12 @@ GateRef StubBuilder::StringToElementIndex(GateRef glue, GateRef string) Branch(isUtf16String, &exit, &isUtf8); Bind(&isUtf8); { - GateRef dataUtf8 = GetNormalStringData(FlattenString(glue, string)); DEFVARIABLE(c, VariableType::INT32(), Int32(0)); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, string, &flattenFastPath); + Bind(&flattenFastPath); + StringInfoGateRef stringInfoGate(&thisFlat); + GateRef dataUtf8 = GetNormalStringData(stringInfoGate); c = ZExtInt8ToInt32(Load(VariableType::INT8(), dataUtf8)); Label isDigitZero(env); Label notDigitZero(env); @@ -1202,11 +1404,11 @@ GateRef StubBuilder::TryToElementsIndex(GateRef glue, GateRef key) Label isKeyInt(env); Label notKeyInt(env); - DEFVARIABLE(resultKey, VariableType::INT32(), Int32(-1)); + DEFVARIABLE(resultKey, VariableType::INT64(), Int64(-1)); Branch(TaggedIsInt(key), &isKeyInt, ¬KeyInt); Bind(&isKeyInt); { - resultKey = GetInt32OfTInt(key); + resultKey = GetInt64OfTInt(key); Jump(&exit); } Bind(¬KeyInt); @@ -1216,7 +1418,7 @@ GateRef StubBuilder::TryToElementsIndex(GateRef glue, GateRef key) Branch(TaggedIsString(key), &isString, ¬String); Bind(&isString); { - resultKey = StringToElementIndex(glue, key); + resultKey = ZExtInt32ToInt64(StringToElementIndex(glue, key)); Jump(&exit); } Bind(¬String); @@ -1231,7 +1433,7 @@ GateRef StubBuilder::TryToElementsIndex(GateRef glue, GateRef key) Branch(DoubleEqual(number, ChangeInt32ToFloat64(integer)), &isEqual, &exit); Bind(&isEqual); { - resultKey = integer; + resultKey = SExtInt32ToInt64(integer); Jump(&exit); } } @@ -1276,6 +1478,7 @@ GateRef StubBuilder::LoadFromField(GateRef receiver, GateRef handlerInfo) Label exit(env); Label handlerInfoIsInlinedProps(env); Label handlerInfoNotInlinedProps(env); + Label handlerPost(env); DEFVARIABLE(result, 
VariableType::JS_ANY(), Undefined()); GateRef index = HandlerBaseGetOffset(handlerInfo); Branch(HandlerBaseIsInlinedProperty(handlerInfo), &handlerInfoIsInlinedProps, &handlerInfoNotInlinedProps); @@ -1283,12 +1486,34 @@ GateRef StubBuilder::LoadFromField(GateRef receiver, GateRef handlerInfo) { result = Load(VariableType::JS_ANY(), receiver, PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize()))); - Jump(&exit); + Jump(&handlerPost); } Bind(&handlerInfoNotInlinedProps); { result = GetValueFromTaggedArray(GetPropertiesArray(receiver), index); - Jump(&exit); + Jump(&handlerPost); + } + Bind(&handlerPost); + { + Label nonDoubleToTagged(env); + Label doubleToTagged(env); + GateRef rep = HandlerBaseGetRep(handlerInfo); + Branch(IsDoubleRepInPropAttr(rep), &doubleToTagged, &nonDoubleToTagged); + Bind(&doubleToTagged); + { + result = TaggedPtrToTaggedDoublePtr(*result); + Jump(&exit); + } + Bind(&nonDoubleToTagged); + { + Label intToTagged(env); + Branch(IsIntRepInPropAttr(rep), &intToTagged, &exit); + Bind(&intToTagged); + { + result = TaggedPtrToTaggedIntPtr(*result); + Jump(&exit); + } + } } Bind(&exit); auto ret = *result; @@ -1362,7 +1587,8 @@ GateRef StubBuilder::CheckPolyHClass(GateRef cachedValue, GateRef hclass) return ret; } -GateRef StubBuilder::LoadICWithHandler(GateRef glue, GateRef receiver, GateRef argHolder, GateRef argHandler) +GateRef StubBuilder::LoadICWithHandler( + GateRef glue, GateRef receiver, GateRef argHolder, GateRef argHandler, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -1403,7 +1629,7 @@ GateRef StubBuilder::LoadICWithHandler(GateRef glue, GateRef receiver, GateRef a Jump(&exit); Bind(&handlerInfoNotNonExist); GateRef accessor = LoadFromField(*holder, handlerInfo); - result = CallGetterHelper(glue, receiver, *holder, accessor); + result = CallGetterHelper(glue, receiver, *holder, accessor, callback); Jump(&exit); } } @@ -1434,7 +1660,7 @@ GateRef StubBuilder::LoadICWithHandler(GateRef glue, GateRef receiver, GateRef a return ret; } -GateRef StubBuilder::LoadElement(GateRef glue, GateRef receiver, GateRef key) +GateRef StubBuilder::LoadElement(GateRef glue, GateRef receiver, GateRef key, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -1444,8 +1670,17 @@ GateRef StubBuilder::LoadElement(GateRef glue, GateRef receiver, GateRef key) Label indexNotLessZero(env); Label lengthLessIndex(env); Label lengthNotLessIndex(env); + Label greaterThanInt32Max(env); + Label notGreaterThanInt32Max(env); DEFVARIABLE(result, VariableType::JS_ANY(), Hole()); - GateRef index = TryToElementsIndex(glue, key); + GateRef index64 = TryToElementsIndex(glue, key); + Branch(Int64GreaterThanOrEqual(index64, Int64(INT32_MAX)), &greaterThanInt32Max, ¬GreaterThanInt32Max); + Bind(&greaterThanInt32Max); + { + Jump(&exit); + } + Bind(¬GreaterThanInt32Max); + GateRef index = TruncInt64ToInt32(index64); Branch(Int32LessThan(index, Int32(0)), &indexLessZero, &indexNotLessZero); Bind(&indexLessZero); { @@ -1459,6 +1694,7 @@ GateRef StubBuilder::LoadElement(GateRef glue, GateRef receiver, GateRef key) Jump(&exit); Bind(&lengthNotLessIndex); result = GetValueFromTaggedArray(elements, index); + callback.ProfileObjLayoutByLoad(receiver); Jump(&exit); } Bind(&exit); @@ -1467,7 +1703,8 @@ GateRef StubBuilder::LoadElement(GateRef glue, GateRef receiver, GateRef key) return ret; } -GateRef StubBuilder::ICStoreElement(GateRef glue, GateRef receiver, GateRef key, GateRef value, GateRef handler) +GateRef StubBuilder::ICStoreElement( + 
GateRef glue, GateRef receiver, GateRef key, GateRef value, GateRef handler, ProfileOperation callback)
 {
     auto env = GetEnvironment();
     Label entry(env);
@@ -1490,9 +1727,18 @@
     Label cellHasNotChanged(env);
     Label loopHead(env);
     Label loopEnd(env);
+    Label greaterThanInt32Max(env);
+    Label notGreaterThanInt32Max(env);
     DEFVARIABLE(result, VariableType::JS_ANY(), Hole());
     DEFVARIABLE(varHandler, VariableType::JS_ANY(), handler);
-    GateRef index = TryToElementsIndex(glue, key);
+    GateRef index64 = TryToElementsIndex(glue, key);
+    Branch(Int64GreaterThanOrEqual(index64, Int64(INT32_MAX)), &greaterThanInt32Max, &notGreaterThanInt32Max);
+    Bind(&greaterThanInt32Max);
+    {
+        Jump(&exit);
+    }
+    Bind(&notGreaterThanInt32Max);
+    GateRef index = TruncInt64ToInt32(index64);
     Branch(Int32LessThan(index, Int32(0)), &indexLessZero, &indexNotLessZero);
     Bind(&indexLessZero);
     {
@@ -1524,9 +1770,9 @@
             GateRef oldLength = GetArrayLength(receiver);
             Branch(Int32GreaterThanOrEqual(index, oldLength), &indexGreaterLength, &handerInfoNotJSArray);
             Bind(&indexGreaterLength);
-            Store(VariableType::INT64(), glue, receiver,
+            Store(VariableType::INT32(), glue, receiver,
                   IntPtr(panda::ecmascript::JSArray::LENGTH_OFFSET),
-                  IntToTaggedInt(Int32Add(index, Int32(1))));
+                  Int32Add(index, Int32(1)));
         }
         Jump(&handerInfoNotJSArray);
     }
@@ -1541,11 +1787,33 @@
                                  RTSTUB_ID(TaggedArraySetValue),
                                  { receiver, value, elements,
                                    IntToTaggedInt(index), IntToTaggedInt(capacity) });
-            Jump(&exit);
+            Label transition(env);
+            Branch(TaggedIsHole(*result), &exit, &transition);
+            Bind(&transition);
+            {
+                Label hole(env);
+                Label notHole(env);
+                DEFVARIABLE(kind, VariableType::INT32(), Int32(static_cast<int32_t>(ElementsKind::NONE)));
+                Branch(Int32GreaterThan(index, capacity), &hole, &notHole);
+                Bind(&hole);
+                {
+                    kind = Int32(static_cast<int32_t>(ElementsKind::HOLE));
+                    Jump(&notHole);
+                }
+                Bind(&notHole);
+                {
+                    TransitToElementsKind(glue, receiver, value, *kind);
+                    callback.ProfileObjLayoutByStore(receiver);
+                    Jump(&exit);
+                }
+            }
         }
         Bind(&storeElement);
         {
             SetValueToTaggedArray(VariableType::JS_ANY(), glue, elements, index, value);
+            TransitToElementsKind(
+                glue, receiver, value, Int32(static_cast<int32_t>(ElementsKind::NONE)));
+            callback.ProfileObjLayoutByStore(receiver);
             result = Undefined();
             Jump(&exit);
         }
@@ -1574,30 +1842,9 @@
 GateRef StubBuilder::GetArrayLength(GateRef object)
 {
-    auto env = GetEnvironment();
-    Label entry(env);
-    env->SubCfgEntry(&entry);
-    Label exit(env);
-    Label lengthIsInt(env);
-    Label lengthNotInt(env);
-    DEFVARIABLE(result, VariableType::INT32(), Int32(0));
     GateRef lengthOffset = IntPtr(panda::ecmascript::JSArray::LENGTH_OFFSET);
-    GateRef length = Load(VariableType::INT64(), object, lengthOffset);
-    Branch(TaggedIsInt(length), &lengthIsInt, &lengthNotInt);
-    Bind(&lengthIsInt);
-    {
-        result = GetInt32OfTInt(length);
-        Jump(&exit);
-    }
-    Bind(&lengthNotInt);
-    {
-        result = ChangeFloat64ToInt32(GetDoubleOfTDouble(length));
-        Jump(&exit);
-    }
-    Bind(&exit);
-    auto ret = *result;
-    env->SubCfgExit();
-    return ret;
+    GateRef result = Load(VariableType::INT32(), object, lengthOffset);
+    return result;
 }
 
 GateRef StubBuilder::StoreICWithHandler(GateRef glue, GateRef receiver, GateRef argHolder,
@@ -1641,13 +1888,13 @@ GateRef
StubBuilder::StoreICWithHandler(GateRef glue, GateRef receiver, GateRef Branch(IsField(handlerInfo), &handlerInfoIsField, &handlerInfoNotField); Bind(&handlerInfoIsField); { - StoreField(glue, receiver, value, handlerInfo, callback); + result = StoreField(glue, receiver, value, handlerInfo, callback); Jump(&exit); } Bind(&handlerInfoNotField); { GateRef accessor = LoadFromField(*holder, handlerInfo); - result = CallSetterHelper(glue, receiver, accessor, value); + result = CallSetterHelper(glue, receiver, accessor, value, callback); Jump(&exit); } } @@ -1656,7 +1903,7 @@ GateRef StubBuilder::StoreICWithHandler(GateRef glue, GateRef receiver, GateRef Branch(TaggedIsTransitionHandler(*handler), &handlerIsTransitionHandler, &handlerNotTransitionHandler); Bind(&handlerIsTransitionHandler); { - StoreWithTransition(glue, receiver, value, *handler, callback); + result = StoreWithTransition(glue, receiver, value, *handler, callback); Jump(&exit); } Bind(&handlerNotTransitionHandler); @@ -1669,7 +1916,7 @@ GateRef StubBuilder::StoreICWithHandler(GateRef glue, GateRef receiver, GateRef Branch(GetHasChanged(cellValue), &cellHasChanged, &cellNotChanged); Bind(&cellNotChanged); { - StoreWithTransition(glue, receiver, value, *handler, callback, true); + result = StoreWithTransition(glue, receiver, value, *handler, callback, true); Jump(&exit); } } @@ -1712,13 +1959,13 @@ GateRef StubBuilder::StoreICWithHandler(GateRef glue, GateRef receiver, GateRef Branch(IsField(handlerInfo), &aotHandlerInfoIsField, &aotHandlerInfoNotField); Bind(&aotHandlerInfoIsField); { - StoreField(glue, receiver, value, handlerInfo, callback); + result = StoreField(glue, receiver, value, handlerInfo, callback); Jump(&exit); } Bind(&aotHandlerInfoNotField); { GateRef accessor = LoadFromField(*holder, handlerInfo); - result = CallSetterHelper(glue, receiver, accessor, value); + result = CallSetterHelper(glue, receiver, accessor, value, callback); Jump(&exit); } } @@ -1738,7 +1985,8 @@ GateRef StubBuilder::StoreICWithHandler(GateRef glue, GateRef receiver, GateRef return ret; } -void StubBuilder::StoreField(GateRef glue, GateRef receiver, GateRef value, GateRef handler, ProfileOperation callback) +GateRef StubBuilder::StoreField(GateRef glue, GateRef receiver, GateRef value, GateRef handler, + ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -1748,28 +1996,36 @@ void StubBuilder::StoreField(GateRef glue, GateRef receiver, GateRef value, Gate Label handlerIsInlinedProperty(env); Label handlerNotInlinedProperty(env); GateRef index = HandlerBaseGetOffset(handler); + GateRef rep = HandlerBaseGetRep(handler); + DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); + Label repChange(env); Branch(HandlerBaseIsInlinedProperty(handler), &handlerIsInlinedProperty, &handlerNotInlinedProperty); Bind(&handlerIsInlinedProperty); { - Store(VariableType::JS_ANY(), - glue, - receiver, - PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())), - value); + GateRef toOffset = PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())); + SetValueWithRep(glue, receiver, toOffset, value, rep, &repChange); Jump(&exit); } Bind(&handlerNotInlinedProperty); { GateRef array = GetPropertiesArray(receiver); - SetValueToTaggedArray(VariableType::JS_ANY(), glue, array, index, value); + SetValueToTaggedArrayWithRep(glue, array, index, value, rep, &repChange); + Jump(&exit); + } + Bind(&repChange); + { + result = Hole(); Jump(&exit); } + Bind(&exit); + auto ret = *result; env->SubCfgExit(); + return ret; } 
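`StoreField` and `StoreWithTransition` now return a `GateRef` so a representation mismatch can be reported as `Hole()` and retried along the slow path. A stand-alone sketch of that contract (the enum and function names here are illustrative, not the engine's API):

```
#include <iostream>
#include <optional>

enum class Rep { Int, Double, Tagged };

// Returns std::nullopt on success, or the widened representation the field
// must transition to before the store can succeed (the Hole() analogue).
std::optional<Rep> StoreWithRep(Rep fieldRep, bool valueIsInt, bool valueIsDouble)
{
    if (fieldRep == Rep::Int && !valueIsInt) {
        return Rep::Tagged;    // int field cannot hold this value: widen
    }
    if (fieldRep == Rep::Double && !valueIsInt && !valueIsDouble) {
        return Rep::Tagged;    // double field cannot hold a pointer: widen
    }
    return std::nullopt;       // raw typed store is fine
}

int main()
{
    auto r = StoreWithRep(Rep::Int, /*valueIsInt=*/false, /*valueIsDouble=*/true);
    std::cout << (r ? "transition needed" : "stored") << std::endl; // transition needed
    return 0;
}
```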
-void StubBuilder::StoreWithTransition(GateRef glue, GateRef receiver, GateRef value, GateRef handler, - ProfileOperation callback, bool withPrototype) +GateRef StubBuilder::StoreWithTransition(GateRef glue, GateRef receiver, GateRef value, GateRef handler, + ProfileOperation callback, bool withPrototype) { auto env = GetEnvironment(); Label entry(env); @@ -1780,6 +2036,7 @@ void StubBuilder::StoreWithTransition(GateRef glue, GateRef receiver, GateRef va Label handlerInfoNotInlinedProps(env); Label indexMoreCapacity(env); Label indexLessCapacity(env); + DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); GateRef newHClass; GateRef handlerInfo; if (withPrototype) { @@ -1795,6 +2052,7 @@ void StubBuilder::StoreWithTransition(GateRef glue, GateRef receiver, GateRef va Bind(&handlerInfoNotInlinedProps); { ProfilerStubBuilder(env).UpdatePropAttrIC(glue, receiver, value, handlerInfo, callback); + Label repChange(env); GateRef array = GetPropertiesArray(receiver); GateRef capacity = GetLengthOfTaggedArray(array); GateRef index = HandlerBaseGetOffset(handlerInfo); @@ -1809,21 +2067,27 @@ void StubBuilder::StoreWithTransition(GateRef glue, GateRef receiver, GateRef va } Bind(&indexLessCapacity); { - Store(VariableType::JS_ANY(), - glue, - PtrAdd(array, IntPtr(TaggedArray::DATA_OFFSET)), - PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())), - value); + GateRef rep = HandlerBaseGetRep(handlerInfo); + GateRef base = PtrAdd(array, IntPtr(TaggedArray::DATA_OFFSET)); + GateRef toIndex = PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())); + SetValueWithRep(glue, base, toIndex, value, rep, &repChange); + Jump(&exit); + } + Bind(&repChange); + { + result = Hole(); Jump(&exit); } } Bind(&handlerInfoIsInlinedProps); { - StoreField(glue, receiver, value, handlerInfo, callback); + result = StoreField(glue, receiver, value, handlerInfo, callback); Jump(&exit); } Bind(&exit); + auto ret = *result; env->SubCfgExit(); + return ret; } GateRef StubBuilder::StoreGlobal(GateRef glue, GateRef value, GateRef cell) @@ -1933,14 +2197,13 @@ inline void StubBuilder::UpdateValueInDict(GateRef glue, GateRef elements, GateR SetValueToTaggedArray(VariableType::JS_ANY(), glue, elements, valueIndex, value); } -GateRef StubBuilder::GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef index) +GateRef StubBuilder::GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef index, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); env->SubCfgEntry(&entry); DEFVARIABLE(result, VariableType::JS_ANY(), Hole()); DEFVARIABLE(holder, VariableType::JS_ANY(), receiver); - DEFVARIABLE(proto, VariableType::JS_ANY(), Hole()); Label exit(env); Label loopHead(env); Label loopEnd(env); @@ -1959,15 +2222,15 @@ GateRef StubBuilder::GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef // TypeArray Label isFastTypeArray(env); Label notFastTypeArray(env); + Label notTypedArrayProto(env); + Branch(Int32Equal(jsType, Int32(static_cast(JSType::JS_TYPED_ARRAY))), &exit, ¬TypedArrayProto); + Bind(¬TypedArrayProto); Branch(IsFastTypeArray(jsType), &isFastTypeArray, ¬FastTypeArray); Bind(&isFastTypeArray); { - proto = GetPrototypeFromHClass(LoadHClass(receiver)); - Label notOnProtoChain(env); - Branch(Int64NotEqual(*proto, *holder), &exit, ¬OnProtoChain); - Bind(¬OnProtoChain); TypedArrayStubBuilder typedArrayStubBuilder(this); result = typedArrayStubBuilder.FastGetPropertyByIndex(glue, *holder, index, jsType); + callback.ProfileObjIndex(receiver); Jump(&exit); } 
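+        // (With an empty ProfileOperation callback this profiling hook is a
+        // no-op; under PGO it records the element access on this receiver.)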
Bind(&notFastTypeArray); @@ -2004,6 +2267,7 @@ GateRef StubBuilder::GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef Label notHole(env); Label isHole(env); GateRef value = GetValueFromTaggedArray(elements, index); + callback.ProfileObjLayoutByLoad(receiver); Branch(TaggedIsNotHole(value), &notHole, &isHole); Bind(&notHole); { @@ -2036,7 +2300,7 @@ GateRef StubBuilder::GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef Branch(IsAccessor(attr), &isAccessor, &notAccessor); Bind(&isAccessor); { - result = CallGetterHelper(glue, receiver, *holder, value); + result = CallGetterHelper(glue, receiver, *holder, value, callback); Jump(&exit); } Bind(&notAccessor); @@ -2068,7 +2332,7 @@ GateRef StubBuilder::GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef return ret; } -GateRef StubBuilder::GetPropertyByValue(GateRef glue, GateRef receiver, GateRef keyValue) +GateRef StubBuilder::GetPropertyByValue(GateRef glue, GateRef receiver, GateRef keyValue, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -2093,13 +2357,22 @@ GateRef StubBuilder::GetPropertyByValue(GateRef glue, GateRef receiver, GateRef } Bind(&isNumberOrStringSymbol); { - GateRef index = TryToElementsIndex(glue, *key); + GateRef index64 = TryToElementsIndex(glue, *key); Label validIndex(env); Label notValidIndex(env); + Label greaterThanInt32Max(env); + Label notGreaterThanInt32Max(env); + Branch(Int64GreaterThanOrEqual(index64, Int64(INT32_MAX)), &greaterThanInt32Max, &notGreaterThanInt32Max); + Bind(&greaterThanInt32Max); + { + Jump(&exit); + } + Bind(&notGreaterThanInt32Max); + GateRef index = TruncInt64ToInt32(index64); Branch(Int32GreaterThanOrEqual(index, Int32(0)), &validIndex, &notValidIndex); Bind(&validIndex); { - result = GetPropertyByIndex(glue, receiver, index); + result = GetPropertyByIndex(glue, receiver, index, callback); Jump(&exit); } Bind(&notValidIndex); @@ -2132,7 +2405,7 @@ GateRef StubBuilder::GetPropertyByValue(GateRef glue, GateRef receiver, GateRef } Bind(&getByName); { - result = GetPropertyByName(glue, receiver, *key); + result = GetPropertyByName(glue, receiver, *key, callback); Jump(&exit); } } @@ -2143,7 +2416,7 @@ GateRef StubBuilder::GetPropertyByValue(GateRef glue, GateRef receiver, GateRef return ret; } -GateRef StubBuilder::GetPropertyByName(GateRef glue, GateRef receiver, GateRef key) +GateRef StubBuilder::GetPropertyByName(GateRef glue, GateRef receiver, GateRef key, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -2224,7 +2497,7 @@ GateRef StubBuilder::GetPropertyByName(GateRef glue, GateRef receiver, GateRef k Branch(IsAccessor(attr), &isAccessor, &notAccessor); Bind(&isAccessor); { - result = CallGetterHelper(glue, receiver, *holder, value); + result = CallGetterHelper(glue, receiver, *holder, value, callback); Jump(&exit); } Bind(&notAccessor); @@ -2263,7 +2536,7 @@ GateRef StubBuilder::GetPropertyByName(GateRef glue, GateRef receiver, GateRef k Branch(IsAccessor(attr), &isAccessor1, &notAccessor1); Bind(&isAccessor1); { - result = CallGetterHelper(glue, receiver, *holder, value); + result = CallGetterHelper(glue, receiver, *holder, value, callback); Jump(&exit); } Bind(&notAccessor1); @@ -2303,6 +2576,7 @@ void StubBuilder::CopyAllHClass(GateRef glue, GateRef dstHClass, GateRef srcHCla auto proto = GetPrototypeFromHClass(srcHClass); SetPrototypeToHClass(VariableType::JS_POINTER(), glue, dstHClass, proto); SetBitFieldToHClass(glue, dstHClass, GetBitFieldFromHClass(srcHClass)); + SetIsAllTaggedProp(glue, dstHClass, GetIsAllTaggedPropFromHClass(srcHClass)); SetNumberOfPropsToHClass(glue, dstHClass, GetNumberOfPropsFromHClass(srcHClass)); SetTransitionsToHClass(VariableType::INT64(), glue, dstHClass, Undefined()); SetProtoChangeDetailsToHClass(VariableType::INT64(), glue, dstHClass, Null()); @@ -2312,6 +2586,59 @@ void StubBuilder::CopyAllHClass(GateRef glue, GateRef dstHClass, GateRef srcHCla return; } +void StubBuilder::TransitionForRepChange(GateRef glue, GateRef receiver, GateRef key, GateRef attr) +{ + auto env = GetEnvironment(); + Label subEntry(env); + env->SubCfgEntry(&subEntry); + GateRef hclass = LoadHClass(receiver); + GateRef type = GetObjectType(hclass); + GateRef size = Int32Mul(GetInlinedPropsStartFromHClass(hclass), + Int32(JSTaggedValue::TaggedTypeSize())); + GateRef inlineProps = GetInlinedPropertiesFromHClass(hclass); + GateRef newJshclass = CallRuntime(glue, RTSTUB_ID(NewEcmaHClass), + { IntToTaggedInt(size), IntToTaggedInt(type), + IntToTaggedInt(inlineProps) }); + CopyAllHClass(glue, newJshclass, hclass); + CallRuntime(glue, RTSTUB_ID(CopyAndUpdateObjLayout), + { hclass, newJshclass, key, IntToTaggedInt(attr) }); +#if ECMASCRIPT_ENABLE_IC + NotifyHClassChanged(glue, hclass, newJshclass); +#endif + StoreHClass(glue, receiver, newJshclass); + env->SubCfgExit(); +} + +void StubBuilder::TransitToElementsKind(GateRef glue, GateRef receiver, GateRef value, GateRef kind) +{ + auto env = GetEnvironment(); + Label subEntry(env); + env->SubCfgEntry(&subEntry); + Label exit(env); + + GateRef hclass = LoadHClass(receiver); + GateRef elementsKind = GetElementsKindFromHClass(hclass); + + Label isNoneDefault(env); + Branch(Int32Equal(elementsKind, Int32(static_cast<int32_t>(ElementsKind::GENERIC))), &exit, &isNoneDefault); + Bind(&isNoneDefault); + { + GateRef newKind = TaggedToElementKind(value); + newKind = Int32Or(newKind, kind); + newKind = Int32Or(newKind, elementsKind); + Label change(env); + Branch(Int32Equal(elementsKind, newKind), &exit, &change); + Bind(&change); + { + CallRuntime(glue, RTSTUB_ID(UpdateHClassForElementsKind), { receiver, newKind }); + Jump(&exit); + } + } + + Bind(&exit); + env->SubCfgExit(); +} + GateRef StubBuilder::FindTransitions(GateRef glue, GateRef receiver, GateRef hclass, GateRef key, GateRef metaData) { auto env = GetEnvironment(); @@ -2399,7 +2726,8 @@ GateRef StubBuilder::FindTransitions(GateRef glue, GateRef receiver, GateRef hcl return ret; } -GateRef StubBuilder::SetPropertyByIndex(GateRef glue, GateRef receiver, GateRef index, GateRef value, bool useOwn) +GateRef StubBuilder::SetPropertyByIndex( + GateRef glue, GateRef receiver, GateRef index, GateRef value, bool useOwn, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -2438,6 +2766,7 @@ GateRef StubBuilder::SetPropertyByIndex(GateRef glue, GateRef receiver, GateRef { returnValue = CallRuntime(glue, RTSTUB_ID(SetTypeArrayPropertyByIndex), { receiver, IntToTaggedInt(index), value, IntToTaggedInt(jsType)}); + callback.ProfileObjIndex(receiver); Jump(&exit); } } @@ -2484,6 +2813,9 @@ GateRef StubBuilder::SetPropertyByIndex(GateRef glue, GateRef receiver, GateRef { GateRef newElements = CallRuntime(glue, RTSTUB_ID(CheckAndCopyArray), {*holder}); SetValueToTaggedArray(VariableType::JS_ANY(), glue, newElements, index, value); + TransitToElementsKind( + glue, receiver, value, Int32(static_cast<int32_t>(ElementsKind::NONE))); + callback.ProfileObjLayoutByStore(receiver); returnValue = Undefined(); Jump(&exit); } @@ -2494,6 +2826,9 @@ GateRef StubBuilder::SetPropertyByIndex(GateRef glue, GateRef receiver, GateRef
Bind(&setElementsArray); { SetValueToTaggedArray(VariableType::JS_ANY(), glue, elements, index, value); + TransitToElementsKind( + glue, receiver, value, Int32(static_cast<int32_t>(ElementsKind::NONE))); + callback.ProfileObjLayoutByStore(receiver); returnValue = Undefined(); Jump(&exit); } @@ -2532,6 +2867,7 @@ GateRef StubBuilder::SetPropertyByIndex(GateRef glue, GateRef receiver, GateRef Branch(TaggedIsTrue(result), &success, &failed); Bind(&success); { + callback.ProfileObjLayoutByStore(receiver); returnValue = Undefined(); Jump(&exit); } @@ -2658,7 +2994,7 @@ GateRef StubBuilder::SetPropertyByName(GateRef glue, GateRef receiver, GateRef k Branch(ShouldCallSetter(receiver, *holder, accessor, attr), &shouldCall, &notAccessor); Bind(&shouldCall); { - result = CallSetterHelper(glue, receiver, accessor, value); + result = CallSetterHelper(glue, receiver, accessor, value, callback); Jump(&exit); } } @@ -2716,7 +3052,7 @@ GateRef StubBuilder::SetPropertyByName(GateRef glue, GateRef receiver, GateRef k { // JSObject::Cast(holder)->SetProperty(thread, hclass, attr, value) // return JSTaggedValue::Undefined() - JSObjectSetProperty(glue, *holder, hclass, attr, value); + JSObjectSetProperty(glue, *holder, hclass, attr, key, value); ProfilerStubBuilder(env).UpdatePropAttrWithValue( glue, *holder, layOutInfo, attr, entry, value, callback); result = Undefined(); @@ -2754,7 +3090,7 @@ GateRef StubBuilder::SetPropertyByName(GateRef glue, GateRef receiver, GateRef k Branch(ShouldCallSetter(receiver, *holder, accessor1, attr1), &shouldCall1, &notAccessor1); Bind(&shouldCall1); { - result = CallSetterHelper(glue, receiver, accessor1, value); + result = CallSetterHelper(glue, receiver, accessor1, value, callback); Jump(&exit); } } @@ -2815,7 +3151,7 @@ GateRef StubBuilder::SetPropertyByName(GateRef glue, GateRef receiver, GateRef k GateRef receiverLayoutInfo = GetLayoutFromHClass(receiverHClass); GateRef holePropAttr = GetPropAttrFromLayoutInfo(receiverLayoutInfo, *receiverHoleEntry); GateRef holeAttr = GetInt32OfTInt(holePropAttr); - JSObjectSetProperty(glue, receiver, receiverHClass, holeAttr, value); + JSObjectSetProperty(glue, receiver, receiverHClass, holeAttr, key, value); ProfilerStubBuilder(env).UpdatePropAttrWithValue( glue, receiver, receiverLayoutInfo, holeAttr, *receiverHoleEntry, value, callback); result = Undefined(); @@ -2870,13 +3206,22 @@ GateRef StubBuilder::SetPropertyByValue(GateRef glue, GateRef receiver, GateRef } Bind(&isNumberOrStringSymbol); { - GateRef index = TryToElementsIndex(glue, *varKey); + GateRef index64 = TryToElementsIndex(glue, *varKey); Label validIndex(env); Label notValidIndex(env); + Label greaterThanInt32Max(env); + Label notGreaterThanInt32Max(env); + Branch(Int64GreaterThanOrEqual(index64, Int64(INT32_MAX)), &greaterThanInt32Max, &notGreaterThanInt32Max); + Bind(&greaterThanInt32Max); + { + Jump(&exit); + } + Bind(&notGreaterThanInt32Max); + GateRef index = TruncInt64ToInt32(index64); Branch(Int32GreaterThanOrEqual(index, Int32(0)), &validIndex, &notValidIndex); Bind(&validIndex); { - result = SetPropertyByIndex(glue, receiver, index, value, useOwn); + result = SetPropertyByIndex(glue, receiver, index, value, useOwn, callback); Jump(&exit); } Bind(&notValidIndex); @@ -3109,7 +3454,8 @@ GateRef StubBuilder::FastTypeOf(GateRef glue, GateRef obj) return ret; } -GateRef StubBuilder::InstanceOf(GateRef glue, GateRef object, GateRef target, GateRef profileTypeInfo, GateRef slotId) +GateRef StubBuilder::InstanceOf( + GateRef glue, GateRef object, GateRef target, GateRef
profileTypeInfo, GateRef slotId, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -3159,7 +3505,7 @@ GateRef StubBuilder::InstanceOf(GateRef glue, GateRef object, GateRef target, Ga Branch(TaggedIsUndefined(instof), &instOfIsUndefined, &instOfNotUndefined); Bind(&instOfNotUndefined); { - TryFastHasInstance(glue, instof, target, object, &fastPath, &exit, &result); + TryFastHasInstance(glue, instof, target, object, &fastPath, &exit, &result, callback); } Bind(&instOfIsUndefined); { @@ -3187,7 +3533,7 @@ GateRef StubBuilder::InstanceOf(GateRef glue, GateRef object, GateRef target, Ga } void StubBuilder::TryFastHasInstance(GateRef glue, GateRef instof, GateRef target, GateRef object, Label *fastPath, - Label *exit, Variable *result) + Label *exit, Variable *result, ProfileOperation callback) { auto env = GetEnvironment(); @@ -3204,7 +3550,7 @@ void StubBuilder::TryFastHasInstance(GateRef glue, GateRef instof, GateRef targe Bind(&slowPath); { GateRef retValue = JSCallDispatch(glue, instof, Int32(1), 0, Circuit::NullGate(), - JSCallMode::CALL_SETTER, { target, object }); + JSCallMode::CALL_SETTER, { target, object }, callback); result->WriteVariable(FastToBoolean(retValue)); Jump(exit); } @@ -3263,7 +3609,7 @@ GateRef StubBuilder::GetMethod(GateRef glue, GateRef obj, GateRef key, GateRef p return ret; } -GateRef StubBuilder::FastGetPropertyByName(GateRef glue, GateRef obj, GateRef key) +GateRef StubBuilder::FastGetPropertyByName(GateRef glue, GateRef obj, GateRef key, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -3277,7 +3623,7 @@ GateRef StubBuilder::FastGetPropertyByName(GateRef glue, GateRef obj, GateRef ke Branch(TaggedIsHeapObject(obj), &fastpath, &slowpath); Bind(&fastpath); { - result = GetPropertyByName(glue, obj, key); + result = GetPropertyByName(glue, obj, key, callback); Branch(TaggedIsHole(*result), &slowpath, &exit); } Bind(&slowpath); @@ -3292,7 +3638,7 @@ GateRef StubBuilder::FastGetPropertyByName(GateRef glue, GateRef obj, GateRef ke return ret; } -GateRef StubBuilder::FastGetPropertyByIndex(GateRef glue, GateRef obj, GateRef index) +GateRef StubBuilder::FastGetPropertyByIndex(GateRef glue, GateRef obj, GateRef index, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -3305,7 +3651,7 @@ GateRef StubBuilder::FastGetPropertyByIndex(GateRef glue, GateRef obj, GateRef i Branch(TaggedIsHeapObject(obj), &fastPath, &slowPath); Bind(&fastPath); { - result = GetPropertyByIndex(glue, obj, index); + result = GetPropertyByIndex(glue, obj, index, callback); Label notHole(env); Branch(TaggedIsHole(*result), &slowPath, &exit); } @@ -3373,7 +3719,7 @@ GateRef StubBuilder::OrdinaryHasInstance(GateRef glue, GateRef target, GateRef o auto prototypeString = GetGlobalConstantValue( VariableType::JS_POINTER(), glue, ConstantIndex::PROTOTYPE_STRING_INDEX); - GateRef constructorPrototype = FastGetPropertyByName(glue, target, prototypeString); + GateRef constructorPrototype = FastGetPropertyByName(glue, target, prototypeString, ProfileOperation()); // 5. ReturnIfAbrupt(P). 
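Before the OrdinaryHasInstance hunk continues below, a quick orientation on the algorithm it implements: fetch C.prototype once (here via FastGetPropertyByName with an empty ProfileOperation), then walk O's prototype chain comparing by identity. A hedged plain-C++ sketch over a hypothetical object model (JSObj and its fields are illustrative, not the engine's layout):

```cpp
#include <stdexcept>

// Hypothetical object model for illustration only.
struct JSObj {
    JSObj *proto;          // the object's [[Prototype]] slot
    JSObj *prototypeProp;  // value of the constructor's "prototype" property
};

// Sketch of ES OrdinaryHasInstance(C, O): read C.prototype once, then walk
// O's prototype chain; each step is an identity (SameValue) comparison.
bool OrdinaryHasInstance(const JSObj &ctor, const JSObj *obj)
{
    const JSObj *p = ctor.prototypeProp;  // step 4: Get(C, "prototype")
    if (p == nullptr) {
        throw std::runtime_error("TypeError: prototype is not an object");
    }
    for (const JSObj *cur = obj->proto; cur != nullptr; cur = cur->proto) {
        if (cur == p) {                   // step 7.c: O' == C.prototype
            return true;
        }
    }
    return false;                         // reached null prototype: not an instance
}

int main()
{
    JSObj proto{nullptr, nullptr};
    JSObj ctor{nullptr, &proto};
    JSObj instance{&proto, nullptr};      // instance.[[Prototype]] == ctor.prototype
    return OrdinaryHasInstance(ctor, &instance) ? 0 : 1;
}
```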
// no throw exception, so needn't return @@ -3638,74 +3984,204 @@ GateRef StubBuilder::SameValue(GateRef glue, GateRef left, GateRef right) return ret; } -GateRef StubBuilder::FastStringEqual(GateRef glue, GateRef left, GateRef right) +GateRef StubBuilder::SameValueZero(GateRef glue, GateRef left, GateRef right) { auto env = GetEnvironment(); Label entry(env); env->SubCfgEntry(&entry); DEFVARIABLE(result, VariableType::BOOL(), False()); Label exit(env); - Label lengthCompare(env); - Label hashcodeCompare(env); - Label contentsCompare(env); - - Branch(Int32Equal(ZExtInt1ToInt32(IsUtf16String(left)), ZExtInt1ToInt32(IsUtf16String(right))), - &lengthCompare, &exit); - - Bind(&lengthCompare); - Branch(Int32Equal(GetLengthFromString(left), GetLengthFromString(right)), &hashcodeCompare, - &exit); - - Bind(&hashcodeCompare); - Label leftNotNeg(env); - GateRef leftHash = TryGetHashcodeFromString(left); - GateRef rightHash = TryGetHashcodeFromString(right); - Branch(Int64Equal(leftHash, Int64(-1)), &contentsCompare, &leftNotNeg); - Bind(&leftNotNeg); - { - Label rightNotNeg(env); - Branch(Int64Equal(rightHash, Int64(-1)), &contentsCompare, &rightNotNeg); - Bind(&rightNotNeg); - Branch(Int64Equal(leftHash, rightHash), &contentsCompare, &exit); - } - - Bind(&contentsCompare); - { - GateRef stringEqual = CallRuntime(glue, RTSTUB_ID(StringEqual), { left, right }); - result = Equal(stringEqual, TaggedTrue()); - Jump(&exit); - } - - Bind(&exit); - auto ret = *result; - env->SubCfgExit(); - return ret; -} - -GateRef StubBuilder::FastStrictEqual(GateRef glue, GateRef left, GateRef right, ProfileOperation callback) -{ - auto env = GetEnvironment(); - Label entry(env); - env->SubCfgEntry(&entry); - DEFVARIABLE(result, VariableType::BOOL(), False()); + DEFVARIABLE(doubleLeft, VariableType::FLOAT64(), Double(0.0)); + DEFVARIABLE(doubleRight, VariableType::FLOAT64(), Double(0.0)); Label strictEqual(env); - Label leftIsNumber(env); - Label leftIsNotNumber(env); - Label sameVariableCheck(env); Label stringEqualCheck(env); Label stringCompare(env); Label bigIntEqualCheck(env); - Label exit(env); - Branch(TaggedIsNumber(left), &leftIsNumber, &leftIsNotNumber); - Bind(&leftIsNumber); + Label numberEqualCheck1(env); + + Branch(Equal(left, right), &strictEqual, &numberEqualCheck1); + Bind(&strictEqual); { - Label rightIsNumber(env); - Branch(TaggedIsNumber(right), &rightIsNumber, &exit); - Bind(&rightIsNumber); - { - DEFVARIABLE(doubleLeft, VariableType::FLOAT64(), Double(0.0)); - DEFVARIABLE(doubleRight, VariableType::FLOAT64(), Double(0.0)); - DEFVARIABLE(curType, VariableType::INT32(), Int32(PGOSampleType::IntType())); + result = True(); + Jump(&exit); + } + Bind(&numberEqualCheck1); + { + Label leftIsNumber(env); + Label leftIsNotNumber(env); + Branch(TaggedIsNumber(left), &leftIsNumber, &leftIsNotNumber); + Bind(&leftIsNumber); + { + Label rightIsNumber(env); + Branch(TaggedIsNumber(right), &rightIsNumber, &exit); + Bind(&rightIsNumber); + { + Label numberEqualCheck2(env); + Label leftIsInt(env); + Label leftNotInt(env); + Label getRight(env); + Branch(TaggedIsInt(left), &leftIsInt, &leftNotInt); + Bind(&leftIsInt); + { + doubleLeft = ChangeInt32ToFloat64(GetInt32OfTInt(left)); + Jump(&getRight); + } + Bind(&leftNotInt); + { + doubleLeft = GetDoubleOfTDouble(left); + Jump(&getRight); + } + Bind(&getRight); + { + Label rightIsInt(env); + Label rightNotInt(env); + Branch(TaggedIsInt(right), &rightIsInt, &rightNotInt); + Bind(&rightIsInt); + { + doubleRight = ChangeInt32ToFloat64(GetInt32OfTInt(right)); + 
Jump(&numberEqualCheck2); + } + Bind(&rightNotInt); + { + doubleRight = GetDoubleOfTDouble(right); + Jump(&numberEqualCheck2); + } + } + Bind(&numberEqualCheck2); + { + Label nanCheck(env); + Label doubleEqual(env); + Branch(DoubleEqual(*doubleLeft, *doubleRight), &doubleEqual, &nanCheck); + Bind(&doubleEqual); + { + result = True(); + Jump(&exit); + } + Bind(&nanCheck); + { + result = BoolAnd(DoubleIsNAN(*doubleLeft), DoubleIsNAN(*doubleRight)); + Jump(&exit); + } + } + } + } + Bind(&leftIsNotNumber); + Branch(TaggedIsNumber(right), &exit, &stringEqualCheck); + Bind(&stringEqualCheck); + Branch(BothAreString(left, right), &stringCompare, &bigIntEqualCheck); + Bind(&stringCompare); + { + result = FastStringEqual(glue, left, right); + Jump(&exit); + } + Bind(&bigIntEqualCheck); + { + Label leftIsBigInt(env); + Label leftIsNotBigInt(env); + Branch(TaggedIsBigInt(left), &leftIsBigInt, &exit); + Bind(&leftIsBigInt); + { + Label rightIsBigInt(env); + Branch(TaggedIsBigInt(right), &rightIsBigInt, &exit); + Bind(&rightIsBigInt); + result = CallNGCRuntime(glue, RTSTUB_ID(BigIntSameValueZero), { left, right }); + Jump(&exit); + } + } + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef StubBuilder::FastStringEqual(GateRef glue, GateRef left, GateRef right) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::BOOL(), False()); + Label exit(env); + Label hashcodeCompare(env); + Label contentsCompare(env); + Label lenEqualOneCheck(env); + Label lenIsOne(env); + Branch(Int32Equal(GetLengthFromString(left), GetLengthFromString(right)), &lenEqualOneCheck, &exit); + Bind(&lenEqualOneCheck); + Branch(Int32Equal(GetLengthFromString(left), Int32(1)), &lenIsOne, &hashcodeCompare); + Bind(&lenIsOne); + { + Label leftFlattenFastPath(env); + FlatStringStubBuilder leftFlat(this); + leftFlat.FlattenString(glue, left, &leftFlattenFastPath); + Bind(&leftFlattenFastPath); + { + Label rightFlattenFastPath(env); + FlatStringStubBuilder rightFlat(this); + rightFlat.FlattenString(glue, right, &rightFlattenFastPath); + Bind(&rightFlattenFastPath); + { + BuiltinsStringStubBuilder stringBuilder(this); + StringInfoGateRef leftStrInfoGate(&leftFlat); + StringInfoGateRef rightStrInfoGate(&rightFlat); + GateRef leftStrToInt = stringBuilder.StringAt(leftStrInfoGate, Int32(0)); + GateRef rightStrToInt = stringBuilder.StringAt(rightStrInfoGate, Int32(0)); + result = Equal(leftStrToInt, rightStrToInt); + Jump(&exit); + } + } + } + + Bind(&hashcodeCompare); + Label leftNotNeg(env); + GateRef leftHash = TryGetHashcodeFromString(left); + GateRef rightHash = TryGetHashcodeFromString(right); + Branch(Int64Equal(leftHash, Int64(-1)), &contentsCompare, &leftNotNeg); + Bind(&leftNotNeg); + { + Label rightNotNeg(env); + Branch(Int64Equal(rightHash, Int64(-1)), &contentsCompare, &rightNotNeg); + Bind(&rightNotNeg); + Branch(Int64Equal(leftHash, rightHash), &contentsCompare, &exit); + } + + Bind(&contentsCompare); + { + GateRef stringEqual = CallRuntime(glue, RTSTUB_ID(StringEqual), { left, right }); + result = Equal(stringEqual, TaggedTrue()); + Jump(&exit); + } + + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef StubBuilder::FastStrictEqual(GateRef glue, GateRef left, GateRef right, ProfileOperation callback) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::BOOL(), False()); + Label strictEqual(env); + Label leftIsNumber(env); + Label 
leftIsNotNumber(env); Label sameVariableCheck(env); Label stringEqualCheck(env); Label stringCompare(env); Label bigIntEqualCheck(env); Label exit(env); Branch(TaggedIsNumber(left), &leftIsNumber, &leftIsNotNumber); Bind(&leftIsNumber); { Label rightIsNumber(env); Branch(TaggedIsNumber(right), &rightIsNumber, &exit); Bind(&rightIsNumber); { DEFVARIABLE(doubleLeft, VariableType::FLOAT64(), Double(0.0)); DEFVARIABLE(doubleRight, VariableType::FLOAT64(), Double(0.0)); DEFVARIABLE(curType, VariableType::INT32(), Int32(PGOSampleType::IntType())); Label leftIsInt(env); Label leftNotInt(env); Label getRight(env); @@ -3794,7 +4270,7 @@ GateRef StubBuilder::FastStrictEqual(GateRef glue, GateRef left, GateRef right, return ret; } -GateRef StubBuilder::FastEqual(GateRef left, GateRef right, ProfileOperation callback) +GateRef StubBuilder::FastEqual(GateRef glue, GateRef left, GateRef right, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -3918,6 +4394,23 @@ GateRef StubBuilder::FastEqual(GateRef left, GateRef right, ProfileOperation cal } Bind(&leftNotBoolOrRightNotSpecial); { + Label bothString(env); + Label eitherNotString(env); + Branch(BothAreString(left, right), &bothString, &eitherNotString); + Bind(&bothString); + { + callback.ProfileOpType(Int32(PGOSampleType::StringType())); + Label stringEqual(env); + Label stringNotEqual(env); + Branch(FastStringEqual(glue, left, right), &stringEqual, &stringNotEqual); + Bind(&stringEqual); + result = TaggedTrue(); + Jump(&exit); + Bind(&stringNotEqual); + result = TaggedFalse(); + Jump(&exit); + } + Bind(&eitherNotString); callback.ProfileOpType(Int32(PGOSampleType::AnyType())); Jump(&exit); } @@ -4228,10 +4721,9 @@ GateRef StubBuilder::FastAddSubAndMul(GateRef left, GateRef right, ProfileOperat Label exit(env); Label overflow(env); Label notOverflow(env); - auto res = BinaryOp<Op, MachineType::I64>(GetInt64OfTInt(left), GetInt64OfTInt(right)); - auto condition1 = Int64GreaterThan(res, Int64(INT32_MAX)); - auto condition2 = Int64LessThan(res, Int64(INT32_MIN)); - Branch(BoolOr(condition1, condition2), &overflow, &notOverflow); + auto res = BinaryOpWithOverflow<Op, MachineType::I32>(GetInt32OfTInt(left), GetInt32OfTInt(right)); + GateRef condition = env->GetBuilder()->ExtractValue(MachineType::I1, res, Int32(1)); + Branch(condition, &overflow, &notOverflow); Bind(&overflow); { auto doubleLeft = ChangeInt32ToFloat64(GetInt32OfTInt(left)); @@ -4243,11 +4735,12 @@ GateRef StubBuilder::FastAddSubAndMul(GateRef left, GateRef right, ProfileOperat } Bind(&notOverflow); { + res = env->GetBuilder()->ExtractValue(MachineType::I32, res, Int32(0)); if (Op == OpCode::MUL) { Label resultIsZero(env); Label returnNegativeZero(env); Label returnResult(env); - Branch(Int64Equal(res, Int64(0)), &resultIsZero, &returnResult); + Branch(Int32Equal(res, Int32(0)), &resultIsZero, &returnResult); Bind(&resultIsZero); GateRef leftNegative = Int32LessThan(GetInt32OfTInt(left), Int32(0)); GateRef rightNegative = Int32LessThan(GetInt32OfTInt(right), Int32(0)); @@ -4257,11 +4750,11 @@ GateRef StubBuilder::FastAddSubAndMul(GateRef left, GateRef right, ProfileOperat callback.ProfileOpType(Int32(PGOSampleType::DoubleType())); Jump(&exit); Bind(&returnResult); - result = IntToTaggedPtr(TruncInt64ToInt32(res)); + result = IntToTaggedPtr(res); callback.ProfileOpType(Int32(PGOSampleType::IntType())); Jump(&exit); } else { - result = IntToTaggedPtr(TruncInt64ToInt32(res)); + result = IntToTaggedPtr(res); callback.ProfileOpType(Int32(PGOSampleType::IntType())); Jump(&exit); } @@ -4476,7
+4969,7 @@ GateRef StubBuilder::FastMod(GateRef glue, GateRef left, GateRef right, ProfileO return ret; } -GateRef StubBuilder::GetGlobalOwnProperty(GateRef glue, GateRef receiver, GateRef key) +GateRef StubBuilder::GetGlobalOwnProperty(GateRef glue, GateRef receiver, GateRef key, ProfileOperation callback) { auto env = GetEnvironment(); Label entryLabel(env); @@ -4494,7 +4987,7 @@ GateRef StubBuilder::GetGlobalOwnProperty(GateRef glue, GateRef receiver, GateRe Branch(TaggedIsAccessor(*result), &callGetter, &exit); Bind(&callGetter); { - result = CallGetterHelper(glue, receiver, receiver, *result); + result = CallGetterHelper(glue, receiver, receiver, *result, callback); Jump(&exit); } } @@ -4504,6 +4997,11 @@ GateRef StubBuilder::GetGlobalOwnProperty(GateRef glue, GateRef receiver, GateRe return ret; } +GateRef StubBuilder::GetConstPoolFromFunction(GateRef jsFunc) +{ + return env_->GetBuilder()->GetConstPoolFromFunction(jsFunc); +} + GateRef StubBuilder::GetStringFromConstPool(GateRef glue, GateRef constpool, GateRef index) { GateRef module = Circuit::NullGate(); @@ -4532,121 +5030,6 @@ GateRef StubBuilder::GetObjectLiteralFromConstPool(GateRef glue, GateRef constpo ConstPoolType::OBJECT_LITERAL); } -// return elements -GateRef StubBuilder::BuildArgumentsListFastElements(GateRef glue, GateRef arrayObj) -{ - auto env = GetEnvironment(); - Label subentry(env); - env->SubCfgEntry(&subentry); - DEFVARIABLE(res, VariableType::JS_ANY(), Hole()); - Label exit(env); - Label hasStableElements(env); - Label targetIsStableJSArguments(env); - Label targetNotStableJSArguments(env); - Label targetIsInt(env); - Label hClassEqual(env); - Label targetIsStableJSArray(env); - Label targetNotStableJSArray(env); - - Branch(HasStableElements(glue, arrayObj), &hasStableElements, &exit); - Bind(&hasStableElements); - { - Branch(IsStableJSArguments(glue, arrayObj), &targetIsStableJSArguments, &targetNotStableJSArguments); - Bind(&targetIsStableJSArguments); - { - GateRef hClass = LoadHClass(arrayObj); - GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit())); - GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue, glueGlobalEnvOffset); - GateRef argmentsClass = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv, - GlobalEnv::ARGUMENTS_CLASS); - Branch(Int32Equal(hClass, argmentsClass), &hClassEqual, &exit); - Bind(&hClassEqual); - { - GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX); - GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset); - Branch(TaggedIsInt(result), &targetIsInt, &exit); - Bind(&targetIsInt); - { - res = GetElementsArray(arrayObj); - Jump(&exit); - } - } - } - Bind(&targetNotStableJSArguments); - { - Branch(IsStableJSArray(glue, arrayObj), &targetIsStableJSArray, &targetNotStableJSArray); - Bind(&targetIsStableJSArray); - { - res = GetElementsArray(arrayObj); - Jump(&exit); - } - Bind(&targetNotStableJSArray); - { - FatalPrint(glue, { Int32(GET_MESSAGE_STRING_ID(ThisBranchIsUnreachable)) }); - Jump(&exit); - } - } - } - Bind(&exit); - auto ret = *res; - env->SubCfgExit(); - return ret; -} - -GateRef StubBuilder::MakeArgListWithHole(GateRef glue, GateRef argv, GateRef length) -{ - auto env = GetEnvironment(); - Label subentry(env); - env->SubCfgEntry(&subentry); - DEFVARIABLE(res, VariableType::INT32(), length); - DEFVARIABLE(i, VariableType::INT32(), Int32(0)); - Label exit(env); - - GateRef argsLength = GetLengthOfTaggedArray(argv); - - Label 
lengthGreaterThanArgsLength(env); - Label lengthLessThanArgsLength(env); - Branch(Int32GreaterThan(length, argsLength), &lengthGreaterThanArgsLength, &lengthLessThanArgsLength); - Bind(&lengthGreaterThanArgsLength); - { - res = argsLength; - Jump(&lengthLessThanArgsLength); - } - Bind(&lengthLessThanArgsLength); - { - Label loopHead(env); - Label loopEnd(env); - Label afterLoop(env); - Label targetIsHole(env); - Label targetNotHole(env); - Branch(Int32UnsignedLessThan(*i, *res), &loopHead, &afterLoop); - LoopBegin(&loopHead); - { - GateRef value = GetValueFromTaggedArray(argv, *i); - Branch(TaggedIsHole(value), &targetIsHole, &targetNotHole); - Bind(&targetIsHole); - { - SetValueToTaggedArray(VariableType::JS_ANY(), glue, argv, *i, Undefined()); - Jump(&targetNotHole); - } - Bind(&targetNotHole); - i = Int32Add(*i, Int32(1)); - Branch(Int32UnsignedLessThan(*i, *res), &loopEnd, &afterLoop); - } - Bind(&loopEnd); - LoopEnd(&loopHead); - Bind(&afterLoop); - { - res = length; - Jump(&exit); - } - } - Bind(&exit); - auto ret = *res; - env->SubCfgExit(); - return ret; -} - GateRef StubBuilder::JSAPIContainerGet(GateRef glue, GateRef receiver, GateRef index) { auto env = GetEnvironment(); @@ -4806,7 +5189,7 @@ GateRef StubBuilder::ConstructorCheck(GateRef glue, GateRef ctor, GateRef outPut GateRef StubBuilder::JSCallDispatch(GateRef glue, GateRef func, GateRef actualNumArgs, GateRef jumpSize, GateRef hotnessCounter, JSCallMode mode, std::initializer_list<GateRef> args, - ProfileOperation callback, BytecodeInstruction::Format format) + ProfileOperation callback) { auto env = GetEnvironment(); Label entryPass(env); @@ -4956,7 +5339,7 @@ GateRef StubBuilder::JSCallDispatch(GateRef glue, GateRef func, GateRef actualNu // 4. call nonNative Bind(&methodNotNative); - callback.ProfileCall(func, format); + callback.ProfileCall(func); Label funcIsClassConstructor(env); Label funcNotClassConstructor(env); Label methodNotAot(env); @@ -5466,6 +5849,7 @@ GateRef StubBuilder::TryStringOrSymbolToElementIndex(GateRef glue, GateRef key) Label greatThanZero(env); Label inRange(env); + Label flattenFastPath(env); auto len = GetLengthFromString(key); Branch(Int32Equal(len, Int32(0)), &exit, &greatThanZero); Bind(&greatThanZero); { @@ -5473,10 +5857,14 @@ GateRef StubBuilder::TryStringOrSymbolToElementIndex(GateRef glue, GateRef key) Bind(&inRange); { Label isUtf8(env); + DEFVARIABLE(c, VariableType::INT32(), Int32(0)); Branch(IsUtf16String(key), &exit, &isUtf8); Bind(&isUtf8); - GateRef data = GetNormalStringData(FlattenString(glue, key)); - DEFVARIABLE(c, VariableType::INT32(), Int32(0)); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, key, &flattenFastPath); + Bind(&flattenFastPath); + StringInfoGateRef stringInfoGate(&thisFlat); + GateRef data = GetNormalStringData(stringInfoGate); c = ZExtInt8ToInt32(Load(VariableType::INT8(), data)); Label isDigitZero(env); Label notDigitZero(env); @@ -5691,38 +6079,7 @@ void StubBuilder::Assert(int messageId, int line, GateRef glue, GateRef conditio } } -GateRef StubBuilder::FlattenString(GateRef glue, GateRef str) -{ - auto env = GetEnvironment(); - Label entry(env); - env->SubCfgEntry(&entry); - Label exit(env); - DEFVARIABLE(result, VariableType::JS_POINTER(), str); - Label isTreeString(env); - Branch(IsTreeString(str), &isTreeString, &exit); - Bind(&isTreeString); - { - Label isFlat(env); - Label notFlat(env); - Branch(TreeStringIsFlat(str), &isFlat, &notFlat); - Bind(&isFlat); - { - result = GetFirstFromTreeString(str); - Jump(&exit); - } - Bind(&notFlat); - { - result = CallRuntime(glue, RTSTUB_ID(SlowFlattenString), { str }); - Jump(&exit); - } - } - Bind(&exit); - auto ret = *result; - env->SubCfgExit(); - return ret; -} - -GateRef StubBuilder::GetNormalStringData(GateRef str) +GateRef StubBuilder::GetNormalStringData(const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -5730,18 +6087,31 @@ GateRef StubBuilder::GetNormalStringData(GateRef str) Label exit(env); Label isConstantString(env); Label isLineString(env); + Label isUtf8(env); + Label isUtf16(env); DEFVARIABLE(result, VariableType::JS_ANY(), Undefined()); - Branch(IsConstantString(str), &isConstantString, &isLineString); + Branch(IsConstantString(stringInfoGate.GetString()), &isConstantString, &isLineString); Bind(&isConstantString); { - GateRef address = PtrAdd(str, IntPtr(ConstantString::CONSTANT_DATA_OFFSET)); - result = Load(VariableType::JS_ANY(), address, IntPtr(0)); + GateRef address = PtrAdd(stringInfoGate.GetString(), IntPtr(ConstantString::CONSTANT_DATA_OFFSET)); + result = PtrAdd(Load(VariableType::JS_ANY(), address, IntPtr(0)), ZExtInt32ToPtr(stringInfoGate.GetStartIndex())); Jump(&exit); } Bind(&isLineString); { - result = PtrAdd(str, IntPtr(LineEcmaString::DATA_OFFSET)); - Jump(&exit); + GateRef data = PtrAdd(stringInfoGate.GetString(), IntPtr(LineEcmaString::DATA_OFFSET)); + Branch(IsUtf8String(stringInfoGate.GetString()), &isUtf8, &isUtf16); + Bind(&isUtf8); + { + result = PtrAdd(data, ZExtInt32ToPtr(stringInfoGate.GetStartIndex())); + Jump(&exit); + } + Bind(&isUtf16); + { + GateRef offset = PtrMul(ZExtInt32ToPtr(stringInfoGate.GetStartIndex()), IntPtr(sizeof(uint16_t))); + result = PtrAdd(data, offset); + Jump(&exit); + } } Bind(&exit); auto ret = *result; @@ -5749,35 +6119,6 @@ GateRef StubBuilder::GetNormalStringData(GateRef str) return ret; } -void StubBuilder::FlattenString(GateRef str, Variable *flatStr, Label *fastPath, Label *slowPath) -{ - auto env = GetEnvironment(); - Label notLineString(env); - Label exit(env); - DEFVARIABLE(result, VariableType::JS_POINTER(), str); - Branch(BoolOr(IsLineString(str), IsConstantString(str)), &exit, &notLineString); - Bind(&notLineString); - { - Label isTreeString(env); - Branch(IsTreeString(str), &isTreeString, &exit); - Bind(&isTreeString); - { - Label isFlat(env); - Branch(TreeStringIsFlat(str), &isFlat, slowPath); - Bind(&isFlat); - { - result = GetFirstFromTreeString(str); - Jump(&exit); - } - } - } - Bind(&exit); - { - flatStr->WriteVariable(*result); - Jump(fastPath); - } -} - GateRef StubBuilder::ToNumber(GateRef glue, GateRef tagged) { auto env = GetEnvironment(); @@ -5786,7 +6127,6 @@ GateRef StubBuilder::ToNumber(GateRef glue, GateRef tagged) Label exit(env); Label isNumber(env); Label notNumber(env); - Label defaultLabel(env); DEFVARIABLE(result, VariableType::JS_ANY(), Hole()); Branch(TaggedIsNumber(tagged), &isNumber, &notNumber); Bind(&isNumber); { @@ -5796,204 +6136,15 @@ GateRef StubBuilder::ToNumber(GateRef glue, GateRef tagged) } Bind(&notNumber); { - Label returnNan(env); - Label notNan(env); - Label returnNumber1(env); - Label notNumber1(env); - Label returnNumber0(env); - auto isHole = TaggedIsHole(tagged); - auto isUndefined = TaggedIsUndefined(tagged); - Branch(BoolOr(isHole, isUndefined), &returnNan, &notNan); - Bind(&returnNan); - { - result = DoubleToTaggedDoublePtr(Double(base::NAN_VALUE)); - Jump(&exit); - } - Bind(&notNan); - Branch(TaggedIsTrue(tagged), &returnNumber1, &notNumber1); - Bind(&returnNumber1); - { - result = Int64ToTaggedPtr(Int32(1)); - Jump(&exit); - } -
Bind(&notNumber1); - auto isFalse = TaggedIsFalse(tagged); - auto isNull = TaggedIsNull(tagged); - Branch(BoolOr(isFalse, isNull), &returnNumber0, &defaultLabel); - Bind(&returnNumber0); - { - result = Int64ToTaggedPtr(Int32(0)); - Jump(&exit); - } - Bind(&defaultLabel); - { - CallRuntime(glue, RTSTUB_ID(OtherToNumber), { tagged }); - Jump(&exit); - } - } - Bind(&exit); - auto ret = *result; - env->SubCfgExit(); - return ret; -} - -GateRef StubBuilder::GetLengthOfJsArray(GateRef glue, GateRef array) -{ - auto env = GetEnvironment(); - Label entry(env); - env->SubCfgEntry(&entry); - Label exit(env); - Label isInt(env); - Label notInt(env); - Label notDouble(env); - Label isDouble(env); - DEFVARIABLE(result, VariableType::INT32(), Int32(0)); - GateRef len = Load(VariableType::JS_ANY(), array, IntPtr(JSArray::LENGTH_OFFSET)); - Branch(TaggedIsInt(len), &isInt, &notInt); - Bind(&isInt); - { - result = TaggedGetInt(len); + result = CallRuntime(glue, RTSTUB_ID(ToNumber), { tagged }); Jump(&exit); } - Bind(&notInt); - { - Branch(TaggedIsDouble(len), &isDouble, &notDouble); - Bind(&isDouble); - { - result = DoubleToInt(glue, GetDoubleOfTDouble(len)); - Jump(&exit); - } - Bind(&notDouble); - { - FatalPrint(glue, { Int32(GET_MESSAGE_STRING_ID(ThisBranchIsUnreachable)) }); - Jump(&exit); - } - } Bind(&exit); auto ret = *result; env->SubCfgExit(); return ret; } -GateRef StubBuilder::CreateListFromArrayLike(GateRef glue, GateRef arrayObj) -{ - auto env = GetEnvironment(); - Label entry(env); - env->SubCfgEntry(&entry); - DEFVARIABLE(res, VariableType::JS_ANY(), Hole()); - DEFVARIABLE(index, VariableType::INT32(), Int32(0)); - Label exit(env); - - // 3. If Type(obj) is Object, throw a TypeError exception. - Label targetIsHeapObject(env); - Label targetIsEcmaObject(env); - Label targetNotEcmaObject(env); - Branch(TaggedIsHeapObject(arrayObj), &targetIsHeapObject, &targetNotEcmaObject); - Bind(&targetIsHeapObject); - Branch(TaggedObjectIsEcmaObject(arrayObj), &targetIsEcmaObject, &targetNotEcmaObject); - Bind(&targetNotEcmaObject); - { - GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(TargetTypeNotObject)); - CallRuntime(glue, RTSTUB_ID(ThrowTypeError), { IntToTaggedInt(taggedId) }); - Jump(&exit); - } - Bind(&targetIsEcmaObject); - { - // 4. Let len be ToLength(Get(obj, "length")). - GateRef lengthString = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, - ConstantIndex::LENGTH_STRING_INDEX); - GateRef value = FastGetPropertyByName(glue, arrayObj, lengthString); - GateRef number = ToLength(glue, value); - // 5. ReturnIfAbrupt(len). - Label isPendingException1(env); - Label noPendingException1(env); - Branch(HasPendingException(glue), &isPendingException1, &noPendingException1); - Bind(&isPendingException1); - { - Jump(&exit); - } - Bind(&noPendingException1); - { - Label indexInRange(env); - Label indexOutRange(env); - GateRef doubleLen = GetDoubleOfTNumber(number); - Branch(DoubleGreaterThan(doubleLen, Double(JSObject::MAX_ELEMENT_INDEX)), &indexOutRange, &indexInRange); - Bind(&indexOutRange); - { - GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(LenGreaterThanMax)); - CallRuntime(glue, RTSTUB_ID(ThrowTypeError), { IntToTaggedInt(taggedId) }); - Jump(&exit); - } - Bind(&indexInRange); - { - GateRef int32Len = DoubleToInt(glue, doubleLen); - // 6. Let list be an empty List.
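The removal of StubBuilder::CreateListFromArrayLike continues below. For reference, the ECMA-262 steps the deleted circuit code encoded (type check, length clamp, element copy), as a hedged plain-C++ sketch; ArrayLike and its members are illustrative stand-ins, not the engine's value model:

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

// Hypothetical array-like; the deleted stub expressed the same steps as circuit nodes.
struct ArrayLike {
    bool isObject;
    std::vector<int> elems;
    double Length() const { return static_cast<double>(elems.size()); }
    int Get(uint32_t i) const { return i < elems.size() ? elems[i] : 0; }
};

// Sketch of ES CreateListFromArrayLike(obj): reject non-objects, read and
// clamp the length, then copy elements 0..len-1 into a fresh list.
std::vector<int> CreateListFromArrayLike(const ArrayLike &obj)
{
    if (!obj.isObject) {                       // step 3: TypeError if not an object
        throw std::runtime_error("TypeError: target type not object");
    }
    double len = obj.Length();                 // step 4: ToLength(Get(obj, "length"))
    if (len > 4294967295.0) {                  // cap comparable to MAX_ELEMENT_INDEX
        throw std::runtime_error("TypeError: len greater than max");
    }
    auto n = static_cast<uint32_t>(len);
    std::vector<int> list;                     // step 6: let list be an empty List
    list.reserve(n);
    for (uint32_t i = 0; i < n; ++i) {
        list.push_back(obj.Get(i));            // steps 7-8: repeat while index < len
    }
    return list;
}

int main()
{
    ArrayLike a{true, {1, 2, 3}};
    return CreateListFromArrayLike(a).size() == 3 ? 0 : 1;
}
```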
- NewObjectStubBuilder newBuilder(this); - GateRef array = newBuilder.NewTaggedArray(glue, int32Len); - Label targetIsTypeArray(env); - Label targetNotTypeArray(env); - Branch(IsTypedArray(arrayObj), &targetIsTypeArray, &targetNotTypeArray); - Bind(&targetIsTypeArray); - { - TypedArrayStubBuilder arrayStubBuilder(this); - arrayStubBuilder.FastCopyElementToArray(glue, arrayObj, array); - // c. ReturnIfAbrupt(next). - Label isPendingException2(env); - Label noPendingException2(env); - Branch(HasPendingException(glue), &isPendingException2, &noPendingException2); - Bind(&isPendingException2); - { - Jump(&exit); - } - Bind(&noPendingException2); - { - res = array; - Jump(&exit); - } - } - Bind(&targetNotTypeArray); - // 8. Repeat while index < len - Label loopHead(env); - Label loopEnd(env); - Label afterLoop(env); - Label isPendingException3(env); - Label noPendingException3(env); - Label storeValue(env); - Jump(&loopHead); - LoopBegin(&loopHead); - { - Branch(Int32UnsignedLessThan(*index, int32Len), &storeValue, &afterLoop); - Bind(&storeValue); - { - GateRef next = FastGetPropertyByIndex(glue, arrayObj, *index); - // c. ReturnIfAbrupt(next). - Branch(HasPendingException(glue), &isPendingException3, &noPendingException3); - Bind(&isPendingException3); - { - Jump(&exit); - } - Bind(&noPendingException3); - SetValueToTaggedArray(VariableType::JS_ANY(), glue, array, *index, next); - index = Int32Add(*index, Int32(1)); - Jump(&loopEnd); - } - } - Bind(&loopEnd); - LoopEnd(&loopHead); - Bind(&afterLoop); - { - res = array; - Jump(&exit); - } - } - } - } - Bind(&exit); - GateRef ret = *res; - env->SubCfgExit(); - return ret; -} - GateRef StubBuilder::ToLength(GateRef glue, GateRef target) { auto env = GetEnvironment(); @@ -6087,20 +6238,8 @@ GateRef StubBuilder::HasStableElements(GateRef glue, GateRef obj) Bind(&targetIsStableElements); { GateRef guardiansOffset = IntPtr(JSThread::GlueData::GetStableArrayElementsGuardiansOffset(env->Is32Bit())); - GateRef guardians = Load(VariableType::JS_ANY(), glue, guardiansOffset); - Label targetIsTaggedTrue(env); - Label targetIsTaggedFalse(env); - Branch(TaggedIsTrue(guardians), &targetIsTaggedTrue, &targetIsTaggedFalse); - Bind(&targetIsTaggedTrue); - { - result = True(); - Jump(&exit); - } - Bind(&targetIsTaggedFalse); - { - result = False(); - Jump(&exit); - } + result = Load(VariableType::BOOL(), glue, guardiansOffset); + Jump(&exit); } } Bind(&exit); @@ -6127,21 +6266,8 @@ GateRef StubBuilder::IsStableJSArguments(GateRef glue, GateRef obj) Bind(&targetIsStableArguments); { GateRef guardiansOffset = IntPtr(JSThread::GlueData::GetStableArrayElementsGuardiansOffset(env->Is32Bit())); - GateRef guardians = Load(VariableType::JS_ANY(), glue, guardiansOffset); - - Label targetIsTaggedTrue(env); - Label targetIsTaggedFalse(env); - Branch(TaggedIsTrue(guardians), &targetIsTaggedTrue, &targetIsTaggedFalse); - Bind(&targetIsTaggedTrue); - { - result = True(); - Jump(&exit); - } - Bind(&targetIsTaggedFalse); - { - result = False(); - Jump(&exit); - } + result = Load(VariableType::BOOL(), glue, guardiansOffset); + Jump(&exit); } } Bind(&exit); @@ -6168,21 +6294,9 @@ GateRef StubBuilder::IsStableJSArray(GateRef glue, GateRef obj) Bind(&targetIsStableArray); { GateRef guardiansOffset = IntPtr(JSThread::GlueData::GetStableArrayElementsGuardiansOffset(env->Is32Bit())); - GateRef guardians = Load(VariableType::JS_ANY(), glue, guardiansOffset); - - Label targetIsTaggedTrue(env); - Label targetIsTaggedFalse(env); - Branch(TaggedIsTrue(guardians), &targetIsTaggedTrue, 
&targetIsTaggedFalse); - Bind(&targetIsTaggedTrue); - { - result = True(); - Jump(&exit); - } - Bind(&targetIsTaggedFalse); - { - result = False(); - Jump(&exit); - } + GateRef guardians = Load(VariableType::BOOL(), glue, guardiansOffset); + result.WriteVariable(guardians); + Jump(&exit); } } Bind(&exit); diff --git a/ecmascript/compiler/stub_builder.h b/ecmascript/compiler/stub_builder.h index b524dc514320a1c3735515cac37a71377159dbee..9cf7a6cbf3048194cc595e95944b933688e2212e 100644 --- a/ecmascript/compiler/stub_builder.h +++ b/ecmascript/compiler/stub_builder.h @@ -23,6 +23,7 @@ #include "ecmascript/compiler/variable_type.h" namespace panda::ecmascript::kungfu { +struct StringInfoGateRef; using namespace panda::ecmascript; // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) #define DEFVARIABLE(varname, type, val) Variable varname(GetEnvironment(), type, NextVariableId(), val) @@ -209,6 +210,7 @@ public: GateRef ObjectAddressToRange(GateRef x); GateRef InYoungGeneration(GateRef region); GateRef TaggedIsGeneratorObject(GateRef x); + GateRef TaggedIsJSArray(GateRef x); GateRef TaggedIsAsyncGeneratorObject(GateRef x); GateRef TaggedIsJSGlobalObject(GateRef x); GateRef TaggedIsWeak(GateRef x); @@ -219,6 +221,7 @@ public: GateRef TaggedIsString(GateRef obj); GateRef BothAreString(GateRef x, GateRef y); GateRef TaggedIsStringOrSymbol(GateRef obj); + GateRef TaggedIsSymbol(GateRef obj); GateRef GetNextPositionForHash(GateRef last, GateRef count, GateRef size); GateRef DoubleIsNAN(GateRef x); GateRef DoubleIsINF(GateRef x); @@ -235,9 +238,12 @@ public: GateRef IntToTaggedInt(GateRef x); GateRef Int64ToTaggedInt(GateRef x); GateRef DoubleToTaggedDoublePtr(GateRef x); + GateRef TaggedPtrToTaggedDoublePtr(GateRef x); + GateRef TaggedPtrToTaggedIntPtr(GateRef x); GateRef CastDoubleToInt64(GateRef x); GateRef TaggedTrue(); GateRef TaggedFalse(); + GateRef TaggedUndefined(); // compare operation GateRef Int8Equal(GateRef x, GateRef y); GateRef Equal(GateRef x, GateRef y); @@ -258,6 +264,7 @@ public: GateRef Int32UnsignedGreaterThan(GateRef x, GateRef y); GateRef Int32UnsignedLessThan(GateRef x, GateRef y); GateRef Int32UnsignedGreaterThanOrEqual(GateRef x, GateRef y); + GateRef Int32UnsignedLessThanOrEqual(GateRef x, GateRef y); GateRef Int64GreaterThan(GateRef x, GateRef y); GateRef Int64LessThan(GateRef x, GateRef y); GateRef Int64LessThanOrEqual(GateRef x, GateRef y); @@ -280,7 +287,6 @@ public: void SetPropertiesArray(VariableType type, GateRef glue, GateRef object, GateRef propsArray); void SetHash(GateRef glue, GateRef object, GateRef hash); GateRef GetLengthOfTaggedArray(GateRef array); - GateRef GetLengthOfJsArray(GateRef glue, GateRef array); // object operation GateRef IsJSHClass(GateRef obj); GateRef LoadHClass(GateRef object); @@ -291,6 +297,8 @@ public: GateRef IsDictionaryModeByHClass(GateRef hClass); GateRef IsDictionaryElement(GateRef hClass); GateRef IsStableElements(GateRef hClass); + GateRef HasConstructorByHClass(GateRef hClass); + GateRef HasConstructor(GateRef object); GateRef IsClassConstructorFromBitField(GateRef bitfield); GateRef IsClassConstructor(GateRef object); GateRef IsClassPrototype(GateRef object); @@ -300,6 +308,7 @@ public: GateRef IsSymbol(GateRef obj); GateRef IsString(GateRef obj); GateRef IsLineString(GateRef obj); + GateRef IsSlicedString(GateRef obj); GateRef IsConstantString(GateRef obj); GateRef IsTreeString(GateRef obj); GateRef TreeStringIsFlat(GateRef string); @@ -332,12 +341,14 @@ public: GateRef IsJSAPILinkedList(GateRef obj); GateRef IsJSAPIList(GateRef 
obj); GateRef IsJSAPIArrayList(GateRef obj); + GateRef IsJSObjectType(GateRef obj, JSType jsType); GateRef GetTarget(GateRef proxyObj); GateRef HandlerBaseIsAccessor(GateRef attr); GateRef HandlerBaseIsJSArray(GateRef attr); GateRef HandlerBaseIsInlinedProperty(GateRef attr); GateRef HandlerBaseGetOffset(GateRef attr); GateRef HandlerBaseGetAttrIndex(GateRef attr); + GateRef HandlerBaseGetRep(GateRef attr); GateRef IsInvalidPropertyBox(GateRef obj); GateRef GetValueFromPropertyBox(GateRef obj); void SetValueToPropertyBox(GateRef glue, GateRef obj, GateRef value); @@ -357,11 +368,13 @@ public: GateRef HclassIsTransitionHandler(GateRef hClass); GateRef HclassIsPropertyBox(GateRef hClass); GateRef PropAttrGetOffset(GateRef attr); - GateRef InstanceOf(GateRef glue, GateRef object, GateRef target, GateRef profileTypeInfo, GateRef slotId); + GateRef InstanceOf(GateRef glue, GateRef object, GateRef target, GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback); GateRef OrdinaryHasInstance(GateRef glue, GateRef target, GateRef obj); void TryFastHasInstance(GateRef glue, GateRef instof, GateRef target, GateRef object, Label *fastPath, - Label *exit, Variable *result); + Label *exit, Variable *result, ProfileOperation callback); GateRef SameValue(GateRef glue, GateRef left, GateRef right); + GateRef SameValueZero(GateRef glue, GateRef left, GateRef right); GateRef HasStableElements(GateRef glue, GateRef obj); GateRef IsStableJSArguments(GateRef glue, GateRef obj); GateRef IsStableJSArray(GateRef glue, GateRef obj); @@ -380,7 +393,9 @@ public: GateRef TryGetHashcodeFromString(GateRef string); GateRef GetFirstFromTreeString(GateRef string); GateRef GetSecondFromTreeString(GateRef string); + GateRef GetIsAllTaggedPropFromHClass(GateRef hclass); void SetBitFieldToHClass(GateRef glue, GateRef hClass, GateRef bitfield); + void SetIsAllTaggedProp(GateRef glue, GateRef hclass, GateRef hasRep); void SetPrototypeToHClass(VariableType type, GateRef glue, GateRef hClass, GateRef proto); void SetProtoChangeDetailsToHClass(VariableType type, GateRef glue, GateRef hClass, GateRef protoChange); @@ -394,16 +409,22 @@ public: GateRef value, GateRef attrOffset, VariableType type = VariableType::JS_ANY()); GateRef GetPropertyInlinedProps(GateRef obj, GateRef hClass, GateRef index); + GateRef GetInlinedPropOffsetFromHClass(GateRef hclass, GateRef attrOffset); void IncNumberOfProps(GateRef glue, GateRef hClass); GateRef GetNumberOfPropsFromHClass(GateRef hClass); GateRef IsTSHClass(GateRef hClass); void SetNumberOfPropsToHClass(GateRef glue, GateRef hClass, GateRef value); + GateRef GetElementsKindFromHClass(GateRef hClass); GateRef GetObjectSizeFromHClass(GateRef hClass); GateRef GetInlinedPropsStartFromHClass(GateRef hClass); GateRef GetInlinedPropertiesFromHClass(GateRef hClass); void ThrowTypeAndReturn(GateRef glue, int messageId, GateRef val); GateRef GetValueFromTaggedArray(GateRef elements, GateRef index); + void SetValueToTaggedArrayWithAttr( + GateRef glue, GateRef array, GateRef index, GateRef key, GateRef val, GateRef attr); + void SetValueToTaggedArrayWithRep( + GateRef glue, GateRef array, GateRef index, GateRef val, GateRef rep, Label *repChange); void SetValueToTaggedArray(VariableType valType, GateRef glue, GateRef array, GateRef index, GateRef val); void UpdateValueAndAttributes(GateRef glue, GateRef elements, GateRef index, GateRef value, GateRef attr); GateRef IsSpecialIndexedObj(GateRef jsType); @@ -427,9 +448,9 @@ public: GateRef IsMatchInTransitionDictionary(GateRef element, GateRef 
key, GateRef metaData, GateRef attr); GateRef FindEntryFromTransitionDictionary(GateRef glue, GateRef elements, GateRef key, GateRef metaData); GateRef JSObjectGetProperty(GateRef obj, GateRef hClass, GateRef propAttr); - void JSObjectSetProperty(GateRef glue, GateRef obj, GateRef hClass, GateRef attr, GateRef value); + void JSObjectSetProperty(GateRef glue, GateRef obj, GateRef hClass, GateRef attr, GateRef key, GateRef value); GateRef ShouldCallSetter(GateRef receiver, GateRef holder, GateRef accessor, GateRef attr); - GateRef CallSetterHelper(GateRef glue, GateRef holder, GateRef accessor, GateRef value); + GateRef CallSetterHelper(GateRef glue, GateRef holder, GateRef accessor, GateRef value, ProfileOperation callback); GateRef SetHasConstructorCondition(GateRef glue, GateRef receiver, GateRef key); GateRef AddPropertyByName(GateRef glue, GateRef receiver, GateRef key, GateRef value, GateRef propertyAttributes, ProfileOperation callback); @@ -438,30 +459,35 @@ public: GateRef IsInternalString(GateRef string); GateRef IsDigit(GateRef ch); GateRef StringToElementIndex(GateRef glue, GateRef string); - GateRef ComputePropertyCapacityInJSObj(GateRef oldLength); + GateRef ComputeNonInlinedFastPropsCapacity(GateRef oldLength, GateRef maxNonInlinedFastPropsCapacity); GateRef FindTransitions(GateRef glue, GateRef receiver, GateRef hClass, GateRef key, GateRef attr); + void TransitionForRepChange(GateRef glue, GateRef receiver, GateRef key, GateRef attr); + void TransitToElementsKind(GateRef glue, GateRef receiver, GateRef value, GateRef kind); GateRef TaggedToRepresentation(GateRef value); + GateRef TaggedToElementKind(GateRef value); GateRef LdGlobalRecord(GateRef glue, GateRef key); GateRef LoadFromField(GateRef receiver, GateRef handlerInfo); GateRef LoadGlobal(GateRef cell); - GateRef LoadElement(GateRef glue, GateRef receiver, GateRef key); + GateRef LoadElement(GateRef glue, GateRef receiver, GateRef key, ProfileOperation callback); GateRef TryToElementsIndex(GateRef glue, GateRef key); GateRef CheckPolyHClass(GateRef cachedValue, GateRef hClass); - GateRef LoadICWithHandler(GateRef glue, GateRef receiver, GateRef holder, GateRef handler); + GateRef LoadICWithHandler( + GateRef glue, GateRef receiver, GateRef holder, GateRef handler, ProfileOperation callback); GateRef StoreICWithHandler(GateRef glue, GateRef receiver, GateRef holder, GateRef value, GateRef handler, ProfileOperation callback = ProfileOperation()); GateRef ICStoreElement(GateRef glue, GateRef receiver, GateRef key, - GateRef value, GateRef handlerInfo); + GateRef value, GateRef handlerInfo, ProfileOperation callback); GateRef GetArrayLength(GateRef object); GateRef DoubleToInt(GateRef glue, GateRef x); - void StoreField(GateRef glue, GateRef receiver, GateRef value, GateRef handler, ProfileOperation callback); - void StoreWithTransition(GateRef glue, GateRef receiver, GateRef value, GateRef handler, + GateRef StoreField(GateRef glue, GateRef receiver, GateRef value, GateRef handler, ProfileOperation callback); + GateRef StoreWithTransition(GateRef glue, GateRef receiver, GateRef value, GateRef handler, ProfileOperation callback, bool withPrototype = false); GateRef StoreGlobal(GateRef glue, GateRef value, GateRef cell); void JSHClassAddProperty(GateRef glue, GateRef receiver, GateRef key, GateRef attr); void NotifyHClassChanged(GateRef glue, GateRef oldHClass, GateRef newHClass); GateRef GetInt64OfTInt(GateRef x); GateRef GetInt32OfTInt(GateRef x); + GateRef GetDoubleOfTInt(GateRef x); GateRef GetDoubleOfTDouble(GateRef 
x); GateRef GetDoubleOfTNumber(GateRef x); GateRef LoadObjectFromWeakRef(GateRef x); @@ -505,17 +531,24 @@ public: GateRef SetIsInlinePropsFieldInPropAttr(GateRef attr, GateRef value); GateRef SetTrackTypeInPropAttr(GateRef attr, GateRef type); GateRef GetTrackTypeInPropAttr(GateRef attr); + GateRef GetRepInPropAttr(GateRef attr); + GateRef IsIntRepInPropAttr(GateRef attr); + GateRef IsDoubleRepInPropAttr(GateRef attr); + GateRef SetTaggedRepInPropAttr(GateRef attr); void SetHasConstructorToHClass(GateRef glue, GateRef hClass, GateRef value); void UpdateValueInDict(GateRef glue, GateRef elements, GateRef index, GateRef value); GateRef GetBitMask(GateRef bitoffset); GateRef IntPtrEuqal(GateRef x, GateRef y); + void SetValueWithAttr(GateRef glue, GateRef obj, GateRef offset, GateRef key, GateRef value, GateRef attr); + void SetValueWithRep(GateRef glue, GateRef obj, GateRef offset, GateRef value, GateRef rep, Label *repChange); void SetValueWithBarrier(GateRef glue, GateRef obj, GateRef offset, GateRef value); - GateRef GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef index); - GateRef GetPropertyByName(GateRef glue, GateRef receiver, GateRef key); - GateRef FastGetPropertyByName(GateRef glue, GateRef obj, GateRef key); - GateRef FastGetPropertyByIndex(GateRef glue, GateRef obj, GateRef index); - GateRef GetPropertyByValue(GateRef glue, GateRef receiver, GateRef keyValue); - GateRef SetPropertyByIndex(GateRef glue, GateRef receiver, GateRef index, GateRef value, bool useOwn); + GateRef GetPropertyByIndex(GateRef glue, GateRef receiver, GateRef index, ProfileOperation callback); + GateRef GetPropertyByName(GateRef glue, GateRef receiver, GateRef key, ProfileOperation callback); + GateRef FastGetPropertyByName(GateRef glue, GateRef obj, GateRef key, ProfileOperation callback); + GateRef FastGetPropertyByIndex(GateRef glue, GateRef obj, GateRef index, ProfileOperation callback); + GateRef GetPropertyByValue(GateRef glue, GateRef receiver, GateRef keyValue, ProfileOperation callback); + GateRef SetPropertyByIndex( + GateRef glue, GateRef receiver, GateRef index, GateRef value, bool useOwn, ProfileOperation callback); GateRef SetPropertyByName(GateRef glue, GateRef receiver, GateRef key, GateRef value, bool useOwn, ProfileOperation callback = ProfileOperation()); // Crawl prototype chain GateRef SetPropertyByValue(GateRef glue, GateRef receiver, GateRef key, GateRef value, bool useOwn, @@ -536,20 +569,20 @@ public: GateRef GetPropertiesFromJSObject(GateRef object); template<OpCode Op, MachineType Type> GateRef BinaryOp(GateRef x, GateRef y); - GateRef GetGlobalOwnProperty(GateRef glue, GateRef receiver, GateRef key); + template<OpCode Op, MachineType Type> + GateRef BinaryOpWithOverflow(GateRef x, GateRef y); + GateRef GetGlobalOwnProperty(GateRef glue, GateRef receiver, GateRef key, ProfileOperation callback); inline GateRef GetObjectFromConstPool(GateRef constpool, GateRef index); + GateRef GetConstPoolFromFunction(GateRef jsFunc); GateRef GetStringFromConstPool(GateRef glue, GateRef constpool, GateRef index); GateRef GetMethodFromConstPool(GateRef glue, GateRef constpool, GateRef index); GateRef GetArrayLiteralFromConstPool(GateRef glue, GateRef constpool, GateRef index, GateRef module); GateRef GetObjectLiteralFromConstPool(GateRef glue, GateRef constpool, GateRef index, GateRef module); - GateRef CreateListFromArrayLike(GateRef glue, GateRef arrayObj); - GateRef BuildArgumentsListFastElements(GateRef glue, GateRef arrayObj); - GateRef MakeArgListWithHole(GateRef glue, GateRef argv, GateRef length); void SetExtensibleToBitfield(GateRef
glue, GateRef obj, bool isExtensible); // fast path - GateRef FastEqual(GateRef left, GateRef right, ProfileOperation callback); + GateRef FastEqual(GateRef glue, GateRef left, GateRef right, ProfileOperation callback); GateRef FastStrictEqual(GateRef glue, GateRef left, GateRef right, ProfileOperation callback); GateRef FastStringEqual(GateRef glue, GateRef left, GateRef right); GateRef FastMod(GateRef gule, GateRef left, GateRef right, ProfileOperation callback); @@ -589,12 +622,12 @@ public: inline GateRef GetGlobalConstantValue( VariableType type, GateRef glue, ConstantIndex index); inline GateRef GetGlobalEnvValue(VariableType type, GateRef env, size_t index); - GateRef CallGetterHelper(GateRef glue, GateRef receiver, GateRef holder, GateRef accessor); + GateRef CallGetterHelper( + GateRef glue, GateRef receiver, GateRef holder, GateRef accessor, ProfileOperation callback); GateRef ConstructorCheck(GateRef glue, GateRef ctor, GateRef outPut, GateRef thisObj); GateRef JSCallDispatch(GateRef glue, GateRef func, GateRef actualNumArgs, GateRef jumpSize, GateRef hotnessCounter, JSCallMode mode, std::initializer_list<GateRef> args, - ProfileOperation callback = ProfileOperation(), - BytecodeInstruction::Format format = BytecodeInstruction::Format::IMM8); + ProfileOperation callback = ProfileOperation()); GateRef IsFastTypeArray(GateRef jsType); GateRef GetTypeArrayPropertyByName(GateRef glue, GateRef receiver, GateRef holder, GateRef key, GateRef jsType); GateRef SetTypeArrayPropertyByName(GateRef glue, GateRef receiver, GateRef holder, GateRef key, GateRef value, @@ -610,16 +643,19 @@ public: GateRef callField, GateRef method, Label* notFastBuiltins, Label* exit, Variable* result, std::initializer_list<GateRef> args, JSCallMode mode); inline void SetLength(GateRef glue, GateRef str, GateRef length, bool compressed); + inline void SetLength(GateRef glue, GateRef str, GateRef length, GateRef isCompressed); inline void SetRawHashcode(GateRef glue, GateRef str, GateRef rawHashcode); void Assert(int messageId, int line, GateRef glue, GateRef condition, Label *nextLabel); - GateRef FlattenString(GateRef glue, GateRef str); - void FlattenString(GateRef str, Variable *flatStr, Label *fastPath, Label *slowPath); - GateRef GetNormalStringData(GateRef str); + GateRef GetNormalStringData(const StringInfoGateRef &stringInfoGate); void Comment(GateRef glue, const std::string &str); GateRef ToNumber(GateRef glue, GateRef tagged); inline GateRef LoadObjectFromConstPool(GateRef jsFunc, GateRef index); + inline GateRef LoadPfHeaderFromConstPool(GateRef jsFunc); + inline GateRef LoadHCIndexFromConstPool(GateRef jsFunc, GateRef traceId); + + GateRef RemoveTaggedWeakTag(GateRef weak); private: using BinaryOperation = std::function<GateRef(GateRef, GateRef)>; GateRef ChangeTaggedPointerToInt64(GateRef x); diff --git a/ecmascript/compiler/stub_compiler.cpp b/ecmascript/compiler/stub_compiler.cpp index 318d2d8d961c7e9ae3839e9fe15c06eb7ae6bccc..6f84a0b32c16662954c26ab1b83e0720da69a9e3 100644 --- a/ecmascript/compiler/stub_compiler.cpp +++ b/ecmascript/compiler/stub_compiler.cpp @@ -140,18 +140,30 @@ bool StubCompiler::BuildStubModuleAndSave() const LOG_COMPILER(INFO) << "=============== compiling bytecode handler stubs ==============="; LOptions stubOp(optLevel_, FPFlag::ELIM_FP, relocMode_); Module* stubM = generator.AddModule(&allocator, "bc_stub", triple_, stubOp, log->OutputASM(), StubFileKind::BC); - RunPipeline(stubM->GetModule(), &allocator); + if (stubM->GetModule()->GetModuleKind() != MODULE_LLVM) { + LOG_COMPILER(FATAL) << " Stub compiler is not
supported for litecg ==============="; + return false; + } + RunPipeline(static_cast<LLVMModule *>(stubM->GetModule()), &allocator); LOG_COMPILER(INFO) << "=============== compiling common stubs ==============="; LOptions comOp(optLevel_, FPFlag::RESERVE_FP, relocMode_); Module* comM = generator.AddModule(&allocator, "com_stub", triple_, comOp, log->OutputASM(), StubFileKind::COM); - RunPipeline(comM->GetModule(), &allocator); + if (comM->GetModule()->GetModuleKind() != MODULE_LLVM) { + LOG_COMPILER(FATAL) << " Stub compiler is not supported for litecg ==============="; + return false; + } + RunPipeline(static_cast<LLVMModule *>(comM->GetModule()), &allocator); LOG_COMPILER(INFO) << "=============== compiling builtins stubs ==============="; LOptions builtinOp(optLevel_, FPFlag::RESERVE_FP, relocMode_); Module* builtinM = generator.AddModule(&allocator, "builtin_stub", triple_, builtinOp, log->OutputASM(), StubFileKind::BUILTIN); - RunPipeline(builtinM->GetModule(), &allocator); + if (builtinM->GetModule()->GetModuleKind() != MODULE_LLVM) { + LOG_COMPILER(FATAL) << " Stub compiler is not supported for litecg ==============="; + return false; + } + RunPipeline(static_cast<LLVMModule *>(builtinM->GetModule()), &allocator); generator.SaveStubFile(filePath_); return true; diff --git a/ecmascript/compiler/test_stubs.cpp b/ecmascript/compiler/test_stubs.cpp deleted file mode 100644 index 9d45f46cc63abd6eeb85251da9a399ab6725f785..0000000000000000000000000000000000000000 --- a/ecmascript/compiler/test_stubs.cpp +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (c) 2021 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#include "ecmascript/compiler/test_stubs.h" - -#include "ecmascript/compiler/llvm_ir_builder.h" -#include "ecmascript/compiler/stub_builder-inl.h" -#include "ecmascript/compiler/variable_type.h" -#include "ecmascript/message_string.h" - -namespace panda::ecmascript::kungfu { -using namespace panda::ecmascript; -#ifndef NDEBUG -void FooAOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef pcOffset = Int32(1); - (void)calltarget; - GateRef barIndex = IntToTaggedInt(Int32(CommonStubCSigns::BarAOT)); - GateRef numArgs = IntToTaggedInt(Int32(2)); - GateRef barfunc = CallRuntime(glue, RTSTUB_ID(DefineAotFunc), {barIndex, numArgs}); - GateRef result = - CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, argc, barfunc, newtarget, thisObj, a, b, pcOffset}); - Return(result); -} - -void BarAOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - [[maybe_unused]] GateRef argc = Int32Argument(1); - [[maybe_unused]] GateRef calltarget = TaggedArgument(2); - [[maybe_unused]] GateRef newtarget = TaggedArgument(3); - [[maybe_unused]] GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef result = CallRuntime(glue, RTSTUB_ID(Add2), {a, b}); - Return(result); -} - -void Foo1AOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef pcOffset = Int32(1); - (void)calltarget; - GateRef barIndex = IntToTaggedInt(Int32(CommonStubCSigns::Bar1AOT)); - GateRef numArgs = IntToTaggedInt(Int32(3)); - GateRef barfunc = CallRuntime(glue, RTSTUB_ID(DefineAotFunc), {barIndex, numArgs}); - GateRef result = - CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, argc, barfunc, newtarget, thisObj, a, b, pcOffset}); - Return(result); -} - -void Bar1AOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - (void)argc; - (void)calltarget; - (void)newtarget; - (void)thisObj; - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef c = TaggedArgument(7); - GateRef result = CallRuntime(glue, RTSTUB_ID(Add2), {a, b}); - GateRef result2 = CallRuntime(glue, RTSTUB_ID(Add2), {result, c}); - Return(result2); -} - -void Foo2AOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef pcOffset = Int32(1); - (void)calltarget; - GateRef actualArgC = Int64Add(argc, Int64(1)); - GateRef barIndex = IntToTaggedInt(Int32(CommonStubCSigns::BarAOT)); - GateRef numArgs = IntToTaggedInt(Int32(2)); - GateRef barfunc = CallRuntime(glue, RTSTUB_ID(DefineAotFunc), {barIndex, numArgs}); - GateRef result = CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, actualArgC, barfunc, newtarget, thisObj, - a, b, Undefined(), pcOffset}); - Return(result); -} - -void 
FooNativeAOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef pcOffset = Int32(1); - (void)calltarget; - GateRef actualArgC = Int64Add(argc, Int64(1)); - GateRef printfunc = CallRuntime(glue, RTSTUB_ID(GetPrintFunc), {}); - GateRef result = CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, actualArgC, printfunc, newtarget, thisObj, - a, b, Undefined(), pcOffset}); - Return(result); -} - -void FooBoundAOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef bindArguments = IntToTaggedInt(Int32(37)); - GateRef pcOffset = Int32(1); - (void)calltarget; - GateRef numArgs = IntToTaggedInt(Int32(2)); - GateRef barIndex = IntToTaggedInt(Int32(CommonStubCSigns::BarAOT)); - GateRef barfunc = CallRuntime(glue, RTSTUB_ID(DefineAotFunc), {barIndex, numArgs}); - GateRef bindfunc = CallRuntime(glue, RTSTUB_ID(GetBindFunc), {barfunc}); - GateRef newjsfunc = CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, Int64(5), bindfunc, newtarget, barfunc, - Int64(0x02), bindArguments, pcOffset}); - GateRef result = CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, argc, newjsfunc, newtarget, thisObj, - a, b, pcOffset}); - Return(result); -} - -void FooProxyAOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - [[maybe_unused]] GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef pcOffset = Int32(1); - - GateRef barIndex = IntToTaggedInt(Int32(CommonStubCSigns::BarAOT)); - GateRef numArgs = IntToTaggedInt(Int32(2)); - GateRef barfunc = CallRuntime(glue, RTSTUB_ID(DefineAotFunc), {barIndex, numArgs}); - - GateRef proxyfunc = CallRuntime(glue, RTSTUB_ID(DefineProxyFunc), {barfunc}); - GateRef result = - CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, argc, proxyfunc, newtarget, thisObj, a, b, pcOffset}); - Return(result); -} - -void FooProxy2AOTStubBuilder::GenerateCircuit() -{ - GateRef glue = PtrArgument(0); - GateRef argc = Int32Argument(1); - [[maybe_unused]] GateRef calltarget = TaggedArgument(2); - GateRef newtarget = TaggedArgument(3); - GateRef thisObj = TaggedArgument(4); - GateRef a = TaggedArgument(5); - GateRef b = TaggedArgument(6); - GateRef pcOffset = Int32(1); - - GateRef barIndex = IntToTaggedInt(Int32(CommonStubCSigns::Bar2AOT)); - GateRef numArgs = IntToTaggedInt(Int32(2)); - GateRef barfunc = CallRuntime(glue, RTSTUB_ID(DefineAotFunc), {barIndex, numArgs}); - GateRef proxyHandler = CallRuntime(glue, RTSTUB_ID(DefineProxyHandler), {barfunc}); - - GateRef proxyfunc = CallRuntime(glue, RTSTUB_ID(DefineProxyFunc2), {barfunc, proxyHandler}); - GateRef result = - CallNGCRuntime(glue, RTSTUB_ID(JSCall), {glue, argc, proxyfunc, newtarget, thisObj, a, b, pcOffset}); - Return(result); -} - -void Bar2AOTStubBuilder::GenerateCircuit() -{ - [[maybe_unused]] GateRef glue = PtrArgument(0); - [[maybe_unused]] GateRef argc = Int32Argument(1); - [[maybe_unused]] GateRef calltarget = TaggedArgument(2); - [[maybe_unused]] GateRef newtarget = 
TaggedArgument(3); - [[maybe_unused]] GateRef thisObj = TaggedArgument(4); - CallRuntime(glue, RTSTUB_ID(DumpTaggedType), {thisObj}); - Return(thisObj); -} - -void TestAbsoluteAddressRelocationStubBuilder::GenerateCircuit() -{ - auto env = GetEnvironment(); - GateRef a = Int64Argument(0); - Label start(env); - Jump(&start); - Bind(&start); - GateRef globalValueC = RelocatableData(0xabc); - GateRef dummyValueC = Load(VariableType::INT64(), globalValueC); - GateRef result = ZExtInt1ToInt64(Int64Equal(a, dummyValueC)); - Return(result); -} -#endif -} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/test_stubs_signature.cpp b/ecmascript/compiler/test_stubs_signature.cpp deleted file mode 100644 index 263a40e9dbce3c5379ff82d251b826a1a43bad83..0000000000000000000000000000000000000000 --- a/ecmascript/compiler/test_stubs_signature.cpp +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (c) 2021 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "ecmascript/compiler/call_signature.h" namespace panda::ecmascript::kungfu { #ifndef NDEBUG -DEF_CALL_SIGNATURE(FooAOT) -{ - // 7 : 7 input parameters - CallSignature fooAot("FooAOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = fooAot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(Foo1AOT) -{ - // 7 : 7 input parameters - CallSignature foo1Aot("Foo1AOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = foo1Aot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(Foo2AOT) -{ - // 7 : 7 input parameters - CallSignature foo2Aot("Foo2AOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = foo2Aot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(FooNativeAOT) -{ - // 7 : 7 input parameters - CallSignature foo2Aot("FooNativeAOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER,
VariableType::JS_ANY()); - *callSign = foo2Aot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(FooBoundAOT) -{ - // 7 : 7 input parameters - CallSignature foo2Aot("FooBoundAOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = foo2Aot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(Bar1AOT) -{ - // 8 : 8 input parameters - CallSignature barAot("Bar1AOT", 0, 8, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = barAot; - std::array<VariableType, 8> params = { // 8 : 8 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - VariableType::JS_ANY(), // c - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(BarAOT) -{ - // 7 : 7 input parameters - CallSignature barAot("BarAOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = barAot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(FooProxyAOT) -{ - // 8 : 8 input parameters - CallSignature fooProxyAot("FooProxyAOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = fooProxyAot; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(FooProxy2AOT) -{ - // 7 : 7 input parameters - CallSignature FooProxy2AOT("FooProxy2AOT", 0, 7, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = FooProxy2AOT; - std::array<VariableType, 7> params = { // 7 : 7 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - VariableType::JS_ANY(), // a - VariableType::JS_ANY(), // b - }; - callSign->SetParameters(params.data()); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(Bar2AOT) -{ - // 5 : 5 input parameters -
CallSignature bar2Aot("Bar2AOT", 0, 5, - ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); - *callSign = bar2Aot; - std::array<VariableType, 5> params = { // 5 : 5 input parameters - VariableType::NATIVE_POINTER(), - VariableType::INT64(), - VariableType::JS_ANY(), // calltarget - VariableType::JS_ANY(), // newTarget - VariableType::JS_ANY(), // thisTarget - }; - callSign->SetParameters(params.data()); - callSign->SetVariadicArgs(true); - callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv); -} - -DEF_CALL_SIGNATURE(TestAbsoluteAddressRelocation) -{ - // 1 : 1 input parameters - CallSignature TestAbsoluteAddressRelocation("TestAbsoluteAddressRelocation", 0, 1, - ArgumentsOrder::DEFAULT_ORDER, VariableType::INT64()); // undefined or hole - *callSign = TestAbsoluteAddressRelocation; - // 1 : 1 input parameters - std::array<VariableType, 1> params = { - VariableType::INT64(), - }; - callSign->SetParameters(params.data()); -} - -#endif -} // namespace panda::ecmascript::kungfu \ No newline at end of file diff --git a/ecmascript/compiler/tests/BUILD.gn b/ecmascript/compiler/tests/BUILD.gn index 9728049606a8884568070a3aeafd7a5bf2d4fb2d..02722173af58f7a59d376765c6fbe6d7025a6a49 100644 --- a/ecmascript/compiler/tests/BUILD.gn +++ b/ecmascript/compiler/tests/BUILD.gn @@ -14,21 +14,6 @@ import("//arkcompiler/ets_runtime/js_runtime_config.gni") import("//arkcompiler/ets_runtime/test/test_helper.gni") -config("include_llvm_config") { - if (compile_llvm_online) { - include_dirs = [ - "//third_party/third_party_llvm-project/build/include", - "//third_party/third_party_llvm-project/llvm/include/", - ] - } else { - include_dirs = [ - "//prebuilts/ark_tools/ark_js_prebuilts/llvm_prebuilts/llvm/include", - "//prebuilts/ark_tools/ark_js_prebuilts/llvm_prebuilts/build/include", - ] - } - cflags_cc = [ "-DARK_GC_SUPPORT" ] -} - module_output_path = "arkcompiler/ets_runtime" host_unittest_action("AssemblerTest") { @@ -39,81 +24,11 @@ host_unittest_action("AssemblerTest") { "../assembler/tests/assembler_aarch64_test.cpp", "../assembler/tests/assembler_x64_test.cpp", ] - configs = [ - ":include_llvm_config", - "//arkcompiler/ets_runtime:ecma_test_config", - "//arkcompiler/ets_runtime:ark_jsruntime_compiler_config", - "//arkcompiler/ets_runtime:ark_jsruntime_public_config", - ] - - if (compile_llvm_online) { - lib_dirs = [ "//third_party/third_party_llvm-project/build/lib" ] - } else { - lib_dirs = - [ "//prebuilts/ark_tools/ark_js_prebuilts/llvm_prebuilts/build/lib" ] - } - - libs = [ - "stdc++", - "z", - "LLVMTarget", - "LLVMObject", - "LLVMMC", - "LLVMSupport", - "LLVMCore", - "LLVMExecutionEngine", - "LLVMInterpreter", - "LLVMMCJIT", - "LLVMExegesis", - "LLVMRuntimeDyld", - "LLVMInstCombine", - "LLVMAnalysis", - "LLVMScalarOpts", - "LLVMBinaryFormat", - "LLVMDebugInfoDWARF", - "LLVMRemarks", - "LLVMTextAPI", - "LLVMScalarOpts", - "LLVMTransformUtils", - "LLVMBitReader", - "LLVMAsmPrinter", - "LLVMProfileData", - "LLVMBitstreamReader", - "LLVMSelectionDAG", - "LLVMGlobalISel", - "LLVMLTO", - "LLVMCFGuard", - "LLVMVectorize", - "LLVMDemangle", - "LLVMipo", - "LLVMInstrumentation", - "LLVMDebugInfoCodeView", - "LLVMAggressiveInstCombine", - "LLVMAsmParser", - "LLVMMCParser", - "LLVMMIRParser", - "LLVMX86Info", - "LLVMAArch64Info", - "LLVMARMDesc", - "LLVMAArch64Desc", - "LLVMX86Desc", - "LLVMX86Disassembler", - "LLVMARMDisassembler", - "LLVMAArch64Disassembler", - "LLVMMCDisassembler", - "LLVMAArch64CodeGen", - "LLVMARMCodeGen", - "LLVMCodeGen", - "LLVMX86CodeGen", - "LLVMX86AsmParser", - "LLVMTransformUtils", -
"LLVMAArch64Utils", - "LLVMARMUtils", - "LLVMIRReader", - ] deps = [ - "//arkcompiler/ets_runtime/ecmascript/compiler:libark_jsoptimizer_test", + "$ark_root/libpandafile:libarkfile_static", + "$js_root:libark_jsruntime_test_set", + "$js_root/ecmascript/compiler:libark_jsoptimizer_set", sdk_libc_secshared_dep, ] diff --git a/ecmascript/compiler/tests/lowering_relate_gate_test.cpp b/ecmascript/compiler/tests/lowering_relate_gate_test.cpp index c34c85333722cb8e82594605c639b07c35482a0a..395394b0628655435e01a0dd3861cf2abd0bee22 100644 --- a/ecmascript/compiler/tests/lowering_relate_gate_test.cpp +++ b/ecmascript/compiler/tests/lowering_relate_gate_test.cpp @@ -17,7 +17,7 @@ #include "ecmascript/compiler/ts_hcr_lowering.h" #include "ecmascript/compiler/type_mcr_lowering.h" #include "ecmascript/mem/native_area_allocator.h" -#include "ecmascript/pgo_profiler/pgo_profiler_type.h" +#include "ecmascript/pgo_profiler/type/pgo_profiler_type.h" #include "ecmascript/tests/test_helper.h" namespace panda::test { diff --git a/ecmascript/compiler/trampoline/aarch64/asm_interpreter_call.cpp b/ecmascript/compiler/trampoline/aarch64/asm_interpreter_call.cpp index 072d1a8f8dcab524d6762069d4808d989cf9683a..8d6f0597f6cdb7ea5f75bab766488c94f4c3977e 100644 --- a/ecmascript/compiler/trampoline/aarch64/asm_interpreter_call.cpp +++ b/ecmascript/compiler/trampoline/aarch64/asm_interpreter_call.cpp @@ -827,6 +827,55 @@ void AsmInterpreterCall::ResumeUncaughtFrameAndReturn(ExtendedAssembler *assembl __ Ret(); } +// ResumeRspAndRollback(uintptr_t glue, uintptr_t sp, uintptr_t pc, uintptr_t constantPool, +// uint64_t profileTypeInfo, uint64_t acc, uint32_t hotnessCounter, size_t jumpSize) +// GHC calling convention +// X19 - glue +// FP - sp +// X20 - pc +// X21 - constantPool +// X22 - profileTypeInfo +// X23 - acc +// X24 - hotnessCounter +// X25 - jumpSizeAfterCall +void AsmInterpreterCall::ResumeRspAndRollback(ExtendedAssembler *assembler) +{ + __ BindAssemblerStub(RTSTUB_ID(ResumeRspAndRollback)); + + Register glueRegister = __ GlueRegister(); + Register sp(FP); + Register rsp(SP); + Register pc(X20); + Register jumpSizeRegister(X25); + + Register ret(X23); + Register opcode(X6, W); + Register bcStub(X7); + Register fp(X8); + + int64_t fpOffset = static_cast<int64_t>(AsmInterpretedFrame::GetFpOffset(false)) + - static_cast<int64_t>(AsmInterpretedFrame::GetSize(false)); + int64_t spOffset = static_cast<int64_t>(AsmInterpretedFrame::GetBaseOffset(false)) + - static_cast<int64_t>(AsmInterpretedFrame::GetSize(false)); + int64_t funcOffset = static_cast<int64_t>(AsmInterpretedFrame::GetFunctionOffset(false)) + - static_cast<int64_t>(AsmInterpretedFrame::GetSize(false)); + ASSERT(fpOffset < 0); + ASSERT(spOffset < 0); + ASSERT(funcOffset < 0); + + __ Ldur(fp, MemoryOperand(sp, fpOffset)); // store fp for temporary + __ Ldur(ret, MemoryOperand(sp, funcOffset)); // restore acc + __ Ldur(sp, MemoryOperand(sp, spOffset)); // update sp + + __ Add(pc, pc, Operand(jumpSizeRegister, LSL, 0)); + __ Ldrb(opcode, MemoryOperand(pc, 0)); + + __ Mov(rsp, fp); // resume rsp + __ Add(bcStub, glueRegister, Operand(opcode, UXTW, FRAME_SLOT_SIZE_LOG2)); + __ Ldr(bcStub, MemoryOperand(bcStub, JSThread::GlueData::GetBCStubEntriesOffset(false))); + __ Br(bcStub); +} + // c++ calling convention // X0 - glue // X1 - callTarget diff --git a/ecmascript/compiler/trampoline/aarch64/common_call.h b/ecmascript/compiler/trampoline/aarch64/common_call.h index 8a4e4cb7e7ad7337af566d0eaec14ffd076a59bc..71ecb781f143011f94a562e95329e1ce6da56df3 100644 --- a/ecmascript/compiler/trampoline/aarch64/common_call.h +++
b/ecmascript/compiler/trampoline/aarch64/common_call.h @@ -177,6 +177,8 @@ public: static void ResumeUncaughtFrameAndReturn(ExtendedAssembler *assembler); + static void ResumeRspAndRollback(ExtendedAssembler *assembler); + static void CallGetter(ExtendedAssembler *assembler); static void CallSetter(ExtendedAssembler *assembler); diff --git a/ecmascript/compiler/trampoline/aarch64/optimized_call.cpp b/ecmascript/compiler/trampoline/aarch64/optimized_call.cpp index 0391ffcd8eafa8b73c7a22f68c1e8e18e1758349..1e52993848a14b05db5ce203f2d9df20ea022cf0 100644 --- a/ecmascript/compiler/trampoline/aarch64/optimized_call.cpp +++ b/ecmascript/compiler/trampoline/aarch64/optimized_call.cpp @@ -401,6 +401,7 @@ void OptimizedCall::JSCallInternal(ExtendedAssembler *assembler, Register jsfunc Label lCallConstructor; Label lCallBuiltinStub; Label lCallNativeCpp; + Label lNotClass; __ Ldr(Register(X5), MemoryOperand(jsfunc, 0)); __ Ldr(Register(X5), MemoryOperand(Register(X5), JSHClass::BIT_FIELD_OFFSET)); @@ -409,8 +410,10 @@ void OptimizedCall::JSCallInternal(ExtendedAssembler *assembler, Register jsfunc __ Ldr(callField, MemoryOperand(method, Method::CALL_FIELD_OFFSET)); __ Tbnz(callField, MethodLiteral::IsNativeBit::START_BIT, &callNativeMethod); if (!isNew) { - __ Tbnz(Register(X5), JSHClass::ClassConstructorBit::START_BIT, &lCallConstructor); + __ Tbz(Register(X5), JSHClass::IsClassConstructorOrPrototypeBit::START_BIT, &lNotClass); + __ Tbnz(Register(X5), JSHClass::ConstructorBit::START_BIT, &lCallConstructor); } + __ Bind(&lNotClass); { Register argV(X5); // skip argc @@ -603,6 +606,7 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re Label popArgs; Label slowCall; Label aotCall; + Label notClass; // get bound arguments __ Ldr(boundLength, MemoryOperand(jsfunc, JSBoundFunction::BOUND_ARGUMENTS_OFFSET)); // get bound length @@ -650,7 +654,9 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re Register hclass = __ AvailableRegister2(); __ Ldr(hclass, MemoryOperand(boundTarget, 0)); __ Ldr(hclass, MemoryOperand(hclass, JSHClass::BIT_FIELD_OFFSET)); - __ Tbnz(hclass, JSHClass::ClassConstructorBit::START_BIT, &slowCall); + __ Tbz(hclass, JSHClass::IsClassConstructorOrPrototypeBit::START_BIT, &notClass); + __ Tbnz(hclass, JSHClass::ConstructorBit::START_BIT, &slowCall); + __ Bind(&notClass); __ Tbz(hclass, JSHClass::IsOptimizedBit::START_BIT, &slowCall); __ Bind(&aotCall); { @@ -747,13 +753,12 @@ void OptimizedCall::CallRuntimeWithArgv(ExtendedAssembler *assembler) Register sp(SP); // 2 : 2 means pair __ Stp(argc, argv, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, AddrMode::PREINDEX)); - __ Str(runtimeId, MemoryOperand(sp, -FRAME_SLOT_SIZE, AddrMode::PREINDEX)); - __ PushFpAndLr(); + __ Stp(Register(X30), runtimeId, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, AddrMode::PREINDEX)); // 2 : 2 means pair Register fp(X29); // construct leave frame Register frameType(X9); __ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::LEAVE_FRAME_WITH_ARGV))); - __ Str(frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE, AddrMode::PREINDEX)); + __ Stp(frameType, Register(X29), MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, AddrMode::PREINDEX)); // 2 : 2 means pair __ Add(Register(FP), sp, Immediate(FRAME_SLOT_SIZE)); __ Str(fp, MemoryOperand(glue, JSThread::GlueData::GetLeaveFrameOffset(false))); @@ -766,9 +771,9 @@ void OptimizedCall::CallRuntimeWithArgv(ExtendedAssembler *assembler) __ Mov(X1, argc); __ Mov(X2, argv); __ Blr(rtfunc); - __ Add(sp, sp, Immediate(FRAME_SLOT_SIZE)); - __
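The JSCallInternal and JSBoundFunctionCallInternal hunks above (and their x64 ports below) split the old single-bit test in two: the bitfield first says whether the object is a class constructor or a class prototype, and only the constructor bit then forces the constructor slow path, so class prototypes keep the fast call. A compact model of the new predicate; the bit positions here are illustrative, the real ones live in JSHClass:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t kIsClassConstructorOrPrototypeBit = 1u << 1;  // illustrative position
constexpr uint32_t kConstructorBit = 1u << 2;                    // illustrative position

// New check: reject only objects that are both class-related and constructors.
bool TakesConstructorSlowPath(uint32_t bitField)
{
    if ((bitField & kIsClassConstructorOrPrototypeBit) == 0) {
        return false;  // Tbz / Jnb -> lNotClass: ordinary callables fall through
    }
    return (bitField & kConstructorBit) != 0;  // Tbnz / Jb -> lCallConstructor
}

int main()
{
    assert(!TakesConstructorSlowPath(0));                                  // plain function
    assert(!TakesConstructorSlowPath(kIsClassConstructorOrPrototypeBit));  // class prototype: now fast
    assert(TakesConstructorSlowPath(kIsClassConstructorOrPrototypeBit | kConstructorBit));
    return 0;
}
```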
RestoreFpAndLr(); - __ Add(sp, sp, Immediate(3 * FRAME_SLOT_SIZE)); // 3 : 3 means pair + __ Ldp(Register(Zero), Register(X29), MemoryOperand(sp, ExtendedAssembler::PAIR_SLOT_SIZE, POSTINDEX)); + __ Ldp(Register(X30), Register(Zero), MemoryOperand(sp, ExtendedAssembler::PAIR_SLOT_SIZE, POSTINDEX)); + __ Add(sp, sp, Immediate(2 * FRAME_SLOT_SIZE)); // 2 : 2 means pair __ Ret(); } @@ -1161,4 +1166,4 @@ void OptimizedCall::DeoptHandlerAsm(ExtendedAssembler *assembler) } } #undef __ -} // panda::ecmascript::aarch64 \ No newline at end of file +} // panda::ecmascript::aarch64 diff --git a/ecmascript/compiler/trampoline/x64/asm_interpreter_call.cpp b/ecmascript/compiler/trampoline/x64/asm_interpreter_call.cpp index f8853e730040be290298625a39fdcd67176608a6..bcf4b131577956a7129f6a2bcf975dee513def80 100644 --- a/ecmascript/compiler/trampoline/x64/asm_interpreter_call.cpp +++ b/ecmascript/compiler/trampoline/x64/asm_interpreter_call.cpp @@ -1186,6 +1186,44 @@ void AsmInterpreterCall::ResumeUncaughtFrameAndReturn(ExtendedAssembler *assembl __ Ret(); } +// ResumeRspAndRollback(uintptr_t glue, uintptr_t sp, uintptr_t pc, uintptr_t constantPool, +// uint64_t profileTypeInfo, uint64_t acc, uint32_t hotnessCounter, size_t jumpSize) +// GHC calling convention +// %r13 - glue +// %rbp - sp +// %r12 - pc +// %rbx - constantPool +// %r14 - profileTypeInfo +// %rsi - acc +// %rdi - hotnessCounter +// %r8 - jumpSizeAfterCall +void AsmInterpreterCall::ResumeRspAndRollback(ExtendedAssembler *assembler) +{ + __ BindAssemblerStub(RTSTUB_ID(ResumeRspAndRollback)); + Register glueRegister = __ GlueRegister(); + Register spRegister = rbp; + Register pcRegister = r12; + Register ret = rsi; + Register jumpSizeRegister = r8; + + Register frameStateBaseRegister = r11; + __ Movq(spRegister, frameStateBaseRegister); + __ Subq(AsmInterpretedFrame::GetSize(false), frameStateBaseRegister); + + __ Movq(Operand(frameStateBaseRegister, AsmInterpretedFrame::GetBaseOffset(false)), spRegister); // update sp + __ Addq(jumpSizeRegister, pcRegister); // newPC + Register opcodeRegister = rax; + __ Movzbq(Operand(pcRegister, 0), opcodeRegister); + + __ Movq(Operand(frameStateBaseRegister, AsmInterpretedFrame::GetFunctionOffset(false)), ret); // restore acc + + __ Movq(Operand(frameStateBaseRegister, AsmInterpretedFrame::GetFpOffset(false)), rsp); // resume rsp + Register bcStubRegister = r11; + __ Movq(Operand(glueRegister, opcodeRegister, Times8, JSThread::GlueData::GetBCStubEntriesOffset(false)), + bcStubRegister); + __ Jmp(bcStubRegister); +} + void AsmInterpreterCall::PushUndefinedWithArgcAndCheckStack(ExtendedAssembler *assembler, Register glue, Register argc, Register op1, Register op2, Label *stackOverflow) { diff --git a/ecmascript/compiler/trampoline/x64/common_call.h b/ecmascript/compiler/trampoline/x64/common_call.h index 3ed42aa9f68c75148e0945e8ea008634f5c9ece4..b641fb207f1ba4bc6f03f4c13d086404ae9b2ada 100644 --- a/ecmascript/compiler/trampoline/x64/common_call.h +++ b/ecmascript/compiler/trampoline/x64/common_call.h @@ -155,6 +155,8 @@ public: static void ResumeUncaughtFrameAndReturn(ExtendedAssembler *assembler); + static void ResumeRspAndRollback(ExtendedAssembler *assembler); + private: static void PushFrameState(ExtendedAssembler *assembler, Register prevSpRegister, Register fpRegister, Register callTargetRegister, Register thisRegister, Register methodRegister, Register pcRegister, diff --git a/ecmascript/compiler/trampoline/x64/optimized_call.cpp b/ecmascript/compiler/trampoline/x64/optimized_call.cpp index 
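Both ResumeRspAndRollback ports address the caller's interpreted frame through negative offsets: the incoming sp points one past the end of the AsmInterpretedFrame, so each field sits at field offset minus frame size. A self-contained model of that arithmetic; the toy frame's field names and layout are assumptions, not the real AsmInterpretedFrame:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy stand-in for AsmInterpretedFrame; only the offset arithmetic matters here.
struct ToyAsmInterpretedFrame {
    uint64_t function;
    uint64_t base;  // previous sp
    uint64_t fp;
};

// Field offset relative to the end of the frame, as used by Ldur(..., sp, offset).
int64_t OffsetFromFrameEnd(size_t fieldOffset)
{
    return static_cast<int64_t>(fieldOffset) -
           static_cast<int64_t>(sizeof(ToyAsmInterpretedFrame));
}

int main()
{
    int64_t funcOffset = OffsetFromFrameEnd(offsetof(ToyAsmInterpretedFrame, function));
    int64_t spOffset = OffsetFromFrameEnd(offsetof(ToyAsmInterpretedFrame, base));
    int64_t fpOffset = OffsetFromFrameEnd(offsetof(ToyAsmInterpretedFrame, fp));
    // Mirrors the trampoline's ASSERTs: every field lies below the frame end.
    assert(funcOffset < 0 && spOffset < 0 && fpOffset < 0);
    return 0;
}
```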
2345cb298256870a56827ef7b285e314848bca9f..3532d6d55976749d591872d24b0f17f552182602 100644 --- a/ecmascript/compiler/trampoline/x64/optimized_call.cpp +++ b/ecmascript/compiler/trampoline/x64/optimized_call.cpp @@ -350,15 +350,19 @@ void OptimizedCall::GenJSCall(ExtendedAssembler *assembler, bool isNew) Register argV = r9; { Label lCallConstructor; + Label lNotClass; __ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method __ Movl(Operand(rsp, FRAME_SLOT_SIZE), argc); // skip return addr __ Mov(Operand(method, Method::CALL_FIELD_OFFSET), methodCallField); // get call field __ Btq(MethodLiteral::IsNativeBit::START_BIT, methodCallField); // is native __ Jb(&lCallNativeMethod); if (!isNew) { - __ Btq(JSHClass::ClassConstructorBit::START_BIT, rax); // is CallConstructor + __ Btq(JSHClass::IsClassConstructorOrPrototypeBit::START_BIT, rax); // is CallConstructor + __ Jnb(&lNotClass); + __ Btq(JSHClass::ConstructorBit::START_BIT, rax); // is CallConstructor __ Jb(&lCallConstructor); } + __ Bind(&lNotClass); __ Movq(rsp, argV); auto argvSlotOffset = kungfu::ArgumentAccessor::GetExtraArgsNum() + 1; // 1: return addr __ Addq(argvSlotOffset * FRAME_SLOT_SIZE, argV); // skip return addr and argc @@ -575,6 +579,7 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re Label aotCall; Label popArgs; Label isJsFunc; + Label isNotClass; __ Pushq(rbp); __ Pushq(static_cast<int32_t>(FrameType::OPTIMIZED_JS_FUNCTION_ARGS_CONFIG_FRAME)); __ Leaq(Operand(rsp, FRAME_SLOT_SIZE), rbp); @@ -640,8 +645,11 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re Register jsfunc = rsi; __ Bind(&isJsFunc); { - __ Btq(JSHClass::ClassConstructorBit::START_BIT, rax); // is CallConstructor + __ Btq(JSHClass::IsClassConstructorOrPrototypeBit::START_BIT, rax); // is CallConstructor + __ Jnb(&isNotClass); + __ Btq(JSHClass::ConstructorBit::START_BIT, rax); __ Jb(&slowCall); + __ Bind(&isNotClass); __ Btq(JSHClass::IsOptimizedBit::START_BIT, rax); // is aot __ Jnb(&slowCall); __ Bind(&aotCall); @@ -1161,4 +1169,4 @@ void OptimizedCall::DeoptHandlerAsm(ExtendedAssembler *assembler) } } #undef __ -} // namespace panda::ecmascript::x64 \ No newline at end of file +} // namespace panda::ecmascript::x64 diff --git a/ecmascript/compiler/ts_class_analysis.cpp b/ecmascript/compiler/ts_class_analysis.cpp index 18bcd1b65c779eaa389d9cffe7ebdb22ab5716cd..8b4109d735e9346985c749e328d5a86ca57512af 100644 --- a/ecmascript/compiler/ts_class_analysis.cpp +++ b/ecmascript/compiler/ts_class_analysis.cpp @@ -99,7 +99,7 @@ void TSClassAnalysis::AnalyzeProperties(const JSThread *thread, const TSClassTyp GlobalTSTypeRef classGT = classType->GetGT(); int hclassIndex = tsManager_->GetHClassIndex(classGT); ASSERT(hclassIndex != -1); - JSHClass *hclass = JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); + JSHClass *hclass = JSHClass::Cast(tsManager_->GetValueFromCache(hclassIndex).GetTaggedObject()); if (UNLIKELY(hclass->IsDictionaryMode())) { return; } diff --git a/ecmascript/compiler/ts_hclass_generator.cpp b/ecmascript/compiler/ts_hclass_generator.cpp index 70c216fa543d990e32d46079feccdf70e4c636d8..129640a1c89701e22bfc4c20b3ad31d12514dec3 100644 --- a/ecmascript/compiler/ts_hclass_generator.cpp +++ b/ecmascript/compiler/ts_hclass_generator.cpp @@ -14,11 +14,14 @@ */ #include "ecmascript/compiler/ts_hclass_generator.h" +#include "ecmascript/global_env_constants-inl.h" #include "ecmascript/subtyping_operator.h" #include
"ecmascript/jspandafile/class_info_extractor.h" namespace panda::ecmascript::kungfu { using ClassInfoExtractor = panda::ecmascript::ClassInfoExtractor; +using PGOHClassLayoutDesc = pgo::PGOHClassLayoutDesc; +using PGOHandler = pgo::PGOHandler; void TSHClassGenerator::GenerateTSHClasses() const { const JSThread *thread = tsManager_->GetThread(); @@ -40,7 +43,8 @@ void TSHClassGenerator::GenerateTSHClasses() const } } -void TSHClassGenerator::UpdateTSHClassFromPGO(const kungfu::GateType &type, const PGOHClassLayoutDesc &desc) const +void TSHClassGenerator::UpdateTSHClassFromPGO(const kungfu::GateType &type, const PGOHClassLayoutDesc &desc, + bool enableOptTrackField) const { DISALLOW_GARBAGE_COLLECTION; auto hclassValue = tsManager_->GetTSHClass(type); @@ -48,12 +52,29 @@ void TSHClassGenerator::UpdateTSHClassFromPGO(const kungfu::GateType &type, cons return; } - tsManager_->InsertPtToGtMap(desc.GetClassType(), type); + tsManager_->InsertPtToGtMap(desc.GetProfileType(), type); + if (!enableOptTrackField) { + return; + } + if (tsManager_->IsInSkipTrackFieldSet(desc.GetProfileType())) { + return; + } + + std::vector<JSHClass *> superHClasses; + kungfu::GateType current = type; + while (tsManager_->GetSuperGateType(current)) { + auto superHClassValue = tsManager_->GetTSHClass(current); + if (!superHClassValue.IsJSHClass()) { + break; + } + superHClasses.emplace_back(JSHClass::Cast(superHClassValue.GetTaggedObject())); + } + auto hclass = JSHClass::Cast(hclassValue.GetTaggedObject()); const JSThread *thread = tsManager_->GetThread(); LayoutInfo *layoutInfo = LayoutInfo::Cast(hclass->GetLayout().GetTaggedObject()); - int element = layoutInfo->NumberOfElements(); - for (int i = 0; i < element; i++) { + int numOfProps = hclass->NumberOfProps(); + for (int i = 0; i < numOfProps; i++) { auto key = layoutInfo->GetKey(i); if (!key.IsString()) { continue; @@ -62,12 +83,24 @@ void TSHClassGenerator::UpdateTSHClassFromPGO(const kungfu::GateType &type, cons PGOHandler newHandler; if (desc.FindDescWithKey(keyString, newHandler)) { auto attr = layoutInfo->GetAttr(i); - if (newHandler.GetTrackType() == TrackType::DOUBLE) { - attr.SetRepresentation(Representation::DOUBLE); - } else { - attr.SetRepresentation(Representation::OBJECT); + if (newHandler.SetAttribute(attr)) { + hclass->SetIsAllTaggedProp(false); } layoutInfo->SetNormalAttr(thread, i, attr); + + // Update super class representation + for (auto superHClass : superHClasses) { + int entry = JSHClass::FindPropertyEntry(thread, superHClass, key); + if (entry == -1) { + continue; + } + auto superLayout = LayoutInfo::Cast(superHClass->GetLayout().GetTaggedObject()); + PropertyAttributes superAttr = superLayout->GetAttr(entry); + if (newHandler.SetAttribute(superAttr)) { + superHClass->SetIsAllTaggedProp(false); + } + superLayout->SetNormalAttr(thread, entry, superAttr); + } } } } @@ -151,7 +184,7 @@ JSHandle<JSHClass> TSHClassGenerator::CreateIHClass(const JSThread *thread, JSHandle<TSObjLayoutInfo> tsLayout(thread, instanceType->GetObjLayoutInfo()); uint32_t numOfProps = tsLayout->GetNumOfProperties(); JSHandle<JSHClass> hclass; - if (LIKELY(numOfProps <= PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES)) { + if (LIKELY(numOfProps <= PropertyAttributes::MAX_FAST_PROPS_CAPACITY)) { JSMutableHandle<JSTaggedValue> key(thread, JSTaggedValue::Undefined()); JSHandle<LayoutInfo> layout = factory->CreateLayoutInfo(numOfProps); for (uint32_t index = 0; index < numOfProps; ++index) { @@ -160,7 +193,7 @@ JSHandle<JSHClass> TSHClassGenerator::CreateIHClass(const JSThread *thread, ASSERT_PRINT(JSTaggedValue::IsPropertyKey(key), "Key is not a property
key"); PropertyAttributes attributes = PropertyAttributes::Default(); attributes.SetIsInlinedProps(true); - attributes.SetRepresentation(Representation::MIXED); + attributes.SetRepresentation(Representation::NONE); attributes.SetOffset(index); layout->AddKey(thread, index, key.GetTaggedValue(), attributes); } @@ -188,12 +221,12 @@ JSHandle<JSHClass> TSHClassGenerator::CreatePHClass(const JSThread *thread, JSHandle<TSObjLayoutInfo> tsLayout(thread, prototypeType->GetObjLayoutInfo()); uint32_t numOfProps = tsLayout->GetNumOfProperties(); JSHandle<JSHClass> hclass; - if (LIKELY(numOfProps <= PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES)) { + if (LIKELY(numOfProps <= PropertyAttributes::MAX_FAST_PROPS_CAPACITY)) { TSManager *tsManager = thread->GetCurrentEcmaContext()->GetTSManager(); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle<JSTaggedValue> ctor = globalConst->GetHandledConstructorString(); CVector<std::pair<JSHandle<JSTaggedValue>, GlobalTSTypeRef>> sortedPrototype {{ctor, GlobalTSTypeRef()}}; - CVector<std::pair<JSHandle<JSTaggedValue>, GlobalTSTypeRef>> signatureVec {}; + CUnorderedMap<std::string, uint32_t> keysMap; for (uint32_t index = 0; index < numOfProps; ++index) { JSHandle<JSTaggedValue> key(thread, tsLayout->GetKey(index)); auto value = GlobalTSTypeRef(tsLayout->GetTypeId(index).GetInt()); @@ -204,18 +237,20 @@ JSHandle<JSHClass> TSHClassGenerator::CreatePHClass(const JSThread *thread, bool isAbs = tsManager->IsAbstractMethod(value); if (!isSame && !isAbs) { bool isSign = tsManager->IsMethodSignature(value); - if (LIKELY(!isSign)) { - sortedPrototype.emplace_back(std::make_pair(key, value)); - } else { - signatureVec.emplace_back(std::make_pair(key, value)); + if (isSign) { + continue; + } + std::string keyStr = EcmaStringAccessor(key.GetTaggedValue()).ToStdString(); + auto it = keysMap.find(keyStr); + if (it != keysMap.end()) { + sortedPrototype[it->second] = std::make_pair(key, value); + continue; } + keysMap[keyStr] = sortedPrototype.size(); + sortedPrototype.emplace_back(std::make_pair(key, value)); } } - if (!signatureVec.empty()) { - sortedPrototype.insert(sortedPrototype.end(), signatureVec.begin(), signatureVec.end()); - } - uint32_t keysLen = sortedPrototype.size(); JSMutableHandle<JSTaggedValue> key(thread, JSTaggedValue::Undefined()); JSHandle<LayoutInfo> layout = factory->CreateLayoutInfo(keysLen); @@ -228,7 +263,7 @@ JSHandle<JSHClass> TSHClassGenerator::CreatePHClass(const JSThread *thread, attributes.SetIsAccessor(true); } attributes.SetIsInlinedProps(true); - attributes.SetRepresentation(Representation::MIXED); + attributes.SetRepresentation(Representation::NONE); attributes.SetOffset(index); layout->AddKey(thread, index, key.GetTaggedValue(), attributes); } @@ -261,7 +296,7 @@ JSHandle<JSHClass> TSHClassGenerator::CreateCHClass(const JSThread *thread, uint32_t functionFirstIndex = numOfProps; uint32_t numNonStaticFunc = 0; bool hasFunction = false; - if (LIKELY(numOfProps <= PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES)) { + if (LIKELY(numOfProps <= PropertyAttributes::MAX_FAST_PROPS_CAPACITY)) { TSManager *tsManager = thread->GetCurrentEcmaContext()->GetTSManager(); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle<LayoutInfo> layout = factory->CreateLayoutInfo(numOfProps); @@ -302,7 +337,7 @@ JSHandle<JSHClass> TSHClassGenerator::CreateCHClass(const JSThread *thread, ASSERT_PRINT(JSTaggedValue::IsPropertyKey(JSHandle<JSTaggedValue>(thread, tsPropKey)), "Key is not a property key"); attributes.SetIsInlinedProps(true); - attributes.SetRepresentation(Representation::MIXED); + attributes.SetRepresentation(Representation::NONE); attributes.SetOffset(index - numNonStaticFunc); layout->AddKey(thread, index - numNonStaticFunc, tsPropKey, attributes); } @@
-311,7 +346,7 @@ JSHandle<JSHClass> TSHClassGenerator::CreateCHClass(const JSThread *thread, JSTaggedValue tsPropKey = tsLayout->GetKey(index - ClassInfoExtractor::STATIC_RESERVED_LENGTH); PropertyAttributes attributes = PropertyAttributes::Default(); attributes.SetIsInlinedProps(true); - attributes.SetRepresentation(Representation::MIXED); + attributes.SetRepresentation(Representation::NONE); attributes.SetOffset(index + numStaticFunc); layout->AddKey(thread, index + numStaticFunc, tsPropKey, attributes); } diff --git a/ecmascript/compiler/ts_hclass_generator.h b/ecmascript/compiler/ts_hclass_generator.h index ebda94de7b4ff4f7d0c59b11f45bef7a89248b63..646befb245b454023f9bb240324d1c27e6ab6837 100644 --- a/ecmascript/compiler/ts_hclass_generator.h +++ b/ecmascript/compiler/ts_hclass_generator.h @@ -26,7 +26,8 @@ public: ~TSHClassGenerator() = default; void GenerateTSHClasses() const; - void UpdateTSHClassFromPGO(const kungfu::GateType &type, const PGOHClassLayoutDesc &desc) const; + void UpdateTSHClassFromPGO(const kungfu::GateType &type, const PGOHClassLayoutDesc &desc, + bool enableOptTrackField) const; private: void RecursiveGenerate(const JSHandle<TSClassType> &classType) const; diff --git a/ecmascript/compiler/ts_hcr_lowering.cpp b/ecmascript/compiler/ts_hcr_lowering.cpp index bfeb07935e449bba5d2f9a483f224b1769f3fc46..32a928d99ceb449e4f287bb6929812d45ff1b616 100644 --- a/ecmascript/compiler/ts_hcr_lowering.cpp +++ b/ecmascript/compiler/ts_hcr_lowering.cpp @@ -44,6 +44,7 @@ bool TSHCRLowering::RunTSHCRLowering() success = false; } } + acc_.EliminateRedundantPhi(); if (IsTypeLogEnabled()) { pgoTypeLog_.PrintPGOTypeLog(); @@ -509,10 +510,21 @@ void TSHCRLowering::SpeculateConditionJump(GateRef gate, bool flag) GateRef value = acc_.GetValueIn(gate, 0); GateType valueType = acc_.GetGateType(value); GateRef jump = Circuit::NullGate(); + auto branchKind = BranchKind::NORMAL_BRANCH; + PGOSampleType sampleType = acc_.TryGetPGOType(value); + if (sampleType.IsLikely()) { + branchKind = BranchKind::TRUE_BRANCH; + } else if (sampleType.IsUnLikely()) { + branchKind = BranchKind::FALSE_BRANCH; + } else if (sampleType.IsStrongLikely()) { + branchKind = BranchKind::STRONG_TRUE_BRANCH; + } else if (sampleType.IsStrongUnLikely()) { + branchKind = BranchKind::STRONG_FALSE_BRANCH; + } if (flag) { - jump = builder_.TypedConditionJump<TypedJumpOp::TYPED_JEQZ>(value, valueType); + jump = builder_.TypedConditionJump<TypedJumpOp::TYPED_JEQZ>(value, valueType, branchKind); } else { - jump = builder_.TypedConditionJump<TypedJumpOp::TYPED_JNEZ>(value, valueType); + jump = builder_.TypedConditionJump<TypedJumpOp::TYPED_JNEZ>(value, valueType, branchKind); } acc_.ReplaceGate(gate, jump, jump, Circuit::NullGate()); } @@ -533,31 +545,6 @@ void TSHCRLowering::LowerTypedNot(GateRef gate) } } -void TSHCRLowering::LowerTypedLdArrayLength(GateRef gate) -{ - AddProfiling(gate); - GateRef array = acc_.GetValueIn(gate, 2); - if (!noCheck_) { - builder_.StableArrayCheck(array); - } - - GateRef result = builder_.LoadArrayLength(array); - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); -} - -void TSHCRLowering::LowerTypedLdTypedArrayLength(GateRef gate) -{ - AddProfiling(gate); - GateRef array = acc_.GetValueIn(gate, 2); - GateType arrayType = acc_.GetGateType(array); - arrayType = tsManager_->TryNarrowUnionType(arrayType); - if (!noCheck_) { - builder_.TypedArrayCheck(arrayType, array); - } - GateRef result = builder_.LoadTypedArrayLength(arrayType, array); - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); -} - void TSHCRLowering::DeleteConstDataIfNoUser(GateRef gate) { auto uses =
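The SpeculateConditionJump hunk above turns PGO branch feedback into a branch hint. Recast as a standalone mapping; the enums below are trimmed stand-ins for the engine's PGOSampleType and BranchKind, not the real definitions:

```cpp
enum class SampleKind { NONE, LIKELY, UNLIKELY, STRONG_LIKELY, STRONG_UNLIKELY };
enum class BranchKind { NORMAL_BRANCH, TRUE_BRANCH, FALSE_BRANCH,
                        STRONG_TRUE_BRANCH, STRONG_FALSE_BRANCH };

// Profile feedback on the condition value picks a hint that later phases can
// use for block layout and for deciding where deoptimization checks are cheap.
BranchKind SelectBranchKind(SampleKind sample)
{
    switch (sample) {
        case SampleKind::LIKELY:          return BranchKind::TRUE_BRANCH;
        case SampleKind::UNLIKELY:        return BranchKind::FALSE_BRANCH;
        case SampleKind::STRONG_LIKELY:   return BranchKind::STRONG_TRUE_BRANCH;
        case SampleKind::STRONG_UNLIKELY: return BranchKind::STRONG_FALSE_BRANCH;
        default:                          return BranchKind::NORMAL_BRANCH;
    }
}
```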
acc_.Uses(gate); @@ -567,219 +554,277 @@ void TSHCRLowering::DeleteConstDataIfNoUser(GateRef gate) } } -void TSHCRLowering::LowerTypedLdObjByNameForClassOrObject(GateRef gate, GateRef receiver, JSTaggedValue prop) +void TSHCRLowering::LowerTypedLdObjByName(GateRef gate) { - GateType receiverType = acc_.GetGateType(receiver); - receiverType = tsManager_->TryNarrowUnionType(receiverType); - - int hclassIndex = -1; - if (tsManager_->IsClassTypeKind(receiverType)) { - hclassIndex = tsManager_->GetConstructorHClassIndexByClassGateType(receiverType); - } else if (tsManager_->IsObjectTypeKind(receiverType)){ - hclassIndex = tsManager_->GetHClassIndexByObjectType(receiverType); - } - if (hclassIndex == -1) { // slowpath - return; - } - JSHClass *hclass = JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); + DISALLOW_GARBAGE_COLLECTION; + auto constData = acc_.GetValueIn(gate, 1); // 1: valueIn 1 + uint16_t keyIndex = acc_.GetConstantValue(constData); + JSTaggedValue key = tsManager_->GetStringFromConstantPool(keyIndex); - PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread_, hclass, prop); - if (!plr.IsFound() || !plr.IsLocal() || plr.IsAccessor()) { // slowpath - return; - } - AddProfiling(gate); - if (!noCheck_) { - GateRef hclassIndexGate = builder_.IntPtr(hclassIndex); - builder_.ObjectTypeCheck(receiverType, receiver, hclassIndexGate); - } + // 3: number of value inputs + ASSERT(acc_.GetNumValueIn(gate) == 3); + GateRef receiver = acc_.GetValueIn(gate, 2); // 2: acc or this object + LowerNamedAccess(gate, receiver, AccessMode::LOAD, key, Circuit::NullGate()); + DeleteConstDataIfNoUser(constData); +} - GateRef pfrGate = builder_.Int32(plr.GetData()); - GateRef result = builder_.LoadProperty(receiver, pfrGate, false); +void TSHCRLowering::LowerTypedStObjByName(GateRef gate, bool isThis) +{ + DISALLOW_GARBAGE_COLLECTION; + auto constData = acc_.GetValueIn(gate, 1); // 1: valueIn 1 + uint16_t keyIndex = acc_.GetConstantValue(constData); + JSTaggedValue key = tsManager_->GetStringFromConstantPool(keyIndex); - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); + GateRef receiver = Circuit::NullGate(); + GateRef value = Circuit::NullGate(); + if (isThis) { + // 3: number of value inputs + ASSERT(acc_.GetNumValueIn(gate) == 3); + receiver = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::THIS_OBJECT); + value = acc_.GetValueIn(gate, 2); // 2: acc + } else { + // 4: number of value inputs + ASSERT(acc_.GetNumValueIn(gate) == 4); + receiver = acc_.GetValueIn(gate, 2); // 2: receiver + value = acc_.GetValueIn(gate, 3); // 3: acc + } + LowerNamedAccess(gate, receiver, AccessMode::STORE, key, value); + DeleteConstDataIfNoUser(constData); } -void TSHCRLowering::LowerTypedLdObjByNameForClassInstance(GateRef gate, GateRef receiver, JSTaggedValue prop) +void TSHCRLowering::LowerNamedAccess(GateRef gate, GateRef receiver, AccessMode accessMode, JSTaggedValue key, + GateRef value) { + DISALLOW_GARBAGE_COLLECTION; GateType receiverType = acc_.GetGateType(receiver); receiverType = tsManager_->TryNarrowUnionType(receiverType); - - int hclassIndex = tsManager_->GetHClassIndexByInstanceGateType(receiverType); - if (hclassIndex == -1) { // slowpath - return; - } - JSHClass *hclass = JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); - if (!hclass->HasTSSubtyping()) { // slowpath + if (accessMode == AccessMode::LOAD && TryLowerTypedLdObjByNameForBuiltin(gate, receiverType, key)) { return; } - PropertyLookupResult plr = 
JSHClass::LookupPropertyInAotHClass(thread_, hclass, prop); - if (!plr.IsFound()) { // slowpath - return; + ObjectAccessHelper accessHelper(tsManager_, accessMode, receiver, receiverType, key, value); + ChunkVector<ObjectAccessInfo> infos(circuit_->chunk()); + bool continuation = accessHelper.Compute(infos); + if (!continuation) { + return; // slowpath } + ASSERT(!infos.empty()); AddProfiling(gate); - if (!noCheck_) { + // If all elements of the array are objects, and receiver is one of the elements, + // no HeapObjectCheck is required. + bool isHeapObject = acc_.IsHeapObjectFromElementsKind(receiver); + + // monomorphic + if (infos.size() == 1) { + int hclassIndex = infos[0].HClassIndex(); + PropertyLookupResult plr = infos[0].Plr(); + if (!Uncheck()) { + GateRef hclassIndexGate = builder_.IntPtr(hclassIndex); + builder_.ObjectTypeCheck(infos[0].Type(), isHeapObject, receiver, hclassIndexGate); + } + + GateRef result = BuildNamedPropertyAccess(gate, accessHelper, plr); + acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); + return; + } + + // polymorphic + size_t size = infos.size(); + GateRef fallthroughState = builder_.GetState(); + GateRef fallthroughDepend = builder_.GetDepend(); + std::vector<GateRef> values(size + 1, Circuit::NullGate()); // +1: state for value selector + std::vector<GateRef> depends(size + 1, Circuit::NullGate()); // +1: state for depend selector + std::vector<GateRef> states(size, Circuit::NullGate()); + for (size_t i = 0; i < size; ++i) { + GateType type = infos[i].Type(); + int hclassIndex = infos[i].HClassIndex(); + PropertyLookupResult plr = infos[i].Plr(); GateRef hclassIndexGate = builder_.IntPtr(hclassIndex); - builder_.ObjectTypeCheck(receiverType, receiver, hclassIndexGate); + + builder_.SetState(fallthroughState); + builder_.SetDepend(fallthroughDepend); + if (i == size - 1) { + builder_.ObjectTypeCheck(type, isHeapObject, receiver, hclassIndexGate); + fallthroughState = Circuit::NullGate(); + fallthroughDepend = Circuit::NullGate(); + } else { + GateRef compare = builder_.ObjectTypeCompare(type, isHeapObject, receiver, hclassIndexGate); + GateRef branch = builder_.Branch(builder_.GetState(), compare); + GateRef ifTrue = builder_.IfTrue(branch); + GateRef ifFalse = builder_.IfFalse(branch); + GateRef tDepend = builder_.DependRelay(ifTrue, builder_.GetDepend()); + fallthroughState = ifFalse; + fallthroughDepend = builder_.DependRelay(ifFalse, builder_.GetDepend()); + builder_.SetState(ifTrue); + builder_.SetDepend(tDepend); + } + + values[i + 1] = BuildNamedPropertyAccess(gate, accessHelper, plr); + depends[i + 1] = builder_.GetDepend(); + states[i] = builder_.GetState(); } + ASSERT(fallthroughState == Circuit::NullGate()); + ASSERT(fallthroughDepend == Circuit::NullGate()); + GateRef mergeState = circuit_->NewGate(circuit_->Merge(size), states); + depends[0] = mergeState; + values[0] = mergeState; + GateRef dependSelector = circuit_->NewGate(circuit_->DependSelector(size), depends); + GateRef result = accessHelper.IsLoading() ?
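The polymorphic path in LowerNamedAccess builds a compare cascade: every shape but the last gets an ObjectTypeCompare branch, the last gets an unconditional ObjectTypeCheck that deoptimizes on mismatch, and the per-shape results are merged through value and depend selectors. A scalar model of that control flow, with deoptimization modeled as an exception and all types illustrative:

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

struct AccessInfo {
    uint32_t hclassIndex;  // expected shape
    int slot;              // property slot valid for that shape
};

int LoadPolymorphic(uint32_t receiverHClass, const int *properties,
                    const std::vector<AccessInfo> &infos)
{
    // All but the last candidate: compare and fall through on mismatch.
    for (size_t i = 0; i + 1 < infos.size(); ++i) {
        if (receiverHClass == infos[i].hclassIndex) {  // ObjectTypeCompare -> ifTrue
            return properties[infos[i].slot];          // per-shape LoadProperty
        }
        // ifFalse: fallthroughState/fallthroughDepend continue the chain
    }
    // Last candidate: unconditional ObjectTypeCheck; a mismatch deoptimizes.
    if (receiverHClass != infos.back().hclassIndex) {
        throw std::runtime_error("deoptimize");
    }
    return properties[infos.back().slot];
}
```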
+ circuit_->NewGate(circuit_->ValueSelector(size), MachineType::I64, size + 1, values.data(), GateType::AnyType()) + : Circuit::NullGate(); + acc_.ReplaceHirAndDeleteIfException(gate, StateDepend(mergeState, dependSelector), result); +} - GateRef pfrGate = builder_.Int32(plr.GetData()); +GateRef TSHCRLowering::BuildNamedPropertyAccess(GateRef hir, ObjectAccessHelper accessHelper, PropertyLookupResult plr) +{ + GateRef receiver = accessHelper.GetReceiver(); + GateRef plrGate = builder_.Int32(plr.GetData()); GateRef result = Circuit::NullGate(); - if (LIKELY(!plr.IsAccessor())) { - result = builder_.LoadProperty(receiver, pfrGate, plr.IsVtable()); - if (UNLIKELY(IsVerifyVTbale())) { - AddVTableLoadVerifer(gate, result); + + AccessMode mode = accessHelper.GetAccessMode(); + switch (mode) { + case AccessMode::LOAD: { + if (LIKELY(!plr.IsAccessor())) { + result = builder_.LoadProperty(receiver, plrGate, plr.IsFunction()); + if (UNLIKELY(IsVerifyVTbale())) { + BuildNamedPropertyAccessVerifier(hir, receiver, mode, result); + } + } else { + result = builder_.CallGetter(hir, receiver, plrGate); + } } - } else { - result = builder_.CallGetter(gate, receiver, pfrGate); + break; + case AccessMode::STORE: { + GateRef value = accessHelper.GetValue(); + if (LIKELY(plr.IsLocal())) { + builder_.StoreProperty(receiver, plrGate, value); + if (UNLIKELY(IsVerifyVTbale())) { + BuildNamedPropertyAccessVerifier(hir, receiver, mode, value); + } + } else { + builder_.CallSetter(hir, receiver, plrGate, value); + } + } + break; + default: + break; } - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); + return result; } -void TSHCRLowering::LowerTypedLdObjByNameForArray(GateRef gate, GateRef receiver, JSTaggedValue prop) +void TSHCRLowering::BuildNamedPropertyAccessVerifier(GateRef gate, GateRef receiver, AccessMode mode, GateRef value) { - GateType receiverType = acc_.GetGateType(receiver); + GateRef constData = acc_.GetValueIn(gate, 1); + uint16_t keyIndex = acc_.GetConstantValue(constData); + GateRef func = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::FUNC); + GateRef constPool = builder_.GetConstPool(func); + GateRef key = builder_.GetValueFromTaggedArray(constPool, builder_.Int32(keyIndex)); + int stubId = mode == AccessMode::LOAD ? 
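BuildNamedPropertyAccess then picks the concrete operation per access mode: a load either reads the slot or calls the getter, a store either writes the slot or calls the setter. A scalar sketch of that dispatch, with all types and callbacks illustrative:

```cpp
enum class AccessMode { LOAD, STORE };

struct Plr {           // stand-in for PropertyLookupResult
    bool isAccessor;   // property is behind a getter/setter pair
    bool isLocal;      // property lives in the object's own slot
};

// Loads: LoadProperty vs CallGetter. Stores: StoreProperty vs CallSetter.
int AccessNamedProperty(AccessMode mode, const Plr &plr, int *slot, int value,
                        int (*getter)(), void (*setter)(int))
{
    if (mode == AccessMode::LOAD) {
        return plr.isAccessor ? getter() : *slot;
    }
    if (plr.isLocal) {
        *slot = value;
    } else {
        setter(value);
    }
    return value;
}
```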
RTSTUB_ID(VerifyVTableLoading) : RTSTUB_ID(VerifyVTableStoring); + builder_.CallRuntime(glue_, stubId, builder_.GetDepend(), { receiver, key, value }, gate); +} - EcmaString *propString = EcmaString::Cast(prop.GetTaggedObject()); +bool TSHCRLowering::TryLowerTypedLdObjByNameForBuiltin(GateRef gate, GateType receiverType, JSTaggedValue key) +{ + EcmaString *propString = EcmaString::Cast(key.GetTaggedObject()); EcmaString *lengthString = EcmaString::Cast(thread_->GlobalConstants()->GetLengthString().GetTaggedObject()); if (propString == lengthString) { if (tsManager_->IsArrayTypeKind(receiverType)) { LowerTypedLdArrayLength(gate); - return; + return true; } else if (tsManager_->IsValidTypedArrayType(receiverType)) { LowerTypedLdTypedArrayLength(gate); - return; + return true; + } else if (receiverType.IsStringType()) { + LowerTypedLdStringLength(gate); + return true; } } + return TryLowerTypedLdObjByNameForBuiltinMethod(gate, receiverType, key); } -void TSHCRLowering::LowerTypedLdObjByName(GateRef gate) +bool TSHCRLowering::IsCreateArray(GateRef gate) { - DISALLOW_GARBAGE_COLLECTION; - auto constData = acc_.GetValueIn(gate, 1); // 1: valueIn 1 - uint16_t propIndex = acc_.GetConstantValue(constData); - auto prop = tsManager_->GetStringFromConstantPool(propIndex); - - // 3: number of value inputs - ASSERT(acc_.GetNumValueIn(gate) == 3); - GateRef receiver = acc_.GetValueIn(gate, 2); // 2: acc or this object - GateType receiverType = acc_.GetGateType(receiver); - receiverType = tsManager_->TryNarrowUnionType(receiverType); - - if (tsManager_->IsClassInstanceTypeKind(receiverType)) { - LowerTypedLdObjByNameForClassInstance(gate, receiver, prop); - } else if (tsManager_->IsClassTypeKind(receiverType) || - tsManager_->IsObjectTypeKind(receiverType)) { - LowerTypedLdObjByNameForClassOrObject(gate, receiver, prop); - } else { - LowerTypedLdObjByNameForArray(gate, receiver, prop); + if (acc_.GetOpCode(gate) != OpCode::JS_BYTECODE) { + return false; } - DeleteConstDataIfNoUser(constData); + EcmaOpcode ecmaop = acc_.GetByteCodeOpcode(gate); + switch (ecmaop) { + case EcmaOpcode::CREATEEMPTYARRAY_IMM8: + case EcmaOpcode::CREATEEMPTYARRAY_IMM16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: + return true; + default: + return false; + } + UNREACHABLE(); + return false; } -void TSHCRLowering::LowerTypedStObjByNameForClassOrObject(GateRef gate, GateRef receiver, GateRef value, - JSTaggedValue prop) +void TSHCRLowering::LowerTypedLdArrayLength(GateRef gate) { - GateType receiverType = acc_.GetGateType(receiver); - receiverType = tsManager_->TryNarrowUnionType(receiverType); - - int hclassIndex = -1; - if (tsManager_->IsClassTypeKind(receiverType)) { - hclassIndex = tsManager_->GetConstructorHClassIndexByClassGateType(receiverType); - } else if (tsManager_->IsObjectTypeKind(receiverType)){ - hclassIndex = tsManager_->GetHClassIndexByObjectType(receiverType); - } - if (hclassIndex == -1) { // slowpath - return; - } - JSHClass *hclass = JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); - - PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread_, hclass, prop); - if (!plr.IsFound() || !plr.IsLocal() || plr.IsAccessor() || !plr.IsWritable()) { // slowpath - return; - } AddProfiling(gate); - if (!noCheck_) { - GateRef hclassIndexGate = builder_.IntPtr(hclassIndex); - builder_.ObjectTypeCheck(receiverType, receiver, hclassIndexGate); + GateRef array = acc_.GetValueIn(gate, 2); + if (!Uncheck()) { + ElementsKind 
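TryLowerTypedLdObjByNameForBuiltin specializes `length` loads per receiver kind before falling back to the generic named lookup; each arm installs its own guard and then reads the length field directly. A scalar sketch of that dispatch, with all types and helpers illustrative:

```cpp
#include <stdexcept>
#include <string>
#include <vector>

enum class ReceiverKind { ARRAY, TYPED_ARRAY, STRING, OTHER };

// Each arm mirrors one lowering: guard the receiver, then load the length
// field directly instead of performing a generic property lookup.
size_t LoadLengthFastPath(ReceiverKind kind, const std::vector<int> &array,
                          size_t typedArrayLength, const std::string &str)
{
    switch (kind) {
        case ReceiverKind::ARRAY:        // StableArrayCheck + LoadArrayLength
            return array.size();
        case ReceiverKind::TYPED_ARRAY:  // TypedArrayCheck + LoadTypedArrayLength
            return typedArrayLength;
        case ReceiverKind::STRING:       // EcmaStringCheck + LoadStringLength
            return str.size();
        default:                         // generic slow path handles the rest
            throw std::runtime_error("slow path");
    }
}
```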
kind = acc_.TryGetElementsKind(gate); + if (!IsCreateArray(array)) { + builder_.StableArrayCheck(array, kind, ArrayMetaDataAccessor::Mode::LOAD_LENGTH); + } } - GateRef pfrGate = builder_.Int32(plr.GetData()); - builder_.StoreProperty(receiver, pfrGate, value); - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), Circuit::NullGate()); + GateRef result = builder_.LoadArrayLength(array); + acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); } -void TSHCRLowering::LowerTypedStObjByNameForClassInstance(GateRef gate, GateRef receiver, GateRef value, - JSTaggedValue prop, bool isThis) +void TSHCRLowering::LowerTypedLdTypedArrayLength(GateRef gate) { - GateType receiverType = acc_.GetGateType(receiver); - receiverType = tsManager_->TryNarrowUnionType(receiverType); - - int hclassIndex = tsManager_->GetHClassIndexByInstanceGateType(receiverType); - if (hclassIndex == -1) { // slowpath - return; - } - JSHClass *hclass = JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); - if (!hclass->HasTSSubtyping()) { // slowpath - return; - } - - PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread_, hclass, prop); - if (!plr.IsFound() || plr.IsFunction()) { // slowpath - return; - } - AddProfiling(gate); - if (!noCheck_) { - GateRef hclassIndexGate = builder_.IntPtr(hclassIndex); - builder_.ObjectTypeCheck(receiverType, receiver, hclassIndexGate); + GateRef array = acc_.GetValueIn(gate, 2); + GateType arrayType = acc_.GetGateType(array); + arrayType = tsManager_->TryNarrowUnionType(arrayType); + if (!Uncheck()) { + builder_.TypedArrayCheck(arrayType, array); } + GateRef result = builder_.LoadTypedArrayLength(arrayType, array); + acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); +} - GateRef pfrGate = builder_.Int32(plr.GetData()); - if (LIKELY(plr.IsLocal())) { - GateRef store = builder_.StoreProperty(receiver, pfrGate, value); - if (UNLIKELY(IsVerifyVTbale())) { - AddVTableStoreVerifer(gate, store, isThis); - } - } else { - builder_.CallSetter(gate, receiver, pfrGate, value); +void TSHCRLowering::LowerTypedLdStringLength(GateRef gate) +{ + AddProfiling(gate); + GateRef str = acc_.GetValueIn(gate, 2); + if (!Uncheck()) { + builder_.EcmaStringCheck(str); } - - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), Circuit::NullGate()); + GateRef result = builder_.LoadStringLength(str); + acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); } -void TSHCRLowering::LowerTypedStObjByName(GateRef gate, bool isThis) +bool TSHCRLowering::TryLowerTypedLdObjByNameForBuiltinMethod(GateRef gate, GateType receiverType, JSTaggedValue key) { - DISALLOW_GARBAGE_COLLECTION; - auto constData = acc_.GetValueIn(gate, 1); // 1: valueIn 1 - uint16_t propIndex = acc_.GetConstantValue(constData); - auto prop = tsManager_->GetStringFromConstantPool(propIndex); - - GateRef receiver = Circuit::NullGate(); - GateRef value = Circuit::NullGate(); - if (isThis) { - // 3: number of value inputs - ASSERT(acc_.GetNumValueIn(gate) == 3); - receiver = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::THIS_OBJECT); - value = acc_.GetValueIn(gate, 2); // 2: acc - } else { - // 4: number of value inputs - ASSERT(acc_.GetNumValueIn(gate) == 4); - receiver = acc_.GetValueIn(gate, 2); // 2: receiver - value = acc_.GetValueIn(gate, 3); // 3: acc - } - GateType receiverType = acc_.GetGateType(receiver); - receiverType = tsManager_->TryNarrowUnionType(receiverType); - if 
(tsManager_->IsClassInstanceTypeKind(receiverType)) {
-        LowerTypedStObjByNameForClassInstance(gate, receiver, value, prop, isThis);
-    } else if (tsManager_->IsClassTypeKind(receiverType) ||
-               tsManager_->IsObjectTypeKind(receiverType)) {
-        LowerTypedStObjByNameForClassOrObject(gate, receiver, value, prop);
+    JSHandle<GlobalEnv> globalEnv = thread_->GetEcmaVM()->GetGlobalEnv();
+    if (receiverType.IsStringType()) {
+        JSHClass *stringPhc = globalEnv->GetStringPrototype()->GetTaggedObject()->GetClass();
+        PropertyLookupResult plr = JSHClass::LookupPropertyInBuiltinPrototypeHClass(thread_, stringPhc, key);
+        // Accessors cannot be handled at the moment
+        if (!plr.IsFound() || plr.IsAccessor()) {
+            return false;
+        }
+        AddProfiling(gate);
+        GateRef str = acc_.GetValueIn(gate, 2);
+        if (!Uncheck()) {
+            builder_.EcmaStringCheck(str);
+        }
+        GateRef plrGate = builder_.Int32(plr.GetData());
+        GateRef strPrototype = builder_.GetGlobalEnvObj(builder_.GetGlobalEnv(), GlobalEnv::STRING_PROTOTYPE_INDEX);
+        GateRef result = builder_.LoadProperty(strPrototype, plrGate, plr.IsFunction());
+        acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result);
+        return true;
     }
-    DeleteConstDataIfNoUser(constData);
+    return false;
 }
 
 void TSHCRLowering::LowerTypedLdObjByIndex(GateRef gate)
@@ -790,9 +835,7 @@ void TSHCRLowering::LowerTypedLdObjByIndex(GateRef gate)
     GateType receiverType = acc_.GetGateType(receiver);
     receiverType = tsManager_->TryNarrowUnionType(receiverType);
     GateRef result = Circuit::NullGate();
-    if (tsManager_->IsInt32ArrayType(receiverType) ||
-        tsManager_->IsFloat32ArrayType(receiverType) ||
-        tsManager_->IsFloat64ArrayType(receiverType)) {
+    if (tsManager_->IsValidTypedArrayType(receiverType)) {
         AddProfiling(gate);
         GateRef index = acc_.GetValueIn(gate, 0);
         uint32_t indexValue = static_cast<uint32_t>(acc_.GetConstantValue(index));
@@ -813,14 +856,15 @@
     GateType receiverType = acc_.GetGateType(receiver);
     GateType valueType = acc_.GetGateType(value);
     receiverType = tsManager_->TryNarrowUnionType(receiverType);
-    if ((!tsManager_->IsFloat32ArrayType(receiverType)) || (!valueType.IsNumberType())) { // slowpath
+    if ((!tsManager_->IsBuiltinInstanceType(BuiltinTypeId::FLOAT32_ARRAY, receiverType)) ||
+        (!valueType.IsNumberType())) { // slowpath
         return;
     }
     AddProfiling(gate);
 
-    if (tsManager_->IsFloat32ArrayType(receiverType)) {
-        if (!noCheck_) {
+    if (tsManager_->IsBuiltinInstanceType(BuiltinTypeId::FLOAT32_ARRAY, receiverType)) {
+        if (!Uncheck()) {
             builder_.TypedArrayCheck(receiverType, receiver);
         }
     } else {
@@ -831,11 +875,11 @@
     uint32_t indexValue = static_cast<uint32_t>(acc_.GetConstantValue(index));
     index = builder_.Int32(indexValue);
     auto length = builder_.LoadTypedArrayLength(receiverType, receiver);
-    if (!noCheck_) {
+    if (!Uncheck()) {
         builder_.IndexCheck(receiverType, length, index);
     }
 
-    if (tsManager_->IsFloat32ArrayType(receiverType)) {
+    if (tsManager_->IsBuiltinInstanceType(BuiltinTypeId::FLOAT32_ARRAY, receiverType)) {
         builder_.StoreElement(receiver, index, value);
     } else {
         LOG_ECMA(FATAL) << "this branch is unreachable";
@@ -868,12 +912,14 @@ void TSHCRLowering::LowerTypedLdObjByValue(GateRef gate, bool isThis)
     }
     GateRef result = Circuit::NullGate();
-    if (tsManager_->IsArrayTypeKind(receiverType)) {
+    if (receiverType.IsStringType()) {
+        AddProfiling(gate);
+        result = LoadStringByIndex(receiver, propKey);
+    } else if (tsManager_->IsArrayTypeKind(receiverType)) {
         AddProfiling(gate);
-        result = LoadJSArrayByIndex(receiver, propKey);
-    } else if (tsManager_->IsInt32ArrayType(receiverType) ||
-               tsManager_->IsFloat32ArrayType(receiverType) ||
-               tsManager_->IsFloat64ArrayType(receiverType)) {
+        ElementsKind kind = acc_.TryGetArrayElementsKind(gate);
+        result = LoadJSArrayByIndex(receiver, propKey, kind);
+    } else if (tsManager_->IsValidTypedArrayType(receiverType)) {
         AddProfiling(gate);
         result = LoadTypedArrayByIndex(receiver, propKey);
     } else {
@@ -882,49 +928,99 @@
     acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result);
 }
 
-GateRef TSHCRLowering::LoadJSArrayByIndex(GateRef receiver, GateRef propKey)
+GateRef TSHCRLowering::LoadStringByIndex(GateRef receiver, GateRef propKey)
+{
+    if (!Uncheck()) {
+        GateType receiverType = acc_.GetGateType(receiver);
+        receiverType = tsManager_->TryNarrowUnionType(receiverType);
+        builder_.EcmaStringCheck(receiver);
+        GateRef length = builder_.LoadStringLength(receiver);
+        propKey = builder_.IndexCheck(receiverType, length, propKey);
+        receiver = builder_.FlattenStringCheck(receiver);
+    }
+    return builder_.LoadElement(receiver, propKey);
+}
+
+GateRef TSHCRLowering::LoadJSArrayByIndex(GateRef receiver, GateRef propKey, ElementsKind kind)
 {
-    if (!noCheck_) {
+    if (!Uncheck()) {
         GateType receiverType = acc_.GetGateType(receiver);
         receiverType = tsManager_->TryNarrowUnionType(receiverType);
-        builder_.StableArrayCheck(receiver);
+        if (!IsCreateArray(receiver)) {
+            builder_.StableArrayCheck(receiver, kind, ArrayMetaDataAccessor::Mode::LOAD_ELEMENT);
+        }
         GateRef length = builder_.LoadArrayLength(receiver);
         propKey = builder_.IndexCheck(receiverType, length, propKey);
     }
-    return builder_.LoadElement(receiver, propKey);
+
+    GateRef result = Circuit::NullGate();
+    if (Elements::IsInt(kind)) {
+        result = builder_.LoadElement(receiver, propKey);
+    } else if (Elements::IsDouble(kind)) {
+        result = builder_.LoadElement(receiver, propKey);
+    } else if (Elements::IsObject(kind)) {
+        result = builder_.LoadElement(receiver, propKey);
+    } else if (!Elements::IsHole(kind)) {
+        result = builder_.LoadElement(receiver, propKey);
+    } else {
+        result = builder_.LoadElement(receiver, propKey);
+    }
+    return result;
 }
 
 GateRef TSHCRLowering::LoadTypedArrayByIndex(GateRef receiver, GateRef propKey)
 {
     GateType receiverType = acc_.GetGateType(receiver);
     receiverType = tsManager_->TryNarrowUnionType(receiverType);
-    if (!noCheck_) {
+    if (!Uncheck()) {
         builder_.TypedArrayCheck(receiverType, receiver);
         GateRef length = builder_.LoadTypedArrayLength(receiverType, receiver);
         propKey = builder_.IndexCheck(receiverType, length, propKey);
     }
-    if (tsManager_->IsInt32ArrayType(receiverType)) {
-        return builder_.LoadElement(receiver, propKey);
-    } else if (tsManager_->IsFloat32ArrayType(receiverType)) {
-        return builder_.LoadElement(receiver, propKey);
-    } else if (tsManager_->IsFloat64ArrayType(receiverType)) {
-        return builder_.LoadElement(receiver, propKey);
-    } else {
-        LOG_ECMA(FATAL) << "this branch is unreachable";
-        UNREACHABLE();
+    auto builtinTypeId = tsManager_->GetTypedArrayBuiltinId(receiverType);
+    switch (builtinTypeId) {
+        case BuiltinTypeId::INT8_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::UINT8_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::UINT8_CLAMPED_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::INT16_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::UINT16_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::INT32_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::UINT32_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::FLOAT32_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        case BuiltinTypeId::FLOAT64_ARRAY:
+            return builder_.LoadElement(receiver, propKey);
+        default:
+            LOG_ECMA(FATAL) << "this branch is unreachable";
+            UNREACHABLE();
    }
     return Circuit::NullGate();
 }
 
-void TSHCRLowering::StoreJSArrayByIndex(GateRef receiver, GateRef propKey, GateRef value)
+void TSHCRLowering::StoreJSArrayByIndex(GateRef receiver, GateRef propKey, GateRef value, ElementsKind kind)
 {
-    if (!noCheck_) {
+    if (!Uncheck()) {
         GateType receiverType = acc_.GetGateType(receiver);
         receiverType = tsManager_->TryNarrowUnionType(receiverType);
-        builder_.StableArrayCheck(receiver);
+        if (!IsCreateArray(receiver)) {
+            builder_.StableArrayCheck(receiver, kind, ArrayMetaDataAccessor::Mode::STORE_ELEMENT);
+        }
         GateRef length = builder_.LoadArrayLength(receiver);
         builder_.IndexCheck(receiverType, length, propKey);
+        builder_.COWArrayCheck(receiver);
+
+        if (Elements::IsObject(kind)) {
+            GateRef frameState = acc_.FindNearestFrameState(builder_.GetDepend());
+            builder_.HeapObjectCheck(value, frameState);
+        }
     }
     builder_.StoreElement(receiver, propKey, value);
 }
@@ -934,20 +1030,44 @@
 {
     GateType receiverType = acc_.GetGateType(receiver);
     receiverType = tsManager_->TryNarrowUnionType(receiverType);
-    if (!noCheck_) {
+    if (!Uncheck()) {
         builder_.TypedArrayCheck(receiverType, receiver);
         GateRef length = builder_.LoadTypedArrayLength(receiverType, receiver);
         propKey = builder_.IndexCheck(receiverType, length, propKey);
     }
-    if (tsManager_->IsInt32ArrayType(receiverType)) {
-        builder_.StoreElement(receiver, propKey, value);
-    } else if (tsManager_->IsFloat32ArrayType(receiverType)) {
-        builder_.StoreElement(receiver, propKey, value);
-    } else if (tsManager_->IsFloat64ArrayType(receiverType)) {
-        builder_.StoreElement(receiver, propKey, value);
-    } else {
-        LOG_ECMA(FATAL) << "this branch is unreachable";
-        UNREACHABLE();
+
+    auto builtinTypeId = tsManager_->GetTypedArrayBuiltinId(receiverType);
+    switch (builtinTypeId) {
+        case BuiltinTypeId::INT8_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::UINT8_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::UINT8_CLAMPED_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::INT16_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::UINT16_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::INT32_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::UINT32_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::FLOAT32_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        case BuiltinTypeId::FLOAT64_ARRAY:
+            builder_.StoreElement(receiver, propKey, value);
+            break;
+        default:
+            LOG_ECMA(FATAL) << "this branch is unreachable";
+            UNREACHABLE();
     }
 }
 
@@ -966,10 +1086,9 @@ void TSHCRLowering::LowerTypedStObjByValue(GateRef gate)
 
     if (tsManager_->IsArrayTypeKind(receiverType)) {
         AddProfiling(gate);
-        StoreJSArrayByIndex(receiver, propKey, value);
-    } else if
(tsManager_->IsInt32ArrayType(receiverType) || - tsManager_->IsFloat32ArrayType(receiverType) || - tsManager_->IsFloat64ArrayType(receiverType)) { + ElementsKind kind = acc_.TryGetArrayElementsKind(gate); + StoreJSArrayByIndex(receiver, propKey, value, kind); + } else if (tsManager_->IsValidTypedArrayType(receiverType)) { AddProfiling(gate); StoreTypedArrayByIndex(receiver, propKey, value); } else { @@ -1056,31 +1175,26 @@ void TSHCRLowering::LowerTypedSuperCall(GateRef gate) acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), constructGate); } -void TSHCRLowering::SpeculateCallBuiltin(GateRef gate, GateRef func, GateRef a0, BuiltinsStubCSigns::ID id) +void TSHCRLowering::SpeculateCallBuiltin(GateRef gate, GateRef func, const std::vector &args, + BuiltinsStubCSigns::ID id, bool isThrow) { - if (!noCheck_) { - builder_.CallTargetCheck(func, builder_.IntPtr(static_cast(id)), a0); + if (!Uncheck()) { + builder_.CallTargetCheck(gate, func, builder_.IntPtr(static_cast(id)), args[0]); } - GateRef result = builder_.TypedCallBuiltin(gate, a0, id); - - acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); -} -void TSHCRLowering::SpeculateCallThis3Builtin(GateRef gate, BuiltinsStubCSigns::ID id) -{ - GateRef thisObj = acc_.GetValueIn(gate, 0); - GateRef a0 = acc_.GetValueIn(gate, 1); // 1: the first-para - GateRef a1 = acc_.GetValueIn(gate, 2); // 2: the third-para - GateRef a2 = acc_.GetValueIn(gate, 3); // 3: the fourth-para - GateRef result = builder_.TypedCallThis3Builtin(gate, thisObj, a0, a1, a2, id); + GateRef result = builder_.TypedCallBuiltin(gate, args, id); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + if (isThrow) { + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + } else { + acc_.ReplaceHirAndDeleteIfException(gate, builder_.GetStateDepend(), result); + } } BuiltinsStubCSigns::ID TSHCRLowering::GetBuiltinId(BuiltinTypeId id, GateRef func) { GateType funcType = acc_.GetGateType(func); - if (!tsManager_->IsBuiltinObject(id, funcType)) { + if (!tsManager_->IsBuiltinObjectMethod(id, funcType)) { return BuiltinsStubCSigns::ID::NONE; } std::string name = tsManager_->GetFuncName(funcType); @@ -1088,51 +1202,51 @@ BuiltinsStubCSigns::ID TSHCRLowering::GetBuiltinId(BuiltinTypeId id, GateRef fun return stubId; } +void TSHCRLowering::CheckCallTargetFromDefineFuncAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, + GateType funcType, const std::vector &args, const std::vector &argsFastCall, bool isNoGC) +{ + if (!Uncheck()) { + builder_.JSCallTargetFromDefineFuncCheck(funcType, func, gate); + } + if (tsManager_->CanFastCall(funcGt)) { + LowerFastCall(gate, func, argsFastCall, isNoGC); + } else { + LowerCall(gate, func, args, isNoGC); + } +} + +void TSHCRLowering::LowerFastCall(GateRef gate, GateRef func, + const std::vector &argsFastCall, bool isNoGC) +{ + builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); + GateRef result = builder_.TypedFastCall(gate, argsFastCall, isNoGC); + builder_.EndCallTimer(glue_, gate, {glue_, func}, true); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); +} + +void TSHCRLowering::LowerCall(GateRef gate, GateRef func, + const std::vector &args, bool isNoGC) +{ + builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); + GateRef result = builder_.TypedCall(gate, args, isNoGC); + builder_.EndCallTimer(glue_, gate, {glue_, func}, true); + acc_.ReplaceGate(gate, builder_.GetState(), 
builder_.GetDepend(), result); +} + void TSHCRLowering::CheckCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, GateType funcType, const std::vector &args, const std::vector &argsFastCall) { if (IsLoadVtable(func)) { - if (tsManager_->CanFastCall(funcGt)) { - if (!noCheck_) { - builder_.JSFastCallThisTargetTypeCheck(funcType, func); - } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedFastCall(gate, argsFastCall); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); - } else { - if (!noCheck_) { - builder_.JSCallThisTargetTypeCheck(funcType, func); - } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedCall(gate, args); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); - } + CheckThisCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall); // func = a.foo, func() } else { + bool isNoGC = tsManager_->IsNoGC(funcGt); auto op = acc_.GetOpCode(func); if (!tsManager_->FastCallFlagIsVaild(funcGt)) { return; } if (op == OpCode::JS_BYTECODE && (acc_.GetByteCodeOpcode(func) == EcmaOpcode::DEFINEFUNC_IMM8_ID16_IMM8 || acc_.GetByteCodeOpcode(func) == EcmaOpcode::DEFINEFUNC_IMM16_ID16_IMM8)) { - if (tsManager_->CanFastCall(funcGt)) { - if (!noCheck_) { - builder_.JSCallTargetFromDefineFuncCheck(funcType, func); - } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedFastCall(gate, argsFastCall); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); - } else { - if (!noCheck_) { - builder_.JSCallTargetFromDefineFuncCheck(funcType, func); - } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedCall(gate, args); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); - } + CheckCallTargetFromDefineFuncAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall, isNoGC); return; } int methodIndex = tsManager_->GetMethodIndex(funcGt); @@ -1140,21 +1254,17 @@ void TSHCRLowering::CheckCallTargetAndLowerCall(GateRef gate, GateRef func, Glob return; } if (tsManager_->CanFastCall(funcGt)) { - if (!noCheck_) { - builder_.JSFastCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex)); + if (!Uncheck()) { + builder_.JSCallTargetTypeCheck(funcType, + func, builder_.IntPtr(methodIndex), gate); } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedFastCall(gate, argsFastCall); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + LowerFastCall(gate, func, argsFastCall, isNoGC); } else { - if (!noCheck_) { - builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex)); + if (!Uncheck()) { + builder_.JSCallTargetTypeCheck(funcType, + func, builder_.IntPtr(methodIndex), gate); } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedCall(gate, args); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + 
LowerCall(gate, func, args, isNoGC); } } } @@ -1181,9 +1291,9 @@ void TSHCRLowering::LowerTypedCallArg1(GateRef gate) GateRef a0Value = acc_.GetValueIn(gate, 0); GateType a0Type = acc_.GetGateType(a0Value); BuiltinsStubCSigns::ID id = GetBuiltinId(BuiltinTypeId::MATH, func); - if (id != BuiltinsStubCSigns::ID::NONE && a0Type.IsNumberType()) { + if (IS_TYPED_BUILTINS_MATH_ID(id) && a0Type.IsNumberType()) { AddProfiling(gate); - SpeculateCallBuiltin(gate, func, a0Value, id); + SpeculateCallBuiltin(gate, func, { a0Value }, id, false); } else { GateRef actualArgc = builder_.Int64(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate), EcmaOpcode::CALLARG1_IMM8_V8)); @@ -1274,28 +1384,51 @@ bool TSHCRLowering::CanOptimizeAsFastCall(GateRef func) return true; } +void TSHCRLowering::CheckFastCallThisCallTarget(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, + GateType funcType, bool isNoGC) +{ + if (noCheck_) { + return; + } + if (isNoGC) { + auto methodOffset = tsManager_->GetFuncMethodOffset(funcGt); + builder_.JSNoGCCallThisTargetTypeCheck(funcType, + func, builder_.IntPtr(methodOffset), gate); + } else { + builder_.JSCallThisTargetTypeCheck(funcType, + func, gate); + } +} + +void TSHCRLowering::CheckCallThisCallTarget(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, + GateType funcType, bool isNoGC) +{ + if (noCheck_) { + return; + } + if (isNoGC) { + auto methodOffset = tsManager_->GetFuncMethodOffset(funcGt); + builder_.JSNoGCCallThisTargetTypeCheck(funcType, + func, builder_.IntPtr(methodOffset), gate); + } else { + builder_.JSCallThisTargetTypeCheck(funcType, + func, gate); + } +} + void TSHCRLowering::CheckThisCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, GateType funcType, const std::vector &args, const std::vector &argsFastCall) { if (!tsManager_->FastCallFlagIsVaild(funcGt)) { return; } + bool isNoGC = tsManager_->IsNoGC(funcGt); if (tsManager_->CanFastCall(funcGt)) { - if (!noCheck_) { - builder_.JSFastCallThisTargetTypeCheck(funcType, func); - } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedFastCall(gate, argsFastCall); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + CheckFastCallThisCallTarget(gate, func, funcGt, funcType, isNoGC); + LowerFastCall(gate, func, argsFastCall, isNoGC); } else { - if (!noCheck_) { - builder_.JSCallThisTargetTypeCheck(funcType, func); - } - builder_.StartCallTimer(glue_, gate, {glue_, func, builder_.True()}, true); - GateRef result = builder_.TypedCall(gate, args); - builder_.EndCallTimer(glue_, gate, {glue_, func}, true); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + CheckCallThisCallTarget(gate, func, funcGt, funcType, isNoGC); + LowerCall(gate, func, args, isNoGC); } } @@ -1304,6 +1437,13 @@ void TSHCRLowering::LowerTypedCallthis0(GateRef gate) // 2: number of value inputs ASSERT(acc_.GetNumValueIn(gate) == 2); GateRef func = acc_.GetValueIn(gate, 1); + BuiltinsStubCSigns::ID id = GetBuiltinId(BuiltinTypeId::ARRAY, func); + if (id == BuiltinsStubCSigns::ID::SORT) { + AddProfiling(gate); + GateRef thisObj = acc_.GetValueIn(gate, 0); + SpeculateCallBuiltin(gate, func, { thisObj }, id, true); + return; + } if (!CanOptimizeAsFastCall(func)) { return; } @@ -1320,16 +1460,24 @@ void TSHCRLowering::LowerTypedCallthis1(GateRef gate) GateType a0Type = acc_.GetGateType(a0); GateRef func = acc_.GetValueIn(gate, 2); // 2:function 
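    // Dispatch order in the lowering below: probe the callee as a Math builtin first; if that
    // yields NONE, retry it as a JSON builtin and lower it as a throwing call; only when both
    // probes miss does the code fall back to the generic typed this-call path.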
BuiltinsStubCSigns::ID id = GetBuiltinId(BuiltinTypeId::MATH, func); - if (id != BuiltinsStubCSigns::ID::NONE && a0Type.IsNumberType()) { - AddProfiling(gate); - SpeculateCallBuiltin(gate, func, a0, id); + if (id == BuiltinsStubCSigns::ID::NONE) { + id = GetBuiltinId(BuiltinTypeId::JSON, func); + if (id != BuiltinsStubCSigns::ID::NONE) { + AddProfiling(gate); + SpeculateCallBuiltin(gate, func, { a0 }, id, true); + } } else { - if (!CanOptimizeAsFastCall(func)) { - return; + if (a0Type.IsNumberType()) { + AddProfiling(gate); + SpeculateCallBuiltin(gate, func, { a0 }, id, false); + } else { + if (!CanOptimizeAsFastCall(func)) { + return; + } + GateRef actualArgc = builder_.Int64(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate), + EcmaOpcode::CALLTHIS1_IMM8_V8_V8)); + LowerTypedThisCall(gate, func, actualArgc, 1); } - GateRef actualArgc = builder_.Int64(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate), - EcmaOpcode::CALLTHIS1_IMM8_V8_V8)); - LowerTypedThisCall(gate, func, actualArgc, 1); } } @@ -1354,7 +1502,11 @@ void TSHCRLowering::LowerTypedCallthis3(GateRef gate) BuiltinsStubCSigns::ID id = GetBuiltinId(BuiltinTypeId::STRING, func); if (id == BuiltinsStubCSigns::ID::LocaleCompare) { AddProfiling(gate); - SpeculateCallThis3Builtin(gate, id); + GateRef thisObj = acc_.GetValueIn(gate, 0); + GateRef a0 = acc_.GetValueIn(gate, 1); // 1: the first-para + GateRef a1 = acc_.GetValueIn(gate, 2); // 2: the third-para + GateRef a2 = acc_.GetValueIn(gate, 3); // 3: the fourth-para + SpeculateCallBuiltin(gate, func, { thisObj, a0, a1, a2 }, id, true); return; } @@ -1473,32 +1625,4 @@ void TSHCRLowering::AddHitBytecodeCount() bytecodeHitTimeMap_[currentOp_] = 1; } } - -void TSHCRLowering::AddVTableLoadVerifer(GateRef gate, GateRef value) -{ - GateRef receiver = acc_.GetValueIn(gate, 2); // 2: receiver - GateRef key = acc_.GetValueIn(gate, 1); // 1: key - - GateRef verifier = builder_.CallRuntime(glue_, RTSTUB_ID(VerifyVTableLoading), acc_.GetDep(gate), - { receiver, key, value }, gate); - acc_.SetDep(gate, verifier); -} - -void TSHCRLowering::AddVTableStoreVerifer(GateRef gate, GateRef store, bool isThis) -{ - GateRef key = acc_.GetValueIn(gate, 1); - GateRef receiver = Circuit::NullGate(); - GateRef value = Circuit::NullGate(); - if (isThis) { - receiver = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::THIS_OBJECT); - value = acc_.GetValueIn(gate, 2); // 2: acc - } else { - receiver = acc_.GetValueIn(gate, 2); // 2: receiver - value = acc_.GetValueIn(gate, 3); // 3: acc - } - - GateRef verifier = builder_.CallRuntime(glue_, RTSTUB_ID(VerifyVTableStoring), store, - { receiver, key, value }, gate); - acc_.SetDep(gate, verifier); -} } // namespace panda::ecmascript diff --git a/ecmascript/compiler/ts_hcr_lowering.h b/ecmascript/compiler/ts_hcr_lowering.h index 74a6f9a9cad486a6ce2c7233dfe2cdf78fad8ab6..677ca8ec3d3b0e426828d09862864abe998d9dbb 100644 --- a/ecmascript/compiler/ts_hcr_lowering.h +++ b/ecmascript/compiler/ts_hcr_lowering.h @@ -20,6 +20,7 @@ #include "ecmascript/compiler/builtins/builtins_call_signature.h" #include "ecmascript/compiler/bytecode_circuit_builder.h" #include "ecmascript/compiler/circuit_builder-inl.h" +#include "ecmascript/compiler/object_access_helper.h" #include "ecmascript/compiler/pass_manager.h" namespace panda::ecmascript::kungfu { @@ -99,17 +100,19 @@ private: void LowerConditionJump(GateRef gate, bool flag); void LowerTypedNeg(GateRef gate); void LowerTypedNot(GateRef gate); + void LowerTypedLdObjByName(GateRef gate); - void 
LowerTypedLdObjByNameForClassOrObject(GateRef gate, GateRef receiver, JSTaggedValue prop); - void LowerTypedLdObjByNameForClassInstance(GateRef gate, GateRef receiver, JSTaggedValue prop); - void LowerTypedLdObjByNameForArray(GateRef gate, GateRef receiver, JSTaggedValue prop); + void LowerTypedStObjByName(GateRef gate, bool isThis); + using AccessMode = ObjectAccessHelper::AccessMode; + void LowerNamedAccess(GateRef gate, GateRef receiver, AccessMode accessMode, JSTaggedValue key, GateRef value); + GateRef BuildNamedPropertyAccess(GateRef hir, ObjectAccessHelper accessHelper, PropertyLookupResult plr); + void BuildNamedPropertyAccessVerifier(GateRef gate, GateRef receiver, AccessMode mode, GateRef value); + bool TryLowerTypedLdObjByNameForBuiltin(GateRef gate, GateType receiverType, JSTaggedValue key); + bool TryLowerTypedLdObjByNameForBuiltinMethod(GateRef gate, GateType receiverType, JSTaggedValue key); void LowerTypedLdArrayLength(GateRef gate); void LowerTypedLdTypedArrayLength(GateRef gate); - void LowerTypedStObjByName(GateRef gate, bool isThis); - void LowerTypedStObjByNameForClassOrObject(GateRef gate, GateRef receiver, GateRef value, - JSTaggedValue prop); - void LowerTypedStObjByNameForClassInstance(GateRef gate, GateRef receiver, GateRef value, - JSTaggedValue prop, bool isThis); + void LowerTypedLdStringLength(GateRef gate); + void LowerTypedLdObjByIndex(GateRef gate); void LowerTypedStObjByIndex(GateRef gate); void LowerTypedLdObjByValue(GateRef gate, bool isThis); @@ -134,13 +137,22 @@ private: bool CanOptimizeAsFastCall(GateRef func); void CheckCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, GateType funcType, const std::vector &args, const std::vector &argsFastCall); + void CheckCallTargetFromDefineFuncAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, + GateType funcType, const std::vector &args, const std::vector &argsFastCall, bool isNoGC); void CheckThisCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, GateType funcType, const std::vector &args, const std::vector &argsFastCall); - - GateRef LoadJSArrayByIndex(GateRef receiver, GateRef propKey); + void CheckCallThisCallTarget(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, + GateType funcType, bool isNoGC); + void CheckFastCallThisCallTarget(GateRef gate, GateRef func, GlobalTSTypeRef funcGt, + GateType funcType, bool isNoGC); + void LowerFastCall(GateRef gate, GateRef func, const std::vector &argsFastCall, bool isNoGC); + void LowerCall(GateRef gate, GateRef func, const std::vector &args, bool isNoGC); + GateRef LoadStringByIndex(GateRef receiver, GateRef propKey); + GateRef LoadJSArrayByIndex(GateRef receiver, GateRef propKey, ElementsKind kind); GateRef LoadTypedArrayByIndex(GateRef receiver, GateRef propKey); - void StoreJSArrayByIndex(GateRef receiver, GateRef propKey, GateRef value); + void StoreJSArrayByIndex(GateRef receiver, GateRef propKey, GateRef value, ElementsKind kind); void StoreTypedArrayByIndex(GateRef receiver, GateRef propKey, GateRef value); + bool IsCreateArray(GateRef receiver); // TypeTrusted means the type of gate is already PrimitiveTypeCheck-passed, // or the gate is constant and no need to check. 
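A minimal call-site sketch for the reworked named-access entry points declared above (editor's illustration, not part of the patch; the `AccessMode::LOAD` enumerator and the null-`value` convention for loads are assumptions inferred from the declarations):

```cpp
// The Try* helpers follow a boolean contract: true means the gate was fully lowered and
// replaced, false means the caller must fall back to the generic named-access lowering.
if (!TryLowerTypedLdObjByNameForBuiltin(gate, receiverType, key)) {
    // Assumed fallback: a load carries no stored value, so a null gate is passed.
    LowerNamedAccess(gate, receiver, AccessMode::LOAD, key, Circuit::NullGate());
}
```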
@@ -157,14 +169,18 @@ private: template void SpeculateNumber(GateRef gate); void SpeculateConditionJump(GateRef gate, bool flag); - void SpeculateCallBuiltin(GateRef gate, GateRef func, GateRef a0, BuiltinsStubCSigns::ID Op); - void SpeculateCallThis3Builtin(GateRef gate, BuiltinsStubCSigns::ID id); + void SpeculateCallBuiltin(GateRef gate, GateRef func, const std::vector &args, + BuiltinsStubCSigns::ID id, bool isThrow); BuiltinsStubCSigns::ID GetBuiltinId(BuiltinTypeId id, GateRef func); void DeleteConstDataIfNoUser(GateRef gate); void AddProfiling(GateRef gate); - void AddVTableLoadVerifer(GateRef gate, GateRef value); - void AddVTableStoreVerifer(GateRef gate, GateRef store, bool isThis); + + bool Uncheck() const + { + return noCheck_; + } + Circuit *circuit_ {nullptr}; GateAccessor acc_; CircuitBuilder builder_; @@ -181,7 +197,7 @@ private: std::string methodName_; GateRef glue_ {Circuit::NullGate()}; ArgumentAccessor argAcc_; - EcmaOpcode currentOp_; + EcmaOpcode currentOp_ {static_cast(0xff)}; PGOTypeLogList pgoTypeLog_; std::unordered_map bytecodeMap_; std::unordered_map bytecodeHitTimeMap_; diff --git a/ecmascript/compiler/ts_inline_lowering.cpp b/ecmascript/compiler/ts_inline_lowering.cpp index c7febf54f274b76e3a9953b6374d3330d0f9c36a..c951c220d5683c99c72e62ade0cc41d0025d2c5f 100644 --- a/ecmascript/compiler/ts_inline_lowering.cpp +++ b/ecmascript/compiler/ts_inline_lowering.cpp @@ -21,31 +21,37 @@ #include "ecmascript/ts_types/ts_type.h" #include "libpandabase/utils/utf.h" #include "libpandafile/class_data_accessor-inl.h" +#include "ecmascript/ts_types/ts_type_accessor.h" namespace panda::ecmascript::kungfu { void TSInlineLowering::RunTSInlineLowering() { - std::vector gateList; - circuit_->GetAllGates(gateList); - for (const auto &gate : gateList) { - auto op = acc_.GetOpCode(gate); - if (op == OpCode::JS_BYTECODE) { - TryInline(gate); - } + circuit_->AdvanceTime(); + ChunkQueue workList(chunk_); + UpdateWorkList(workList); + + while (!workList.empty()) { + CallGateInfo info = workList.front(); + workList.pop(); + TryInline(info, workList); } } -void TSInlineLowering::TryInline(GateRef gate) +void TSInlineLowering::CandidateInlineCall(GateRef gate, ChunkQueue &workList) { EcmaOpcode ecmaOpcode = acc_.GetByteCodeOpcode(gate); switch (ecmaOpcode) { - case EcmaOpcode::CALLARG0_IMM8: - case EcmaOpcode::CALLARG1_IMM8_V8: - case EcmaOpcode::CALLARGS2_IMM8_V8_V8: - case EcmaOpcode::CALLARGS3_IMM8_V8_V8_V8: - case EcmaOpcode::CALLRANGE_IMM8_IMM8_V8: - case EcmaOpcode::WIDE_CALLRANGE_PREF_IMM16_V8: - TryInline(gate, false); + case EcmaOpcode::LDOBJBYNAME_IMM8_ID16: + case EcmaOpcode::LDOBJBYNAME_IMM16_ID16: + case EcmaOpcode::LDTHISBYNAME_IMM8_ID16: + case EcmaOpcode::LDTHISBYNAME_IMM16_ID16: + CandidateAccessor(gate, workList, CallKind::CALL_GETTER); + break; + case EcmaOpcode::STOBJBYNAME_IMM8_ID16_V8: + case EcmaOpcode::STOBJBYNAME_IMM16_ID16_V8: + case EcmaOpcode::STTHISBYNAME_IMM8_ID16: + case EcmaOpcode::STTHISBYNAME_IMM16_ID16: + CandidateAccessor(gate, workList, CallKind::CALL_SETTER); break; case EcmaOpcode::CALLTHIS0_IMM8_V8: case EcmaOpcode::CALLTHIS1_IMM8_V8_V8: @@ -53,50 +59,67 @@ void TSInlineLowering::TryInline(GateRef gate) case EcmaOpcode::CALLTHIS3_IMM8_V8_V8_V8_V8: case EcmaOpcode::CALLTHISRANGE_IMM8_IMM8_V8: case EcmaOpcode::WIDE_CALLTHISRANGE_PREF_IMM16_V8: - TryInline(gate, true); + CandidateNormalCall(gate, workList, CallKind::CALL_THIS); + break; + case EcmaOpcode::CALLARG0_IMM8: + case EcmaOpcode::CALLARG1_IMM8_V8: + case EcmaOpcode::CALLARGS2_IMM8_V8_V8: + case 
EcmaOpcode::CALLARGS3_IMM8_V8_V8_V8: + case EcmaOpcode::CALLRANGE_IMM8_IMM8_V8: + case EcmaOpcode::WIDE_CALLRANGE_PREF_IMM16_V8: + CandidateNormalCall(gate, workList, CallKind::CALL); break; default: break; } } -void TSInlineLowering::TryInline(GateRef gate, bool isCallThis) +void TSInlineLowering::TryInline(CallGateInfo &info, ChunkQueue &workList) { + GateRef gate = info.GetCallGate(); // inline doesn't support try-catch bool inTryCatch = FilterCallInTryCatch(gate); if (inTryCatch) { return; } - // first elem is function in old isa - size_t funcIndex = acc_.GetNumValueIn(gate) - 1; - auto funcType = acc_.GetGateType(acc_.GetValueIn(gate, funcIndex)); + MethodLiteral* inlinedMethod = nullptr; - if (tsManager_->IsFunctionTypeKind(funcType)) { - GlobalTSTypeRef gt = funcType.GetGTRef(); - auto methodOffset = tsManager_->GetFuncMethodOffset(gt); - if (methodOffset == 0 || ctx_->IsSkippedMethod(methodOffset)) { - return; - } - inlinedMethod = ctx_->GetJSPandaFile()->FindMethodLiteral(methodOffset); - if (!CheckParameter(gate, isCallThis, inlinedMethod)) { - return; - } - auto &bytecodeInfo = ctx_->GetBytecodeInfo(); - auto &methodInfo = bytecodeInfo.GetMethodList().at(methodOffset); - auto &methodPcInfos = bytecodeInfo.GetMethodPcInfos(); - auto &methodPcInfo = methodPcInfos[methodInfo.GetMethodPcInfoIndex()]; - if (methodPcInfo.pcOffsets.size() <= maxInlineBytecodesCount_ && - inlinedCall_ < MAX_INLINE_CALL_ALLOWED) { - inlineSuccess_ = FilterInlinedMethod(inlinedMethod, methodPcInfo.pcOffsets); - if (inlineSuccess_) { - GateRef glue = acc_.GetGlueFromArgList(); - CircuitRootScope scope(circuit_); - if (!noCheck_) { - InlineFuncCheck(gate); - } - InlineCall(methodInfo, methodPcInfo, inlinedMethod, gate); - ReplaceCallInput(gate, isCallThis, glue, inlinedMethod); - inlinedCall_++; + GlobalTSTypeRef gt = info.GetFuncGT(); + auto methodOffset = tsManager_->GetFuncMethodOffset(gt); + if (methodOffset == 0 || ctx_->IsSkippedMethod(methodOffset)) { + return; + } + if (IsRecursiveFunc(info, methodOffset)) { + return; + } + inlinedMethod = ctx_->GetJSPandaFile()->FindMethodLiteral(methodOffset); + if (!CheckParameter(gate, info, inlinedMethod)) { + return; + } + auto &bytecodeInfo = ctx_->GetBytecodeInfo(); + auto &methodInfo = bytecodeInfo.GetMethodList().at(methodOffset); + auto &methodPcInfos = bytecodeInfo.GetMethodPcInfos(); + auto &methodPcInfo = methodPcInfos[methodInfo.GetMethodPcInfoIndex()]; + GateRef frameState = GetFrameState(info); + GateRef frameArgs = acc_.GetValueIn(frameState); + size_t inlineCallCounts = GetOrInitialInlineCounts(frameArgs); + if (IsSmallMethod(methodPcInfo.pcOffsets.size()) && !IsInlineCountsOverflow(inlineCallCounts)) { + inlineSuccess_ = FilterInlinedMethod(inlinedMethod, methodPcInfo.pcOffsets); + if (inlineSuccess_) { + GateRef glue = acc_.GetGlueFromArgList(); + SetInitCallTargetAndConstPoolId(info); + CircuitRootScope scope(circuit_); + AnalyseFastAccessor(info, methodPcInfo.pcOffsets, methodOffset); + if (!noCheck_) { + InlineCheck(info); + } + InlineCall(methodInfo, methodPcInfo, inlinedMethod, info); + ReplaceInput(info, glue, inlinedMethod); + UpdateInlineCounts(frameArgs, inlineCallCounts); + if (info.IsNormalCall()) { + UpdateWorkList(workList); + } else { + lastCallId_ = circuit_->GetGateCount() - 1; } } } @@ -151,7 +174,7 @@ bool TSInlineLowering::FilterInlinedMethod(MethodLiteral* method, std::vectorGetJSPandaFile(); TSManager *tsManager = ctx_->GetTSManager(); @@ -169,10 +192,11 @@ void TSInlineLowering::InlineCall(MethodInfo &methodInfo, 
MethodPcInfo &methodPC BytecodeCircuitBuilder builder(jsPandaFile, method, methodPCInfo, tsManager, circuit_, ctx_->GetByteCodes(), true, IsLogEnabled(), - enableTypeLowering_, fullName, recordName, ctx_->GetPfDecoder(), true); + enableTypeLowering_, fullName, recordName, ctx_->GetPfDecoder(), true, + passOptions_->EnableOptTrackField()); { if (enableTypeLowering_) { - BuildFrameStateChain(gate, builder); + BuildFrameStateChain(info, builder); } TimeScope timeScope("BytecodeToCircuit", methodName, method->GetMethodId().GetOffset(), log); builder.BytecodeToCircuit(); @@ -180,7 +204,7 @@ void TSInlineLowering::InlineCall(MethodInfo &methodInfo, MethodPcInfo &methodPC PassData data(&builder, circuit_, ctx_, log, fullName, &methodInfo, hasTyps, recordName, - method, method->GetMethodId().GetOffset(), nativeAreaAllocator_); + method, method->GetMethodId().GetOffset(), nativeAreaAllocator_, ctx_->GetPfDecoder(), passOptions_); PassRunner pipeline(&data); if (builder.EnableLoopOptimization()) { pipeline.RunPass(); @@ -189,17 +213,22 @@ void TSInlineLowering::InlineCall(MethodInfo &methodInfo, MethodPcInfo &methodPC pipeline.RunPass(); } -bool TSInlineLowering::CheckParameter(GateRef gate, bool isCallThis, MethodLiteral* method) +bool TSInlineLowering::CheckParameter(GateRef gate, CallGateInfo &info, MethodLiteral* method) { + if (info.IsCallAccessor()) { + return true; + } size_t numIns = acc_.GetNumValueIn(gate); - size_t fixedInputsNum = isCallThis ? 2 : 1; // 2: calltarget and this + size_t fixedInputsNum = info.IsCallThis() ? 2 : 1; // 2: calltarget and this uint32_t declaredNumArgs = method->GetNumArgsWithCallField(); return declaredNumArgs == (numIns - fixedInputsNum); } -void TSInlineLowering::ReplaceCallInput(GateRef gate, bool isCallThis, GateRef glue, MethodLiteral *method) +void TSInlineLowering::ReplaceCallInput(CallGateInfo &info, GateRef glue, MethodLiteral *method) { + GateRef gate = info.GetCallGate(); + bool isCallThis = info.IsCallThis(); std::vector vec; size_t numIns = acc_.GetNumValueIn(gate); // 1: last one elem is function @@ -228,7 +257,84 @@ void TSInlineLowering::ReplaceCallInput(GateRef gate, bool isCallThis, GateRef g for (size_t i = fixedInputsNum - 1; i < numIns - 1; i++) { vec.emplace_back(acc_.GetValueIn(gate, i)); } - LowerToInlineCall(gate, vec, method); + LowerToInlineCall(info, vec, method); +} + +void TSInlineLowering::ReplaceAccessorInput(CallGateInfo &info, GateRef glue, MethodLiteral *method) +{ + GateRef gate = info.GetCallGate(); + std::vector vec; + GateRef thisObj = GetAccessorReceiver(gate); + GateRef callTarget = Circuit::NullGate(); + // Fast accessor will not load getter or setter func + if (EnableFastAccessor()) { + callTarget = initCallTarget_; + } else { + callTarget = BuildAccessor(info); + } + size_t actualArgc = 0; + if (info.IsCallGetter()) { + actualArgc = NUM_MANDATORY_JSFUNC_ARGS; + } else if (info.IsCallSetter()) { + actualArgc = NUM_MANDATORY_JSFUNC_ARGS + 1; + } else { + UNREACHABLE(); + } + + vec.emplace_back(glue); // glue + if (!method->IsFastCall()) { + vec.emplace_back(builder_.Int64(actualArgc)); // argc + } + vec.emplace_back(callTarget); + if (!method->IsFastCall()) { + vec.emplace_back(builder_.Undefined()); // newTarget + } + vec.emplace_back(thisObj); + + if (info.IsCallSetter()) { + vec.emplace_back(GetCallSetterValue(gate)); + } + LowerToInlineCall(info, vec, method); +} + +GateRef TSInlineLowering::BuildAccessor(CallGateInfo &info) +{ + GateRef gate = info.GetCallGate(); + GateRef depend = acc_.GetDep(gate); + GateRef 
receiver = GetAccessorReceiver(gate); + GateRef accessor = Circuit::NullGate(); + uint32_t plrData = GetPlrData(receiver, acc_.GetValueIn(gate, 1)); + if (info.IsCallGetter()) { + accessor = circuit_->NewGate(circuit_->LoadGetter(), MachineType::I64, + {depend, receiver, builder_.Int32(plrData)}, GateType::AnyType()); + } else { + accessor = circuit_->NewGate(circuit_->LoadSetter(), MachineType::I64, + {depend, receiver, builder_.Int32(plrData)}, GateType::AnyType()); + } + acc_.ReplaceDependIn(gate, accessor); + return accessor; +} + +uint32_t TSInlineLowering::GetPlrData(GateRef receiver, GateRef constData) +{ + uint16_t propIndex = acc_.GetConstantValue(constData); + auto prop = tsManager_->GetStringFromConstantPool(propIndex); + GateType receiverType = acc_.GetGateType(receiver); + receiverType = tsManager_->TryNarrowUnionType(receiverType); + int hclassIndex = tsManager_->GetHClassIndexByInstanceGateType(receiverType); + JSHClass *hclass = JSHClass::Cast(tsManager_->GetValueFromCache(hclassIndex).GetTaggedObject()); + PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(tsManager_->GetThread(), hclass, prop); + return plr.GetData(); +} + +void TSInlineLowering::ReplaceInput(CallGateInfo &info, GateRef glue, MethodLiteral *method) +{ + if (info.IsNormalCall()) { + ReplaceCallInput(info, glue, method); + } else { + ASSERT(info.IsCallAccessor()); + ReplaceAccessorInput(info, glue, method); + } } GateRef TSInlineLowering::MergeAllReturn(const std::vector &returnVector, GateRef &state, GateRef &depend) @@ -310,6 +416,7 @@ void TSInlineLowering::ReplaceReturnGate(GateRef callGate) } else { value = MergeAllReturn(returnVector, state, depend); } + SupplementType(callGate, value); ReplaceHirAndDeleteState(callGate, state, depend, value); } @@ -331,14 +438,16 @@ void TSInlineLowering::ReplaceHirAndDeleteState(GateRef gate, GateRef state, Gat acc_.DeleteGate(gate); } -void TSInlineLowering::LowerToInlineCall(GateRef callGate, const std::vector &args, MethodLiteral* method) +void TSInlineLowering::LowerToInlineCall(CallGateInfo &info, const std::vector &args, MethodLiteral* method) { + GateRef callGate = info.GetCallGate(); // replace in value/args ArgumentAccessor argAcc(circuit_); ASSERT(argAcc.ArgsCount() == args.size()); for (size_t i = 0; i < argAcc.ArgsCount(); i++) { GateRef arg = argAcc.ArgsAt(i); acc_.UpdateAllUses(arg, args.at(i)); + acc_.SetGateType(args.at(i), acc_.GetGateType(arg)); acc_.DeleteGate(arg); } // replace in depend and state @@ -349,7 +458,8 @@ void TSInlineLowering::LowerToInlineCall(GateRef callGate, const std::vector(CommonArgIdx::FUNC)); } - GateRef callerFunc = argAcc.GetFrameArgsIn(callGate, FrameArgIdx::FUNC); + GateRef frameArgs = GetFrameArgs(info); + GateRef callerFunc = acc_.GetValueIn(frameArgs, 0); ReplaceEntryGate(callGate, callerFunc, inlineFunc, glue); // replace use gate ReplaceReturnGate(callGate); @@ -357,24 +467,52 @@ void TSInlineLowering::LowerToInlineCall(GateRef callGate, const std::vectorGetFuncMethodOffset(funcGt); - GateRef ret = circuit_->NewGate(circuit_->JSInlineTargetTypeCheck(static_cast(type.Value())), + GateRef ret = circuit_->NewGate(circuit_->JSInlineTargetTypeCheck(info.GetType()), MachineType::I1, {callState, callDepend, inlineFunc, builder_.IntPtr(methodOffset), frameState}, GateType::NJSValue()); acc_.ReplaceStateIn(gate, ret); acc_.ReplaceDependIn(gate, ret); } +void TSInlineLowering::InlineAccessorCheck(GateRef gate, GateRef receiver) +{ + GateRef callState = acc_.GetState(gate); + GateRef callDepend = acc_.GetDep(gate); 
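+    // The guard built below re-validates the receiver against the hclass recorded for its
+    // instance type; if the object's shape has changed, the nearest frame state is used to
+    // deoptimize instead of entering the inlined accessor body.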
+ GateType receiverType = acc_.GetGateType(receiver); + receiverType = tsManager_->TryNarrowUnionType(receiverType); + int hclassIndex = tsManager_->GetHClassIndexByInstanceGateType(receiverType); + GateRef hclassIndexGate = builder_.IntPtr(hclassIndex); + auto frameState = acc_.FindNearestFrameState(callDepend); + GateRef ret = circuit_->NewGate(circuit_->InlineAccessorCheck(static_cast(receiverType.Value())), + MachineType::I1, {callState, callDepend, receiver, hclassIndexGate, frameState}, GateType::NJSValue()); + acc_.ReplaceStateIn(gate, ret); + acc_.ReplaceDependIn(gate, ret); +} + +void TSInlineLowering::InlineCheck(CallGateInfo &info) +{ + if (info.IsNormalCall()) { + InlineFuncCheck(info); + } else { + ASSERT(info.IsCallAccessor()); + GateRef gate = info.GetCallGate(); + GateRef receiver = GetAccessorReceiver(gate); + InlineAccessorCheck(gate, receiver); + } +} + void TSInlineLowering::RemoveRoot() { GateRef circuitRoot = acc_.GetCircuitRoot(); @@ -386,17 +524,9 @@ void TSInlineLowering::RemoveRoot() acc_.DeleteGate(circuitRoot); } -void TSInlineLowering::BuildFrameStateChain(GateRef gate, BytecodeCircuitBuilder &builder) +void TSInlineLowering::BuildFrameStateChain(CallGateInfo &info, BytecodeCircuitBuilder &builder) { - GateRef stateSplit = Circuit::NullGate(); - if (noCheck_) { - stateSplit = acc_.GetDep(gate); - } else { - GateRef check = acc_.GetDep(gate); - stateSplit = acc_.GetDep(check); - } - ASSERT(acc_.GetOpCode(stateSplit) == OpCode::STATE_SPLIT); - GateRef preFrameState = acc_.GetFrameState(stateSplit); + GateRef preFrameState = GetFrameState(info); ASSERT(acc_.GetOpCode(preFrameState) == OpCode::FRAME_STATE); builder.SetPreFrameState(preFrameState); } @@ -411,4 +541,213 @@ bool TSInlineLowering::FilterCallInTryCatch(GateRef gate) } return false; } + +void TSInlineLowering::SupplementType(GateRef callGate, GateRef targetGate) +{ + GateType callGateType = acc_.GetGateType(callGate); + GateType targetGateType = acc_.GetGateType(targetGate); + if (!callGateType.IsAnyType() && targetGateType.IsAnyType()) { + acc_.SetGateType(targetGate, callGateType); + } +} + +void TSInlineLowering::UpdateWorkList(ChunkQueue &workList) +{ + std::vector gateList; + circuit_->GetAllGates(gateList); + for (const auto &gate : gateList) { + if (acc_.GetId(gate) <= lastCallId_) { + continue; + } + auto op = acc_.GetOpCode(gate); + if (op == OpCode::JS_BYTECODE) { + CandidateInlineCall(gate, workList); + } + } +} + +size_t TSInlineLowering::GetOrInitialInlineCounts(GateRef frameArgs) +{ + auto it = inlinedCallMap_.find(frameArgs); + if (it == inlinedCallMap_.end()) { + inlinedCallMap_[frameArgs] = 0; + } + return inlinedCallMap_[frameArgs]; +} + + +bool TSInlineLowering::IsRecursiveFunc(CallGateInfo &info, size_t calleeMethodOffset) +{ + GateRef frameArgs = GetFrameArgs(info); + GateRef caller = acc_.GetValueIn(frameArgs); + auto funcType = acc_.GetGateType(caller); + GlobalTSTypeRef gt = funcType.GetGTRef(); + if (!tsManager_->IsFunctionTypeKind(gt)) { + return false; + } + auto callerMethodOffset = tsManager_->GetFuncMethodOffset(gt); + return callerMethodOffset == calleeMethodOffset; +} + +bool TSInlineLowering::IsAccessor(GateRef receiver, GateRef constData) +{ + uint16_t propIndex = acc_.GetConstantValue(constData); + auto prop = tsManager_->GetStringFromConstantPool(propIndex); + GateType receiverType = acc_.GetGateType(receiver); + receiverType = tsManager_->TryNarrowUnionType(receiverType); + if (tsManager_->IsClassInstanceTypeKind(receiverType)) { + int hclassIndex = 
tsManager_->GetHClassIndexByInstanceGateType(receiverType); + if (hclassIndex == -1) { + return false; + } + JSHClass *hclass = JSHClass::Cast(tsManager_->GetValueFromCache(hclassIndex).GetTaggedObject()); + if (!hclass->HasTSSubtyping()) { + return false; + } + PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(tsManager_->GetThread(), hclass, prop); + if (!plr.IsFound()) { + return false; + } + + return plr.IsAccessor(); + } + return false; +} + +GlobalTSTypeRef TSInlineLowering::GetAccessorFuncGT(GateRef receiver, GateRef constData, bool isCallSetter) +{ + GateType receiverType = acc_.GetGateType(receiver); + receiverType = tsManager_->TryNarrowUnionType(receiverType); + GlobalTSTypeRef classInstanceGT = receiverType.GetGTRef(); + GlobalTSTypeRef classGT = tsManager_->GetClassType(classInstanceGT); + TSTypeAccessor tsTypeAcc(tsManager_, classGT); + uint16_t propIndex = acc_.GetConstantValue(constData); + auto prop = tsManager_->GetStringFromConstantPool(propIndex); + GlobalTSTypeRef funcGT = tsTypeAcc.GetAccessorGT(prop, isCallSetter); + return funcGT; +} + +void TSInlineLowering::CandidateAccessor(GateRef gate, ChunkQueue &workList, CallKind kind) +{ + GateRef receiver = GetAccessorReceiver(gate); + GateRef constData = acc_.GetValueIn(gate, 1); + if (IsAccessor(receiver, constData)) { + GlobalTSTypeRef gt = GetAccessorFuncGT(receiver, constData, IsCallSetter(kind)); + if (!gt.IsDefault()) { + workList.push(CallGateInfo(gate, kind, gt, 0)); + lastCallId_ = acc_.GetId(gate); + } + } +} + +void TSInlineLowering::CandidateNormalCall(GateRef gate, ChunkQueue &workList, CallKind kind) +{ + size_t funcIndex = acc_.GetNumValueIn(gate) - 1; + auto funcType = acc_.GetGateType(acc_.GetValueIn(gate, funcIndex)); + if (tsManager_->IsFunctionTypeKind(funcType)) { + GlobalTSTypeRef gt = funcType.GetGTRef(); + workList.push(CallGateInfo(gate, kind, gt, funcType.Value())); + lastCallId_ = acc_.GetId(gate); + } +} + +GateRef TSInlineLowering::GetAccessorReceiver(GateRef gate) +{ + EcmaOpcode ecmaOpcode = acc_.GetByteCodeOpcode(gate); + if (UNLIKELY(ecmaOpcode == EcmaOpcode::STTHISBYNAME_IMM8_ID16 || + ecmaOpcode == EcmaOpcode::STTHISBYNAME_IMM16_ID16)) { + return argAcc_.GetFrameArgsIn(gate, FrameArgIdx::THIS_OBJECT); + } + return acc_.GetValueIn(gate, 2); // 2: receiver +} + +GateRef TSInlineLowering::GetCallSetterValue(GateRef gate) +{ + EcmaOpcode ecmaOpcode = acc_.GetByteCodeOpcode(gate); + if (ecmaOpcode == EcmaOpcode::STTHISBYNAME_IMM8_ID16 || + ecmaOpcode == EcmaOpcode::STTHISBYNAME_IMM16_ID16) { + return acc_.GetValueIn(gate, 2); // 2: value + } + return acc_.GetValueIn(gate, 3); // 3: value +} + +GateRef TSInlineLowering::GetFrameState(CallGateInfo &info) +{ + GateRef gate = info.GetCallGate(); + if (info.IsNormalCall()) { + return acc_.GetFrameState(gate); + } + ASSERT(info.IsCallAccessor()); + GateRef frameState = acc_.FindNearestFrameState(gate); + return frameState; +} + +GateRef TSInlineLowering::GetFrameArgs(CallGateInfo &info) +{ + GateRef frameState = GetFrameState(info); + return acc_.GetValueIn(frameState); +} + +void TSInlineLowering::SetInitCallTargetAndConstPoolId(CallGateInfo &info) +{ + if (initCallTarget_ == Circuit::NullGate()) { + GateRef frameArgs = GetFrameArgs(info); + initCallTarget_ = acc_.GetValueIn(frameArgs, 0); + const JSPandaFile *pf = ctx_->GetJSPandaFile(); + initConstantPoolId_ = tsManager_->GetConstantPoolIDByMethodOffset(pf, initMethodOffset_); + } +} + +void TSInlineLowering::AnalyseFastAccessor(CallGateInfo &info, std::vector pcOffsets, + uint32_t 
inlineMethodOffset) +{ + isFastAccessor_ = false; + if (!info.IsCallAccessor()) { + return; + } + const JSPandaFile *pf = ctx_->GetJSPandaFile(); + int32_t constantpoolId = tsManager_->GetConstantPoolIDByMethodOffset(pf, inlineMethodOffset); + if (constantpoolId == initConstantPoolId_) { + for (size_t i = 0; i < pcOffsets.size(); i++) { + auto pc = pcOffsets[i]; + auto ecmaOpcode = ctx_->GetByteCodes()->GetOpcode(pc); + // These bytecodes require calltarget during the lowering process, so the acquisition of the accessor + // function cannot be omitted. + switch (ecmaOpcode) { + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: + case EcmaOpcode::STMODULEVAR_IMM8: + case EcmaOpcode::WIDE_STMODULEVAR_PREF_IMM16: + case EcmaOpcode::DYNAMICIMPORT: + case EcmaOpcode::LDLOCALMODULEVAR_IMM8: + case EcmaOpcode::WIDE_LDLOCALMODULEVAR_PREF_IMM16: + case EcmaOpcode::LDEXTERNALMODULEVAR_IMM8: + case EcmaOpcode::WIDE_LDEXTERNALMODULEVAR_PREF_IMM16: + case EcmaOpcode::GETMODULENAMESPACE_IMM8: + case EcmaOpcode::WIDE_GETMODULENAMESPACE_PREF_IMM16: + case EcmaOpcode::SUPERCALLSPREAD_IMM8_V8: + case EcmaOpcode::NEWLEXENVWITHNAME_IMM8_ID16: + case EcmaOpcode::WIDE_NEWLEXENVWITHNAME_PREF_IMM16_ID16: + case EcmaOpcode::LDSUPERBYVALUE_IMM8_V8: + case EcmaOpcode::LDSUPERBYVALUE_IMM16_V8: + case EcmaOpcode::STSUPERBYVALUE_IMM16_V8_V8: + case EcmaOpcode::STSUPERBYVALUE_IMM8_V8_V8: + case EcmaOpcode::LDSUPERBYNAME_IMM8_ID16: + case EcmaOpcode::LDSUPERBYNAME_IMM16_ID16: + case EcmaOpcode::STSUPERBYNAME_IMM8_ID16_V8: + case EcmaOpcode::STSUPERBYNAME_IMM16_ID16_V8: + case EcmaOpcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8: + case EcmaOpcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8: + case EcmaOpcode::DEFINEFUNC_IMM8_ID16_IMM8: + case EcmaOpcode::DEFINEFUNC_IMM16_ID16_IMM8: + case EcmaOpcode::DEFINEMETHOD_IMM8_ID16_IMM8: + case EcmaOpcode::DEFINEMETHOD_IMM16_ID16_IMM8: + return; + default: + break; + } + } + isFastAccessor_ = true; + } +} } // namespace panda::ecmascript diff --git a/ecmascript/compiler/ts_inline_lowering.h b/ecmascript/compiler/ts_inline_lowering.h index 709a747c1a17e8ad3b0382c398620a0c07bb0e96..b4e6f945933bfc83f4ddc7390367ea0761215311 100644 --- a/ecmascript/compiler/ts_inline_lowering.h +++ b/ecmascript/compiler/ts_inline_lowering.h @@ -25,6 +25,13 @@ #include "ecmascript/jspandafile/js_pandafile.h" namespace panda::ecmascript::kungfu { +enum CallKind : uint8_t { + CALL, + CALL_THIS, + CALL_SETTER, + CALL_GETTER, + INVALID +}; class CircuitRootScope { public: explicit CircuitRootScope(Circuit *circuit) @@ -42,23 +49,84 @@ private: GateRef root_ { 0 }; }; +class CallGateInfo { +public: + explicit CallGateInfo(GateRef call, CallKind kind, GlobalTSTypeRef gt, uint32_t type) + : call_(call), kind_(kind), gt_(gt), type_(type) + { + } + + ~CallGateInfo() = default; + + GateRef GetCallGate() const + { + return call_; + } + + bool IsCallThis() const + { + return kind_ == CallKind::CALL_THIS; + } + + bool IsNormalCall() const + { + return kind_ == CallKind::CALL || kind_ == CallKind::CALL_THIS; + } + + bool IsCallAccessor() const + { + return kind_ == CallKind::CALL_SETTER || kind_ == CallKind::CALL_GETTER; + } + + bool IsCallGetter() const + { + return kind_ == CallKind::CALL_GETTER; + } + + bool IsCallSetter() const + { + return kind_ == CallKind::CALL_SETTER; + } + + GlobalTSTypeRef GetFuncGT() const + { + return gt_; + } + + uint32_t GetType() const + { + return type_; + } + +private: + GateRef call_ {Circuit::NullGate()}; + CallKind 
kind_ {CallKind::INVALID}; + GlobalTSTypeRef gt_; + uint32_t type_; +}; + class TSInlineLowering { public: - static constexpr size_t MAX_INLINE_CALL_ALLOWED = 5; + static constexpr size_t MAX_INLINE_CALL_ALLOWED = 6; TSInlineLowering(Circuit *circuit, PassContext *ctx, bool enableLog, const std::string& name, - NativeAreaAllocator* nativeAreaAllocator) + NativeAreaAllocator* nativeAreaAllocator, PassOptions *options, uint32_t methodOffset) : circuit_(circuit), acc_(circuit), builder_(circuit, ctx->GetCompilerConfig()), tsManager_(ctx->GetTSManager()), ctx_(ctx), + passOptions_(options), enableLog_(enableLog), methodName_(name), enableTypeLowering_(ctx->GetEcmaVM()->GetJSOptions().IsEnableTypeLowering()), traceInline_(ctx->GetEcmaVM()->GetJSOptions().GetTraceInline()), maxInlineBytecodesCount_(ctx->GetEcmaVM()->GetJSOptions().GetMaxInlineBytecodes()), nativeAreaAllocator_(nativeAreaAllocator), - noCheck_(ctx->GetEcmaVM()->GetJSOptions().IsCompilerNoCheck()) {} + noCheck_(ctx->GetEcmaVM()->GetJSOptions().IsCompilerNoCheck()), + chunk_(circuit->chunk()), + inlinedCallMap_(circuit->chunk()), + argAcc_(circuit), + initMethodOffset_(methodOffset) {} ~TSInlineLowering() = default; @@ -75,41 +143,91 @@ private: return methodName_; } - void TryInline(GateRef gate); - void TryInline(GateRef gate, bool isCallThis); + bool IsSmallMethod(size_t bcSize) const + { + return bcSize <= maxInlineBytecodesCount_; + } + + bool IsInlineCountsOverflow(size_t inlineCount) const + { + return inlineCount >= MAX_INLINE_CALL_ALLOWED; + } + + void UpdateInlineCounts(GateRef frameArgs, size_t inlineCallCounts) + { + inlinedCallMap_[frameArgs] = ++inlineCallCounts; + } + + bool EnableFastAccessor() const + { + return isFastAccessor_ && !traceInline_; + } + + bool IsCallSetter(CallKind kind) const + { + return kind == CallKind::CALL_SETTER; + } + + void CandidateInlineCall(GateRef gate, ChunkQueue &workList); + void TryInline(CallGateInfo &info, ChunkQueue &workList); bool FilterInlinedMethod(MethodLiteral* method, std::vector pcOffsets); bool FilterCallInTryCatch(GateRef gate); - void InlineCall(MethodInfo &methodInfo, MethodPcInfo &methodPCInfo, MethodLiteral* method, GateRef gate); - void ReplaceCallInput(GateRef gate, bool isCallThis, GateRef glue, MethodLiteral *method); - + void InlineCall(MethodInfo &methodInfo, MethodPcInfo &methodPCInfo, MethodLiteral* method, CallGateInfo &info); + void ReplaceCallInput(CallGateInfo &info, GateRef glue, MethodLiteral *method); void ReplaceEntryGate(GateRef callGate, GateRef callerFunc, GateRef inlineFunc, GateRef glue); void ReplaceReturnGate(GateRef callGate); - void ReplaceHirAndDeleteState(GateRef gate, GateRef state, GateRef depend, GateRef value); - GateRef MergeAllReturn(const std::vector &returnVector, GateRef &state, GateRef &depend); - bool CheckParameter(GateRef gate, bool isCallThis, MethodLiteral* method); - - void LowerToInlineCall(GateRef gate, const std::vector &args, MethodLiteral* method); + bool CheckParameter(GateRef gate, CallGateInfo &info, MethodLiteral* method); + void LowerToInlineCall(CallGateInfo &info, const std::vector &args, MethodLiteral* method); void RemoveRoot(); - void BuildFrameStateChain(GateRef gate, BytecodeCircuitBuilder &builder); + void BuildFrameStateChain(CallGateInfo &info, BytecodeCircuitBuilder &builder); GateRef TraceInlineFunction(GateRef glue, GateRef depend, std::vector &args, GateRef callGate); - void InlineFuncCheck(GateRef gate); + void InlineFuncCheck(const CallGateInfo &info); + void SupplementType(GateRef callGate, GateRef 
targetGate);
+    void UpdateWorkList(ChunkQueue<CallGateInfo> &workList);
+    size_t GetOrInitialInlineCounts(GateRef frameArgs);
+    bool IsRecursiveFunc(CallGateInfo &info, size_t calleeMethodOffset);
+    bool IsAccessor(GateRef receiver, GateRef constData);
+    GlobalTSTypeRef GetAccessorFuncType(GateRef receiver, GateRef constData);
+    void CandidateAccessor(GateRef gate, ChunkQueue<CallGateInfo> &workList, CallKind kind);
+    void CandidateNormalCall(GateRef gate, ChunkQueue<CallGateInfo> &workList, CallKind kind);
+    void InlineAccessorCheck(GateRef gate, GateRef receiver);
+    void InlineCheck(CallGateInfo &info);
+    GateRef GetAccessorReceiver(GateRef gate);
+    GateRef GetFrameArgs(CallGateInfo &info);
+    void ReplaceAccessorInput(CallGateInfo &info, GateRef glue, MethodLiteral *method);
+    void ReplaceInput(CallGateInfo &info, GateRef glue, MethodLiteral *method);
+    GateRef BuildAccessor(CallGateInfo &info);
+    uint32_t GetPlrData(GateRef receiver, GateRef constData);
+    GateRef GetCallSetterValue(GateRef gate);
+    GlobalTSTypeRef GetAccessorFuncGT(GateRef receiver, GateRef constData, bool isCallSetter);
+    GateRef GetFrameState(CallGateInfo &info);
+    void SetInitCallTargetAndConstPoolId(CallGateInfo &info);
+    void AnalyseFastAccessor(CallGateInfo &info, std::vector<const uint8_t*> pcOffsets, uint32_t inlineMethodOffset);
 
     Circuit *circuit_ {nullptr};
     GateAccessor acc_;
     CircuitBuilder builder_;
     TSManager *tsManager_ {nullptr};
     PassContext *ctx_ {nullptr};
+    PassOptions *passOptions_ {nullptr};
     bool enableLog_ {false};
     std::string methodName_;
-    size_t inlinedCall_ { 0 };
     bool enableTypeLowering_ {false};
     bool inlineSuccess_ {false};
     bool traceInline_ {false};
     size_t maxInlineBytecodesCount_ {0};
     NativeAreaAllocator *nativeAreaAllocator_ {nullptr};
     bool noCheck_ {false};
+    Chunk* chunk_ {nullptr};
+    ChunkMap<GateRef, size_t> inlinedCallMap_;
+    size_t lastCallId_ {0};
+    ArgumentAccessor argAcc_;
+    uint32_t initMethodOffset_ {0};
+    int32_t initConstantPoolId_ {0};
+    GateRef initCallTarget_ {Circuit::NullGate()};
+    bool isFastAccessor_ {false};
 };
 }  // panda::ecmascript::kungfu
-#endif // ECMASCRIPT_COMPILER_TS_INLINE_LOWERING_H
\ No newline at end of file
+#endif // ECMASCRIPT_COMPILER_TS_INLINE_LOWERING_H
diff --git a/ecmascript/compiler/type.h b/ecmascript/compiler/type.h
index 82d85c0346f3602aecacf489dd26205b6ab5f2cd..3d0abd0dd0de1ef4ef445f645d6f88b604ecc3f6 100644
--- a/ecmascript/compiler/type.h
+++ b/ecmascript/compiler/type.h
@@ -277,6 +277,7 @@ private:
 enum class ValueType : uint8_t {
     BOOL,
     INT32,
+    UINT32,
     FLOAT64,
     TAGGED_BOOLEAN,
     TAGGED_INT,
@@ -284,6 +285,12 @@ enum class ValueType : uint8_t {
     TAGGED_NUMBER,
 };
 
+enum class ConvertSupport : uint8_t {
+    ENABLE,
+    // Conversion from srcType to dstType is not supported; a 'deopt' is required to preserve semantic correctness.
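+    // Example (editor's assumption): a checked UINT32 -> INT32 conversion may overflow, so that
+    // edge would be marked DISABLE and lowered to a deopt rather than a lossy cast.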
+ DISABLE +}; + class Type { public: explicit Type(GateType payload); diff --git a/ecmascript/compiler/type_inference/global_type_infer.cpp b/ecmascript/compiler/type_inference/global_type_infer.cpp index 3158d98a06711dfa4f4d843b794f881a83662e34..f5cf879f819898a72644a6b11120e6a839963613 100644 --- a/ecmascript/compiler/type_inference/global_type_infer.cpp +++ b/ecmascript/compiler/type_inference/global_type_infer.cpp @@ -17,9 +17,9 @@ namespace panda::ecmascript::kungfu { GlobalTypeInfer::GlobalTypeInfer(PassContext *ctx, const uint32_t methodOffset, const CString &recordName, - PGOProfilerDecoder *decoder, bool enableLog) + PGOProfilerDecoder *decoder, bool enableOptTrackField, bool enableLog) : ctx_(ctx), jsPandaFile_(ctx_->GetJSPandaFile()), bcInfo_(ctx->GetBytecodeInfo()), methodOffset_(methodOffset), - recordName_(recordName), decoder_(decoder), enableLog_(enableLog), + recordName_(recordName), decoder_(decoder), enableOptTrackField_(enableOptTrackField), enableLog_(enableLog), enableGlobalTypeInfer_(ctx_->GetTSManager()->GetEcmaVM()->GetJSOptions().IsEnableGlobalTypeInfer()) { CollectNamespaceMethods(methodOffset); @@ -62,16 +62,15 @@ void GlobalTypeInfer::NewTypeInfer(const uint32_t methodOffset) auto methodLiteral = jsPandaFile_->FindMethodLiteral(methodOffset); std::string fullName = module->GetFuncName(methodLiteral, jsPandaFile_); - Circuit *circuit = - new Circuit(ctx_->GetNativeAreaAllocator(), module->GetDebugInfo(), - fullName.c_str(), ctx_->GetCompilerConfig()->Is64Bit()); - circuit->SetFrameType(FrameType::OPTIMIZED_JS_FUNCTION_FRAME); + Circuit *circuit = new Circuit(ctx_->GetNativeAreaAllocator(), module->GetDebugInfo(), + fullName.c_str(), ctx_->GetCompilerConfig()->Is64Bit(), + FrameType::OPTIMIZED_JS_FUNCTION_FRAME); circuits_.emplace_back(circuit); BytecodeCircuitBuilder *builder = new BytecodeCircuitBuilder(jsPandaFile_, methodLiteral, methodPcInfo, ctx_->GetTSManager(), circuit, ctx_->GetByteCodes(), jsPandaFile_->HasTSTypes(recordName_), enableLog_, true, - fullName, recordName_, decoder_, false); + fullName, recordName_, decoder_, false, enableOptTrackField_); builder->BytecodeToCircuit(); builders_.emplace_back(builder); diff --git a/ecmascript/compiler/type_inference/global_type_infer.h b/ecmascript/compiler/type_inference/global_type_infer.h index 190cc0f63c03692f7f87ce86078afe37727ab0cf..62fd390602e9394408679cb59b453270a645dcca 100644 --- a/ecmascript/compiler/type_inference/global_type_infer.h +++ b/ecmascript/compiler/type_inference/global_type_infer.h @@ -22,7 +22,7 @@ namespace panda::ecmascript::kungfu { class GlobalTypeInfer { public: GlobalTypeInfer(PassContext *ctx, const uint32_t methodOffset, const CString &recordName, - PGOProfilerDecoder *decoder, bool enableLog); + PGOProfilerDecoder *decoder, bool enableOptTrackField, bool enableLog); ~GlobalTypeInfer(); void ProcessTypeInference(BytecodeCircuitBuilder *builder, Circuit *circuit); @@ -55,6 +55,7 @@ private: uint32_t methodOffset_ {0}; const CString &recordName_; PGOProfilerDecoder *decoder_ {nullptr}; + bool enableOptTrackField_ {false}; bool enableLog_ {false}; bool enableGlobalTypeInfer_ {false}; std::set namespaceTypes_ {}; diff --git a/ecmascript/compiler/type_inference/method_type_infer.cpp b/ecmascript/compiler/type_inference/method_type_infer.cpp index 220daaae9a71e664109e8f6075cad00cd4648fa8..7ba060eb20599176adecf5c36b7addfee7788dbe 100644 --- a/ecmascript/compiler/type_inference/method_type_infer.cpp +++ b/ecmascript/compiler/type_inference/method_type_infer.cpp @@ -14,7 +14,6 @@ */ 
#include "ecmascript/compiler/type_inference/method_type_infer.h" - #include "ecmascript/jspandafile/js_pandafile_manager.h" #include "ecmascript/ts_types/ts_type_accessor.h" #include "ecmascript/ts_types/ts_type_parser.h" @@ -131,7 +130,7 @@ bool MethodTypeInfer::UpdateType(GateRef gate, const GateType type, bool savePre } if (type != preType) { - gateAccessor_.SetGateType(gate, type); + gateAccessor_.SetGateType(gate, HandleTypeCompatibility(preType, type)); return true; } return false; @@ -143,6 +142,14 @@ bool MethodTypeInfer::UpdateType(GateRef gate, const GlobalTSTypeRef &typeRef, b return UpdateType(gate, type, savePreType); } +GateType MethodTypeInfer::HandleTypeCompatibility(const GateType preType, const GateType type) const +{ + if (tsManager_->IsArrayTypeKind(preType) && tsManager_->IsBuiltinInstanceType(BuiltinTypeId::ARRAY, type)) { + return preType; + } + return type; +} + bool MethodTypeInfer::IsNewLexEnv(EcmaOpcode opcode) const { switch (opcode) { @@ -198,12 +205,7 @@ bool MethodTypeInfer::Infer(GateRef gate) switch (bytecodeInfo.GetOpcode()) { case EcmaOpcode::LDNAN: case EcmaOpcode::LDINFINITY: - case EcmaOpcode::MOD2_IMM8_V8: - case EcmaOpcode::AND2_IMM8_V8: - case EcmaOpcode::OR2_IMM8_V8: - case EcmaOpcode::XOR2_IMM8_V8: case EcmaOpcode::TONUMBER_IMM8: - case EcmaOpcode::TONUMERIC_IMM8: case EcmaOpcode::NEG_IMM8: case EcmaOpcode::EXP_IMM8_V8: case EcmaOpcode::STARRAYSPREAD_V8_V8: @@ -212,6 +214,9 @@ bool MethodTypeInfer::Infer(GateRef gate) case EcmaOpcode::ASHR2_IMM8_V8: case EcmaOpcode::SHR2_IMM8_V8: case EcmaOpcode::NOT_IMM8: + case EcmaOpcode::AND2_IMM8_V8: + case EcmaOpcode::OR2_IMM8_V8: + case EcmaOpcode::XOR2_IMM8_V8: return SetIntType(gate); case EcmaOpcode::LDBIGINT_ID16: return SetBigIntType(gate); @@ -254,6 +259,8 @@ bool MethodTypeInfer::Infer(GateRef gate) return InferSub2(gate); case EcmaOpcode::MUL2_IMM8_V8: return InferMul2(gate); + case EcmaOpcode::MOD2_IMM8_V8: + return InferMod2(gate); case EcmaOpcode::DIV2_IMM8_V8: return InferDiv2(gate); case EcmaOpcode::INC_IMM8: @@ -281,20 +288,23 @@ bool MethodTypeInfer::Infer(GateRef gate) return InferLdObjByName(gate); case EcmaOpcode::LDA_STR_ID16: return InferLdStr(gate); + case EcmaOpcode::TONUMERIC_IMM8: + return InferToNumberic(gate); case EcmaOpcode::CALLARG0_IMM8: case EcmaOpcode::CALLARG1_IMM8_V8: case EcmaOpcode::CALLARGS2_IMM8_V8_V8: case EcmaOpcode::CALLARGS3_IMM8_V8_V8_V8: case EcmaOpcode::CALLRANGE_IMM8_IMM8_V8: case EcmaOpcode::WIDE_CALLRANGE_PREF_IMM16_V8: + case EcmaOpcode::APPLY_IMM8_V8_V8: + return InferCallFunction(gate); case EcmaOpcode::CALLTHIS0_IMM8_V8: case EcmaOpcode::CALLTHIS1_IMM8_V8_V8: case EcmaOpcode::CALLTHIS2_IMM8_V8_V8_V8: case EcmaOpcode::CALLTHIS3_IMM8_V8_V8_V8_V8: case EcmaOpcode::CALLTHISRANGE_IMM8_IMM8_V8: case EcmaOpcode::WIDE_CALLTHISRANGE_PREF_IMM16_V8: - case EcmaOpcode::APPLY_IMM8_V8_V8: - return InferCallFunction(gate); + return InferCallMethod(gate); case EcmaOpcode::LDOBJBYVALUE_IMM8_V8: case EcmaOpcode::LDOBJBYVALUE_IMM16_V8: return InferLdObjByValue(gate); @@ -543,6 +553,31 @@ bool MethodTypeInfer::InferMul2(GateRef gate) return UpdateType(gate, GateType::NumberType()); } +/* + * Type Infer rule(satisfy commutative law): + * number % number = number + * int % number = number + * double % number = double + * int % int = int + * int % double = double + * double % double = double + */ +bool MethodTypeInfer::InferMod2(GateRef gate) +{ + // 2: number of value inputs + ASSERT(gateAccessor_.GetNumValueIn(gate) == 2); + auto firInType = 
gateAccessor_.GetGateType(gateAccessor_.GetValueIn(gate, 0)); + auto secInType = gateAccessor_.GetGateType(gateAccessor_.GetValueIn(gate, 1)); + if ((firInType.IsNumberType() && secInType.IsDoubleType()) || + (firInType.IsDoubleType() && secInType.IsNumberType())) { + return UpdateType(gate, GateType::DoubleType()); + } + if ((firInType.IsIntType() && secInType.IsIntType())) { + return UpdateType(gate, GateType::IntType()); + } + return UpdateType(gate, GateType::NumberType()); +} + /* * Type Infer rule(satisfy commutative law): * in type lowering, both elements will be changed to float64 firstly. @@ -590,6 +625,16 @@ bool MethodTypeInfer::InferIncDec(GateRef gate) return UpdateType(gate, GateType::NumberType()); } +bool MethodTypeInfer::InferToNumberic(GateRef gate) +{ + GateRef src = gateAccessor_.GetValueIn(gate, 0); + GateType srcType = gateAccessor_.GetGateType(src); + if (srcType.IsNumberType()) { + return UpdateType(gate, srcType); + } + return UpdateType(gate, GateType::NumberType()); +} + bool MethodTypeInfer::InferLdObjByIndex(GateRef gate) { // 2: number of value inputs @@ -601,12 +646,11 @@ bool MethodTypeInfer::InferLdObjByIndex(GateRef gate) return UpdateType(gate, type); } - if (tsManager_->IsInt32ArrayType(inValueType)) { + if (tsManager_->IsIntTypedArrayType(inValueType)) { return UpdateType(gate, GateType::IntType()); } - if (tsManager_->IsFloat32ArrayType(inValueType) || - tsManager_->IsFloat64ArrayType(inValueType)) { + if (tsManager_->IsDoubleTypedArrayType(inValueType)) { return UpdateType(gate, GateType::DoubleType()); } @@ -757,7 +801,7 @@ bool MethodTypeInfer::InferLdStr(GateRef gate) bool MethodTypeInfer::GetObjPropWithName(GateRef gate, GateType objType, uint64_t index) { JSTaggedValue name = tsManager_->GetStringFromConstantPool(index); - if (tsManager_->IsBuiltinArrayType(objType) || tsManager_->IsTypedArrayType(objType)) { + if (tsManager_->IsBuiltinInstanceType(BuiltinTypeId::ARRAY, objType) || tsManager_->IsTypedArrayType(objType)) { auto thread = tsManager_->GetThread(); JSTaggedValue lengthKey = thread->GlobalConstants()->GetLengthString(); if (JSTaggedValue::SameValue(name, lengthKey)) { @@ -778,7 +822,7 @@ bool MethodTypeInfer::GetObjPropWithName(GateRef gate, GateType objType, uint64_ return UpdateType(gate, type); } -bool MethodTypeInfer::InferCallFunction(GateRef gate) +bool MethodTypeInfer::InferCallMethod(GateRef gate) { // 1: last one elem is function size_t funcIndex = gateAccessor_.GetNumValueIn(gate) - 1; @@ -802,15 +846,27 @@ bool MethodTypeInfer::InferCallFunction(GateRef gate) // normal Call auto returnType = tsManager_->GetFuncReturnValueTypeGT(funcType); - return UpdateType(gate, returnType); - } - - if (tsManager_->IsIteratorInstanceTypeKind(funcType)) { + GateRef thisObj = gateAccessor_.GetValueIn(gate, 0); // 0: index of thisObject + auto thisObjType = gateAccessor_.GetGateType(thisObj); + return UpdateType(gate, HandleTypeCompatibility(thisObjType, GateType(returnType))); + } else if (tsManager_->IsIteratorInstanceTypeKind(funcType)) { GlobalTSTypeRef elementGT = tsManager_->GetIteratorInstanceElementGt(funcType); GlobalTSTypeRef iteratorResultInstanceType = tsManager_->GetOrCreateTSIteratorInstanceType( TSRuntimeType::ITERATOR_RESULT, elementGT); return UpdateType(gate, iteratorResultInstanceType); } + return UpdateType(gate, GateType::AnyType()); +} + +bool MethodTypeInfer::InferCallFunction(GateRef gate) +{ + // 1: last one elem is function + size_t funcIndex = gateAccessor_.GetNumValueIn(gate) - 1; + auto funcType = 
gateAccessor_.GetGateType(gateAccessor_.GetValueIn(gate, funcIndex)); + if (tsManager_->IsFunctionTypeKind(funcType)) { + auto returnType = tsManager_->GetFuncReturnValueTypeGT(funcType); + return UpdateType(gate, returnType); + } /* According to the ECMAScript specification, user-defined classes can only be instantiated by constructing (with * new keyword). However, a few builtin types can be called like a function. Upon the results of calling and * constructing, there are 4 categories of builtin types: @@ -835,8 +891,11 @@ * See the list of builtin types' constructors at: * https://tc39.es/ecma262/2021/#sec-constructor-properties-of-the-global-object */ - if (tsManager_->IsClassTypeKind(funcType) && tsManager_->IsBuiltin(funcType)) { + if (tsManager_->IsBuiltinObjectType(funcType)) { // For simplicity, calling and constructing are considered equivalent. + if (tsManager_->IsBuiltinClassType(BuiltinTypeId::ARRAY, funcType)) { + return UpdateType(gate, tsManager_->CreateArrayType()); + } return UpdateType(gate, tsManager_->CreateClassInstanceType(funcType)); } return UpdateType(gate, GateType::AnyType()); @@ -851,11 +910,10 @@ bool MethodTypeInfer::InferLdObjByValue(GateRef gate) auto elementType = tsManager_->GetArrayParameterTypeGT(objType); return UpdateType(gate, elementType); } - if (tsManager_->IsInt32ArrayType(objType)) { + if (tsManager_->IsIntTypedArrayType(objType)) { return UpdateType(gate, GateType::IntType()); } - if (tsManager_->IsFloat32ArrayType(objType) || - tsManager_->IsFloat64ArrayType(objType)) { + if (tsManager_->IsDoubleTypedArrayType(objType)) { return UpdateType(gate, GateType::DoubleType()); } // handle object @@ -1060,11 +1118,11 @@ bool MethodTypeInfer::InferLdExternalModuleVar(GateRef gate) JSHandle<TaggedArray> moduleArray(thread, moduleEnvironment); JSTaggedValue resolvedBinding = moduleArray->Get(index); // if resolvedBinding.IsHole(), means that importname is * or it belongs to empty Aot module. 
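+ // Only a ResolvedIndexBinding carries a usable export index; any other binding kind falls back to AnyType below.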
- if (resolvedBinding.IsHole()) { + if (!resolvedBinding.IsResolvedIndexBinding()) { return UpdateType(gate, GateType::AnyType()); } ResolvedIndexBinding *binding = ResolvedIndexBinding::Cast(resolvedBinding.GetTaggedObject()); - resolvedRecord = ModuleManager::GetRecordName(binding->GetModule()); + resolvedRecord = SourceTextModule::GetRecordName(binding->GetModule()); resolvedIndex = static_cast<int32_t>(binding->GetIndex()); if (tsManager_->HasExportGT(jsPandaFile, resolvedRecord, resolvedIndex)) { return UpdateType(gate, tsManager_->GetGTFromModuleMap(jsPandaFile, resolvedRecord, resolvedIndex)); diff --git a/ecmascript/compiler/type_inference/method_type_infer.h b/ecmascript/compiler/type_inference/method_type_infer.h index 072e17290a244aa40c963830856895a2e05b76bf..31d8f99d877e36396c27ad1528c62064c23691f2 100644 --- a/ecmascript/compiler/type_inference/method_type_infer.h +++ b/ecmascript/compiler/type_inference/method_type_infer.h @@ -66,6 +66,7 @@ private: // savePreType: save the previous type, which is true by default bool UpdateType(GateRef gate, const GateType type, bool savePreType = true); bool UpdateType(GateRef gate, const GlobalTSTypeRef &typeRef, bool savePreType = true); + GateType HandleTypeCompatibility(const GateType preType, const GateType type) const; bool ShouldInfer(const GateRef gate) const; bool Infer(GateRef gate); bool InferPhiGate(GateRef gate); @@ -83,8 +84,10 @@ private: bool InferAdd2(GateRef gate); bool InferSub2(GateRef gate); bool InferMul2(GateRef gate); + bool InferMod2(GateRef gate); bool InferDiv2(GateRef gate); bool InferIncDec(GateRef gate); + bool InferToNumberic(GateRef gate); bool InferLdObjByIndex(GateRef gate); bool InferLdGlobalVar(GateRef gate); bool InferReturnUndefined(GateRef gate); @@ -94,6 +97,7 @@ private: bool SetStGlobalBcType(GateRef gate, bool hasIC = false); bool InferLdStr(GateRef gate); bool InferCallFunction(GateRef gate); + bool InferCallMethod(GateRef gate); bool InferLdObjByValue(GateRef gate); bool InferGetNextPropName(GateRef gate); bool InferDefineGetterSetterByValue(GateRef gate); diff --git a/ecmascript/compiler/type_inference/pgo_type_infer.cpp b/ecmascript/compiler/type_inference/pgo_type_infer.cpp index 0b5a603313683d6fc61df33197a92430e0c62530..431ad425cf47f7fa4bdfa585a8004e485acafa73 100644 --- a/ecmascript/compiler/type_inference/pgo_type_infer.cpp +++ b/ecmascript/compiler/type_inference/pgo_type_infer.cpp @@ -34,40 +34,6 @@ void PGOTypeInfer::Run() } } -struct CollectedType { - CollectedType(Chunk *chunk) : - classTypes(chunk), classInstanceTypes(chunk), otherTypes(chunk) {} - - bool AllInSameKind() const - { - uint8_t kind = classTypes.empty() ? 0 : 1; - kind += classInstanceTypes.empty() ? 0 : 1; - kind += otherTypes.empty() ? 
0 : 1; - return kind == 1; - } - - ChunkSet<GateType> Merge(Chunk *chunk, TSManager *tsManager) - { - ChunkSet<GateType> inferTypes(chunk); - for (GateType type : classTypes) { - inferTypes.insert(type); - } - for (GateType type : classInstanceTypes) { - GlobalTSTypeRef gt = type.GetGTRef(); - GlobalTSTypeRef instanceGT = tsManager->CreateClassInstanceType(gt); - inferTypes.insert(GateType(instanceGT)); - } - for (GateType type : otherTypes) { - inferTypes.insert(type); - } - return inferTypes; - } - - ChunkSet<GateType> classTypes; - ChunkSet<GateType> classInstanceTypes; - ChunkSet<GateType> otherTypes; -}; - void PGOTypeInfer::RunTypeInfer(GateRef gate) { ASSERT(acc_.GetOpCode(gate) == OpCode::JS_BYTECODE); @@ -91,124 +57,27 @@ void PGOTypeInfer::RunTypeInfer(GateRef gate) case EcmaOpcode::STOWNBYNAME_IMM16_ID16_V8: InferStOwnByName(gate); break; + case EcmaOpcode::LDOBJBYVALUE_IMM8_V8: + case EcmaOpcode::LDOBJBYVALUE_IMM16_V8: + case EcmaOpcode::LDTHISBYVALUE_IMM8: + case EcmaOpcode::LDTHISBYVALUE_IMM16: + case EcmaOpcode::STOBJBYVALUE_IMM8_V8_V8: + case EcmaOpcode::STOBJBYVALUE_IMM16_V8_V8: + case EcmaOpcode::STTHISBYVALUE_IMM8_V8: + case EcmaOpcode::STTHISBYVALUE_IMM16_V8: + InferAccessObjByValue(gate); + break; + case EcmaOpcode::CREATEEMPTYARRAY_IMM8: + case EcmaOpcode::CREATEEMPTYARRAY_IMM16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: + InferCreateArray(gate); + break; default: break; } } -void PGOTypeInfer::CheckAndInsert(CollectedType &types, GateType type) -{ - if (tsManager_->IsClassTypeKind(type)) { - int hclassIndex = tsManager_->GetConstructorHClassIndexByClassGateType(type); - if (hclassIndex == -1) { - return; - } - types.classTypes.insert(type); - } else if (tsManager_->IsClassInstanceTypeKind(type)) { - int hclassIndex = tsManager_->GetHClassIndexByInstanceGateType(type); - if (hclassIndex == -1) { - return; - } - JSHClass *hclass = JSHClass::Cast(tsManager_->GetHClassFromCache(hclassIndex).GetTaggedObject()); - if (hclass->HasTSSubtyping()) { - GlobalTSTypeRef instanceGT = type.GetGTRef(); - type = GateType(tsManager_->GetClassType(instanceGT)); - types.classInstanceTypes.insert(type); - } - } else if (!type.IsAnyType()) { - types.otherTypes.insert(type); - } -} - -void PGOTypeInfer::CollectGateType(CollectedType &types, GateType tsType, PGORWOpType pgoTypes) -{ - CheckAndInsert(types, tsType); - - for (int i = 0; i < pgoTypes.GetCount(); i++) { - ClassType classType = pgoTypes.GetObjectInfo(i).GetClassType(); - GateType pgoType = tsManager_->GetGateTypeByPt(classType); - if (tsManager_->IsClassTypeKind(pgoType) && !pgoTypes.GetObjectInfo(i).InConstructor()) { - pgoType = GateType(tsManager_->CreateClassInstanceType(pgoType)); - } - CheckAndInsert(types, pgoType); - } -} - -void PGOTypeInfer::UpdateType(CollectedType &types, JSTaggedValue prop) -{ - ChunkSet<GateType> &classTypes = types.classTypes; - InferTypeForClass(classTypes, prop); - - ChunkSet<GateType> &classInstanceTypes = types.classInstanceTypes; - InferTypeForClass(classInstanceTypes, prop); -} - -void PGOTypeInfer::InferTypeForClass(ChunkSet<GateType> &types, JSTaggedValue prop) -{ - if (NoNeedUpdate(types)) { - return; - } - EliminateSubclassTypes(types); - ComputeCommonSuperClassTypes(types, prop); -} - -void PGOTypeInfer::EliminateSubclassTypes(ChunkSet<GateType> &types) -{ - std::set<GateType> deletedGates; - for (GateType type : types) { - if (deletedGates.find(type) != deletedGates.end()) { - continue; - } - - std::stack<GateType> ancestors; - GateType curType = type; - do { - if (types.find(curType) != types.end()) { - ancestors.push(curType); - } - } while (tsManager_->GetSuperGateType(curType)); - - ancestors.pop(); // top type is alive - while (!ancestors.empty()) { - curType = ancestors.top(); - ancestors.pop(); - auto it = deletedGates.find(curType); - if (it != deletedGates.end()) { - deletedGates.insert(curType); - } - } - } - for (GateType gateType : deletedGates) { - types.erase(gateType); - } -} - -void PGOTypeInfer::ComputeCommonSuperClassTypes(ChunkSet<GateType> &types, JSTaggedValue prop) -{ - JSThread *thread = tsManager_->GetEcmaVM()->GetJSThread(); - std::map<GateType, GateType> removeTypes; - for (GateType type : types) { - GateType curType = type; - GateType preType = type; - while (tsManager_->GetSuperGateType(curType)) { - JSHClass *ihc = JSHClass::Cast(tsManager_->GetTSHClass(curType).GetTaggedObject()); - PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread, ihc, prop); - if (!plr.IsFound()) { - break; - } - preType = curType; - } - if (type != preType) { - removeTypes[type] = preType; - } - } - - for (auto item : removeTypes) { - types.erase(item.first); - types.insert(item.second); - } -} - void PGOTypeInfer::Print() const { LOG_COMPILER(INFO) << " "; @@ -247,8 +116,8 @@ void PGOTypeInfer::AddProfiler(GateRef gate, GateType tsType, PGORWOpType pgoTyp Profiler::Value value; value.gate = gate; value.tsType = tsType; - for (int i = 0; i < pgoType.GetCount(); i++) { - value.pgoTypes.emplace_back(tsManager_->GetGateTypeByPt(pgoType.GetObjectInfo(i).GetClassType())); + for (uint32_t i = 0; i < pgoType.GetCount(); i++) { + value.pgoTypes.emplace_back(tsManager_->GetGateTypeByPt(pgoType.GetObjectInfo(i).GetProfileType())); } for (GateType type : inferTypes) { value.inferTypes.emplace_back(type); @@ -307,12 +176,59 @@ void PGOTypeInfer::InferStOwnByName(GateRef gate) UpdateTypeForRWOp(gate, receiver, prop); } +void PGOTypeInfer::InferCreateArray(GateRef gate) +{ + if (!builder_->ShouldPGOTypeInfer(gate)) { + return; + } + + ElementsKind kind = builder_->GetArrayElementsKind(gate); + if (Elements::IsGeneric(kind)) { + return; + } + + acc_.TrySetElementsKind(gate, kind); +} + +void PGOTypeInfer::InferAccessObjByValue(GateRef gate) +{ + if (!builder_->ShouldPGOTypeInfer(gate)) { + return; + } + GateRef receiver = Circuit::NullGate(); + GateRef propKey = Circuit::NullGate(); + EcmaOpcode ecmaOpcode = acc_.GetByteCodeOpcode(gate); + switch (ecmaOpcode) { + case EcmaOpcode::LDOBJBYVALUE_IMM8_V8: + case EcmaOpcode::LDOBJBYVALUE_IMM16_V8: + case EcmaOpcode::STOBJBYVALUE_IMM8_V8_V8: + case EcmaOpcode::STOBJBYVALUE_IMM16_V8_V8: + receiver = acc_.GetValueIn(gate, 1); // 1: receiver + propKey = acc_.GetValueIn(gate, 2); // 2: the third parameter + break; + case EcmaOpcode::LDTHISBYVALUE_IMM8: + case EcmaOpcode::LDTHISBYVALUE_IMM16: + case EcmaOpcode::STTHISBYVALUE_IMM8_V8: + case EcmaOpcode::STTHISBYVALUE_IMM16_V8: + receiver = argAcc_.GetFrameArgsIn(gate, FrameArgIdx::THIS_OBJECT); + propKey = acc_.GetValueIn(gate, 1); + break; + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } + UpdateTypeForRWOp(gate, receiver); + TrySetPropKeyKind(gate, propKey); + TrySetElementsKind(gate); +} + void PGOTypeInfer::UpdateTypeForRWOp(GateRef gate, GateRef receiver, JSTaggedValue prop) { GateType tsType = acc_.GetGateType(receiver); PGORWOpType pgoTypes = builder_->GetPGOType(gate); + CollectedType types(chunk_); - CollectGateType(types, tsType, pgoTypes); + helper_.CollectGateType(types, tsType, pgoTypes); // polymorphism is not currently supported, // all types must in the same kind @@ -321,13 +237,30 @@ void 
PGOTypeInfer::UpdateTypeForRWOp(GateRef gate, GateRef receiver, JSTaggedVal } // polymorphism is not currently supported - UpdateType(types, prop); - ChunkSet<GateType> inferTypes = types.Merge(chunk_, tsManager_); + ChunkSet<GateType> inferTypes = helper_.GetInferTypes(chunk_, types, prop); + AddProfiler(gate, tsType, pgoTypes, inferTypes); if (!IsMonoTypes(inferTypes)) { return; } - AddProfiler(gate, tsType, pgoTypes, inferTypes); acc_.SetGateType(receiver, *inferTypes.begin()); } + +void PGOTypeInfer::TrySetElementsKind(GateRef gate) +{ + ElementsKind kind = builder_->GetElementsKind(gate); + if (Elements::IsGeneric(kind)) { + return; + } + acc_.TrySetElementsKind(gate, kind); +} + +void PGOTypeInfer::TrySetPropKeyKind(GateRef gate, GateRef propKey) +{ + PGORWOpType pgoTypes = builder_->GetPGOType(gate); + GateType oldType = acc_.GetGateType(propKey); + if (oldType.IsAnyType() && IsMonoNumberType(pgoTypes)) { + acc_.SetGateType(propKey, GateType::NumberType()); + } +} } // namespace panda::ecmascript diff --git a/ecmascript/compiler/type_inference/pgo_type_infer.h b/ecmascript/compiler/type_inference/pgo_type_infer.h index 83e81ad94277e6e13a3828d8596f8e6494b689b7..45469155ecac6537057af8ae4d3a6a8029ce43b9 100644 --- a/ecmascript/compiler/type_inference/pgo_type_infer.h +++ b/ecmascript/compiler/type_inference/pgo_type_infer.h @@ -16,6 +16,7 @@ #ifndef ECMASCRIPT_COMPILER_TYPE_INFERENCE_PGO_TYPE_INFER_H #define ECMASCRIPT_COMPILER_TYPE_INFERENCE_PGO_TYPE_INFER_H +#include "ecmascript/compiler/type_inference/pgo_type_infer_helper.h" #include "ecmascript/compiler/gate_accessor.h" #include "ecmascript/ts_types/ts_manager.h" #include "ecmascript/compiler/argument_accessor.h" @@ -26,7 +27,7 @@ class PGOTypeInfer { public: PGOTypeInfer(Circuit *circuit, TSManager *tsManager, BytecodeCircuitBuilder *builder, const std::string &name, Chunk *chunk, bool enableLog) - : circuit_(circuit), acc_(circuit), argAcc_(circuit), tsManager_(tsManager), + : circuit_(circuit), acc_(circuit), argAcc_(circuit), tsManager_(tsManager), helper_(tsManager), builder_(builder), methodName_(name), chunk_(chunk), enableLog_(enableLog), profiler_(chunk) {} ~PGOTypeInfer() = default; @@ -59,23 +60,23 @@ private: return types.size() == 1; } - inline bool NoNeedUpdate(const ChunkSet<GateType> &types) const + inline bool IsMonoNumberType(PGORWOpType &pgoTypes) const { - return types.size() <= 1; + // During PGO profiling, "ldobjbyvalue" records the type of the key inside the square brackets. + // If that key type is "number", the access is marked as an "Element". 
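+ // A single collected record of kind "Element" therefore implies a monomorphic number-typed key.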
+ return pgoTypes.GetCount() == 1 && pgoTypes.GetObjectInfo(0).InElement(); } void RunTypeInfer(GateRef gate); void InferLdObjByName(GateRef gate); void InferStObjByName(GateRef gate, bool isThis); void InferStOwnByName(GateRef gate); + void InferAccessObjByValue(GateRef gate); + void InferCreateArray(GateRef gate); - void UpdateTypeForRWOp(GateRef gate, GateRef receiver, JSTaggedValue prop); - void CollectGateType(CollectedType &types, GateType tsType, PGORWOpType pgoTypes); - void UpdateType(CollectedType &types, JSTaggedValue prop); - void InferTypeForClass(ChunkSet<GateType> &types, JSTaggedValue prop); - void CheckAndInsert(CollectedType &types, GateType type); - void EliminateSubclassTypes(ChunkSet<GateType> &types); - void ComputeCommonSuperClassTypes(ChunkSet<GateType> &types, JSTaggedValue prop); + void UpdateTypeForRWOp(GateRef gate, GateRef receiver, JSTaggedValue prop = JSTaggedValue::Undefined()); + void TrySetElementsKind(GateRef gate); + void TrySetPropKeyKind(GateRef gate, GateRef propKey); void Print() const; void AddProfiler(GateRef gate, GateType tsType, PGORWOpType pgoType, ChunkSet<GateType>& inferTypes); @@ -84,6 +85,7 @@ private: GateAccessor acc_; ArgumentAccessor argAcc_; TSManager *tsManager_ {nullptr}; + PGOTypeInferHelper helper_; BytecodeCircuitBuilder *builder_ {nullptr}; const std::string &methodName_; Chunk *chunk_ {nullptr}; diff --git a/ecmascript/compiler/type_inference/pgo_type_infer_helper.cpp b/ecmascript/compiler/type_inference/pgo_type_infer_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..356ef156f8ca207d90d95079280fcf2121d185fd --- /dev/null +++ b/ecmascript/compiler/type_inference/pgo_type_infer_helper.cpp @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ecmascript/compiler/type_inference/pgo_type_infer_helper.h" +#include "ecmascript/ts_types/ts_type_accessor.h" + +namespace panda::ecmascript::kungfu { +bool ClassTypeStrategy::CheckAndInsert(CollectedType &types, const GateType &type, TSManager *tsManager) +{ + if (tsManager->IsUserDefinedClassTypeKind(type)) { + int hclassIndex = tsManager->GetConstructorHClassIndexByClassGateType(type); + if (hclassIndex == -1) { + return true; + } + types.classTypes.insert(type); + return true; + } + return false; +} + +void ClassTypeStrategy::Merge(ChunkSet<GateType> &inferTypes, CollectedType &types, + [[maybe_unused]] TSManager *tsManager) +{ + for (GateType type : types.classTypes) { + inferTypes.insert(type); + } +} + +bool ClassInstanceTypeStrategy::CheckAndInsert(CollectedType &types, const GateType &type, TSManager *tsManager) +{ + if (tsManager->IsClassInstanceTypeKind(type) && + tsManager->IsUserDefinedClassTypeKind(tsManager->GetClassType(type))) { + int hclassIndex = tsManager->GetHClassIndexByInstanceGateType(type); + if (hclassIndex == -1) { + return true; + } + JSHClass *hclass = JSHClass::Cast(tsManager->GetValueFromCache(hclassIndex).GetTaggedObject()); + if (hclass->HasTSSubtyping()) { + GlobalTSTypeRef instanceGT = type.GetGTRef(); + GateType classType = GateType(tsManager->GetClassType(instanceGT)); + types.classInstanceTypes.insert(classType); + } + return true; + } + return false; +} + +void ClassInstanceTypeStrategy::Merge(ChunkSet<GateType> &inferTypes, CollectedType &types, TSManager *tsManager) +{ + for (GateType type : types.classInstanceTypes) { + GlobalTSTypeRef gt = type.GetGTRef(); + GlobalTSTypeRef instanceGT = tsManager->CreateClassInstanceType(gt); + inferTypes.insert(GateType(instanceGT)); + } +} + +bool BuiltinTypeStrategy::CheckAndInsert(CollectedType &types, const GateType &type, TSManager *tsManager) +{ + if (tsManager->IsValidTypedArrayType(type)) { + types.builtinTypes.insert(type); + return true; + } + return false; +} + +void BuiltinTypeStrategy::Merge(ChunkSet<GateType> &inferTypes, CollectedType &types, + [[maybe_unused]] TSManager *tsManager) +{ + for (GateType type : types.builtinTypes) { + inferTypes.insert(type); + } +} + +bool OtherTypeStrategy::CheckAndInsert(CollectedType &types, const GateType &type, [[maybe_unused]] TSManager *tsManager) +{ + if (!type.IsAnyType()) { + types.otherTypes.insert(type); + return true; + } + return false; +} + +void OtherTypeStrategy::Merge(ChunkSet<GateType> &inferTypes, CollectedType &types, + [[maybe_unused]] TSManager *tsManager) +{ + for (GateType type : types.otherTypes) { + inferTypes.insert(type); + } +} + +void PGOTypeInferHelper::CollectGateType(CollectedType &types, GateType tsType, PGORWOpType pgoTypes) +{ + // for static TS union type + if (tsManager_->IsUnionTypeKind(tsType)) { + JSHandle<TSUnionType> unionType(tsManager_->GetTSType(tsType.GetGTRef())); + TaggedArray *components = TaggedArray::Cast(unionType->GetComponents().GetTaggedObject()); + uint32_t length = components->GetLength(); + for (uint32_t i = 0; i < length; ++i) { + GlobalTSTypeRef gt(components->Get(i).GetInt()); + CheckAndInsert(types, GateType(gt)); + } + } else { + CheckAndInsert(types, tsType); + } + + // for pgo type + for (uint32_t i = 0; i < pgoTypes.GetCount(); i++) { + ProfileType profileType = pgoTypes.GetObjectInfo(i).GetProfileType(); + GateType pgoType = tsManager_->GetGateTypeByPt(profileType); + if (pgoType.IsAnyType()) { + tsManager_->AddToSkipTrackFieldSet(profileType); + continue; + } + if (tsManager_->IsClassTypeKind(pgoType) && !pgoTypes.GetObjectInfo(i).InConstructor()) { + pgoType = GateType(tsManager_->CreateClassInstanceType(pgoType)); + } + CheckAndInsert(types, pgoType); + } +} + +ChunkSet<GateType> PGOTypeInferHelper::GetInferTypes(Chunk *chunk, CollectedType &types, JSTaggedValue prop) +{ + UpdateType(types, prop); + ChunkSet<GateType> inferTypes(chunk); + for (auto &strategy : strategies_) { + strategy->Merge(inferTypes, types, tsManager_); + } + return inferTypes; +} + +void PGOTypeInferHelper::CheckAndInsert(CollectedType &types, GateType type) +{ + for (auto &strategy : strategies_) { + if (strategy->CheckAndInsert(types, type, tsManager_)) { + return; + } + } +} + +void PGOTypeInferHelper::UpdateType(CollectedType &types, JSTaggedValue prop) +{ + ChunkSet<GateType> &classTypes = types.classTypes; + InferTypeForClass(classTypes, prop); + + ChunkSet<GateType> &classInstanceTypes = types.classInstanceTypes; + InferTypeForClass(classInstanceTypes, prop); + + ChunkSet<GateType> &builtinTypes = types.builtinTypes; + InferTypeForBuiltin(builtinTypes); +} + +void PGOTypeInferHelper::InferTypeForClass(ChunkSet<GateType> &types, JSTaggedValue prop) +{ + if (NoNeedUpdate(types)) { + return; + } + EliminateSubclassTypes(types); + ComputeCommonSuperClassTypes(types, prop); +} + +void PGOTypeInferHelper::InferTypeForBuiltin(ChunkSet<GateType> &types) +{ + if (NoNeedUpdate(types)) { + return; + } + std::set<GateType> deletedGates; + std::set<GlobalTSTypeRef> builtinGTSet; + + for (GateType type : types) { + GlobalTSTypeRef classGT = tsManager_->GetClassType(type); + auto it = builtinGTSet.find(classGT); + if (it != builtinGTSet.end()) { + deletedGates.insert(type); + continue; + } + builtinGTSet.insert(classGT); + } + for (GateType gateType : deletedGates) { + types.erase(gateType); + } +} + +void PGOTypeInferHelper::EliminateSubclassTypes(ChunkSet<GateType> &types) +{ + std::set<GateType> deletedGates; + for (GateType type : types) { + if (deletedGates.find(type) != deletedGates.end()) { + continue; + } + + std::stack<GateType> ancestors; + GateType curType = type; + do { + if (types.find(curType) != types.end()) { + ancestors.push(curType); + } + } while (tsManager_->GetSuperGateType(curType)); + + ancestors.pop(); // top type is alive + while (!ancestors.empty()) { + curType = ancestors.top(); + ancestors.pop(); + auto it = deletedGates.find(curType); + if (it == deletedGates.end()) { + deletedGates.insert(curType); + } + } + } + for (GateType gateType : deletedGates) { + types.erase(gateType); + } +} + +void PGOTypeInferHelper::ComputeCommonSuperClassTypes(ChunkSet<GateType> &types, JSTaggedValue prop) +{ + JSThread *thread = tsManager_->GetEcmaVM()->GetJSThread(); + std::map<GateType, GateType> removeTypes; + for (GateType type : types) { + GateType curType = type; + GateType preType = type; + while (tsManager_->GetSuperGateType(curType)) { + JSHClass *ihc = JSHClass::Cast(tsManager_->GetTSHClass(curType).GetTaggedObject()); + PropertyLookupResult plr = JSHClass::LookupPropertyInAotHClass(thread, ihc, prop); + if (!plr.IsFound()) { + break; + } + preType = curType; + } + if (type != preType) { + removeTypes[type] = preType; + } + } + + for (auto item : removeTypes) { + types.erase(item.first); + types.insert(item.second); + } +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/type_inference/pgo_type_infer_helper.h b/ecmascript/compiler/type_inference/pgo_type_infer_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..f4427776135ff5b831ed5774b8787bd6e6aec1fe --- /dev/null +++ b/ecmascript/compiler/type_inference/pgo_type_infer_helper.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_TYPE_INFERENCE_PGO_TYPE_INFER_HELPER_H +#define ECMASCRIPT_COMPILER_TYPE_INFERENCE_PGO_TYPE_INFER_HELPER_H + +#include "ecmascript/ts_types/ts_manager.h" + +namespace panda::ecmascript::kungfu { +#define TYPE_LIST(V) \ + V(Class, class) \ + V(ClassInstance, classInstance) \ + V(Builtin, builtin) \ + V(Other, other) + +struct CollectedType { + explicit CollectedType(Chunk *chunk) : + classTypes(chunk), + classInstanceTypes(chunk), + builtinTypes(chunk), + otherTypes(chunk) + {} + + bool AllInSameKind() const + { + uint8_t kind = classTypes.empty() ? 0 : 1; + kind += classInstanceTypes.empty() ? 0 : 1; + kind += builtinTypes.empty() ? 0 : 1; + kind += otherTypes.empty() ? 0 : 1; + return kind == 1; + } + +#define DEFINE_TYPE_SET(V, name) \ + ChunkSet<GateType> name##Types; + + TYPE_LIST(DEFINE_TYPE_SET) +#undef DEFINE_TYPE_SET +}; + +class TypeStrategy { +public: + TypeStrategy() = default; + virtual ~TypeStrategy() = default; + virtual bool CheckAndInsert(CollectedType &types, const GateType &type, TSManager *tsManager) = 0; + virtual void Merge(ChunkSet<GateType> &inferTypes, CollectedType &types, TSManager *tsManager) = 0; +}; + +#define DEFINE_TYPE_STRATEGY_DERIVED_CLASS(name, ...) \ + class name##TypeStrategy : public TypeStrategy { \ + public: \ + virtual bool CheckAndInsert(CollectedType &types, const GateType &type, TSManager *tsManager) override; \ + virtual void Merge(ChunkSet<GateType> &inferTypes, CollectedType &types, TSManager *tsManager) override; \ + }; + + TYPE_LIST(DEFINE_TYPE_STRATEGY_DERIVED_CLASS) +#undef DEFINE_TYPE_STRATEGY_DERIVED_CLASS + +class PGOTypeInferHelper { +public: + PGOTypeInferHelper(TSManager *tsManager) : tsManager_(tsManager) + { +#define ADD_STRATEGY(name, ...) \ + strategies_.push_back(std::make_unique<name##TypeStrategy>()); + + TYPE_LIST(ADD_STRATEGY) +#undef ADD_STRATEGY + } + ~PGOTypeInferHelper() = default; + + void CollectGateType(CollectedType &types, GateType tsType, PGORWOpType pgoTypes); + + ChunkSet<GateType> GetInferTypes(Chunk *chunk, CollectedType &types, JSTaggedValue prop); + +private: + inline bool NoNeedUpdate(const ChunkSet<GateType> &types) const + { + return types.size() <= 1; + } + + void CheckAndInsert(CollectedType &types, GateType type); + void UpdateType(CollectedType &types, JSTaggedValue prop); + void InferTypeForClass(ChunkSet<GateType> &types, JSTaggedValue prop); + void EliminateSubclassTypes(ChunkSet<GateType> &types); + void ComputeCommonSuperClassTypes(ChunkSet<GateType> &types, JSTaggedValue prop); + void InferTypeForBuiltin(ChunkSet<GateType> &types); + + TSManager *tsManager_ {nullptr}; + std::vector<std::unique_ptr<TypeStrategy>> strategies_ {}; +}; +#undef TYPE_LIST +} // panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_TYPE_INFERENCE_PGO_TYPE_INFER_HELPER_H diff --git a/ecmascript/compiler/type_mcr_lowering.cpp b/ecmascript/compiler/type_mcr_lowering.cpp index 3378c243b2cc57c892cec0198d54bcefaa459be3..c4747ee1f99e276d4b1acaade70388f6e939f609 100644 --- a/ecmascript/compiler/type_mcr_lowering.cpp +++ b/ecmascript/compiler/type_mcr_lowering.cpp @@ -15,6 +15,7 @@ #include "ecmascript/compiler/type_mcr_lowering.h" #include "ecmascript/compiler/builtins_lowering.h" +#include "ecmascript/compiler/gate_meta_data.h" #include "ecmascript/compiler/new_object_stub_builder.h" #include "ecmascript/deoptimizer/deoptimizer.h" #include "ecmascript/js_arraybuffer.h" @@ -34,7 +35,7 @@ void TypeMCRLowering::RunTypeMCRLowering() if (IsLogEnabled()) { LOG_COMPILER(INFO) << ""; LOG_COMPILER(INFO) << "\033[34m" << "==================" - << " after TypeMCRlowering " + << " After TypeMCRlowering " << "[" << GetMethodName() << "] " << "==================" << "\033[0m"; circuit_->PrintAllGatesWithBytecode(); @@ -56,29 +57,32 @@ void TypeMCRLowering::LowerType(GateRef gate) case OpCode::TYPED_ARRAY_CHECK: LowerTypedArrayCheck(gate); break; + case OpCode::ECMA_STRING_CHECK: + LowerEcmaStringCheck(gate); + break; + case OpCode::FLATTEN_STRING_CHECK: + LowerFlattenStringCheck(gate, glue); + break; + case OpCode::LOAD_STRING_LENGTH: + LowerStringLength(gate); + break; case OpCode::LOAD_TYPED_ARRAY_LENGTH: LowerLoadTypedArrayLength(gate); break; case OpCode::OBJECT_TYPE_CHECK: LowerObjectTypeCheck(gate); break; - case OpCode::INDEX_CHECK: - LowerIndexCheck(gate); - break; - case OpCode::JSCALLTARGET_FROM_DEFINEFUNC_CHECK: - LowerJSCallTargetFromDefineFuncCheck(gate); - break; - case OpCode::JSCALLTARGET_TYPE_CHECK: - LowerJSCallTargetTypeCheck(gate); + case OpCode::OBJECT_TYPE_COMPARE: + LowerObjectTypeCompare(gate); break; - case OpCode::JSFASTCALLTARGET_TYPE_CHECK: - LowerJSFastCallTargetTypeCheck(gate); + case OpCode::RANGE_CHECK_PREDICATE: + LowerRangeCheckPredicate(gate); break; - case OpCode::JSCALLTHISTARGET_TYPE_CHECK: - LowerJSCallThisTargetTypeCheck(gate); + case OpCode::INDEX_CHECK: + LowerIndexCheck(gate); break; - case OpCode::JSFASTCALLTHISTARGET_TYPE_CHECK: - LowerJSFastCallThisTargetTypeCheck(gate); + case OpCode::TYPED_CALLTARGETCHECK_OP: + LowerJSCallTargetCheck(gate); break; case OpCode::TYPED_CALL_CHECK: LowerCallTargetCheck(gate); @@ -123,11 +127,61 @@ void TypeMCRLowering::LowerType(GateRef gate) case OpCode::GET_SUPER_CONSTRUCTOR: LowerGetSuperConstructor(gate); break; + case OpCode::COW_ARRAY_CHECK: + LowerCowArrayCheck(gate, glue); + break; + case OpCode::LOAD_GETTER: + LowerLoadGetter(gate); + break; + case 
OpCode::LOAD_SETTER: + LowerLoadSetter(gate); + break; + case OpCode::INLINE_ACCESSOR_CHECK: + LowerInlineAccessorCheck(gate); + break; default: break; } } +void TypeMCRLowering::LowerJSCallTargetCheck(GateRef gate) +{ + TypedCallTargetCheckOp Op = acc_.GetTypedCallTargetCheckOp(gate); + switch (Op) { + case TypedCallTargetCheckOp::JSCALL_IMMEDIATE_AFTER_FUNC_DEF: { + LowerJSCallTargetFromDefineFuncCheck(gate); + break; + } + case TypedCallTargetCheckOp::JSCALL: { + LowerJSCallTargetTypeCheck(gate); + break; + } + case TypedCallTargetCheckOp::JSCALL_FAST: { + LowerJSFastCallTargetTypeCheck(gate); + break; + } + case TypedCallTargetCheckOp::JSCALLTHIS: { + LowerJSCallThisTargetTypeCheck(gate); + break; + } + case TypedCallTargetCheckOp::JSCALLTHIS_FAST: { + LowerJSFastCallThisTargetTypeCheck(gate); + break; + } + case TypedCallTargetCheckOp::JSCALLTHIS_NOGC: { + LowerJSNoGCCallThisTargetTypeCheck(gate); + break; + } + case TypedCallTargetCheckOp::JSCALLTHIS_FAST_NOGC: { + LowerJSNoGCFastCallThisTargetTypeCheck(gate); + break; + } + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } +} + void TypeMCRLowering::LowerPrimitiveTypeCheck(GateRef gate) { Environment env(gate, circuit_, &builder_); @@ -200,31 +254,60 @@ void TypeMCRLowering::LowerStableArrayCheck(GateRef gate) GateRef receiverHClass = builder_.LoadConstOffset( VariableType::JS_POINTER(), receiver, TaggedObject::HCLASS_OFFSET); - builder_.HClassStableArrayCheck(receiverHClass, frameState); + ArrayMetaDataAccessor accessor = acc_.GetArrayMetaDataAccessor(gate); + builder_.HClassStableArrayCheck(receiverHClass, frameState, accessor); builder_.ArrayGuardianCheck(frameState); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } +void TypeMCRLowering::SetDeoptTypeInfo(BuiltinTypeId id, DeoptType &type, size_t &funcIndex) +{ + type = DeoptType::NOTARRAY; + switch (id) { + case BuiltinTypeId::INT8_ARRAY: + funcIndex = GlobalEnv::INT8_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::UINT8_ARRAY: + funcIndex = GlobalEnv::UINT8_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::UINT8_CLAMPED_ARRAY: + funcIndex = GlobalEnv::UINT8_CLAMPED_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::INT16_ARRAY: + funcIndex = GlobalEnv::INT16_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::UINT16_ARRAY: + funcIndex = GlobalEnv::UINT16_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::INT32_ARRAY: + funcIndex = GlobalEnv::INT32_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::UINT32_ARRAY: + funcIndex = GlobalEnv::UINT32_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::FLOAT32_ARRAY: + funcIndex = GlobalEnv::FLOAT32_ARRAY_FUNCTION_INDEX; + break; + case BuiltinTypeId::FLOAT64_ARRAY: + funcIndex = GlobalEnv::FLOAT64_ARRAY_FUNCTION_INDEX; + break; + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } +} + void TypeMCRLowering::LowerTypedArrayCheck(GateRef gate) { Environment env(gate, circuit_, &builder_); auto type = acc_.GetParamGateType(gate); size_t typedArrayFuncIndex = GlobalEnv::TYPED_ARRAY_FUNCTION_INDEX; auto deoptType = DeoptType::NOTCHECK; - if (tsManager_->IsFloat32ArrayType(type)) { - typedArrayFuncIndex = GlobalEnv::FLOAT32_ARRAY_FUNCTION_INDEX; - deoptType = DeoptType::NOTF32ARRAY; - } else if (tsManager_->IsInt32ArrayType(type)) { - typedArrayFuncIndex = GlobalEnv::INT32_ARRAY_FUNCTION_INDEX; - deoptType = DeoptType::NOTI32ARRAY; - } else if (tsManager_->IsFloat64ArrayType(type)) { - typedArrayFuncIndex = 
GlobalEnv::FLOAT64_ARRAY_FUNCTION_INDEX; - deoptType = DeoptType::NOTF64ARRAY; - } else { - LOG_ECMA(FATAL) << "this branch is unreachable"; - UNREACHABLE(); - } + + auto builtinTypeId = tsManager_->GetTypedArrayBuiltinId(type); + SetDeoptTypeInfo(builtinTypeId, deoptType, typedArrayFuncIndex); + GateRef frameState = GetFrameState(gate); GateRef glueGlobalEnv = builder_.GetGlobalEnv(); GateRef receiver = acc_.GetValueIn(gate, 0); @@ -232,71 +315,153 @@ GateRef protoOrHclass = builder_.GetGlobalEnvObjHClass(glueGlobalEnv, typedArrayFuncIndex); GateRef check = builder_.Equal(receiverHClass, protoOrHclass); builder_.DeoptCheck(check, frameState, deoptType); - GateRef isOnHeap = builder_.LoadConstOffset(VariableType::BOOL(), receiver, JSTypedArray::ON_HEAP_OFFSET); - builder_.DeoptCheck(isOnHeap, frameState, DeoptType::NOTONHEAP); + + if (IsOnHeap()) { + GateRef isOnHeap = builder_.LoadConstOffset(VariableType::BOOL(), receiver, JSTypedArray::ON_HEAP_OFFSET); + builder_.DeoptCheck(isOnHeap, frameState, DeoptType::NOTONHEAP); + } + + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); +} + +void TypeMCRLowering::LowerEcmaStringCheck(GateRef gate) +{ + Environment env(gate, circuit_, &builder_); + GateRef frameState = GetFrameState(gate); + GateRef receiver = acc_.GetValueIn(gate, 0); + builder_.HeapObjectCheck(receiver, frameState); + GateRef isString = builder_.TaggedObjectIsString(receiver); + builder_.DeoptCheck(isString, frameState, DeoptType::NOTSTRING); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } +void TypeMCRLowering::LowerFlattenStringCheck(GateRef gate, GateRef glue) +{ + Environment env(gate, circuit_, &builder_); + GateRef str = acc_.GetValueIn(gate, 0); + DEFVAlUE(result, (&builder_), VariableType::JS_POINTER(), str); + Label isTreeString(&builder_); + Label notTreeString(&builder_); + Label needFlat(&builder_); + Label exit(&builder_); + + builder_.Branch(builder_.IsTreeString(str), &isTreeString, &notTreeString); + builder_.Bind(&isTreeString); + { + Label isFlat(&builder_); + builder_.Branch(builder_.TreeStringIsFlat(str), &isFlat, &needFlat); + builder_.Bind(&isFlat); + { + result = builder_.GetFirstFromTreeString(str); + builder_.Jump(&exit); + } + } + builder_.Bind(&notTreeString); + builder_.Branch(builder_.IsSlicedString(str), &needFlat, &exit); + builder_.Bind(&needFlat); + { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(SlowFlattenString), { str }, true); + builder_.Jump(&exit); + } + builder_.Bind(&exit); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), *result); +} + +void TypeMCRLowering::LowerStringLength(GateRef gate) +{ + Environment env(gate, circuit_, &builder_); + GateRef receiver = acc_.GetValueIn(gate, 0); + GateRef length = builder_.Int32LSR( + builder_.LoadConstOffset(VariableType::INT32(), receiver, EcmaString::MIX_LENGTH_OFFSET), builder_.Int32(2)); + + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), length); +} + void TypeMCRLowering::LowerLoadTypedArrayLength(GateRef gate) { Environment env(gate, circuit_, &builder_); GateRef receiver = acc_.GetValueIn(gate, 0); - GateRef length = builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::ARRAY_LENGTH_OFFSET)); + GateRef length = builder_.LoadConstOffset(VariableType::INT32(), receiver, JSTypedArray::ARRAY_LENGTH_OFFSET); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), length); } void 
TypeMCRLowering::LowerObjectTypeCheck(GateRef gate) { Environment env(gate, circuit_, &builder_); - auto type = acc_.GetParamGateType(gate); + GateType type = acc_.GetObjectTypeAccessor(gate).GetType(); if (tsManager_->IsClassInstanceTypeKind(type)) { LowerTSSubtypingCheck(gate); } else if (tsManager_->IsClassTypeKind(type) || tsManager_->IsObjectTypeKind(type)) { LowerSimpleHClassCheck(gate); } else { - LOG_ECMA(FATAL) << "this branch is unreachable"; + LOG_COMPILER(FATAL) << "this branch is unreachable"; UNREACHABLE(); } } +void TypeMCRLowering::LowerTSSubtypingCheck(GateRef gate) +{ + GateRef frameState = GetFrameState(gate); + Label levelValid(&builder_); + Label exit(&builder_); + GateRef compare = BuildCompareSubTyping(gate, frameState, &levelValid, &exit); + builder_.DeoptCheck(compare, frameState, DeoptType::INCONSISTENTHCLASS); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); +} + void TypeMCRLowering::LowerSimpleHClassCheck(GateRef gate) +{ + GateRef frameState = GetFrameState(gate); + GateRef compare = BuildCompareHClass(gate, frameState); + builder_.DeoptCheck(compare, frameState, DeoptType::INCONSISTENTHCLASS); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); +} + +void TypeMCRLowering::LowerObjectTypeCompare(GateRef gate) { Environment env(gate, circuit_, &builder_); - auto type = acc_.GetParamGateType(gate); - if (tsManager_->IsClassTypeKind(type) || - tsManager_->IsObjectTypeKind(type)) { - GateRef frameState = GetFrameState(gate); - GateRef receiver = acc_.GetValueIn(gate, 0); - builder_.HeapObjectCheck(receiver, frameState); - GateRef aotHCIndex = acc_.GetValueIn(gate, 1); - auto hclassIndex = acc_.GetConstantValue(aotHCIndex); - ArgumentAccessor argAcc(circuit_); - GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC); - GateRef aotHCGate = LoadFromConstPool(jsFunc, hclassIndex); - GateRef receiverHClass = builder_.LoadConstOffset( - VariableType::JS_POINTER(), receiver, TaggedObject::HCLASS_OFFSET); - GateRef check = builder_.Equal(aotHCGate, receiverHClass); - builder_.DeoptCheck(check, frameState, DeoptType::INCONSISTENTHCLASS); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); + auto type = acc_.GetObjectTypeAccessor(gate).GetType(); + if (tsManager_->IsClassInstanceTypeKind(type)) { + LowerTSSubtypingCompare(gate); + } else if (tsManager_->IsClassTypeKind(type) || + tsManager_->IsObjectTypeKind(type)) { + LowerSimpleHClassCompare(gate); } else { - LOG_ECMA(FATAL) << "this branch is unreachable"; + LOG_COMPILER(FATAL) << "this branch is unreachable"; UNREACHABLE(); } } -void TypeMCRLowering::LowerTSSubtypingCheck(GateRef gate) +void TypeMCRLowering::LowerSimpleHClassCompare(GateRef gate) { GateRef frameState = GetFrameState(gate); + GateRef compare = BuildCompareHClass(gate, frameState); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), compare); +} + +void TypeMCRLowering::LowerTSSubtypingCompare(GateRef gate) +{ + GateRef frameState = GetFrameState(gate); + Label levelValid(&builder_); + Label exit(&builder_); + GateRef compare = BuildCompareSubTyping(gate, frameState, &levelValid, &exit); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), compare); +} + +GateRef TypeMCRLowering::BuildCompareSubTyping(GateRef gate, GateRef frameState, Label *levelValid, Label *exit) +{ GateRef receiver = acc_.GetValueIn(gate, 0); - builder_.HeapObjectCheck(receiver, frameState); + bool isHeapObject = acc_.GetObjectTypeAccessor(gate).IsHeapObject(); + if (!isHeapObject) { + builder_.HeapObjectCheck(receiver, frameState); + } GateRef aotHCIndex = acc_.GetValueIn(gate, 1); ArgumentAccessor argAcc(circuit_); GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC); - DEFVAlUE(check, (&builder_), VariableType::BOOL(), builder_.False()); - JSTaggedValue aotHC = tsManager_->GetHClassFromCache(acc_.TryGetValue(aotHCIndex)); + JSTaggedValue aotHC = tsManager_->GetValueFromCache(acc_.TryGetValue(aotHCIndex)); ASSERT(aotHC.IsJSHClass()); int32_t level = JSHClass::Cast(aotHC.GetTaggedObject())->GetLevel(); @@ -310,54 +475,87 @@ void TypeMCRLowering::LowerTSSubtypingCheck(GateRef gate) GateRef aotHCGate = LoadFromConstPool(jsFunc, hclassIndex); if (LIKELY(static_cast<size_t>(level) < SubtypingOperator::DEFAULT_SUPERS_CAPACITY)) { + return builder_.Equal(aotHCGate, GetValueFromSupers(supers, level)); + } + + DEFVAlUE(check, (&builder_), VariableType::BOOL(), builder_.False()); + GateRef levelGate = builder_.Int32(level); + GateRef length = GetLengthFromSupers(supers); + + builder_.Branch(builder_.Int32LessThan(levelGate, length), levelValid, exit, + BranchWeight::DEOPT_WEIGHT, BranchWeight::ONE_WEIGHT); + builder_.Bind(levelValid); + { check = builder_.Equal(aotHCGate, GetValueFromSupers(supers, level)); - } else { - GateRef levelGate = builder_.Int32(level); - GateRef length = GetLengthFromSupers(supers); + builder_.Jump(exit); + } + builder_.Bind(exit); - Label levelValid(&builder_); - Label exit(&builder_); - builder_.Branch(builder_.Int32LessThan(levelGate, length), &levelValid, &exit); - builder_.Bind(&levelValid); - { - check = builder_.Equal(aotHCGate, GetValueFromSupers(supers, level)); - builder_.Jump(&exit); - } - builder_.Bind(&exit); + return *check; +} + +GateRef TypeMCRLowering::BuildCompareHClass(GateRef gate, GateRef frameState) +{ + GateRef receiver = acc_.GetValueIn(gate, 0); + bool isHeapObject = acc_.GetObjectTypeAccessor(gate).IsHeapObject(); + if (!isHeapObject) { + builder_.HeapObjectCheck(receiver, frameState); } - builder_.DeoptCheck(*check, frameState, DeoptType::INCONSISTENTHCLASS); + GateRef aotHCIndex = acc_.GetValueIn(gate, 1); + auto hclassIndex = acc_.GetConstantValue(aotHCIndex); + ArgumentAccessor argAcc(circuit_); + GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC); + GateRef aotHCGate = LoadFromConstPool(jsFunc, hclassIndex); + GateRef receiverHClass = builder_.LoadConstOffset( + VariableType::JS_POINTER(), receiver, TaggedObject::HCLASS_OFFSET); + return builder_.Equal(aotHCGate, receiverHClass); +} + +void TypeMCRLowering::LowerRangeCheckPredicate(GateRef gate) +{ + Environment env(gate, circuit_, &builder_); + auto deoptType = DeoptType::NOTARRAY; + GateRef frameState = GetFrameState(gate); + GateRef x = acc_.GetValueIn(gate, 0); + GateRef y = acc_.GetValueIn(gate, 1); + TypedBinaryAccessor accessor = acc_.GetTypedBinaryAccessor(gate); + TypedBinOp cond = accessor.GetTypedBinOp(); + GateRef check = Circuit::NullGate(); + // check the condition + switch (cond) { + case TypedBinOp::TYPED_GREATER: + check = builder_.Int32GreaterThan(x, y); + break; + case TypedBinOp::TYPED_GREATEREQ: + check = builder_.Int32GreaterThanOrEqual(x, y); + break; + case TypedBinOp::TYPED_LESS: + check = builder_.Int32LessThan(x, y); + break; + case TypedBinOp::TYPED_LESSEQ: + check = builder_.Int32LessThanOrEqual(x, y); + break; + default: + UNREACHABLE(); + break; + } + builder_.DeoptCheck(check, frameState, deoptType); acc_.ReplaceGate(gate, 
builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } void TypeMCRLowering::LowerIndexCheck(GateRef gate) { Environment env(gate, circuit_, &builder_); - auto type = acc_.GetParamGateType(gate); - auto deoptType = DeoptType::NOTCHECK; - - if (tsManager_->IsArrayTypeKind(type)) { - deoptType = DeoptType::NOTARRAYIDX; - } else if (tsManager_->IsFloat32ArrayType(type)) { - deoptType = DeoptType::NOTF32ARRAYIDX; - } else if (tsManager_->IsInt32ArrayType(type)) { - deoptType = DeoptType::NOTI32ARRAYIDX; - } else if (tsManager_->IsFloat64ArrayType(type)) { - deoptType = DeoptType::NOTF64ARRAYIDX; - } else { - LOG_ECMA(FATAL) << "this branch is unreachable"; - UNREACHABLE(); - } + auto deoptType = DeoptType::NOTLEGALIDX; GateRef frameState = GetFrameState(gate); GateRef length = acc_.GetValueIn(gate, 0); GateRef index = acc_.GetValueIn(gate, 1); ASSERT(acc_.GetGateType(length).IsNJSValueType()); - GateRef nonNegativeCheck = builder_.Int32LessThanOrEqual(builder_.Int32(0), index); + // UnsignedLessThan can check both lower and upper bounds GateRef lengthCheck = builder_.Int32UnsignedLessThan(index, length); - GateRef check = builder_.BoolAnd(nonNegativeCheck, lengthCheck); - builder_.DeoptCheck(check, frameState, deoptType); - + builder_.DeoptCheck(lengthCheck, frameState, deoptType); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), index); } @@ -424,7 +622,7 @@ void TypeMCRLowering::LowerPrimitiveToNumber(GateRef dst, GateRef src, GateType GateRef TypeMCRLowering::LoadFromConstPool(GateRef jsFunc, size_t index) { GateRef constPool = builder_.GetConstPool(jsFunc); - return LoadFromTaggedArray(constPool, index); + return builder_.LoadFromTaggedArray(constPool, index); } GateRef TypeMCRLowering::GetObjectFromConstPool(GateRef jsFunc, GateRef index) @@ -466,8 +664,7 @@ void TypeMCRLowering::LowerCallGetter(GateRef gate, GateRef glue) PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult)); ASSERT(plr.IsAccessor()); GateRef accessor = LoadFromVTable(receiver, plr.GetOffset()); - GateRef getter = builder_.Load(VariableType::JS_ANY(), accessor, - builder_.IntPtr(AccessorData::GETTER_OFFSET)); + GateRef getter = builder_.LoadConstOffset(VariableType::JS_ANY(), accessor, AccessorData::GETTER_OFFSET); DEFVAlUE(result, (&builder_), VariableType::JS_ANY(), builder_.UndefineConstant()); Label callGetter(&builder_); @@ -495,7 +692,7 @@ void TypeMCRLowering::LowerStoreProperty(GateRef gate) if (op == OpCode::STORE_PROPERTY) { builder_.StoreConstOffset(VariableType::JS_ANY(), receiver, plr.GetOffset(), value); } else if (op == OpCode::STORE_PROPERTY_NO_BARRIER) { - builder_.StoreConstOffset(VariableType::INT64(), receiver, plr.GetOffset(), value); + builder_.StoreConstOffset(GetVarType(plr), receiver, plr.GetOffset(), value); } else { UNREACHABLE(); } @@ -513,8 +710,7 @@ void TypeMCRLowering::LowerCallSetter(GateRef gate, GateRef glue) PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult)); ASSERT(plr.IsAccessor()); GateRef accessor = LoadFromVTable(receiver, plr.GetOffset()); - GateRef setter = builder_.Load(VariableType::JS_ANY(), - accessor, builder_.IntPtr(AccessorData::SETTER_OFFSET)); + GateRef setter = builder_.LoadConstOffset(VariableType::JS_ANY(), accessor, AccessorData::SETTER_OFFSET); Label callSetter(&builder_); Label exit(&builder_); @@ -532,28 +728,112 @@ void TypeMCRLowering::LowerLoadArrayLength(GateRef gate) { Environment env(gate, circuit_, &builder_); GateRef array = acc_.GetValueIn(gate, 0); - GateRef offset = 
builder_.IntPtr(JSArray::LENGTH_OFFSET); - GateRef result = builder_.Load(VariableType::INT32(), array, offset); + GateRef result = builder_.LoadConstOffset(VariableType::INT32(), array, JSArray::LENGTH_OFFSET); acc_.SetGateType(gate, GateType::NJSValue()); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); } +GateRef TypeMCRLowering::GetElementSize(BuiltinTypeId id) +{ + GateRef elementSize = Circuit::NullGate(); + switch (id) { + case BuiltinTypeId::INT8_ARRAY: + case BuiltinTypeId::UINT8_ARRAY: + case BuiltinTypeId::UINT8_CLAMPED_ARRAY: + elementSize = builder_.Int32(sizeof(uint8_t)); + break; + case BuiltinTypeId::INT16_ARRAY: + case BuiltinTypeId::UINT16_ARRAY: + elementSize = builder_.Int32(sizeof(uint16_t)); + break; + case BuiltinTypeId::INT32_ARRAY: + case BuiltinTypeId::UINT32_ARRAY: + case BuiltinTypeId::FLOAT32_ARRAY: + elementSize = builder_.Int32(sizeof(uint32_t)); + break; + case BuiltinTypeId::FLOAT64_ARRAY: + elementSize = builder_.Int32(sizeof(double)); + break; + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } + return elementSize; +} + +VariableType TypeMCRLowering::GetVariableType(BuiltinTypeId id) +{ + VariableType type = VariableType::JS_ANY(); + switch (id) { + case BuiltinTypeId::INT8_ARRAY: + case BuiltinTypeId::UINT8_ARRAY: + case BuiltinTypeId::UINT8_CLAMPED_ARRAY: + type = VariableType::INT8(); + break; + case BuiltinTypeId::INT16_ARRAY: + case BuiltinTypeId::UINT16_ARRAY: + type = VariableType::INT16(); + break; + case BuiltinTypeId::INT32_ARRAY: + case BuiltinTypeId::UINT32_ARRAY: + type = VariableType::INT32(); + break; + case BuiltinTypeId::FLOAT32_ARRAY: + type = VariableType::FLOAT32(); + break; + case BuiltinTypeId::FLOAT64_ARRAY: + type = VariableType::FLOAT64(); + break; + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } + return type; +} + void TypeMCRLowering::LowerLoadElement(GateRef gate) { Environment env(gate, circuit_, &builder_); auto op = acc_.GetTypedLoadOp(gate); switch (op) { - case TypedLoadOp::ARRAY_LOAD_ELEMENT: - LowerArrayLoadElement(gate); + case TypedLoadOp::ARRAY_LOAD_INT_ELEMENT: + case TypedLoadOp::ARRAY_LOAD_DOUBLE_ELEMENT: + case TypedLoadOp::ARRAY_LOAD_OBJECT_ELEMENT: + case TypedLoadOp::ARRAY_LOAD_TAGGED_ELEMENT: + LowerArrayLoadElement(gate, ArrayState::PACKED); break; - case TypedLoadOp::FLOAT32ARRAY_LOAD_ELEMENT: - LowerFloat32ArrayLoadElement(gate); + case TypedLoadOp::ARRAY_LOAD_HOLE_TAGGED_ELEMENT: + LowerArrayLoadElement(gate, ArrayState::HOLEY); + break; + case TypedLoadOp::INT8ARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::INT8_ARRAY); + break; + case TypedLoadOp::UINT8ARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::UINT8_ARRAY); + break; + case TypedLoadOp::UINT8CLAMPEDARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::UINT8_CLAMPED_ARRAY); + break; + case TypedLoadOp::INT16ARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::INT16_ARRAY); + break; + case TypedLoadOp::UINT16ARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::UINT16_ARRAY); break; case TypedLoadOp::INT32ARRAY_LOAD_ELEMENT: - LowerInt32ArrayLoadElement(gate); + LowerTypedArrayLoadElement(gate, BuiltinTypeId::INT32_ARRAY); + break; + case TypedLoadOp::UINT32ARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::UINT32_ARRAY); + break; + case TypedLoadOp::FLOAT32ARRAY_LOAD_ELEMENT: + LowerTypedArrayLoadElement(gate, BuiltinTypeId::FLOAT32_ARRAY); break; 
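+            // The typed-array cases in this switch all funnel into LowerTypedArrayLoadElement:
+            // the BuiltinTypeId selects the element width (GetElementSize) and the machine
+            // representation (GetVariableType), and sub-int32 results are then sign- or
+            // zero-extended (float32 is widened to double) before the gate is replaced.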
case TypedLoadOp::FLOAT64ARRAY_LOAD_ELEMENT: - LowerFloat64ArrayLoadElement(gate); + LowerTypedArrayLoadElement(gate, BuiltinTypeId::FLOAT64_ARRAY); + break; + case TypedLoadOp::STRING_LOAD_ELEMENT: + LowerStringLoadElement(gate); break; default: LOG_ECMA(FATAL) << "this branch is unreachable"; @@ -561,70 +841,127 @@ void TypeMCRLowering::LowerLoadElement(GateRef gate) } } +void TypeMCRLowering::LowerCowArrayCheck(GateRef gate, GateRef glue) +{ + Environment env(gate, circuit_, &builder_); + GateRef receiver = acc_.GetValueIn(gate, 0); + Label notCOWArray(&builder_); + Label isCOWArray(&builder_); + builder_.Branch(builder_.IsJsCOWArray(receiver), &isCOWArray, ¬COWArray); + builder_.Bind(&isCOWArray); + { + LowerCallRuntime(glue, gate, RTSTUB_ID(CheckAndCopyArray), {receiver}, true); + builder_.Jump(¬COWArray); + } + builder_.Bind(¬COWArray); + + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); +} + // for JSArray -void TypeMCRLowering::LowerArrayLoadElement(GateRef gate) +void TypeMCRLowering::LowerArrayLoadElement(GateRef gate, ArrayState arrayState) { Environment env(gate, circuit_, &builder_); GateRef receiver = acc_.GetValueIn(gate, 0); GateRef index = acc_.GetValueIn(gate, 1); GateRef element = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, JSObject::ELEMENTS_OFFSET); GateRef result = builder_.GetValueFromTaggedArray(element, index); - result = builder_.ConvertHoleAsUndefined(result); + if (arrayState == ArrayState::HOLEY) { + result = builder_.ConvertHoleAsUndefined(result); + } acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); } -// for Float32Array -void TypeMCRLowering::LowerFloat32ArrayLoadElement(GateRef gate) +void TypeMCRLowering::LowerTypedArrayLoadElement(GateRef gate, BuiltinTypeId id) { Environment env(gate, circuit_, &builder_); GateRef receiver = acc_.GetValueIn(gate, 0); - GateRef arrbuffer = - builder_.Load(VariableType::JS_POINTER(), receiver, builder_.IntPtr(JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET)); GateRef index = acc_.GetValueIn(gate, 1); - GateRef elementSize = builder_.Int32(4); // 4: float32 occupy 4 bytes + GateRef elementSize = GetElementSize(id); GateRef offset = builder_.PtrMul(index, elementSize); - GateRef byteOffset = - builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::BYTE_OFFSET_OFFSET)); + VariableType type = GetVariableType(id); - GateRef data = builder_.PtrAdd(arrbuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); - GateRef result = builder_.Load(VariableType::FLOAT32(), data, builder_.PtrAdd(offset, byteOffset)); - result = builder_.ExtFloat32ToDouble(result); + GateRef result = Circuit::NullGate(); + if (IsOnHeap()) { + result = BuildOnHeapTypedArrayLoadElement(receiver, offset, type); + } else { + Label isByteArray(&builder_); + Label isArrayBuffer(&builder_); + Label exit(&builder_); + result = BuildTypedArrayLoadElement(receiver, offset, type, &isByteArray, &isArrayBuffer, &exit); + } + + switch (id) { + case BuiltinTypeId::INT8_ARRAY: + result = builder_.SExtInt8ToInt32(result); + break; + case BuiltinTypeId::UINT8_ARRAY: + case BuiltinTypeId::UINT8_CLAMPED_ARRAY: + result = builder_.ZExtInt8ToInt32(result); + break; + case BuiltinTypeId::INT16_ARRAY: + result = builder_.SExtInt16ToInt32(result); + break; + case BuiltinTypeId::UINT16_ARRAY: + result = builder_.ZExtInt16ToInt32(result); + break; + case BuiltinTypeId::FLOAT32_ARRAY: + result = builder_.ExtFloat32ToDouble(result); + break; + default: + break; + } acc_.ReplaceGate(gate, 
builder_.GetState(), builder_.GetDepend(), result); } -// for Int32Array -void TypeMCRLowering::LowerInt32ArrayLoadElement(GateRef gate) +GateRef TypeMCRLowering::BuildOnHeapTypedArrayLoadElement(GateRef receiver, GateRef offset, VariableType type) { - Environment env(gate, circuit_, &builder_); - GateRef receiver = acc_.GetValueIn(gate, 0); GateRef arrbuffer = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET); - GateRef index = acc_.GetValueIn(gate, 1); - GateRef elementSize = builder_.Int32(4); // 4: int32 occupy 4 bytes - GateRef offset = builder_.PtrMul(index, elementSize); - GateRef byteOffset = - builder_.LoadConstOffset(VariableType::INT32(), receiver, JSTypedArray::BYTE_OFFSET_OFFSET); - GateRef data = builder_.PtrAdd(arrbuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); - GateRef result = builder_.Load(VariableType::INT32(), data, builder_.PtrAdd(offset, byteOffset)); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); + GateRef result = builder_.Load(type, data, offset); + return result; } -// for Float64Array -void TypeMCRLowering::LowerFloat64ArrayLoadElement(GateRef gate) +GateRef TypeMCRLowering::BuildTypedArrayLoadElement(GateRef receiver, GateRef offset, VariableType type, + Label *isByteArray, Label *isArrayBuffer, Label *exit) +{ + GateRef byteArrayOrArraybuffer = + builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET); + DEFVAlUE(data, (&builder_), VariableType::JS_ANY(), builder_.Undefined()); + DEFVAlUE(result, (&builder_), type, builder_.Double(0)); + + GateRef isOnHeap = builder_.Load(VariableType::BOOL(), receiver, builder_.IntPtr(JSTypedArray::ON_HEAP_OFFSET)); + builder_.Branch(isOnHeap, isByteArray, isArrayBuffer); + builder_.Bind(isByteArray); + { + data = builder_.PtrAdd(byteArrayOrArraybuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); + result = builder_.Load(type, *data, offset); + builder_.Jump(exit); + } + builder_.Bind(isArrayBuffer); + { + data = builder_.Load(VariableType::JS_POINTER(), byteArrayOrArraybuffer, + builder_.IntPtr(JSArrayBuffer::DATA_OFFSET)); + GateRef block = builder_.Load(VariableType::JS_ANY(), *data, builder_.IntPtr(JSNativePointer::POINTER_OFFSET)); + GateRef byteOffset = + builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::BYTE_OFFSET_OFFSET)); + result = builder_.Load(type, block, builder_.PtrAdd(offset, byteOffset)); + builder_.Jump(exit); + } + builder_.Bind(exit); + + return *result; +} + +void TypeMCRLowering::LowerStringLoadElement(GateRef gate) { Environment env(gate, circuit_, &builder_); + GateRef glue = acc_.GetGlueFromArgList(); GateRef receiver = acc_.GetValueIn(gate, 0); - GateRef arrbuffer = - builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET); GateRef index = acc_.GetValueIn(gate, 1); - GateRef elementSize = builder_.Int32(8); // 4: float64 occupy 8 bytes - GateRef offset = builder_.PtrMul(index, elementSize); - GateRef byteOffset = - builder_.LoadConstOffset(VariableType::INT32(), receiver, JSTypedArray::BYTE_OFFSET_OFFSET); - GateRef data = builder_.PtrAdd(arrbuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); - GateRef result = builder_.Load(VariableType::FLOAT64(), data, builder_.PtrAdd(offset, byteOffset)); + GateRef result = builder_.CallStub(glue, gate, CommonStubCSigns::GetCharFromEcmaString, { glue, receiver, index }); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result); } @@ -636,14 
+973,32 @@ void TypeMCRLowering::LowerStoreElement(GateRef gate, GateRef glue) case TypedStoreOp::ARRAY_STORE_ELEMENT: LowerArrayStoreElement(gate, glue); break; + case TypedStoreOp::INT8ARRAY_STORE_ELEMENT: + LowerTypedArrayStoreElement(gate, BuiltinTypeId::INT8_ARRAY); + break; + case TypedStoreOp::UINT8ARRAY_STORE_ELEMENT: + LowerTypedArrayStoreElement(gate, BuiltinTypeId::UINT8_ARRAY); + break; + case TypedStoreOp::UINT8CLAMPEDARRAY_STORE_ELEMENT: + LowerUInt8ClampedArrayStoreElement(gate); + break; + case TypedStoreOp::INT16ARRAY_STORE_ELEMENT: + LowerTypedArrayStoreElement(gate, BuiltinTypeId::INT16_ARRAY); + break; + case TypedStoreOp::UINT16ARRAY_STORE_ELEMENT: + LowerTypedArrayStoreElement(gate, BuiltinTypeId::UINT16_ARRAY); + break; case TypedStoreOp::INT32ARRAY_STORE_ELEMENT: - LowerInt32ArrayStoreElement(gate, glue); + LowerTypedArrayStoreElement(gate, BuiltinTypeId::INT32_ARRAY); + break; + case TypedStoreOp::UINT32ARRAY_STORE_ELEMENT: + LowerTypedArrayStoreElement(gate, BuiltinTypeId::UINT32_ARRAY); break; case TypedStoreOp::FLOAT32ARRAY_STORE_ELEMENT: - LowerFloat32ArrayStoreElement(gate, glue); + LowerTypedArrayStoreElement(gate, BuiltinTypeId::FLOAT32_ARRAY); break; case TypedStoreOp::FLOAT64ARRAY_STORE_ELEMENT: - LowerFloat64ArrayStoreElement(gate, glue); + LowerTypedArrayStoreElement(gate, BuiltinTypeId::FLOAT64_ARRAY); break; default: LOG_ECMA(FATAL) << "this branch is unreachable"; @@ -659,93 +1014,129 @@ void TypeMCRLowering::LowerArrayStoreElement(GateRef gate, GateRef glue) GateRef index = acc_.GetValueIn(gate, 1); // 1: index GateRef value = acc_.GetValueIn(gate, 2); // 2: value - Label storeWithCOWArray(&builder_); - Label storeDirectly(&builder_); - Label exit(&builder_); - builder_.Branch(builder_.IsJsCOWArray(receiver), &storeWithCOWArray, &storeDirectly); - builder_.Bind(&storeWithCOWArray); - { - GateRef newElement = LowerCallRuntime(glue, gate, RTSTUB_ID(CheckAndCopyArray), {receiver}, true); - builder_.SetValueToTaggedArray(VariableType::JS_ANY(), glue, newElement, index, value); - builder_.Jump(&exit); - } - builder_.Bind(&storeDirectly); - { - GateRef element = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, JSObject::ELEMENTS_OFFSET); - builder_.SetValueToTaggedArray(VariableType::JS_ANY(), glue, element, index, value); - builder_.Jump(&exit); - } - builder_.Bind(&exit); + GateRef element = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, JSObject::ELEMENTS_OFFSET); + builder_.SetValueToTaggedArray(VariableType::JS_ANY(), glue, element, index, value); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } -// for Int32Array -void TypeMCRLowering::LowerInt32ArrayStoreElement(GateRef gate, GateRef glue) +// for JSTypedArray +void TypeMCRLowering::LowerTypedArrayStoreElement(GateRef gate, BuiltinTypeId id) { Environment env(gate, circuit_, &builder_); - Label isArrayBuffer(&builder_); - Label isByteArray(&builder_); - Label afterGetValue(&builder_); - Label exit(&builder_); GateRef receiver = acc_.GetValueIn(gate, 0); GateRef index = acc_.GetValueIn(gate, 1); - GateRef elementSize = builder_.Int32(4); // 4: int32 occupy 4 bytes - GateRef offset = builder_.PtrMul(index, elementSize); - GateRef byteOffset = - builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::BYTE_OFFSET_OFFSET)); GateRef value = acc_.GetValueIn(gate, 2); - GateRef arrbuffer = - builder_.Load(VariableType::JS_POINTER(), receiver, builder_.IntPtr(JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET)); - GateRef 
data = builder_.PtrAdd(arrbuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); - builder_.Store(VariableType::VOID(), glue, data, builder_.PtrAdd(offset, byteOffset), value); + GateRef elementSize = GetElementSize(id); + GateRef offset = builder_.PtrMul(index, elementSize); + switch (id) { + case BuiltinTypeId::INT8_ARRAY: + case BuiltinTypeId::UINT8_ARRAY: + value = builder_.TruncInt32ToInt8(value); + break; + case BuiltinTypeId::INT16_ARRAY: + case BuiltinTypeId::UINT16_ARRAY: + value = builder_.TruncInt32ToInt16(value); + break; + case BuiltinTypeId::FLOAT32_ARRAY: + value = builder_.TruncDoubleToFloat32(value); + break; + default: + break; + } + + if (IsOnHeap()) { + BuildOnHeapTypedArrayStoreElement(receiver, offset, value); + } else { + Label isByteArray(&builder_); + Label isArrayBuffer(&builder_); + Label exit(&builder_); + BuildTypedArrayStoreElement(receiver, offset, value, &isByteArray, &isArrayBuffer, &exit); + } + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } -// for Float32Array -void TypeMCRLowering::LowerFloat32ArrayStoreElement(GateRef gate, GateRef glue) +void TypeMCRLowering::BuildOnHeapTypedArrayStoreElement(GateRef receiver, GateRef offset, GateRef value) { - Environment env(gate, circuit_, &builder_); - Label isArrayBuffer(&builder_); - Label isByteArray(&builder_); - Label afterGetValue(&builder_); - Label exit(&builder_); - GateRef receiver = acc_.GetValueIn(gate, 0); - GateRef index = acc_.GetValueIn(gate, 1); - GateRef elementSize = builder_.Int32(4); // 4: float32 occupy 4 bytes - GateRef offset = builder_.PtrMul(index, elementSize); - GateRef byteOffset = - builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::BYTE_OFFSET_OFFSET)); - GateRef value = acc_.GetValueIn(gate, 2); - value = builder_.TruncDoubleToFloat32(value); - GateRef arrbuffer = - builder_.Load(VariableType::JS_POINTER(), receiver, builder_.IntPtr(JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET)); - + GateRef arrbuffer = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, + JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET); GateRef data = builder_.PtrAdd(arrbuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); - builder_.Store(VariableType::VOID(), glue, data, builder_.PtrAdd(offset, byteOffset), value); - acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); + + builder_.StoreMemory(MemoryType::ELEMENT_TYPE, VariableType::VOID(), data, offset, value); } -// for Float64Array -void TypeMCRLowering::LowerFloat64ArrayStoreElement(GateRef gate, GateRef glue) +void TypeMCRLowering::BuildTypedArrayStoreElement(GateRef receiver, GateRef offset, GateRef value, + Label *isByteArray, Label *isArrayBuffer, Label *exit) +{ + GateRef byteArrayOrArraybuffer = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, + JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET); + GateRef isOnHeap = builder_.Load(VariableType::BOOL(), receiver, builder_.IntPtr(JSTypedArray::ON_HEAP_OFFSET)); + DEFVAlUE(data, (&builder_), VariableType::JS_ANY(), builder_.Undefined()); + builder_.Branch(isOnHeap, isByteArray, isArrayBuffer); + builder_.Bind(isByteArray); + { + data = builder_.PtrAdd(byteArrayOrArraybuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); + builder_.StoreMemory(MemoryType::ELEMENT_TYPE, VariableType::VOID(), *data, offset, value); + builder_.Jump(exit); + } + builder_.Bind(isArrayBuffer); + { + data = builder_.Load(VariableType::JS_POINTER(), byteArrayOrArraybuffer, + builder_.IntPtr(JSArrayBuffer::DATA_OFFSET)); + GateRef block 
= builder_.Load(VariableType::JS_ANY(), *data, builder_.IntPtr(JSNativePointer::POINTER_OFFSET)); + GateRef byteOffset = + builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::BYTE_OFFSET_OFFSET)); + builder_.StoreMemory(MemoryType::ELEMENT_TYPE, VariableType::VOID(), block, + builder_.PtrAdd(offset, byteOffset), value); + builder_.Jump(exit); + } + builder_.Bind(exit); +} + +// for UInt8ClampedArray +void TypeMCRLowering::LowerUInt8ClampedArrayStoreElement(GateRef gate) { Environment env(gate, circuit_, &builder_); - Label isArrayBuffer(&builder_); - Label isByteArray(&builder_); - Label afterGetValue(&builder_); - Label exit(&builder_); + GateRef receiver = acc_.GetValueIn(gate, 0); GateRef index = acc_.GetValueIn(gate, 1); - GateRef elementSize = builder_.Int32(8); // 8: float64 occupy 8 bytes + GateRef elementSize = builder_.Int32(sizeof(uint8_t)); GateRef offset = builder_.PtrMul(index, elementSize); - GateRef byteOffset = - builder_.Load(VariableType::INT32(), receiver, builder_.IntPtr(JSTypedArray::BYTE_OFFSET_OFFSET)); GateRef value = acc_.GetValueIn(gate, 2); - GateRef arrbuffer = - builder_.Load(VariableType::JS_POINTER(), receiver, builder_.IntPtr(JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET)); + + DEFVAlUE(result, (&builder_), VariableType::INT32(), value); + GateRef topValue = builder_.Int32(static_cast(UINT8_MAX)); + GateRef bottomValue = builder_.Int32(static_cast(0)); + Label isOverFlow(&builder_); + Label notOverFlow(&builder_); + Label exit(&builder_); + builder_.Branch(builder_.Int32GreaterThan(value, topValue), &isOverFlow, ¬OverFlow); + builder_.Bind(&isOverFlow); + { + result = topValue; + builder_.Jump(&exit); + } + builder_.Bind(¬OverFlow); + { + Label isUnderSpill(&builder_); + builder_.Branch(builder_.Int32LessThan(value, bottomValue), &isUnderSpill, &exit); + builder_.Bind(&isUnderSpill); + { + result = bottomValue; + builder_.Jump(&exit); + } + } + builder_.Bind(&exit); + value = builder_.TruncInt32ToInt8(*result); + + GateRef arrbuffer = builder_.LoadConstOffset(VariableType::JS_POINTER(), receiver, + JSTypedArray::VIEWED_ARRAY_BUFFER_OFFSET); GateRef data = builder_.PtrAdd(arrbuffer, builder_.IntPtr(ByteArray::DATA_OFFSET)); - builder_.Store(VariableType::VOID(), glue, data, builder_.PtrAdd(offset, byteOffset), value); + + builder_.StoreMemory(MemoryType::ELEMENT_TYPE, VariableType::VOID(), data, offset, value); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } @@ -848,7 +1239,7 @@ void TypeMCRLowering::LowerJSCallThisTargetTypeCheck(GateRef gate) GateRef frameState = GetFrameState(gate); auto func = acc_.GetValueIn(gate, 0); GateRef isObj = builder_.TaggedIsHeapObject(func); - GateRef isOptimized = builder_.IsOptimized(func); + GateRef isOptimized = builder_.IsOptimizedAndNotFastCall(func); GateRef check = builder_.BoolAnd(isObj, isOptimized); builder_.DeoptCheck(check, frameState, DeoptType::NOTJSCALLTGT); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); @@ -858,6 +1249,26 @@ void TypeMCRLowering::LowerJSCallThisTargetTypeCheck(GateRef gate) } } +void TypeMCRLowering::LowerJSNoGCCallThisTargetTypeCheck(GateRef gate) +{ + Environment env(gate, circuit_, &builder_); + auto type = acc_.GetParamGateType(gate); + if (tsManager_->IsFunctionTypeKind(type)) { + GateRef frameState = GetFrameState(gate); + auto func = acc_.GetValueIn(gate, 0); + GateRef isObj = builder_.TaggedIsHeapObject(func); + GateRef isOptimized = builder_.IsOptimizedAndNotFastCall(func); + GateRef 
methodId = builder_.GetMethodId(func); + GateRef checkOptimized = builder_.BoolAnd(isObj, isOptimized); + GateRef check = builder_.BoolAnd(checkOptimized, builder_.Equal(methodId, acc_.GetValueIn(gate, 1))); + builder_.DeoptCheck(check, frameState, DeoptType::NOTJSCALLTGT); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); + } else { + LOG_COMPILER(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } +} + void TypeMCRLowering::LowerJSFastCallThisTargetTypeCheck(GateRef gate) { Environment env(gate, circuit_, &builder_); @@ -876,6 +1287,26 @@ void TypeMCRLowering::LowerJSFastCallThisTargetTypeCheck(GateRef gate) } } +void TypeMCRLowering::LowerJSNoGCFastCallThisTargetTypeCheck(GateRef gate) +{ + Environment env(gate, circuit_, &builder_); + auto type = acc_.GetParamGateType(gate); + if (tsManager_->IsFunctionTypeKind(type)) { + GateRef frameState = GetFrameState(gate); + auto func = acc_.GetValueIn(gate, 0); + GateRef isObj = builder_.TaggedIsHeapObject(func); + GateRef canFastCall = builder_.CanFastCall(func); + GateRef methodId = builder_.GetMethodId(func); + GateRef checkOptimized = builder_.BoolAnd(isObj, canFastCall); + GateRef check = builder_.BoolAnd(checkOptimized, builder_.Equal(methodId, acc_.GetValueIn(gate, 1))); + builder_.DeoptCheck(check, frameState, DeoptType::NOTJSFASTCALLTGT); + acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); + } else { + LOG_COMPILER(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } +} + void TypeMCRLowering::LowerCallTargetCheck(GateRef gate) { Environment env(gate, circuit_, &builder_); @@ -883,8 +1314,13 @@ void TypeMCRLowering::LowerCallTargetCheck(GateRef gate) BuiltinLowering lowering(circuit_); GateRef funcheck = lowering.LowerCallTargetCheck(&env, gate); - GateRef check = lowering.CheckPara(gate, funcheck); - builder_.DeoptCheck(check, frameState, DeoptType::NOTCALLTGT); + GateRef constId = acc_.GetValueIn(gate, 1); // 1: stub id + if (acc_.GetConstantValue(constId) != static_cast(BuiltinsStubCSigns::ID::STRINGIFY)) { + GateRef check = lowering.CheckPara(gate, funcheck); + builder_.DeoptCheck(check, frameState, DeoptType::NOTCALLTGT); + } else { + builder_.DeoptCheck(funcheck, frameState, DeoptType::NOTCALLTGT); + } acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate()); } @@ -922,8 +1358,8 @@ void TypeMCRLowering::LowerTypedNewAllocateThis(GateRef gate, GateRef glue) { // add typecheck to detect protoOrHclass is equal with ihclass, // if pass typecheck: 1.no need to check whether hclass is valid 2.no need to check return result - GateRef protoOrHclass = builder_.Load(VariableType::JS_ANY(), ctor, - builder_.IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); + GateRef protoOrHclass = builder_.LoadConstOffset(VariableType::JS_ANY(), ctor, + JSFunction::PROTO_OR_DYNCLASS_OFFSET); GateRef ihclassIndex = acc_.GetValueIn(gate, 1); GateRef ihclass = GetObjectFromConstPool(jsFunc, ihclassIndex); GateRef check = builder_.Equal(protoOrHclass, ihclass); @@ -950,8 +1386,8 @@ void TypeMCRLowering::LowerTypedSuperAllocateThis(GateRef gate, GateRef glue) builder_.Branch(isBase, &allocate, &exit); builder_.Bind(&allocate); { - GateRef protoOrHclass = builder_.Load(VariableType::JS_ANY(), newTarget, - builder_.IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); + GateRef protoOrHclass = builder_.LoadConstOffset(VariableType::JS_ANY(), newTarget, + JSFunction::PROTO_OR_DYNCLASS_OFFSET); GateRef check = builder_.IsJSHClass(protoOrHclass); 
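+        // protoOrHclass comes from newTarget, which a derived-class super() call cannot
+        // trust statically: allocation may proceed only when it is a genuine JSHClass,
+        // so the check above feeds the NOTNEWOBJ DeoptCheck below rather than assuming
+        // the fast path.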
GateRef frameState = GetFrameState(gate); builder_.DeoptCheck(check, frameState, DeoptType::NOTNEWOBJ); @@ -968,17 +1404,10 @@ void TypeMCRLowering::LowerGetSuperConstructor(GateRef gate) Environment env(gate, circuit_, &builder_); GateRef ctor = acc_.GetValueIn(gate, 0); GateRef hclass = builder_.LoadHClass(ctor); - GateRef protoOffset = builder_.IntPtr(JSHClass::PROTOTYPE_OFFSET); - GateRef superCtor = builder_.Load(VariableType::JS_ANY(), hclass, protoOffset); + GateRef superCtor = builder_.LoadConstOffset(VariableType::JS_ANY(), hclass, JSHClass::PROTOTYPE_OFFSET); acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), superCtor); } -GateRef TypeMCRLowering::LoadFromTaggedArray(GateRef array, size_t index) -{ - auto dataOffset = TaggedArray::DATA_OFFSET + index * JSTaggedValue::TaggedTypeSize(); - return builder_.LoadConstOffset(VariableType::JS_ANY(), array, dataOffset); -} - GateRef TypeMCRLowering::LoadFromVTable(GateRef receiver, size_t index) { GateRef hclass = builder_.LoadConstOffset( @@ -986,11 +1415,22 @@ GateRef TypeMCRLowering::LoadFromVTable(GateRef receiver, size_t index) GateRef vtable = builder_.LoadConstOffset(VariableType::JS_ANY(), hclass, JSHClass::VTABLE_OFFSET); - GateRef itemOwner = LoadFromTaggedArray(vtable, VTable::TupleItem::OWNER + index); - GateRef itemOffset = LoadFromTaggedArray(vtable, VTable::TupleItem::OFFSET + index); + GateRef itemOwner = builder_.LoadFromTaggedArray(vtable, VTable::TupleItem::OWNER + index); + GateRef itemOffset = builder_.LoadFromTaggedArray(vtable, VTable::TupleItem::OFFSET + index); return builder_.Load(VariableType::JS_ANY(), itemOwner, builder_.TaggedGetInt(itemOffset)); } +VariableType TypeMCRLowering::GetVarType(PropertyLookupResult plr) +{ + if (plr.GetRepresentation() == Representation::DOUBLE) { + return kungfu::VariableType::FLOAT64(); + } else if (plr.GetRepresentation() == Representation::INT) { + return kungfu::VariableType::INT32(); + } else { + return kungfu::VariableType::INT64(); + } +} + GateRef TypeMCRLowering::LoadSupers(GateRef hclass) { return builder_.LoadConstOffset(VariableType::JS_ANY(), hclass, JSHClass::SUPERS_OFFSET); @@ -1003,12 +1443,12 @@ GateRef TypeMCRLowering::GetLengthFromSupers(GateRef supers) GateRef TypeMCRLowering::GetValueFromSupers(GateRef supers, size_t index) { - GateRef val = LoadFromTaggedArray(supers, index); + GateRef val = builder_.LoadFromTaggedArray(supers, index); return builder_.LoadObjectFromWeakRef(val); } -GateRef TypeMCRLowering::CallAccessor(GateRef glue, GateRef gate, GateRef function, GateRef receiver, AccessorMode mode, - GateRef value) +GateRef TypeMCRLowering::CallAccessor(GateRef glue, GateRef gate, GateRef function, GateRef receiver, + AccessorMode mode, GateRef value) { const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(JSCall)); GateRef target = builder_.IntPtr(RTSTUB_ID(JSCall)); @@ -1036,4 +1476,85 @@ void TypeMCRLowering::ReplaceHirWithPendingException(GateRef hirGate, GateRef gl StateDepend exception(ifTrue, eDepend); acc_.ReplaceHirWithIfBranch(hirGate, success, exception, value); } + +void TypeMCRLowering::LowerLoadGetter(GateRef gate) +{ + Environment env(gate, circuit_, &builder_); + ASSERT(acc_.GetNumValueIn(gate) == 2); // 2: receiver, plr + GateRef receiver = acc_.GetValueIn(gate, 0); + GateRef propertyLookupResult = acc_.GetValueIn(gate, 1); + + PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult)); + ASSERT(plr.IsAccessor()); + GateRef accessor = LoadFromVTable(receiver, plr.GetOffset()); + GateRef getter = 
builder_.Load(VariableType::JS_ANY(), accessor,
+        builder_.IntPtr(AccessorData::GETTER_OFFSET));
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), getter);
+}
+
+void TypeMCRLowering::LowerLoadSetter(GateRef gate)
+{
+    Environment env(gate, circuit_, &builder_);
+    ASSERT(acc_.GetNumValueIn(gate) == 2); // 2: receiver, plr
+    GateRef receiver = acc_.GetValueIn(gate, 0);
+    GateRef propertyLookupResult = acc_.GetValueIn(gate, 1);
+
+    PropertyLookupResult plr(acc_.TryGetValue(propertyLookupResult));
+    ASSERT(plr.IsAccessor());
+    GateRef accessor = LoadFromVTable(receiver, plr.GetOffset());
+    GateRef setter = builder_.Load(VariableType::JS_ANY(),
+        accessor, builder_.IntPtr(AccessorData::SETTER_OFFSET));
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), setter);
+}
+
+// subtyping check and hclass check
+void TypeMCRLowering::LowerInlineAccessorCheck(GateRef gate)
+{
+    Environment env(gate, circuit_, &builder_);
+    GateRef receiver = acc_.GetValueIn(gate, 0);
+    GateRef frameState = acc_.GetFrameState(gate);
+    builder_.HeapObjectCheck(receiver, frameState);
+
+    GateRef aotHCIndex = acc_.GetValueIn(gate, 1);
+    ArgumentAccessor argAcc(circuit_);
+    GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC);
+    JSTaggedValue aotHC = tsManager_->GetValueFromCache(acc_.TryGetValue(aotHCIndex));
+    ASSERT(aotHC.IsJSHClass());
+
+    int32_t level = JSHClass::Cast(aotHC.GetTaggedObject())->GetLevel();
+    ASSERT(level >= 0);
+
+    GateRef receiverHClass = builder_.LoadConstOffset(
+        VariableType::JS_POINTER(), receiver, TaggedObject::HCLASS_OFFSET);
+    GateRef supers = LoadSupers(receiverHClass);
+
+    auto hclassIndex = acc_.GetConstantValue(aotHCIndex);
+    GateRef aotHCGate = LoadFromConstPool(jsFunc, hclassIndex);
+    GateRef hclassCompare = builder_.Equal(aotHCGate, receiverHClass);
+    if (LIKELY(static_cast<size_t>(level) < SubtypingOperator::DEFAULT_SUPERS_CAPACITY)) {
+        GateRef subtypingCompare = builder_.Equal(aotHCGate, GetValueFromSupers(supers, level));
+        GateRef compare = builder_.BoolAnd(hclassCompare, subtypingCompare);
+        builder_.DeoptCheck(compare, frameState, DeoptType::INCONSISTENTHCLASS);
+        acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
+        return;
+    }
+
+    Label levelValid(&builder_);
+    Label exit(&builder_);
+    DEFVAlUE(check, (&builder_), VariableType::BOOL(), builder_.False());
+    GateRef levelGate = builder_.Int32(level);
+    GateRef length = GetLengthFromSupers(supers);
+
+    builder_.Branch(builder_.Int32LessThan(levelGate, length), &levelValid, &exit);
+    builder_.Bind(&levelValid);
+    {
+        check = builder_.Equal(aotHCGate, GetValueFromSupers(supers, level));
+        builder_.Jump(&exit);
+    }
+    builder_.Bind(&exit);
+
+    GateRef compare = builder_.BoolAnd(hclassCompare, *check);
+    builder_.DeoptCheck(compare, frameState, DeoptType::INCONSISTENTHCLASS);
+    acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
+}
 }  // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/type_mcr_lowering.h b/ecmascript/compiler/type_mcr_lowering.h
index c73402e7893b66b544734a65c3235bc39d5a630f..f6d512639b2e94041a2d57e52d0947ea929fdaa7 100644
--- a/ecmascript/compiler/type_mcr_lowering.h
+++ b/ecmascript/compiler/type_mcr_lowering.h
@@ -98,10 +98,10 @@ namespace panda::ecmascript::kungfu {
 class TypeMCRLowering {
 public:
     TypeMCRLowering(Circuit *circuit, CompilationConfig *cmpCfg, TSManager *tsManager,
-                    bool enableLog, const std::string& name)
+                    bool enableLog, const std::string& name, bool onHeapCheck)
        :
circuit_(circuit), acc_(circuit), builder_(circuit, cmpCfg), dependEntry_(circuit->GetDependRoot()), tsManager_(tsManager), - enableLog_(enableLog), methodName_(name) {} + enableLog_(enableLog), methodName_(name), onHeapCheck_(onHeapCheck) {} ~TypeMCRLowering() = default; @@ -118,6 +118,11 @@ private: return methodName_; } + bool IsOnHeap() const + { + return onHeapCheck_; + } + void Lower(GateRef gate); void LowerType(GateRef gate); void LowerPrimitiveTypeCheck(GateRef gate); @@ -131,9 +136,18 @@ private: void LowerObjectTypeCheck(GateRef gate); void LowerSimpleHClassCheck(GateRef gate); void LowerTSSubtypingCheck(GateRef gate); + void LowerObjectTypeCompare(GateRef gate); + void LowerSimpleHClassCompare(GateRef gate); + void LowerTSSubtypingCompare(GateRef gate); + GateRef BuildCompareSubTyping(GateRef gate, GateRef frameState, Label *levelValid, Label *exit); + GateRef BuildCompareHClass(GateRef gate, GateRef frameState); + void BuildCompareSubTyping(GateRef gate); void LowerStableArrayCheck(GateRef gate); void LowerTypedArrayCheck(GateRef gate); + void LowerEcmaStringCheck(GateRef gate); + void LowerFlattenStringCheck(GateRef gate, GateRef glue); void LowerLoadTypedArrayLength(GateRef gate); + void LowerStringLength(GateRef gate); void LowerLoadProperty(GateRef gate); void LowerCallGetter(GateRef gate, GateRef glue); void LowerStoreProperty(GateRef gate); @@ -143,25 +157,43 @@ private: void LowerLoadElement(GateRef gate); void LowerLoadFromTaggedArray(GateRef gate); void LowerStoreToTaggedArray(GateRef gate, GateRef glue); - void LowerArrayLoadElement(GateRef gate); - void LowerInt32ArrayLoadElement(GateRef gate); - void LowerFloat64ArrayLoadElement(GateRef gate); - void LowerFloat32ArrayLoadElement(GateRef gate); + void LowerRangeCheckPredicate(GateRef gate); + + enum class ArrayState : uint8_t { + PACKED = 0, + HOLEY, + }; + void LowerArrayLoadElement(GateRef gate, ArrayState arrayState); + void LowerCowArrayCheck(GateRef gate, GateRef glue); + void LowerTypedArrayLoadElement(GateRef gate, BuiltinTypeId id); + void LowerStringLoadElement(GateRef gate); + GateRef BuildOnHeapTypedArrayLoadElement(GateRef receiver, GateRef offset, VariableType type); + GateRef BuildTypedArrayLoadElement(GateRef receiver, GateRef offset, VariableType type, Label *isByteArray, + Label *isArrayBuffer, Label *exit); void LowerArrayStoreElement(GateRef gate, GateRef glue); - void LowerInt32ArrayStoreElement(GateRef gate, GateRef glue); - void LowerFloat32ArrayStoreElement(GateRef gate, GateRef glue); - void LowerFloat64ArrayStoreElement(GateRef gate, GateRef glue); + void LowerTypedArrayStoreElement(GateRef gate, BuiltinTypeId id); + void BuildOnHeapTypedArrayStoreElement(GateRef receiver, GateRef offset, GateRef value); + void BuildTypedArrayStoreElement(GateRef receiver, GateRef offset, GateRef value, Label *isByteArray, + Label *isArrayBuffer, Label *exit); + void LowerUInt8ClampedArrayStoreElement(GateRef gate); void LowerTypedCallBuitin(GateRef gate); void LowerCallTargetCheck(GateRef gate); + void LowerJSCallTargetCheck(GateRef gate); void LowerJSCallTargetFromDefineFuncCheck(GateRef gate); void LowerJSCallTargetTypeCheck(GateRef gate); void LowerJSFastCallTargetTypeCheck(GateRef gate); void LowerJSCallThisTargetTypeCheck(GateRef gate); void LowerJSFastCallThisTargetTypeCheck(GateRef gate); + void LowerJSNoGCCallThisTargetTypeCheck(GateRef gate); + void LowerJSNoGCFastCallThisTargetTypeCheck(GateRef gate); void LowerTypedNewAllocateThis(GateRef gate, GateRef glue); void 
LowerTypedSuperAllocateThis(GateRef gate, GateRef glue); void LowerGetSuperConstructor(GateRef gate); void LowerJSInlineTargetTypeCheck(GateRef gate); + void SetDeoptTypeInfo(BuiltinTypeId id, DeoptType &type, size_t &funcIndex); + void LowerLoadGetter(GateRef gate); + void LowerLoadSetter(GateRef gate); + void LowerInlineAccessorCheck(GateRef gate); GateRef LowerCallRuntime(GateRef glue, GateRef hirGate, int index, const std::vector &args, bool useLabel = false); @@ -182,12 +214,15 @@ private: GateType GetLeftType(GateRef gate); GateType GetRightType(GateRef gate); GateRef GetObjectFromConstPool(GateRef jsFunc, GateRef index); + GateRef GetElementSize(BuiltinTypeId id); + VariableType GetVariableType(BuiltinTypeId id); GateRef GetFrameState(GateRef gate) const { return acc_.GetFrameState(gate); } + VariableType GetVarType(PropertyLookupResult plr); GateRef LoadSupers(GateRef hclass); GateRef GetLengthFromSupers(GateRef supers); GateRef GetValueFromSupers(GateRef supers, size_t index); @@ -202,6 +237,7 @@ private: [[maybe_unused]] TSManager *tsManager_ {nullptr}; bool enableLog_ {false}; std::string methodName_; + bool onHeapCheck_ {false}; }; } // panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_TYPE_MCR_LOWERING_H diff --git a/ecmascript/compiler/type_recorder.cpp b/ecmascript/compiler/type_recorder.cpp index c888cb3403398dd3696103395ffa8544d7b2376b..37d1765826792740dfacd0f59ecc5117053c5759 100644 --- a/ecmascript/compiler/type_recorder.cpp +++ b/ecmascript/compiler/type_recorder.cpp @@ -19,15 +19,18 @@ #include "ecmascript/jspandafile/type_literal_extractor.h" #include "ecmascript/pgo_profiler/pgo_profiler_decoder.h" #include "ecmascript/pgo_profiler/pgo_profiler_layout.h" -#include "ecmascript/pgo_profiler/pgo_profiler_type.h" +#include "ecmascript/pgo_profiler/types/pgo_profiler_type.h" #include "ecmascript/ts_types/ts_type_parser.h" namespace panda::ecmascript::kungfu { +using PGOType = pgo::PGOType; +using PGOObjectInfo = pgo::PGOObjectInfo; TypeRecorder::TypeRecorder(const JSPandaFile *jsPandaFile, const MethodLiteral *methodLiteral, TSManager *tsManager, const CString &recordName, PGOProfilerDecoder *decoder, - const MethodPcInfo &methodPCInfo, const Bytecodes *bytecodes) + const MethodPcInfo &methodPCInfo, const Bytecodes *bytecodes, bool enableOptTrackField) : argTypes_(methodLiteral->GetNumArgsWithCallField() + static_cast(TypedArgIdx::NUM_OF_TYPED_ARGS), - GateType::AnyType()), decoder_(decoder), pcOffsets_(methodPCInfo.pcOffsets), bytecodes_(bytecodes) + GateType::AnyType()), decoder_(decoder), enableOptTrackField_(enableOptTrackField), + pcOffsets_(methodPCInfo.pcOffsets), bytecodes_(bytecodes) { TSHClassGenerator generator(tsManager); if (jsPandaFile->HasTSTypes(recordName)) { @@ -43,30 +46,32 @@ void TypeRecorder::LoadTypes(const JSPandaFile *jsPandaFile, const MethodLiteral { TSTypeParser typeParser(tsManager); panda_file::File::EntityId fieldId = methodLiteral->GetMethodId(); - uint32_t methodId = fieldId.GetOffset(); - TypeAnnotationExtractor annoExtractor(jsPandaFile, methodId); + uint32_t methodOffset = fieldId.GetOffset(); + TypeAnnotationExtractor annoExtractor(jsPandaFile, methodOffset); GlobalTSTypeRef funcGT = typeParser.CreateGT(jsPandaFile, recordName, annoExtractor.GetMethodTypeOffset()); GlobalTSTypeRef thisGT; annoExtractor.EnumerateInstsAndTypes([this, &typeParser, &jsPandaFile, &recordName, - &thisGT](const int32_t bcOffset, const uint32_t typeId) { + &thisGT, tsManager, methodOffset](const int32_t bcIdx, const uint32_t typeId) { GlobalTSTypeRef gt 
= typeParser.CreateGT(jsPandaFile, recordName, typeId);
         if (TypeNeedFilter(gt)) {
             return;
         }
+        TypeLocation loc(jsPandaFile, methodOffset, bcIdx);
+        CollectLiteralGT(tsManager, loc, gt);
         // The type of a function is recorded as (-1, funcTypeId). If the function is a member of a class,
         // the type of the class or its instance is recorded as (-2, classTypeId). If it is a static
         // member, the type id refers to the type of the class; otherwise, it links to the type of the
         // instances of the class.
-        if (bcOffset == METHOD_ANNOTATION_THIS_TYPE_OFFSET) {
+        if (bcIdx == METHOD_ANNOTATION_THIS_TYPE_OFFSET) {
             thisGT = gt;
             return;
         }
         auto type = GateType(gt);
-        bcOffsetGtMap_.emplace(bcOffset, type);
+        bcOffsetGtMap_.emplace(bcIdx, type);
     });
     const auto &methodList = typeParser.GetMethodList();
-    auto methodIter = methodList.find(methodId);
+    auto methodIter = methodList.find(methodOffset);
     if (methodIter != methodList.end()) {
         const auto &methodInfo = methodIter->second;
         const auto &bcTypes = methodInfo.GetBCAndTypes();
@@ -82,6 +87,41 @@ void TypeRecorder::LoadTypes(const JSPandaFile *jsPandaFile, const MethodLiteral
     LoadArgTypes(tsManager, funcGT, thisGT);
 }
 
+void TypeRecorder::CollectLiteralGT(TSManager *tsManager, TypeLocation &loc, GlobalTSTypeRef gt)
+{
+    int32_t bcIdx = loc.GetBcIdx();
+    if (bcIdx < 0) {
+        return;
+    }
+
+    if (bytecodes_->GetOpcode(pcOffsets_[bcIdx]) == EcmaOpcode::STA_V8) {
+        // the bcIndex of a literal marked in es2abc may be in the next bc, whose opcode should be sta.
+        bcIdx--;
+        loc.SetBcIdx(bcIdx);
+    }
+
+    EcmaOpcode ecmaOpcode = bytecodes_->GetOpcode(pcOffsets_[bcIdx]);
+
+    switch (ecmaOpcode) {
+        case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8:
+        case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8: {
+            if (tsManager->IsUserDefinedClassTypeKind(gt)) {
+                tsManager->InsertLiteralGTMap(loc, gt);
+            }
+            return;
+        }
+        case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM8_ID16:
+        case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: {
+            if (tsManager->IsObjectTypeKind(gt)) {
+                tsManager->InsertLiteralGTMap(loc, gt);
+            }
+            return;
+        }
+        default:
+            return;
+    }
+}
+
 bool TypeRecorder::CheckTypeMarkForDefineFunc(uint32_t checkBc) const
 {
     // the bcOffset of definefunc marked in es2abc may be in the next bc, whose opcode should be sta.
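Both es2abc remarks above describe the same quirk, so a standalone sketch may help. This is illustration only, under the stated assumption of a hypothetical `Opcode` enum standing in for the real `EcmaOpcode` dispatch in `CollectLiteralGT`:

```
#include <cstdint>

// Hypothetical stand-in for the real EcmaOpcode values.
enum class Opcode { STA_V8, DEFINECLASSWITHBUFFER, CREATEOBJECTWITHBUFFER };

// es2abc may attach a literal's type annotation to the index of the *following*
// sta instruction; stepping back one slot recovers the defining instruction.
int32_t NormalizeLiteralBcIdx(int32_t bcIdx, Opcode opAtIdx)
{
    return (opAtIdx == Opcode::STA_V8) ? bcIdx - 1 : bcIdx;
}
```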
@@ -113,12 +153,47 @@ void TypeRecorder::CreateTypesForPGO(const JSPandaFile *jsPandaFile, const Metho TSTypeParser typeParser(tsManager); uint32_t methodOffset = methodLiteral->GetMethodId().GetOffset(); PGOBCInfo *bcInfo = tsManager->GetBytecodeInfoCollector()->GetPGOBCInfo(); - bcInfo->IterateInfoAndType(methodOffset, [this, &typeParser, methodOffset, &recordName, &jsPandaFile] - (const PGOBCInfo::Type type, const uint32_t bcIdx, const uint32_t cpIdx){ - GlobalTSTypeRef gt = typeParser.CreatePGOGT(jsPandaFile, recordName, methodOffset, cpIdx, type); + bcInfo->IterateInfoAndType(methodOffset, [this, &typeParser, methodOffset, &recordName, &jsPandaFile, tsManager] + (const PGOBCInfo::Type type, const uint32_t bcIdx, const uint32_t bcOffset, const uint32_t cpIdx) { + auto it = bcOffsetPGOOpTypeMap_.find(bcOffset); + if (it == bcOffsetPGOOpTypeMap_.end()) { + return; + } + + EcmaOpcode ecmaOpcode = bytecodes_->GetOpcode(pcOffsets_[bcIdx]); + if (jsPandaFile->HasTSTypes(recordName) && Bytecodes::IsCallOp(ecmaOpcode)) { + uint32_t callTargetMethodOffset = it->second.GetProfileType().GetId(); + if (callTargetMethodOffset == 0) { + return; + } + // Recompiling the application ABC changes the content, and there may be a calltargetMethodOffset + // that does not exist in ABC. Type resolution should be skipped when it does not exist, + // or parsing pandafile will fail + if (jsPandaFile->GetMethodLiteralByIndex(callTargetMethodOffset) == nullptr) { + return; + } + TypeAnnotationExtractor annoExtractor(jsPandaFile, callTargetMethodOffset); + GlobalTSTypeRef funcGT = + typeParser.CreateGT(jsPandaFile, recordName, annoExtractor.GetMethodTypeOffset()); + if (funcGT.IsDefault()) { + return; + } + GateType callTargetType = GateType(funcGT); + bcOffsetCallTargetGtMap_.emplace(bcIdx, callTargetType); + return; + } + + TypeLocation loc(jsPandaFile, methodOffset, bcIdx); + if (!tsManager->GetLiteralGT(loc).IsDefault()) { + return; + } + + GlobalTSTypeRef gt = typeParser.CreatePGOGT(TSTypeParser::PGOInfo { + jsPandaFile, recordName, methodOffset, cpIdx, it->second, type, decoder_, enableOptTrackField_ }); if (TypeNeedFilter(gt)) { return; } + CollectLiteralGT(tsManager, loc, gt); GateType gateType = GateType(gt); bcOffsetGtMap_.emplace(bcIdx, gateType); }); @@ -200,17 +275,27 @@ GateType TypeRecorder::UpdateType(const int32_t offset, const GateType &type) co return type; } +ElementsKind TypeRecorder::GetElementsKind(PGOSampleType type) const +{ + PGOHClassLayoutDesc *desc; + if (type.IsProfileType() && decoder_->GetHClassLayoutDesc(type, &desc)) { + auto elementsKind = desc->GetElementsKind(); + return elementsKind; + } + return ElementsKind::GENERIC; +} + PGOSampleType TypeRecorder::GetOrUpdatePGOType(TSManager *tsManager, int32_t offset, const GateType &type) const { if (bcOffsetPGOOpTypeMap_.find(offset) != bcOffsetPGOOpTypeMap_.end()) { const auto iter = bcOffsetPGOOpTypeMap_.at(offset); - if (iter.IsClassType()) { + if (iter.IsProfileType()) { PGOHClassLayoutDesc *desc; if (!decoder_->GetHClassLayoutDesc(iter, &desc)) { - return PGOSampleType::NoneClassType(); + return PGOSampleType::NoneProfileType(); } TSHClassGenerator generator(tsManager); - generator.UpdateTSHClassFromPGO(type, *desc); + generator.UpdateTSHClassFromPGO(type, *desc, enableOptTrackField_); } return iter; } @@ -218,6 +303,14 @@ PGOSampleType TypeRecorder::GetOrUpdatePGOType(TSManager *tsManager, int32_t off return PGOSampleType::NoneType(); } +GateType TypeRecorder::GetCallTargetType(int32_t offset) const +{ + if 
(bcOffsetCallTargetGtMap_.find(offset) != bcOffsetCallTargetGtMap_.end()) {
+        return bcOffsetCallTargetGtMap_.at(offset);
+    }
+    return GateType::AnyType();
+}
+
 PGORWOpType TypeRecorder::GetRwOpType(int32_t offset) const
 {
     if (bcOffsetPGORwTypeMap_.find(offset) != bcOffsetPGORwTypeMap_.end()) {
@@ -226,6 +319,28 @@ PGORWOpType TypeRecorder::GetRwOpType(int32_t offset) const
     return PGORWOpType();
 }
 
+ElementsKind TypeRecorder::GetElementsKind(int32_t offset) const
+{
+    if (bcOffsetPGORwTypeMap_.find(offset) == bcOffsetPGORwTypeMap_.end()) {
+        return ElementsKind::GENERIC;
+    }
+
+    PGORWOpType rwType = bcOffsetPGORwTypeMap_.at(offset);
+    PGOObjectInfo info = rwType.GetObjectInfo(0);
+    if (info.IsNone()) {
+        return ElementsKind::GENERIC;
+    }
+
+    PGOSampleType type(info.GetProfileType());
+    PGOHClassLayoutDesc *desc;
+    if (!decoder_->GetHClassLayoutDesc(type, &desc)) {
+        return ElementsKind::GENERIC;
+    }
+
+    auto elementsKind = desc->GetElementsKind();
+    return elementsKind;
+}
+
 bool TypeRecorder::TypeNeedFilter(GlobalTSTypeRef gt) const
 {
     return gt.IsDefault() || gt.IsGenericsModule();
diff --git a/ecmascript/compiler/type_recorder.h b/ecmascript/compiler/type_recorder.h
index e223deabddc2c20eb806d495bef76545f3bc9cf6..29df6a66b5bff066c9daed4a1281657606f15087 100644
--- a/ecmascript/compiler/type_recorder.h
+++ b/ecmascript/compiler/type_recorder.h
@@ -34,14 +34,17 @@ class TypeRecorder {
 public:
     TypeRecorder(const JSPandaFile *jsPandaFile, const MethodLiteral *methodLiteral,
                  TSManager *tsManager, const CString &recordName, PGOProfilerDecoder *decoder,
-                 const MethodPcInfo &methodPCInfo, const Bytecodes *bytecodes);
+                 const MethodPcInfo &methodPCInfo, const Bytecodes *bytecodes, bool enableOptTrackField);
     ~TypeRecorder() = default;
 
     GateType GetType(const int32_t offset) const;
+    ElementsKind GetElementsKind(PGOSampleType type) const;
     PGOSampleType GetOrUpdatePGOType(TSManager *tsManager, int32_t offset, const GateType &type) const;
     PGORWOpType GetRwOpType(int32_t offset) const;
+    ElementsKind GetElementsKind(int32_t offset) const;
     GateType GetArgType(const uint32_t argIndex) const;
     GateType UpdateType(const int32_t offset, const GateType &type) const;
+    GateType GetCallTargetType(int32_t offset) const;
 
     static constexpr int METHOD_ANNOTATION_THIS_TYPE_OFFSET = -2;
@@ -72,11 +75,15 @@ private:
 
     bool CheckTypeMarkForDefineFunc(uint32_t checkBc) const;
 
+    void CollectLiteralGT(TSManager *tsManager, TypeLocation &tLoc, GlobalTSTypeRef gt);
+
     std::unordered_map<int32_t, GateType> bcOffsetGtMap_ {};
+    std::unordered_map<int32_t, GateType> bcOffsetCallTargetGtMap_ {};
     std::unordered_map<int32_t, PGOSampleType> bcOffsetPGOOpTypeMap_ {};
     std::unordered_map<int32_t, PGORWOpType> bcOffsetPGORwTypeMap_ {};
     std::vector<GateType> argTypes_;
     PGOProfilerDecoder *decoder_ {nullptr};
+    bool enableOptTrackField_ {false};
     const std::vector<const uint8_t *> &pcOffsets_;
     const Bytecodes *bytecodes_ {nullptr};
 };
diff --git a/ecmascript/compiler/typed_array_stub_builder.cpp b/ecmascript/compiler/typed_array_stub_builder.cpp
index a0300e83ac4389e22f989a73739064f7aa8c1d3f..9df29670fb0b46dc1dd0e640186efe9fb2712759 100644
--- a/ecmascript/compiler/typed_array_stub_builder.cpp
+++ b/ecmascript/compiler/typed_array_stub_builder.cpp
@@ -104,7 +104,7 @@ GateRef TypedArrayStubBuilder::FastGetPropertyByIndex(GateRef glue, GateRef arra
         {
             GateRef offset = GetByteOffset(array);
             result = GetValueFromBuffer(buffer, index, offset, jsType);
-            Jump(&exit);
+            Branch(TaggedIsNumber(*result), &exit, &slowPath);
         }
     }
     Bind(&slowPath);
diff --git a/ecmascript/compiler/verifier.cpp b/ecmascript/compiler/verifier.cpp
index 775479f7d688247248d22a9c4b5873d0af4c813b..e90bc526a5492bda334fc907618a0fb716646820 100644
--- a/ecmascript/compiler/verifier.cpp
+++ b/ecmascript/compiler/verifier.cpp
@@ -428,6 +428,53 @@ void Verifier::FindFixedGates(const Circuit *circuit, const std::vector<GateRef>
     }
 }
 
+bool Verifier::RunFlowCyclesFind(const Circuit* circuit)
+{
+    GateAccessor acc(const_cast<Circuit *>(circuit));
+    circuit->AdvanceTime();
+    struct DFSStack {
+        GateRef gate;
+        GateAccessor::UseIterator it;
+        GateAccessor::UseIterator end;
+    };
+    std::stack<DFSStack> st;
+    auto root = acc.GetCircuitRoot();
+    auto rootUse = acc.Uses(root);
+    st.push({root, rootUse.begin(), rootUse.end()});
+    acc.SetVisited(root);
+    while (!st.empty()) {
+        auto& cur = st.top();
+        if (cur.it == cur.end) {
+            st.pop();
+            acc.SetFinished(cur.gate);
+            continue;
+        }
+        auto succ = *cur.it;
+        if (acc.IsLoopBackUse(cur.it)) {
+            cur.it++;
+            continue;
+        }
+        if (acc.IsNotMarked(succ)) {
+            auto succUse = acc.Uses(succ);
+            st.push({succ, succUse.begin(), succUse.end()});
+            acc.SetVisited(succ);
+        } else if (acc.IsVisited(succ)) {
+            LOG_COMPILER(ERROR) << "====================== Cycle Found ======================";
+            // collect the gates on the cycle, then print them from succ outwards;
+            // reversing the gate list (not the characters of the string) keeps
+            // multi-digit gate ids readable
+            std::vector<GateRef> cycle;
+            while (st.top().gate != succ) {
+                cycle.push_back(st.top().gate);
+                st.pop();
+            }
+            cycle.push_back(st.top().gate);
+            std::string log;
+            for (auto rit = cycle.rbegin(); rit != cycle.rend(); ++rit) {
+                log += std::to_string(acc.GetId(*rit));
+                if (rit + 1 != cycle.rend()) {
+                    log += " > ";
+                }
+            }
+            LOG_COMPILER(ERROR) << log;
+            return true;
+        }
+        cur.it++;
+    }
+    return false;
+}
+
 bool Verifier::Run(const Circuit *circuit, const std::string& methodName, bool enableLog)
 {
     if (!RunDataIntegrityCheck(circuit)) {
diff --git a/ecmascript/compiler/verifier.h b/ecmascript/compiler/verifier.h
index 450a1b04b438a996fa73ce3f9aee4b4209ecc265..c5e9416eb9b261aa479e635522bdbd2dcb49877e 100644
--- a/ecmascript/compiler/verifier.h
+++ b/ecmascript/compiler/verifier.h
@@ -61,6 +61,8 @@ public:
     static void FindFixedGates(const Circuit *circuit, const std::vector<GateRef> &bbGatesList,
                                std::vector<GateRef> &fixedGatesList);
+
+    static bool RunFlowCyclesFind(const Circuit* circuit);
     static bool Run(const Circuit *circuit, const std::string& methodName = "",
                     bool enableLog = false);
 };
diff --git a/ecmascript/containers/containers_arraylist.cpp b/ecmascript/containers/containers_arraylist.cpp
index dbf85648a46e6a5c6db326b78d18bf2c04866f9f..d1a69b14e366fe2baedfe38e132262379df44c59 100644
--- a/ecmascript/containers/containers_arraylist.cpp
+++ b/ecmascript/containers/containers_arraylist.cpp
@@ -94,6 +94,7 @@ JSTaggedValue ContainersArrayList::Insert(EcmaRuntimeCallInfo *argv)
     JSHandle<JSTaggedValue> index = GetCallArg(argv, 1);
     if (!index->IsInteger()) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, index);
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
         THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception());
@@ -218,6 +219,7 @@ JSTaggedValue ContainersArrayList::IncreaseCapacityTo(EcmaRuntimeCallInfo *argv)
     JSHandle<JSTaggedValue> newCapacity = GetCallArg(argv, 0);
     if (!newCapacity->IsInteger()) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, newCapacity);
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"newCapacity\" must be number.
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -366,6 +368,7 @@ JSTaggedValue ContainersArrayList::RemoveByIndex(EcmaRuntimeCallInfo *argv) JSHandle value = GetCallArg(argv, 0); if (!value->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -429,6 +432,7 @@ JSTaggedValue ContainersArrayList::RemoveByRange(EcmaRuntimeCallInfo *argv) if (!startIndex->IsInteger()) { std::ostringstream oss; JSHandle result = JSTaggedValue::ToString(thread, startIndex); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"fromIndex\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -437,6 +441,7 @@ JSTaggedValue ContainersArrayList::RemoveByRange(EcmaRuntimeCallInfo *argv) if (!endIndex->IsInteger()) { std::ostringstream oss; JSHandle result = JSTaggedValue::ToString(thread, endIndex); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"toIndex\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -470,6 +475,7 @@ JSTaggedValue ContainersArrayList::ReplaceAllElements(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -528,6 +534,7 @@ JSTaggedValue ContainersArrayList::SubArrayList(EcmaRuntimeCallInfo *argv) if (!value1->IsInteger()) { std::ostringstream oss; JSHandle result = JSTaggedValue::ToString(thread, value1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"fromIndex\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -536,6 +543,7 @@ JSTaggedValue ContainersArrayList::SubArrayList(EcmaRuntimeCallInfo *argv) if (!value2->IsInteger()) { std::ostringstream oss; JSHandle result = JSTaggedValue::ToString(thread, value2); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"toIndex\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -567,6 +575,7 @@ JSTaggedValue ContainersArrayList::Sort(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsUndefined() && !callbackFnHandle->IsCallable() && !callbackFnHandle->IsNull()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"comparator\" must be callable. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -677,6 +686,7 @@ JSTaggedValue ContainersArrayList::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_deque.cpp b/ecmascript/containers/containers_deque.cpp index 40e930b46341fd498f50c7942a91df0eed140dc8..6d86f6a52a7455609e35590aea01fc3cef20e25c 100644 --- a/ecmascript/containers/containers_deque.cpp +++ b/ecmascript/containers/containers_deque.cpp @@ -237,6 +237,7 @@ JSTaggedValue ContainersDeque::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_errors.cpp b/ecmascript/containers/containers_errors.cpp index 30aa038f357896918abed90ed2f845e300baaac2..d51e499097fa33d1893694c9d2001c50d72944ec 100644 --- a/ecmascript/containers/containers_errors.cpp +++ b/ecmascript/containers/containers_errors.cpp @@ -25,8 +25,10 @@ JSTaggedValue ContainerError::BusinessError(JSThread *thread, int32_t errorCode, JSHandle name = factory->NewFromUtf8("name"); JSHandle value = factory->NewFromUtf8("BusinessError"); JSObject::CreateDataPropertyOrThrow(thread, error, JSHandle::Cast(key), code); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSObject::CreateDataPropertyOrThrow(thread, error, JSHandle::Cast(name), JSHandle::Cast(value)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return error.GetTaggedValue(); } } // namespace panda::ecmascript::containers \ No newline at end of file diff --git a/ecmascript/containers/containers_hashmap.cpp b/ecmascript/containers/containers_hashmap.cpp index 59f2c5f6f93c287f9d894354927be72081cadc3c..c83fb67d60a424f5572e615f4f98ad12efd5251d 100644 --- a/ecmascript/containers/containers_hashmap.cpp +++ b/ecmascript/containers/containers_hashmap.cpp @@ -138,6 +138,7 @@ JSTaggedValue ContainersHashMap::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -218,6 +219,7 @@ JSTaggedValue ContainersHashMap::SetAll(EcmaRuntimeCallInfo *argv) obj = JSHandle(thread, JSHandle::Cast(obj)->GetTarget()); } else { JSHandle result = JSTaggedValue::ToString(thread, obj); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"map\" must be HashMap. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_hashset.cpp b/ecmascript/containers/containers_hashset.cpp index c7f150b85a1182737a4f661914d33cf7509c995b..744d7c1cf459a68ebb228ed7989780aac270cf86 100644 --- a/ecmascript/containers/containers_hashset.cpp +++ b/ecmascript/containers/containers_hashset.cpp @@ -248,6 +248,7 @@ JSTaggedValue ContainersHashSet::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_lightweightmap.cpp b/ecmascript/containers/containers_lightweightmap.cpp index 02055edbc7c180ac8fdb16e491f1c78adf20f217..4d2535897ea36275d8f046b7eb9342bfff97735d 100644 --- a/ecmascript/containers/containers_lightweightmap.cpp +++ b/ecmascript/containers/containers_lightweightmap.cpp @@ -103,6 +103,7 @@ JSTaggedValue ContainersLightWeightMap::HasAll(EcmaRuntimeCallInfo *argv) lightWeightMap = JSHandle(thread, JSHandle::Cast(lightWeightMap)->GetTarget()); } else { JSHandle result = JSTaggedValue::ToString(thread, lightWeightMap.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"map\" must be LightWeightMap. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -180,6 +181,7 @@ JSTaggedValue ContainersLightWeightMap::IncreaseCapacityTo(EcmaRuntimeCallInfo * if (!index->IsInt()) { JSHandle result = JSTaggedValue::ToString(thread, index); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"minimumCapacity\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -317,6 +319,7 @@ JSTaggedValue ContainersLightWeightMap::GetKeyAt(EcmaRuntimeCallInfo *argv) if (!index->IsInt()) { JSHandle result = JSTaggedValue::ToString(thread, index); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -365,6 +368,7 @@ JSTaggedValue ContainersLightWeightMap::SetAll(EcmaRuntimeCallInfo *argv) lightWeightMap = JSHandle(thread, JSHandle::Cast(lightWeightMap)->GetTarget()); } else { JSHandle result = JSTaggedValue::ToString(thread, lightWeightMap.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"map\" must be LightWeightMap. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -445,6 +449,7 @@ JSTaggedValue ContainersLightWeightMap::RemoveAt(EcmaRuntimeCallInfo *argv) JSHandle index(GetCallArg(argv, 0)); if (!index->IsInt()) { JSHandle result = JSTaggedValue::ToString(thread, index); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -499,6 +504,7 @@ JSTaggedValue ContainersLightWeightMap::SetValueAt(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 1)); if (!index->IsInt()) { JSHandle result = JSTaggedValue::ToString(thread, index); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -530,6 +536,7 @@ JSTaggedValue ContainersLightWeightMap::ForEach(EcmaRuntimeCallInfo *argv) JSHandle func(GetCallArg(argv, 0)); if (!func->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, func.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -541,9 +548,9 @@ JSTaggedValue ContainersLightWeightMap::ForEach(EcmaRuntimeCallInfo *argv) JSMutableHandle keys(thread, tmap->GetKeys()); JSMutableHandle values(thread, tmap->GetValues()); - int index = 0; - int32_t length = tmap->GetSize(); - const int32_t argsLength = 3; + uint32_t index = 0; + uint32_t length = tmap->GetSize(); + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); while (index < length) { // ignore the hash value is required to determine the true index @@ -606,6 +613,7 @@ JSTaggedValue ContainersLightWeightMap::GetValueAt(EcmaRuntimeCallInfo *argv) JSHandle index(GetCallArg(argv, 0)); if (!index->IsInt()) { JSHandle result = JSTaggedValue::ToString(thread, index); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_lightweightset.cpp b/ecmascript/containers/containers_lightweightset.cpp index f7e2fecb9295b8b37f73b389341fb1ee00fa2730..b1a8b8bdade489965bcb50a8f313ab872e79200a 100644 --- a/ecmascript/containers/containers_lightweightset.cpp +++ b/ecmascript/containers/containers_lightweightset.cpp @@ -96,6 +96,7 @@ JSTaggedValue ContainersLightWeightSet::AddAll(EcmaRuntimeCallInfo *argv) value = JSHandle(thread, JSHandle::Cast(value)->GetTarget()); } else { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"set\" must be LightWeightSet. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -145,6 +146,7 @@ JSTaggedValue ContainersLightWeightSet::GetValueAt(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 0)); if (!value->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -177,6 +179,7 @@ JSTaggedValue ContainersLightWeightSet::HasAll(EcmaRuntimeCallInfo *argv) value = JSHandle(thread, JSHandle::Cast(value)->GetTarget()); } else { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"set\" must be LightWeightSet. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -268,6 +271,7 @@ JSTaggedValue ContainersLightWeightSet::IncreaseCapacityTo(EcmaRuntimeCallInfo * JSHandle value(GetCallArg(argv, 0)); if (!value->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"minimumCapacity\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -364,6 +368,7 @@ JSTaggedValue ContainersLightWeightSet::ForEach(EcmaRuntimeCallInfo *argv) if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -435,6 +440,7 @@ JSTaggedValue ContainersLightWeightSet::RemoveAt(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 0)); if (!value->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_linked_list.cpp b/ecmascript/containers/containers_linked_list.cpp index 5a50e7f49ac2c3276bc692b40c7516f256145828..eb8abdec66234c6ced0d568f46c998e23560e1af 100644 --- a/ecmascript/containers/containers_linked_list.cpp +++ b/ecmascript/containers/containers_linked_list.cpp @@ -178,6 +178,7 @@ JSTaggedValue ContainersLinkedList::Insert(EcmaRuntimeCallInfo *argv) if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -275,6 +276,7 @@ JSTaggedValue ContainersLinkedList::Get(EcmaRuntimeCallInfo *argv) JSHandle index = GetCallArg(argv, 0); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -344,6 +346,7 @@ JSTaggedValue ContainersLinkedList::RemoveByIndex(EcmaRuntimeCallInfo *argv) JSHandle index = GetCallArg(argv, 0); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -488,6 +491,7 @@ JSTaggedValue ContainersLinkedList::Set(EcmaRuntimeCallInfo *argv) JSHandle element = GetCallArg(argv, 1); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -540,6 +544,7 @@ JSTaggedValue ContainersLinkedList::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle(GetCallArg(argv, 0)); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -549,9 +554,9 @@ JSTaggedValue ContainersLinkedList::ForEach(EcmaRuntimeCallInfo *argv) JSHandle thisArgHandle = GetCallArg(argv, 1); JSHandle linkedList = JSHandle::Cast(thisHandle); JSHandle doubleList(thread, linkedList->GetDoubleList()); - int length = linkedList->Length(); + uint32_t length = linkedList->Length(); - int index = 0; + uint32_t index = 0; const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); int valueNode = TaggedDoubleList::ELEMENTS_START_INDEX; diff --git a/ecmascript/containers/containers_list.cpp b/ecmascript/containers/containers_list.cpp index 43aca99f2ef187887782f288b4fb227c5b416e9e..f87d5f5d388a5c3eb092c3045ccf16358d1d8ce1 100644 --- a/ecmascript/containers/containers_list.cpp +++ b/ecmascript/containers/containers_list.cpp @@ -94,6 +94,7 @@ JSTaggedValue ContainersList::Insert(EcmaRuntimeCallInfo *argv) JSHandle index = GetCallArg(argv, 1); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -206,6 +207,7 @@ JSTaggedValue ContainersList::Get(EcmaRuntimeCallInfo *argv) JSHandle index = GetCallArg(argv, 0); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -276,6 +278,7 @@ JSTaggedValue ContainersList::Set(EcmaRuntimeCallInfo *argv) JSHandle element = GetCallArg(argv, 1); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -309,6 +312,7 @@ JSTaggedValue ContainersList::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle(GetCallArg(argv, 0)); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -319,10 +323,10 @@ JSTaggedValue ContainersList::ForEach(EcmaRuntimeCallInfo *argv) JSHandle thisArgHandle = GetCallArg(argv, 1); JSHandle list = JSHandle::Cast(thisHandle); JSHandle singleList(thread, list->GetSingleList()); - int length = list->Length(); + uint32_t length = list->Length(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); - int index = 0; + uint32_t index = 0; const uint32_t argsLength = 3; // 3: «kValue, k, O» int valueNode = TaggedSingleList::ELEMENTS_START_INDEX; while (index < length) { @@ -379,6 +383,7 @@ JSTaggedValue ContainersList::RemoveByIndex(EcmaRuntimeCallInfo *argv) JSHandle index = GetCallArg(argv, 0); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -429,6 +434,7 @@ JSTaggedValue ContainersList::ReplaceAllElements(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -486,6 +492,7 @@ JSTaggedValue ContainersList::Sort(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsUndefined() && !callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"comparator\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -545,6 +552,7 @@ JSTaggedValue ContainersList::GetSubList(EcmaRuntimeCallInfo *argv) if (!fromIndex->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, fromIndex.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"fromIndex\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -555,6 +563,7 @@ JSTaggedValue ContainersList::GetSubList(EcmaRuntimeCallInfo *argv) if (!toIndex->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, toIndex.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"toIndex\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/containers/containers_plainarray.cpp b/ecmascript/containers/containers_plainarray.cpp index 922b450fc0b4fdbeae7dc79c495e02e8f15345a5..d665bf76b788ee2e92441dc2c9c8c964f79671df 100644 --- a/ecmascript/containers/containers_plainarray.cpp +++ b/ecmascript/containers/containers_plainarray.cpp @@ -70,6 +70,7 @@ JSTaggedValue ContainersPlainArray::Add(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 1)); if (!key->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -140,6 +141,7 @@ JSTaggedValue ContainersPlainArray::Has(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 0)); if (!value->IsNumber()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -170,6 +172,7 @@ JSTaggedValue ContainersPlainArray::Get(EcmaRuntimeCallInfo *argv) JSHandle key(GetCallArg(argv, 0)); if (!key->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be number. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -220,6 +223,7 @@ JSTaggedValue ContainersPlainArray::ForEach(EcmaRuntimeCallInfo *argv) JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -266,6 +270,7 @@ JSTaggedValue ContainersPlainArray::GetIndexOfKey(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 0)); if (!value->IsNumber()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -336,6 +341,7 @@ JSTaggedValue ContainersPlainArray::GetKeyAt(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 0)); if (!value->IsNumber()) { JSHandle result = JSTaggedValue::ToString(thread, value.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -365,6 +371,7 @@ JSTaggedValue ContainersPlainArray::Remove(EcmaRuntimeCallInfo *argv) JSHandle key(GetCallArg(argv, 0)); if (!key->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -393,6 +400,7 @@ JSTaggedValue ContainersPlainArray::RemoveAt(EcmaRuntimeCallInfo *argv) JSHandle index(GetCallArg(argv, 0)); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -422,6 +430,7 @@ JSTaggedValue ContainersPlainArray::RemoveRangeFrom(EcmaRuntimeCallInfo *argv) JSHandle valueSize(GetCallArg(argv, 1)); if (!valueIndex->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, valueIndex.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -429,6 +438,7 @@ JSTaggedValue ContainersPlainArray::RemoveRangeFrom(EcmaRuntimeCallInfo *argv) } if (!valueSize->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, valueSize.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"size\" must be number. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -461,6 +471,7 @@ JSTaggedValue ContainersPlainArray::SetValueAt(EcmaRuntimeCallInfo *argv) JSHandle value(GetCallArg(argv, 1)); if (!index->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, index.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -490,6 +501,7 @@ JSTaggedValue ContainersPlainArray::GetValueAt(EcmaRuntimeCallInfo *argv) JSHandle idx(GetCallArg(argv, 0)); if (!idx->IsInteger()) { JSHandle result = JSTaggedValue::ToString(thread, idx.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"index\" must be number. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -516,7 +528,7 @@ JSTaggedValue ContainersPlainArray::GetSize(EcmaRuntimeCallInfo *argv) THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); } } - int32_t length = JSHandle::Cast(self)->GetSize(); + uint32_t length = JSHandle::Cast(self)->GetSize(); return JSTaggedValue(length); } } // namespace panda::ecmascript::containers diff --git a/ecmascript/containers/containers_private.cpp b/ecmascript/containers/containers_private.cpp index 44faffd6909b6229a63cf3fa0491a77080db541a..26f1addc39d528fa5127d42b9a41b1b113ed61bf 100644 --- a/ecmascript/containers/containers_private.cpp +++ b/ecmascript/containers/containers_private.cpp @@ -529,6 +529,7 @@ JSHandle ContainersPrivate::InitializeTreeMap(JSThread *thread) JSHandle entries = globalConst->GetHandledEntriesString(); JSHandle entriesFunc = JSObject::GetMethod(thread, JSHandle::Cast(mapFuncPrototype), entries); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); PropertyDescriptor descriptor(thread, entriesFunc, false, false, false); JSObject::DefineOwnProperty(thread, mapFuncPrototype, iteratorSymbol, descriptor); // length @@ -601,6 +602,7 @@ JSHandle ContainersPrivate::InitializeTreeSet(JSThread *thread) JSHandle values(thread, globalConst->GetValuesString()); JSHandle valuesFunc = JSObject::GetMethod(thread, JSHandle::Cast(setFuncPrototype), values); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); PropertyDescriptor descriptor(thread, valuesFunc, false, false, false); JSObject::DefineOwnProperty(thread, setFuncPrototype, iteratorSymbol, descriptor); // length @@ -1139,6 +1141,7 @@ JSHandle ContainersPrivate::InitializeHashMap(JSThread *thread) JSHandle entries(factory->NewFromASCII("entries")); JSHandle entriesFunc = JSObject::GetMethod(thread, JSHandle::Cast(hashMapFuncPrototype), entries); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); PropertyDescriptor descriptor(thread, entriesFunc, false, false, false); JSObject::DefineOwnProperty(thread, hashMapFuncPrototype, iteratorSymbol, descriptor); @@ -1205,6 +1208,7 @@ JSHandle ContainersPrivate::InitializeHashSet(JSThread *thread) JSHandle values(thread, globalConst->GetValuesString()); JSHandle valuesFunc = JSObject::GetMethod(thread, JSHandle::Cast(hashSetFuncPrototype), values); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); PropertyDescriptor descriptor(thread, valuesFunc, false, false, false); 
 JSObject::DefineOwnProperty(thread, hashSetFuncPrototype, iteratorSymbol, descriptor);
diff --git a/ecmascript/containers/containers_queue.cpp b/ecmascript/containers/containers_queue.cpp
index b79d938387e4196305534d27bfbf2db57db2ac13..70db1bfdd72ec84d0ac4165722ba0a94cfd52fb3 100644
--- a/ecmascript/containers/containers_queue.cpp
+++ b/ecmascript/containers/containers_queue.cpp
@@ -144,6 +144,7 @@ JSTaggedValue ContainersQueue::ForEach(EcmaRuntimeCallInfo *argv)
     // If IsCallable(callbackfn) is false, throw a TypeError exception.
     if (!callbackFnHandle->IsCallable()) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue());
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
@@ -161,7 +162,7 @@ JSTaggedValue ContainersQueue::ForEach(EcmaRuntimeCallInfo *argv)
             JSHandle<JSTaggedValue>(thread, queue->Get(thread, index));
         index = queue->GetNextPosition(index);
         key.Update(JSTaggedValue(k));
-        const int32_t argsLength = 3;
+        const uint32_t argsLength = 3;
         EcmaRuntimeCallInfo *info =
             EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength);
         RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
diff --git a/ecmascript/containers/containers_stack.cpp b/ecmascript/containers/containers_stack.cpp
index 8ec40a0e7c5b2e6f89f34c064580e9fb858103bc..6b8f0f68171e6994d6026cfe948358b0c056cde4 100644
--- a/ecmascript/containers/containers_stack.cpp
+++ b/ecmascript/containers/containers_stack.cpp
@@ -186,12 +186,13 @@ JSTaggedValue ContainersStack::ForEach(EcmaRuntimeCallInfo *argv)
     }
     JSHandle<JSAPIStack> stack = JSHandle<JSAPIStack>::Cast(thisHandle);
-    int len = stack->GetSize();
+    uint32_t len = stack->GetSize();
     RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     JSHandle<JSTaggedValue> callbackFnHandle = GetCallArg(argv, 0);
     if (!callbackFnHandle->IsCallable()) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, callbackFnHandle.GetTaggedValue());
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"callbackfn\" must be callable. Received value is: " + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
@@ -200,7 +201,7 @@ JSTaggedValue ContainersStack::ForEach(EcmaRuntimeCallInfo *argv)
     JSHandle<JSTaggedValue> thisArgHandle = GetCallArg(argv, 1);
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
-    int k = 0;
+    uint32_t k = 0;
     while (k < len + 1) {
         JSTaggedValue kValue = stack->Get(k);
         EcmaRuntimeCallInfo *info =
@@ -253,7 +254,7 @@ JSTaggedValue ContainersStack::GetLength(EcmaRuntimeCallInfo *argv)
         }
     }
-    int len = (JSHandle<JSAPIStack>::Cast(self))->GetSize();
+    uint32_t len = (JSHandle<JSAPIStack>::Cast(self))->GetSize();
     return JSTaggedValue(len + 1);
 }
 }  // namespace panda::ecmascript::containers
diff --git a/ecmascript/containers/containers_treemap.cpp b/ecmascript/containers/containers_treemap.cpp
index 3a7159dcfa9584e8d51fba6b01390ce08d7e85ff..ea356325774c52c76c425b6ef26cbed91c26521b 100644
--- a/ecmascript/containers/containers_treemap.cpp
+++ b/ecmascript/containers/containers_treemap.cpp
@@ -58,6 +58,7 @@ JSTaggedValue ContainersTreeMap::TreeMapConstructor(EcmaRuntimeCallInfo *argv)
     }
     if (!compareFn->IsCallable()) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, compareFn.GetTaggedValue());
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"comparefn\" must be callable. Received value is: " + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
@@ -252,6 +253,7 @@ JSTaggedValue ContainersTreeMap::SetAll(EcmaRuntimeCallInfo *argv)
         obj = JSHandle<JSTaggedValue>(thread, JSHandle<JSProxy>::Cast(obj)->GetTarget());
     } else {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, obj.GetTaggedValue());
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"map\" must be TreeMap. Received value is: " + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
@@ -415,6 +417,7 @@ JSTaggedValue ContainersTreeMap::ForEach(EcmaRuntimeCallInfo *argv)
     JSHandle<JSTaggedValue> func(GetCallArg(argv, 0));
     if (!func->IsCallable()) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, func.GetTaggedValue());
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
        CString errorMsg = "The type of \"callbackfn\" must be callable. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -424,11 +427,11 @@ JSTaggedValue ContainersTreeMap::ForEach(EcmaRuntimeCallInfo *argv) JSHandle thisArg = GetCallArg(argv, 1); JSHandle tmap = JSHandle::Cast(self); JSMutableHandle iteratedMap(thread, tmap->GetTreeMap()); - int elements = iteratedMap->NumberOfElements(); + uint32_t elements = iteratedMap->NumberOfElements(); JSHandle entries = TaggedTreeMap::GetArrayFromMap(thread, iteratedMap); - int index = 0; + uint32_t index = 0; size_t length = entries->GetLength(); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle value(thread, JSTaggedValue::Undefined()); diff --git a/ecmascript/containers/containers_treeset.cpp b/ecmascript/containers/containers_treeset.cpp index 6c8f4ba884a2721632a645dbcdeb7224a24391a9..28a0d34cd7e474e9696f267369264af431295f20 100644 --- a/ecmascript/containers/containers_treeset.cpp +++ b/ecmascript/containers/containers_treeset.cpp @@ -57,6 +57,7 @@ JSTaggedValue ContainersTreeSet::TreeSetConstructor(EcmaRuntimeCallInfo *argv) } if (!compareFn->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, compareFn.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"comparefn\" must be callable. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -223,6 +224,7 @@ JSTaggedValue ContainersTreeSet::GetLowerValue(EcmaRuntimeCallInfo *argv) JSHandle key = GetCallArg(argv, 0); if (!key->IsString() && !key->IsNumber()) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be not null. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -253,6 +255,7 @@ JSTaggedValue ContainersTreeSet::GetHigherValue(EcmaRuntimeCallInfo *argv) JSHandle key = GetCallArg(argv, 0); if (!key->IsString() && !key->IsNumber()) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be not null. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -365,6 +368,7 @@ JSTaggedValue ContainersTreeSet::ForEach(EcmaRuntimeCallInfo *argv) JSHandle func(GetCallArg(argv, 0)); if (!func->IsCallable()) { JSHandle result = JSTaggedValue::ToString(thread, func.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"callbackfn\" must be callable. 
Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -374,11 +378,11 @@ JSTaggedValue ContainersTreeSet::ForEach(EcmaRuntimeCallInfo *argv) JSHandle thisArg = GetCallArg(argv, 1); JSHandle tset = JSHandle::Cast(self); JSMutableHandle iteratedSet(thread, tset->GetTreeSet()); - int elements = iteratedSet->NumberOfElements(); + uint32_t elements = iteratedSet->NumberOfElements(); JSHandle entries = TaggedTreeSet::GetArrayFromSet(thread, iteratedSet); - int index = 0; + uint32_t index = 0; size_t length = entries->GetLength(); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); JSMutableHandle key(thread, JSTaggedValue::Undefined()); while (index < elements) { diff --git a/ecmascript/containers/containers_vector.cpp b/ecmascript/containers/containers_vector.cpp index c781921d28488d25fe2a0868a845b67d7dd0501e..948f603d3fa6d7d46ed175aaf5fa1c2231dc601b 100644 --- a/ecmascript/containers/containers_vector.cpp +++ b/ecmascript/containers/containers_vector.cpp @@ -509,7 +509,7 @@ JSTaggedValue ContainersVector::GetSize(EcmaRuntimeCallInfo *argv) } } - int32_t length = JSHandle::Cast(self)->GetSize(); + uint32_t length = JSHandle::Cast(self)->GetSize(); return JSTaggedValue(length); } @@ -684,7 +684,7 @@ JSTaggedValue ContainersVector::CopyToArray(EcmaRuntimeCallInfo *argv) JSHandle newArrayElement = factory->NewAndCopyTaggedArray(vectorElements, vectorLength, vectorLength); array->SetElements(thread, newArrayElement); - array->SetLength(thread, JSTaggedValue(vectorLength)); + array->SetLength(vectorLength); } RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSTaggedValue::True(); @@ -710,7 +710,7 @@ JSTaggedValue ContainersVector::ConvertToArray(EcmaRuntimeCallInfo *argv) auto factory = thread->GetEcmaVM()->GetFactory(); JSHandle array = factory->NewJSArray(); - int32_t length = vector->GetSize(); + uint32_t length = vector->GetSize(); array->SetArrayLength(thread, length); JSHandle srcElements(thread, vector->GetElements()); diff --git a/ecmascript/date_parse.cpp b/ecmascript/date_parse.cpp index 61178d2221497995a3fd5dc7b65fc96ec541c978..7f37c5af436636eec61011f88cf32cb0e982acd2 100644 --- a/ecmascript/date_parse.cpp +++ b/ecmascript/date_parse.cpp @@ -236,7 +236,7 @@ bool DateParse::ParseLegacyDates(DateProxy *proxy, DayValue *dayValue, TimeValue return false; } int timeNum = timeNumUnit.GetValue(); - int numLength = timeNumUnit.GetLength(); + uint32_t numLength = timeNumUnit.GetLength(); proxy->NextDate(); // parse +hh:mm if (proxy->GetDate().IsSymbol(':')) { diff --git a/ecmascript/date_parse.h b/ecmascript/date_parse.h index 266e8121ffc44ebd37d196a8f270bffa03f00f97..86b9fcba559e1aa56f8fdb9f4b5fb637ad696baf 100644 --- a/ecmascript/date_parse.h +++ b/ecmascript/date_parse.h @@ -202,7 +202,7 @@ private: { return type_ == DATE_INVALID_WORD; } - + bool IsMonth() const { return type_ == DATE_MONTH; @@ -301,7 +301,7 @@ private: return type_; } - int GetLength() const + uint32_t GetLength() const { return len_; } @@ -309,9 +309,9 @@ private: explicit DateUnit(DateValueType type, int value, int len) : type_(type), value_(value), len_(len) {} DateValueType type_; int value_; - int len_; + uint32_t len_; }; - + class DateProxy { public: explicit DateProxy(StringReader *str) : str_(str), date_(Read()) {} @@ -422,7 +422,7 @@ private: static int NormMilliSecond(DateUnit sec) { - int len = sec.GetLength(); + uint32_t len = 
sec.GetLength(); int value = sec.GetValue(); // 3: "sss" norm length if (len == 3) { diff --git a/ecmascript/debugger/debugger_api.cpp b/ecmascript/debugger/debugger_api.cpp index 03b26c505f664ee9729bee27a89b745f6e50daa7..d633f7e3fa5ad24021591c4687fd8099d879488a 100644 --- a/ecmascript/debugger/debugger_api.cpp +++ b/ecmascript/debugger/debugger_api.cpp @@ -40,6 +40,8 @@ #include "ecmascript/js_api/js_api_tree_set.h" #include "ecmascript/js_api/js_api_lightweightmap.h" #include "ecmascript/js_api/js_api_lightweightset.h" +#include "ecmascript/jobs/micro_job_queue.h" +#include "ecmascript/frames.h" namespace panda::ecmascript::tooling { using panda::ecmascript::base::ALLOW_BINARY; @@ -106,6 +108,22 @@ bool DebuggerApi::StackWalker(const EcmaVM *ecmaVm, std::functionGetJSThread()); + for (; frameHandler.HasFrame(); frameHandler.PrevJSFrame()) { + if (frameHandler.IsEntryFrame()) { + continue; + } + if (frameHandler.IsBuiltinFrame()) { + break; + } + ++count; + } + return count; +} + uint32_t DebuggerApi::GetBytecodeOffset(const EcmaVM *ecmaVm) { return FrameHandler(ecmaVm->GetJSThread()).GetBytecodeOffset(); @@ -179,11 +197,12 @@ int32_t DebuggerApi::GetVregIndex(const FrameHandler *frameHandler, std::string_ return -1; } auto table = extractor->GetLocalVariableTable(method->GetMethodId()); - auto iter = table.find(name.data()); - if (iter == table.end()) { - return -1; + for (auto iter = table.begin(); iter != table.end(); iter++) { + if (iter->name == name.data()) { + return iter->regNumber; + } } - return iter->second; + return -1; } Local DebuggerApi::GetVRegValue(const EcmaVM *ecmaVm, @@ -375,18 +394,22 @@ JSTaggedValue DebuggerApi::GetCurrentModule(const EcmaVM *ecmaVm) JSHandle DebuggerApi::GetImportModule(const EcmaVM *ecmaVm, const JSHandle ¤tModule, std::string &name) { + JSThread *thread = ecmaVm->GetJSThread(); + JSMutableHandle importModule(thread, thread->GlobalConstants()->GetUndefined()); + if (!currentModule->IsSourceTextModule()) { + return importModule; + } + JSTaggedValue importEntries = SourceTextModule::Cast(currentModule->GetTaggedObject())->GetImportEntries(); if (importEntries.IsUndefined()) { - return currentModule; + return importModule; } - JSThread *thread = ecmaVm->GetJSThread(); JSHandle importArray(thread, TaggedArray::Cast(importEntries.GetTaggedObject())); size_t importEntriesLen = importArray->GetLength(); JSHandle starString = thread->GlobalConstants()->GetHandledStarString(); JSMutableHandle ee(thread, thread->GlobalConstants()->GetUndefined()); JSMutableHandle environment(thread, thread->GlobalConstants()->GetUndefined()); - JSMutableHandle importModule(thread, thread->GlobalConstants()->GetUndefined()); for (size_t idx = 0; idx < importEntriesLen; idx++) { ee.Update(importArray->Get(idx)); JSTaggedValue localName = ee->GetLocalName(); @@ -407,12 +430,15 @@ JSHandle DebuggerApi::GetImportModule(const EcmaVM *ecmaVm, return importModule; } } - return currentModule; + return importModule; } int32_t DebuggerApi::GetModuleVariableIndex(const EcmaVM *ecmaVm, const JSHandle ¤tModule, std::string &name) { + if (!currentModule->IsSourceTextModule()) { + return -1; + } JSTaggedValue dictionary = SourceTextModule::Cast(currentModule->GetTaggedObject())->GetNameDictionary(); if (dictionary.IsUndefined()) { return -1; @@ -445,34 +471,37 @@ int32_t DebuggerApi::GetModuleVariableIndex(const EcmaVM *ecmaVm, const JSHandle int32_t DebuggerApi::GetRequestModuleIndex(const EcmaVM *ecmaVm, JSTaggedValue moduleRequest, const JSHandle ¤tModule) { + if 
(!currentModule->IsSourceTextModule()) { + return -1; + } JSThread *thread = ecmaVm->GetJSThread(); JSHandle module(thread, SourceTextModule::Cast(currentModule->GetTaggedObject())); JSHandle required(thread, moduleRequest); - JSHandle requiredModule = JSHandle::Cast( - SourceTextModule::HostResolveImportedModuleWithMerge(thread, module, required)); - JSTaggedValue requireModule = requiredModule->GetEcmaModuleRecordName(); JSHandle requestedModules(thread, module->GetRequestedModules()); - int32_t requestedModulesLen = static_cast(requestedModules->GetLength()); - for (int32_t idx = 0; idx < requestedModulesLen; idx++) { + uint32_t requestedModulesLen = requestedModules->GetLength(); + for (uint32_t idx = 0; idx < requestedModulesLen; idx++) { JSTaggedValue requestModule = requestedModules->Get(idx); - if (JSTaggedValue::SameValue(requireModule, requestModule)) { + if (JSTaggedValue::SameValue(required.GetTaggedValue(), requestModule)) { return idx; } } + LOG_ECMA(FATAL) << "this branch is unreachable"; return -1; } -Local DebuggerApi::GetModuleValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, - std::string &name) +Local DebuggerApi::GetExportVariableValue(const EcmaVM *ecmaVm, + const JSHandle ¤tModule, std::string &name) { Local result; - JSHandle module = GetImportModule(ecmaVm, currentModule, name); - int32_t index = GetModuleVariableIndex(ecmaVm, module, name); + if (!currentModule->IsSourceTextModule()) { + return result; + } + int32_t index = GetModuleVariableIndex(ecmaVm, currentModule, name); if (index == -1) { return result; } - JSTaggedValue dictionary = SourceTextModule::Cast(module->GetTaggedObject())->GetNameDictionary(); + JSTaggedValue dictionary = SourceTextModule::Cast(currentModule->GetTaggedObject())->GetNameDictionary(); if (dictionary.IsUndefined()) { return result; } @@ -487,16 +516,18 @@ Local DebuggerApi::GetModuleValue(const EcmaVM *ecmaVm, const JSHand return result; } -bool DebuggerApi::SetModuleValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, - std::string &name, Local value) +bool DebuggerApi::SetExportVariableValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, + std::string &name, Local value) { - JSHandle module = GetImportModule(ecmaVm, currentModule, name); - int32_t index = GetModuleVariableIndex(ecmaVm, module, name); + if (!currentModule->IsSourceTextModule()) { + return false; + } + int32_t index = GetModuleVariableIndex(ecmaVm, currentModule, name); if (index == -1) { return false; } - JSTaggedValue dictionary = SourceTextModule::Cast(module->GetTaggedObject())->GetNameDictionary(); + JSTaggedValue dictionary = SourceTextModule::Cast(currentModule->GetTaggedObject())->GetNameDictionary(); if (dictionary.IsUndefined()) { return false; } @@ -506,13 +537,56 @@ bool DebuggerApi::SetModuleValue(const EcmaVM *ecmaVm, const JSHandleSet(thread, index, curValue); + return true; } - return true; + return false; +} + +Local DebuggerApi::GetModuleValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, + std::string &name) +{ + Local result; + if (!currentModule->IsSourceTextModule()) { + return result; + } + // Get variable from local export + result = GetExportVariableValue(ecmaVm, currentModule, name); + if (!result.IsEmpty()) { + return result; + } + // Get variable from import module + JSHandle importModule = GetImportModule(ecmaVm, currentModule, name); + result = GetExportVariableValue(ecmaVm, importModule, name); + return result; +} + +bool DebuggerApi::SetModuleValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, + std::string &name, Local value) +{ 
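+    // Sketch of the lookup order (comment only, mirroring GetModuleValue above):
+    // the name is first tried as a local export of currentModule; only if that
+    // fails is it resolved as an import, writing through to the exporting
+    // module's name-dictionary slot.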
+ bool result; + if (!currentModule->IsSourceTextModule()) { + return false; + } + // Set local export variable + result = SetExportVariableValue(ecmaVm, currentModule, name, value); + if (result == true) { + return result; + } + // Set import module variable + JSHandle importModule = GetImportModule(ecmaVm, currentModule, name); + result = SetExportVariableValue(ecmaVm, importModule, name, value); + if (result == true) { + return result; + } + return false; } void DebuggerApi::InitializeExportVariables(const EcmaVM *ecmaVm, Local &moduleObj, const JSHandle ¤tModule) { + if (!currentModule->IsSourceTextModule()) { + return; + } JSTaggedValue localExportEntries = SourceTextModule::Cast( currentModule->GetTaggedObject())->GetLocalExportEntries(); if (localExportEntries.IsUndefined()) { @@ -543,6 +617,9 @@ void DebuggerApi::InitializeExportVariables(const EcmaVM *ecmaVm, Local &moduleObj, const JSHandle ¤tModule, bool isImportStar) { + if (!currentModule->IsSourceTextModule()) { + return; + } JSTaggedValue dictionary = SourceTextModule::Cast(currentModule->GetTaggedObject())->GetNameDictionary(); if (dictionary.IsUndefined()) { InitializeExportVariables(ecmaVm, moduleObj, currentModule); @@ -592,6 +669,9 @@ void DebuggerApi::GetLocalExportVariables(const EcmaVM *ecmaVm, Local void DebuggerApi::GetIndirectExportVariables(const EcmaVM *ecmaVm, Local &moduleObj, const JSHandle ¤tModule) { + if (!currentModule->IsSourceTextModule()) { + return; + } JSTaggedValue indirectExportEntries = SourceTextModule::Cast( currentModule->GetTaggedObject())->GetIndirectExportEntries(); if (indirectExportEntries.IsUndefined()) { @@ -609,12 +689,15 @@ void DebuggerApi::GetIndirectExportVariables(const EcmaVM *ecmaVm, Local variableName = JSNApiHelper::ToLocal(name); - JSTaggedValue moduleRequest = ee->GetModuleRequest(); - int32_t index = GetRequestModuleIndex(ecmaVm, moduleRequest, currentModule); - JSTaggedValue importNamespace = - thread->GetCurrentEcmaContext()->GetModuleManager()->GetModuleNamespace(index); - JSHandle importModule(thread, - ModuleNamespace::Cast(importNamespace.GetTaggedObject())->GetModule()); + JSHandle moduleRequest(thread, ee->GetModuleRequest()); + JSHandle importModule; + JSHandle module = JSHandle::Cast(currentModule); + JSTaggedValue moduleRecordName = module->GetEcmaModuleRecordName(); + if (moduleRecordName.IsUndefined()) { + importModule = SourceTextModule::HostResolveImportedModule(thread, module, moduleRequest); + } else { + importModule = SourceTextModule::HostResolveImportedModuleWithMerge(thread, module, moduleRequest); + } std::string importName = EcmaStringAccessor(ee->GetImportName()).ToStdString(); Local value = GetModuleValue(ecmaVm, importModule, importName); PropertyAttribute descriptor(value, true, true, true); @@ -626,6 +709,9 @@ void DebuggerApi::GetIndirectExportVariables(const EcmaVM *ecmaVm, Local &moduleObj, const JSHandle ¤tModule) { + if (!currentModule->IsSourceTextModule()) { + return; + } JSTaggedValue importEntries = SourceTextModule::Cast(currentModule->GetTaggedObject())->GetImportEntries(); if (importEntries.IsUndefined()) { return; @@ -638,12 +724,12 @@ void DebuggerApi::GetImportVariables(const EcmaVM *ecmaVm, Local &mod JSThread *thread = ecmaVm->GetJSThread(); JSHandle importArray(thread, TaggedArray::Cast(importEntries.GetTaggedObject())); - int32_t importEntriesLen = static_cast(importArray->GetLength()); + uint32_t importEntriesLen = importArray->GetLength(); JSHandle environment(thread, TaggedArray::Cast(moduleEnvironment.GetTaggedObject())); 
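// Illustrative note on the loop below: import entries are walked with
// uint32_t indices to match TaggedArray::GetLength(), and a "*" import is
// resolved directly from its module request -- HostResolveImportedModule when
// the module has no record name (file-path loading), or
// HostResolveImportedModuleWithMerge for merged-abc records -- instead of
// going through the request-module index.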
JSHandle starString = thread->GlobalConstants()->GetHandledStarString(); JSMutableHandle ee(thread, thread->GlobalConstants()->GetUndefined()); JSMutableHandle name(thread, thread->GlobalConstants()->GetUndefined()); - for (int32_t idx = 0; idx < importEntriesLen; idx++) { + for (uint32_t idx = 0; idx < importEntriesLen; idx++) { ee.Update(importArray->Get(idx)); JSTaggedValue key = ee->GetImportName(); JSTaggedValue localName = ee->GetLocalName(); @@ -652,12 +738,15 @@ void DebuggerApi::GetImportVariables(const EcmaVM *ecmaVm, Local &mod continue; } if (JSTaggedValue::SameValue(key, starString.GetTaggedValue())) { - JSTaggedValue moduleRequest = ee->GetModuleRequest(); - int32_t index = GetRequestModuleIndex(ecmaVm, moduleRequest, currentModule); - JSTaggedValue importNamespace = - thread->GetCurrentEcmaContext()->GetModuleManager()->GetModuleNamespace(index); - JSHandle importModule(thread, - ModuleNamespace::Cast(importNamespace.GetTaggedObject())->GetModule()); + JSHandle moduleRequest(thread, ee->GetModuleRequest()); + JSHandle importModule; + JSHandle module = JSHandle::Cast(currentModule); + JSTaggedValue moduleRecordName = module->GetEcmaModuleRecordName(); + if (moduleRecordName.IsUndefined()) { + importModule = SourceTextModule::HostResolveImportedModule(thread, module, moduleRequest); + } else { + importModule = SourceTextModule::HostResolveImportedModuleWithMerge(thread, module, moduleRequest); + } Local importModuleObj = ObjectRef::New(ecmaVm); GetLocalExportVariables(ecmaVm, importModuleObj, importModule, true); Local variableName = JSNApiHelper::ToLocal(name); @@ -669,10 +758,22 @@ void DebuggerApi::GetImportVariables(const EcmaVM *ecmaVm, Local &mod if (resolvedBinding.IsHole()) { continue; } + Local value; ResolvedIndexBinding *binding = ResolvedIndexBinding::Cast(resolvedBinding.GetTaggedObject()); JSHandle importModule(thread, binding->GetModule()); - std::string importName = EcmaStringAccessor(key).ToStdString(); - Local value = GetModuleValue(ecmaVm, importModule, importName); + ModuleTypes moduleType = SourceTextModule::Cast(importModule->GetTaggedObject())->GetTypes(); + if (moduleType == ModuleTypes::CJS_MODULE) { + JSTaggedValue moduleValue = thread->GetCurrentEcmaContext()->GetModuleManager()->GetCJSModuleValue( + thread, currentModule.GetTaggedValue(), importModule.GetTaggedValue(), binding); + value = JSNApiHelper::ToLocal(JSHandle(thread, moduleValue)); + } else if (SourceTextModule::IsNativeModule(moduleType)) { + JSTaggedValue moduleValue = thread->GetCurrentEcmaContext()->GetModuleManager()->GetNativeModuleValue( + thread, currentModule.GetTaggedValue(), importModule.GetTaggedValue(), binding); + value = JSNApiHelper::ToLocal(JSHandle(thread, moduleValue)); + } else { + std::string importName = EcmaStringAccessor(key).ToStdString(); + value = GetModuleValue(ecmaVm, importModule, importName); + } Local variableName = JSNApiHelper::ToLocal(name); PropertyAttribute descriptor(value, true, true, true); moduleObj->DefineProperty(ecmaVm, variableName, descriptor); @@ -686,6 +787,7 @@ void DebuggerApi::HandleUncaughtException(const EcmaVM *ecmaVm, std::string &mes const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle exHandle(thread, thread->GetException()); + thread->ClearException(); if (exHandle->IsJSError()) { JSHandle nameKey = globalConst->GetHandledNameString(); JSHandle name(JSObject::GetProperty(thread, exHandle, nameKey).GetValue()); @@ -696,7 +798,6 @@ void DebuggerApi::HandleUncaughtException(const EcmaVM *ecmaVm, std::string &mes 
JSHandle ecmaStr = JSTaggedValue::ToString(thread, exHandle); message = ConvertToString(*ecmaStr); } - thread->ClearException(); } Local DebuggerApi::GenerateFuncFromBuffer(const EcmaVM *ecmaVm, const void *buffer, @@ -802,6 +903,9 @@ uint32_t DebuggerApi::GetContainerLength(const EcmaVM *ecmaVm, Local void DebuggerApi::AddInternalProperties(const EcmaVM *ecmaVm, Local object, ArkInternalValueType type, Global internalObjects) { + if (internalObjects.IsEmpty()) { + return; + } internalObjects->Set(ecmaVm, object, NumberRef::New(ecmaVm, static_cast(type))); } @@ -842,7 +946,7 @@ Local DebuggerApi::GetDequeValue(const EcmaVM *ecmaVm, Local DebuggerApi::GetHashMapValue(const EcmaVM *ecmaVm, Local value, Global internalObjects) { - JSHandle hashMap(JSNApiHelper::ToJSHandle(value)); + JSHandle hashMap(JSNApiHelper::ToJSHandle(value)); JSThread *thread = ecmaVm->GetJSThread(); JSHandle table(thread, hashMap->GetTable()); uint32_t length = table->GetLength(); @@ -968,7 +1072,7 @@ Local DebuggerApi::GetLinkedListValue(const EcmaVM *ecmaVm, Local(linkedList->Length()); Local jsValueRef = ArrayRef::New(ecmaVm, size); JSMutableHandle currentValue(thread, JSTaggedValue::Undefined()); - uint32_t valueNode = TaggedDoubleList::ELEMENTS_START_INDEX; + int valueNode = TaggedDoubleList::ELEMENTS_START_INDEX; uint32_t index = 0; while (index < size) { valueNode = doubleList->GetNextDataIndex(valueNode); @@ -988,7 +1092,7 @@ Local DebuggerApi::GetListValue(const EcmaVM *ecmaVm, Local(list->Length()); Local jsValueRef = ArrayRef::New(ecmaVm, size); JSMutableHandle currentValue(thread, JSTaggedValue::Undefined()); - uint32_t valueNode = TaggedDoubleList::ELEMENTS_START_INDEX; + int valueNode = TaggedDoubleList::ELEMENTS_START_INDEX; uint32_t index = 0; while (index < size) { valueNode = singleList->GetNextDataIndex(valueNode); @@ -1135,4 +1239,20 @@ Local DebuggerApi::GetVectorValue(const EcmaVM *ecmaVm, LocalGetJsDebuggerManager(); + uint32_t queueSizeEntry = debuggerMgr->GetPromiseQueueSizeRecordOfTopFrame(); + JSThread *thread = ecmaVm->GetJSThread(); + EcmaContext *context = thread->GetCurrentEcmaContext(); + uint32_t queueSizeCurrent = job::MicroJobQueue::GetPromiseQueueSize(thread, context->GetMicroJobQueue()); + return queueSizeEntry == queueSizeCurrent; +} + +void DebuggerApi::DropLastFrame(const EcmaVM *ecmaVm) +{ + auto *debuggerMgr = ecmaVm->GetJsDebuggerManager(); + debuggerMgr->DropLastFrame(); +} } // namespace panda::ecmascript::tooling diff --git a/ecmascript/debugger/debugger_api.h b/ecmascript/debugger/debugger_api.h index a96580671bb45bf6c7e56f78cbda22d0306faec9..f2e7807e33816e645494113af6237e17a79910c8 100644 --- a/ecmascript/debugger/debugger_api.h +++ b/ecmascript/debugger/debugger_api.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_TOOLING_BACKEND_DEBUGGER_API_H -#define ECMASCRIPT_TOOLING_BACKEND_DEBUGGER_API_H +#ifndef ECMASCRIPT_DEBUGGER_DEBUGGER_API_H +#define ECMASCRIPT_DEBUGGER_DEBUGGER_API_H #include @@ -55,6 +55,7 @@ public: static uint32_t GetStackDepth(const EcmaVM *ecmaVm); static std::shared_ptr NewFrameHandler(const EcmaVM *ecmaVm); static bool StackWalker(const EcmaVM *ecmaVm, std::function func); + static uint32_t GetStackDepthOverBuiltin(const EcmaVM *ecmaVm); static uint32_t GetBytecodeOffset(const EcmaVM *ecmaVm); static uint32_t GetBytecodeOffset(const FrameHandler *frameHandler); @@ -88,6 +89,10 @@ public: std::string &name); static int32_t GetRequestModuleIndex(const EcmaVM *ecmaVm, const JSTaggedValue moduleRequest, const JSHandle ¤tModule); + static Local GetExportVariableValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, + std::string &name); + static bool SetExportVariableValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, + std::string &name, Local value); static Local GetModuleValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, std::string &name); static bool SetModuleValue(const EcmaVM *ecmaVm, const JSHandle ¤tModule, @@ -112,14 +117,14 @@ public: static void DestroyJSDebugger(JSDebugger *debugger); static void RegisterHooks(JSDebugger *debugger, PtHooks *hooks); static bool SetBreakpoint(JSDebugger *debugger, const JSPtLocation &location, - Local condFuncRef); + Local condFuncRef); static bool RemoveBreakpoint(JSDebugger *debugger, const JSPtLocation &location); static void RemoveAllBreakpoints(JSDebugger *debugger); static void HandleUncaughtException(const EcmaVM *ecmaVm, std::string &message); static Local EvaluateViaFuncCall(EcmaVM *ecmaVm, Local funcRef, - std::shared_ptr &frameHandler); + std::shared_ptr &frameHandler); static Local GenerateFuncFromBuffer(const EcmaVM *ecmaVm, const void *buffer, size_t size, - std::string_view entryPoint); + std::string_view entryPoint); // HotReload static DebugInfoExtractor *GetPatchExtractor(const EcmaVM *ecmaVm, const std::string &url); @@ -158,7 +163,10 @@ public: Global internalObjects); static Local GetVectorValue(const EcmaVM *ecmaVm, Local value, Global internalObjects); + + static bool CheckPromiseQueueSize(const EcmaVM *ecmaVm); + static void DropLastFrame(const EcmaVM *ecmaVm); }; } // namespace panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_BACKEND_DEBUGGER_API_H +#endif // ECMASCRIPT_DEBUGGER_DEBUGGER_API_H diff --git a/ecmascript/debugger/dropframe_manager.cpp b/ecmascript/debugger/dropframe_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0fa51a18d8a4a41e6bbde03549dbc0f6f07f6c5b --- /dev/null +++ b/ecmascript/debugger/dropframe_manager.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ecmascript/debugger/dropframe_manager.h"
+
+#include "ecmascript/frames.h"
+#include "ecmascript/global_env.h"
+#include "ecmascript/global_handle_collection.h"
+#include "ecmascript/interpreter/frame_handler.h"
+#include "ecmascript/interpreter/interpreter-inl.h"
+#include "ecmascript/jobs/micro_job_queue.h"
+
+namespace panda::ecmascript::tooling {
+bool DropframeManager::IsNewlexenvOpcode(BytecodeInstruction::Opcode op)
+{
+    switch (op) {
+        case BytecodeInstruction::Opcode::NEWLEXENV_IMM8:
+        case BytecodeInstruction::Opcode::NEWLEXENVWITHNAME_IMM8_ID16:
+        case BytecodeInstruction::Opcode::WIDE_NEWLEXENV_PREF_IMM16:
+        case BytecodeInstruction::Opcode::WIDE_NEWLEXENVWITHNAME_PREF_IMM16_ID16:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+bool DropframeManager::IsStlexvarOpcode(BytecodeInstruction::Opcode op)
+{
+    switch (op) {
+        case BytecodeInstruction::Opcode::STLEXVAR_IMM4_IMM4:
+        case BytecodeInstruction::Opcode::STLEXVAR_IMM8_IMM8:
+        case BytecodeInstruction::Opcode::WIDE_STLEXVAR_PREF_IMM16_IMM16:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+std::pair<uint16_t, uint16_t> DropframeManager::ReadStlexvarParams(const uint8_t *pc, BytecodeInstruction::Opcode op)
+{
+    uint16_t level = 0;
+    uint16_t slot = 0;
+    switch (op) {
+        case BytecodeInstruction::Opcode::STLEXVAR_IMM4_IMM4:
+            level = READ_INST_4_0();
+            slot = READ_INST_4_1();
+            break;
+        case BytecodeInstruction::Opcode::STLEXVAR_IMM8_IMM8:
+            level = READ_INST_8_0();
+            slot = READ_INST_8_1();
+            break;
+        case BytecodeInstruction::Opcode::WIDE_STLEXVAR_PREF_IMM16_IMM16:
+            level = READ_INST_16_1();
+            slot = READ_INST_16_3();
+            break;
+        default:
+            break;
+    }
+    return std::make_pair(level, slot);
+}
+
+void DropframeManager::MethodEntry(JSThread *thread, JSHandle<Method> method, JSHandle<JSTaggedValue> envHandle)
+{
+    std::set<std::pair<uint16_t, uint16_t>> modifiedLexVarPos;
+    NewLexModifyRecordLevel();
+    if (envHandle.GetTaggedValue().IsUndefinedOrNull()) {
+        return;
+    }
+    uint32_t codeSize = method->GetCodeSize();
+    uint16_t newEnvCount = 0;
+    auto bcIns = BytecodeInstruction(method->GetBytecodeArray());
+    auto bcInsLast = bcIns.JumpTo(codeSize);
+    while (bcIns.GetAddress() != bcInsLast.GetAddress()) {
+        BytecodeInstruction::Opcode op = bcIns.GetOpcode();
+        if (IsNewlexenvOpcode(op)) {
+            newEnvCount++;
+        } else if (IsStlexvarOpcode(op)) {
+            std::pair<uint16_t, uint16_t> lexVarPos = ReadStlexvarParams(bcIns.GetAddress(), op);
+            uint16_t level;
+            uint16_t slot;
+            std::tie(level, slot) = lexVarPos;
+            JSTaggedValue env = envHandle.GetTaggedValue();
+            for (uint16_t i = 0; ; i++) {
+                if ((level < newEnvCount || i >= level - newEnvCount) &&
+                    slot < LexicalEnv::Cast(env.GetTaggedObject())->GetLength() - LexicalEnv::RESERVED_ENV_LENGTH &&
+                    !modifiedLexVarPos.count({i, slot})) {
+                    JSTaggedValue value = LexicalEnv::Cast(env.GetTaggedObject())->GetProperties(slot);
+                    EmplaceLexModifyRecord(thread, env, slot, value);
+                    modifiedLexVarPos.insert({i, slot});
+                }
+                if (i >= level) {
+                    break;
+                }
+                JSTaggedValue taggedParentEnv = LexicalEnv::Cast(env.GetTaggedObject())->GetParentEnv();
+                if (taggedParentEnv.IsUndefined()) {
+                    break;
+                }
+                env = taggedParentEnv;
+            }
+        }
+        bcIns = bcIns.GetNext();
+    }
+    PushPromiseQueueSizeRecord(thread);
+}
+
+void DropframeManager::MethodExit(JSThread *thread, [[maybe_unused]] JSHandle<Method> method)
+{
+    MergeLexModifyRecordOfTopFrame(thread);
+    PopPromiseQueueSizeRecord();
+}
+
+void DropframeManager::DropLastFrame(JSThread *thread)
+{
+    std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>> lexModifyRecord;
+    lexModifyRecord = GetLexModifyRecordOfTopFrame();
+    for (const auto &item : lexModifyRecord) {
+        JSHandle<JSTaggedValue> envHandle;
+        uint16_t slot;
+        JSHandle<JSTaggedValue> valueHandle;
+        std::tie(envHandle, slot, valueHandle) = item;
+        JSTaggedValue env = envHandle.GetTaggedValue();
+        ASSERT(slot < LexicalEnv::Cast(env.GetTaggedObject())->GetLength() - LexicalEnv::RESERVED_ENV_LENGTH);
+        LexicalEnv::Cast(env.GetTaggedObject())->SetProperties(thread, slot, valueHandle.GetTaggedValue());
+    }
+    RemoveLexModifyRecordOfTopFrame(thread);
+    PopPromiseQueueSizeRecord();
+
+    FrameHandler frameHandler(thread);
+    bool isEntryFrameDropped = false;
+    while (frameHandler.HasFrame()) {
+        frameHandler.PrevJSFrame();
+        if (frameHandler.IsEntryFrame()) {
+            isEntryFrameDropped = true;
+            continue;
+        }
+        if (frameHandler.IsBuiltinFrame()) {
+            continue;
+        }
+        if (!thread->IsAsmInterpreter()) {
+            JSTaggedType *prevSp = frameHandler.GetSp();
+            thread->SetCurrentFrame(prevSp);
+        }
+        if (isEntryFrameDropped) {
+            thread->SetEntryFrameDroppedState();
+        }
+        thread->SetFrameDroppedState();
+        break;
+    }
+}
+
+void DropframeManager::NewLexModifyRecordLevel()
+{
+    modifiedLexVar_.push(std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>>());
+}
+
+void DropframeManager::EmplaceLexModifyRecord(JSThread *thread, JSTaggedValue env, uint16_t slot, JSTaggedValue value)
+{
+    GlobalHandleCollection globalHandleCollection(thread);
+    for (const auto &item : modifiedLexVar_.top()) {
+        JSHandle<JSTaggedValue> envHandleRecord = std::get<0>(item);
+        uint16_t slotRecord = std::get<1>(item);
+        if (envHandleRecord.GetTaggedType() == env.GetRawData() && slotRecord == slot) {
+            return;
+        }
+    }
+    JSHandle<JSTaggedValue> envHandle = globalHandleCollection.NewHandle<JSTaggedValue>(env.GetRawData());
+    JSHandle<JSTaggedValue> valueHandle = globalHandleCollection.NewHandle<JSTaggedValue>(value.GetRawData());
+    modifiedLexVar_.top().emplace_back(envHandle, slot, valueHandle);
+}
+
+uint32_t DropframeManager::GetLexModifyRecordLevel()
+{
+    return modifiedLexVar_.size();
+}
+
+std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>>
+    DropframeManager::GetLexModifyRecordOfTopFrame()
+{
+    if (modifiedLexVar_.empty()) {
+        return std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>>();
+    }
+    return modifiedLexVar_.top();
+}
+
+void DropframeManager::RemoveLexModifyRecordOfTopFrame(JSThread *thread)
+{
+    if (modifiedLexVar_.empty()) {
+        return;
+    }
+    GlobalHandleCollection globalHandleCollection(thread);
+    for (const auto &item : modifiedLexVar_.top()) {
+        JSHandle<JSTaggedValue> envHandle = std::get<0>(item);
+        JSHandle<JSTaggedValue> valueHandle = std::get<2>(item); // 2: get the third item of the tuple
+        globalHandleCollection.Dispose(envHandle);
+        globalHandleCollection.Dispose(valueHandle);
+    }
+    modifiedLexVar_.pop();
+}
+
+void DropframeManager::MergeLexModifyRecordOfTopFrame(JSThread *thread)
+{
+    if (modifiedLexVar_.empty()) {
+        return;
+    }
+    GlobalHandleCollection globalHandleCollection(thread);
+    std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>> lexModifyRecord;
+    lexModifyRecord = modifiedLexVar_.top();
+    modifiedLexVar_.pop();
+    for (const auto &item : lexModifyRecord) {
+        JSHandle<JSTaggedValue> envHandle;
+        uint16_t slot;
+        JSHandle<JSTaggedValue> valueHandle;
+        std::tie(envHandle, slot, valueHandle) = item;
+        bool existRecord = false;
+        if (!modifiedLexVar_.empty()) {
+            for (const auto &itemLast : modifiedLexVar_.top()) {
+                JSHandle<JSTaggedValue> envHandleRecord = std::get<0>(itemLast);
+                uint16_t slotRecord = std::get<1>(itemLast);
+                if (envHandleRecord.GetTaggedType() == envHandle.GetTaggedType() && slotRecord == slot) {
+                    existRecord = true;
+                    break;
+                }
+            }
+        }
+        if (modifiedLexVar_.empty() || existRecord) {
+            globalHandleCollection.Dispose(envHandle);
+            globalHandleCollection.Dispose(valueHandle);
+        } else {
+            modifiedLexVar_.top().emplace_back(envHandle, slot, valueHandle);
+        }
+    }
+}
+
+void DropframeManager::PushPromiseQueueSizeRecord(JSThread *thread)
+{
+    EcmaContext *context = thread->GetCurrentEcmaContext();
+    uint32_t queueSize = job::MicroJobQueue::GetPromiseQueueSize(thread, context->GetMicroJobQueue());
+    promiseQueueSizeRecord_.push(queueSize);
+}
+
+uint32_t DropframeManager::GetPromiseQueueSizeRecordOfTopFrame()
+{
+    ASSERT(!promiseQueueSizeRecord_.empty());
+    return promiseQueueSizeRecord_.top();
+}
+
+void DropframeManager::PopPromiseQueueSizeRecord()
+{
+    if (!promiseQueueSizeRecord_.empty()) {
+        promiseQueueSizeRecord_.pop();
+    }
+}
+}
\ No newline at end of file
diff --git a/ecmascript/debugger/dropframe_manager.h b/ecmascript/debugger/dropframe_manager.h
new file mode 100644
index 0000000000000000000000000000000000000000..7da78936df4e5d809a66d7eb7acec8a053a53dc1
--- /dev/null
+++ b/ecmascript/debugger/dropframe_manager.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_DEBUGGER_DROPFRAME_MANAGER_H
+#define ECMASCRIPT_DEBUGGER_DROPFRAME_MANAGER_H
+
+#include "ecmascript/js_handle.h"
+#include "ecmascript/js_thread.h"
+#include "ecmascript/method.h"
+
+namespace panda::ecmascript::tooling {
+class DropframeManager {
+public:
+    DropframeManager() = default;
+    ~DropframeManager() = default;
+    NO_COPY_SEMANTIC(DropframeManager);
+    NO_MOVE_SEMANTIC(DropframeManager);
+
+    void MethodEntry(JSThread *thread, JSHandle<Method> method, JSHandle<JSTaggedValue> env);
+    void MethodExit(JSThread *thread, JSHandle<Method> method);
+    void DropLastFrame(JSThread *thread);
+    uint32_t GetPromiseQueueSizeRecordOfTopFrame();
+private:
+    bool IsNewlexenvOpcode(BytecodeInstruction::Opcode op);
+    bool IsStlexvarOpcode(BytecodeInstruction::Opcode op);
+    std::pair<uint16_t, uint16_t> ReadStlexvarParams(const uint8_t *pc, BytecodeInstruction::Opcode op);
+
+    void NewLexModifyRecordLevel();
+    void EmplaceLexModifyRecord(JSThread *thread, JSTaggedValue env, uint16_t slot, JSTaggedValue value);
+    uint32_t GetLexModifyRecordLevel();
+    std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>> GetLexModifyRecordOfTopFrame();
+    void RemoveLexModifyRecordOfTopFrame(JSThread *thread);
+    void MergeLexModifyRecordOfTopFrame(JSThread *thread);
+
+    void PushPromiseQueueSizeRecord(JSThread *thread);
+    void PopPromiseQueueSizeRecord();
+
+    std::stack<std::vector<std::tuple<JSHandle<JSTaggedValue>, uint16_t, JSHandle<JSTaggedValue>>>> modifiedLexVar_;
+    std::stack<uint32_t> promiseQueueSizeRecord_;
+};
+}
+
+#endif // ECMASCRIPT_DEBUGGER_DROPFRAME_MANAGER_H
\ No newline at end of file
diff --git a/ecmascript/debugger/hot_reload_manager.h b/ecmascript/debugger/hot_reload_manager.h
index 0c55d0e0afca31cd1faee1e62bd77a13e0c23bad..146f39b707ca32d1add0ab19aa6568838222d856 100644
--- a/ecmascript/debugger/hot_reload_manager.h
+++ b/ecmascript/debugger/hot_reload_manager.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
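The core idea of DropframeManager, restated briefly: on every interpreted method entry it scans the bytecode for STLEXVAR targets and snapshots the current value of each lexical-env slot the method may overwrite; dropping the frame then writes those snapshots back before the interpreter re-enters the caller. A simplified model of that bookkeeping with plain containers, assuming numeric env/slot ids in place of the real LexicalEnv handles:

```
#include <cstdint>
#include <map>
#include <stack>
#include <utility>

// Simplified stand-in for the per-frame record kept by DropframeManager.
class DropframeModel {
public:
    // MethodEntry: open a record level (NewLexModifyRecordLevel).
    void MethodEntry() { frames_.emplace(); }
    // Capture the pre-call value of (env, slot) once; map::emplace keeps the
    // first capture, mirroring the modifiedLexVarPos de-duplication above.
    void RecordBeforeWrite(uint32_t env, uint16_t slot, uint64_t oldValue)
    {
        frames_.top().emplace(std::make_pair(env, slot), oldValue);
    }
    // DropLastFrame: write every captured value back, then pop the level.
    template <typename RestoreFn>
    void DropLastFrame(RestoreFn restore)
    {
        for (const auto &[pos, value] : frames_.top()) {
            restore(pos.first, pos.second, value);
        }
        frames_.pop();
    }
private:
    using Record = std::map<std::pair<uint32_t, uint16_t>, uint64_t>;
    std::stack<Record> frames_;
};
```

The real implementation additionally merges a callee's record into its caller on normal MethodExit (MergeLexModifyRecordOfTopFrame), so values captured deeper in the call tree still restore correctly when an outer frame is later dropped.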
* You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_TOOLING_INTERFACE_HOT_RELOAD_MANAGER_H -#define ECMASCRIPT_TOOLING_INTERFACE_HOT_RELOAD_MANAGER_H +#ifndef ECMASCRIPT_DEBUGGER_HOT_RELOAD_MANAGER_H +#define ECMASCRIPT_DEBUGGER_HOT_RELOAD_MANAGER_H #include "ecmascript/jspandafile/js_pandafile.h" #include "ecmascript/jspandafile/debug_info_extractor.h" @@ -42,4 +42,4 @@ private: CUnorderedMap patchExtractors_ {}; }; } // namespace panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_INTERFACE_HOT_RELOAD_MANAGER_H +#endif // ECMASCRIPT_DEBUGGER_HOT_RELOAD_MANAGER_H diff --git a/ecmascript/debugger/js_debugger.cpp b/ecmascript/debugger/js_debugger.cpp index cebff7f5f05523af4818d8d5afd09e1a4222c61a..9945836f648ea09b0312616f7b876f12d6b46f81 100644 --- a/ecmascript/debugger/js_debugger.cpp +++ b/ecmascript/debugger/js_debugger.cpp @@ -21,6 +21,7 @@ #include "ecmascript/interpreter/fast_runtime_stub-inl.h" #include "ecmascript/interpreter/frame_handler.h" #include "ecmascript/jspandafile/js_pandafile_manager.h" +#include "ecmascript/interpreter/interpreter-inl.h" namespace panda::ecmascript::tooling { using panda::ecmascript::base::BuiltinsBase; @@ -205,4 +206,30 @@ void JSDebugger::DumpBreakpoints() LOG_DEBUGGER(DEBUG) << bp.ToString(); } } + +void JSDebugger::MethodEntry(JSHandle method, JSHandle envHandle) +{ + if (hooks_ == nullptr || !ecmaVm_->GetJsDebuggerManager()->IsDebugMode()) { + return; + } + FrameHandler frameHandler(ecmaVm_->GetJSThread()); + if (frameHandler.IsEntryFrame() || frameHandler.IsBuiltinFrame()) { + return; + } + auto *debuggerMgr = ecmaVm_->GetJsDebuggerManager(); + debuggerMgr->MethodEntry(method, envHandle); +} + +void JSDebugger::MethodExit([[maybe_unused]] JSHandle method) +{ + if (hooks_ == nullptr || !ecmaVm_->GetJsDebuggerManager()->IsDebugMode()) { + return; + } + FrameHandler frameHandler(ecmaVm_->GetJSThread()); + if (frameHandler.IsEntryFrame() || frameHandler.IsBuiltinFrame()) { + return; + } + auto *debuggerMgr = ecmaVm_->GetJsDebuggerManager(); + debuggerMgr->MethodExit(method); +} } // namespace panda::tooling::ecmascript diff --git a/ecmascript/debugger/js_debugger.h b/ecmascript/debugger/js_debugger.h index fb1772870051d2f717b1471684fb53b90bdddae6..b0ed921e46338a24f51e65cbe3eecb7bb70791bd 100644 --- a/ecmascript/debugger/js_debugger.h +++ b/ecmascript/debugger/js_debugger.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_TOOLING_BACKEND_JS_DEBUGGER_H -#define ECMASCRIPT_TOOLING_BACKEND_JS_DEBUGGER_H +#ifndef ECMASCRIPT_DEBUGGER_JS_DEBUGGER_H +#define ECMASCRIPT_DEBUGGER_JS_DEBUGGER_H #include "ecmascript/debugger/debugger_api.h" #include "ecmascript/debugger/js_debugger_manager.h" @@ -115,7 +115,7 @@ public: notificationMgr_->VmDeathEvent(); hooks_ = nullptr; } - bool HandleDebuggerStmt(JSHandle method, uint32_t bc_offset) override; + bool HandleDebuggerStmt(JSHandle method, uint32_t bcOffset) override; bool SetBreakpoint(const JSPtLocation &location, Local condFuncRef) override; bool RemoveBreakpoint(const JSPtLocation &location) override; void RemoveAllBreakpoints() override; @@ -141,13 +141,6 @@ public: } hooks_->VmDeath(); } - void PendingJobEntry() override - { - if (hooks_ == nullptr) { - return; - } - hooks_->PendingJobEntry(); - } void NativeCalling(const void *nativeAddress) override { if (hooks_ == nullptr) { @@ -155,6 +148,8 @@ public: } hooks_->NativeCalling(nativeAddress); } + void MethodEntry(JSHandle method, JSHandle envHandle) override; + void MethodExit(JSHandle method) override; private: std::unique_ptr FindMethod(const JSPtLocation &location) const; std::optional FindBreakpoint(JSHandle method, uint32_t bcOffset) const; @@ -172,4 +167,4 @@ private: }; } // namespace panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_BACKEND_JS_DEBUGGER_H +#endif // ECMASCRIPT_DEBUGGER_JS_DEBUGGER_H diff --git a/ecmascript/debugger/js_debugger_interface.h b/ecmascript/debugger/js_debugger_interface.h index e84483a33b2521b6f76aebf16dd2219e24cca458..386124d5f042ba3a725ee6954eb2cd012536abff 100644 --- a/ecmascript/debugger/js_debugger_interface.h +++ b/ecmascript/debugger/js_debugger_interface.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_TOOLING_INTERFACE_JS_DEBUG_INTERFACE_H -#define ECMASCRIPT_TOOLING_INTERFACE_JS_DEBUG_INTERFACE_H +#ifndef ECMASCRIPT_DEBUGGER_JS_DEBUG_INTERFACE_H +#define ECMASCRIPT_DEBUGGER_JS_DEBUG_INTERFACE_H #include @@ -70,11 +70,6 @@ public: */ virtual void LoadModule(std::string_view pandaFileName, std::string_view entryPoint) = 0; - /** - * \brief called before executing pending job - */ - virtual void PendingJobEntry() = 0; - /** * \brief called by the ecmavm when virtual machine start initialization */ @@ -103,9 +98,10 @@ public: /** * \brief handle debugger statement event - * @param location debugger statement location + * @param method Current method + * @param bcOffset Current bytecode offset */ - virtual bool HandleDebuggerStmt(JSHandle method, uint32_t bc_offset) = 0; + virtual bool HandleDebuggerStmt(JSHandle method, uint32_t bcOffset) = 0; /** * \brief Register debug hooks in the ecmavm @@ -134,8 +130,7 @@ public: virtual bool RemoveBreakpoint(const JSPtLocation &location) = 0; /** - * \brief Remove all breakpoints from \param location - * @param location Breakpoint location + * \brief Remove all breakpoints */ virtual void RemoveAllBreakpoints() = 0; @@ -146,4 +141,4 @@ public: }; } // namespace panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_INTERFACE_JS_DEBUG_INTERFACE_H +#endif // ECMASCRIPT_DEBUGGER_JS_DEBUG_INTERFACE_H diff --git a/ecmascript/debugger/js_debugger_manager.h b/ecmascript/debugger/js_debugger_manager.h index 12dec88bf8fdfa0408db6379b3f860fa11918160..7dc2b2704449be433e7e6a26e7f132fbf1389d2f 100644 --- a/ecmascript/debugger/js_debugger_manager.h +++ b/ecmascript/debugger/js_debugger_manager.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,15 +13,17 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_TOOLING_INTERFACE_JS_DEBUGGER_MANAGER_H -#define ECMASCRIPT_TOOLING_INTERFACE_JS_DEBUGGER_MANAGER_H +#ifndef ECMASCRIPT_DEBUGGER_JS_DEBUGGER_MANAGER_H +#define ECMASCRIPT_DEBUGGER_JS_DEBUGGER_MANAGER_H #include "ecmascript/debugger/hot_reload_manager.h" #include "ecmascript/debugger/notification_manager.h" +#include "ecmascript/debugger/dropframe_manager.h" #include "ecmascript/ecma_vm.h" #include "ecmascript/interpreter/frame_handler.h" #include "ecmascript/js_thread.h" #include "ecmascript/napi/include/jsnapi.h" +#include "ecmascript/global_handle_collection.h" #include "libpandabase/os/library_loader.h" @@ -60,6 +62,12 @@ public: } isDebugMode_ = isDebugMode; + + if (isDebugMode) { + jsThread_->SetDebugModeState(); + } else { + jsThread_->ResetDebugModeState(); + } } bool IsDebugMode() const @@ -131,6 +139,26 @@ public: } } + void MethodEntry(JSHandle method, JSHandle envHandle) + { + dropframeManager_.MethodEntry(jsThread_, method, envHandle); + } + + void MethodExit(JSHandle method) + { + dropframeManager_.MethodExit(jsThread_, method); + } + + void DropLastFrame() + { + dropframeManager_.DropLastFrame(jsThread_); + } + + uint32_t GetPromiseQueueSizeRecordOfTopFrame() + { + return dropframeManager_.GetPromiseQueueSizeRecordOfTopFrame(); + } + private: bool isDebugMode_ {false}; bool isMixedDebugEnabled_ { false }; @@ -140,10 +168,11 @@ private: SingleStepperFunc *stepperFunc_ {nullptr}; JSThread *jsThread_ {nullptr}; std::shared_ptr frameHandler_; + DropframeManager dropframeManager_ { }; NotificationManager notificationManager_; HotReloadManager hotReloadManager_; }; } // panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_INTERFACE_JS_DEBUGGER_MANAGER_H \ No newline at end of file +#endif // ECMASCRIPT_DEBUGGER_JS_DEBUGGER_MANAGER_H \ No newline at end of file diff --git a/ecmascript/debugger/js_pt_location.h b/ecmascript/debugger/js_pt_location.h index a5105fdab8dea87bb3db144ef14086e5009a213f..762efe87d0a9d9d874e28cc63299f01ac33df894 100644 --- a/ecmascript/debugger/js_pt_location.h +++ b/ecmascript/debugger/js_pt_location.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_TOOLING_BACKEND_JS_PT_LOCATION_H -#define ECMASCRIPT_TOOLING_BACKEND_JS_PT_LOCATION_H +#ifndef ECMASCRIPT_DEBUGGER_JS_PT_LOCATION_H +#define ECMASCRIPT_DEBUGGER_JS_PT_LOCATION_H #include @@ -84,4 +84,4 @@ private: }; } // namespace panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_BACKEND_JS_PT_LOCATION_H +#endif // ECMASCRIPT_DEBUGGER_JS_PT_LOCATION_H diff --git a/ecmascript/debugger/js_pt_method.h b/ecmascript/debugger/js_pt_method.h index 5c8bffa2f352cb60f7c7691e48f69c3bdcbe3d4e..0441db8ceaf6498ac1c7e517e1ec914e646fd380 100644 --- a/ecmascript/debugger/js_pt_method.h +++ b/ecmascript/debugger/js_pt_method.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_TOOLING_BASE_PT_METHOD_H -#define ECMASCRIPT_TOOLING_BASE_PT_METHOD_H +#ifndef ECMASCRIPT_DEBUGGER_JS_PT_METHOD_H +#define ECMASCRIPT_DEBUGGER_JS_PT_METHOD_H #include "ecmascript/jspandafile/js_pandafile.h" @@ -71,4 +71,4 @@ private: bool isNative_ {false}; }; } // namespace panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_BASE_PT_METHOD_H +#endif // ECMASCRIPT_DEBUGGER_JS_PT_METHOD_H diff --git a/ecmascript/debugger/notification_manager.h b/ecmascript/debugger/notification_manager.h index 422f8de7c3afb743e4a59aa703e5c22a1095e5ab..8adb610406af999baf08330952ab46f2ec83c182 100644 --- a/ecmascript/debugger/notification_manager.h +++ b/ecmascript/debugger/notification_manager.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_TOOLING_INTERFACE_NOTIFICATION_MANAGER_H -#define ECMASCRIPT_TOOLING_INTERFACE_NOTIFICATION_MANAGER_H +#ifndef ECMASCRIPT_DEBUGGER_NOTIFICATION_MANAGER_H +#define ECMASCRIPT_DEBUGGER_NOTIFICATION_MANAGER_H #include @@ -32,13 +32,14 @@ public: virtual void LoadModule(std::string_view name, std::string_view) = 0; virtual void BytecodePcChanged(JSThread *thread, JSHandle method, - uint32_t bc_offset) = 0; + uint32_t bcOffset) = 0; - virtual bool HandleDebuggerStmt(JSHandle method, uint32_t bc_offset) = 0; + virtual bool HandleDebuggerStmt(JSHandle method, uint32_t bcOffset) = 0; virtual void VmStart() = 0; virtual void VmDeath() = 0; - virtual void PendingJobEntry() = 0; virtual void NativeCalling(const void *nativeAddress) = 0; + virtual void MethodEntry(JSHandle method, JSHandle envHandle) = 0; + virtual void MethodExit(JSHandle method) = 0; }; class NotificationManager { @@ -88,13 +89,6 @@ public: } } - void PendingJobEntryEvent() const - { - if (UNLIKELY(listener_ != nullptr)) { - listener_->PendingJobEntry(); - } - } - void VmStartEvent() const { if (UNLIKELY(listener_ != nullptr)) { @@ -108,9 +102,24 @@ public: } } + void MethodEntryEvent(JSThread *thread, Method *method, JSTaggedValue env) const + { + if (UNLIKELY(listener_ != nullptr)) { + JSHandle methodHandle(thread, method); + JSHandle envHandle(thread, env); + listener_->MethodEntry(methodHandle, envHandle); + } + } + void MethodExitEvent(JSThread *thread, Method *method) const + { + if (UNLIKELY(listener_ != nullptr)) { + JSHandle methodHandle(thread, method); + listener_->MethodExit(methodHandle); + } + } private: RuntimeListener *listener_ {nullptr}; }; } // panda::ecmascript::tooling -#endif // ECMASCRIPT_TOOLING_INTERFACE_NOTIFICATION_MANAGER_H \ No newline at end of file +#endif // ECMASCRIPT_DEBUGGER_NOTIFICATION_MANAGER_H \ No newline at end of file diff --git a/ecmascript/deoptimizer/deoptimizer.cpp b/ecmascript/deoptimizer/deoptimizer.cpp index 33cdb672603d3a359cbffec5b1a106382eadb374..d6f4a5d421b15c82c5db4be906d99aea0343cce3 100644 --- a/ecmascript/deoptimizer/deoptimizer.cpp +++ b/ecmascript/deoptimizer/deoptimizer.cpp @@ -75,6 +75,12 @@ public: firstFrame_ = top_; } + void ReviseValueByIndex(JSTaggedType value, size_t index) + { + ASSERT(index < static_cast(start_ - top_)); + *(top_ + index) = value; + } + private: JSThread *thread_ {nullptr}; JSTaggedType *start_ {nullptr}; @@ -379,6 +385,29 @@ bool Deoptimizier::CollectVirtualRegisters(Method* method, 
FrameWriter *frameWri frameWriter->PushValue(value.GetRawData()); virtualIndex--; } + // revise correct a0 - aN virtual regs , for example: ldobjbyname key; sta a2; update value to a2 + // +--------------------------+ ^ + // | aN | | + // +--------------------------+ | + // | ... | | + // +--------------------------+ | + // | a2(this) | | + // +--------------------------+ revise correct vreg + // | a1(newtarget) | | + // +--------------------------+ | + // | a0(func) | | + // |--------------------------| v + // | v0 - vN | + // sp --> |--------------------------| + int32_t vregsAndArgsNum = declaredNumArgs + callFieldNumVregs + + static_cast(method->GetNumRevervedArgs()); + for (int32_t i = callFieldNumVregs; i < vregsAndArgsNum; i++) { + JSTaggedValue value = JSTaggedValue::Undefined(); + if (HasDeoptValue(curDepth, i)) { + value = GetDeoptValue(curDepth, i); + frameWriter->ReviseValueByIndex(value.GetRawData(), i); + } + } return true; } @@ -505,7 +534,7 @@ void Deoptimizier::UpdateAndDumpDeoptInfo(kungfu::DeoptType type) JSHandle oldHclass(thread_, jsFunc->GetClass()); // instead of hclass by non_optimized hclass when method ClearAOTFlags JSHandle newHClass = factory->GetNonOptimizedHclass(oldHclass, kind); - jsFunc->SetClass(newHClass); + jsFunc->SynchronizedSetClass(*newHClass); } } } diff --git a/ecmascript/dfx/cpu_profiler/cpu_profiler.cpp b/ecmascript/dfx/cpu_profiler/cpu_profiler.cpp index b8bc5e896022abdfbc8d19eeaffe66c47722f7e9..ad977e6ad0fedb996e24a55d0582d733534244ad 100644 --- a/ecmascript/dfx/cpu_profiler/cpu_profiler.cpp +++ b/ecmascript/dfx/cpu_profiler/cpu_profiler.cpp @@ -35,6 +35,7 @@ CpuProfiler::CpuProfiler(const EcmaVM *vm, const int interval) : vm_(vm), interv enableVMTag_ = const_cast(vm)->GetJSOptions().EnableCpuProfilerVMTag(); generator_ = new SamplesRecord(); generator_->SetEnableVMTag(enableVMTag_); + generator_->SetSourceMapTranslateCallback(vm->GetSourceMapTranslateCallback()); generator_->NodeInit(); if (generator_->SemInit(0, 0, 0) != 0) { LOG_ECMA(ERROR) << "sem_[0] init failed"; @@ -77,7 +78,7 @@ void CpuProfiler::StartCpuProfilerForInfo() vm_->GetJSThread()->SetIsProfiling(true); JSPandaFileManager *pandaFileManager = JSPandaFileManager::GetInstance(); pandaFileManager->EnumerateJSPandaFiles([&](const JSPandaFile *file) -> bool { - pandaFileManager->GetJSPtExtractorAndExtract(file); + pandaFileManager->CpuProfilerGetJSPtExtractor(file); return true; }); @@ -137,7 +138,7 @@ void CpuProfiler::StartCpuProfilerForFile(const std::string &fileName) vm_->GetJSThread()->SetIsProfiling(true); JSPandaFileManager *pandaFileManager = JSPandaFileManager::GetInstance(); pandaFileManager->EnumerateJSPandaFiles([&](const JSPandaFile *file) -> bool { - pandaFileManager->GetJSPtExtractorAndExtract(file); + pandaFileManager->CpuProfilerGetJSPtExtractor(file); return true; }); @@ -269,17 +270,17 @@ void CpuProfiler::GetStack(FrameIterator &it) generator_->ResetFrameLength(); for (; !it.Done(); it.Advance<>()) { auto method = it.CheckAndGetMethod(); - if (method == nullptr) { + if (method == nullptr || !JSTaggedValue(method).IsMethod()) { continue; } - const JSPandaFile *jsPandaFile = method->GetJSPandaFile(); + bool isNative = method->IsNativeWithCallField(); struct MethodKey methodKey; methodKey.deoptType = method->GetDeoptType(); if (topFrame) { - methodKey.state = JsStackGetter::GetRunningState(it, vm_, jsPandaFile, true, enableVMTag_); + methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, true, enableVMTag_); topFrame = false; } else { - methodKey.state 
= JsStackGetter::GetRunningState(it, vm_, jsPandaFile, false, enableVMTag_); + methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, false, enableVMTag_); } void *methodIdentifier = JsStackGetter::GetMethodIdentifier(method, it); if (methodIdentifier == nullptr) { @@ -302,20 +303,27 @@ void CpuProfiler::GetStack(FrameIterator &it) generator_->PostFrame(); } -void CpuProfiler::GetStackCallNapi(JSThread *thread, bool beforeCallNapi) +bool CpuProfiler::GetStackBeforeCallNapi(JSThread *thread) { uint64_t tempTimeStamp = SamplingProcessor::GetMicrosecondsTimeStamp(); - if (beforeCallNapi) { - if (tempTimeStamp - beforeCallNapiTimeStamp_ < INTERVAL_OF_ACTIVE_SAMPLING) { - beforeCallNapiTimeStamp_ = tempTimeStamp; - return; - } + if (tempTimeStamp - beforeCallNapiTimeStamp_ < interval_) { + return false; + } + + if (GetStackCallNapi(thread, true)) { beforeCallNapiTimeStamp_ = tempTimeStamp; - } else { - if (tempTimeStamp - beforeCallNapiTimeStamp_ < CPUPROFILER_DEFAULT_INTERVAL) { - return; - } + return true; } + return false; +} + +void CpuProfiler::GetStackAfterCallNapi(JSThread *thread) +{ + GetStackCallNapi(thread, false); +} + +bool CpuProfiler::GetStackCallNapi(JSThread *thread, bool beforeCallNapi) +{ [[maybe_unused]] CallNapiScope scope(this); const CMap &stackInfo = generator_->GetStackInfo(); generator_->ClearNapiStack(); @@ -327,21 +335,22 @@ void CpuProfiler::GetStackCallNapi(JSThread *thread, bool beforeCallNapi) } for (; !it.Done(); it.Advance()) { auto method = it.CheckAndGetMethod(); - if (method == nullptr) { + if (method == nullptr || !JSTaggedValue(method).IsMethod()) { continue; } + bool isNative = method->IsNativeWithCallField(); struct MethodKey methodKey; methodKey.deoptType = method->GetDeoptType(); if (topFrame) { if (beforeCallNapi) { methodKey.state = RunningState::NAPI; } else { - methodKey.state = JsStackGetter::GetRunningState(it, vm_, method->GetJSPandaFile(), true, enableVMTag_); + methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, true, enableVMTag_); } topFrame = false; } else { - methodKey.state = JsStackGetter::GetRunningState(it, vm_, method->GetJSPandaFile(), false, enableVMTag_); + methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, false, enableVMTag_); } void *methodIdentifier = JsStackGetter::GetMethodIdentifier(method, it); if (methodIdentifier == nullptr) { @@ -354,14 +363,15 @@ void CpuProfiler::GetStackCallNapi(JSThread *thread, bool beforeCallNapi) continue; } if (UNLIKELY(!generator_->PushNapiStackInfo(codeEntry))) { - return; + return false; } } if (UNLIKELY(!generator_->PushNapiFrameStack(methodKey))) { - return; + return false; } } generator_->PostNapiFrame(); + return true; } void CpuProfiler::GetStackSignalHandler(int signal, [[maybe_unused]] siginfo_t *siginfo, void *context) @@ -387,7 +397,7 @@ void CpuProfiler::GetStackSignalHandler(int signal, [[maybe_unused]] siginfo_t * } } - if (profiler->GetBuildNapiStack()) { + if (profiler->GetBuildNapiStack() || thread->GetGcState()) { if (profiler->generator_->SemPost(0) != 0) { LOG_ECMA(ERROR) << "sem_[0] post failed"; } diff --git a/ecmascript/dfx/cpu_profiler/cpu_profiler.h b/ecmascript/dfx/cpu_profiler/cpu_profiler.h index 28c5b5780694ee0d136d6bcc5d11de9bd884c711..7b48af33624842a2acd6817d4aa86f5da8defdfd 100644 --- a/ecmascript/dfx/cpu_profiler/cpu_profiler.h +++ b/ecmascript/dfx/cpu_profiler/cpu_profiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. 
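Splitting GetStackCallNapi into GetStackBeforeCallNapi/GetStackAfterCallNapi also changes the throttling rule: a before-napi sample is taken at most once per interval_, and the timestamp advances only when the capture succeeds, so a failed stack push is retried on the next napi call instead of being silently skipped for a full interval. The shape of that logic as a self-contained sketch, with illustrative names standing in for interval_ and beforeCallNapiTimeStamp_:

```
#include <cstdint>
#include <functional>

struct NapiSampleGate {
    uint64_t intervalUs = 500;   // stand-in for interval_
    uint64_t lastStampUs = 0;    // stand-in for beforeCallNapiTimeStamp_

    bool TrySampleBeforeNapi(uint64_t nowUs, const std::function<bool()> &capture)
    {
        if (nowUs - lastStampUs < intervalUs) {
            return false;                 // rate-limited: skip this napi call
        }
        if (capture()) {                  // i.e. GetStackCallNapi(thread, true)
            lastStampUs = nowUs;          // advance only on a successful capture
            return true;
        }
        return false;                     // failed capture: retry on the next call
    }
};
```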
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_CPU_PROFILER_H -#define ECMASCRIPT_CPU_PROFILER_H +#ifndef ECMASCRIPT_DFX_CPU_PROFILER_CPU_PROFILER_H +#define ECMASCRIPT_DFX_CPU_PROFILER_CPU_PROFILER_H #include @@ -80,7 +80,9 @@ public: bool InHeaderOrTail(uint64_t pc, uint64_t entryBegin, uint64_t entryDuration, uint64_t headerSize, uint64_t tailSize) const; bool IsEntryFrameHeaderOrTail(JSThread *thread, uint64_t pc) const; - void GetStackCallNapi(JSThread *thread, bool beforeCallNapi); + bool GetStackBeforeCallNapi(JSThread *thread); + void GetStackAfterCallNapi(JSThread *thread); + bool GetStackCallNapi(JSThread *thread, bool beforeCallNapi); static void GetStackSignalHandler(int signal, siginfo_t *siginfo, void *context); void StartCpuProfilerForInfo(); @@ -132,4 +134,4 @@ private: CpuProfiler *profiler_ {nullptr}; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_CPU_PROFILE_H \ No newline at end of file +#endif // ECMASCRIPT_DFX_CPU_PROFILER_CPU_PROFILER_H \ No newline at end of file diff --git a/ecmascript/dfx/cpu_profiler/samples_record.cpp b/ecmascript/dfx/cpu_profiler/samples_record.cpp index 3041c6356bf57399c155712d313d344c8171ad4d..fad5a27cc0396a97718aeff07126483e0c6a32ec 100644 --- a/ecmascript/dfx/cpu_profiler/samples_record.cpp +++ b/ecmascript/dfx/cpu_profiler/samples_record.cpp @@ -175,14 +175,22 @@ void SamplesRecord::StringifyNodes() { sampleData_ += "\"nodes\":["; size_t nodeCount = static_cast(profileInfo_->nodeCount); + bool translateCallback = false; + if (sourceMapTranslateCallback_ != nullptr) { + translateCallback = true; + } for (size_t i = 0; i < nodeCount; i++) { struct CpuProfileNode node = profileInfo_->nodes[i]; struct FrameInfo codeEntry = node.codeEntry; + if (translateCallback) { + TranslateUrlPositionBySourceMap(codeEntry); + } std::string url = codeEntry.url; replace(url.begin(), url.end(), '\\', '/'); sampleData_ += "{\"id\":" + std::to_string(node.id) + ",\"callFrame\":{\"functionName\":\"" - + codeEntry.functionName + "\",\"scriptId\":\"" + + codeEntry.functionName + "\",\"moduleName\":\"" + + codeEntry.moduleName + "\",\"scriptId\":\"" + std::to_string(codeEntry.scriptId) + "\",\"url\":\"" + url + "\",\"lineNumber\":" + std::to_string(codeEntry.lineNumber) + ",\"columnNumber\":" @@ -658,6 +666,22 @@ bool SamplesRecord::PushNapiStackInfo(const FrameInfoTemp &frameInfoTemp) return true; } +std::string SamplesRecord::GetModuleName(char *recordName) +{ + std::string recordNameStr = recordName; + std::string::size_type atPos = recordNameStr.find("@"); + if (atPos == std::string::npos) { + return ""; + } + + std::string::size_type slashPos = recordNameStr.rfind("/", atPos); + if (slashPos == std::string::npos) { + return ""; + } + + return recordNameStr.substr(slashPos + 1, atPos - slashPos - 1); +} + void SamplesRecord::FrameInfoTempToMap(FrameInfoTemp *frameInfoTemps, int frameInfoTempLength) { if (frameInfoTempLength == 0) { @@ -676,6 +700,9 @@ void SamplesRecord::FrameInfoTempToMap(FrameInfoTemp *frameInfoTemps, int frameI frameInfo.functionName = AddRunningState(frameInfoTemps[i].functionName, frameInfoTemps[i].methodKey.state, frameInfoTemps[i].methodKey.deoptType); + if (strlen(frameInfoTemps[i].recordName) != 0) { + frameInfo.moduleName = GetModuleName(frameInfoTemps[i].recordName); + } frameInfo.columnNumber = 
frameInfoTemps[i].columnNumber; frameInfo.lineNumber = frameInfoTemps[i].lineNumber; stackInfoMap_.emplace(frameInfoTemps[i].methodKey, frameInfo); @@ -702,6 +729,9 @@ void SamplesRecord::NapiFrameInfoTempToMap() frameInfo.functionName = AddRunningState(napiFrameInfoTemps_[i].functionName, napiFrameInfoTemps_[i].methodKey.state, napiFrameInfoTemps_[i].methodKey.deoptType); + if (strlen(napiFrameInfoTemps_[i].recordName) != 0) { + frameInfo.moduleName = GetModuleName(napiFrameInfoTemps_[i].recordName); + } frameInfo.columnNumber = napiFrameInfoTemps_[i].columnNumber; frameInfo.lineNumber = napiFrameInfoTemps_[i].lineNumber; stackInfoMap_.emplace(napiFrameInfoTemps_[i].methodKey, frameInfo); @@ -749,6 +779,24 @@ void SamplesRecord::SetCallTimeStamp(uint64_t timeStamp) callTimeStamp_ = timeStamp; } +void SamplesRecord::TranslateUrlPositionBySourceMap(struct FrameInfo &codeEntry) +{ + if (codeEntry.url.empty()) { + return; + } + if (!sourceMapTranslateCallback_(codeEntry.url, codeEntry.lineNumber, codeEntry.columnNumber)) { + size_t find = codeEntry.url.rfind("_.js"); + if (find == std::string::npos) { + size_t start = codeEntry.url.find("entry/"); + size_t end = codeEntry.url.rfind(".ets"); + if (start != std::string::npos && end != std::string::npos) { + std::string key = codeEntry.url.substr(start + SUB_LEN, end - start - SUB_LEN); + codeEntry.url = JS_PATH + key + ".js"; + } + } + } +} + // SamplesQueue void SamplesQueue::PostFrame(FrameInfoTemp *frameInfoTemps, MethodKey *frameStack, int frameInfoTempsLength, int frameStackLength) @@ -759,6 +807,8 @@ void SamplesQueue::PostFrame(FrameInfoTemp *frameInfoTemps, MethodKey *frameStac for (int i = 0; i < frameInfoTempsLength; i++) { CheckAndCopy(frames_[rear_].frameInfoTemps[i].functionName, sizeof(frames_[rear_].frameInfoTemps[i].functionName), frameInfoTemps[i].functionName); + CheckAndCopy(frames_[rear_].frameInfoTemps[i].recordName, + sizeof(frames_[rear_].frameInfoTemps[i].recordName), frameInfoTemps[i].recordName); frames_[rear_].frameInfoTemps[i].columnNumber = frameInfoTemps[i].columnNumber; frames_[rear_].frameInfoTemps[i].lineNumber = frameInfoTemps[i].lineNumber; frames_[rear_].frameInfoTemps[i].scriptId = frameInfoTemps[i].scriptId; @@ -788,12 +838,14 @@ void SamplesQueue::PostNapiFrame(CVector &napiFrameInfoTemps, { os::memory::LockHolder holder(mtx_); if (!IsFull()) { - int frameInfoTempsLength = static_cast(napiFrameInfoTemps.size()); - int frameStackLength = static_cast(napiFrameStack.size()); + size_t frameInfoTempsLength = napiFrameInfoTemps.size(); + size_t frameStackLength = napiFrameStack.size(); // napiFrameInfoTemps - for (int i = 0; i < frameInfoTempsLength; i++) { + for (size_t i = 0; i < frameInfoTempsLength; i++) { CheckAndCopy(frames_[rear_].frameInfoTemps[i].functionName, sizeof(frames_[rear_].frameInfoTemps[i].functionName), napiFrameInfoTemps[i].functionName); + CheckAndCopy(frames_[rear_].frameInfoTemps[i].recordName, + sizeof(frames_[rear_].frameInfoTemps[i].recordName), napiFrameInfoTemps[i].recordName); frames_[rear_].frameInfoTemps[i].columnNumber = napiFrameInfoTemps[i].columnNumber; frames_[rear_].frameInfoTemps[i].lineNumber = napiFrameInfoTemps[i].lineNumber; frames_[rear_].frameInfoTemps[i].scriptId = napiFrameInfoTemps[i].scriptId; @@ -804,7 +856,7 @@ void SamplesQueue::PostNapiFrame(CVector &napiFrameInfoTemps, frames_[rear_].frameInfoTemps[i].methodKey.state = napiFrameInfoTemps[i].methodKey.state; } // napiFrameStack - for (int i = 0; i < frameStackLength; i++) { + for (size_t i = 0; i < 
frameStackLength; i++) { frames_[rear_].frameStack[i].methodIdentifier = napiFrameStack[i].methodIdentifier; frames_[rear_].frameStack[i].state = napiFrameStack[i].state; } diff --git a/ecmascript/dfx/cpu_profiler/samples_record.h b/ecmascript/dfx/cpu_profiler/samples_record.h index 4369c0d434d4734512fe794a4435a350fa490e8e..32f4fe322b536d60e6e4b286fb498058a5ff2772 100644 --- a/ecmascript/dfx/cpu_profiler/samples_record.h +++ b/ecmascript/dfx/cpu_profiler/samples_record.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_SAMPLES_RECORD_H -#define ECMASCRIPT_SAMPLES_RECORD_H +#ifndef ECMASCRIPT_DFX_CPU_PROFILER_SAMPLES_RECORD_H +#define ECMASCRIPT_DFX_CPU_PROFILER_SAMPLES_RECORD_H #include #include @@ -36,13 +36,15 @@ const int PROGRAM_NODE_ID = 2; // 2: the (program) node id const int QUEUE_CAPACITY = 51; // the capacity of the circular queue is QUEUE_CAPACITY - 1 const size_t NAPI_CALL_SETP = 2; // 2: step size of the variable napiCallIdx in while loop const size_t PRE_IDX_RANGE = 5; // 5: length of variable preIdx looping backward +const size_t SUB_LEN = 6; // 6: Truncate the path length +const std::string JS_PATH = "entry/build/default/cache/default/default@CompileArkTS/esmodule/debug/"; struct FrameInfo { - std::string codeType = ""; - std::string functionName = ""; - int columnNumber = -1; - int lineNumber = -1; int scriptId = 0; + int lineNumber = -1; + int columnNumber = -1; + std::string functionName = ""; + std::string moduleName = ""; std::string url = ""; }; @@ -121,6 +123,7 @@ public: int GetMethodNodeCount() const; int GetframeStackLength() const; std::string GetSampleData() const; + std::string GetModuleName(char *recordName); void SetThreadStartTime(uint64_t threadStartTime); void SetThreadStopTime(); void SetStartsampleData(std::string sampleData); @@ -172,6 +175,11 @@ public: enableVMTag_ = flag; } + void SetSourceMapTranslateCallback(SourceMapTranslateCallback cb) + { + sourceMapTranslateCallback_ = cb; + } + void SetTimeDeltaThreshold(uint32_t timeDeltaThreshold) { timeDeltaThreshold_ = timeDeltaThreshold; @@ -186,6 +194,7 @@ private: void FrameInfoTempToMap(FrameInfoTemp *frameInfoTemps, int frameInfoTempLength); void NapiFrameInfoTempToMap(); void StatisticStateTime(int timeDelta, RunningState state); + void TranslateUrlPositionBySourceMap(struct FrameInfo &codeEntry); int previousId_ = 0; RunningState previousState_ = RunningState::OTHER; @@ -215,6 +224,7 @@ private: bool enableVMTag_ {false}; uint64_t callTimeStamp_ = 0; uint32_t timeDeltaThreshold_ = 0; + SourceMapTranslateCallback sourceMapTranslateCallback_ {nullptr}; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_SAMPLES_RECORD_H \ No newline at end of file +#endif // ECMASCRIPT_DFX_CPU_PROFILER_SAMPLES_RECORD_H \ No newline at end of file diff --git a/ecmascript/dfx/cpu_profiler/sampling_processor.h b/ecmascript/dfx/cpu_profiler/sampling_processor.h index bfda1205c0b04eb70985c385f6f8f3a11cd95140..bb91f0d5d5a542abb77d68ae355e3f665ec51f19 100644 --- a/ecmascript/dfx/cpu_profiler/sampling_processor.h +++ b/ecmascript/dfx/cpu_profiler/sampling_processor.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. 
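GetModuleName above is pure string slicing: the module is the segment between the last '/' preceding the first '@' in the record name. A standalone rendering with a made-up record name (the sample format is illustrative, not a real OHOS bundle):

```
#include <iostream>
#include <string>

std::string GetModuleNameSketch(const std::string &recordName)
{
    auto atPos = recordName.find('@');
    if (atPos == std::string::npos) {
        return "";
    }
    auto slashPos = recordName.rfind('/', atPos);
    if (slashPos == std::string::npos) {
        return "";
    }
    return recordName.substr(slashPos + 1, atPos - slashPos - 1);
}

int main()
{
    // Prints "myLib": the segment between the last '/' before the first '@'.
    std::cout << GetModuleNameSketch("com.example.app/myLib@1.0.0/src/index") << std::endl;
    return 0;
}
```

TranslateUrlPositionBySourceMap's fallback follows the same spirit: when the source-map callback cannot translate a url of the form entry/&lt;key&gt;.ets, it rewrites it to JS_PATH + &lt;key&gt; + ".js", with SUB_LEN (6) being the length of the "entry/" prefix.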
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_SAMPLING_PROCESSOR_H -#define ECMASCRIPT_SAMPLING_PROCESSOR_H +#ifndef ECMASCRIPT_DFX_CPU_PROFILER_SAMPLING_PROCESSOR_H +#define ECMASCRIPT_DFX_CPU_PROFILER_SAMPLING_PROCESSOR_H #include #include @@ -44,4 +44,4 @@ private: pthread_t pid_ = 0; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_SAMPLING_PROCESSOR_H +#endif // ECMASCRIPT_DFX_CPU_PROFILER_SAMPLING_PROCESSOR_H diff --git a/ecmascript/dfx/hprof/file_stream.cpp b/ecmascript/dfx/hprof/file_stream.cpp index 4199977c1e899aa23dda8215894324a8e4057144..5352806500a04f852f89e25a72ad2f14e2468fca 100644 --- a/ecmascript/dfx/hprof/file_stream.cpp +++ b/ecmascript/dfx/hprof/file_stream.cpp @@ -32,9 +32,14 @@ FileStream::FileStream(const std::string &fileName) Initialize(fileName); } +FileStream::~FileStream() +{ + EndOfStream(); +} + void FileStream::EndOfStream() { - if (Good()) { + if (fileStream_.is_open()) { fileStream_.close(); } } @@ -50,7 +55,6 @@ void FileStream::Initialize(const std::string &fileName) std::pair realPath = FilePathValid(fileName); if (!realPath.first) { LOG_ECMA(ERROR) << "FileStream: check file path failed"; - fileStream_.close(); return; } diff --git a/ecmascript/dfx/hprof/file_stream.h b/ecmascript/dfx/hprof/file_stream.h index 413b8b50825e083b5473153c987d07a9c94241a2..f1f712cb040f4ab310d149d9bf1ea046b4af7042 100644 --- a/ecmascript/dfx/hprof/file_stream.h +++ b/ecmascript/dfx/hprof/file_stream.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
 */
-#ifndef ECMASCRIPT_TOOLING_INTERFACE_FILE_STREAM_H
-#define ECMASCRIPT_TOOLING_INTERFACE_FILE_STREAM_H
+#ifndef ECMASCRIPT_DFX_HPROF_FILE_STREAM_H
+#define ECMASCRIPT_DFX_HPROF_FILE_STREAM_H
 
 #include
 #include
@@ -28,7 +28,7 @@ namespace panda::ecmascript {
 class FileStream : public Stream {
 public:
     explicit FileStream(const std::string &fileName);
-    ~FileStream() override = default;
+    ~FileStream() override;
 
     void EndOfStream() override;
@@ -85,4 +85,4 @@ private:
 };
 } // namespace panda::ecmascript::tooling
-#endif // ECMASCRIPT_TOOLING_INTERFACE_FILE_STREAM_H
+#endif // ECMASCRIPT_DFX_HPROF_FILE_STREAM_H
diff --git a/ecmascript/dfx/hprof/heap_profiler.cpp b/ecmascript/dfx/hprof/heap_profiler.cpp
index 47e90fe91cf38a437b779d74e24e776bd28c818e..2189c6eca67560881d75b854eef5c9d90913a8cb 100644
--- a/ecmascript/dfx/hprof/heap_profiler.cpp
+++ b/ecmascript/dfx/hprof/heap_profiler.cpp
@@ -24,19 +24,99 @@
 #include "ecmascript/mem/heap-inl.h"
 
 namespace panda::ecmascript {
-HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), chunk_(vm->GetNativeAreaAllocator())
+std::pair<bool, uint32_t> EntryIdMap::FindId(Address addr)
 {
-    jsonSerializer_ = GetChunk()->New<HeapSnapshotJSONSerializer>();
-    if (UNLIKELY(jsonSerializer_ == nullptr)) {
-        LOG_FULL(FATAL) << "alloc snapshot json serializer failed";
-        UNREACHABLE();
+    auto it = idMap_.find(addr);
+    if (it == idMap_.end()) {
+        return std::make_pair(false, GetNextId()); // return nextId if the entry does not exist
+    } else {
+        return std::make_pair(true, it->second);
+    }
+}
+
+bool EntryIdMap::InsertId(Address addr, uint32_t id)
+{
+    auto it = idMap_.find(addr);
+    if (it == idMap_.end()) {
+        idMap_.emplace(addr, id);
+        return true;
+    }
+    idMap_[addr] = id;
+    return false;
+}
+
+bool EntryIdMap::EraseId(Address addr)
+{
+    auto it = idMap_.find(addr);
+    if (it == idMap_.end()) {
+        return false;
+    }
+    idMap_.erase(it);
+    return true;
+}
+
+bool EntryIdMap::Move(Address oldAddr, Address forwardAddr)
+{
+    if (oldAddr == forwardAddr) {
+        return true;
+    }
+    auto it = idMap_.find(oldAddr);
+    if (it != idMap_.end()) {
+        uint32_t id = it->second;
+        idMap_.erase(it);
+        idMap_[forwardAddr] = id;
+        return true;
+    }
+    return false;
+}
+
+void EntryIdMap::RemoveDeadEntryId(HeapSnapshot *snapshot)
+{
+    auto nodes = snapshot->GetNodes();
+    CUnorderedMap<Address, uint32_t> newIdMap;
+    for (auto node : *nodes) {
+        auto addr = node->GetAddress();
+        auto it = idMap_.find(addr);
+        if (it != idMap_.end()) {
+            newIdMap.emplace(addr, it->second);
+        }
     }
+    idMap_.clear();
+    idMap_ = newIdMap;
 }
+
+HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), chunk_(vm->GetNativeAreaAllocator())
+{
+    isProfiling_ = false;
+    entryIdMap_ = GetChunk()->New<EntryIdMap>();
+}
+
 HeapProfiler::~HeapProfiler()
 {
     ClearSnapshot();
-    GetChunk()->Delete(jsonSerializer_);
-    jsonSerializer_ = nullptr;
+    GetChunk()->Delete(entryIdMap_);
+}
+
+void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
+{
+    DISALLOW_GARBAGE_COLLECTION;
+    if (isProfiling_) {
+        // Id will be allocated later while adding a new node
+        if (heapTracker_ != nullptr) {
+            heapTracker_->AllocationEvent(address, size);
+        }
+    }
+}
+
+void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
+{
+    os::memory::LockHolder lock(mutex_);
+    if (isProfiling_) {
+        entryIdMap_->Move(address, reinterpret_cast<Address>
(forwardAddress)); + if (heapTracker_ != nullptr) { + heapTracker_->MoveEvent(address, forwardAddress, size); + } + } } void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot) @@ -47,41 +127,46 @@ void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot) } bool HeapProfiler::DumpHeapSnapshot(DumpFormat dumpFormat, Stream *stream, Progress *progress, - bool isVmMode, bool isPrivate) + bool isVmMode, bool isPrivate, bool captureNumericValue) { [[maybe_unused]] bool heapClean = ForceFullGC(vm_); ASSERT(heapClean); LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot start"; size_t heapSize = vm_->GetHeap()->GetHeapObjectSize(); - LOG_ECMA(ERROR) << "HeapProfiler DumpSnapshot heap size " << heapSize; - int32_t heapCount = vm_->GetHeap()->GetHeapObjectCount(); + LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize; + int32_t heapCount = static_cast(vm_->GetHeap()->GetHeapObjectCount()); if (progress != nullptr) { progress->ReportProgress(0, heapCount); } - HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, isVmMode, isPrivate); + HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, isVmMode, isPrivate, captureNumericValue); + ASSERT(snapshot != nullptr); + entryIdMap_->RemoveDeadEntryId(snapshot); + isProfiling_ = true; if (progress != nullptr) { progress->ReportProgress(heapCount, heapCount); } - ASSERT(snapshot != nullptr); if (!stream->Good()) { FileStream newStream(GenDumpFileName(dumpFormat)); - return jsonSerializer_->Serialize(snapshot, &newStream); + auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream); + GetChunk()->Delete(snapshot); + return serializerResult; } - return jsonSerializer_->Serialize(snapshot, stream); + auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream); + GetChunk()->Delete(snapshot); + return serializerResult; } bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream, bool traceAllocation, bool newThread) { - HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, isVmMode, false, traceAllocation); + HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, isVmMode, false, false, traceAllocation); if (snapshot == nullptr) { return false; } - + isProfiling_ = true; UpdateHeapObjects(snapshot); heapTracker_ = std::make_unique(snapshot, timeInterval, stream); - const_cast(vm_)->StartHeapTracking(heapTracker_.get()); - + const_cast(vm_)->StartHeapTracking(); if (newThread) { heapTracker_->StartTracing(); } @@ -91,11 +176,13 @@ bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream bool HeapProfiler::UpdateHeapTracking(Stream *stream) { - HeapSnapshot *snapshot = hprofs_.at(0); + if (heapTracker_ == nullptr) { + return false; + } + HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot(); if (snapshot == nullptr) { return false; } - snapshot->RecordSampleTime(); UpdateHeapObjects(snapshot); @@ -111,14 +198,14 @@ bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool new if (heapTracker_ == nullptr) { return false; } - int32_t heapCount = vm_->GetHeap()->GetHeapObjectCount(); + int32_t heapCount = static_cast(vm_->GetHeap()->GetHeapObjectCount()); const_cast(vm_)->StopHeapTracking(); if (newThread) { heapTracker_->StopTracing(); } - HeapSnapshot *snapshot = hprofs_.at(0); + HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot(); if (snapshot == nullptr) { return false; } @@ -127,10 +214,11 @@ bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool new 
progress->ReportProgress(0, heapCount); } snapshot->FinishSnapshot(); + isProfiling_ = false; if (progress != nullptr) { progress->ReportProgress(heapCount, heapCount); } - return jsonSerializer_->Serialize(snapshot, stream); + return HeapSnapshotJSONSerializer::Serialize(snapshot, stream); } std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat) @@ -189,24 +277,26 @@ bool HeapProfiler::ForceFullGC(const EcmaVM *vm) return false; } -HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, bool isVmMode, bool isPrivate, bool traceAllocation) +HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, bool isVmMode, bool isPrivate, + bool captureNumericValue, bool traceAllocation) { LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot"; DISALLOW_GARBAGE_COLLECTION; const_cast(vm_->GetHeap())->Prepare(); switch (sampleType) { case SampleType::ONE_SHOT: { - auto *snapshot = GetChunk()->New(vm_, isVmMode, isPrivate, traceAllocation, GetChunk()); + auto *snapshot = GetChunk()->New(vm_, isVmMode, isPrivate, captureNumericValue, + traceAllocation, entryIdMap_, GetChunk()); if (snapshot == nullptr) { LOG_FULL(FATAL) << "alloc snapshot failed"; UNREACHABLE(); } snapshot->BuildUp(); - AddSnapshot(snapshot); return snapshot; } case SampleType::REAL_TIME: { - auto *snapshot = GetChunk()->New(vm_, isVmMode, isPrivate, traceAllocation, GetChunk()); + auto *snapshot = GetChunk()->New(vm_, isVmMode, isPrivate, captureNumericValue, + traceAllocation, entryIdMap_, GetChunk()); if (snapshot == nullptr) { LOG_FULL(FATAL) << "alloc snapshot failed"; UNREACHABLE(); diff --git a/ecmascript/dfx/hprof/heap_profiler.h b/ecmascript/dfx/hprof/heap_profiler.h index d5d1dcae0384acbee3044c7218815ee1dbd6bfec..4facdb9b58836f3451a61817a41fd78a45af7b9f 100644 --- a/ecmascript/dfx/hprof/heap_profiler.h +++ b/ecmascript/dfx/hprof/heap_profiler.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
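The EntryIdMap introduced here is what keeps heap-snapshot node ids stable across GCs: ids advance by SEQ_STEP (2) starting from 3 (id 1 is reserved for the synthetic root), MoveEvent re-keys an entry when the collector relocates an object, and RemoveDeadEntryId drops entries whose objects are absent from the latest snapshot. A condensed model of the FindId/InsertId/Move semantics, assuming plain uintptr_t addresses:

```
#include <cstdint>
#include <unordered_map>

class EntryIdMapModel {
public:
    uint32_t GetId(uintptr_t addr)
    {
        auto it = map_.find(addr);
        if (it != map_.end()) {
            return it->second;       // object already known: stable id
        }
        uint32_t id = nextId_;
        nextId_ += 2;                // SEQ_STEP
        map_.emplace(addr, id);
        return id;
    }
    void Move(uintptr_t oldAddr, uintptr_t newAddr)
    {
        auto it = map_.find(oldAddr);
        if (it == map_.end() || oldAddr == newAddr) {
            return;
        }
        uint32_t id = it->second;
        map_.erase(it);
        map_[newAddr] = id;          // same id, new address after GC move
    }
private:
    uint32_t nextId_ {3};            // id 1 reserved for the synthetic root
    std::unordered_map<uintptr_t, uint32_t> map_;
};
```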
*/ -#ifndef ECMASCRIPT_HPROF_HEAP_PROFILER_H -#define ECMASCRIPT_HPROF_HEAP_PROFILER_H +#ifndef ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_H +#define ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_H #include "ecmascript/dfx/hprof/heap_profiler_interface.h" #include "ecmascript/dfx/hprof/heap_snapshot_json_serializer.h" @@ -29,6 +29,38 @@ namespace panda::ecmascript { class HeapSnapshot; class EcmaVM; +class EntryIdMap { +public: + EntryIdMap() = default; + ~EntryIdMap() = default; + NO_COPY_SEMANTIC(EntryIdMap); + NO_MOVE_SEMANTIC(EntryIdMap); + + static constexpr uint32_t SEQ_STEP = 2; + std::pair FindId(Address addr); + bool InsertId(Address addr, uint32_t id); + bool EraseId(Address addr); + bool Move(Address oldAddr, Address forwardAddr); + void RemoveDeadEntryId(HeapSnapshot *snapshot); + uint32_t GetNextId() + { + nextId_ += SEQ_STEP; + return nextId_ - SEQ_STEP; + } + uint32_t GetLastId() + { + return nextId_ - SEQ_STEP; + } + size_t GetIdCount() + { + return idMap_.size(); + } + +private: + uint32_t nextId_ {3U}; // 1 Reversed for SyntheticRoot + CUnorderedMap idMap_ {}; +}; + class HeapProfiler : public HeapProfilerInterface { public: NO_MOVE_SEMANTIC(HeapProfiler); @@ -37,11 +69,14 @@ public: ~HeapProfiler() override; enum class SampleType { ONE_SHOT, REAL_TIME }; + + void AllocationEvent(TaggedObject *address, size_t size) override; + void MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size) override; /** * dump the specific snapshot in target format */ bool DumpHeapSnapshot(DumpFormat dumpFormat, Stream *stream, Progress *progress = nullptr, - bool isVmMode = true, bool isPrivate = false) override; + bool isVmMode = true, bool isPrivate = false, bool captureNumericValue = false) override; void AddSnapshot(HeapSnapshot *snapshot); @@ -52,6 +87,14 @@ public: bool StartHeapSampling(uint64_t samplingInterval, int stackDepth = 128) override; void StopHeapSampling() override; const struct SamplingInfo *GetAllocationProfile() override; + size_t GetIdCount() override + { + return entryIdMap_->GetIdCount(); + } + EntryIdMap *GetEntryIdMap() const + { + return const_cast(entryIdMap_); + } Chunk *GetChunk() const { return const_cast(&chunk_); @@ -66,7 +109,8 @@ private: * make a new heap snapshot and put it into a container eg, vector */ HeapSnapshot *MakeHeapSnapshot(SampleType sampleType, bool isVmMode = true, - bool isPrivate = false, bool traceAllocation = false); + bool isPrivate = false, bool captureNumericValue = false, + bool traceAllocation = false); std::string GenDumpFileName(DumpFormat dumpFormat); CString GetTimeStamp(); void UpdateHeapObjects(HeapSnapshot *snapshot); @@ -75,10 +119,12 @@ private: const size_t MAX_NUM_HPROF = 5; // ~10MB const EcmaVM *vm_; CVector hprofs_; - HeapSnapshotJSONSerializer *jsonSerializer_ {nullptr}; + bool isProfiling_ {false}; + EntryIdMap* entryIdMap_; std::unique_ptr heapTracker_; Chunk chunk_; std::unique_ptr heapSampling_ {nullptr}; + os::memory::Mutex mutex_; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_HPROF_HEAP_PROFILER_H +#endif // ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_H diff --git a/ecmascript/dfx/hprof/heap_profiler_interface.h b/ecmascript/dfx/hprof/heap_profiler_interface.h index 2650615553a4567606c606a5f665a2670698cdc1..385161cc78a54e287e1685188583ee095e1733a0 100644 --- a/ecmascript/dfx/hprof/heap_profiler_interface.h +++ b/ecmascript/dfx/hprof/heap_profiler_interface.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_HPROF_HEAP_PROFILER_INTERFACE_H -#define ECMASCRIPT_HPROF_HEAP_PROFILER_INTERFACE_H +#ifndef ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_INTERFACE_H +#define ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_INTERFACE_H #include @@ -22,6 +22,7 @@ namespace panda::ecmascript { class EcmaVM; +class TaggedObject; class Progress; class Stream; struct SamplingInfo; @@ -36,8 +37,11 @@ public: HeapProfilerInterface() = default; virtual ~HeapProfilerInterface() = default; + virtual size_t GetIdCount() = 0; + virtual void AllocationEvent(TaggedObject *address, size_t size) = 0; + virtual void MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)= 0; virtual bool DumpHeapSnapshot(DumpFormat dumpFormat, Stream *stream, Progress *progress = nullptr, - bool isVmMode = true, bool isPrivate = false) = 0; + bool isVmMode = true, bool isPrivate = false, bool captureNumericValue = false) = 0; virtual bool StartHeapTracking(double timeInterval, bool isVmMode = true, Stream *stream = nullptr, bool traceAllocation = false, bool newThread = true) = 0; @@ -51,4 +55,4 @@ public: NO_COPY_SEMANTIC(HeapProfilerInterface); }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_HPROF_HEAP_PROFILER_INTERFACE_H +#endif // ECMASCRIPT_DFX_HPROF_HEAP_PROFILER_INTERFACE_H diff --git a/ecmascript/dfx/hprof/heap_root_visitor.h b/ecmascript/dfx/hprof/heap_root_visitor.h index e929db65b3eaf2b5b21c41c42b74d038917719ce..a2b4d6f1d2232c36b63f857b6099820ee3319748 100644 --- a/ecmascript/dfx/hprof/heap_root_visitor.h +++ b/ecmascript/dfx/hprof/heap_root_visitor.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
 */

-#ifndef ECMASCRIPT_HPROF_HEAP_ROOT_VISITOR_H
-#define ECMASCRIPT_HPROF_HEAP_ROOT_VISITOR_H
+#ifndef ECMASCRIPT_DFX_HPROF_HEAP_ROOT_VISITOR_H
+#define ECMASCRIPT_DFX_HPROF_HEAP_ROOT_VISITOR_H

 #include "ecmascript/ecma_vm.h"
 #include "ecmascript/mem/visitor.h"
@@ -35,4 +35,4 @@ private:
     EcmaVM *GetVMInstance(JSThread *thread) const;
 };
 } // namespace panda::ecmascript
-#endif // ECMASCRIPT_HPROF_HEAP_ROOT_VISITOR_H
+#endif // ECMASCRIPT_DFX_HPROF_HEAP_ROOT_VISITOR_H
diff --git a/ecmascript/dfx/hprof/heap_sampling.cpp b/ecmascript/dfx/hprof/heap_sampling.cpp
index d1dfee57d2fea6eef522e1fcba072a2ce4566ab5..cbe178a6852d3ca2a84d0583b22e50fde9172755 100644
--- a/ecmascript/dfx/hprof/heap_sampling.cpp
+++ b/ecmascript/dfx/hprof/heap_sampling.cpp
@@ -94,13 +94,13 @@ void HeapSampling::GetStack()
         if (method == nullptr) {
             continue;
         }
-        const JSPandaFile *jsPandaFile = method->GetJSPandaFile();
+        bool isNative = method->IsNativeWithCallField();
         struct MethodKey methodKey;
         if (topFrame) {
-            methodKey.state = JsStackGetter::GetRunningState(it, vm_, jsPandaFile, true);
+            methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, true);
             topFrame = false;
         } else {
-            methodKey.state = JsStackGetter::GetRunningState(it, vm_, jsPandaFile, false);
+            methodKey.state = JsStackGetter::GetRunningState(it, vm_, isNative, false);
         }
         void *methodIdentifier = JsStackGetter::GetMethodIdentifier(method, it);
         if (methodIdentifier == nullptr) {
diff --git a/ecmascript/dfx/hprof/heap_sampling.h b/ecmascript/dfx/hprof/heap_sampling.h
index d5b7c99c17949ae660c93d20ee541c1e8a404d79..372ef0dd72002832a09cc843861d5b234b9cecc0 100644
--- a/ecmascript/dfx/hprof/heap_sampling.h
+++ b/ecmascript/dfx/hprof/heap_sampling.h
@@ -13,8 +13,8 @@
 * limitations under the License.
 */

-#ifndef ECMASCRIPT_HPROF_HEAP_SAMPLING_H
-#define ECMASCRIPT_HPROF_HEAP_SAMPLING_H
+#ifndef ECMASCRIPT_DFX_HPROF_HEAP_SAMPLING_H
+#define ECMASCRIPT_DFX_HPROF_HEAP_SAMPLING_H

 #include "ecmascript/dfx/stackinfo/js_stackgetter.h"
 #include "ecmascript/mem/heap.h"
@@ -23,6 +23,7 @@ namespace panda::ecmascript {
 struct CallFrameInfo {
     std::string codeType_ = "";
     std::string functionName_ = "";
+    std::string moduleName_ = "";
     int columnNumber_ = -1;
     int lineNumber_ = -1;
     int scriptId_ = 0;
@@ -91,4 +92,4 @@ private:
     CVector<FrameInfoTemp> frameInfoTemps_;
 };
 } // namespace panda::ecmascript
-#endif // ECMASCRIPT_HPROF_HEAP_SAMPLING_H
\ No newline at end of file
+#endif // ECMASCRIPT_DFX_HPROF_HEAP_SAMPLING_H
\ No newline at end of file
diff --git a/ecmascript/dfx/hprof/heap_snapshot.cpp b/ecmascript/dfx/hprof/heap_snapshot.cpp
index 603518519fd6af22713b30f50d3dd62743ec6d1b..f752197c7e22228f916960ff246e07dfd1871ebd 100644
--- a/ecmascript/dfx/hprof/heap_snapshot.cpp
+++ b/ecmascript/dfx/hprof/heap_snapshot.cpp
@@ -61,7 +61,7 @@ Node *Node::NewNode(Chunk *chunk, size_t id, size_t index, const CString *name,
     return node;
 }

-Edge *Edge::NewEdge(Chunk *chunk, uint64_t id, EdgeType type, Node *from, Node *to, CString *name)
+Edge *Edge::NewEdge(Chunk *chunk, uint32_t id, EdgeType type, Node *from, Node *to, CString *name)
 {
     auto edge = chunk->New<Edge>(id, type, from, to, name);
     if (UNLIKELY(edge == nullptr)) {
@@ -71,6 +71,16 @@ Edge *Edge::NewEdge(Chunk *chunk, uint64_t id, EdgeType type, Node *from, Node *
     return edge;
 }

+Edge *Edge::NewEdge(Chunk *chunk, uint32_t id, EdgeType type, Node *from, Node *to, uint32_t index)
+{
+    auto edge = chunk->New<Edge>(id, type, from, to, index);
+    if (UNLIKELY(edge == nullptr)) {
+        LOG_FULL(FATAL) << "internal allocator failed";
+        UNREACHABLE();
+    }
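The new NewEdge overload taking a uint32_t exists because element edges (indexed array slots) carry a position rather than a property name; heap_snapshot.h below stores the two payloads as overlapping union members. A compact sketch of that shape, with const char * standing in for CString:

```
// Sketch of the name-or-index payload behind the two NewEdge overloads;
// const char * stands in for CString. Exactly one union member is
// meaningful at a time, selected by the edge type.
#include <cstdint>
#include <cstdio>

enum class EdgeType { CONTEXT, ELEMENT, PROPERTY, INTERNAL, HIDDEN, SHORTCUT, WEAK, DEFAULT = PROPERTY };

struct EdgeSketch {
    EdgeType type {EdgeType::DEFAULT};
    union {
        const char *name;  // named edges: property, internal, weak, ...
        uint32_t index;    // ELEMENT edges: slot in the owning array
    };
};

int main()
{
    EdgeSketch prop;
    prop.type = EdgeType::PROPERTY;
    prop.name = "length";

    EdgeSketch elem;
    elem.type = EdgeType::ELEMENT;
    elem.index = 3;

    std::printf("named edge -> %s, element edge -> slot %u\n", prop.name, elem.index);
    return 0;
}
```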
+    return edge;
+}
+
 HeapSnapshot::~HeapSnapshot()
 {
     for (Node *node : nodes_) {
@@ -86,6 +96,7 @@ HeapSnapshot::~HeapSnapshot()
     scriptIdMap_.clear();
     methodToTraceNodeId_.clear();
     traceNodeIndex_.clear();
+    entryIdMap_ = nullptr;
     chunk_ = nullptr;
 }

@@ -121,6 +132,7 @@ void HeapSnapshot::UpdateNodes(bool isInFinish)
         if (!(*iter)->IsLive()) {
             Node *node = entryMap_.FindAndEraseNode((*iter)->GetAddress());
             ASSERT(*iter == node);
+            entryIdMap_->EraseId((*iter)->GetAddress());
             if (node != nullptr) {
                 DecreaseNodeSize(node->GetSelfSize());
                 chunk_->Delete(node);
@@ -143,7 +155,7 @@ bool HeapSnapshot::FinishSnapshot()

 void HeapSnapshot::RecordSampleTime()
 {
-    timeStamps_.emplace_back(sequenceId_);
+    timeStamps_.emplace_back(entryIdMap_->GetLastId());
 }

 void HeapSnapshot::PushHeapStat(Stream* stream)
@@ -161,8 +173,8 @@ void HeapSnapshot::PushHeapStat(Stream* stream)
         TimeStamp& timeStamp = timeStamps_[timeIndex];
         sequenceId = timeStamp.GetLastSequenceId();
         timeStampUs = timeStamp.GetTimeStamp();
-        int32_t nodesSize = 0;
-        int32_t nodesCount = 0;
+        uint32_t nodesSize = 0;
+        uint32_t nodesCount = 0;
         while (iter != nodes_.end() && (*iter)->GetId() <= static_cast<uint32_t>(sequenceId)) {
             nodesCount++;
             nodesSize += (*iter)->GetSelfSize();
@@ -198,7 +210,7 @@ void HeapSnapshot::MoveNode(uintptr_t address, TaggedObject *forwardAddress, siz
     Node *node = entryMap_.FindAndEraseNode(address);

     if (node != nullptr) {
-        ASSERT(node->GetId() <= static_cast<uint64_t>(INT_MAX));
+        ASSERT(node->GetId() <= UINT_MAX);
         Node *oldNode = entryMap_.FindAndEraseNode(Node::NewAddress(forwardAddress));

         if (oldNode != nullptr) {
@@ -215,7 +227,6 @@ void HeapSnapshot::MoveNode(uintptr_t address, TaggedObject *forwardAddress, siz
             node->SetSelfSize(size);
         }
         node->SetAddress(Node::NewAddress(forwardAddress));
-        entryMap_.InsertEntry(node);
     } else {
         LOG_DEBUGGER(WARN) << "Untracked object moves from " << address << " to " << forwardAddress;
@@ -249,6 +260,7 @@ CString *HeapSnapshot::GenerateNodeName(TaggedObject *entry)
         case JSType::LINE_STRING:
         case JSType::CONSTANT_STRING:
         case JSType::TREE_STRING:
+        case JSType::SLICED_STRING:
             return GetString("BaseString");
         case JSType::JS_OBJECT: {
             CString objName = CString("JSOBJECT(Ctor="); // Ctor-name
@@ -299,7 +311,7 @@ CString *HeapSnapshot::GenerateNodeName(TaggedObject *entry)
         case JSType::JS_ARRAY: {
             JSArray *jsArray = JSArray::Cast(entry);
             CString jsArrayName("JSArray[");
-            jsArrayName.append(ToCString(jsArray->GetLength().GetInt()));
+            jsArrayName.append(ToCString(jsArray->GetLength()));
             jsArrayName.append("]");
             return GetString(jsArrayName);
         }
@@ -375,6 +387,8 @@ CString *HeapSnapshot::GenerateNodeName(TaggedObject *entry)
             return GetString("PromiseReactionsFunction");
         case JSType::JS_PROMISE_EXECUTOR_FUNCTION:
             return GetString("PromiseExecutorFunction");
+        case JSType::JS_ASYNC_FROM_SYNC_ITER_UNWARP_FUNCTION:
+            return GetString("AsyncFromSyncIterUnwarpFunction");
         case JSType::JS_PROMISE_ALL_RESOLVE_ELEMENT_FUNCTION:
             return GetString("PromiseAllResolveElementFunction");
         case JSType::JS_PROMISE_ANY_REJECT_ELEMENT_FUNCTION:
@@ -587,26 +601,23 @@ void HeapSnapshot::FillNodes(bool isInFinish)
     auto heap = vm_->GetHeap();
     if (heap != nullptr) {
         heap->IterateOverObjects([this, isInFinish](TaggedObject *obj) {
-            GenerateNode(JSTaggedValue(obj), 0, -1, isInFinish);
+            GenerateNode(JSTaggedValue(obj), 0, isInFinish);
         });
     }
 }

-Node *HeapSnapshot::GenerateNode(JSTaggedValue entry, size_t size, int sequenceId, bool isInFinish)
+Node *HeapSnapshot::GenerateNode(JSTaggedValue entry, size_t size, bool isInFinish)
 {
     Node *node = nullptr;
-    if (sequenceId == -1) {
-        sequenceId = sequenceId_ + SEQ_STEP;
-    }
     if (entry.IsHeapObject()) {
         if (entry.IsWeak()) {
             entry.RemoveWeakTag();
         }
         if (entry.IsString()) {
             if (isPrivate_) {
-                node = GeneratePrivateStringNode(size, sequenceId);
+                node = GeneratePrivateStringNode(size);
             } else {
-                node = GenerateStringNode(entry, size, sequenceId, isInFinish);
+                node = GenerateStringNode(entry, size, isInFinish);
             }
             if (node == nullptr) {
                 LOG_ECMA(DEBUG) << "string node nullptr";
@@ -618,14 +629,15 @@ Node *HeapSnapshot::GenerateNode(JSTaggedValue entry, size_t size, int sequenceI
         if (baseClass != nullptr) {
             Address addr = reinterpret_cast<Address>(obj);
             Node *existNode = entryMap_.FindEntry(addr); // Fast Index
+            auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
             if (existNode == nullptr) {
                 size_t selfSize = (size != 0) ? size : obj->GetClass()->SizeFromJSHClass(obj);
                 node = Node::NewNode(chunk_, sequenceId, nodeCount_, GenerateNodeName(obj), GenerateNodeType(obj),
                                      selfSize, obj);
-                if (sequenceId == sequenceId_ + SEQ_STEP) {
-                    sequenceId_ = sequenceId; // Odd Digit
-                }
                 entryMap_.InsertEntry(node);
+                if (!idExist) {
+                    entryIdMap_->InsertId(addr, sequenceId);
+                }
                 InsertNodeUnique(node);
                 ASSERT(entryMap_.FindEntry(node->GetAddress())->GetAddress() == node->GetAddress());
             } else {
@@ -636,11 +648,17 @@ Node *HeapSnapshot::GenerateNode(JSTaggedValue entry, size_t size, int sequenceI
     } else {
         CString primitiveName;
         if (entry.IsInt()) {
+            if (!captureNumericValue_) {
+                return nullptr;
+            }
             primitiveName.append("Int:");
             if (!isPrivate_) {
                 primitiveName.append(ToCString(entry.GetInt()));
             }
         } else if (entry.IsDouble()) {
+            if (!captureNumericValue_) {
+                return nullptr;
+            }
             primitiveName.append("Double:");
             if (!isPrivate_) {
                 primitiveName.append(FloatToCString(entry.GetDouble()));
@@ -668,12 +686,13 @@ Node *HeapSnapshot::GenerateNode(JSTaggedValue entry, size_t size, int sequenceI
             existNode->SetLive(true);
             return existNode;
         }
-
+        Address addr = reinterpret_cast<Address>(obj);
+        auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
         node = Node::NewNode(chunk_, sequenceId, nodeCount_, GetString(primitiveName),
                              NodeType::JS_PRIMITIVE_REF, 0, obj);
         entryMap_.InsertEntry(node); // Fast Index
-        if (sequenceId == sequenceId_ + SEQ_STEP) {
-            sequenceId_ = sequenceId; // Odd Digit
+        if (!idExist) {
+            entryIdMap_->InsertId(addr, sequenceId);
         }
         InsertNodeUnique(node);
     }
@@ -829,7 +848,7 @@ bool HeapSnapshot::AddMethodInfo(MethodLiteral *methodLiteral,
     return true;
 }

-Node *HeapSnapshot::GenerateStringNode(JSTaggedValue entry, size_t size, int sequenceId, bool isInFinish)
+Node *HeapSnapshot::GenerateStringNode(JSTaggedValue entry, size_t size, bool isInFinish)
 {
     static const CString EMPTY_STRING;
@@ -849,17 +868,19 @@ Node *HeapSnapshot::GenerateStringNode(JSTaggedValue entry, size_t size, int seq
     if (isInFinish) {
         nodeName = GetString(EntryVisitor::ConvertKey(entry));
     }
+    Address addr = reinterpret_cast<Address>(entry.GetTaggedObject());
+    auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
     Node *node = Node::NewNode(chunk_, sequenceId, nodeCount_, nodeName, NodeType::PRIM_STRING, selfsize,
                                entry.GetTaggedObject());
-    if (sequenceId == sequenceId_ + SEQ_STEP) {
-        sequenceId_ = sequenceId; // Odd Digit
+    if (!idExist) {
+        entryIdMap_->InsertId(addr, sequenceId);
     }
     entryMap_.InsertEntry(node);
     InsertNodeUnique(node);
     return node;
 }

-Node *HeapSnapshot::GeneratePrivateStringNode(size_t size, int sequenceId)
+Node *HeapSnapshot::GeneratePrivateStringNode(size_t size)
 {
     if (privateStringNode_ != nullptr) {
         return privateStringNode_;
@@ -870,13 +891,16 @@ Node *HeapSnapshot::GeneratePrivateStringNode(size_t size, int sequenceId)
     size_t selfsize = (size != 0) ? size : EcmaStringAccessor(originStr).GetFlatStringSize();
     CString strContent;
     strContent.append(EntryVisitor::ConvertKey(stringValue));
+    Address addr = reinterpret_cast<Address>(stringValue.GetTaggedObject());
+    auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
     node = Node::NewNode(chunk_, sequenceId, nodeCount_, GetString(strContent), NodeType::PRIM_STRING, selfsize,
                          stringValue.GetTaggedObject());
     Node *existNode = entryMap_.FindOrInsertNode(node); // Fast Index
     if (existNode == node) {
-        if (sequenceId == sequenceId_ + SEQ_STEP) {
-            sequenceId_ = sequenceId; // Odd Digit
+        if (!idExist) {
+            entryIdMap_->InsertId(addr, sequenceId);
         }
+        entryMap_.InsertEntry(node);
         InsertNodeUnique(node);
     } else {
         existNode->SetLive(true);
@@ -900,12 +924,16 @@ void HeapSnapshot::FillEdges()
     size_t count = 0;
     while (++count < length) {
         ASSERT(*iter != nullptr);
-        auto *objFrom = reinterpret_cast<TaggedObject *>((*iter)->GetAddress());
-        std::vector<std::pair<CString, JSTaggedValue>> nameResources;
-        JSTaggedValue(objFrom).DumpForSnapshot(nameResources, isVmMode_);
+        auto entryFrom = *iter;
+        auto *objFrom = reinterpret_cast<TaggedObject *>(entryFrom->GetAddress());
+        std::vector<Reference> referenceResources;
         JSTaggedValue objValue(objFrom);
-        for (auto const &it : nameResources) {
-            JSTaggedValue toValue = it.second;
+        objValue.DumpForSnapshot(referenceResources, isVmMode_);
+        for (auto const &it : referenceResources) {
+            JSTaggedValue toValue = it.value_;
+            if (toValue.IsNumber() && !captureNumericValue_) {
+                continue;
+            }
             Node *entryTo = nullptr;
             if (toValue.IsWeak()) {
                 toValue.RemoveWeakTag();
@@ -915,17 +943,24 @@ void HeapSnapshot::FillEdges()
                 entryTo = entryMap_.FindEntry(Node::NewAddress(to));
             }
             if (entryTo == nullptr) {
-                entryTo = GenerateNode(toValue, 0, -1, true);
+                entryTo = GenerateNode(toValue, 0, true);
             }
             if (entryTo != nullptr) {
-                Edge *edge = Edge::NewEdge(chunk_, edgeCount_, EdgeType::DEFAULT, *iter, entryTo, GetString(it.first));
+                Edge *edge = (it.type_ == Reference::ReferenceType::ELEMENT) ?
+                    Edge::NewEdge(chunk_, edgeCount_, (EdgeType)it.type_, entryFrom, entryTo, it.index_) :
+                    Edge::NewEdge(chunk_, edgeCount_, (EdgeType)it.type_, entryFrom, entryTo, GetString(it.name_));
+                RenameFunction(it.name_, entryFrom, entryTo);
                 InsertEdgeUnique(edge);
                 (*iter)->IncEdgeCount(); // Update Node's edgeCount_ here
             }
         }
         iter++;
     }
-    // Fill Primitive Edge
+    FillPrimitiveEdge(count, iter);
+}
+
+void HeapSnapshot::FillPrimitiveEdge(size_t count, CList<Node *>::iterator iter)
+{
     size_t lengthExtend = nodes_.size();
     while (++count < lengthExtend) {
         ASSERT(*iter != nullptr);
@@ -947,6 +982,25 @@ void HeapSnapshot::FillEdges()
     }
 }

+void HeapSnapshot::RenameFunction(const CString &edgeName, Node *entryFrom, Node *entryTo)
+{
+    if (edgeName != "name") {
+        return;
+    }
+    auto fromName = *entryFrom->GetName();
+    if (*entryTo->GetName() != "" && (fromName == "JSFunctionBase" ||
+        fromName == "JSFunction" || fromName == "PromiseValueThunkOrThrowerFunction" ||
+        fromName == "JSGeneratorFunction" || fromName == "PromiseAllSettledElementFunction" ||
+        fromName == "Bound Function" || fromName == "PromiseAnyRejectElementFunction" ||
+        fromName == "PromiseReactionsFunction" || fromName == "PromiseExecutorFunction" ||
+        fromName == "PromiseAllResolveElementFunction" || fromName == "AsyncFunction" ||
+        fromName == "ProxyRevocFunction" || fromName == "AsyncFromSyncIterUnwarpFunction" ||
+        fromName == "PromiseFinallyFunction" || fromName == "JSIntlBoundFunction" ||
+        fromName == "JSAsyncGeneratorFunction" || fromName == "AsyncAwaitStatusFunction")) {
+        entryFrom->SetName(GetString(*entryTo->GetName()));
+    }
+}
+
 void HeapSnapshot::BridgeAllReferences()
 {
     // This Function is Unused
@@ -1121,11 +1175,11 @@ FrontType NodeTypeConverter::Convert(NodeType type)
     } else if (type == NodeType::JS_OBJECT) {
         fType = FrontType::OBJECT;
     } else if (type >= NodeType::JS_FUNCTION_FIRST && type <= NodeType::JS_FUNCTION_LAST) {
-        fType = FrontType::CLOSURE;
+        fType = FrontType::DEFAULT;
     } else if (type == NodeType::JS_BOUND_FUNCTION) {
-        fType = FrontType::CLOSURE;
+        fType = FrontType::DEFAULT;
     } else if (type == NodeType::JS_FUNCTION_BASE) {
-        fType = FrontType::CLOSURE;
+        fType = FrontType::DEFAULT;
     } else if (type == NodeType::JS_REG_EXP) {
         fType = FrontType::REGEXP;
     } else if (type == NodeType::SYMBOL) {
diff --git a/ecmascript/dfx/hprof/heap_snapshot.h b/ecmascript/dfx/hprof/heap_snapshot.h
index 9b366397cc0fa0557c09d6fbe67071b6914a063f..c23ef40978902a6fbecb0be7ee9c9b709576dc5b 100644
--- a/ecmascript/dfx/hprof/heap_snapshot.h
+++ b/ecmascript/dfx/hprof/heap_snapshot.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Copyright (c) 2021-2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
 * limitations under the License.
*/ -#ifndef ECMASCRIPT_HPROF_HEAP_SNAPSHOT_H -#define ECMASCRIPT_HPROF_HEAP_SNAPSHOT_H +#ifndef ECMASCRIPT_DFX_HPROF_HEAP_SNAPSHOT_H +#define ECMASCRIPT_DFX_HPROF_HEAP_SNAPSHOT_H #include #include @@ -33,6 +33,7 @@ #include "ecmascript/interpreter/frame_handler.h" namespace panda::ecmascript { +class EntryIdMap; // Define the Object Graphic using Address = uintptr_t; @@ -47,7 +48,7 @@ enum class EdgeType { CONTEXT, ELEMENT, PROPERTY, INTERNAL, HIDDEN, SHORTCUT, WE class Node { public: - Node(uint64_t id, uint64_t index, const CString *name, NodeType type, size_t size, uint64_t traceId, + Node(uint32_t id, uint32_t index, const CString *name, NodeType type, size_t size, uint32_t traceId, Address address, bool isLive = true) : id_(id), index_(index), @@ -59,15 +60,15 @@ public: isLive_(isLive) { } - uint64_t GetId() const + uint32_t GetId() const { return id_; } - void SetIndex(uint64_t index) + void SetIndex(uint32_t index) { index_ = index; } - uint64_t GetIndex() const + uint32_t GetIndex() const { return index_; } @@ -102,7 +103,7 @@ public: { edgeCount_++; } - uint64_t GetStackTraceId() const + uint32_t GetStackTraceId() const { return traceId_; } @@ -122,7 +123,7 @@ public: { isLive_ = isLive; } - void SetTraceId(uint64_t traceId) + void SetTraceId(uint32_t traceId) { traceId_ = traceId; } @@ -137,24 +138,24 @@ public: ~Node() = default; private: - uint64_t id_ {0}; // Range from 1 - uint64_t index_ {0}; + uint32_t id_ {0}; // Range from 1 + uint32_t index_ {0}; const CString *name_ {nullptr}; NodeType type_ {NodeType::INVALID}; size_t size_ {0}; size_t edgeCount_ {0}; - uint64_t traceId_ {0}; + uint32_t traceId_ {0}; Address address_ {0x0}; bool isLive_ {true}; }; class Edge { public: - Edge(uint64_t id, EdgeType type, Node *from, Node *to, CString *name) - : id_(id), edgeType_(type), from_(from), to_(to), name_(name) - { - } - uint64_t GetId() const + Edge(uint32_t id, EdgeType type, Node *from, Node *to, CString *name) + : id_(id), edgeType_(type), from_(from), to_(to), name_(name) {} + Edge(uint32_t id, EdgeType type, Node *from, Node *to, uint32_t index) + : id_(id), edgeType_(type), from_(from), to_(to), index_(index) {} + uint32_t GetId() const { return id_; } @@ -172,10 +173,17 @@ public: } const CString *GetName() const { + ASSERT(GetType() != EdgeType::ELEMENT); return name_; } + uint32_t GetIndex() const + { + ASSERT(GetType() == EdgeType::ELEMENT); + return index_; + } void SetName(CString *name) { + ASSERT(GetType() != EdgeType::ELEMENT); name_ = name; } void UpdateFrom(Node *node) @@ -186,16 +194,20 @@ public: { to_ = node; } - static Edge *NewEdge(Chunk *chunk, uint64_t id, EdgeType type, Node *from, Node *to, CString *name); + static Edge *NewEdge(Chunk *chunk, uint32_t id, EdgeType type, Node *from, Node *to, CString *name); + static Edge *NewEdge(Chunk *chunk, uint32_t id, EdgeType type, Node *from, Node *to, uint32_t index); static constexpr int EDGE_FIELD_COUNT = 3; ~Edge() = default; private: - uint64_t id_ {-1ULL}; + uint32_t id_ {-1U}; EdgeType edgeType_ {EdgeType::DEFAULT}; Node *from_ {nullptr}; Node *to_ {nullptr}; - CString *name_ {nullptr}; + union { + CString *name_; + uint32_t index_; + }; }; class TimeStamp { @@ -216,22 +228,22 @@ public: return timeStampUs_; } - int32_t GetSize() const + uint32_t GetSize() const { return size_; } - void SetSize(int32_t size) + void SetSize(uint32_t size) { size_ = size; } - int32_t GetCount() const + uint32_t GetCount() const { return count_; } - void SetCount(int32_t count) + void SetCount(uint32_t count) { count_ = 
count;
     }
@@ -247,8 +259,8 @@ private:
     int lastSequenceId_ {0};
     int64_t timeStampUs_ {0};
-    int32_t size_ {0};
-    int32_t count_ {0};
+    uint32_t size_ {0};
+    uint32_t count_ {0};
 };

 class HeapEntryMap {
@@ -357,17 +369,29 @@ private:
     TraceNode root_;
 };

+struct Reference {
+    enum class ReferenceType { CONTEXT, ELEMENT, PROPERTY, INTERNAL, HIDDEN, SHORTCUT, WEAK, DEFAULT = PROPERTY };
+
+    Reference(const CString &name, JSTaggedValue value) : name_(name), value_(value) {}
+    Reference(const CString &name, JSTaggedValue value, ReferenceType type) : name_(name), value_(value), type_(type) {}
+    Reference(uint32_t index, JSTaggedValue value, ReferenceType type) : index_(index), value_(value), type_(type) {}
+
+    CString name_;
+    uint32_t index_ {-1U};
+    JSTaggedValue value_;
+    ReferenceType type_ {ReferenceType::DEFAULT};
+};
+
 class HeapSnapshot {
 public:
     static constexpr int SEQ_STEP = 2;
     NO_MOVE_SEMANTIC(HeapSnapshot);
     NO_COPY_SEMANTIC(HeapSnapshot);
-    HeapSnapshot(const EcmaVM *vm, const bool isVmMode, const bool isPrivate, const bool trackAllocations,
-                 Chunk *chunk)
-        : stringTable_(vm), vm_(vm), isVmMode_(isVmMode), isPrivate_(isPrivate), trackAllocations_(trackAllocations),
-          chunk_(chunk)
-    {
-    }
+    HeapSnapshot(const EcmaVM *vm, const bool isVmMode, const bool isPrivate, const bool captureNumericValue,
+                 const bool trackAllocations, EntryIdMap *entryIdMap, Chunk *chunk)
+        : stringTable_(vm), vm_(vm), isVmMode_(isVmMode), isPrivate_(isPrivate),
+          captureNumericValue_(captureNumericValue), trackAllocations_(trackAllocations),
+          entryIdMap_(entryIdMap), chunk_(chunk) {}
     ~HeapSnapshot();
     bool BuildUp();
     bool Verify();
@@ -462,10 +486,12 @@ public:

 private:
     void FillNodes(bool isInFinish = false);
-    Node *GenerateNode(JSTaggedValue entry, size_t size = 0, int sequenceId = -1, bool isInFinish = false);
-    Node *GeneratePrivateStringNode(size_t size, int sequenceId);
-    Node *GenerateStringNode(JSTaggedValue entry, size_t size, int sequenceId, bool isInFinish = false);
+    Node *GenerateNode(JSTaggedValue entry, size_t size = 0, bool isInFinish = false);
+    Node *GeneratePrivateStringNode(size_t size);
+    Node *GenerateStringNode(JSTaggedValue entry, size_t size, bool isInFinish = false);
     void FillEdges();
+    void FillPrimitiveEdge(size_t count, CList<Node *>::iterator iter);
+    void RenameFunction(const CString &edgeName, Node *entryFrom, Node *entryTo);
     void BridgeAllReferences();
     CString *GenerateEdgeName(TaggedObject *from, TaggedObject *to);
@@ -480,7 +506,6 @@ private:
     CList<Node *> nodes_ {};
     CList<Edge *> edges_ {};
     CVector<TimeStamp> timeStamps_ {};
-    std::atomic_int sequenceId_ {1}; // 1 Reversed for SyntheticRoot
     int nodeCount_ {0};
     int edgeCount_ {0};
     int totalNodesSize_ {0};
@@ -489,6 +514,7 @@ private:
     const EcmaVM *vm_;
     bool isVmMode_ {true};
     bool isPrivate_ {false};
+    bool captureNumericValue_ {false};
     Node* privateStringNode_ {nullptr};
     bool trackAllocations_ {false};
     CVector<FunctionInfo> traceInfoStack_ {};
@@ -497,6 +523,7 @@ private:
     TraceTree traceTree_;
     CMap<MethodLiteral *, uint32_t> methodToTraceNodeId_;
     CVector<uint32_t> traceNodeIndex_;
+    EntryIdMap* entryIdMap_;
     Chunk *chunk_ {nullptr};
 };

@@ -539,4 +566,4 @@ public:
     static FrontType Convert(NodeType type);
 };
 } // namespace panda::ecmascript
-#endif // ECMASCRIPT_HPROF_HEAP_SNAPSHOT_H
+#endif // ECMASCRIPT_DFX_HPROF_HEAP_SNAPSHOT_H
diff --git a/ecmascript/dfx/hprof/heap_snapshot_json_serializer.cpp b/ecmascript/dfx/hprof/heap_snapshot_json_serializer.cpp
index 96bf98b28cd6b53ffa5d3d22902947d98f27fbbc..df85c6e5db7a94b5b15efb3c133385dedbd215da 100644
--- a/ecmascript/dfx/hprof/heap_snapshot_json_serializer.cpp
+++ b/ecmascript/dfx/hprof/heap_snapshot_json_serializer.cpp @@ -20,254 +20,250 @@ namespace panda::ecmascript { -HeapSnapshotJSONSerializer::~HeapSnapshotJSONSerializer() -{ - if (!writer_) { - delete writer_; - } -} - bool HeapSnapshotJSONSerializer::Serialize(HeapSnapshot *snapshot, Stream *stream) { // Serialize Node/Edge/String-Table - LOG_ECMA(ERROR) << "HeapSnapshotJSONSerializer::Serialize begin"; - snapshot_ = snapshot; - ASSERT(snapshot_->GetNodes() != nullptr && snapshot_->GetEdges() != nullptr && - snapshot_->GetEcmaStringTable() != nullptr); - writer_ = new StreamWriter(stream); + LOG_ECMA(INFO) << "HeapSnapshotJSONSerializer::Serialize begin"; + ASSERT(snapshot->GetNodes() != nullptr && snapshot->GetEdges() != nullptr && + snapshot->GetEcmaStringTable() != nullptr); + auto writer = new StreamWriter(stream); + + SerializeSnapshotHeader(snapshot, writer); // 1. + SerializeNodes(snapshot, writer); // 2. + SerializeEdges(snapshot, writer); // 3. + SerializeTraceFunctionInfo(snapshot, writer); // 4. + SerializeTraceTree(snapshot, writer); // 5. + SerializeSamples(snapshot, writer); // 6. + SerializeLocations(writer); // 7. + SerializeStringTable(snapshot, writer); // 8. + SerializerSnapshotClosure(writer); // 9. + writer->End(); - SerializeSnapshotHeader(); // 1. - SerializeNodes(); // 2. - SerializeEdges(); // 3. - SerializeTraceFunctionInfo(); // 4. - SerializeTraceTree(); // 5. - SerializeSamples(); // 6. - SerializeLocations(); // 7. - SerializeStringTable(); // 8. - SerializerSnapshotClosure(); // 9. - writer_->End(); + delete writer; - LOG_ECMA(ERROR) << "HeapSnapshotJSONSerializer::Serialize exit"; + LOG_ECMA(INFO) << "HeapSnapshotJSONSerializer::Serialize exit"; return true; } -void HeapSnapshotJSONSerializer::SerializeSnapshotHeader() +void HeapSnapshotJSONSerializer::SerializeSnapshotHeader(HeapSnapshot *snapshot, StreamWriter *writer) { - writer_->Write("{\"snapshot\":\n"); // 1. - writer_->Write("{\"meta\":\n"); // 2. + writer->Write("{\"snapshot\":\n"); // 1. + writer->Write("{\"meta\":\n"); // 2. // NOLINTNEXTLINE(modernize-raw-string-literal) - writer_->Write("{\"node_fields\":[\"type\",\"name\",\"id\",\"self_size\",\"edge_count\",\"trace_node_id\","); - writer_->Write("\"detachedness\"],\n"); // 3. + writer->Write("{\"node_fields\":[\"type\",\"name\",\"id\",\"self_size\",\"edge_count\",\"trace_node_id\","); + writer->Write("\"detachedness\"],\n"); // 3. // NOLINTNEXTLINE(modernize-raw-string-literal) - writer_->Write("\"node_types\":[[\"hidden\",\"array\",\"string\",\"object\",\"code\",\"closure\",\"regexp\","); + writer->Write("\"node_types\":[[\"hidden\",\"array\",\"string\",\"object\",\"code\",\"closure\",\"regexp\","); // NOLINTNEXTLINE(modernize-raw-string-literal) - writer_->Write("\"number\",\"native\",\"synthetic\",\"concatenated string\",\"slicedstring\",\"symbol\","); + writer->Write("\"number\",\"native\",\"synthetic\",\"concatenated string\",\"slicedstring\",\"symbol\","); // NOLINTNEXTLINE(modernize-raw-string-literal) - writer_->Write("\"bigint\"],\"string\",\"number\",\"number\",\"number\",\"number\",\"number\"],\n"); // 4. + writer->Write("\"bigint\"],\"string\",\"number\",\"number\",\"number\",\"number\",\"number\"],\n"); // 4. // NOLINTNEXTLINE(modernize-raw-string-literal) - writer_->Write("\"edge_fields\":[\"type\",\"name_or_index\",\"to_node\"],\n"); // 5. + writer->Write("\"edge_fields\":[\"type\",\"name_or_index\",\"to_node\"],\n"); // 5. 
     // NOLINTNEXTLINE(modernize-raw-string-literal)
-    writer_->Write("\"edge_types\":[[\"context\",\"element\",\"property\",\"internal\",\"hidden\",\"shortcut\",");
+    writer->Write("\"edge_types\":[[\"context\",\"element\",\"property\",\"internal\",\"hidden\",\"shortcut\",");
     // NOLINTNEXTLINE(modernize-raw-string-literal)
-    writer_->Write("\"weak\"],\"string_or_number\",\"node\"],\n"); // 6.
+    writer->Write("\"weak\"],\"string_or_number\",\"node\"],\n"); // 6.
     // NOLINTNEXTLINE(modernize-raw-string-literal)
-    writer_->Write("\"trace_function_info_fields\":[\"function_id\",\"name\",\"script_name\",\"script_id\",");
+    writer->Write("\"trace_function_info_fields\":[\"function_id\",\"name\",\"script_name\",\"script_id\",");
     // NOLINTNEXTLINE(modernize-raw-string-literal)
-    writer_->Write("\"line\",\"column\"],\n"); // 7.
+    writer->Write("\"line\",\"column\"],\n"); // 7.
     // NOLINTNEXTLINE(modernize-raw-string-literal)
-    writer_->Write("\"trace_node_fields\":[\"id\",\"function_info_index\",\"count\",\"size\",\"children\"],\n");
+    writer->Write("\"trace_node_fields\":[\"id\",\"function_info_index\",\"count\",\"size\",\"children\"],\n");
     // NOLINTNEXTLINE(modernize-raw-string-literal)
-    writer_->Write("\"sample_fields\":[\"timestamp_us\",\"last_assigned_id\"],\n"); // 9.
+    writer->Write("\"sample_fields\":[\"timestamp_us\",\"last_assigned_id\"],\n"); // 9.
     // NOLINTNEXTLINE(modernize-raw-string-literal)
     // 10.
-    writer_->Write("\"location_fields\":[\"object_index\",\"script_id\",\"line\",\"column\"]},\n\"node_count\":");
-    writer_->Write(snapshot_->GetNodeCount()); // 11.
-    writer_->Write(",\n\"edge_count\":");
-    writer_->Write(snapshot_->GetEdgeCount()); // 12.
-    writer_->Write(",\n\"trace_function_count\":");
-    writer_->Write(snapshot_->GetTrackAllocationsStack().size()); // 13.
-    writer_->Write("\n},\n"); // 14.
+    writer->Write("\"location_fields\":[\"object_index\",\"script_id\",\"line\",\"column\"]},\n\"node_count\":");
+    writer->Write(snapshot->GetNodeCount()); // 11.
+    writer->Write(",\n\"edge_count\":");
+    writer->Write(snapshot->GetEdgeCount()); // 12.
+    writer->Write(",\n\"trace_function_count\":");
+    writer->Write(snapshot->GetTrackAllocationsStack().size()); // 13.
+    writer->Write("\n},\n"); // 14.
 }

-void HeapSnapshotJSONSerializer::SerializeNodes()
+void HeapSnapshotJSONSerializer::SerializeNodes(HeapSnapshot *snapshot, StreamWriter *writer)
 {
-    const CList<Node *> *nodes = snapshot_->GetNodes();
-    const StringHashMap *stringTable = snapshot_->GetEcmaStringTable();
+    const CList<Node *> *nodes = snapshot->GetNodes();
+    const StringHashMap *stringTable = snapshot->GetEcmaStringTable();
     ASSERT(nodes != nullptr);
-    writer_->Write("\"nodes\":["); // Section Header
+    writer->Write("\"nodes\":["); // Section Header
     size_t i = 0;
     for (auto *node : *nodes) {
         if (i > 0) {
-            writer_->Write(","); // add comma except first line
+            writer->Write(","); // add comma except first line
         }
-        writer_->Write(static_cast<int>(NodeTypeConverter::Convert(node->GetType()))); // 1.
-        writer_->Write(",");
-        writer_->Write(stringTable->GetStringId(node->GetName())); // 2.
-        writer_->Write(",");
-        writer_->Write(node->GetId()); // 3.
-        writer_->Write(",");
-        writer_->Write(node->GetSelfSize()); // 4.
-        writer_->Write(",");
-        writer_->Write(node->GetEdgeCount()); // 5.
-        writer_->Write(",");
-        writer_->Write(node->GetStackTraceId()); // 6.
-        writer_->Write(",");
+        writer->Write(static_cast<int>(NodeTypeConverter::Convert(node->GetType()))); // 1.
+        writer->Write(",");
+        writer->Write(stringTable->GetStringId(node->GetName())); // 2.
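For orientation while reading SerializeNodes: every node is emitted as seven integers appended to one flat "nodes" array, in exactly the order the "node_fields" meta above declares. An illustrative reader for that layout (not VM code):

```
// Illustrative consumer of the flat "nodes" array: seven integers per node,
// recovered by stride. Field order follows the "node_fields" meta.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t NODE_FIELD_COUNT = 7;

struct NodeRecord {
    uint64_t type;
    uint64_t nameStringId;
    uint64_t id;
    uint64_t selfSize;
    uint64_t edgeCount;
    uint64_t traceNodeId;
    uint64_t detachedness;
};

NodeRecord ReadNode(const std::vector<uint64_t> &nodes, size_t nodeIndex)
{
    size_t base = nodeIndex * NODE_FIELD_COUNT;
    return {nodes[base], nodes[base + 1], nodes[base + 2], nodes[base + 3],
            nodes[base + 4], nodes[base + 5], nodes[base + 6]};
}
```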
+        writer->Write(",");
+        writer->Write(node->GetId()); // 3.
+        writer->Write(",");
+        writer->Write(node->GetSelfSize()); // 4.
+        writer->Write(",");
+        writer->Write(node->GetEdgeCount()); // 5.
+        writer->Write(",");
+        writer->Write(node->GetStackTraceId()); // 6.
+        writer->Write(",");
         if (i == nodes->size() - 1) { // add comma at last the line
-            writer_->Write("0],\n"); // 7. detachedness default
+            writer->Write("0],\n"); // 7. detachedness default
         } else {
-            writer_->Write("0\n"); // 7.
+            writer->Write("0\n"); // 7.
         }
         i++;
     }
 }

-void HeapSnapshotJSONSerializer::SerializeEdges()
+void HeapSnapshotJSONSerializer::SerializeEdges(HeapSnapshot *snapshot, StreamWriter *writer)
 {
-    const CList<Edge *> *edges = snapshot_->GetEdges();
-    const StringHashMap *stringTable = snapshot_->GetEcmaStringTable();
+    const CList<Edge *> *edges = snapshot->GetEdges();
+    const StringHashMap *stringTable = snapshot->GetEcmaStringTable();
     ASSERT(edges != nullptr);
-    writer_->Write("\"edges\":[");
+    writer->Write("\"edges\":[");
     size_t i = 0;
     for (auto *edge : *edges) {
+        StringId nameOrIndex = edge->GetType() == EdgeType::ELEMENT ?
+            edge->GetIndex() : stringTable->GetStringId(edge->GetName());
         if (i > 0) { // add comma except the first line
-            writer_->Write(",");
+            writer->Write(",");
         }
-        writer_->Write(static_cast<int>(edge->GetType())); // 1.
-        writer_->Write(",");
-        writer_->Write(stringTable->GetStringId(edge->GetName())); // 2. Use StringId
-        writer_->Write(",");
+        writer->Write(static_cast<int>(edge->GetType())); // 1.
+        writer->Write(",");
+        writer->Write(nameOrIndex); // 2. Use StringId
+        writer->Write(",");
         if (i == edges->size() - 1) { // add comma at last the line
-            writer_->Write(edge->GetTo()->GetIndex() * Node::NODE_FIELD_COUNT); // 3.
-            writer_->Write("],\n");
+            writer->Write(edge->GetTo()->GetIndex() * Node::NODE_FIELD_COUNT); // 3.
+            writer->Write("],\n");
         } else {
-            writer_->Write(edge->GetTo()->GetIndex() * Node::NODE_FIELD_COUNT); // 3.
+            writer->Write(edge->GetTo()->GetIndex() * Node::NODE_FIELD_COUNT); // 3.
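The multiplication by Node::NODE_FIELD_COUNT just above is deliberate: "to_node" stores an offset into the flat nodes array rather than a node ordinal, so a reader can jump straight to the target record. Both directions of that mapping, as a sketch:

```
// Why SerializeEdges writes GetIndex() * NODE_FIELD_COUNT: "to_node" is a
// direct offset into the flat nodes array, not a node ordinal.
#include <cstddef>

constexpr size_t NODE_FIELD_COUNT = 7;

inline size_t ToNodeOffset(size_t targetNodeIndex)
{
    return targetNodeIndex * NODE_FIELD_COUNT; // what the serializer emits
}

inline size_t ToNodeIndex(size_t toNodeOffset)
{
    return toNodeOffset / NODE_FIELD_COUNT; // what a consumer recovers
}
```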
+            writer->Write("\n");
         }
         i++;
     }
 }

-void HeapSnapshotJSONSerializer::SerializeTraceFunctionInfo()
+void HeapSnapshotJSONSerializer::SerializeTraceFunctionInfo(HeapSnapshot *snapshot, StreamWriter *writer)
 {
-    const CVector<FunctionInfo> trackAllocationsStack = snapshot_->GetTrackAllocationsStack();
-    const StringHashMap *stringTable = snapshot_->GetEcmaStringTable();
+    const CVector<FunctionInfo> trackAllocationsStack = snapshot->GetTrackAllocationsStack();
+    const StringHashMap *stringTable = snapshot->GetEcmaStringTable();

-    writer_->Write("\"trace_function_infos\":["); // Empty
+    writer->Write("\"trace_function_infos\":["); // Empty
     size_t i = 0;

     for (const auto &info : trackAllocationsStack) {
         if (i > 0) { // add comma except the first line
-            writer_->Write(",");
+            writer->Write(",");
         }
-        writer_->Write(info.functionId);
-        writer_->Write(",");
+        writer->Write(info.functionId);
+        writer->Write(",");
         CString functionName(info.functionName.c_str());
-        writer_->Write(stringTable->GetStringId(&functionName));
-        writer_->Write(",");
+        writer->Write(stringTable->GetStringId(&functionName));
+        writer->Write(",");
         CString scriptName(info.scriptName.c_str());
-        writer_->Write(stringTable->GetStringId(&scriptName));
-        writer_->Write(",");
-        writer_->Write(info.scriptId);
-        writer_->Write(",");
-        writer_->Write(info.lineNumber);
-        writer_->Write(",");
-        writer_->Write(info.columnNumber);
-        writer_->Write("\n");
+        writer->Write(stringTable->GetStringId(&scriptName));
+        writer->Write(",");
+        writer->Write(info.scriptId);
+        writer->Write(",");
+        writer->Write(info.lineNumber);
+        writer->Write(",");
+        writer->Write(info.columnNumber);
+        writer->Write("\n");
         i++;
     }
-    writer_->Write("],\n");
+    writer->Write("],\n");
 }

-void HeapSnapshotJSONSerializer::SerializeTraceTree()
+void HeapSnapshotJSONSerializer::SerializeTraceTree(HeapSnapshot *snapshot, StreamWriter *writer)
 {
-    writer_->Write("\"trace_tree\":[");
-    TraceTree* tree = snapshot_->GetTraceTree();
-    if ((tree != nullptr) && (snapshot_->trackAllocations())) {
-        SerializeTraceNode(tree->GetRoot());
+    writer->Write("\"trace_tree\":[");
+    TraceTree* tree = snapshot->GetTraceTree();
+    if ((tree != nullptr) && (snapshot->trackAllocations())) {
+        SerializeTraceNode(tree->GetRoot(), writer);
     }
-    writer_->Write("],\n");
+    writer->Write("],\n");
 }

-void HeapSnapshotJSONSerializer::SerializeTraceNode(TraceNode* node)
+void HeapSnapshotJSONSerializer::SerializeTraceNode(TraceNode* node, StreamWriter *writer)
 {
     if (node == nullptr) {
         return;
     }
-    writer_->Write(node->GetId());
-    writer_->Write(",");
-    writer_->Write(node->GetNodeIndex());
-    writer_->Write(",");
-    writer_->Write(node->GetTotalCount());
-    writer_->Write(",");
-    writer_->Write(node->GetTotalSize());
-    writer_->Write(",[");
+    writer->Write(node->GetId());
+    writer->Write(",");
+    writer->Write(node->GetNodeIndex());
+    writer->Write(",");
+    writer->Write(node->GetTotalCount());
+    writer->Write(",");
+    writer->Write(node->GetTotalSize());
+    writer->Write(",[");
     int i = 0;
     for (TraceNode* child : node->GetChildren()) {
         if (i > 0) {
-            writer_->Write(",");
+            writer->Write(",");
         }
-        SerializeTraceNode(child);
+        SerializeTraceNode(child, writer);
         i++;
     }
-    writer_->Write("]");
+    writer->Write("]");
 }

-void HeapSnapshotJSONSerializer::SerializeSamples()
+void HeapSnapshotJSONSerializer::SerializeSamples(HeapSnapshot *snapshot, StreamWriter *writer)
 {
-    writer_->Write("\"samples\":[");
-    const CVector<TimeStamp> &timeStamps = snapshot_->GetTimeStamps();
+    writer->Write("\"samples\":[");
+    const CVector<TimeStamp> &timeStamps = snapshot->GetTimeStamps();
     if (!timeStamps.empty()) {
         auto firstTimeStamp = timeStamps[0];
         bool isFirst = true;
         for (auto timeStamp : timeStamps) {
             if (!isFirst) {
-                writer_->Write("\n, ");
+                writer->Write("\n, ");
             } else {
                 isFirst = false;
             }
-            writer_->Write(timeStamp.GetTimeStamp() - firstTimeStamp.GetTimeStamp());
-            writer_->Write(", ");
-            writer_->Write(timeStamp.GetLastSequenceId());
+            writer->Write(timeStamp.GetTimeStamp() - firstTimeStamp.GetTimeStamp());
+            writer->Write(", ");
+            writer->Write(timeStamp.GetLastSequenceId());
         }
     }
-    writer_->Write("],\n");
+    writer->Write("],\n");
 }

-void HeapSnapshotJSONSerializer::SerializeLocations()
+void HeapSnapshotJSONSerializer::SerializeLocations(StreamWriter *writer)
 {
-    writer_->Write("\"locations\":[],\n");
+    writer->Write("\"locations\":[],\n");
 }

-void HeapSnapshotJSONSerializer::SerializeStringTable()
+void HeapSnapshotJSONSerializer::SerializeStringTable(HeapSnapshot *snapshot, StreamWriter *writer)
 {
-    const StringHashMap *stringTable = snapshot_->GetEcmaStringTable();
+    const StringHashMap *stringTable = snapshot->GetEcmaStringTable();
     ASSERT(stringTable != nullptr);
-    writer_->Write("\"strings\":[\"<dummy>\",\n");
-    writer_->Write("\"\",\n");
-    writer_->Write("\"GC roots\",\n");
+    writer->Write("\"strings\":[\"<dummy>\",\n");
+    writer->Write("\"\",\n");
+    writer->Write("\"GC roots\",\n");
     // StringId Range from 3
     size_t capcity = stringTable->GetCapcity();
     size_t i = 0;
     for (auto key : stringTable->GetOrderedKeyStorage()) {
         if (i == capcity - 1) {
-            writer_->Write("\"");
-            writer_->Write(*(stringTable->GetStringByKey(key))); // No Comma for the last line
-            writer_->Write("\"\n");
+            writer->Write("\"");
+            writer->Write(*(stringTable->GetStringByKey(key))); // No Comma for the last line
+            writer->Write("\"\n");
         } else {
-            writer_->Write("\"");
-            writer_->Write(*(stringTable->GetStringByKey(key)));
-            writer_->Write("\",\n");
+            writer->Write("\"");
+            writer->Write(*(stringTable->GetStringByKey(key)));
+            writer->Write("\",\n");
         }
         i++;
     }
-    writer_->Write("]\n");
+    writer->Write("]\n");
 }

-void HeapSnapshotJSONSerializer::SerializerSnapshotClosure()
+void HeapSnapshotJSONSerializer::SerializerSnapshotClosure(StreamWriter *writer)
 {
-    writer_->Write("}\n");
+    writer->Write("}\n");
 }
 } // namespace panda::ecmascript
diff --git a/ecmascript/dfx/hprof/heap_snapshot_json_serializer.h b/ecmascript/dfx/hprof/heap_snapshot_json_serializer.h
index 39956fef9d7d95d2afff7f6a9a4410177c364a2e..a4a120e0063d2b659e1fa99e3d4c6ecec0da8afc 100644
--- a/ecmascript/dfx/hprof/heap_snapshot_json_serializer.h
+++ b/ecmascript/dfx/hprof/heap_snapshot_json_serializer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Copyright (c) 2021-2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
 * limitations under the License.
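Summing up the rewritten samples section: each recorded TimeStamp becomes a pair of the microsecond delta from the first sample and the last id assigned at that point, which RecordSampleTime now takes from EntryIdMap::GetLastId() instead of the deleted sequenceId_ counter. An illustrative rendering of that payload:

```
// Illustrative sketch of the "samples" payload: (timestamp_us relative to
// the first sample, last_assigned_id) pairs, matching SerializeSamples.
#include <cstdint>
#include <string>
#include <vector>

struct SampleStamp {
    int64_t timeStampUs;
    int lastSequenceId;
};

std::string BuildSamplesJson(const std::vector<SampleStamp> &stamps)
{
    std::string out = "\"samples\":[";
    for (size_t i = 0; i < stamps.size(); ++i) {
        if (i > 0) {
            out += "\n, ";
        }
        out += std::to_string(stamps[i].timeStampUs - stamps[0].timeStampUs);
        out += ", ";
        out += std::to_string(stamps[i].lastSequenceId);
    }
    out += "],\n";
    return out;
}
```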
*/ -#ifndef ECMASCRIPT_HPROF_HEAP_SNAPSHOT_SERIALIZER_H -#define ECMASCRIPT_HPROF_HEAP_SNAPSHOT_SERIALIZER_H +#ifndef ECMASCRIPT_DFX_HPROF_HEAP_SNAPSHOT_SERIALIZER_H +#define ECMASCRIPT_DFX_HPROF_HEAP_SNAPSHOT_SERIALIZER_H #include #include @@ -90,25 +90,22 @@ private: class HeapSnapshotJSONSerializer { public: explicit HeapSnapshotJSONSerializer() = default; - ~HeapSnapshotJSONSerializer(); + ~HeapSnapshotJSONSerializer() = default; NO_MOVE_SEMANTIC(HeapSnapshotJSONSerializer); NO_COPY_SEMANTIC(HeapSnapshotJSONSerializer); - bool Serialize(HeapSnapshot *snapshot, Stream *stream); + static bool Serialize(HeapSnapshot *snapshot, Stream *stream); private: - void SerializeSnapshotHeader(); - void SerializeNodes(); - void SerializeEdges(); - void SerializeTraceFunctionInfo(); - void SerializeTraceTree(); - void SerializeTraceNode(TraceNode *node); - void SerializeSamples(); - void SerializeLocations(); - void SerializeStringTable(); - void SerializerSnapshotClosure(); - - HeapSnapshot *snapshot_ {nullptr}; - StreamWriter *writer_ {nullptr}; + static void SerializeSnapshotHeader(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializeNodes(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializeEdges(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializeTraceFunctionInfo(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializeTraceTree(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializeTraceNode(TraceNode *node, StreamWriter *writer); + static void SerializeSamples(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializeLocations(StreamWriter *writer); + static void SerializeStringTable(HeapSnapshot *snapshot, StreamWriter *writer); + static void SerializerSnapshotClosure(StreamWriter *writer); }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_HPROF_HEAP_SNAPSHOT_SERIALIZER_H +#endif // ECMASCRIPT_DFX_HPROF_HEAP_SNAPSHOT_SERIALIZER_H diff --git a/ecmascript/dfx/hprof/heap_tracker.h b/ecmascript/dfx/hprof/heap_tracker.h index 571b0e518be678abe4bf0b472ec682343257a120..f7317a45767cf4cef9b252a62154f4581a75f3b0 100644 --- a/ecmascript/dfx/hprof/heap_tracker.h +++ b/ecmascript/dfx/hprof/heap_tracker.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_HPROF_HEAP_TRACKER_H -#define ECMASCRIPT_HPROF_HEAP_TRACKER_H +#ifndef ECMASCRIPT_DFX_HPROF_HEAP_TRACKER_H +#define ECMASCRIPT_DFX_HPROF_HEAP_TRACKER_H #include #include @@ -83,6 +83,11 @@ public: sample_.Stop(); } + HeapSnapshot* GetHeapSnapshot() const + { + return snapshot_; + } + void AllocationEvent(TaggedObject *address, size_t size); void MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size); @@ -94,4 +99,4 @@ private: HeapTrackerSample sample_; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_HPROF_HEAP_TRACKER_H +#endif // ECMASCRIPT_DFX_HPROF_HEAP_TRACKER_H diff --git a/ecmascript/dfx/hprof/progress.h b/ecmascript/dfx/hprof/progress.h index 8ef98ebbb733661ce0327c18fcdf0ba741ff0a83..63f0ca7feac8c1258e2e367d4c623ddc97b907a3 100644 --- a/ecmascript/dfx/hprof/progress.h +++ b/ecmascript/dfx/hprof/progress.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. 
+ * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_TOOLING_INTERFACE_PROGRESS_H -#define ECMASCRIPT_TOOLING_INTERFACE_PROGRESS_H +#ifndef ECMASCRIPT_DFX_HPROF_PROGRESS_H +#define ECMASCRIPT_DFX_HPROF_PROGRESS_H #include @@ -27,4 +27,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_TOOLING_INTERFACE_PROGRESS_H +#endif // ECMASCRIPT_DFX_HPROF_PROGRESS_H diff --git a/ecmascript/dfx/hprof/stream.h b/ecmascript/dfx/hprof/stream.h index 2fc6b818604c7defa139b4fd3c5551b32892cb03..428f2660dc759f009c70fb644e84a863bd437554 100644 --- a/ecmascript/dfx/hprof/stream.h +++ b/ecmascript/dfx/hprof/stream.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_TOOLING_INTERFACE_STREAM_H -#define ECMASCRIPT_TOOLING_INTERFACE_STREAM_H +#ifndef ECMASCRIPT_DFX_HPROF_STREAM_H +#define ECMASCRIPT_DFX_HPROF_STREAM_H namespace panda::ecmascript { class HeapStat { @@ -45,4 +45,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_TOOLING_INTERFACE_STREAM_H +#endif // ECMASCRIPT_DFX_HPROF_STREAM_H diff --git a/ecmascript/dfx/hprof/string_hashmap.h b/ecmascript/dfx/hprof/string_hashmap.h index 0b177e82eab60150fb647bf97cf55b9e1a6bc3bc..1311f97c8bbe30f1fa96727f6412288afcdc73a1 100644 --- a/ecmascript/dfx/hprof/string_hashmap.h +++ b/ecmascript/dfx/hprof/string_hashmap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_HPROF_STRING_HASHMAP_H -#define ECMASCRIPT_HPROF_STRING_HASHMAP_H +#ifndef ECMASCRIPT_DFX_HPROF_STRING_HASHMAP_H +#define ECMASCRIPT_DFX_HPROF_STRING_HASHMAP_H #include "ecmascript/ecma_vm.h" #include "ecmascript/mem/c_containers.h" @@ -78,4 +78,4 @@ private: CUnorderedMap hashmap_; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_HPROF_STRING_HASHMAP_H +#endif // ECMASCRIPT_DFX_HPROF_STRING_HASHMAP_H diff --git a/ecmascript/dfx/hprof/tests/heap_sampling_test.cpp b/ecmascript/dfx/hprof/tests/heap_sampling_test.cpp index a34c2cc1f2a3820b83192127649da40c1a6cec72..1e88b110989bda583347a4f7fad6a119c64dbab4 100644 --- a/ecmascript/dfx/hprof/tests/heap_sampling_test.cpp +++ b/ecmascript/dfx/hprof/tests/heap_sampling_test.cpp @@ -97,7 +97,7 @@ HWTEST_F_L0(HeapSamplingTest, ImplementSampling) int size = 1 << 15; // default size Address addr = 0; heapSampling->ImplementSampling(addr, size); - const struct SamplingInfo *result = heapSampling->GetAllocationProfile(); + const SamplingInfo *result = heapSampling->GetAllocationProfile(); EXPECT_TRUE(result != nullptr); EXPECT_TRUE(result->samples_[0].size_ == size); } diff --git a/ecmascript/dfx/hprof/tests/heap_tracker_test.cpp b/ecmascript/dfx/hprof/tests/heap_tracker_test.cpp index aa23caf527a9cc5c8d26599bc67a0296a35df590..c2efaf9363a288572fff780e022617006e9ab7a8 100644 --- a/ecmascript/dfx/hprof/tests/heap_tracker_test.cpp +++ b/ecmascript/dfx/hprof/tests/heap_tracker_test.cpp @@ -248,7 +248,7 @@ HWTEST_F_L0(HeapTrackerTest, DumpHeapSnapshot) FileStream stream(fileName.c_str()); TestProgress testProgress; - heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream, &testProgress, true, true); + heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream, &testProgress, true, true, false); HeapProfilerInterface::Destroy(instance); // Check @@ -282,7 +282,10 @@ HWTEST_F_L0(HeapTrackerTest, HeapSnapshotBuildUp) bool isVmMode = true; bool isPrivate = false; bool traceAllocation = false; - HeapSnapshot heapSnapshot(instance, isVmMode, isPrivate, traceAllocation, instance->GetChunk()); + bool captureNumericValue = false; + HeapProfiler heapProfiler(instance); + HeapSnapshot heapSnapshot(instance, isVmMode, isPrivate, captureNumericValue, traceAllocation, + heapProfiler.GetEntryIdMap(), instance->GetChunk()); EXPECT_TRUE(heapSnapshot.BuildUp()); } @@ -291,7 +294,10 @@ HWTEST_F_L0(HeapTrackerTest, HeapSnapshotUpdateNode) bool isVmMode = true; bool isPrivate = false; bool traceAllocation = false; - HeapSnapshot heapSnapshot(instance, isVmMode, isPrivate, traceAllocation, instance->GetChunk()); + bool captureNumericValue = false; + HeapProfiler heapProfiler(instance); + HeapSnapshot heapSnapshot(instance, isVmMode, isPrivate, captureNumericValue, traceAllocation, + heapProfiler.GetEntryIdMap(), instance->GetChunk()); size_t beginNode = heapSnapshot.GetNodeCount(); heapSnapshot.UpdateNodes(); size_t endNode = heapSnapshot.GetNodeCount(); @@ -325,7 +331,7 @@ HWTEST_F_L0(HeapTrackerTest, GenDumpFileName_001) stream.Clear(); EXPECT_TRUE(!stream.Good()); TestProgress testProgress; - heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream, &testProgress, true, true); + heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream, &testProgress, true, true, false); HeapProfilerInterface::Destroy(instance); } @@ -356,7 +362,7 @@ HWTEST_F_L0(HeapTrackerTest, GenDumpFileName_002) stream.Clear(); EXPECT_TRUE(!stream.Good()); TestProgress testProgress; - heapProfile->DumpHeapSnapshot(DumpFormat::BINARY, &stream, &testProgress, true, true); + 
heapProfile->DumpHeapSnapshot(DumpFormat::BINARY, &stream, &testProgress, true, true, false);
     HeapProfilerInterface::Destroy(instance);
 }

@@ -387,7 +393,7 @@ HWTEST_F_L0(HeapTrackerTest, GenDumpFileName_003)
     stream.Clear();
     EXPECT_TRUE(!stream.Good());
     TestProgress testProgress;
-    heapProfile->DumpHeapSnapshot(DumpFormat::OTHER, &stream, &testProgress, true, true);
+    heapProfile->DumpHeapSnapshot(DumpFormat::OTHER, &stream, &testProgress, true, true, false);
     HeapProfilerInterface::Destroy(instance);
 }

@@ -419,7 +425,7 @@ HWTEST_F_L0(HeapTrackerTest, GenDumpFileName_004)
     EXPECT_TRUE(!stream.Good());
     TestProgress testProgress;
     DumpFormat dumFormat = static_cast<DumpFormat>(5);
-    heapProfile->DumpHeapSnapshot(dumFormat, &stream, &testProgress, true, true);
+    heapProfile->DumpHeapSnapshot(dumFormat, &stream, &testProgress, true, true, false);
     HeapProfilerInterface::Destroy(instance);
 }

@@ -467,7 +473,7 @@ HWTEST_F_L0(HeapTrackerTest, StreamWriterEnd)
     stream.UpdateLastSeenObjectId(1, 1677567644913058);
     TestProgress testProgress;
-    heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream, &testProgress, true, true);
+    heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream, &testProgress, true, true, false);
     StreamWriter streamWriter(&stream);
     streamWriter.End();
     HeapProfilerInterface::Destroy(instance);
@@ -511,7 +517,10 @@ HWTEST_F_L0(HeapTrackerTest, FormatString)
     bool isVmMode = true;
     bool isPrivate = false;
     bool traceAllocation = false;
-    HeapSnapshot heapSnapshot(instance, isVmMode, isPrivate, traceAllocation, instance->GetChunk());
+    bool captureNumericValue = false;
+    HeapProfiler heapProfiler(instance);
+    HeapSnapshot heapSnapshot(instance, isVmMode, isPrivate, captureNumericValue, traceAllocation,
+                              heapProfiler.GetEntryIdMap(), instance->GetChunk());
     StringHashMap stringHashMap(instance);

     CString ret = "H\"e\rl\nl\\o\t W\fo\31rld!";
diff --git a/ecmascript/dfx/hprof/tests/hprof_test.cpp b/ecmascript/dfx/hprof/tests/hprof_test.cpp
index 6f3f5ca872254cc98723111fcce5ea26d6e406a3..2f163657d9e65c4ba40b9b626ea7ae3212db404d 100644
--- a/ecmascript/dfx/hprof/tests/hprof_test.cpp
+++ b/ecmascript/dfx/hprof/tests/hprof_test.cpp
@@ -112,47 +112,43 @@ public:

 class HProfTestHelper {
 public:
-    HProfTestHelper(const std::string &aFilePath, EcmaVM *vm)
-        : instance(vm),
-          filePath(aFilePath)
+    explicit HProfTestHelper(EcmaVM *vm) : instance(vm) {}
+
+    ~HProfTestHelper()
+    {
+        HeapProfilerInterface::Destroy(instance);
+    }
+
+    size_t GenerateSnapShot(const std::string &filePath)
     {
+        // Create the file at filePath first if it does not exist, so that the
+        // realpath() call in FileStream cannot fail on arm/arm64.
fstream outputString(filePath, std::ios::out); outputString.close(); outputString.clear(); - FileStream stream(filePath.c_str()); HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(instance); heapProfile->DumpHeapSnapshot(DumpFormat::JSON, &stream); - - inputStream = fstream(filePath, std::ios::in); - HeapProfilerInterface::Destroy(instance); + return heapProfile->GetIdCount(); } - ~HProfTestHelper() + bool ContrastJSONLineHeader(const std::string &filePath, std::string lineHeader) { - inputStream.close(); - inputStream.clear(); - } - - bool ContrastJSONLineHeader(std::string lineHeader) - { - bool allSame = false; std::string line; - int i = 1; + std::ifstream inputStream(filePath); while (getline(inputStream, line)) { if (line.find(lineHeader) != line.npos) { - allSame = true; - break; + return true; } - i++; } - return allSame; + return false; } - bool ContrastJSONSectionPayload(std::string dataLable, int fieldNum) + bool ContrastJSONSectionPayload(const std::string &filePath, std::string dataLable, int fieldNum) { std::string line; int i = 1; + std::ifstream inputStream(filePath); while (getline(inputStream, line)) { if (i > 10 && line.find(dataLable) != line.npos) { // 10 : Hit the line std::string::size_type pos = 0; @@ -168,20 +164,21 @@ public: return false; // Lost the Line } - bool ContrastJSONClousure() + bool ContrastJSONClousure(const std::string &filePath) { std::string lineBk; // The Last Line std::string line; + std::ifstream inputStream(filePath); while (getline(inputStream, line)) { lineBk = line; } return lineBk.compare("}") == 0; } - int ExtractCountFromMeta(std::string typeLable) + int ExtractCountFromMeta(const std::string &filePath, std::string typeLable) { std::string line; - int i = 1; + std::ifstream inputStream(filePath); while (getline(inputStream, line)) { int length = line.length() - typeLable.length() - 1; if (line.find(typeLable) != line.npos) { // Get @@ -191,16 +188,16 @@ public: line = line.substr(typeLable.length(), length); return std::stoi(line.c_str()); } - i++; } return -1; } - int ExtractCountFromPayload(std::string dataLabel) + int ExtractCountFromPayload(const std::string &filePath, std::string dataLabel) { std::string line; bool hit = false; int loop = 0; + std::ifstream inputStream(filePath); while (getline(inputStream, line)) { if (!hit && line.find(dataLabel) != line.npos) { // Get loop += 1; // First Line @@ -227,71 +224,92 @@ public: private: EcmaVM *instance {nullptr}; - std::string filePath; - std::fstream inputStream {}; }; HWTEST_F_L0(HProfTest, ParseJSONHeader) { - HProfTestHelper tester("ParseJSONHeader.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONLineHeader("{\"snapshot\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("{\"meta\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("{\"node_fields\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"node_types\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"edge_fields\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"edge_types\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"trace_function_info_fields\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"trace_node_fields\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"sample_fields\":")); - ASSERT_TRUE(tester.ContrastJSONLineHeader("\"location_fields\":")); + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "{\"snapshot\":")); + 
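For reference, the header assertions in ParseJSONHeader pin the dump to an opening skeleton of roughly this shape (abridged: field lists are shortened here and the sample is not byte-exact):

```
// Abridged shape of a .heapsnapshot header, for orientation only; the
// real field lists match the writes in SerializeSnapshotHeader.
const char *EXPECTED_HEADER_SHAPE =
    "{\"snapshot\":\n"
    "{\"meta\":\n"
    "{\"node_fields\":[\"type\",\"name\",\"id\",\"self_size\",\"edge_count\",\"trace_node_id\",\"detachedness\"],\n"
    "\"node_types\":[[\"hidden\",\"array\",\"string\",\"object\"]],\n"
    "\"edge_fields\":[\"type\",\"name_or_index\",\"to_node\"],\n"
    "\"edge_types\":[[\"context\",\"element\",\"property\"]],\n"
    "\"trace_function_info_fields\":[\"function_id\",\"name\",\"script_name\",\"script_id\",\"line\",\"column\"],\n"
    "\"trace_node_fields\":[\"id\",\"function_info_index\",\"count\",\"size\",\"children\"],\n"
    "\"sample_fields\":[\"timestamp_us\",\"last_assigned_id\"],\n"
    "\"location_fields\":[\"object_index\",\"script_id\",\"line\",\"column\"]},\n"
    "\"node_count\":0,\n\"edge_count\":0,\n\"trace_function_count\":0\n},\n";
```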
ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "{\"meta\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "{\"node_fields\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"node_types\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"edge_fields\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"edge_types\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"trace_function_info_fields\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"trace_node_fields\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"sample_fields\":")); + ASSERT_TRUE(tester.ContrastJSONLineHeader("test.heapsnapshot", "\"location_fields\":")); } HWTEST_F_L0(HProfTest, ContrastTraceFunctionInfo) { - HProfTestHelper tester("ContrastTraceFunctionInfo.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONSectionPayload("\"trace_function_infos\":", 2)); // Empty + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONSectionPayload("test.heapsnapshot", "\"trace_function_infos\":", 2)); // Empty } HWTEST_F_L0(HProfTest, ContrastTraceTree) { - HProfTestHelper tester("ContrastTraceTree.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONSectionPayload("\"trace_tree\":", 2)); // Empty + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONSectionPayload("test.heapsnapshot", "\"trace_tree\":", 2)); // Empty } HWTEST_F_L0(HProfTest, ContrastSamples) { - HProfTestHelper tester("ContrastSamples.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONSectionPayload("\"samples\":", 2)); // Empty + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONSectionPayload("test.heapsnapshot", "\"samples\":", 2)); // Empty } HWTEST_F_L0(HProfTest, ContrastLocations) { - HProfTestHelper tester("ContrastLocations.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONSectionPayload("\"locations\":", 2)); // Empty + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONSectionPayload("test.heapsnapshot", "\"locations\":", 2)); // Empty } HWTEST_F_L0(HProfTest, ContrastString) { - HProfTestHelper tester("ContrastString.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONSectionPayload("\"strings\":[", 1 + 1)); + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONSectionPayload("test.heapsnapshot", "\"strings\":[", 2)); } HWTEST_F_L0(HProfTest, ContrastClosure) { - HProfTestHelper tester("ContrastClosure.heapsnapshot", instance); - ASSERT_TRUE(tester.ContrastJSONClousure()); + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ContrastJSONClousure("test.heapsnapshot")); } HWTEST_F_L0(HProfTest, ContrastEdgeCount) { - HProfTestHelper tester("ContrastEdgeCount.heapsnapshot", instance); - ASSERT_TRUE(tester.ExtractCountFromMeta("\"edge_count\":") == tester.ExtractCountFromPayload("\"edges\":[")); + HProfTestHelper tester(instance); + tester.GenerateSnapShot("test.heapsnapshot"); + ASSERT_TRUE(tester.ExtractCountFromMeta("test.heapsnapshot", "\"edge_count\":") == + tester.ExtractCountFromPayload("test.heapsnapshot", "\"edges\":[")); } -HWTEST_F_L0(HProfTest, ContrastTraceFunctionInfoCount) 
-HWTEST_F_L0(HProfTest, ContrastTraceFunctionInfoCount)
+HWTEST_F_L0(HProfTest, TraceFuncInfoCount)
 {
-    HProfTestHelper tester("ContrastTraceFunctionInfoCount.heapsnapshot", instance);
-    ASSERT_TRUE(tester.ExtractCountFromMeta("\"trace_function_count\":") ==
-                tester.ExtractCountFromPayload("\"trace_function_infos\":"));
+    HProfTestHelper tester(instance);
+    tester.GenerateSnapShot("test.heapsnapshot");
+    ASSERT_TRUE(tester.ExtractCountFromMeta("test.heapsnapshot", "\"trace_function_count\":") ==
+                tester.ExtractCountFromPayload("test.heapsnapshot", "\"trace_function_infos\":"));
+}
+
+HWTEST_F_L0(HProfTest, TestIdConsistency)
+{
+    HProfTestHelper tester(instance);
+    int64_t count1 = tester.GenerateSnapShot("TestIdConsistency_1.heapsnapshot");
+    for (int i = 0; i < 100; ++i) {
+        instance->GetFactory()->NewJSAsyncFuncObject();
+        instance->GetFactory()->NewJSSymbol();
+    }
+    int64_t count2 = tester.GenerateSnapShot("TestIdConsistency_2.heapsnapshot");
+    ASSERT_TRUE(std::abs(count1 - count2) <= 500LL);
+    // load the two heapsnapshots into Chrome, and further use "Comparison View"
 }
 }  // namespace panda::test
diff --git a/ecmascript/dfx/stackinfo/js_stackgetter.cpp b/ecmascript/dfx/stackinfo/js_stackgetter.cpp
index 1a1f665072cfa3f996d9e28d27c1a78892691058..2b48d087d9edd337a784db7c6b3945cc7f423f85 100644
--- a/ecmascript/dfx/stackinfo/js_stackgetter.cpp
+++ b/ecmascript/dfx/stackinfo/js_stackgetter.cpp
@@ -18,6 +18,7 @@
 #include "ecmascript/compiler/aot_file/aot_file_manager.h"
 #include "ecmascript/compiler/assembler/assembler.h"
 #include "ecmascript/frames.h"
+#include "ecmascript/global_env_constants-inl.h"
 #include "ecmascript/jspandafile/js_pandafile_manager.h"

 namespace panda::ecmascript {
@@ -55,44 +56,51 @@ bool JsStackGetter::ParseMethodInfo(struct MethodKey &methodKey,
                                     FrameInfoTemp &codeEntry, bool isCpuProfiler)
 {
-    const JSPandaFile *jsPandaFile = it.CheckAndGetMethod()->GetJSPandaFile();
+    auto method = it.CheckAndGetMethod();
+    const JSPandaFile *jsPandaFile = method->GetJSPandaFile();
     codeEntry.methodKey = methodKey;
-    if (jsPandaFile == nullptr) {
+    if (method->IsNativeWithCallField()) {
         FrameIterator itNext(it.GetSp(), it.GetThread());
         itNext.Advance();
         GetNativeMethodCallPos(itNext, codeEntry);
         GetNativeStack(vm, it, codeEntry.functionName, sizeof(codeEntry.functionName), isCpuProfiler);
     } else {
         EntityId methodId = reinterpret_cast<MethodLiteral *>(methodKey.methodIdentifier)->GetMethodId();
-        const char *tempVariable = MethodLiteral::GetMethodName(jsPandaFile, methodId);
-        uint8_t length = strlen(tempVariable);
-        if (length != 0 && tempVariable[0] == '#') {
+        // function name
+        const char *functionName = MethodLiteral::GetMethodName(jsPandaFile, methodId);
+        uint8_t length = strlen(functionName);
+        if (length != 0 && functionName[0] == '#') {
             uint8_t index = length - 1;
-            while (tempVariable[index] != '#') {
+            while (functionName[index] != '#') {
                 index--;
             }
-            tempVariable += (index + 1);
+            functionName += (index + 1);
         }
-        if (strlen(tempVariable) == 0) {
-            tempVariable = "anonymous";
+        if (strlen(functionName) == 0) {
+            functionName = "anonymous";
         }
-        if (!CheckAndCopy(codeEntry.functionName, sizeof(codeEntry.functionName), tempVariable)) {
+        if (!CheckAndCopy(codeEntry.functionName, sizeof(codeEntry.functionName), functionName)) {
             return false;
         }
-        // source file
+        // record name
+        const char *recordName = MethodLiteral::GetRecordNameWithSymbol(jsPandaFile, methodId);
+        if (strlen(recordName) != 0) {
+            if (!CheckAndCopy(codeEntry.recordName, sizeof(codeEntry.recordName), recordName)) {
+                return false;
+            }
+        }
+
         DebugInfoExtractor *debugExtractor =
             JSPandaFileManager::GetInstance()->GetJSPtExtractor(jsPandaFile);
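The renamed `functionName` block in the hunk above is a small demangling step: panda method names arrive in the form `#<scope>#<name>`, only the text after the last `#` is reported, and an empty result becomes `anonymous`. The same logic as a self-contained sketch (the ParseMethodInfo hunk itself resumes below):

```cpp
#include <cstring>

// Mirrors the loop in ParseMethodInfo: strip a "#<scope>#" prefix by scanning
// back to the last '#', then fall back to "anonymous" for empty names.
const char *NormalizeFunctionName(const char *name)
{
    std::size_t length = std::strlen(name);
    if (length != 0 && name[0] == '#') {
        std::size_t index = length - 1;
        while (name[index] != '#') {  // always terminates: name[0] is '#'
            index--;
        }
        name += index + 1;
    }
    return std::strlen(name) == 0 ? "anonymous" : name;
}
// NormalizeFunctionName("#lib#foo") yields "foo";
// NormalizeFunctionName("#lib#") yields "anonymous".
```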
         if (debugExtractor == nullptr) {
             return false;
         }
+        // source file
         const std::string &sourceFile = debugExtractor->GetSourceFile(methodId);
-        if (sourceFile.empty()) {
-            tempVariable = "";
-        } else {
-            tempVariable = sourceFile.c_str();
-        }
-        if (!CheckAndCopy(codeEntry.url, sizeof(codeEntry.url), tempVariable)) {
-            return false;
+        if (!sourceFile.empty()) {
+            if (!CheckAndCopy(codeEntry.url, sizeof(codeEntry.url), sourceFile.c_str())) {
+                return false;
+            }
         }
         // line number and column number
         codeEntry.lineNumber = debugExtractor->GetFristLine(methodId);
@@ -154,12 +162,11 @@ void JsStackGetter::GetNativeStack(const EcmaVM *vm, const FrameIterator &it, ch
 }

 RunningState JsStackGetter::GetRunningState(const FrameIterator &it, const EcmaVM *vm,
-                                            const JSPandaFile *jsPandaFile, bool topFrame,
+                                            bool isNative, bool topFrame,
                                             bool enableVMTag)
 {
     JSThread *thread = vm->GetAssociatedJSThread();
     JSFunction* function = JSFunction::Cast(it.GetFunction().GetTaggedObject());
-    bool isNative = jsPandaFile == nullptr;

     if (enableVMTag) {
         if (topFrame) {
@@ -266,12 +273,11 @@ void *JsStackGetter::GetMethodIdentifier(Method *method, const FrameIterator &it
 {
     JSFunction* function = JSFunction::Cast(it.GetFunction().GetTaggedObject());
     JSTaggedValue extraInfoValue = function->GetNativeFunctionExtraInfo();
-    if (extraInfoValue.CheckIsJSNativePointer()) {
-        JSNativePointer *extraInfo = JSNativePointer::Cast(extraInfoValue.GetTaggedObject());
-        return reinterpret_cast<void *>(extraInfo->GetData());
-    }
-
-    if (method->GetJSPandaFile() == nullptr) {
+    if (method->IsNativeWithCallField()) {
+        if (extraInfoValue.CheckIsJSNativePointer()) {
+            JSNativePointer *extraInfo = JSNativePointer::Cast(extraInfoValue.GetTaggedObject());
+            return reinterpret_cast<void *>(extraInfo->GetData());
+        }
         return const_cast<void *>(method->GetNativePointer());
     }

diff --git a/ecmascript/dfx/stackinfo/js_stackgetter.h b/ecmascript/dfx/stackinfo/js_stackgetter.h
index a10bfd0eac618c7923fb71847cbea572b4caebcc..d2f576891de4ddba407e8b2fa9f88d9940a27898 100644
--- a/ecmascript/dfx/stackinfo/js_stackgetter.h
+++ b/ecmascript/dfx/stackinfo/js_stackgetter.h
@@ -61,6 +61,7 @@ struct NodeKey {
 struct FrameInfoTemp {
     char codeType[20] = {0}; // 20:the maximum size of the codeType
     char functionName[100] = {0}; // 100:the maximum size of the functionName
+    char recordName[500] = {0}; // 500:the maximum size of the recordName
     int columnNumber = -1;
     int lineNumber = -1;
     int scriptId = 0;
@@ -79,7 +80,7 @@ public:
     static bool CheckAndCopy(char *dest, size_t length, const char *src);
     static void GetNativeStack(const EcmaVM *vm, const FrameIterator &it, char *functionName, size_t size,
                                bool isCpuProfiler);
-    static RunningState GetRunningState(const FrameIterator &it, const EcmaVM *vm, const JSPandaFile *jsPandaFile,
+    static RunningState GetRunningState(const FrameIterator &it, const EcmaVM *vm, bool isNative,
                                         bool topFrame, bool enableVMTag = false);
     static void GetNativeMethodCallPos(FrameIterator &it, FrameInfoTemp &codeEntry);
     static void *GetMethodIdentifier(Method *method, const FrameIterator &it);
diff --git a/ecmascript/dfx/stackinfo/js_stackinfo.cpp b/ecmascript/dfx/stackinfo/js_stackinfo.cpp
index c2f94cbe280191523fa25489c60846904a1ce4c8..3fa10a1f719e861999810d70451bced823f45bd2 100644
--- a/ecmascript/dfx/stackinfo/js_stackinfo.cpp
+++ b/ecmascript/dfx/stackinfo/js_stackinfo.cpp
@@ -19,6 +19,7 @@
 #include "ecmascript/interpreter/frame_handler.h"
 #include "ecmascript/interpreter/interpreter.h"
 #include "ecmascript/jspandafile/js_pandafile_manager.h"
+#include "ecmascript/mem/heap-inl.h"
 #include "ecmascript/message_string.h"
 #include "ecmascript/platform/os.h"
 #if defined(ENABLE_EXCEPTION_BACKTRACE)
@@ -71,7 +72,7 @@ std::string JsStackInfo::BuildJsStackTrace(JSThread *thread, bool needNative)
     std::string data;
     JSTaggedType *current = const_cast<JSTaggedType *>(thread->GetCurrentFrame());
     FrameIterator it(current, thread);
-    for (; !it.Done(); it.Advance()) {
+    for (; !it.Done(); it.Advance()) {
         if (!it.IsJSFrame()) {
             continue;
         }
@@ -219,9 +220,29 @@ void CrashCallback(char *buf __attribute__((unused)), size_t len __attribute__((
 #endif
 }

-bool ReadUintptrFromAddr(int pid, uintptr_t addr, uintptr_t &value)
+bool ReadUintptrFromAddr(int pid, uintptr_t addr, uintptr_t &value, bool needCheckRegion)
 {
     if (pid == getpid()) {
+        if (needCheckRegion) {
+            bool flag = false;
+            auto callback = [addr, &flag](Region *region) {
+                uintptr_t regionBegin = region->GetBegin();
+                uintptr_t regionEnd = region->GetEnd();
+                if (regionBegin <= addr && addr <= regionEnd) {
+                    flag = true;
+                }
+            };
+            if (JsStackInfo::loader != nullptr) {
+                const Heap *heap = JsStackInfo::loader->GetHeap();
+                if (heap != nullptr) {
+                    heap->EnumerateRegions(callback);
+                }
+            }
+            if (!flag) {
+                LOG_ECMA(ERROR) << "addr not in Region, addr: " << addr;
+                return false;
+            }
+        }
         value = *(reinterpret_cast<uintptr_t *>(addr));
         return true;
     }
@@ -329,10 +350,15 @@ bool StepArkManagedNativeFrame(int pid, uintptr_t *pc, uintptr_t *fp, uintptr_t
         LOG_ECMA(ERROR) << "fp is nullptr in StepArkManagedNativeFrame()!";
         return false;
     }
+    if (pid == getpid() && JsStackInfo::loader != nullptr &&
+        !JsStackInfo::loader->InsideStub(*pc) && !JsStackInfo::loader->InsideAOT(*pc)) {
+        LOG_ECMA(ERROR) << "invalid pc in StepArkManagedNativeFrame()!";
+        return false;
+    }
     while (true) {
         currentPtr -= sizeof(FrameType);
         uintptr_t frameType = 0;
-        if (!ReadUintptrFromAddr(pid, currentPtr, frameType)) {
+        if (!ReadUintptrFromAddr(pid, currentPtr, frameType, true)) {
             return false;
         }
         uintptr_t typeOffset = 0;
@@ -348,7 +374,7 @@
         }
         currentPtr -= typeOffset;
         currentPtr += prevOffset;
-        if (!ReadUintptrFromAddr(pid, currentPtr, currentPtr)) {
+        if (!ReadUintptrFromAddr(pid, currentPtr, currentPtr, true)) {
             return false;
         }
         if (currentPtr == 0) {
@@ -359,7 +385,7 @@
         currentPtr += sizeof(FrameType);
         *fp = currentPtr;
         currentPtr += FP_SIZE;
-        if (!ReadUintptrFromAddr(pid, currentPtr, *pc)) {
+        if (!ReadUintptrFromAddr(pid, currentPtr, *pc, true)) {
             return false;
         }
         currentPtr += LR_SIZE;
@@ -418,7 +444,7 @@ bool GetArkJSHeapCrashInfo(int pid, uintptr_t *bytecodePc, uintptr_t *fp, bool o
     }
     currentPtr -= sizeof(FrameType);
     uintptr_t frameType = 0;
-    if (!ReadUintptrFromAddr(pid, currentPtr, frameType)) {
+    if (!ReadUintptrFromAddr(pid, currentPtr, frameType, false)) {
         return false;
     }
     if (static_cast<FrameType>(frameType) != FrameType::ASM_INTERPRETER_FRAME) {
@@ -426,7 +452,7 @@
     }
     size_t strIndex = 0;
     uintptr_t registerBytecode = 0;
-    if (!ReadUintptrFromAddr(pid, *bytecodePc, registerBytecode)) {
+    if (!ReadUintptrFromAddr(pid, *bytecodePc, registerBytecode, false)) {
         return false;
     }
     CopyBytecodeInfoToBuffer("RegisterBytecode:", registerBytecode, strIndex, outStr, strLen);
@@ -436,10 +462,10 @@ bool GetArkJSHeapCrashInfo(int pid, uintptr_t *bytecodePc, uintptr_t *fp, bool o
     currentPtr += pcOffset;
     uintptr_t framePc = 0;
     uintptr_t frameBytecode = 0;
-    if (!ReadUintptrFromAddr(pid, currentPtr, framePc)) {
+    if (!ReadUintptrFromAddr(pid, currentPtr, framePc, false)) {
         return false;
     }
-    if (!ReadUintptrFromAddr(pid, framePc, frameBytecode)) {
+    if (!ReadUintptrFromAddr(pid, framePc, frameBytecode, false)) {
         return false;
     }
     CopyBytecodeInfoToBuffer(" FrameBytecode:", frameBytecode, strIndex, outStr, strLen);
@@ -448,7 +474,7 @@
     currentPtr -= pcOffset;
     currentPtr += functionOffset;
     uintptr_t functionAddress = 0;
-    if (!ReadUintptrFromAddr(pid, currentPtr, functionAddress)) {
+    if (!ReadUintptrFromAddr(pid, currentPtr, functionAddress, false)) {
         return false;
     }
     JSTaggedValue functionValue(static_cast<JSTaggedType>(functionAddress));
diff --git a/ecmascript/dfx/vm_thread_control.cpp b/ecmascript/dfx/vm_thread_control.cpp
index 8b6ad945c4a0f41c65c6b695ffacd14260ffed77..3a1d7d98186a4b99e46159683b2562f45808eac4 100644
--- a/ecmascript/dfx/vm_thread_control.cpp
+++ b/ecmascript/dfx/vm_thread_control.cpp
@@ -16,6 +16,8 @@
 #include "ecmascript/dfx/vm_thread_control.h"

 namespace panda::ecmascript {
+constexpr int32_t TIME_OUT_MS = 1500;
+
 bool VmThreadControl::NotifyVMThreadSuspension() // block caller thread
 {
     if (VMNeedSuspension()) { // only enable one thread to post suspension
@@ -25,7 +27,11 @@ bool VmThreadControl::NotifyVMThreadSuspension() // block caller thread
     thread_->SetCheckSafePointStatus();
     os::memory::LockHolder lock(vmThreadSuspensionMutex_);
     while (!IsSuspended()) {
-        vmThreadNeedSuspensionCV_.Wait(&vmThreadSuspensionMutex_);
+        if (vmThreadNeedSuspensionCV_.TimedWait(&vmThreadSuspensionMutex_, TIME_OUT_MS)) {
+            SetVMNeedSuspension(false);
+            thread_->ResetCheckSafePointStatus();
+            return false;
+        }
     }
     return true;
 }
diff --git a/ecmascript/dfx/vmstat/caller_stat.h b/ecmascript/dfx/vmstat/caller_stat.h
index 928e38959bc38fb6fb6c8d774a0f24dfcca03ed7..ba765e32418791300f16ce6d49c5572d05699e57 100644
--- a/ecmascript/dfx/vmstat/caller_stat.h
+++ b/ecmascript/dfx/vmstat/caller_stat.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Copyright (c) 2021-2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
 * limitations under the License.
 */
-#ifndef ECMASCRIPT_VMSTAT_CALLER_STAT_H
-#define ECMASCRIPT_VMSTAT_CALLER_STAT_H
+#ifndef ECMASCRIPT_DFX_VMSTAT_CALLER_STAT_H
+#define ECMASCRIPT_DFX_VMSTAT_CALLER_STAT_H

 #include
 #include
@@ -128,4 +128,4 @@ private:
     friend class FunctionCallTimer;
 };
 } // namespace panda::ecmascript
-#endif
+#endif // ECMASCRIPT_DFX_VMSTAT_CALLER_STAT_H
diff --git a/ecmascript/dfx/vmstat/function_call_timer.h b/ecmascript/dfx/vmstat/function_call_timer.h
index 7b15322fc6177a4d9ae3d763c5b0979733d46eca..ec983974a46f8534c5775d6c7725980aa1fde311 100644
--- a/ecmascript/dfx/vmstat/function_call_timer.h
+++ b/ecmascript/dfx/vmstat/function_call_timer.h
@@ -13,8 +13,8 @@
 * limitations under the License.
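The `needCheckRegion` flag threaded through `ReadUintptrFromAddr` above makes in-process frame walking defensive: an address is dereferenced only if some heap region contains it, and `StepArkManagedNativeFrame` additionally rejects a `pc` that is neither stub nor AOT code, while the `GetArkJSHeapCrashInfo` call sites opt out by passing `false`. A standalone sketch of the guard, with a plain `Region` struct standing in for the runtime's heap types:

```cpp
#include <cstdint>
#include <vector>

struct Region {
    uintptr_t begin;
    uintptr_t end;
};

// Dereference addr only when it lies inside a known region, mirroring the
// heap->EnumerateRegions(callback) bounds check in the patch above.
bool ReadUintptrChecked(const std::vector<Region> &regions, uintptr_t addr, uintptr_t &value)
{
    bool inRegion = false;
    for (const Region &region : regions) {
        if (region.begin <= addr && addr <= region.end) {
            inRegion = true;
            break;
        }
    }
    if (!inRegion) {
        return false;  // a corrupt frame pointer becomes a soft failure, not a crash
    }
    value = *reinterpret_cast<uintptr_t *>(addr);
    return true;
}
```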
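Similarly defensive is the `vm_thread_control.cpp` change above: `NotifyVMThreadSuspension` now bounds its wait with `TIME_OUT_MS` and rolls the suspension request back on timeout instead of blocking the caller forever. The runtime's `os::memory` condition variable reports timeout by returning true from `TimedWait`; the same shape with standard-library primitives looks roughly like this:

```cpp
#include <chrono>
#include <condition_variable>
#include <mutex>

constexpr std::chrono::milliseconds TIME_OUT_MS{1500};

// Returns false when the VM thread never reports suspension in time, so the
// caller can undo its request flags rather than wait indefinitely.
bool WaitForSuspension(std::mutex &mutex, std::condition_variable &cv, const bool &suspended)
{
    std::unique_lock<std::mutex> lock(mutex);
    while (!suspended) {  // loop also absorbs spurious wakeups
        if (cv.wait_for(lock, TIME_OUT_MS) == std::cv_status::timeout) {
            return false;
        }
    }
    return true;
}
```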
 */
-#ifndef ECMASCRIPT_FCUNTION_CALL_TIMER_H
-#define ECMASCRIPT_FCUNTION_CALL_TIMER_H
+#ifndef ECMASCRIPT_DFX_VMSTAT_FUNCTION_CALL_TIMER_H
+#define ECMASCRIPT_DFX_VMSTAT_FUNCTION_CALL_TIMER_H

 #include "ecmascript/dfx/vmstat/caller_stat.h"
 #include "ecmascript/mem/c_containers.h"
@@ -82,4 +82,4 @@ private:
     CMap callTimer_ {};
 };
 }
-#endif
\ No newline at end of file
+#endif // ECMASCRIPT_DFX_VMSTAT_FUNCTION_CALL_TIMER_H
\ No newline at end of file
diff --git a/ecmascript/dfx/vmstat/runtime_stat.h b/ecmascript/dfx/vmstat/runtime_stat.h
index 3b86badc80439cd90b0e1a8182a57c7a430842ae..34093bd37239396683e04f6743b14aea65db77d8 100644
--- a/ecmascript/dfx/vmstat/runtime_stat.h
+++ b/ecmascript/dfx/vmstat/runtime_stat.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Copyright (c) 2021-2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
 * limitations under the License.
 */
-#ifndef ECMASCRIPT_VMSTAT_RUNTIME_STAT_H
-#define ECMASCRIPT_VMSTAT_RUNTIME_STAT_H
+#ifndef ECMASCRIPT_DFX_VMSTAT_RUNTIME_STAT_H
+#define ECMASCRIPT_DFX_VMSTAT_RUNTIME_STAT_H

 #include "ecmascript/ecma_vm.h"
 #include "ecmascript/js_thread.h"
@@ -85,4 +85,4 @@ private:
     EcmaRuntimeStat *stats_ {nullptr};
 };
 } // namespace panda::ecmascript
-#endif
+#endif // ECMASCRIPT_DFX_VMSTAT_RUNTIME_STAT_H
diff --git a/ecmascript/dump.cpp b/ecmascript/dump.cpp
index 9b2178ba3a7b0bdb584f5c1d94e6d3d4d5328a3d..090054a03105fb45cffe8e81360d81180cac6676 100644
--- a/ecmascript/dump.cpp
+++ b/ecmascript/dump.cpp
@@ -19,6 +19,7 @@
 #include
 #include "ecmascript/accessor_data.h"
+#include "ecmascript/dfx/hprof/heap_snapshot.h"
 #include "ecmascript/ecma_vm.h"
 #include "ecmascript/global_dictionary-inl.h"
 #include "ecmascript/global_env.h"
@@ -150,6 +151,7 @@ CString JSHClass::DumpJSType(JSType type)
         case JSType::LINE_STRING:
         case JSType::CONSTANT_STRING:
         case JSType::TREE_STRING:
+        case JSType::SLICED_STRING:
             return "BaseString";
         case JSType::JS_NATIVE_POINTER:
             return "NativePointer";
@@ -531,39 +533,61 @@ static void DumpHClass(const JSHClass *jshclass, std::ostream &os, bool withDeta
         LayoutInfo *layoutInfo = LayoutInfo::Cast(attrs.GetTaggedObject());
         layoutInfo->Dump(os);
     }
+
     os << " - Transitions :" << std::setw(DUMP_TYPE_OFFSET);
     JSTaggedValue transtions = jshclass->GetTransitions();
     transtions.DumpTaggedValue(os);
     os << "\n";
-    if (withDetail && !transtions.IsUndefined()) {
+    if (withDetail && !transtions.IsWeakForHeapObject() && transtions.IsDictionary()) {
         transtions.Dump(os);
     }
+
+    os << " - ProtoChangeMarker :" << std::setw(DUMP_TYPE_OFFSET);
+    JSTaggedValue marker = jshclass->GetProtoChangeMarker();
+    marker.DumpTaggedValue(os);
+    if (marker.IsHeapObject()) {
+        ProtoChangeMarker::Cast(marker.GetTaggedObject())->Dump(os);
+    } else {
+        os << "\n";
+    }
+
+    os << " - ProtoChangeDetails :" << std::setw(DUMP_TYPE_OFFSET);
+    JSTaggedValue details = jshclass->GetProtoChangeDetails();
+    details.DumpTaggedValue(os);
+    if (details.IsHeapObject()) {
+        ProtoChangeDetails::Cast(details.GetTaggedObject())->Dump(os);
+    } else {
+        os << "\n";
+    }
+
     JSTaggedValue supers = jshclass->GetSupers();
     uint32_t length = 0;
     if (supers.IsTaggedArray()) {
         length = WeakVector::Cast(supers.GetTaggedObject())->GetExtraLength();
     }
-    os << " - Supers[" << std::dec << length << "]:\n";
+    os << " - Supers[" << std::dec << length << "]: ";
+    os << std::setw(DUMP_TYPE_OFFSET);
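A note on the two blocks just added to `DumpHClass` (the hunk continues below): `DumpTaggedValue` prints a value without a trailing newline, so each field either hands the line off to the referent's detailed `Dump`, which terminates it, or closes the line itself. The convention, sketched with a duck-typed stand-in for a tagged-value handle:

```cpp
#include <ostream>

// Field stands in for a JSTaggedValue-like handle offering DumpTaggedValue,
// IsHeapObject and Dump; only the newline ownership matters here.
template <typename Field>
void DumpFieldWithDetail(std::ostream &os, const char *name, const Field &field)
{
    os << name;
    field.DumpTaggedValue(os);  // by convention, no trailing newline
    if (field.IsHeapObject()) {
        field.Dump(os);         // the detailed dump ends the line
    } else {
        os << "\n";             // otherwise we must end it ourselves
    }
}
```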
supers.DumpTaggedValue(os); - os << "\n"; if (withDetail && !supers.IsUndefined()) { WeakVector::Cast(supers.GetTaggedObject())->Dump(os); + } else { + os << "\n"; } os << " - VTable :" << std::setw(DUMP_TYPE_OFFSET); JSTaggedValue vtable = jshclass->GetVTable(); vtable.DumpTaggedValue(os); - os << "\n"; if (withDetail && !vtable.IsUndefined()) { VTable::Cast(vtable.GetTaggedObject())->Dump(os); + } else { + os << "\n"; } os << " - Flags : " << std::setw(DUMP_TYPE_OFFSET); os << "IsCtor :" << std::boolalpha << jshclass->IsConstructor(); os << "| IsCallable :" << std::boolalpha << jshclass->IsCallable(); os << "| IsExtensible :" << std::boolalpha << jshclass->IsExtensible(); - os << "| ElementRepresentation :" << static_cast(jshclass->GetElementRepresentation()); + os << "| ElementsKind :" << Elements::GetString(jshclass->GetElementsKind()); os << "| NumberOfProps :" << std::dec << jshclass->NumberOfProps(); os << "| InlinedProperties :" << std::dec << jshclass->GetInlinedProperties(); os << "| IsTS :" << std::boolalpha << jshclass->IsTS(); @@ -573,9 +597,8 @@ static void DumpHClass(const JSHClass *jshclass, std::ostream &os, bool withDeta static void DumpClass(TaggedObject *obj, std::ostream &os) { - JSHClass *hclass = obj->GetClass(); - os << "JSHClass :" << std::setw(DUMP_TYPE_OFFSET) << " klass_(" << std::hex << hclass << ")\n"; - DumpHClass(hclass, os, true); + ASSERT(obj->GetClass()->GetObjectType() == JSType::HCLASS); + DumpHClass(JSHClass::Cast(obj), os, true); } static void DumpAttr(const PropertyAttributes &attr, bool fastMode, std::ostream &os) @@ -615,6 +638,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) auto jsHclass = obj->GetClass(); JSType type = jsHclass->GetObjectType(); + bool needDumpHClass = false; switch (type) { case JSType::HCLASS: return DumpClass(obj, os); @@ -635,6 +659,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) case JSType::LINE_STRING: case JSType::CONSTANT_STRING: case JSType::TREE_STRING: + case JSType::SLICED_STRING: DumpStringClass(EcmaString::Cast(obj), os); os << "\n"; break; @@ -652,9 +677,11 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) case JSType::JS_SYNTAX_ERROR: case JSType::JS_OOM_ERROR: case JSType::JS_ARGUMENTS: + needDumpHClass = true; JSObject::Cast(obj)->Dump(os); break; case JSType::JS_FUNCTION_BASE: + needDumpHClass = true; JSFunctionBase::Cast(obj)->Dump(os); break; case JSType::GLOBAL_ENV: @@ -663,24 +690,31 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) case JSType::ACCESSOR_DATA: break; case JSType::JS_FUNCTION: + needDumpHClass = true; JSFunction::Cast(obj)->Dump(os); break; case JSType::JS_BOUND_FUNCTION: + needDumpHClass = true; JSBoundFunction::Cast(obj)->Dump(os); break; case JSType::JS_SET: + needDumpHClass = true; JSSet::Cast(obj)->Dump(os); break; case JSType::JS_MAP: + needDumpHClass = true; JSMap::Cast(obj)->Dump(os); break; case JSType::JS_WEAK_SET: + needDumpHClass = true; JSWeakSet::Cast(obj)->Dump(os); break; case JSType::JS_WEAK_MAP: + needDumpHClass = true; JSWeakMap::Cast(obj)->Dump(os); break; case JSType::JS_WEAK_REF: + needDumpHClass = true; JSWeakRef::Cast(obj)->Dump(os); break; case JSType::JS_FINALIZATION_REGISTRY: @@ -690,12 +724,15 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) CellRecord::Cast(obj)->Dump(os); break; case JSType::JS_REG_EXP: + needDumpHClass = true; JSRegExp::Cast(obj)->Dump(os); break; case JSType::JS_DATE: + needDumpHClass = true; JSDate::Cast(obj)->Dump(os); break; case JSType::JS_ARRAY: + needDumpHClass = 
true; JSArray::Cast(obj)->Dump(os); break; case JSType::JS_TYPED_ARRAY: @@ -710,6 +747,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) case JSType::JS_FLOAT64_ARRAY: case JSType::JS_BIGINT64_ARRAY: case JSType::JS_BIGUINT64_ARRAY: + needDumpHClass = true; JSTypedArray::Cast(obj)->Dump(os); break; case JSType::BIGINT: @@ -719,6 +757,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) ByteArray::Cast(obj)->Dump(os); break; case JSType::JS_PROXY: + needDumpHClass = true; JSProxy::Cast(obj)->Dump(os); break; case JSType::JS_PRIMITIVE_REF: @@ -752,6 +791,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) ResolvingFunctionsRecord::Cast(obj)->Dump(os); break; case JSType::JS_PROMISE: + needDumpHClass = true; JSPromise::Cast(obj)->Dump(os); break; case JSType::JS_PROMISE_REACTIONS_FUNCTION: @@ -800,6 +840,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) JSProxyRevocFunction::Cast(obj)->Dump(os); break; case JSType::JS_ASYNC_FUNCTION: + needDumpHClass = true; JSAsyncFunction::Cast(obj)->Dump(os); break; case JSType::JS_ASYNC_AWAIT_STATUS_FUNCTION: @@ -809,6 +850,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) JSGeneratorFunction::Cast(obj)->Dump(os); break; case JSType::JS_ASYNC_GENERATOR_FUNCTION: + needDumpHClass = true; JSAsyncGeneratorFunction::Cast(obj)->Dump(os); break; case JSType::JS_ASYNC_GENERATOR_RESUME_NEXT_RETURN_PROCESSOR_RST_FTN: @@ -855,13 +897,16 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) PropertyBox::Cast(obj)->Dump(os); break; case JSType::JS_REALM: + needDumpHClass = true; JSRealm::Cast(obj)->Dump(os); break; #ifdef ARK_SUPPORT_INTL case JSType::JS_INTL: + needDumpHClass = true; JSIntl::Cast(obj)->Dump(os); break; case JSType::JS_LOCALE: + needDumpHClass = true; JSLocale::Cast(obj)->Dump(os); break; case JSType::JS_DATE_TIME_FORMAT: @@ -874,6 +919,7 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) JSNumberFormat::Cast(obj)->Dump(os); break; case JSType::JS_COLLATOR: + needDumpHClass = true; JSCollator::Cast(obj)->Dump(os); break; case JSType::JS_PLURAL_RULES: @@ -898,12 +944,15 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) break; #endif case JSType::JS_GENERATOR_OBJECT: + needDumpHClass = true; JSGeneratorObject::Cast(obj)->Dump(os); break; case JSType::JS_ASYNC_GENERATOR_OBJECT: + needDumpHClass = true; JSAsyncGeneratorObject::Cast(obj)->Dump(os); break; case JSType::JS_ASYNC_FUNC_OBJECT: + needDumpHClass = true; JSAsyncFuncObject::Cast(obj)->Dump(os); break; case JSType::JS_GENERATOR_CONTEXT: @@ -1083,7 +1132,9 @@ static void DumpObject(TaggedObject *obj, std::ostream &os) break; } - DumpHClass(jsHclass, os, false); + if (needDumpHClass) { + DumpHClass(jsHclass, os, false); + } } void JSTaggedValue::DumpSpecialValue(std::ostream &os) const @@ -1122,8 +1173,7 @@ void JSTaggedValue::DumpHeapObjectType(std::ostream &os) const bool isWeak = IsWeak(); TaggedObject *obj = isWeak ? 
GetTaggedWeakRef() : GetTaggedObject(); if (isWeak) { - os << "----------Dump Weak Referent----------" - << "\n"; + os << " [Weak Ref] "; } JSType type = obj->GetClass()->GetObjectType(); @@ -1409,7 +1459,7 @@ void JSObject::Dump(std::ostream &os) const os << ": ("; JSTaggedValue val; if (attr.IsInlinedProps()) { - val = GetPropertyInlinedProps(i); + val = GetPropertyInlinedPropsWithRep(i, attr); } else { val = properties->Get(i - static_cast(jshclass->GetInlinedProperties())); } @@ -1572,7 +1622,7 @@ void JSAPITreeMap::Dump(std::ostream &os) const map->Dump(os); } -void JSAPITreeMap::DumpForSnapshot(std::vector> &vec) const +void JSAPITreeMap::DumpForSnapshot(std::vector &vec) const { TaggedTreeMap *map = TaggedTreeMap::Cast(GetTreeMap().GetTaggedObject()); map->DumpForSnapshot(vec); @@ -1595,13 +1645,13 @@ void JSAPITreeMapIterator::Dump(std::ostream &os) const map->Dump(os); } -void JSAPITreeMapIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPITreeMapIterator::DumpForSnapshot(std::vector &vec) const { TaggedTreeMap *map = TaggedTreeMap::Cast(JSAPITreeMap::Cast(GetIteratedMap().GetTaggedObject())->GetTreeMap().GetTaggedObject()); map->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } @@ -1663,8 +1713,8 @@ void TaggedTreeMap::Dump(std::ostream &os) const node.DumpTaggedValue(os); os << std::right << "}" << "\n"; - int capacity = NumberOfElements() + NumberOfDeletedElements(); - for (int index = 0; index < capacity; index++) { + uint32_t capacity = NumberOfElements() + NumberOfDeletedElements(); + for (uint32_t index = 0; index < capacity; index++) { if (GetKey(index).IsHole()) { os << std::left << std::setw(DUMP_ELEMENT_OFFSET) << "[entry] " << index << ": "; GetKey(index).DumpTaggedValue(os); @@ -1687,7 +1737,7 @@ void JSAPITreeSet::Dump(std::ostream &os) const set->Dump(os); } -void JSAPITreeSet::DumpForSnapshot(std::vector> &vec) const +void JSAPITreeSet::DumpForSnapshot(std::vector &vec) const { TaggedTreeSet *set = TaggedTreeSet::Cast(GetTreeSet().GetTaggedObject()); set->DumpForSnapshot(vec); @@ -1710,13 +1760,13 @@ void JSAPITreeSetIterator::Dump(std::ostream &os) const set->Dump(os); } -void JSAPITreeSetIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPITreeSetIterator::DumpForSnapshot(std::vector &vec) const { TaggedTreeSet *set = TaggedTreeSet::Cast(JSAPITreeSet::Cast(GetIteratedSet().GetTaggedObject())->GetTreeSet().GetTaggedObject()); set->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } @@ -1740,8 +1790,8 @@ void TaggedTreeSet::Dump(std::ostream &os) const node.DumpTaggedValue(os); os << std::right << "}" << "\n"; - int capacity = NumberOfElements() + NumberOfDeletedElements(); - for (int index = 0; index < capacity; index++) { + uint32_t capacity = NumberOfElements() + NumberOfDeletedElements(); + for (uint32_t index = 0; index < capacity; index++) { if (GetKey(index).IsHole()) { os << std::left << 
std::setw(DUMP_ELEMENT_OFFSET) << "[entry] " << index << ": "; GetKey(index).DumpTaggedValue(os); @@ -1769,7 +1819,7 @@ void JSAPIPlainArray::Dump(std::ostream &os) const JSObject::Dump(os); } -void JSAPIPlainArray::DumpForSnapshot(std::vector> &vec) const +void JSAPIPlainArray::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } @@ -1782,11 +1832,11 @@ void JSAPIPlainArrayIterator::Dump(std::ostream &os) const JSObject::Dump(os); } -void JSAPIPlainArrayIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIPlainArrayIterator::DumpForSnapshot(std::vector &vec) const { JSAPIPlainArray *array = JSAPIPlainArray::Cast(GetIteratedPlainArray().GetTaggedObject()); array->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } @@ -1966,9 +2016,9 @@ void JSAPIDequeIterator::Dump(std::ostream &os) const void JSAPILightWeightMap::Dump(std::ostream &os) const { - int capacity = GetSize(); + uint32_t capacity = GetSize(); os << " - length: " << std::dec << capacity << "\n"; - int i = 0; + uint32_t i = 0; TaggedArray *hashArray = TaggedArray::Cast(GetHashes().GetTaggedObject()); TaggedArray *keyArray = TaggedArray::Cast(GetKeys().GetTaggedObject()); TaggedArray *valueArray = TaggedArray::Cast(GetValues().GetTaggedObject()); @@ -2000,7 +2050,7 @@ void JSAPIHashMap::Dump(std::ostream &os) const JSObject::Dump(os); } -void JSAPIHashMap::DumpForSnapshot(std::vector> &vec) const +void JSAPIHashMap::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } @@ -2013,7 +2063,7 @@ void JSAPIHashSet::Dump(std::ostream &os) const JSObject::Dump(os); } -void JSAPIHashSet::DumpForSnapshot(std::vector> &vec) const +void JSAPIHashSet::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } @@ -2025,7 +2075,7 @@ void JSAPIHashMapIterator::Dump(std::ostream &os) const JSObject::Dump(os); } -void JSAPIHashMapIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIHashMapIterator::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } @@ -2037,7 +2087,7 @@ void JSAPIHashSetIterator::Dump(std::ostream &os) const JSObject::Dump(os); } -void JSAPIHashSetIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIHashSetIterator::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } @@ -2076,10 +2126,10 @@ void JSAPIList::Dump(std::ostream &os) const list->Dump(os); } -void JSAPIList::DumpForSnapshot(std::vector> &vec) const +void JSAPIList::DumpForSnapshot(std::vector &vec) const { - TaggedSingleList *map = TaggedSingleList::Cast(GetSingleList().GetTaggedObject()); - map->DumpForSnapshot(vec); + TaggedSingleList *list = TaggedSingleList::Cast(GetSingleList().GetTaggedObject()); + list->DumpForSnapshot(vec); JSObject::DumpForSnapshot(vec); } @@ -2095,11 +2145,11 @@ void JSAPIListIterator::Dump(std::ostream &os) const list->Dump(os); } -void JSAPIListIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIListIterator::DumpForSnapshot(std::vector &vec) const { TaggedSingleList *list = TaggedSingleList::Cast(GetIteratedList().GetTaggedObject()); list->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } @@ -2113,10 +2163,10 @@ void JSAPILinkedList::Dump(std::ostream &os) const linkedList->Dump(os); } -void 
JSAPILinkedList::DumpForSnapshot(std::vector> &vec) const +void JSAPILinkedList::DumpForSnapshot(std::vector &vec) const { - TaggedDoubleList *map = TaggedDoubleList::Cast(GetDoubleList().GetTaggedObject()); - map->DumpForSnapshot(vec); + TaggedDoubleList *list = TaggedDoubleList::Cast(GetDoubleList().GetTaggedObject()); + list->DumpForSnapshot(vec); JSObject::DumpForSnapshot(vec); } @@ -2132,11 +2182,11 @@ void JSAPILinkedListIterator::Dump(std::ostream &os) const linkedList->Dump(os); } -void JSAPILinkedListIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPILinkedListIterator::DumpForSnapshot(std::vector &vec) const { TaggedDoubleList *linkedList = TaggedDoubleList::Cast(GetIteratedLinkedList().GetTaggedObject()); linkedList->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } @@ -3099,8 +3149,7 @@ void ProtoChangeDetails::Dump(std::ostream &os) const { os << " - ChangeListener: "; GetChangeListener().Dump(os); - os << "\n"; - os << " - RegisterIndex: " << GetRegisterIndex(); + os << " \t- RegisterIndex: " << GetRegisterIndex(); os << "\n"; } @@ -3564,7 +3613,7 @@ void CjsExports::Dump(std::ostream &os) const os << ": ("; JSTaggedValue val; if (attr.IsInlinedProps()) { - val = GetPropertyInlinedProps(i); + val = GetPropertyInlinedPropsWithRep(i, attr); } else { val = properties->Get(i - static_cast(jshclass->GetInlinedProperties())); } @@ -3618,8 +3667,7 @@ void ClassLiteral::Dump(std::ostream &os) const // ######################################################################################## // Dump for Snapshot // ######################################################################################## -static void DumpArrayClass(const TaggedArray *arr, - std::vector> &vec) +static void DumpArrayClass(const TaggedArray *arr, std::vector &vec) { DISALLOW_GARBAGE_COLLECTION; uint32_t len = arr->GetLength(); @@ -3631,8 +3679,18 @@ static void DumpArrayClass(const TaggedArray *arr, } } -static void DumpConstantPoolClass(const ConstantPool *arr, - std::vector> &vec) +static void DumpElementClass(const TaggedArray *arr, std::vector &vec) +{ + DISALLOW_GARBAGE_COLLECTION; + uint32_t len = arr->GetLength(); + vec.reserve(vec.size() + len); + for (uint32_t i = 0; i < len; i++) { + JSTaggedValue val(arr->Get(i)); + vec.emplace_back(i, val, Reference::ReferenceType::ELEMENT); + } +} + +static void DumpConstantPoolClass(const ConstantPool *arr, std::vector &vec) { DISALLOW_GARBAGE_COLLECTION; uint32_t len = arr->GetCacheLength(); @@ -3644,21 +3702,18 @@ static void DumpConstantPoolClass(const ConstantPool *arr, } } -static void DumpStringClass(const EcmaString *str, - std::vector> &vec) +static void DumpStringClass(const EcmaString *str, std::vector &vec) { - vec.emplace_back("string", JSTaggedValue(str)); + vec.emplace_back(CString("string"), JSTaggedValue(str)); } -static void DumpClass(TaggedObject *obj, - std::vector> &vec) +static void DumpClass(TaggedObject *obj, std::vector &vec) { JSHClass *jshclass = obj->GetClass(); - vec.emplace_back("__proto__", jshclass->GetPrototype()); + vec.emplace_back(CString("__proto__"), jshclass->GetPrototype()); } -static void DumpObject(TaggedObject *obj, - std::vector> &vec, bool isVmMode) +static void DumpObject(TaggedObject *obj, std::vector &vec, bool isVmMode) { DISALLOW_GARBAGE_COLLECTION; auto jsHclass = obj->GetClass(); @@ -3672,14 +3727,19 @@ static void DumpObject(TaggedObject *obj, case 
JSType::TAGGED_DICTIONARY: case JSType::LEXICAL_ENV: case JSType::COW_TAGGED_ARRAY: + case JSType::AOT_LITERAL_INFO: DumpArrayClass(TaggedArray::Cast(obj), vec); return; case JSType::CONSTANT_POOL: DumpConstantPoolClass(ConstantPool::Cast(obj), vec); return; + case JSType::VTABLE: + VTable::Cast(obj)->DumpForSnapshot(vec); + return; case JSType::LINE_STRING: case JSType::CONSTANT_STRING: case JSType::TREE_STRING: + case JSType::SLICED_STRING: DumpStringClass(EcmaString::Cast(obj), vec); return; case JSType::JS_NATIVE_POINTER: @@ -4158,7 +4218,7 @@ static void KeyToStd(CString &res, JSTaggedValue key) } } -void JSTaggedValue::DumpForSnapshot(std::vector> &vec, bool isVmMode) const +void JSTaggedValue::DumpForSnapshot(std::vector &vec, bool isVmMode) const { if (IsHeapObject()) { return DumpObject(GetTaggedObject(), vec, isVmMode); @@ -4167,7 +4227,7 @@ void JSTaggedValue::DumpForSnapshot(std::vector> &vec) const +void NumberDictionary::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int size = Size(); @@ -4176,13 +4236,13 @@ void NumberDictionary::DumpForSnapshot(std::vector(JSTaggedNumber(key).GetNumber())); - vec.emplace_back(str, val); + vec.emplace_back( + static_cast(JSTaggedNumber(key).GetNumber()), val, Reference::ReferenceType::ELEMENT); } } } -void NameDictionary::DumpForSnapshot(std::vector> &vec) const +void NameDictionary::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int size = Size(); @@ -4198,7 +4258,7 @@ void NameDictionary::DumpForSnapshot(std::vector> &vec) const +void GlobalDictionary::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int size = Size(); @@ -4214,7 +4274,7 @@ void GlobalDictionary::DumpForSnapshot(std::vector> &vec) const +void LinkedHashSet::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int capacity = NumberOfElements() + NumberOfDeletedElements(); @@ -4229,7 +4289,7 @@ void LinkedHashSet::DumpForSnapshot(std::vector> &vec) const +void LinkedHashMap::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int capacity = NumberOfElements() + NumberOfDeletedElements(); @@ -4245,12 +4305,12 @@ void LinkedHashMap::DumpForSnapshot(std::vector> &vec) const +void TaggedTreeMap::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; - int capacity = NumberOfElements() + NumberOfDeletedElements(); + uint32_t capacity = NumberOfElements() + NumberOfDeletedElements(); vec.reserve(vec.size() + capacity); - for (int index = 0; index < capacity; index++) { + for (uint32_t index = 0; index < capacity; index++) { JSTaggedValue key(GetKey(index)); if (!key.IsUndefined() && !key.IsHole() && !key.IsNull()) { JSTaggedValue val = GetValue(index); @@ -4261,12 +4321,12 @@ void TaggedTreeMap::DumpForSnapshot(std::vector> &vec) const +void TaggedTreeSet::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; - int capacity = NumberOfElements() + NumberOfDeletedElements(); + uint32_t capacity = NumberOfElements() + NumberOfDeletedElements(); vec.reserve(vec.size() + capacity); - for (int index = 0; index < capacity; index++) { + for (uint32_t index = 0; index < capacity; index++) { JSTaggedValue key(GetKey(index)); if (!key.IsUndefined() && !key.IsHole() && !key.IsNull()) { CString str; @@ -4276,7 +4336,7 @@ void TaggedTreeSet::DumpForSnapshot(std::vector> &vec) const +void TaggedDoubleList::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int capacity = NumberOfNodes(); @@ -4289,7 +4349,7 @@ void 
TaggedDoubleList::DumpForSnapshot(std::vector> &vec) const +void TaggedSingleList::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; int capacity = NumberOfNodes(); @@ -4302,16 +4362,16 @@ void TaggedSingleList::DumpForSnapshot(std::vector> &vec) const +void JSObject::DumpForSnapshot(std::vector &vec) const { DISALLOW_GARBAGE_COLLECTION; JSHClass *jshclass = GetJSHClass(); - vec.emplace_back("__proto__", jshclass->GetPrototype()); + vec.emplace_back(CString("__proto__"), jshclass->GetPrototype()); TaggedArray *elements = TaggedArray::Cast(GetElements().GetTaggedObject()); if (elements->GetLength() == 0) { } else if (!elements->IsDictionaryMode()) { - DumpArrayClass(elements, vec); + DumpElementClass(elements, vec); } else { NumberDictionary *dict = NumberDictionary::Cast(elements); dict->DumpForSnapshot(vec); @@ -4339,7 +4399,7 @@ void JSObject::DumpForSnapshot(std::vector> &v ASSERT(i == static_cast(attr.GetOffset())); JSTaggedValue val; if (attr.IsInlinedProps()) { - val = GetPropertyInlinedProps(i); + val = GetPropertyInlinedPropsWithRep(i, attr); } else { val = properties->Get(i - static_cast(jshclass->GetInlinedProperties())); } @@ -4354,76 +4414,76 @@ void JSObject::DumpForSnapshot(std::vector> &v } } -void JSHClass::DumpForSnapshot([[maybe_unused]] std::vector> &vec) const +void JSHClass::DumpForSnapshot([[maybe_unused]] std::vector &vec) const { } -void JSFunction::DumpForSnapshot(std::vector> &vec) const +void JSFunction::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ProtoOrHClass", GetProtoOrHClass()); - vec.emplace_back("LexicalEnv", GetLexicalEnv()); - vec.emplace_back("HomeObject", GetHomeObject()); - vec.emplace_back("FunctionKind", JSTaggedValue(static_cast(GetFunctionKind()))); - vec.emplace_back("FunctionExtraInfo", GetFunctionExtraInfo()); + vec.emplace_back(CString("ProtoOrHClass"), GetProtoOrHClass()); + vec.emplace_back(CString("LexicalEnv"), GetLexicalEnv()); + vec.emplace_back(CString("HomeObject"), GetHomeObject()); + vec.emplace_back(CString("FunctionKind"), JSTaggedValue(static_cast(GetFunctionKind()))); + vec.emplace_back(CString("FunctionExtraInfo"), GetFunctionExtraInfo()); JSObject::DumpForSnapshot(vec); } -void Method::DumpForSnapshot(std::vector> &vec) const +void Method::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ConstantPool", GetConstantPool()); - vec.emplace_back("ProfileTypeInfo", GetProfileTypeInfo()); + vec.emplace_back(CString("ConstantPool"), GetConstantPool()); + vec.emplace_back(CString("ProfileTypeInfo"), GetProfileTypeInfo()); } -void Program::DumpForSnapshot(std::vector> &vec) const +void Program::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("MainFunction", GetMainFunction()); + vec.emplace_back(CString("MainFunction"), GetMainFunction()); } -void ConstantPool::DumpForSnapshot(std::vector> &vec) const +void ConstantPool::DumpForSnapshot(std::vector &vec) const { DumpArrayClass(this, vec); } -void VTable::DumpForSnapshot(std::vector> &vec) const +void VTable::DumpForSnapshot(std::vector &vec) const { DumpArrayClass(this, vec); } -void COWTaggedArray::DumpForSnapshot(std::vector> &vec) const +void COWTaggedArray::DumpForSnapshot(std::vector &vec) const { DumpArrayClass(this, vec); } -void JSBoundFunction::DumpForSnapshot(std::vector> &vec) const +void JSBoundFunction::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); - vec.emplace_back("BoundTarget", GetBoundTarget()); - vec.emplace_back("BoundThis", GetBoundThis()); - 
vec.emplace_back("BoundArguments", GetBoundArguments()); + vec.emplace_back(CString("BoundTarget"), GetBoundTarget()); + vec.emplace_back(CString("BoundThis"), GetBoundThis()); + vec.emplace_back(CString("BoundArguments"), GetBoundArguments()); } -void JSPrimitiveRef::DumpForSnapshot(std::vector> &vec) const +void JSPrimitiveRef::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("subValue", GetValue()); + vec.emplace_back(CString("subValue"), GetValue()); JSObject::DumpForSnapshot(vec); } -void BigInt::DumpForSnapshot(std::vector> &vec) const +void BigInt::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Length", JSTaggedValue(GetLength())); - vec.emplace_back("Sign", JSTaggedValue(GetSign())); + vec.emplace_back(CString("Length"), JSTaggedValue(GetLength())); + vec.emplace_back(CString("Sign"), JSTaggedValue(GetSign())); } -void JSDate::DumpForSnapshot(std::vector> &vec) const +void JSDate::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("time", GetTime()); - vec.emplace_back("localOffset", GetLocalOffset()); + vec.emplace_back(CString("time"), GetTime()); + vec.emplace_back(CString("localOffset"), GetLocalOffset()); JSObject::DumpForSnapshot(vec); } -void JSMap::DumpForSnapshot(std::vector> &vec) const +void JSMap::DumpForSnapshot(std::vector &vec) const { LinkedHashMap *map = LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject()); map->DumpForSnapshot(vec); @@ -4431,25 +4491,25 @@ void JSMap::DumpForSnapshot(std::vector> &vec) JSObject::DumpForSnapshot(vec); } -void JSForInIterator::DumpForSnapshot(std::vector> &vec) const +void JSForInIterator::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Object", GetObject()); - vec.emplace_back("WasVisited", JSTaggedValue(GetWasVisited())); - vec.emplace_back("VisitedObjs", GetVisitedObjs()); - vec.emplace_back("RemainingKeys", GetRemainingKeys()); + vec.emplace_back(CString("Object"), GetObject()); + vec.emplace_back(CString("WasVisited"), JSTaggedValue(GetWasVisited())); + vec.emplace_back(CString("VisitedObjs"), GetVisitedObjs()); + vec.emplace_back(CString("RemainingKeys"), GetRemainingKeys()); JSObject::DumpForSnapshot(vec); } -void JSMapIterator::DumpForSnapshot(std::vector> &vec) const +void JSMapIterator::DumpForSnapshot(std::vector &vec) const { LinkedHashMap *map = LinkedHashMap::Cast(GetIteratedMap().GetTaggedObject()); map->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } -void JSSet::DumpForSnapshot(std::vector> &vec) const +void JSSet::DumpForSnapshot(std::vector &vec) const { LinkedHashSet *set = LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject()); set->DumpForSnapshot(vec); @@ -4457,7 +4517,7 @@ void JSSet::DumpForSnapshot(std::vector> &vec) JSObject::DumpForSnapshot(vec); } -void JSWeakMap::DumpForSnapshot(std::vector> &vec) const +void JSWeakMap::DumpForSnapshot(std::vector &vec) const { LinkedHashMap *map = LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject()); map->DumpForSnapshot(vec); @@ -4465,7 +4525,7 @@ void JSWeakMap::DumpForSnapshot(std::vector> & JSObject::DumpForSnapshot(vec); } -void JSWeakSet::DumpForSnapshot(std::vector> &vec) const +void JSWeakSet::DumpForSnapshot(std::vector &vec) const { LinkedHashSet *set = 
LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject()); set->DumpForSnapshot(vec); @@ -4473,930 +4533,929 @@ void JSWeakSet::DumpForSnapshot(std::vector> & JSObject::DumpForSnapshot(vec); } -void JSWeakRef::DumpForSnapshot(std::vector> &vec) const +void JSWeakRef::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("WeakObject", GetWeakObject()); + vec.emplace_back(CString("WeakObject"), GetWeakObject()); JSObject::DumpForSnapshot(vec); } -void JSFinalizationRegistry::DumpForSnapshot(std::vector> &vec) const +void JSFinalizationRegistry::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("CleanupCallback", GetCleanupCallback()); + vec.emplace_back(CString("CleanupCallback"), GetCleanupCallback()); LinkedHashMap *map = LinkedHashMap::Cast(GetMaybeUnregister().GetTaggedObject()); map->DumpForSnapshot(vec); - vec.emplace_back("MaybeUnregister", GetMaybeUnregister()); + vec.emplace_back(CString("MaybeUnregister"), GetMaybeUnregister()); JSObject::DumpForSnapshot(vec); } -void CellRecord::DumpForSnapshot(std::vector> &vec) const +void CellRecord::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("WeakRefTarget", GetWeakRefTarget()); - vec.emplace_back("HeldValue", GetHeldValue()); + vec.emplace_back(CString("WeakRefTarget"), GetWeakRefTarget()); + vec.emplace_back(CString("HeldValue"), GetHeldValue()); } -void JSSetIterator::DumpForSnapshot(std::vector> &vec) const +void JSSetIterator::DumpForSnapshot(std::vector &vec) const { LinkedHashSet *set = LinkedHashSet::Cast(GetIteratedSet().GetTaggedObject()); set->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } -void JSArray::DumpForSnapshot(std::vector> &vec) const +void JSArray::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPIArrayList::DumpForSnapshot(std::vector> &vec) const +void JSAPIArrayList::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPIArrayListIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIArrayListIterator::DumpForSnapshot(std::vector &vec) const { JSAPIArrayList *arraylist = JSAPIArrayList::Cast(GetIteratedArrayList().GetTaggedObject()); arraylist->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } -void JSAPILightWeightMap::DumpForSnapshot(std::vector> &vec) const +void JSAPILightWeightMap::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPILightWeightMapIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPILightWeightMapIterator::DumpForSnapshot(std::vector &vec) const { JSAPILightWeightMap *map = JSAPILightWeightMap::Cast(GetIteratedLightWeightMap().GetTaggedObject()); map->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } -void JSAPIQueue::DumpForSnapshot(std::vector> &vec) const +void 
JSAPIQueue::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPIQueueIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIQueueIterator::DumpForSnapshot(std::vector &vec) const { JSAPIQueue *queue = JSAPIQueue::Cast(GetIteratedQueue().GetTaggedObject()); queue->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } -void JSAPIDeque::DumpForSnapshot(std::vector> &vec) const +void JSAPIDeque::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPIDequeIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIDequeIterator::DumpForSnapshot(std::vector &vec) const { JSAPIDeque *deque = JSAPIDeque::Cast(GetIteratedDeque().GetTaggedObject()); deque->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } -void JSAPILightWeightSet::DumpForSnapshot(std::vector> &vec) const +void JSAPILightWeightSet::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPILightWeightSetIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPILightWeightSetIterator::DumpForSnapshot(std::vector &vec) const { JSAPILightWeightSet *set = JSAPILightWeightSet::Cast(GetIteratedLightWeightSet().GetTaggedObject()); set->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } -void JSAPIStack::DumpForSnapshot(std::vector> &vec) const +void JSAPIStack::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPIStackIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIStackIterator::DumpForSnapshot(std::vector &vec) const { JSAPIStack *stack = JSAPIStack::Cast(GetIteratedStack().GetTaggedObject()); stack->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); JSObject::DumpForSnapshot(vec); } -void JSArrayIterator::DumpForSnapshot(std::vector> &vec) const +void JSArrayIterator::DumpForSnapshot(std::vector &vec) const { JSArray *array = JSArray::Cast(GetIteratedArray().GetTaggedObject()); array->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); - vec.emplace_back("IterationKind", JSTaggedValue(static_cast(GetIterationKind()))); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("IterationKind"), JSTaggedValue(static_cast(GetIterationKind()))); JSObject::DumpForSnapshot(vec); } -void JSAPIVector::DumpForSnapshot(std::vector> &vec) const +void JSAPIVector::DumpForSnapshot(std::vector &vec) const { JSObject::DumpForSnapshot(vec); } -void JSAPIVectorIterator::DumpForSnapshot(std::vector> &vec) const +void JSAPIVectorIterator::DumpForSnapshot(std::vector &vec) const { JSAPIVector *vector = JSAPIVector::Cast(GetIteratedVector().GetTaggedObject()); vector->DumpForSnapshot(vec); - vec.emplace_back("NextIndex", JSTaggedValue(GetNextIndex())); + vec.emplace_back(CString("NextIndex"), JSTaggedValue(GetNextIndex())); 
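The mechanical rename running through all of these dump routines, `std::vector<std::pair<CString, JSTaggedValue>>` becoming `std::vector<Reference>` with every literal key wrapped in `CString(...)` and elements pushed as `(index, value, Reference::ReferenceType::ELEMENT)`, implies a record type with two constructors. A hedged sketch of the minimum shape that makes both `emplace_back` forms compile; the runtime's actual `Reference` comes from `ecmascript/dfx/hprof/heap_snapshot.h` (newly included by dump.cpp above) and its members may differ:

```cpp
#include <cstdint>
#include <string>
#include <vector>

using CString = std::string;                 // stand-in for the runtime's CString
struct JSTaggedValue { uint64_t raw = 0; };  // stand-in for the runtime's tagged value

struct Reference {
    enum class ReferenceType { DEFAULT, ELEMENT };

    // Named property: vec.emplace_back(CString("NextIndex"), value)
    Reference(CString name, JSTaggedValue value) : name(std::move(name)), value(value) {}

    // Array element: vec.emplace_back(index, value, ReferenceType::ELEMENT)
    Reference(uint32_t index, JSTaggedValue value, ReferenceType type)
        : index(index), value(value), type(type) {}

    CString name;
    uint32_t index {0};
    JSTaggedValue value;
    ReferenceType type {ReferenceType::DEFAULT};
};

// Both call shapes from the diff resolve to one element type:
//   std::vector<Reference> vec;
//   vec.emplace_back(CString("NextIndex"), JSTaggedValue{});
//   vec.emplace_back(1U, JSTaggedValue{}, Reference::ReferenceType::ELEMENT);
```

Carrying the reference kind lets the snapshot writer distinguish indexed element edges from named property edges instead of round-tripping indices through strings, which is also why `DumpElementClass` and `NumberDictionary::DumpForSnapshot` above now push raw indices.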
JSObject::DumpForSnapshot(vec); } -void JSStringIterator::DumpForSnapshot(std::vector> &vec) const +void JSStringIterator::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("IteratedString", GetIteratedString()); - vec.emplace_back("StringIteratorNextIndex", JSTaggedValue(GetStringIteratorNextIndex())); + vec.emplace_back(CString("IteratedString"), GetIteratedString()); + vec.emplace_back(CString("StringIteratorNextIndex"), JSTaggedValue(GetStringIteratorNextIndex())); JSObject::DumpForSnapshot(vec); } -void JSTypedArray::DumpForSnapshot(std::vector> &vec) const +void JSTypedArray::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 5; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("viewed-array-buffer", GetViewedArrayBufferOrByteArray()); - vec.emplace_back("typed-array-name", GetTypedArrayName()); - vec.emplace_back("byte-length", JSTaggedValue(GetByteLength())); - vec.emplace_back("byte-offset", JSTaggedValue(GetByteOffset())); - vec.emplace_back("array-length", JSTaggedValue(GetArrayLength())); + vec.emplace_back(CString("viewed-array-buffer"), GetViewedArrayBufferOrByteArray()); + vec.emplace_back(CString("typed-array-name"), GetTypedArrayName()); + vec.emplace_back(CString("byte-length"), JSTaggedValue(GetByteLength())); + vec.emplace_back(CString("byte-offset"), JSTaggedValue(GetByteOffset())); + vec.emplace_back(CString("array-length"), JSTaggedValue(GetArrayLength())); } -void ByteArray::DumpForSnapshot(std::vector> &vec) const +void ByteArray::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("array-length", JSTaggedValue(GetArrayLength())); - vec.emplace_back("byte-length", JSTaggedValue(GetByteLength())); + vec.emplace_back(CString("array-length"), JSTaggedValue(GetArrayLength())); + vec.emplace_back(CString("byte-length"), JSTaggedValue(GetByteLength())); } -void JSRegExp::DumpForSnapshot(std::vector> &vec) const +void JSRegExp::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("originalSource", GetOriginalSource()); - vec.emplace_back("originalFlags", GetOriginalFlags()); - vec.emplace_back("groupName", GetGroupName()); + vec.emplace_back(CString("originalSource"), GetOriginalSource()); + vec.emplace_back(CString("originalFlags"), GetOriginalFlags()); + vec.emplace_back(CString("groupName"), GetGroupName()); JSObject::DumpForSnapshot(vec); } -void JSRegExpIterator::DumpForSnapshot(std::vector> &vec) const +void JSRegExpIterator::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("IteratingRegExp", GetIteratingRegExp()); - vec.emplace_back("IteratedString", GetIteratedString()); - vec.emplace_back("Global", JSTaggedValue(GetGlobal())); - vec.emplace_back("Unicode", JSTaggedValue(GetUnicode())); - vec.emplace_back("Done", JSTaggedValue(GetDone())); + vec.emplace_back(CString("IteratingRegExp"), GetIteratingRegExp()); + vec.emplace_back(CString("IteratedString"), GetIteratedString()); + vec.emplace_back(CString("Global"), JSTaggedValue(GetGlobal())); + vec.emplace_back(CString("Unicode"), JSTaggedValue(GetUnicode())); + vec.emplace_back(CString("Done"), JSTaggedValue(GetDone())); JSObject::DumpForSnapshot(vec); } -void JSProxy::DumpForSnapshot(std::vector> &vec) const +void JSProxy::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("target", GetTarget()); - vec.emplace_back("handler", GetHandler()); + vec.emplace_back(CString("target"), GetTarget()); + vec.emplace_back(CString("handler"), GetHandler()); } -void 
JSSymbol::DumpForSnapshot(std::vector> &vec) const +void JSSymbol::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("hash-field", JSTaggedValue(GetHashField())); - vec.emplace_back("flags", JSTaggedValue(GetFlags())); - vec.emplace_back("description", GetDescription()); + vec.emplace_back(CString("hash-field"), JSTaggedValue(GetHashField())); + vec.emplace_back(CString("flags"), JSTaggedValue(GetFlags())); + vec.emplace_back(CString("description"), GetDescription()); } -void AccessorData::DumpForSnapshot(std::vector> &vec) const +void AccessorData::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("getter", GetGetter()); - vec.emplace_back("setter", GetSetter()); + vec.emplace_back(CString("getter"), GetGetter()); + vec.emplace_back(CString("setter"), GetSetter()); } -void LexicalEnv::DumpForSnapshot(std::vector> &vec) const +void LexicalEnv::DumpForSnapshot(std::vector &vec) const { DumpArrayClass(this, vec); } -void GlobalEnv::DumpForSnapshot(std::vector> &vec) const +void GlobalEnv::DumpForSnapshot(std::vector &vec) const { auto globalConst = GetJSThread()->GlobalConstants(); // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 137; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("ObjectFunction", GetObjectFunction().GetTaggedValue()); - vec.emplace_back("FunctionFunction", GetFunctionFunction().GetTaggedValue()); - vec.emplace_back("NumberFunction", GetNumberFunction().GetTaggedValue()); - vec.emplace_back("BigIntFunction", GetBigIntFunction().GetTaggedValue()); - vec.emplace_back("DateFunction", GetDateFunction().GetTaggedValue()); - vec.emplace_back("BooleanFunction", GetBooleanFunction().GetTaggedValue()); - vec.emplace_back("ErrorFunction", GetErrorFunction().GetTaggedValue()); - vec.emplace_back("ArrayFunction", GetArrayFunction().GetTaggedValue()); - vec.emplace_back("TypedArrayFunction", GetTypedArrayFunction().GetTaggedValue()); - vec.emplace_back("Int8ArrayFunction", GetInt8ArrayFunction().GetTaggedValue()); - vec.emplace_back("Uint8ArrayFunction", GetUint8ArrayFunction().GetTaggedValue()); - vec.emplace_back("Uint8ClampedArrayFunction", GetUint8ClampedArrayFunction().GetTaggedValue()); - vec.emplace_back("Int16ArrayFunction", GetInt16ArrayFunction().GetTaggedValue()); - vec.emplace_back("Uint16ArrayFunction", GetUint16ArrayFunction().GetTaggedValue()); - vec.emplace_back("Int32ArrayFunction", GetInt32ArrayFunction().GetTaggedValue()); - vec.emplace_back("Uint32ArrayFunction", GetUint32ArrayFunction().GetTaggedValue()); - vec.emplace_back("Float32ArrayFunction", GetFloat32ArrayFunction().GetTaggedValue()); - vec.emplace_back("Float64ArrayFunction", GetFloat64ArrayFunction().GetTaggedValue()); - vec.emplace_back("ArrayBufferFunction", GetArrayBufferFunction().GetTaggedValue()); - vec.emplace_back("SharedArrayBufferFunction", GetSharedArrayBufferFunction().GetTaggedValue()); - vec.emplace_back("SymbolFunction", GetSymbolFunction().GetTaggedValue()); - vec.emplace_back("RangeErrorFunction", GetRangeErrorFunction().GetTaggedValue()); - vec.emplace_back("ReferenceErrorFunction", GetReferenceErrorFunction().GetTaggedValue()); - vec.emplace_back("TypeErrorFunction", GetTypeErrorFunction().GetTaggedValue()); - vec.emplace_back("AggregateErrorFunction", GetAggregateErrorFunction().GetTaggedValue()); - vec.emplace_back("URIErrorFunction", GetURIErrorFunction().GetTaggedValue()); - vec.emplace_back("SyntaxErrorFunction", GetSyntaxErrorFunction().GetTaggedValue()); - vec.emplace_back("EvalErrorFunction", 
-    vec.emplace_back("OOMErrorFunction", GetOOMErrorFunction().GetTaggedValue());
-    vec.emplace_back("RegExpFunction", GetRegExpFunction().GetTaggedValue());
-    vec.emplace_back("BuiltinsSetFunction", GetBuiltinsSetFunction().GetTaggedValue());
-    vec.emplace_back("BuiltinsMapFunction", GetBuiltinsMapFunction().GetTaggedValue());
-    vec.emplace_back("BuiltinsWeakSetFunction", GetBuiltinsWeakSetFunction().GetTaggedValue());
-    vec.emplace_back("BuiltinsWeakMapFunction", GetBuiltinsWeakMapFunction().GetTaggedValue());
-    vec.emplace_back("BuiltinsWeakRefFunction", GetBuiltinsWeakRefFunction().GetTaggedValue());
-    vec.emplace_back("BuiltinsFinalizationRegistryFunction",
+    vec.emplace_back(CString("ObjectFunction"), GetObjectFunction().GetTaggedValue());
+    vec.emplace_back(CString("FunctionFunction"), GetFunctionFunction().GetTaggedValue());
+    vec.emplace_back(CString("NumberFunction"), GetNumberFunction().GetTaggedValue());
+    vec.emplace_back(CString("BigIntFunction"), GetBigIntFunction().GetTaggedValue());
+    vec.emplace_back(CString("DateFunction"), GetDateFunction().GetTaggedValue());
+    vec.emplace_back(CString("BooleanFunction"), GetBooleanFunction().GetTaggedValue());
+    vec.emplace_back(CString("ErrorFunction"), GetErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("ArrayFunction"), GetArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("TypedArrayFunction"), GetTypedArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Int8ArrayFunction"), GetInt8ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Uint8ArrayFunction"), GetUint8ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Uint8ClampedArrayFunction"), GetUint8ClampedArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Int16ArrayFunction"), GetInt16ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Uint16ArrayFunction"), GetUint16ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Int32ArrayFunction"), GetInt32ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Uint32ArrayFunction"), GetUint32ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Float32ArrayFunction"), GetFloat32ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("Float64ArrayFunction"), GetFloat64ArrayFunction().GetTaggedValue());
+    vec.emplace_back(CString("ArrayBufferFunction"), GetArrayBufferFunction().GetTaggedValue());
+    vec.emplace_back(CString("SharedArrayBufferFunction"), GetSharedArrayBufferFunction().GetTaggedValue());
+    vec.emplace_back(CString("SymbolFunction"), GetSymbolFunction().GetTaggedValue());
+    vec.emplace_back(CString("RangeErrorFunction"), GetRangeErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("ReferenceErrorFunction"), GetReferenceErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("TypeErrorFunction"), GetTypeErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("AggregateErrorFunction"), GetAggregateErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("URIErrorFunction"), GetURIErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("SyntaxErrorFunction"), GetSyntaxErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("EvalErrorFunction"), GetEvalErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("OOMErrorFunction"), GetOOMErrorFunction().GetTaggedValue());
+    vec.emplace_back(CString("RegExpFunction"), GetRegExpFunction().GetTaggedValue());
+    vec.emplace_back(CString("BuiltinsSetFunction"), GetBuiltinsSetFunction().GetTaggedValue());
+    vec.emplace_back(CString("BuiltinsMapFunction"), GetBuiltinsMapFunction().GetTaggedValue());
+    vec.emplace_back(CString("BuiltinsWeakSetFunction"), GetBuiltinsWeakSetFunction().GetTaggedValue());
+    vec.emplace_back(CString("BuiltinsWeakMapFunction"), GetBuiltinsWeakMapFunction().GetTaggedValue());
+    vec.emplace_back(CString("BuiltinsWeakRefFunction"), GetBuiltinsWeakRefFunction().GetTaggedValue());
+    vec.emplace_back(CString("BuiltinsFinalizationRegistryFunction"),
         GetBuiltinsFinalizationRegistryFunction().GetTaggedValue());
-    vec.emplace_back("MathFunction", GetMathFunction().GetTaggedValue());
-    vec.emplace_back("AtomicsFunction", GetAtomicsFunction().GetTaggedValue());
-    vec.emplace_back("JsonFunction", GetJsonFunction().GetTaggedValue());
-    vec.emplace_back("StringFunction", GetStringFunction().GetTaggedValue());
-    vec.emplace_back("ProxyFunction", GetProxyFunction().GetTaggedValue());
-    vec.emplace_back("ReflectFunction", GetReflectFunction().GetTaggedValue());
-    vec.emplace_back("AsyncFunction", GetAsyncFunction().GetTaggedValue());
-    vec.emplace_back("AsyncFunctionPrototype", GetAsyncFunctionPrototype().GetTaggedValue());
-    vec.emplace_back("JSGlobalObject", GetJSGlobalObject().GetTaggedValue());
-    vec.emplace_back("EmptyArray", globalConst->GetEmptyArray());
-    vec.emplace_back("EmptyString", globalConst->GetEmptyString());
-    vec.emplace_back("EmptyTaggedQueue", globalConst->GetEmptyTaggedQueue());
-    vec.emplace_back("PrototypeString", globalConst->GetPrototypeString());
-    vec.emplace_back("HasInstanceSymbol", GetHasInstanceSymbol().GetTaggedValue());
-    vec.emplace_back("IsConcatSpreadableSymbol", GetIsConcatSpreadableSymbol().GetTaggedValue());
-    vec.emplace_back("ToStringTagSymbol", GetToStringTagSymbol().GetTaggedValue());
-    vec.emplace_back("IteratorSymbol", GetIteratorSymbol().GetTaggedValue());
-    vec.emplace_back("AsyncIteratorSymbol", GetAsyncIteratorSymbol().GetTaggedValue());
-    vec.emplace_back("MatchSymbol", GetMatchSymbol().GetTaggedValue());
-    vec.emplace_back("MatchAllSymbol", GetMatchAllSymbol().GetTaggedValue());
-    vec.emplace_back("ReplaceSymbol", GetReplaceSymbol().GetTaggedValue());
-    vec.emplace_back("SearchSymbol", GetSearchSymbol().GetTaggedValue());
-    vec.emplace_back("SpeciesSymbol", GetSpeciesSymbol().GetTaggedValue());
-    vec.emplace_back("SplitSymbol", GetSplitSymbol().GetTaggedValue());
-    vec.emplace_back("ToPrimitiveSymbol", GetToPrimitiveSymbol().GetTaggedValue());
-    vec.emplace_back("UnscopablesSymbol", GetUnscopablesSymbol().GetTaggedValue());
-    vec.emplace_back("HoleySymbol", GetHoleySymbol().GetTaggedValue());
-    vec.emplace_back("AttachSymbol", GetAttachSymbol().GetTaggedValue());
-    vec.emplace_back("DetachSymbol", GetDetachSymbol().GetTaggedValue());
-    vec.emplace_back("ConstructorString", globalConst->GetConstructorString());
-    vec.emplace_back("IteratorPrototype", GetIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("ForinIteratorPrototype", GetForinIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("StringIterator", GetStringIterator().GetTaggedValue());
-    vec.emplace_back("MapIteratorPrototype", GetMapIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("SetIteratorPrototype", GetSetIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("RegExpIteratorPrototype", GetRegExpIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("ArrayIteratorPrototype", GetArrayIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("StringIteratorPrototype", GetStringIteratorPrototype().GetTaggedValue());
-    vec.emplace_back("LengthString", globalConst->GetLengthString());
-    vec.emplace_back("ValueString", globalConst->GetValueString());
-    vec.emplace_back("WritableString", globalConst->GetWritableString());
-    vec.emplace_back("GetString", globalConst->GetGetString());
-    vec.emplace_back("SetString", globalConst->GetSetString());
-    vec.emplace_back("EnumerableString", globalConst->GetEnumerableString());
-    vec.emplace_back("ConfigurableString", globalConst->GetConfigurableString());
-    vec.emplace_back("NameString", globalConst->GetNameString());
-    vec.emplace_back("ValueOfString", globalConst->GetValueOfString());
-    vec.emplace_back("ToStringString", globalConst->GetToStringString());
-    vec.emplace_back("ToLocaleStringString", globalConst->GetToLocaleStringString());
-    vec.emplace_back("UndefinedString", globalConst->GetUndefinedString());
-    vec.emplace_back("NullString", globalConst->GetNullString());
-    vec.emplace_back("TrueString", globalConst->GetTrueString());
-    vec.emplace_back("FalseString", globalConst->GetFalseString());
-    vec.emplace_back("RegisterSymbols", GetRegisterSymbols().GetTaggedValue());
-    vec.emplace_back("ThrowTypeError", GetThrowTypeError().GetTaggedValue());
-    vec.emplace_back("GetPrototypeOfString", globalConst->GetGetPrototypeOfString());
-    vec.emplace_back("SetPrototypeOfString", globalConst->GetSetPrototypeOfString());
-    vec.emplace_back("IsExtensibleString", globalConst->GetIsExtensibleString());
-    vec.emplace_back("PreventExtensionsString", globalConst->GetPreventExtensionsString());
-    vec.emplace_back("GetOwnPropertyDescriptorString", globalConst->GetGetOwnPropertyDescriptorString());
-    vec.emplace_back("DefinePropertyString", globalConst->GetDefinePropertyString());
-    vec.emplace_back("HasString", globalConst->GetHasString());
-    vec.emplace_back("DeletePropertyString", globalConst->GetDeletePropertyString());
-    vec.emplace_back("EnumerateString", globalConst->GetEnumerateString());
-    vec.emplace_back("OwnKeysString", globalConst->GetOwnKeysString());
-    vec.emplace_back("ApplyString", globalConst->GetApplyString());
-    vec.emplace_back("ProxyString", globalConst->GetProxyString());
-    vec.emplace_back("RevokeString", globalConst->GetRevokeString());
-    vec.emplace_back("ProxyConstructString", globalConst->GetProxyConstructString());
-    vec.emplace_back("ProxyCallString", globalConst->GetProxyCallString());
-    vec.emplace_back("DoneString", globalConst->GetDoneString());
-    vec.emplace_back("NegativeZeroString", globalConst->GetNegativeZeroString());
-    vec.emplace_back("NextString", globalConst->GetNextString());
-    vec.emplace_back("PromiseThenString", globalConst->GetPromiseThenString());
-    vec.emplace_back("PromiseFunction", GetPromiseFunction().GetTaggedValue());
-    vec.emplace_back("PromiseReactionJob", GetPromiseReactionJob().GetTaggedValue());
-    vec.emplace_back("PromiseResolveThenableJob", GetPromiseResolveThenableJob().GetTaggedValue());
-    vec.emplace_back("DynamicImportJob", GetDynamicImportJob().GetTaggedValue());
-    vec.emplace_back("ScriptJobString", globalConst->GetScriptJobString());
-    vec.emplace_back("PromiseString", globalConst->GetPromiseString());
-    vec.emplace_back("IdentityString", globalConst->GetIdentityString());
-    vec.emplace_back("AsyncFunctionString", globalConst->GetAsyncFunctionString());
-    vec.emplace_back("ThrowerString", globalConst->GetThrowerString());
-    vec.emplace_back("Undefined", globalConst->GetUndefined());
-    vec.emplace_back("ArrayListFunction", globalConst->GetArrayListFunction());
- vec.emplace_back("ArrayListIteratorPrototype", globalConst->GetArrayListIteratorPrototype()); - vec.emplace_back("HashMapIteratorPrototype", globalConst->GetHashMapIteratorPrototype()); - vec.emplace_back("HashSetIteratorPrototype", globalConst->GetHashSetIteratorPrototype()); - vec.emplace_back("LightWeightMapIteratorPrototype", globalConst->GetLightWeightMapIteratorPrototype()); - vec.emplace_back("LightWeightSetIteratorPrototype", globalConst->GetLightWeightSetIteratorPrototype()); - vec.emplace_back("TreeMapIteratorPrototype", globalConst->GetTreeMapIteratorPrototype()); - vec.emplace_back("TreeSetIteratorPrototype", globalConst->GetTreeSetIteratorPrototype()); - vec.emplace_back("VectorFunction", globalConst->GetVectorFunction()); - vec.emplace_back("VectorIteratorPrototype", globalConst->GetVectorIteratorPrototype()); - vec.emplace_back("QueueIteratorPrototype", globalConst->GetQueueIteratorPrototype()); - vec.emplace_back("PlainArrayIteratorPrototype", globalConst->GetPlainArrayIteratorPrototype()); - vec.emplace_back("DequeIteratorPrototype", globalConst->GetDequeIteratorPrototype()); - vec.emplace_back("StackIteratorPrototype", globalConst->GetStackIteratorPrototype()); - vec.emplace_back("LinkedListIteratorPrototype", globalConst->GetLinkedListIteratorPrototype()); - vec.emplace_back("ListIteratorPrototype", globalConst->GetListIteratorPrototype()); - vec.emplace_back("GlobalPatch", GetGlobalPatch().GetTaggedValue()); -} - -void JSDataView::DumpForSnapshot(std::vector> &vec) const -{ - vec.emplace_back("data-view", GetDataView()); - vec.emplace_back("buffer", GetViewedArrayBuffer()); - vec.emplace_back("byte-length", JSTaggedValue(GetByteLength())); - vec.emplace_back("byte-offset", JSTaggedValue(GetByteOffset())); -} - -void JSArrayBuffer::DumpForSnapshot(std::vector> &vec) const -{ - vec.emplace_back("buffer-data", GetArrayBufferData()); - vec.emplace_back("byte-length", JSTaggedValue(GetArrayBufferByteLength())); - vec.emplace_back("shared", JSTaggedValue(GetShared())); -} - -void PromiseReaction::DumpForSnapshot(std::vector> &vec) const -{ - vec.emplace_back("promise-capability", GetPromiseCapability()); - vec.emplace_back("handler", GetHandler()); - vec.emplace_back("type", JSTaggedValue(static_cast(GetType()))); -} - -void PromiseCapability::DumpForSnapshot(std::vector> &vec) const -{ - vec.emplace_back("promise", GetPromise()); - vec.emplace_back("resolve", GetResolve()); - vec.emplace_back("reject", GetReject()); -} - -void PromiseIteratorRecord::DumpForSnapshot(std::vector> &vec) const + vec.emplace_back(CString("MathFunction"), GetMathFunction().GetTaggedValue()); + vec.emplace_back(CString("AtomicsFunction"), GetAtomicsFunction().GetTaggedValue()); + vec.emplace_back(CString("JsonFunction"), GetJsonFunction().GetTaggedValue()); + vec.emplace_back(CString("StringFunction"), GetStringFunction().GetTaggedValue()); + vec.emplace_back(CString("ProxyFunction"), GetProxyFunction().GetTaggedValue()); + vec.emplace_back(CString("ReflectFunction"), GetReflectFunction().GetTaggedValue()); + vec.emplace_back(CString("AsyncFunction"), GetAsyncFunction().GetTaggedValue()); + vec.emplace_back(CString("AsyncFunctionPrototype"), GetAsyncFunctionPrototype().GetTaggedValue()); + vec.emplace_back(CString("JSGlobalObject"), GetJSGlobalObject().GetTaggedValue()); + vec.emplace_back(CString("EmptyArray"), globalConst->GetEmptyArray()); + vec.emplace_back(CString("EmptyString"), globalConst->GetEmptyString()); + vec.emplace_back(CString("EmptyTaggedQueue"), 
+    vec.emplace_back(CString("PrototypeString"), globalConst->GetPrototypeString());
+    vec.emplace_back(CString("HasInstanceSymbol"), GetHasInstanceSymbol().GetTaggedValue());
+    vec.emplace_back(CString("IsConcatSpreadableSymbol"), GetIsConcatSpreadableSymbol().GetTaggedValue());
+    vec.emplace_back(CString("ToStringTagSymbol"), GetToStringTagSymbol().GetTaggedValue());
+    vec.emplace_back(CString("IteratorSymbol"), GetIteratorSymbol().GetTaggedValue());
+    vec.emplace_back(CString("AsyncIteratorSymbol"), GetAsyncIteratorSymbol().GetTaggedValue());
+    vec.emplace_back(CString("MatchSymbol"), GetMatchSymbol().GetTaggedValue());
+    vec.emplace_back(CString("MatchAllSymbol"), GetMatchAllSymbol().GetTaggedValue());
+    vec.emplace_back(CString("ReplaceSymbol"), GetReplaceSymbol().GetTaggedValue());
+    vec.emplace_back(CString("SearchSymbol"), GetSearchSymbol().GetTaggedValue());
+    vec.emplace_back(CString("SpeciesSymbol"), GetSpeciesSymbol().GetTaggedValue());
+    vec.emplace_back(CString("SplitSymbol"), GetSplitSymbol().GetTaggedValue());
+    vec.emplace_back(CString("ToPrimitiveSymbol"), GetToPrimitiveSymbol().GetTaggedValue());
+    vec.emplace_back(CString("UnscopablesSymbol"), GetUnscopablesSymbol().GetTaggedValue());
+    vec.emplace_back(CString("HoleySymbol"), GetHoleySymbol().GetTaggedValue());
+    vec.emplace_back(CString("AttachSymbol"), GetAttachSymbol().GetTaggedValue());
+    vec.emplace_back(CString("DetachSymbol"), GetDetachSymbol().GetTaggedValue());
+    vec.emplace_back(CString("ConstructorString"), globalConst->GetConstructorString());
+    vec.emplace_back(CString("IteratorPrototype"), GetIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("ForinIteratorPrototype"), GetForinIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("StringIterator"), GetStringIterator().GetTaggedValue());
+    vec.emplace_back(CString("MapIteratorPrototype"), GetMapIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("SetIteratorPrototype"), GetSetIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("RegExpIteratorPrototype"), GetRegExpIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("ArrayIteratorPrototype"), GetArrayIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("StringIteratorPrototype"), GetStringIteratorPrototype().GetTaggedValue());
+    vec.emplace_back(CString("LengthString"), globalConst->GetLengthString());
+    vec.emplace_back(CString("ValueString"), globalConst->GetValueString());
+    vec.emplace_back(CString("WritableString"), globalConst->GetWritableString());
+    vec.emplace_back(CString("GetString"), globalConst->GetGetString());
+    vec.emplace_back(CString("SetString"), globalConst->GetSetString());
+    vec.emplace_back(CString("EnumerableString"), globalConst->GetEnumerableString());
+    vec.emplace_back(CString("ConfigurableString"), globalConst->GetConfigurableString());
+    vec.emplace_back(CString("NameString"), globalConst->GetNameString());
+    vec.emplace_back(CString("ValueOfString"), globalConst->GetValueOfString());
+    vec.emplace_back(CString("ToStringString"), globalConst->GetToStringString());
+    vec.emplace_back(CString("ToLocaleStringString"), globalConst->GetToLocaleStringString());
+    vec.emplace_back(CString("UndefinedString"), globalConst->GetUndefinedString());
+    vec.emplace_back(CString("NullString"), globalConst->GetNullString());
+    vec.emplace_back(CString("TrueString"), globalConst->GetTrueString());
+    vec.emplace_back(CString("FalseString"), globalConst->GetFalseString());
+    vec.emplace_back(CString("RegisterSymbols"), GetRegisterSymbols().GetTaggedValue());
+    vec.emplace_back(CString("ThrowTypeError"), GetThrowTypeError().GetTaggedValue());
+    vec.emplace_back(CString("GetPrototypeOfString"), globalConst->GetGetPrototypeOfString());
+    vec.emplace_back(CString("SetPrototypeOfString"), globalConst->GetSetPrototypeOfString());
+    vec.emplace_back(CString("IsExtensibleString"), globalConst->GetIsExtensibleString());
+    vec.emplace_back(CString("PreventExtensionsString"), globalConst->GetPreventExtensionsString());
+    vec.emplace_back(CString("GetOwnPropertyDescriptorString"), globalConst->GetGetOwnPropertyDescriptorString());
+    vec.emplace_back(CString("DefinePropertyString"), globalConst->GetDefinePropertyString());
+    vec.emplace_back(CString("HasString"), globalConst->GetHasString());
+    vec.emplace_back(CString("DeletePropertyString"), globalConst->GetDeletePropertyString());
+    vec.emplace_back(CString("EnumerateString"), globalConst->GetEnumerateString());
+    vec.emplace_back(CString("OwnKeysString"), globalConst->GetOwnKeysString());
+    vec.emplace_back(CString("ApplyString"), globalConst->GetApplyString());
+    vec.emplace_back(CString("ProxyString"), globalConst->GetProxyString());
+    vec.emplace_back(CString("RevokeString"), globalConst->GetRevokeString());
+    vec.emplace_back(CString("ProxyConstructString"), globalConst->GetProxyConstructString());
+    vec.emplace_back(CString("ProxyCallString"), globalConst->GetProxyCallString());
+    vec.emplace_back(CString("DoneString"), globalConst->GetDoneString());
+    vec.emplace_back(CString("NegativeZeroString"), globalConst->GetNegativeZeroString());
+    vec.emplace_back(CString("NextString"), globalConst->GetNextString());
+    vec.emplace_back(CString("PromiseThenString"), globalConst->GetPromiseThenString());
+    vec.emplace_back(CString("PromiseFunction"), GetPromiseFunction().GetTaggedValue());
+    vec.emplace_back(CString("PromiseReactionJob"), GetPromiseReactionJob().GetTaggedValue());
+    vec.emplace_back(CString("PromiseResolveThenableJob"), GetPromiseResolveThenableJob().GetTaggedValue());
+    vec.emplace_back(CString("DynamicImportJob"), GetDynamicImportJob().GetTaggedValue());
+    vec.emplace_back(CString("ScriptJobString"), globalConst->GetScriptJobString());
+    vec.emplace_back(CString("PromiseString"), globalConst->GetPromiseString());
+    vec.emplace_back(CString("IdentityString"), globalConst->GetIdentityString());
+    vec.emplace_back(CString("AsyncFunctionString"), globalConst->GetAsyncFunctionString());
+    vec.emplace_back(CString("ThrowerString"), globalConst->GetThrowerString());
+    vec.emplace_back(CString("Undefined"), globalConst->GetUndefined());
+    vec.emplace_back(CString("ArrayListFunction"), globalConst->GetArrayListFunction());
+    vec.emplace_back(CString("ArrayListIteratorPrototype"), globalConst->GetArrayListIteratorPrototype());
+    vec.emplace_back(CString("HashMapIteratorPrototype"), globalConst->GetHashMapIteratorPrototype());
+    vec.emplace_back(CString("HashSetIteratorPrototype"), globalConst->GetHashSetIteratorPrototype());
+    vec.emplace_back(CString("LightWeightMapIteratorPrototype"), globalConst->GetLightWeightMapIteratorPrototype());
+    vec.emplace_back(CString("LightWeightSetIteratorPrototype"), globalConst->GetLightWeightSetIteratorPrototype());
+    vec.emplace_back(CString("TreeMapIteratorPrototype"), globalConst->GetTreeMapIteratorPrototype());
+    vec.emplace_back(CString("TreeSetIteratorPrototype"), globalConst->GetTreeSetIteratorPrototype());
+    vec.emplace_back(CString("VectorFunction"), globalConst->GetVectorFunction());
+    vec.emplace_back(CString("VectorIteratorPrototype"), globalConst->GetVectorIteratorPrototype());
+    vec.emplace_back(CString("QueueIteratorPrototype"), globalConst->GetQueueIteratorPrototype());
+    vec.emplace_back(CString("PlainArrayIteratorPrototype"), globalConst->GetPlainArrayIteratorPrototype());
+    vec.emplace_back(CString("DequeIteratorPrototype"), globalConst->GetDequeIteratorPrototype());
+    vec.emplace_back(CString("StackIteratorPrototype"), globalConst->GetStackIteratorPrototype());
+    vec.emplace_back(CString("LinkedListIteratorPrototype"), globalConst->GetLinkedListIteratorPrototype());
+    vec.emplace_back(CString("ListIteratorPrototype"), globalConst->GetListIteratorPrototype());
+    vec.emplace_back(CString("GlobalPatch"), GetGlobalPatch().GetTaggedValue());
+}
+
+void JSDataView::DumpForSnapshot(std::vector<Reference> &vec) const
+{
+    vec.emplace_back(CString("data-view"), GetDataView());
+    vec.emplace_back(CString("buffer"), GetViewedArrayBuffer());
+    vec.emplace_back(CString("byte-length"), JSTaggedValue(GetByteLength()));
+    vec.emplace_back(CString("byte-offset"), JSTaggedValue(GetByteOffset()));
+}
+
+void JSArrayBuffer::DumpForSnapshot(std::vector<Reference> &vec) const
+{
+    vec.emplace_back(CString("buffer-data"), GetArrayBufferData());
+    vec.emplace_back(CString("byte-length"), JSTaggedValue(GetArrayBufferByteLength()));
+    vec.emplace_back(CString("shared"), JSTaggedValue(GetShared()));
+}
+
+void PromiseReaction::DumpForSnapshot(std::vector<Reference> &vec) const
+{
+    vec.emplace_back(CString("promise-capability"), GetPromiseCapability());
+    vec.emplace_back(CString("handler"), GetHandler());
+    vec.emplace_back(CString("type"), JSTaggedValue(static_cast<int>(GetType())));
+}
+
+void PromiseCapability::DumpForSnapshot(std::vector<Reference> &vec) const
+{
+    vec.emplace_back(CString("promise"), GetPromise());
+    vec.emplace_back(CString("resolve"), GetResolve());
+    vec.emplace_back(CString("reject"), GetReject());
+}
+
+void PromiseIteratorRecord::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("iterator", GetIterator());
-    vec.emplace_back("done", JSTaggedValue(GetDone()));
+    vec.emplace_back(CString("iterator"), GetIterator());
+    vec.emplace_back(CString("done"), JSTaggedValue(GetDone()));
 }

-void PromiseRecord::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void PromiseRecord::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("value", GetValue());
+    vec.emplace_back(CString("value"), GetValue());
 }

-void ResolvingFunctionsRecord::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void ResolvingFunctionsRecord::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("resolve-function", GetResolveFunction());
-    vec.emplace_back("reject-function", GetRejectFunction());
+    vec.emplace_back(CString("resolve-function"), GetResolveFunction());
+    vec.emplace_back(CString("reject-function"), GetRejectFunction());
 }

-void AsyncGeneratorRequest::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void AsyncGeneratorRequest::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("completion", GetCompletion());
-    vec.emplace_back("capability", GetCapability());
+    vec.emplace_back(CString("completion"), GetCompletion());
+    vec.emplace_back(CString("capability"), GetCapability());
 }

-void AsyncIteratorRecord::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void AsyncIteratorRecord::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("iterator", GetIterator());
-    vec.emplace_back("nextmethod", GetNextMethod());
-    vec.emplace_back("done", JSTaggedValue(GetDone()));
+    vec.emplace_back(CString("iterator"), GetIterator());
+    vec.emplace_back(CString("nextmethod"), GetNextMethod());
+    vec.emplace_back(CString("done"), JSTaggedValue(GetDone()));
 }

-void JSAsyncFromSyncIterator::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncFromSyncIterator::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("synciteratorrecord", GetSyncIteratorRecord());
+    vec.emplace_back(CString("synciteratorrecord"), GetSyncIteratorRecord());
 }

-void JSAsyncFromSyncIterUnwarpFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncFromSyncIterUnwarpFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("done", JSTaggedValue(GetDone()));
+    vec.emplace_back(CString("done"), JSTaggedValue(GetDone()));
 }

-void JSPromise::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromise::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("promise-state", JSTaggedValue(static_cast<int>(GetPromiseState())));
-    vec.emplace_back("promise-result", GetPromiseResult());
-    vec.emplace_back("promise-fulfill-reactions", GetPromiseFulfillReactions());
-    vec.emplace_back("promise-reject-reactions", GetPromiseRejectReactions());
-    vec.emplace_back("promise-is-handled", JSTaggedValue(GetPromiseIsHandled()));
+    vec.emplace_back(CString("promise-state"), JSTaggedValue(static_cast<int>(GetPromiseState())));
+    vec.emplace_back(CString("promise-result"), GetPromiseResult());
+    vec.emplace_back(CString("promise-fulfill-reactions"), GetPromiseFulfillReactions());
+    vec.emplace_back(CString("promise-reject-reactions"), GetPromiseRejectReactions());
+    vec.emplace_back(CString("promise-is-handled"), JSTaggedValue(GetPromiseIsHandled()));
     JSObject::DumpForSnapshot(vec);
 }

-void JSPromiseReactionsFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseReactionsFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("promise", GetPromise());
-    vec.emplace_back("already-resolved", GetAlreadyResolved());
+    vec.emplace_back(CString("promise"), GetPromise());
+    vec.emplace_back(CString("already-resolved"), GetAlreadyResolved());
     JSObject::DumpForSnapshot(vec);
 }

-void JSAsyncGeneratorResNextRetProRstFtn::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncGeneratorResNextRetProRstFtn::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("async-generator-object", GetAsyncGeneratorObject());
+    vec.emplace_back(CString("async-generator-object"), GetAsyncGeneratorObject());
     JSObject::DumpForSnapshot(vec);
 }

-void JSPromiseExecutorFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseExecutorFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("capability", GetCapability());
+    vec.emplace_back(CString("capability"), GetCapability());
     JSObject::DumpForSnapshot(vec);
 }

-void JSPromiseAllResolveElementFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseAllResolveElementFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("index", GetIndex());
-    vec.emplace_back("values", GetValues());
-    vec.emplace_back("capabilities", GetCapabilities());
-    vec.emplace_back("remaining-elements", GetRemainingElements());
-    vec.emplace_back("already-called", GetAlreadyCalled());
+    vec.emplace_back(CString("index"), GetIndex());
+    vec.emplace_back(CString("values"), GetValues());
+    vec.emplace_back(CString("capabilities"), GetCapabilities());
+    vec.emplace_back(CString("remaining-elements"), GetRemainingElements());
+    vec.emplace_back(CString("already-called"), GetAlreadyCalled());
     JSObject::DumpForSnapshot(vec);
 }
-void JSPromiseAnyRejectElementFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseAnyRejectElementFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("index", JSTaggedValue(GetIndex()));
-    vec.emplace_back("errors", GetErrors());
-    vec.emplace_back("capability", GetCapability());
-    vec.emplace_back("remaining-elements", GetRemainingElements());
-    vec.emplace_back("already-called", GetAlreadyCalled());
+    vec.emplace_back(CString("index"), JSTaggedValue(GetIndex()));
+    vec.emplace_back(CString("errors"), GetErrors());
+    vec.emplace_back(CString("capability"), GetCapability());
+    vec.emplace_back(CString("remaining-elements"), GetRemainingElements());
+    vec.emplace_back(CString("already-called"), GetAlreadyCalled());
     JSObject::DumpForSnapshot(vec);
 }

-void JSPromiseAllSettledElementFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseAllSettledElementFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("already-called", GetAlreadyCalled());
-    vec.emplace_back("index", JSTaggedValue(GetIndex()));
-    vec.emplace_back("values", GetValues());
-    vec.emplace_back("capability", GetCapability());
-    vec.emplace_back("remaining-elements", GetRemainingElements());
+    vec.emplace_back(CString("already-called"), GetAlreadyCalled());
+    vec.emplace_back(CString("index"), JSTaggedValue(GetIndex()));
+    vec.emplace_back(CString("values"), GetValues());
+    vec.emplace_back(CString("capability"), GetCapability());
+    vec.emplace_back(CString("remaining-elements"), GetRemainingElements());
     JSObject::DumpForSnapshot(vec);
 }

-void JSPromiseFinallyFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseFinallyFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("constructor", GetConstructor());
-    vec.emplace_back("onFinally", GetOnFinally());
+    vec.emplace_back(CString("constructor"), GetConstructor());
+    vec.emplace_back(CString("onFinally"), GetOnFinally());
     JSObject::DumpForSnapshot(vec);
 }

-void JSPromiseValueThunkOrThrowerFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPromiseValueThunkOrThrowerFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("result", GetResult());
+    vec.emplace_back(CString("result"), GetResult());
     JSObject::DumpForSnapshot(vec);
 }

-void MicroJobQueue::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void MicroJobQueue::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("promise-job-queue", GetPromiseJobQueue());
-    vec.emplace_back("script-job-queue", GetScriptJobQueue());
+    vec.emplace_back(CString("promise-job-queue"), GetPromiseJobQueue());
+    vec.emplace_back(CString("script-job-queue"), GetScriptJobQueue());
 }

-void PendingJob::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void PendingJob::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("job", GetJob());
-    vec.emplace_back("arguments", GetArguments());
+    vec.emplace_back(CString("job"), GetJob());
+    vec.emplace_back(CString("arguments"), GetArguments());
 }

-void CompletionRecord::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void CompletionRecord::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("value", GetValue());
-    vec.emplace_back("type", JSTaggedValue(static_cast<int>(GetType())));
+    vec.emplace_back(CString("value"), GetValue());
+    vec.emplace_back(CString("type"), JSTaggedValue(static_cast<int>(GetType())));
 }

-void JSProxyRevocFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSProxyRevocFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("RevocableProxy", GetRevocableProxy());
+    vec.emplace_back(CString("RevocableProxy"), GetRevocableProxy());
 }

-void JSAsyncFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
     JSFunction::DumpForSnapshot(vec);
 }

-void JSAsyncAwaitStatusFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncAwaitStatusFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("AsyncContext", GetAsyncContext());
+    vec.emplace_back(CString("AsyncContext"), GetAsyncContext());
 }

-void JSGeneratorFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSGeneratorFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
     JSFunction::DumpForSnapshot(vec);
 }

-void JSAsyncGeneratorFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncGeneratorFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
     JSFunction::DumpForSnapshot(vec);
 }

-void JSIntlBoundFunction::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSIntlBoundFunction::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("NumberFormat", GetNumberFormat());
-    vec.emplace_back("DateTimeFormat", GetDateTimeFormat());
-    vec.emplace_back("Collator", GetCollator());
+    vec.emplace_back(CString("NumberFormat"), GetNumberFormat());
+    vec.emplace_back(CString("DateTimeFormat"), GetDateTimeFormat());
+    vec.emplace_back(CString("Collator"), GetCollator());
     JSObject::DumpForSnapshot(vec);
 }

-void PropertyBox::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void PropertyBox::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("Value", GetValue());
+    vec.emplace_back(CString("Value"), GetValue());
 }

-void PrototypeHandler::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void PrototypeHandler::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("HandlerInfo", GetHandlerInfo());
-    vec.emplace_back("ProtoCell", GetProtoCell());
-    vec.emplace_back("Holder", GetHolder());
+    vec.emplace_back(CString("HandlerInfo"), GetHandlerInfo());
+    vec.emplace_back(CString("ProtoCell"), GetProtoCell());
+    vec.emplace_back(CString("Holder"), GetHolder());
 }

-void TransitionHandler::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void TransitionHandler::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("HandlerInfo", GetHandlerInfo());
-    vec.emplace_back("TransitionHClass", GetTransitionHClass());
+    vec.emplace_back(CString("HandlerInfo"), GetHandlerInfo());
+    vec.emplace_back(CString("TransitionHClass"), GetTransitionHClass());
 }

-void TransWithProtoHandler::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void TransWithProtoHandler::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("HandlerInfo", GetHandlerInfo());
-    vec.emplace_back("TransitionHClass", GetTransitionHClass());
-    vec.emplace_back("ProtoCell", GetProtoCell());
+    vec.emplace_back(CString("HandlerInfo"), GetHandlerInfo());
+    vec.emplace_back(CString("TransitionHClass"), GetTransitionHClass());
+    vec.emplace_back(CString("ProtoCell"), GetProtoCell());
 }

-void StoreTSHandler::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void StoreTSHandler::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("HandlerInfo", GetHandlerInfo());
-    vec.emplace_back("ProtoCell", GetProtoCell());
-    vec.emplace_back("Holder", GetHolder());
+    vec.emplace_back(CString("HandlerInfo"), GetHandlerInfo());
+    vec.emplace_back(CString("ProtoCell"), GetProtoCell());
+    vec.emplace_back(CString("Holder"), GetHolder());
 }

-void JSRealm::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSRealm::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("Value", GetValue());
-    vec.emplace_back("GLobalEnv", GetGlobalEnv());
vec.emplace_back("GLobalEnv", GetGlobalEnv()); + vec.emplace_back(CString("Value"), GetValue()); + vec.emplace_back(CString("GLobalEnv"), GetGlobalEnv()); JSObject::DumpForSnapshot(vec); } #ifdef ARK_SUPPORT_INTL -void JSIntl::DumpForSnapshot(std::vector> &vec) const +void JSIntl::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("FallbackSymbol", GetFallbackSymbol()); + vec.emplace_back(CString("FallbackSymbol"), GetFallbackSymbol()); JSObject::DumpForSnapshot(vec); } -void JSLocale::DumpForSnapshot(std::vector> &vec) const +void JSLocale::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("IcuField", GetIcuField()); + vec.emplace_back(CString("IcuField"), GetIcuField()); JSObject::DumpForSnapshot(vec); } -void JSDateTimeFormat::DumpForSnapshot(std::vector> &vec) const +void JSDateTimeFormat::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 11; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("Locale", GetLocale()); - vec.emplace_back("Calendar", GetCalendar()); - vec.emplace_back("NumberingSystem", GetNumberingSystem()); - vec.emplace_back("TimeZone", GetTimeZone()); - vec.emplace_back("HourCycle", JSTaggedValue(static_cast(GetHourCycle()))); - vec.emplace_back("LocaleIcu", GetLocaleIcu()); - vec.emplace_back("SimpleDateTimeFormatIcu", GetSimpleDateTimeFormatIcu()); - vec.emplace_back("Iso8601", GetIso8601()); - vec.emplace_back("DateStyle", JSTaggedValue(static_cast(GetDateStyle()))); - vec.emplace_back("TimeStyle", JSTaggedValue(static_cast(GetTimeStyle()))); - vec.emplace_back("BoundFormat", GetBoundFormat()); + vec.emplace_back(CString("Locale"), GetLocale()); + vec.emplace_back(CString("Calendar"), GetCalendar()); + vec.emplace_back(CString("NumberingSystem"), GetNumberingSystem()); + vec.emplace_back(CString("TimeZone"), GetTimeZone()); + vec.emplace_back(CString("HourCycle"), JSTaggedValue(static_cast(GetHourCycle()))); + vec.emplace_back(CString("LocaleIcu"), GetLocaleIcu()); + vec.emplace_back(CString("SimpleDateTimeFormatIcu"), GetSimpleDateTimeFormatIcu()); + vec.emplace_back(CString("Iso8601"), GetIso8601()); + vec.emplace_back(CString("DateStyle"), JSTaggedValue(static_cast(GetDateStyle()))); + vec.emplace_back(CString("TimeStyle"), JSTaggedValue(static_cast(GetTimeStyle()))); + vec.emplace_back(CString("BoundFormat"), GetBoundFormat()); JSObject::DumpForSnapshot(vec); } -void JSRelativeTimeFormat::DumpForSnapshot(std::vector> &vec) const +void JSRelativeTimeFormat::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Locale", GetLocale()); - vec.emplace_back("NumberingSystem", GetNumberingSystem()); - vec.emplace_back("Style", JSTaggedValue(static_cast(GetStyle()))); - vec.emplace_back("Numeric", JSTaggedValue(static_cast(GetNumeric()))); - vec.emplace_back("IcuField", GetIcuField()); + vec.emplace_back(CString("Locale"), GetLocale()); + vec.emplace_back(CString("NumberingSystem"), GetNumberingSystem()); + vec.emplace_back(CString("Style"), JSTaggedValue(static_cast(GetStyle()))); + vec.emplace_back(CString("Numeric"), JSTaggedValue(static_cast(GetNumeric()))); + vec.emplace_back(CString("IcuField"), GetIcuField()); JSObject::DumpForSnapshot(vec); } -void JSNumberFormat::DumpForSnapshot(std::vector> &vec) const +void JSNumberFormat::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 20; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("Locale", 
GetLocale()); - vec.emplace_back("NumberingSystem", GetNumberingSystem()); - vec.emplace_back("Style", JSTaggedValue(static_cast(GetStyle()))); - vec.emplace_back("Currency", GetCurrency()); - vec.emplace_back("CurrencyDisplay", JSTaggedValue(static_cast(GetCurrencyDisplay()))); - vec.emplace_back("CurrencySign", JSTaggedValue(static_cast(GetCurrencySign()))); - vec.emplace_back("Unit", GetUnit()); - vec.emplace_back("UnitDisplay", JSTaggedValue(static_cast(GetUnitDisplay()))); - vec.emplace_back("MinimumIntegerDigits", GetMinimumIntegerDigits()); - vec.emplace_back("MinimumFractionDigits", GetMinimumFractionDigits()); - vec.emplace_back("MaximumFractionDigits", GetMaximumFractionDigits()); - vec.emplace_back("MinimumSignificantDigits", GetMinimumSignificantDigits()); - vec.emplace_back("MaximumSignificantDigits", GetMaximumSignificantDigits()); - vec.emplace_back("UseGrouping", GetUseGrouping()); - vec.emplace_back("RoundingType", JSTaggedValue(static_cast(GetRoundingType()))); - vec.emplace_back("Notation", JSTaggedValue(static_cast(GetNotation()))); - vec.emplace_back("CompactDisplay", JSTaggedValue(static_cast(GetCompactDisplay()))); - vec.emplace_back("SignDisplay", JSTaggedValue(static_cast(GetSignDisplay()))); - vec.emplace_back("BoundFormat", GetBoundFormat()); - vec.emplace_back("IcuField", GetIcuField()); + vec.emplace_back(CString("Locale"), GetLocale()); + vec.emplace_back(CString("NumberingSystem"), GetNumberingSystem()); + vec.emplace_back(CString("Style"), JSTaggedValue(static_cast(GetStyle()))); + vec.emplace_back(CString("Currency"), GetCurrency()); + vec.emplace_back(CString("CurrencyDisplay"), JSTaggedValue(static_cast(GetCurrencyDisplay()))); + vec.emplace_back(CString("CurrencySign"), JSTaggedValue(static_cast(GetCurrencySign()))); + vec.emplace_back(CString("Unit"), GetUnit()); + vec.emplace_back(CString("UnitDisplay"), JSTaggedValue(static_cast(GetUnitDisplay()))); + vec.emplace_back(CString("MinimumIntegerDigits"), GetMinimumIntegerDigits()); + vec.emplace_back(CString("MinimumFractionDigits"), GetMinimumFractionDigits()); + vec.emplace_back(CString("MaximumFractionDigits"), GetMaximumFractionDigits()); + vec.emplace_back(CString("MinimumSignificantDigits"), GetMinimumSignificantDigits()); + vec.emplace_back(CString("MaximumSignificantDigits"), GetMaximumSignificantDigits()); + vec.emplace_back(CString("UseGrouping"), GetUseGrouping()); + vec.emplace_back(CString("RoundingType"), JSTaggedValue(static_cast(GetRoundingType()))); + vec.emplace_back(CString("Notation"), JSTaggedValue(static_cast(GetNotation()))); + vec.emplace_back(CString("CompactDisplay"), JSTaggedValue(static_cast(GetCompactDisplay()))); + vec.emplace_back(CString("SignDisplay"), JSTaggedValue(static_cast(GetSignDisplay()))); + vec.emplace_back(CString("BoundFormat"), GetBoundFormat()); + vec.emplace_back(CString("IcuField"), GetIcuField()); JSObject::DumpForSnapshot(vec); } -void JSCollator::DumpForSnapshot(std::vector> &vec) const +void JSCollator::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 9; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("IcuField", GetIcuField()); - vec.emplace_back("Locale", GetLocale()); - vec.emplace_back("Collation", GetCollation()); - vec.emplace_back("BoundCompare", GetBoundCompare()); - vec.emplace_back("CaseFirst", JSTaggedValue(static_cast(GetCaseFirst()))); - vec.emplace_back("Usage", JSTaggedValue(static_cast(GetUsage()))); - vec.emplace_back("Sensitivity", 
-    vec.emplace_back("IgnorePunctuation", JSTaggedValue(GetIgnorePunctuation()));
-    vec.emplace_back("Numeric", JSTaggedValue(GetNumeric()));
+    vec.emplace_back(CString("IcuField"), GetIcuField());
+    vec.emplace_back(CString("Locale"), GetLocale());
+    vec.emplace_back(CString("Collation"), GetCollation());
+    vec.emplace_back(CString("BoundCompare"), GetBoundCompare());
+    vec.emplace_back(CString("CaseFirst"), JSTaggedValue(static_cast<int>(GetCaseFirst())));
+    vec.emplace_back(CString("Usage"), JSTaggedValue(static_cast<int>(GetUsage())));
+    vec.emplace_back(CString("Sensitivity"), JSTaggedValue(static_cast<int>(GetSensitivity())));
+    vec.emplace_back(CString("IgnorePunctuation"), JSTaggedValue(GetIgnorePunctuation()));
+    vec.emplace_back(CString("Numeric"), JSTaggedValue(GetNumeric()));
     JSObject::DumpForSnapshot(vec);
 }

-void JSPluralRules::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSPluralRules::DumpForSnapshot(std::vector<Reference> &vec) const
 {
     // please update the NUM_OF_ITEMS if you change the items below
     constexpr int16_t NUM_OF_ITEMS = 10;
     vec.reserve(vec.size() + NUM_OF_ITEMS);
-    vec.emplace_back("Locale", GetLocale());
-    vec.emplace_back("MinimumIntegerDigits", GetMinimumIntegerDigits());
-    vec.emplace_back("MinimumFractionDigits", GetMinimumFractionDigits());
-    vec.emplace_back("MaximumFractionDigits", GetMaximumFractionDigits());
-    vec.emplace_back("MinimumSignificantDigits", GetMinimumSignificantDigits());
-    vec.emplace_back("MaximumSignificantDigits", GetMaximumSignificantDigits());
-    vec.emplace_back("RoundingType", JSTaggedValue(static_cast<int>(GetRoundingType())));
-    vec.emplace_back("IcuPR", GetIcuPR());
-    vec.emplace_back("IcuNF", GetIcuNF());
-    vec.emplace_back("Type", JSTaggedValue(static_cast<int>(GetType())));
+    vec.emplace_back(CString("Locale"), GetLocale());
+    vec.emplace_back(CString("MinimumIntegerDigits"), GetMinimumIntegerDigits());
+    vec.emplace_back(CString("MinimumFractionDigits"), GetMinimumFractionDigits());
+    vec.emplace_back(CString("MaximumFractionDigits"), GetMaximumFractionDigits());
+    vec.emplace_back(CString("MinimumSignificantDigits"), GetMinimumSignificantDigits());
+    vec.emplace_back(CString("MaximumSignificantDigits"), GetMaximumSignificantDigits());
+    vec.emplace_back(CString("RoundingType"), JSTaggedValue(static_cast<int>(GetRoundingType())));
+    vec.emplace_back(CString("IcuPR"), GetIcuPR());
+    vec.emplace_back(CString("IcuNF"), GetIcuNF());
+    vec.emplace_back(CString("Type"), JSTaggedValue(static_cast<int>(GetType())));
     JSObject::DumpForSnapshot(vec);
 }

-void JSDisplayNames::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSDisplayNames::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("Locale", GetLocale());
-    vec.emplace_back("Type", JSTaggedValue(static_cast<int>(GetType())));
-    vec.emplace_back("Style", JSTaggedValue(static_cast<int>(GetStyle())));
-    vec.emplace_back("Fallback", JSTaggedValue(static_cast<int>(GetFallback())));
-    vec.emplace_back("IcuLDN", GetIcuLDN());
+    vec.emplace_back(CString("Locale"), GetLocale());
+    vec.emplace_back(CString("Type"), JSTaggedValue(static_cast<int>(GetType())));
+    vec.emplace_back(CString("Style"), JSTaggedValue(static_cast<int>(GetStyle())));
+    vec.emplace_back(CString("Fallback"), JSTaggedValue(static_cast<int>(GetFallback())));
+    vec.emplace_back(CString("IcuLDN"), GetIcuLDN());
     JSObject::DumpForSnapshot(vec);
 }

-void JSListFormat::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSListFormat::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("Locale", GetLocale());
-    vec.emplace_back("Type", JSTaggedValue(static_cast<int>(GetType())));
-    vec.emplace_back("Style", JSTaggedValue(static_cast<int>(GetStyle())));
-    vec.emplace_back("IcuLF", GetIcuLF());
+    vec.emplace_back(CString("Locale"), GetLocale());
+    vec.emplace_back(CString("Type"), JSTaggedValue(static_cast<int>(GetType())));
+    vec.emplace_back(CString("Style"), JSTaggedValue(static_cast<int>(GetStyle())));
+    vec.emplace_back(CString("IcuLF"), GetIcuLF());
     JSObject::DumpForSnapshot(vec);
 }
 #endif

-void JSGeneratorObject::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSGeneratorObject::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("GeneratorContext", GetGeneratorContext());
-    vec.emplace_back("ResumeResult", GetResumeResult());
-    vec.emplace_back("GeneratorState", JSTaggedValue(static_cast<int>(GetGeneratorState())));
-    vec.emplace_back("ResumeMode", JSTaggedValue(static_cast<int>(GetResumeMode())));
+    vec.emplace_back(CString("GeneratorContext"), GetGeneratorContext());
+    vec.emplace_back(CString("ResumeResult"), GetResumeResult());
+    vec.emplace_back(CString("GeneratorState"), JSTaggedValue(static_cast<int>(GetGeneratorState())));
+    vec.emplace_back(CString("ResumeMode"), JSTaggedValue(static_cast<int>(GetResumeMode())));
     JSObject::DumpForSnapshot(vec);
 }

-void JSAsyncGeneratorObject::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncGeneratorObject::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("GeneratorContext", GetGeneratorContext());
-    vec.emplace_back("AsyncGeneratorQueue", GetAsyncGeneratorQueue());
-    vec.emplace_back("GeneratorBrand", GetGeneratorBrand());
-    vec.emplace_back("ResumeResult", GetResumeResult());
-    vec.emplace_back("AsyncGeneratorState", JSTaggedValue(static_cast<int>(GetAsyncGeneratorState())));
-    vec.emplace_back("ResumeMode", JSTaggedValue(static_cast<int>(GetResumeMode())));
+    vec.emplace_back(CString("GeneratorContext"), GetGeneratorContext());
+    vec.emplace_back(CString("AsyncGeneratorQueue"), GetAsyncGeneratorQueue());
+    vec.emplace_back(CString("GeneratorBrand"), GetGeneratorBrand());
+    vec.emplace_back(CString("ResumeResult"), GetResumeResult());
+    vec.emplace_back(CString("AsyncGeneratorState"), JSTaggedValue(static_cast<int>(GetAsyncGeneratorState())));
+    vec.emplace_back(CString("ResumeMode"), JSTaggedValue(static_cast<int>(GetResumeMode())));
     JSObject::DumpForSnapshot(vec);
 }

-void JSAsyncFuncObject::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void JSAsyncFuncObject::DumpForSnapshot(std::vector<Reference> &vec) const
 {
-    vec.emplace_back("Promise", GetPromise());
+    vec.emplace_back(CString("Promise"), GetPromise());
 }

-void GeneratorContext::DumpForSnapshot(std::vector<std::pair<CString, JSTaggedValue>> &vec) const
+void GeneratorContext::DumpForSnapshot(std::vector<Reference> &vec) const
 {
     // please update the NUM_OF_ITEMS if you change the items below
     constexpr int16_t NUM_OF_ITEMS = 8;
     vec.reserve(vec.size() + NUM_OF_ITEMS);
-    vec.emplace_back("RegsArray", GetRegsArray());
-    vec.emplace_back("Method", GetMethod());
-    vec.emplace_back("This", GetThis());
-    vec.emplace_back("Acc", GetAcc());
-    vec.emplace_back("GeneratorObject", GetGeneratorObject());
-    vec.emplace_back("LexicalEnv", GetLexicalEnv());
-    vec.emplace_back("NRegs", JSTaggedValue(GetNRegs()));
-    vec.emplace_back("BCOffset", JSTaggedValue(GetBCOffset()));
+    vec.emplace_back(CString("RegsArray"), GetRegsArray());
+    vec.emplace_back(CString("Method"), GetMethod());
+    vec.emplace_back(CString("This"), GetThis());
+    vec.emplace_back(CString("Acc"), GetAcc());
+    vec.emplace_back(CString("GeneratorObject"), GetGeneratorObject());
+    vec.emplace_back(CString("LexicalEnv"), GetLexicalEnv());
vec.emplace_back(CString("NRegs"), JSTaggedValue(GetNRegs())); + vec.emplace_back(CString("BCOffset"), JSTaggedValue(GetBCOffset())); } -void ProtoChangeMarker::DumpForSnapshot(std::vector> &vec) const +void ProtoChangeMarker::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Promise", JSTaggedValue(GetHasChanged())); + vec.emplace_back(CString("Promise"), JSTaggedValue(GetHasChanged())); } -void ProtoChangeDetails::DumpForSnapshot(std::vector> &vec) const +void ProtoChangeDetails::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ChangeListener", GetChangeListener()); - vec.emplace_back("RegisterIndex", JSTaggedValue(GetRegisterIndex())); + vec.emplace_back(CString("ChangeListener"), GetChangeListener()); + vec.emplace_back(CString("RegisterIndex"), JSTaggedValue(GetRegisterIndex())); } -void MachineCode::DumpForSnapshot(std::vector> &vec) const +void MachineCode::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("InstructionSizeInBytes", JSTaggedValue(GetInstructionSizeInBytes())); + vec.emplace_back(CString("InstructionSizeInBytes"), JSTaggedValue(GetInstructionSizeInBytes())); } -void ClassInfoExtractor::DumpForSnapshot(std::vector> &vec) const +void ClassInfoExtractor::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 6; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("NonStaticKeys", GetNonStaticKeys()); - vec.emplace_back("NonStaticProperties", GetNonStaticProperties()); - vec.emplace_back("NonStaticElements", GetNonStaticElements()); - vec.emplace_back("StaticKeys", GetStaticKeys()); - vec.emplace_back("StaticProperties", GetStaticProperties()); - vec.emplace_back("StaticElements", GetStaticElements()); + vec.emplace_back(CString("NonStaticKeys"), GetNonStaticKeys()); + vec.emplace_back(CString("NonStaticProperties"), GetNonStaticProperties()); + vec.emplace_back(CString("NonStaticElements"), GetNonStaticElements()); + vec.emplace_back(CString("StaticKeys"), GetStaticKeys()); + vec.emplace_back(CString("StaticProperties"), GetStaticProperties()); + vec.emplace_back(CString("StaticElements"), GetStaticElements()); } -void TSObjectType::DumpForSnapshot(std::vector> &vec) const +void TSObjectType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ObjLayoutInfo", GetObjLayoutInfo()); - vec.emplace_back("IndexSigns", GetIndexSigns()); + vec.emplace_back(CString("ObjLayoutInfo"), GetObjLayoutInfo()); + vec.emplace_back(CString("IndexSigns"), GetIndexSigns()); } -void TSClassType::DumpForSnapshot(std::vector> &vec) const +void TSClassType::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 5; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("InstanceType", GetInstanceType()); - vec.emplace_back("ConstructorType", GetConstructorType()); - vec.emplace_back("PrototypeType", GetPrototypeType()); - vec.emplace_back("ExtensionGT", JSTaggedValue(GetExtensionGT().GetType())); - vec.emplace_back("HasLinked", JSTaggedValue(GetHasLinked())); + vec.emplace_back(CString("InstanceType"), GetInstanceType()); + vec.emplace_back(CString("ConstructorType"), GetConstructorType()); + vec.emplace_back(CString("PrototypeType"), GetPrototypeType()); + vec.emplace_back(CString("ExtensionGT"), JSTaggedValue(GetExtensionGT().GetType())); + vec.emplace_back(CString("HasLinked"), JSTaggedValue(GetHasLinked())); } -void TSInterfaceType::DumpForSnapshot(std::vector> &vec) const 
+void TSInterfaceType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Fields", GetFields()); - vec.emplace_back("Extends", GetExtends()); + vec.emplace_back(CString("Fields"), GetFields()); + vec.emplace_back(CString("Extends"), GetExtends()); } -void TSClassInstanceType::DumpForSnapshot(std::vector> &vec) const +void TSClassInstanceType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ClassGT", JSTaggedValue(GetClassGT().GetType())); + vec.emplace_back(CString("ClassGT"), JSTaggedValue(GetClassGT().GetType())); } -void TSUnionType::DumpForSnapshot(std::vector> &vec) const +void TSUnionType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ComponentTypes", GetComponents()); + vec.emplace_back(CString("ComponentTypes"), GetComponents()); } -void TSFunctionType::DumpForSnapshot(std::vector> &vec) const +void TSFunctionType::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 5; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("Name", GetName()); - vec.emplace_back("ParameterTypes", GetParameterTypes()); - vec.emplace_back("ReturnGT", JSTaggedValue(GetReturnGT().GetType())); - vec.emplace_back("ThisGT", JSTaggedValue(GetThisGT().GetType())); - vec.emplace_back("BitFiled", JSTaggedValue(GetBitField())); + vec.emplace_back(CString("Name"), GetName()); + vec.emplace_back(CString("ParameterTypes"), GetParameterTypes()); + vec.emplace_back(CString("ReturnGT"), JSTaggedValue(GetReturnGT().GetType())); + vec.emplace_back(CString("ThisGT"), JSTaggedValue(GetThisGT().GetType())); + vec.emplace_back(CString("BitFiled"), JSTaggedValue(GetBitField())); } -void TSArrayType::DumpForSnapshot(std::vector> &vec) const +void TSArrayType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ParameterTypeRef", JSTaggedValue(GetElementGT().GetType())); + vec.emplace_back(CString("ParameterTypeRef"), JSTaggedValue(GetElementGT().GetType())); } -void TSIteratorInstanceType::DumpForSnapshot(std::vector> &vec) const +void TSIteratorInstanceType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("kindGT", JSTaggedValue(GetKindGT().GetType())); - vec.emplace_back("elementGT", JSTaggedValue(GetElementGT().GetType())); + vec.emplace_back(CString("kindGT"), JSTaggedValue(GetKindGT().GetType())); + vec.emplace_back(CString("elementGT"), JSTaggedValue(GetElementGT().GetType())); } -void TSNamespaceType::DumpForSnapshot(std::vector> &vec) const +void TSNamespaceType::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("PropertyType", GetPropertyType()); + vec.emplace_back(CString("PropertyType"), GetPropertyType()); } -void SourceTextModule::DumpForSnapshot(std::vector> &vec) const +void SourceTextModule::DumpForSnapshot(std::vector &vec) const { // please update the NUM_OF_ITEMS if you change the items below constexpr int16_t NUM_OF_ITEMS = 14; vec.reserve(vec.size() + NUM_OF_ITEMS); - vec.emplace_back("Environment", GetEnvironment()); - vec.emplace_back("Namespace", GetNamespace()); - vec.emplace_back("EcmaModuleFilename", GetEcmaModuleFilename()); - vec.emplace_back("EcmaModuleRecordName", GetEcmaModuleRecordName()); - vec.emplace_back("RequestedModules", GetRequestedModules()); - vec.emplace_back("ImportEntries", GetImportEntries()); - vec.emplace_back("LocalExportEntries", GetLocalExportEntries()); - vec.emplace_back("IndirectExportEntries", GetIndirectExportEntries()); - vec.emplace_back("StarExportEntries", GetStarExportEntries()); - 
vec.emplace_back("Status", JSTaggedValue(static_cast(GetStatus()))); - vec.emplace_back("EvaluationError", JSTaggedValue(GetEvaluationError())); - vec.emplace_back("DFSIndex", JSTaggedValue(GetDFSIndex())); - vec.emplace_back("DFSAncestorIndex", JSTaggedValue(GetDFSAncestorIndex())); - vec.emplace_back("NameDictionary", GetNameDictionary()); + vec.emplace_back(CString("Environment"), GetEnvironment()); + vec.emplace_back(CString("Namespace"), GetNamespace()); + vec.emplace_back(CString("EcmaModuleFilename"), GetEcmaModuleFilename()); + vec.emplace_back(CString("EcmaModuleRecordName"), GetEcmaModuleRecordName()); + vec.emplace_back(CString("RequestedModules"), GetRequestedModules()); + vec.emplace_back(CString("ImportEntries"), GetImportEntries()); + vec.emplace_back(CString("LocalExportEntries"), GetLocalExportEntries()); + vec.emplace_back(CString("IndirectExportEntries"), GetIndirectExportEntries()); + vec.emplace_back(CString("StarExportEntries"), GetStarExportEntries()); + vec.emplace_back(CString("Status"), JSTaggedValue(static_cast(GetStatus()))); + vec.emplace_back(CString("EvaluationError"), JSTaggedValue(GetEvaluationError())); + vec.emplace_back(CString("DFSIndex"), JSTaggedValue(GetDFSIndex())); + vec.emplace_back(CString("DFSAncestorIndex"), JSTaggedValue(GetDFSAncestorIndex())); + vec.emplace_back(CString("NameDictionary"), GetNameDictionary()); } -void ImportEntry::DumpForSnapshot(std::vector> &vec) const +void ImportEntry::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ModuleRequest", GetModuleRequest()); - vec.emplace_back("ImportName", GetImportName()); - vec.emplace_back("LocalName", GetLocalName()); + vec.emplace_back(CString("ModuleRequest"), GetModuleRequest()); + vec.emplace_back(CString("ImportName"), GetImportName()); + vec.emplace_back(CString("LocalName"), GetLocalName()); } -void LocalExportEntry::DumpForSnapshot(std::vector> &vec) const +void LocalExportEntry::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ExportName", GetExportName()); - vec.emplace_back("LocalName", GetLocalName()); + vec.emplace_back(CString("ExportName"), GetExportName()); + vec.emplace_back(CString("LocalName"), GetLocalName()); } -void IndirectExportEntry::DumpForSnapshot(std::vector> &vec) const +void IndirectExportEntry::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ExportName", GetExportName()); - vec.emplace_back("ModuleRequest", GetModuleRequest()); - vec.emplace_back("ImportName", GetImportName()); + vec.emplace_back(CString("ExportName"), GetExportName()); + vec.emplace_back(CString("ModuleRequest"), GetModuleRequest()); + vec.emplace_back(CString("ImportName"), GetImportName()); } -void StarExportEntry::DumpForSnapshot(std::vector> &vec) const +void StarExportEntry::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("ModuleRequest", GetModuleRequest()); + vec.emplace_back(CString("ModuleRequest"), GetModuleRequest()); } -void ResolvedBinding::DumpForSnapshot(std::vector> &vec) const +void ResolvedBinding::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Module", GetModule()); - vec.emplace_back("BindingName", GetBindingName()); + vec.emplace_back(CString("Module"), GetModule()); + vec.emplace_back(CString("BindingName"), GetBindingName()); } -void ResolvedIndexBinding::DumpForSnapshot(std::vector> &vec) const +void ResolvedIndexBinding::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Module", GetModule()); - vec.emplace_back("Index", JSTaggedValue(GetIndex())); + vec.emplace_back(CString("Module"), 
GetModule()); + vec.emplace_back(CString("Index"), JSTaggedValue(GetIndex())); } -void ModuleNamespace::DumpForSnapshot(std::vector> &vec) const +void ModuleNamespace::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Module", GetModule()); - vec.emplace_back("Exports", GetExports()); + vec.emplace_back(CString("Module"), GetModule()); + vec.emplace_back(CString("Exports"), GetExports()); } -void CjsModule::DumpForSnapshot(std::vector> &vec) const +void CjsModule::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Id", GetId()); - vec.emplace_back("Path", GetPath()); - vec.emplace_back("Exports", GetExports()); - vec.emplace_back("Filename", GetFilename()); + vec.emplace_back(CString("Id"), GetId()); + vec.emplace_back(CString("Path"), GetPath()); + vec.emplace_back(CString("Exports"), GetExports()); + vec.emplace_back(CString("Filename"), GetFilename()); } -void CjsExports::DumpForSnapshot(std::vector> &vec) const +void CjsExports::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Exports", GetExports()); + vec.emplace_back(CString("Exports"), GetExports()); } -void CjsRequire::DumpForSnapshot(std::vector> &vec) const +void CjsRequire::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Cache", GetCache()); - vec.emplace_back("Parent", GetParent()); + vec.emplace_back(CString("Cache"), GetCache()); + vec.emplace_back(CString("Parent"), GetParent()); } -void ClassLiteral::DumpForSnapshot(std::vector> &vec) const +void ClassLiteral::DumpForSnapshot(std::vector &vec) const { - vec.emplace_back("Array", GetArray()); - vec.emplace_back("IsAOTUsed", GetIsAOTUsed()); + vec.emplace_back(CString("Array"), GetArray()); + vec.emplace_back(CString("IsAOTUsed"), JSTaggedValue(GetIsAOTUsed())); } } // namespace panda::ecmascript diff --git a/ecmascript/ecma_context.cpp b/ecmascript/ecma_context.cpp index 1f3a24b0a5f30c7bc66211c62e9d95f36c7523c7..4fc2f2adeea1456de207e5e1a7931a3d5a2d6976 100644 --- a/ecmascript/ecma_context.cpp +++ b/ecmascript/ecma_context.cpp @@ -32,6 +32,7 @@ #include "ecmascript/jspandafile/program_object.h" #include "ecmascript/js_function.h" #include "ecmascript/js_thread.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/object_factory.h" #include "ecmascript/pgo_profiler/pgo_profiler_manager.h" #include "ecmascript/require/js_cjs_module_cache.h" @@ -88,6 +89,8 @@ bool EcmaContext::Initialize() JSType::GLOBAL_ENV); thread_->SetGlobalConst(&globalConst_); globalConst_.Init(thread_, *hClassHandle); + auto arrayHClassIndexMaps = Elements::InitializeHClassMap(); + thread_->SetArrayHClassIndexMap(arrayHClassIndexMaps); JSHandle globalEnv = factory_->NewGlobalEnv(*globalEnvClass); globalEnv->Init(thread_); @@ -101,6 +104,7 @@ bool EcmaContext::Initialize() moduleManager_ = new ModuleManager(vm_); tsManager_ = new TSManager(vm_); optCodeProfiler_ = new OptCodeProfiler(); + initialized_ = true; return true; } @@ -207,6 +211,12 @@ EcmaContext::~EcmaContext() delete propertiesCache_; propertiesCache_ = nullptr; } + // clear join stack + joinStack_.clear(); + + for (auto v : stringifyCache_) { + v.clear(); + } } JSTaggedValue EcmaContext::InvokeEcmaAotEntrypoint(JSHandle mainFunc, JSHandle &thisArg, @@ -248,34 +258,40 @@ Expected EcmaContext::InvokeEcmaEntrypoint(const JSPandaFil JSHandle func(thread_, program->GetMainFunction()); JSHandle global = GlobalEnv::Cast(globalEnv_.GetTaggedObject())->GetJSGlobalObject(); JSHandle undefined = thread_->GlobalConstants()->GetHandledUndefined(); - if (jsPandaFile->IsModule(thread_, 
entryPoint.data())) { + CString moduleName = jsPandaFile->GetJSPandaFileDesc(); + CString entry = entryPoint.data(); + if (jsPandaFile->IsMergedPF()) { + moduleName = entry; + } + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + CString msg = "cannot find record '" + entry + "', please check the request path."; + LOG_FULL(ERROR) << msg; + THROW_REFERENCE_ERROR_AND_RETURN(thread_, msg.c_str(), Unexpected(false)); + } + if (jsPandaFile->IsModule(recordInfo)) { global = undefined; - CString moduleName = jsPandaFile->GetJSPandaFileDesc(); - if (!jsPandaFile->IsBundlePack()) { - moduleName = entryPoint.data(); - } JSHandle module = moduleManager_->HostGetImportedModule(moduleName); func->SetModule(thread_, module); } else { // if it is Cjs at present, the module slot of the function is not used. We borrow it to store the recordName, // which can avoid the problem of larger memory caused by the new slot - JSHandle recordName = factory_->NewFromUtf8(entryPoint.data()); + JSHandle recordName = factory_->NewFromUtf8(moduleName); func->SetModule(thread_, recordName); } vm_->CheckStartCpuProfiler(); JSTaggedValue result; - if (jsPandaFile->IsCjs(thread_, entryPoint.data())) { - if (!thread_->HasPendingException()) { - CJSExecution(func, global, jsPandaFile, entryPoint); - } + if (jsPandaFile->IsCjs(recordInfo)) { + CJSExecution(func, global, jsPandaFile, entryPoint); } else { - if (aotFileManager_->IsLoadMain(jsPandaFile, entryPoint.data())) { + if (aotFileManager_->IsLoadMain(jsPandaFile, entry)) { EcmaRuntimeStatScope runtimeStatScope(vm_); result = InvokeEcmaAotEntrypoint(func, global, jsPandaFile, entryPoint); } else { if (thread_->IsPGOProfilerEnable()) { - vm_->GetPGOProfiler()->ProfileCall(func.GetTaggedType()); + vm_->GetPGOProfiler()->ProfileCall(JSTaggedValue::VALUE_UNDEFINED, func.GetTaggedType()); } EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread_, JSHandle(func), global, undefined, 0); @@ -303,17 +319,15 @@ void EcmaContext::CJSExecution(JSHandle &func, JSHandle exports = factory_->NewCjsExports(); JSMutableHandle filename(thread_, JSTaggedValue::Undefined()); JSMutableHandle dirname(thread_, JSTaggedValue::Undefined()); - if (jsPandaFile->IsBundlePack()) { - PathHelper::ResolveCurrentPath(thread_, dirname, filename, jsPandaFile); - } else { - filename.Update(func->GetModule()); - ASSERT(filename->IsString()); - dirname.Update(PathHelper::ResolveDirPath(thread_, filename)); - } + // Cjs's module slot of the function stores the recordName. + filename.Update(func->GetModule()); + CString fullName = ConvertToString(filename.GetTaggedValue()); + dirname.Update(PathHelper::ResolveDirPath(thread_, fullName)); CJSInfo cjsInfo(module, require, exports, filename, dirname); RequireManager::InitializeCommonJS(thread_, cjsInfo); if (aotFileManager_->IsLoadMain(jsPandaFile, entryPoint.data())) { EcmaRuntimeStatScope runtimeStateScope(vm_); + isAotEntry_ = true; InvokeEcmaAotEntrypoint(func, thisArg, jsPandaFile, entryPoint, &cjsInfo); } else { // Execute main function @@ -386,6 +400,15 @@ JSHandle EcmaContext::FindOrCreateConstPool(const JSPandaFile *jsP panda_file::IndexAccessor indexAccessor(*jsPandaFile->GetPandaFile(), id); int32_t index = static_cast(indexAccessor.GetHeaderIndex()); JSTaggedValue constpool = FindConstpool(jsPandaFile, index); + // In the taskpool thread, there is a case where the Function object is serialized before InitForCurrentThread. 
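Review note on the constpool lookup in this hunk: the added code retries the cache after lazily loading the AOT ai file, and only creates a fresh constpool when both lookups miss. Below is a standalone sketch of that lookup order; `ConstPoolCache`, `LoadAiFile` and the key type here are simplified stand-ins for illustration, not the engine's real API.
```cpp
// Sketch, assuming simplified types: find -> lazy ai-file load -> retry -> create.
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <utility>

using ConstPool = std::string;            // stand-in for the real constpool object
using Key = std::pair<std::string, int>;  // (panda file, constpool header index)

class ConstPoolCache {
public:
    ConstPool FindOrCreate(const Key &key, bool aotEnabled)
    {
        if (auto hit = Find(key)) {
            return *hit;                              // fast path: already cached
        }
        if (aotEnabled && LoadAiFile(key.first)) {    // slow path: deserialized AOT data
            if (auto hit = Find(key)) {               // may populate the cache lazily
                return *hit;
            }
        }
        ConstPool fresh = "constpool:" + key.first;   // miss on both: create and cache
        pools_[key] = fresh;
        return fresh;
    }

private:
    std::optional<ConstPool> Find(const Key &key) const
    {
        auto it = pools_.find(key);
        return it == pools_.end() ? std::nullopt : std::optional<ConstPool>(it->second);
    }

    static bool LoadAiFile(const std::string &file)
    {
        std::cout << "trying to load ai data for " << file << "\n";
        return false;                                 // pretend no AOT data exists
    }

    std::map<Key, ConstPool> pools_;
};

int main()
{
    ConstPoolCache cache;
    std::cout << cache.FindOrCreate({"app.abc", 0}, /*aotEnabled=*/true) << "\n";
}
```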
+ // Serializing a Function creates a constpool. On the slow path, the default deserialized constpool + // uses non-lazy string loading, so accessing the constpool of a serialized Function returns a hole. + if (constpool.IsHole() && ecmascript::AnFileDataManager::GetInstance()->IsEnable()) { + bool result = GetAOTFileManager()->LoadAiFile(jsPandaFile); + if (result) { + constpool = FindConstpool(jsPandaFile, index); + } + } if (constpool.IsHole()) { JSHandle<ConstantPool> newConstpool = ConstantPool::CreateConstPool(vm_, jsPandaFile, id); AddConstpool(jsPandaFile, newConstpool.GetTaggedValue(), index); @@ -508,7 +531,7 @@ void EcmaContext::HandleUncaughtException(JSTaggedValue exception) // if caught exceptionHandle type is JSError thread_->ClearException(); if (exceptionHandle->IsJSError()) { - PrintJSErrorInfo(exceptionHandle); + PrintJSErrorInfo(thread_, exceptionHandle); return; } JSHandle<EcmaString> result = JSTaggedValue::ToString(thread_, exceptionHandle); @@ -516,14 +539,33 @@ void EcmaContext::HandleUncaughtException(JSTaggedValue exception) LOG_NO_TAG(ERROR) << string; } -void EcmaContext::PrintJSErrorInfo(const JSHandle<JSTaggedValue> &exceptionInfo) -{ - JSHandle<JSTaggedValue> nameKey = thread_->GlobalConstants()->GetHandledNameString(); - JSHandle<JSTaggedValue> name(JSObject::GetProperty(thread_, exceptionInfo, nameKey).GetValue()); - JSHandle<JSTaggedValue> msgKey = thread_->GlobalConstants()->GetHandledMessageString(); - JSHandle<JSTaggedValue> msg(JSObject::GetProperty(thread_, exceptionInfo, msgKey).GetValue()); - JSHandle<JSTaggedValue> stackKey = thread_->GlobalConstants()->GetHandledStackString(); - JSHandle<JSTaggedValue> stack(JSObject::GetProperty(thread_, exceptionInfo, stackKey).GetValue()); +// static +void EcmaContext::PrintJSErrorInfo(JSThread *thread, const JSHandle<JSTaggedValue> &exceptionInfo) +{ + JSHandle<JSTaggedValue> nameKey = thread->GlobalConstants()->GetHandledNameString(); + JSHandle<JSTaggedValue> nameValue = JSObject::GetProperty(thread, exceptionInfo, nameKey).GetValue(); + JSHandle<EcmaString> name = JSTaggedValue::ToString(thread, nameValue); + // JSTaggedValue::ToString may cause an exception. In this case, do not return; use "" instead. + if (thread->HasPendingException()) { + thread->ClearException(); + name = thread->GetEcmaVM()->GetFactory()->NewFromStdString(""); + } + JSHandle<JSTaggedValue> msgKey = thread->GlobalConstants()->GetHandledMessageString(); + JSHandle<JSTaggedValue> msgValue = JSObject::GetProperty(thread, exceptionInfo, msgKey).GetValue(); + JSHandle<EcmaString> msg = JSTaggedValue::ToString(thread, msgValue); + // JSTaggedValue::ToString may cause an exception. In this case, do not return; use "" instead. + if (thread->HasPendingException()) { + thread->ClearException(); + msg = thread->GetEcmaVM()->GetFactory()->NewFromStdString(""); + } + JSHandle<JSTaggedValue> stackKey = thread->GlobalConstants()->GetHandledStackString(); + JSHandle<JSTaggedValue> stackValue = JSObject::GetProperty(thread, exceptionInfo, stackKey).GetValue(); + JSHandle<EcmaString> stack = JSTaggedValue::ToString(thread, stackValue); + // JSTaggedValue::ToString may cause an exception. In this case, do not return; use "" instead.
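Review note: the name/msg/stack blocks in PrintJSErrorInfo repeat the same convert-then-clear-exception fallback three times. A small helper would keep the three call sites identical; `ToStringOrEmpty` below is hypothetical (not in the codebase), sketched standalone with simplified stand-in types.
```cpp
// Sketch, assuming simplified types: fold "ToString, then swallow any pending
// exception and fall back to an empty string" into one helper.
#include <iostream>
#include <optional>
#include <string>

struct Thread {
    bool pendingException = false;
    void ClearException() { pendingException = false; }
};

// Stand-in for JSTaggedValue::ToString: may "throw" by setting the pending flag.
std::string ToString(Thread &thread, const std::optional<std::string> &value)
{
    if (!value) {
        thread.pendingException = true;
        return {};
    }
    return *value;
}

std::string ToStringOrEmpty(Thread &thread, const std::optional<std::string> &value)
{
    std::string result = ToString(thread, value);
    if (thread.pendingException) {   // swallow the conversion error, keep printing
        thread.ClearException();
        return "";
    }
    return result;
}

int main()
{
    Thread thread;
    std::cout << "name: "  << ToStringOrEmpty(thread, std::string("TypeError")) << "\n";
    std::cout << "stack: " << ToStringOrEmpty(thread, std::nullopt) << "\n"; // empty
}
```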
+ if (thread->HasPendingException()) { + thread->ClearException(); + stack = thread->GetEcmaVM()->GetFactory()->NewFromStdString(""); + } CString nameBuffer = ConvertToString(*name); CString msgBuffer = ConvertToString(*msg); @@ -531,6 +573,14 @@ void EcmaContext::PrintJSErrorInfo(const JSHandle &exceptionInfo) LOG_NO_TAG(ERROR) << nameBuffer << ": " << msgBuffer << "\n" << stackBuffer; } +bool EcmaContext::HasPendingJob() +{ + if (isProcessingPendingJob_) { + return true; + } + return job::MicroJobQueue::HasPendingJob(thread_, GetMicroJobQueue()); +} + bool EcmaContext::ExecutePromisePendingJob() { if (isProcessingPendingJob_) { @@ -604,6 +654,7 @@ void EcmaContext::CheckAndDestroy(JSThread *thread, EcmaContext *context) { if (thread->EraseContext(context)) { Destroy(context); + return; } LOG_ECMA(FATAL) << "CheckAndDestroy a nonexistent context."; } @@ -643,6 +694,11 @@ void EcmaContext::Iterate(const RootVisitor &v, const RootRangeVisitor &rv) rv(ecmascript::Root::ROOT_HANDLE, ObjectSlot(ToUintPtr(start)), ObjectSlot(ToUintPtr(end))); } } + + if (!joinStack_.empty()) { + rv(Root::ROOT_VM, ObjectSlot(ToUintPtr(&joinStack_.front())), + ObjectSlot(ToUintPtr(&joinStack_.back()) + JSTaggedValue::TaggedTypeSize())); + } } size_t EcmaContext::IterateHandle(const RootRangeVisitor &rangeVisitor) @@ -747,4 +803,56 @@ void EcmaContext::DumpAOTInfo() const { aotFileManager_->DumpAOTInfo(); } + +bool EcmaContext::JoinStackPushFastPath(JSHandle receiver) +{ + if (JSTaggedValue::SameValue(joinStack_[0], JSTaggedValue::Hole())) { + joinStack_[0] = receiver.GetTaggedValue(); + return true; + } + return JoinStackPush(receiver); +} + +bool EcmaContext::JoinStackPush(JSHandle receiver) +{ + uint32_t capacity = joinStack_.size(); + JSTaggedValue receiverValue = receiver.GetTaggedValue(); + for (size_t i = 0; i < capacity; ++i) { + if (JSTaggedValue::SameValue(joinStack_[i], JSTaggedValue::Hole())) { + joinStack_[i] = receiverValue; + return true; + } + if (JSTaggedValue::SameValue(joinStack_[i], receiverValue)) { + return false; + } + } + joinStack_.emplace_back(receiverValue); + return true; +} + +void EcmaContext::JoinStackPopFastPath(JSHandle receiver) +{ + uint32_t length = joinStack_.size(); + if (JSTaggedValue::SameValue(joinStack_[0], receiver.GetTaggedValue()) && length == MIN_JOIN_STACK_SIZE) { + joinStack_[0] = JSTaggedValue::Hole(); + } else { + JoinStackPop(receiver); + } +} + +void EcmaContext::JoinStackPop(JSHandle receiver) +{ + uint32_t length = joinStack_.size(); + for (size_t i = 0; i < length; ++i) { + if (JSTaggedValue::SameValue(joinStack_[i], receiver.GetTaggedValue())) { + if (i == 0 && length > MIN_JOIN_STACK_SIZE) { + joinStack_ = {JSTaggedValue::Hole(), JSTaggedValue::Hole()}; + break; + } else { + joinStack_[i] = JSTaggedValue::Hole(); + break; + } + } + } +} } // namespace panda::ecmascript diff --git a/ecmascript/ecma_context.h b/ecmascript/ecma_context.h index 63ea71507d4c7568727b09f4fd9b60522f23e49d..ea10d681060a0b0bb3b3ff36adae06bf08a2bb2e 100644 --- a/ecmascript/ecma_context.h +++ b/ecmascript/ecma_context.h @@ -101,6 +101,8 @@ public: bool Initialize(); + bool HasPendingJob(); + bool ExecutePromisePendingJob(); static EcmaContext *ConstCast(const EcmaContext *context) @@ -108,6 +110,11 @@ public: return const_cast(context); } + bool IsInitialized() const + { + return initialized_; + } + ModuleManager *GetModuleManager() const { return moduleManager_; @@ -213,7 +220,7 @@ public: JSHandle GetMicroJobQueue() const; - void PrintJSErrorInfo(const JSHandle &exceptionInfo); + 
static void PrintJSErrorInfo(JSThread *thread, const JSHandle &exceptionInfo); void Iterate(const RootVisitor &v, const RootRangeVisitor &rv); static void MountContext(JSThread *thread); static void UnmountContext(JSThread *thread); @@ -384,6 +391,26 @@ public: return &globalConst_; } + bool JoinStackPushFastPath(JSHandle receiver); + bool JoinStackPush(JSHandle receiver); + void JoinStackPopFastPath(JSHandle receiver); + void JoinStackPop(JSHandle receiver); + + void SetJsonStringifyCache(size_t index, CVector> &value) + { + stringifyCache_[index] = value; + } + + CVector> GetJsonStringifyCache(size_t index) + { + return stringifyCache_[index]; + } + + bool IsAotEntry() + { + return isAotEntry_; + } + private: void CJSExecution(JSHandle &func, JSHandle &thisArg, const JSPandaFile *jsPandaFile, std::string_view entryPoint); @@ -401,7 +428,7 @@ private: bool isUncaughtExceptionRegistered_ {false}; bool isProcessingPendingJob_ {false}; - + bool initialized_ {false}; ObjectFactory *factory_ {nullptr}; // VM execution states. @@ -461,6 +488,13 @@ private: uint64_t stackLimit_ {0}; PropertiesCache *propertiesCache_ {nullptr}; GlobalEnvConstants globalConst_; + // Join Stack + static constexpr uint32_t MIN_JOIN_STACK_SIZE = 2; + CVector joinStack_ {JSTaggedValue::Hole(), JSTaggedValue::Hole()}; + // json stringify cache + static constexpr uint32_t STRINGIFY_CACHE_SIZE = 64; + std::array>, STRINGIFY_CACHE_SIZE> stringifyCache_ {}; + bool isAotEntry_ { false }; friend class EcmaHandleScope; friend class JSPandaFileExecutor; diff --git a/ecmascript/ecma_handle_scope.cpp b/ecmascript/ecma_handle_scope.cpp index 63ba4dcb3bf393cff6f2c81d4b20c351b1cce859..a022ce52d29beb574f208dff4c25ccf5f57c630a 100644 --- a/ecmascript/ecma_handle_scope.cpp +++ b/ecmascript/ecma_handle_scope.cpp @@ -25,9 +25,9 @@ EcmaHandleScope::EcmaHandleScope(JSThread *thread) : thread_(thread) prevEnd_ = context->handleScopeStorageEnd_; prevHandleStorageIndex_ = context->currentHandleStorageIndex_; #ifdef ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK - thread_->HandleScopeCountAdd(); - prevHandleScope_ = thread->GetLastHandleScope(); - thread_->SetLastHandleScope(this); + context->HandleScopeCountAdd(); + prevHandleScope_ = context->GetLastHandleScope(); + context->SetLastHandleScope(this); #endif } @@ -65,14 +65,18 @@ uintptr_t EcmaHandleScope::NewHandle(JSThread *thread, JSTaggedType value) LOG_ECMA(INFO) << stack.str(); } } -#endif -#if ECMASCRIPT_ENABLE_NEW_HANDLE_CHECK - thread->CheckJSTaggedType(value); #endif auto result = context->handleScopeStorageNext_; if (result == context->handleScopeStorageEnd_) { result = reinterpret_cast(context->ExpandHandleStorage()); } +#if ECMASCRIPT_ENABLE_NEW_HANDLE_CHECK + thread->CheckJSTaggedType(value); + if (result == nullptr) { + LOG_ECMA(ERROR) << "result is nullptr, New handle fail!"; + return nullptr; + } +#endif // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) context->handleScopeStorageNext_ = result + 1; *result = value; diff --git a/ecmascript/ecma_macros.h b/ecmascript/ecma_macros.h index 6d4ac758d955085d7192ec11bc26a1529936a44a..d9a256d0acccfb9e66d3ea19ed0749d2cc9d6030 100644 --- a/ecmascript/ecma_macros.h +++ b/ecmascript/ecma_macros.h @@ -51,9 +51,9 @@ // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) #define SET_VALUE_WITH_BARRIER(thread, addr, offset, value) \ if ((value).IsHeapObject()) { \ - Barriers::SetObject(thread, addr, offset, (value).GetRawData()); \ + Barriers::SetObject(thread, addr, offset, (value).GetRawData()); \ } else { \ - Barriers::SetPrimitive(addr, 
offset, (value).GetRawData()); \ + Barriers::SetPrimitive(addr, offset, (value).GetRawData()); \ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) @@ -67,31 +67,31 @@ { \ /* Note: We can't statically decide the element type is a primitive or heap object, especially for */ \ /* dynamically-typed languages like JavaScript. So we simply skip the read-barrier. */ \ - return JSTaggedValue(Barriers::GetValue(this, offset)); \ + return JSTaggedValue(Barriers::GetValue(this, offset)); \ } \ template \ void Set##name(const JSThread *thread, JSHandle value, BarrierMode mode = WRITE_BARRIER) \ { \ if (mode == WRITE_BARRIER) { \ if (value.GetTaggedValue().IsHeapObject()) { \ - Barriers::SetObject(thread, this, offset, value.GetTaggedValue().GetRawData()); \ + Barriers::SetObject(thread, this, offset, value.GetTaggedValue().GetRawData()); \ } else { \ - Barriers::SetPrimitive(this, offset, value.GetTaggedValue().GetRawData()); \ + Barriers::SetPrimitive(this, offset, value.GetTaggedValue().GetRawData()); \ } \ } else { \ - Barriers::SetPrimitive(this, offset, value.GetTaggedValue().GetRawData()); \ + Barriers::SetPrimitive(this, offset, value.GetTaggedValue().GetRawData()); \ } \ } \ void Set##name(const JSThread *thread, JSTaggedValue value, BarrierMode mode = WRITE_BARRIER) \ { \ if (mode == WRITE_BARRIER) { \ if (value.IsHeapObject()) { \ - Barriers::SetObject(thread, this, offset, value.GetRawData()); \ + Barriers::SetObject(thread, this, offset, value.GetRawData()); \ } else { \ - Barriers::SetPrimitive(this, offset, value.GetRawData()); \ + Barriers::SetPrimitive(this, offset, value.GetRawData()); \ } \ } else { \ - Barriers::SetPrimitive(this, offset, value.GetRawData()); \ + Barriers::SetPrimitive(this, offset, value.GetRawData()); \ } \ } @@ -105,11 +105,11 @@ static constexpr size_t endOffset = (offset) + sizeof(sizeType); \ inline void Set##name(type value) \ { \ - Barriers::SetPrimitive(this, offset, value); \ + Barriers::SetPrimitive(this, offset, value); \ } \ inline type Get##name() const \ { \ - return Barriers::GetValue(this, offset); \ + return Barriers::GetValue(this, offset); \ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) @@ -173,11 +173,11 @@ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define RETURN_IF_ABRUPT_COMPLETION(thread) \ - do { \ +#define RETURN_IF_ABRUPT_COMPLETION(thread) \ + do { \ if ((thread)->HasPendingException()) { \ - return; \ - } \ + return; \ + } \ } while (false) // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) @@ -196,6 +196,16 @@ } \ } while (false) +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, value) \ + do { \ + if ((thread)->HasPendingException()) { \ + auto ecmaContext = thread->GetCurrentEcmaContext(); \ + ecmaContext->JoinStackPopFastPath(value); \ + return JSTaggedValue::Exception(); \ + } \ + } while (false) + // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) #define RETURN_HANDLE_IF_ABRUPT_COMPLETION(type, thread) \ do { \ @@ -422,7 +432,7 @@ { \ Dump(std::cout); \ } \ - void DumpForSnapshot(std::vector> &vec) const; + void DumpForSnapshot(std::vector &vec) const; #endif // defined(__cplusplus) @@ -435,47 +445,50 @@ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define DECL_VISIT_ARRAY(BEGIN_OFFSET, LENGTH) \ - void VisitRangeSlot(const EcmaObjectRangeVisitor &visitor) \ - { \ - size_t endOffset = (BEGIN_OFFSET) + (LENGTH) * JSTaggedValue::TaggedTypeSize(); \ - visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), ObjectSlot(ToUintPtr(this) + endOffset), false); \ 
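Review note on the DECL_VISIT_* macro changes below: the visitor's trailing `false`/`true` flag becomes a three-valued `VisitObjectArea` (NORMAL, NATIVE_POINTER, IN_OBJECT), which reads better at call sites than a bare bool and leaves room for more area kinds. A minimal standalone sketch of the new visitor shape; the field layout in `VisitFields` is invented for illustration.
```cpp
// Sketch, assuming a simplified object layout: enum-tagged range visitation.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>

enum class VisitObjectArea : uint8_t { NORMAL, NATIVE_POINTER, IN_OBJECT };

using ObjectSlot = uintptr_t;
using RangeVisitor =
    std::function<void(ObjectSlot begin, ObjectSlot end, VisitObjectArea area)>;

void VisitFields(uintptr_t object, const RangeVisitor &visitor)
{
    // Layout assumed for illustration: 2 tagged words, then 1 native pointer.
    constexpr size_t wordSize = sizeof(uintptr_t);
    visitor(object, object + 2 * wordSize, VisitObjectArea::NORMAL);
    visitor(object + 2 * wordSize, object + 3 * wordSize, VisitObjectArea::NATIVE_POINTER);
}

int main()
{
    VisitFields(0x1000, [](ObjectSlot begin, ObjectSlot end, VisitObjectArea area) {
        std::cout << std::hex << begin << ".." << end
                  << " area=" << static_cast<int>(area) << "\n";
    });
}
```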
+#define DECL_VISIT_ARRAY(BEGIN_OFFSET, LENGTH) \ + void VisitRangeSlot(const EcmaObjectRangeVisitor &visitor) \ + { \ + size_t endOffset = (BEGIN_OFFSET) + (LENGTH) * JSTaggedValue::TaggedTypeSize(); \ + visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), \ + ObjectSlot(ToUintPtr(this) + endOffset), VisitObjectArea::NORMAL); \ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define DECL_VISIT_OBJECT(BEGIN_OFFSET, END_OFFSET) \ - void VisitRangeSlot(const EcmaObjectRangeVisitor &visitor) \ - { \ - visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), \ - ObjectSlot(ToUintPtr(this) + (END_OFFSET)), false); \ +#define DECL_VISIT_OBJECT(BEGIN_OFFSET, END_OFFSET) \ + void VisitRangeSlot(const EcmaObjectRangeVisitor &visitor) \ + { \ + visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), \ + ObjectSlot(ToUintPtr(this) + (END_OFFSET)), VisitObjectArea::NORMAL); \ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define DECL_VISIT_NATIVE_FIELD(BEGIN_OFFSET, END_OFFSET) \ - void VisitRangeSlotForNative(const EcmaObjectRangeVisitor &visitor) \ - { \ - visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), ObjectSlot(ToUintPtr(this) + (END_OFFSET)), true); \ +#define DECL_VISIT_NATIVE_FIELD(BEGIN_OFFSET, END_OFFSET) \ + void VisitRangeSlotForNative(const EcmaObjectRangeVisitor &visitor) \ + { \ + visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), \ + ObjectSlot(ToUintPtr(this) + (END_OFFSET)), VisitObjectArea::NATIVE_POINTER); \ } // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define DECL_VISIT_OBJECT_FOR_JS_OBJECT(PARENTCLASS, BEGIN_OFFSET, END_OFFSET) \ - void VisitRangeSlot(const EcmaObjectRangeVisitor &visitor) \ - { \ - VisitObjects(visitor); \ - /* visit in object fields */ \ - auto objSize = this->GetClass()->GetObjectSize(); \ - if (objSize > SIZE) { \ - visitor(this, ObjectSlot(ToUintPtr(this) + SIZE), ObjectSlot(ToUintPtr(this) + objSize), false); \ - } \ - } \ - void VisitObjects(const EcmaObjectRangeVisitor &visitor) \ - { \ - PARENTCLASS::VisitObjects(visitor); \ - if ((BEGIN_OFFSET) == (END_OFFSET)) { \ - return; \ - } \ - visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), \ - ObjectSlot(ToUintPtr(this) + (END_OFFSET)), false); \ +#define DECL_VISIT_OBJECT_FOR_JS_OBJECT(PARENTCLASS, BEGIN_OFFSET, END_OFFSET) \ + void VisitRangeSlot(const EcmaObjectRangeVisitor &visitor) \ + { \ + VisitObjects(visitor); \ + /* visit in object fields */ \ + auto objSize = this->GetClass()->GetObjectSize(); \ + if (objSize > SIZE) { \ + visitor(this, ObjectSlot(ToUintPtr(this) + SIZE), \ + ObjectSlot(ToUintPtr(this) + objSize), VisitObjectArea::IN_OBJECT); \ + } \ + } \ + void VisitObjects(const EcmaObjectRangeVisitor &visitor) \ + { \ + PARENTCLASS::VisitObjects(visitor); \ + if ((BEGIN_OFFSET) == (END_OFFSET)) { \ + return; \ + } \ + visitor(this, ObjectSlot(ToUintPtr(this) + (BEGIN_OFFSET)), \ + ObjectSlot(ToUintPtr(this) + (END_OFFSET)), VisitObjectArea::NORMAL); \ } #if ECMASCRIPT_ENABLE_CAST_CHECK @@ -540,8 +553,8 @@ LOG_FULL(FATAL) << __func__ << ":" << __LINE__ << " begin: " << (begin) << " end: " << (end); \ } -#define CHECK_JS_THREAD(vm) \ - if (!(vm)->GetJSThread()->IsCrossThreadExecutionEnable()) { \ +#define CHECK_JS_THREAD(vm) \ + if (!(vm)->GetJSThread()->IsCrossThreadExecutionEnable()) { \ ASSERT((vm)->GetJSThread()->GetThreadId() == JSThread::GetCurrentThreadId()); \ } diff --git a/ecmascript/ecma_param_configuration.h b/ecmascript/ecma_param_configuration.h index 
38a38b97d614695f6c240d2d8c11bf0e826b6d85..d10c735038f3936ed98cc3c14f0da3b0e012ec52 100644 --- a/ecmascript/ecma_param_configuration.h +++ b/ecmascript/ecma_param_configuration.h @@ -57,11 +57,12 @@ public: defaultMachineCodeSpaceSize_ = 2_MB; semiSpaceTriggerConcurrentMark_ = 1_MB; semiSpaceOvershootSize_ = 2_MB; + oldSpaceOvershootSize_ = 4_MB; outOfMemoryOvershootSize_ = 2_MB; minAllocLimitGrowingStep_ = 2_MB; minGrowingStep_ = 4_MB; maxStackSize_ = 128_KB; - maxJSSerializerSize_ = 16_MB; + maxJSSerializerSize_ = 8_MB; } else if (maxHeapSize_ < HIGH_MEMORY) { // 128_MB ~ 256_MB minSemiSpaceSize_ = 2_MB; maxSemiSpaceSize_ = 8_MB; @@ -71,25 +72,27 @@ public: defaultMachineCodeSpaceSize_ = 2_MB; semiSpaceTriggerConcurrentMark_ = 1.5_MB; semiSpaceOvershootSize_ = 2_MB; + oldSpaceOvershootSize_ = 8_MB; outOfMemoryOvershootSize_ = 2_MB; minAllocLimitGrowingStep_ = 4_MB; minGrowingStep_ = 8_MB; maxStackSize_ = 128_KB; - maxJSSerializerSize_ = 32_MB; + maxJSSerializerSize_ = 16_MB; } else { // 256_MB ~ 384_MB minSemiSpaceSize_ = 2_MB; maxSemiSpaceSize_ = 16_MB; defaultReadOnlySpaceSize_ = 256_KB; - defaultNonMovableSpaceSize_ = 10_MB; + defaultNonMovableSpaceSize_ = 18_MB; defaultSnapshotSpaceSize_ = 4_MB; defaultMachineCodeSpaceSize_ = 8_MB; semiSpaceTriggerConcurrentMark_ = 1.5_MB; semiSpaceOvershootSize_ = 2_MB; + oldSpaceOvershootSize_ = 8_MB; outOfMemoryOvershootSize_ = 2_MB; minAllocLimitGrowingStep_ = 8_MB; minGrowingStep_ = 16_MB; maxStackSize_ = 128_KB; - maxJSSerializerSize_ = 32_MB; + maxJSSerializerSize_ = 16_MB; } } @@ -138,6 +141,11 @@ public: return semiSpaceOvershootSize_; } + size_t GetOldSpaceOvershootSize() const + { + return oldSpaceOvershootSize_; + } + size_t GetOutOfMemoryOvershootSize() const { return outOfMemoryOvershootSize_; @@ -189,6 +197,7 @@ private: size_t defaultMachineCodeSpaceSize_ {0}; size_t semiSpaceTriggerConcurrentMark_ {0}; size_t semiSpaceOvershootSize_ {0}; + size_t oldSpaceOvershootSize_ {0}; size_t outOfMemoryOvershootSize_ {0}; size_t minAllocLimitGrowingStep_ {0}; size_t minGrowingStep_ {0}; diff --git a/ecmascript/ecma_runtime_call_info.h b/ecmascript/ecma_runtime_call_info.h index 7a41f9bdbadb946d630de3561ddd17bb7e18c03e..8e41e5ca474a2d400715ed79a1257e56b58452dc 100644 --- a/ecmascript/ecma_runtime_call_info.h +++ b/ecmascript/ecma_runtime_call_info.h @@ -156,9 +156,9 @@ public: } } - inline void SetCallArg(int32_t argsLength, const TaggedArray* args) + inline void SetCallArg(uint32_t argsLength, const TaggedArray* args) { - for (int32_t i = 0; i < argsLength; i++) { + for (uint32_t i = 0; i < argsLength; i++) { SetCallArg(i, args->Get(GetThread(), i)); } } diff --git a/ecmascript/ecma_string-inl.h b/ecmascript/ecma_string-inl.h index 6d5d7b74894b9a7b2c408e6fe617ef3d92d111c1..1742f99426d90b46c37cbe89fdf502b8b20cb8e5 100644 --- a/ecmascript/ecma_string-inl.h +++ b/ecmascript/ecma_string-inl.h @@ -139,6 +139,13 @@ inline EcmaString *EcmaString::CreateLineStringWithSpaceType(const EcmaVM *vm, s return string; } +inline SlicedString *EcmaString::CreateSlicedString(const EcmaVM *vm, MemSpaceType type) +{ + auto slicedString = SlicedString::Cast(vm->GetFactory()->AllocSlicedStringObject(type)); + slicedString->SetRawHashcode(0); + return slicedString; +} + inline EcmaString *EcmaString::CreateConstantString(const EcmaVM *vm, const uint8_t *utf8Data, size_t length, bool compressed, MemSpaceType type, uint32_t idOffset) { @@ -154,6 +161,7 @@ inline EcmaString *EcmaString::CreateConstantString(const EcmaVM *vm, const uint inline EcmaString 
*EcmaString::CreateTreeString(const EcmaVM *vm, const JSHandle<EcmaString> &left, const JSHandle<EcmaString> &right, uint32_t length, bool compressed) { + ECMA_STRING_CHECK_LENGTH_AND_TRHOW(vm, length); auto thread = vm->GetJSThread(); auto string = TreeEcmaString::Cast(vm->GetFactory()->AllocTreeStringObject()); string->SetLength(length, compressed); @@ -167,36 +175,38 @@ inline EcmaString *EcmaString::CreateTreeString(const EcmaVM *vm, EcmaString *EcmaString::FastSubUtf8String(const EcmaVM *vm, const JSHandle<EcmaString> &src, uint32_t start, uint32_t length) { - ASSERT(src->IsLineOrConstantString()); - auto string = CreateLineString(vm, length, true); + JSHandle<EcmaString> string(vm->GetJSThread(), CreateLineString(vm, length, true)); // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) + FlatStringInfo srcFlat = FlattenAllString(vm, src); Span<uint8_t> dst(string->GetDataUtf8Writable(), length); - Span<const uint8_t> source(src->GetDataUtf8() + start, length); + Span<const uint8_t> source(srcFlat.GetDataUtf8() + start, length); EcmaString::MemCopyChars(dst, length, source, length); - ASSERT_PRINT(CanBeCompressed(string), "canBeCompresse does not match the real value!"); - return string; + ASSERT_PRINT(CanBeCompressed(*string), "canBeCompressed does not match the real value!"); + return *string; } /* static */ EcmaString *EcmaString::FastSubUtf16String(const EcmaVM *vm, const JSHandle<EcmaString> &src, uint32_t start, uint32_t length) { - ASSERT(src->IsLineOrConstantString()); - bool canBeCompressed = CanBeCompressed(src->GetDataUtf16() + start, length); - auto string = CreateLineString(vm, length, canBeCompressed); + FlatStringInfo srcFlat = FlattenAllString(vm, src); + bool canBeCompressed = CanBeCompressed(srcFlat.GetDataUtf16() + start, length); + JSHandle<EcmaString> string(vm->GetJSThread(), CreateLineString(vm, length, canBeCompressed)); + // CreateLineString may trigger GC, so flatten src again + srcFlat = FlattenAllString(vm, src); if (canBeCompressed) { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) - CopyChars(string->GetDataUtf8Writable(), src->GetDataUtf16() + start, length); + CopyChars(string->GetDataUtf8Writable(), srcFlat.GetDataUtf16() + start, length); } else { uint32_t len = length * (sizeof(uint16_t) / sizeof(uint8_t)); // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) Span<uint16_t> dst(string->GetDataUtf16Writable(), length); - Span<const uint16_t> source(src->GetDataUtf16() + start, length); + Span<const uint16_t> source(srcFlat.GetDataUtf16() + start, length); EcmaString::MemCopyChars(dst, len, source, len); } - ASSERT_PRINT(canBeCompressed == CanBeCompressed(string), "canBeCompresse does not match the real value!"); - return string; + ASSERT_PRINT(canBeCompressed == CanBeCompressed(*string), "canBeCompressed does not match the real value!"); + return *string; } inline uint16_t *EcmaString::GetData() const @@ -208,10 +218,10 @@ inline uint16_t *EcmaString::GetData() const inline const uint8_t *EcmaString::GetDataUtf8() const { ASSERT_PRINT(IsUtf8(), "EcmaString: Read data as utf8 for utf16 string"); - if (IsConstantString()) { - return ConstantString::Cast(this)->GetConstantData(); + if (IsLineString()) { + return reinterpret_cast<uint8_t *>(GetData()); } - return reinterpret_cast<uint8_t *>(GetData()); + return ConstantString::Cast(this)->GetConstantData(); } inline const uint16_t *EcmaString::GetDataUtf16() const @@ -237,11 +247,12 @@ inline uint16_t *EcmaString::GetDataUtf16Writable() inline size_t EcmaString::GetUtf8Length(bool modify) const { - ASSERT(IsLineOrConstantString()); if (!IsUtf16()) { return GetLength() + 1; // add place for zero in the end } - return
base::utf_helper::Utf16ToUtf8Size(GetData(), GetLength(), modify); + CVector tmpBuf; + const uint16_t *data = GetUtf16DataFlat(this, tmpBuf); + return base::utf_helper::Utf16ToUtf8Size(data, GetLength(), modify); } template @@ -253,12 +264,18 @@ inline uint16_t EcmaString::At(int32_t index) const return 0; } } - if (IsLineString()) { - return LineEcmaString::Cast(this)->Get(index); - } else if (IsConstantString()) { - return ConstantString::Cast(this)->Get(index); - } else { - return TreeEcmaString::Cast(this)->Get(index); + switch (GetStringType()) { + case JSType::LINE_STRING: + return LineEcmaString::Cast(this)->Get(index); + case JSType::CONSTANT_STRING: + return ConstantString::Cast(this)->Get(index); + case JSType::SLICED_STRING: + return SlicedString::Cast(this)->Get(index); + case JSType::TREE_STRING: + return TreeEcmaString::Cast(this)->Get(index); + default: + LOG_ECMA(FATAL) << "this branch is unreachable"; + UNREACHABLE(); } } @@ -336,6 +353,15 @@ void EcmaString::WriteToFlat(EcmaString *src, Char *buf, uint32_t maxLength) } continue; } + case JSType::SLICED_STRING: { + EcmaString *parent = EcmaString::Cast(SlicedString::Cast(src)->GetParent()); + if (src->IsUtf8()) { + CopyChars(buf, parent->GetDataUtf8() + SlicedString::Cast(src)->GetStartIndex(), length); + } else { + CopyChars(buf, parent->GetDataUtf16() + SlicedString::Cast(src)->GetStartIndex(), length); + } + return; + } default: LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -343,6 +369,21 @@ void EcmaString::WriteToFlat(EcmaString *src, Char *buf, uint32_t maxLength) } } +inline const uint8_t *FlatStringInfo::GetDataUtf8() const +{ + return string_->GetDataUtf8() + startIndex_; +} + +inline const uint16_t *FlatStringInfo::GetDataUtf16() const +{ + return string_->GetDataUtf16() + startIndex_; +} + +inline uint8_t *FlatStringInfo::GetDataUtf8Writable() const +{ + return string_->GetDataUtf8Writable() + startIndex_; +} + inline const uint8_t *EcmaStringAccessor::GetDataUtf8() { return string_->GetDataUtf8(); diff --git a/ecmascript/ecma_string.cpp b/ecmascript/ecma_string.cpp index c793807354d1e72c61dc00b8b765bc51adeb305f..7690ddfbbdb70c772e13c58e024ecd81ddb2bc7d 100644 --- a/ecmascript/ecma_string.cpp +++ b/ecmascript/ecma_string.cpp @@ -19,35 +19,44 @@ #include "ecmascript/mem/c_containers.h" namespace panda::ecmascript { -static constexpr int SMALL_STRING_SIZE = 128; EcmaString *EcmaString::Concat(const EcmaVM *vm, - const JSHandle &left, const JSHandle &right) + const JSHandle &left, const JSHandle &right, MemSpaceType type) { // allocator may trig gc and move src, need to hold it EcmaString *strLeft = *left; EcmaString *strRight = *right; uint32_t leftLength = strLeft->GetLength(); - if (leftLength == 0) { - return strRight; - } uint32_t rightLength = strRight->GetLength(); - if (rightLength == 0) { - return strLeft; - } - uint32_t newLength = leftLength + rightLength; if (newLength == 0) { return vm->GetFactory()->GetEmptyString().GetObject(); } - bool compressed = (strLeft->IsUtf8() && strRight->IsUtf8()); - + if (leftLength == 0) { + if (type == MemSpaceType::OLD_SPACE) { + Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast(*right)); + if (objectRegion->InYoungSpace()) { + return CopyStringToOldSpace(vm, right, rightLength, strRight->IsUtf8()); + } + } + return strRight; + } + if (rightLength == 0) { + if (type == MemSpaceType::OLD_SPACE) { + Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast(*left)); + if (objectRegion->InYoungSpace()) { + return 
CopyStringToOldSpace(vm, left, leftLength, strLeft->IsUtf8()); + } + } + return strLeft; + } // if the result string is small, make a LineString + bool compressed = (strLeft->IsUtf8() && strRight->IsUtf8()); if (newLength < TreeEcmaString::MIN_TREE_ECMASTRING_LENGTH) { ASSERT(strLeft->IsLineOrConstantString()); ASSERT(strRight->IsLineOrConstantString()); - auto newString = CreateLineString(vm, newLength, compressed); + auto newString = CreateLineStringWithSpaceType(vm, newLength, compressed, type); // retrieve strings after gc strLeft = *left; strRight = *right; @@ -84,6 +93,35 @@ EcmaString *EcmaString::Concat(const EcmaVM *vm, return CreateTreeString(vm, left, right, newLength, compressed); } +/* static */ +EcmaString *EcmaString::CopyStringToOldSpace(const EcmaVM *vm, const JSHandle<EcmaString> &original, + uint32_t length, bool compressed) +{ + if (original->IsConstantString()) { + return CreateConstantString(vm, original->GetDataUtf8(), length, MemSpaceType::OLD_SPACE); + } + JSHandle<EcmaString> newString(vm->GetJSThread(), + CreateLineStringWithSpaceType(vm, length, compressed, MemSpaceType::OLD_SPACE)); + auto strOrigin = FlattenAllString(vm, original); + if (compressed) { + // copy utf8 data + Span<uint8_t> sp(newString->GetDataUtf8Writable(), length); + Span<const uint8_t> srcSp(strOrigin.GetDataUtf8(), length); + EcmaString::MemCopyChars(sp, length, srcSp, length); + } else { + // copy utf16 data, converting from utf8 when the source is compressed + Span<uint16_t> sp(newString->GetDataUtf16Writable(), length); + if (strOrigin.IsUtf8()) { + EcmaString::CopyChars(sp.data(), strOrigin.GetDataUtf8(), length); + } else { + Span<const uint16_t> srcSp(strOrigin.GetDataUtf16(), length); + EcmaString::MemCopyChars(sp, length << 1U, srcSp, length << 1U); + } + } + ASSERT_PRINT(compressed == CanBeCompressed(*newString), "compressed does not match the real value!"); + return *newString; +} + /* static */ EcmaString *EcmaString::FastSubString(const EcmaVM *vm, const JSHandle<EcmaString> &src, uint32_t start, uint32_t length) @@ -95,11 +133,26 @@ EcmaString *EcmaString::FastSubString(const EcmaVM *vm, if (start == 0 && length == src->GetLength()) { return *src; } - auto srcFlat = JSHandle<EcmaString>(vm->GetJSThread(), Flatten(vm, src)); - if (srcFlat->IsUtf8()) { - return FastSubUtf8String(vm, srcFlat, start, length); + if (src->IsUtf8()) { + return FastSubUtf8String(vm, src, start, length); } - return FastSubUtf16String(vm, srcFlat, start, length); + return FastSubUtf16String(vm, src, start, length); } +/* static */ +EcmaString *EcmaString::GetSlicedString(const EcmaVM *vm, + const JSHandle<EcmaString> &src, uint32_t start, uint32_t length) +{ + ASSERT((start + length) <= src->GetLength()); + if (start == 0 && length == src->GetLength()) { + return *src; + } + JSHandle<SlicedString> slicedString(vm->GetJSThread(), CreateSlicedString(vm)); + FlatStringInfo srcFlat = FlattenAllString(vm, src); + slicedString->SetLength(length, srcFlat.GetString()->IsUtf8()); + slicedString->SetParent(vm->GetJSThread(), JSTaggedValue(srcFlat.GetString())); + slicedString->SetStartIndex(start + srcFlat.GetStartIndex()); + return *slicedString; } void EcmaString::WriteData(EcmaString *src, uint32_t start, uint32_t destSize, uint32_t length) @@ -153,38 +206,38 @@ int32_t EcmaString::Compare(const EcmaVM *vm, const JSHandle<EcmaString> &left, if (*left == *right) { return 0; } - auto leftFlat = JSHandle<EcmaString>(vm->GetJSThread(), Flatten(vm, left)); - auto rightFlat = JSHandle<EcmaString>(vm->GetJSThread(), Flatten(vm, right)); - EcmaString *lhs = *leftFlat; - EcmaString *rhs = *rightFlat; - int32_t lhsCount = static_cast<int32_t>(lhs->GetLength()); - int32_t rhsCount = static_cast<int32_t>(rhs->GetLength()); + FlatStringInfo lhs =
FlattenAllString(vm, left); + JSHandle string(vm->GetJSThread(), lhs.GetString()); + FlatStringInfo rhs = FlattenAllString(vm, right); + lhs.SetString(*string); + int32_t lhsCount = static_cast(lhs.GetLength()); + int32_t rhsCount = static_cast(rhs.GetLength()); int32_t countDiff = lhsCount - rhsCount; int32_t minCount = (countDiff < 0) ? lhsCount : rhsCount; - if (!lhs->IsUtf16() && !rhs->IsUtf16()) { - Span lhsSp(lhs->GetDataUtf8(), lhsCount); - Span rhsSp(rhs->GetDataUtf8(), rhsCount); + if (!lhs.IsUtf16() && !rhs.IsUtf16()) { + Span lhsSp(lhs.GetDataUtf8(), lhsCount); + Span rhsSp(rhs.GetDataUtf8(), rhsCount); int32_t charDiff = CompareStringSpan(lhsSp, rhsSp, minCount); if (charDiff != 0) { return charDiff; } - } else if (!lhs->IsUtf16()) { - Span lhsSp(lhs->GetDataUtf8(), lhsCount); - Span rhsSp(rhs->GetDataUtf16(), rhsCount); + } else if (!lhs.IsUtf16()) { + Span lhsSp(lhs.GetDataUtf8(), lhsCount); + Span rhsSp(rhs.GetDataUtf16(), rhsCount); int32_t charDiff = CompareStringSpan(lhsSp, rhsSp, minCount); if (charDiff != 0) { return charDiff; } - } else if (!rhs->IsUtf16()) { - Span lhsSp(lhs->GetDataUtf16(), rhsCount); - Span rhsSp(rhs->GetDataUtf8(), lhsCount); + } else if (!rhs.IsUtf16()) { + Span lhsSp(lhs.GetDataUtf16(), rhsCount); + Span rhsSp(rhs.GetDataUtf8(), lhsCount); int32_t charDiff = CompareStringSpan(lhsSp, rhsSp, minCount); if (charDiff != 0) { return charDiff; } } else { - Span lhsSp(lhs->GetDataUtf16(), lhsCount); - Span rhsSp(rhs->GetDataUtf16(), rhsCount); + Span lhsSp(lhs.GetDataUtf16(), lhsCount); + Span rhsSp(rhs.GetDataUtf16(), rhsCount); int32_t charDiff = CompareStringSpan(lhsSp, rhsSp, minCount); if (charDiff != 0) { return charDiff; @@ -250,13 +303,13 @@ int32_t EcmaString::LastIndexOf(Span &lhsSp, Span &rhsSp, in int32_t EcmaString::IndexOf(const EcmaVM *vm, const JSHandle &receiver, const JSHandle &search, int pos) { - EcmaString *lhs = *receiver; - EcmaString *rhs = *search; - if (lhs == nullptr || rhs == nullptr) { + EcmaString *lhstring = *receiver; + EcmaString *rhstring = *search; + if (lhstring == nullptr || rhstring == nullptr) { return -1; } - int32_t lhsCount = static_cast(lhs->GetLength()); - int32_t rhsCount = static_cast(rhs->GetLength()); + int32_t lhsCount = static_cast(lhstring->GetLength()); + int32_t rhsCount = static_cast(rhstring->GetLength()); if (pos > lhsCount) { return -1; @@ -279,24 +332,24 @@ int32_t EcmaString::IndexOf(const EcmaVM *vm, return -1; } - auto receiverFlat = JSHandle(vm->GetJSThread(), Flatten(vm, receiver)); - auto searchFlat = JSHandle(vm->GetJSThread(), Flatten(vm, search)); - lhs = *receiverFlat; - rhs = *searchFlat; + FlatStringInfo lhs = FlattenAllString(vm, receiver); + JSHandle string(vm->GetJSThread(), lhs.GetString()); + FlatStringInfo rhs = FlattenAllString(vm, search); + lhs.SetString(*string); - if (rhs->IsUtf8() && lhs->IsUtf8()) { - Span lhsSp(lhs->GetDataUtf8(), lhsCount); - Span rhsSp(rhs->GetDataUtf8(), rhsCount); + if (rhs.IsUtf8() && lhs.IsUtf8()) { + Span lhsSp(lhs.GetDataUtf8(), lhsCount); + Span rhsSp(rhs.GetDataUtf8(), rhsCount); return EcmaString::IndexOf(lhsSp, rhsSp, pos, max); - } else if (rhs->IsUtf16() && lhs->IsUtf16()) { // NOLINT(readability-else-after-return) - Span lhsSp(lhs->GetDataUtf16(), lhsCount); - Span rhsSp(rhs->GetDataUtf16(), rhsCount); + } else if (rhs.IsUtf16() && lhs.IsUtf16()) { // NOLINT(readability-else-after-return) + Span lhsSp(lhs.GetDataUtf16(), lhsCount); + Span rhsSp(rhs.GetDataUtf16(), rhsCount); return EcmaString::IndexOf(lhsSp, rhsSp, pos, max); - } else if 
(rhs->IsUtf16()) { + } else if (rhs.IsUtf16()) { return -1; } else { // NOLINT(readability-else-after-return) - Span lhsSp(lhs->GetDataUtf16(), lhsCount); - Span rhsSp(rhs->GetDataUtf8(), rhsCount); + Span lhsSp(lhs.GetDataUtf16(), lhsCount); + Span rhsSp(rhs.GetDataUtf8(), rhsCount); return EcmaString::IndexOf(lhsSp, rhsSp, pos, max); } } @@ -304,14 +357,14 @@ int32_t EcmaString::IndexOf(const EcmaVM *vm, int32_t EcmaString::LastIndexOf(const EcmaVM *vm, const JSHandle &receiver, const JSHandle &search, int pos) { - EcmaString *lhs = *receiver; - EcmaString *rhs = *search; - if (lhs == nullptr || rhs == nullptr) { + EcmaString *lhstring = *receiver; + EcmaString *rhstring = *search; + if (lhstring == nullptr || rhstring == nullptr) { return -1; } - int32_t lhsCount = static_cast(lhs->GetLength()); - int32_t rhsCount = static_cast(rhs->GetLength()); + int32_t lhsCount = static_cast(lhstring->GetLength()); + int32_t rhsCount = static_cast(rhstring->GetLength()); if (lhsCount < rhsCount) { return -1; } @@ -332,24 +385,23 @@ int32_t EcmaString::LastIndexOf(const EcmaVM *vm, return pos; } - auto receiverFlat = JSHandle(vm->GetJSThread(), Flatten(vm, receiver)); - auto searchFlat = JSHandle(vm->GetJSThread(), Flatten(vm, search)); - lhs = *receiverFlat; - rhs = *searchFlat; - - if (rhs->IsUtf8() && lhs->IsUtf8()) { - Span lhsSp(lhs->GetDataUtf8(), lhsCount); - Span rhsSp(rhs->GetDataUtf8(), rhsCount); + FlatStringInfo lhs = FlattenAllString(vm, receiver); + JSHandle string(vm->GetJSThread(), lhs.GetString()); + FlatStringInfo rhs = FlattenAllString(vm, search); + lhs.SetString(*string); + if (rhs.IsUtf8() && lhs.IsUtf8()) { + Span lhsSp(lhs.GetDataUtf8(), lhsCount); + Span rhsSp(rhs.GetDataUtf8(), rhsCount); return EcmaString::LastIndexOf(lhsSp, rhsSp, pos); - } else if (rhs->IsUtf16() && lhs->IsUtf16()) { // NOLINT(readability-else-after-return) - Span lhsSp(lhs->GetDataUtf16(), lhsCount); - Span rhsSp(rhs->GetDataUtf16(), rhsCount); + } else if (rhs.IsUtf16() && lhs.IsUtf16()) { // NOLINT(readability-else-after-return) + Span lhsSp(lhs.GetDataUtf16(), lhsCount); + Span rhsSp(rhs.GetDataUtf16(), rhsCount); return EcmaString::LastIndexOf(lhsSp, rhsSp, pos); - } else if (rhs->IsUtf16()) { + } else if (rhs.IsUtf16()) { return -1; } else { // NOLINT(readability-else-after-return) - Span lhsSp(lhs->GetDataUtf16(), lhsCount); - Span rhsSp(rhs->GetDataUtf8(), rhsCount); + Span lhsSp(lhs.GetDataUtf16(), lhsCount); + Span rhsSp(rhs.GetDataUtf8(), rhsCount); return EcmaString::LastIndexOf(lhsSp, rhsSp, pos); } } @@ -413,52 +465,84 @@ bool EcmaString::CanBeCompressed(const uint16_t *utf16Data, uint32_t utf16Len) bool EcmaString::EqualToSplicedString(const EcmaString *str1, const EcmaString *str2) { - ASSERT(IsLineOrConstantString()); - ASSERT(str1->IsLineOrConstantString() && str2->IsLineOrConstantString()); + ASSERT(NotTreeString()); + ASSERT(str1->NotTreeString() && str2->NotTreeString()); if (GetLength() != str1->GetLength() + str2->GetLength()) { return false; } if (IsUtf16()) { - if (str1->IsUtf8() && str2->IsUtf8()) { - return false; - } - if (EcmaString::StringsAreEqualUtf16(str1, GetDataUtf16(), str1->GetLength())) { - return EcmaString::StringsAreEqualUtf16(str2, GetDataUtf16() + str1->GetLength(), str2->GetLength()); + CVector buf; + const uint16_t *data = EcmaString::GetUtf16DataFlat(this, buf); + if (EcmaString::StringsAreEqualUtf16(str1, data, str1->GetLength())) { + return EcmaString::StringsAreEqualUtf16(str2, data + str1->GetLength(), str2->GetLength()); } } else { - if (str1->IsUtf16() 
|| str2->IsUtf16()) { - return false; - } - Span concatData(GetDataUtf8(), str1->GetLength()); - Span data1(str1->GetDataUtf8(), str1->GetLength()); - if (EcmaString::StringsAreEquals(concatData, data1)) { - concatData = Span(GetDataUtf8() + str1->GetLength(), str2->GetLength()); - Span data2(str2->GetDataUtf8(), str2->GetLength()); - return EcmaString::StringsAreEquals(concatData, data2); + CVector buf; + const uint8_t *data = EcmaString::GetUtf8DataFlat(this, buf); + if (EcmaString::StringsAreEqualUtf8(str1, data, str1->GetLength(), this->IsUtf8())) { + return EcmaString::StringsAreEqualUtf8(str2, data + str1->GetLength(), str2->GetLength(), this->IsUtf8()); } } return false; } /* static */ -bool EcmaString::StringsAreEqualSameUtfEncoding(EcmaString *str1, EcmaString *str2) -{ - if (str1->IsUtf16()) { - CVector buf1; - CVector buf2; - const uint16_t *data1 = EcmaString::GetUtf16DataFlat(str1, buf1); - const uint16_t *data2 = EcmaString::GetUtf16DataFlat(str2, buf2); - Span sp1(data1, str1->GetLength()); - Span sp2(data2, str2->GetLength()); - return EcmaString::StringsAreEquals(sp1, sp2); - } else { // NOLINT(readability-else-after-return) - CVector buf1; - CVector buf2; - const uint8_t *data1 = EcmaString::GetUtf8DataFlat(str1, buf1); - const uint8_t *data2 = EcmaString::GetUtf8DataFlat(str2, buf2); - Span sp1(data1, str1->GetLength()); - Span sp2(data2, str2->GetLength()); - return EcmaString::StringsAreEquals(sp1, sp2); +bool EcmaString::StringsAreEqualDiffUtfEncoding(EcmaString *left, EcmaString *right) +{ + CVector bufLeftUft16; + CVector bufRightUft16; + CVector bufLeftUft8; + CVector bufRightUft8; + int32_t lhsCount = static_cast(left->GetLength()); + int32_t rhsCount = static_cast(right->GetLength()); + if (!left->IsUtf16() && !right->IsUtf16()) { + const uint8_t *data1 = EcmaString::GetUtf8DataFlat(left, bufLeftUft8); + const uint8_t *data2 = EcmaString::GetUtf8DataFlat(right, bufRightUft8); + Span lhsSp(data1, lhsCount); + Span rhsSp(data2, rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } else if (!left->IsUtf16()) { + const uint8_t *data1 = EcmaString::GetUtf8DataFlat(left, bufLeftUft8); + const uint16_t *data2 = EcmaString::GetUtf16DataFlat(right, bufRightUft16); + Span lhsSp(data1, lhsCount); + Span rhsSp(data2, rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } else if (!right->IsUtf16()) { + const uint16_t *data1 = EcmaString::GetUtf16DataFlat(left, bufLeftUft16); + const uint8_t *data2 = EcmaString::GetUtf8DataFlat(right, bufRightUft8); + Span lhsSp(data1, lhsCount); + Span rhsSp(data2, rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } else { + const uint16_t *data1 = EcmaString::GetUtf16DataFlat(left, bufLeftUft16); + const uint16_t *data2 = EcmaString::GetUtf16DataFlat(right, bufRightUft16); + Span lhsSp(data1, lhsCount); + Span rhsSp(data2, rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } +} + +/* static */ +bool EcmaString::StringsAreEqualDiffUtfEncoding(const FlatStringInfo &left, const FlatStringInfo &right) +{ + int32_t lhsCount = static_cast(left.GetLength()); + int32_t rhsCount = static_cast(right.GetLength()); + if (!left.IsUtf16() && !right.IsUtf16()) { + Span lhsSp(left.GetDataUtf8(), lhsCount); + Span rhsSp(right.GetDataUtf8(), rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } else if (!left.IsUtf16()) { + Span lhsSp(left.GetDataUtf8(), lhsCount); + Span rhsSp(right.GetDataUtf16(), rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } else if 
(!right.IsUtf16()) { + Span lhsSp(left.GetDataUtf16(), rhsCount); + Span rhsSp(right.GetDataUtf8(), lhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); + } else { + Span lhsSp(left.GetDataUtf16(), lhsCount); + Span rhsSp(right.GetDataUtf16(), rhsCount); + return EcmaString::StringsAreEquals(lhsSp, rhsSp); } } @@ -467,9 +551,6 @@ bool EcmaString::StringsAreEqual(const EcmaVM *vm, const JSHandle &s if (str1 == str2) { return true; } - if (str1->IsUtf16() != str2->IsUtf16()) { - return false; - } uint32_t str1Len = str1->GetLength(); if (str1Len != str2->GetLength()) { return false; @@ -485,10 +566,11 @@ bool EcmaString::StringsAreEqual(const EcmaVM *vm, const JSHandle &s return false; } } - - auto str1Flat = JSHandle(vm->GetJSThread(), Flatten(vm, str1)); - auto str2Flat = JSHandle(vm->GetJSThread(), Flatten(vm, str2)); - return StringsAreEqualSameUtfEncoding(*str1Flat, *str2Flat); + FlatStringInfo str1Flat = FlattenAllString(vm, str1); + JSHandle string(vm->GetJSThread(), str1Flat.GetString()); + FlatStringInfo str2Flat = FlattenAllString(vm, str2); + str1Flat.SetString(*string); + return StringsAreEqualDiffUtfEncoding(str1Flat, str2Flat); } /* static */ @@ -497,9 +579,6 @@ bool EcmaString::StringsAreEqual(EcmaString *str1, EcmaString *str2) if (str1 == str2) { return true; } - if (str1->IsUtf16() != str2->IsUtf16()) { - return false; - } uint32_t str1Len = str1->GetLength(); if (str1Len != str2->GetLength()) { return false; @@ -515,20 +594,20 @@ bool EcmaString::StringsAreEqual(EcmaString *str1, EcmaString *str2) return false; } } - return StringsAreEqualSameUtfEncoding(str1, str2); + return StringsAreEqualDiffUtfEncoding(str1, str2); } /* static */ bool EcmaString::StringsAreEqualUtf8(const EcmaString *str1, const uint8_t *utf8Data, uint32_t utf8Len, bool canBeCompress) { - if (canBeCompress != str1->IsUtf8()) { + if (!str1->IsSlicedString() && canBeCompress != str1->IsUtf8()) { return false; } if (canBeCompress && str1->GetLength() != utf8Len) { return false; } - if (canBeCompress) { + if (str1->IsUtf8()) { CVector buf; Span data1(EcmaString::GetUtf8DataFlat(str1, buf), utf8Len); Span data2(utf8Data, utf8Len); @@ -559,23 +638,6 @@ bool EcmaString::StringsAreEqualUtf16(const EcmaString *str1, const uint16_t *ut } } -/* static */ -template -bool EcmaString::StringsAreEquals(Span &str1, Span &str2) -{ - ASSERT(str1.Size() <= str2.Size()); - size_t size = str1.Size(); - if (size < SMALL_STRING_SIZE) { - for (size_t i = 0; i < size; i++) { - if (str1[i] != str2[i]) { - return false; - } - } - return true; - } - return !memcmp(str1.data(), str2.data(), size); -} - template bool EcmaString::MemCopyChars(Span &dst, size_t dstMax, Span &src, size_t count) { @@ -588,18 +650,6 @@ bool EcmaString::MemCopyChars(Span &dst, size_t dstMax, Span &src, s return true; } -template -static uint32_t ComputeHashForData(const T *data, size_t size, uint32_t hashSeed) -{ - uint32_t hash = hashSeed; - Span sp(data, size); - for (auto c : sp) { - constexpr size_t SHIFT = 5; - hash = (hash << SHIFT) - hash + c; - } - return hash; -} - uint32_t EcmaString::ComputeHashcode(uint32_t hashSeed) const { uint32_t hash; @@ -643,17 +693,27 @@ uint32_t EcmaString::ComputeHashcodeUtf16(const uint16_t *utf16Data, uint32_t le bool EcmaString::IsUtf8EqualsUtf16(const uint8_t *utf8Data, size_t utf8Len, const uint16_t *utf16Data, uint32_t utf16Len) { - // length is one more than compared utf16Data, don't need convert all utf8Data to utf16Data - uint32_t utf8ConvertLength = utf16Len + 1; - CVector 
tmpBuffer(utf8ConvertLength); - auto len = base::utf_helper::ConvertRegionUtf8ToUtf16(utf8Data, tmpBuffer.data(), utf8Len, utf8ConvertLength, 0); - if (len != utf16Len) { - return false; + size_t utf8Pos = 0; + size_t utf16Pos = 0; + while (utf8Pos < utf8Len) { + auto [pair, nbytes] = utf::ConvertMUtf8ToUtf16Pair(utf8Data, utf8Len - utf8Pos); + auto [pHigh, pLow] = utf::SplitUtf16Pair(pair); + utf8Data += nbytes; + utf8Pos += nbytes; + if (pHigh != 0) { + if (utf16Pos >= utf16Len - 1 || *utf16Data != pHigh) { + return false; + } + ++utf16Pos; + ++utf16Data; + } + if (utf16Pos >= utf16Len || *utf16Data != pLow) { + return false; + } + ++utf16Pos; + ++utf16Data; } - - Span data1(tmpBuffer.data(), len); - Span data2(utf16Data, utf16Len); - return EcmaString::StringsAreEquals(data1, data2); + return true; } bool EcmaString::ToElementIndex(uint32_t *index) @@ -743,10 +803,10 @@ EcmaString *EcmaString::TrimBody(const JSThread *thread, const JSHandleGetLength(); int32_t start = 0; - int32_t end = srcLen - 1; + int32_t end = static_cast(srcLen) - 1; if (mode == TrimMode::TRIM || mode == TrimMode::TRIM_START) { - start = base::StringHelper::GetStart(data, srcLen); + start = static_cast(base::StringHelper::GetStart(data, srcLen)); } if (mode == TrimMode::TRIM || mode == TrimMode::TRIM_END) { end = base::StringHelper::GetEnd(data, start, srcLen); @@ -758,27 +818,27 @@ EcmaString *EcmaString::TrimBody(const JSThread *thread, const JSHandle &src) { - auto srcFlat = JSHandle(vm->GetJSThread(), Flatten(vm, src)); - uint32_t srcLength = srcFlat->GetLength(); + auto srcFlat = FlattenAllString(vm, src); + uint32_t srcLength = srcFlat.GetLength(); auto factory = vm->GetFactory(); - if (srcFlat->IsUtf16()) { - std::u16string u16str = base::StringHelper::Utf16ToU16String(srcFlat->GetDataUtf16(), srcLength); + if (srcFlat.IsUtf16()) { + std::u16string u16str = base::StringHelper::Utf16ToU16String(srcFlat.GetDataUtf16(), srcLength); std::string res = base::StringHelper::ToLower(u16str); return *(factory->NewFromStdString(res)); } else { - return ConvertUtf8ToLowerOrUpper(vm, srcFlat, true); + return ConvertUtf8ToLowerOrUpper(vm, src, true); } } /* static */ EcmaString *EcmaString::TryToLower(const EcmaVM *vm, const JSHandle &src) { - auto srcFlat = JSHandle(vm->GetJSThread(), Flatten(vm, src)); - uint32_t srcLength = srcFlat->GetLength(); + auto srcFlat = FlattenAllString(vm, src); + uint32_t srcLength = srcFlat.GetLength(); const char start = 'A'; const char end = 'Z'; uint32_t upperIndex = srcLength; - Span data(srcFlat->GetDataUtf8Writable(), srcLength); + Span data(srcFlat.GetDataUtf8Writable(), srcLength); for (uint32_t index = 0; index < srcLength; ++index) { if (base::StringHelper::Utf8CharInRange(data[index], start, end)) { upperIndex = index; @@ -788,18 +848,19 @@ EcmaString *EcmaString::TryToLower(const EcmaVM *vm, const JSHandle if (upperIndex == srcLength) { return *src; } - return ConvertUtf8ToLowerOrUpper(vm, srcFlat, true, upperIndex); + return ConvertUtf8ToLowerOrUpper(vm, src, true, upperIndex); } /* static */ -EcmaString *EcmaString::ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHandle &srcFlat, +EcmaString *EcmaString::ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHandle &src, bool toLower, uint32_t startIndex) { const char start = toLower ? 'A' : 'a'; const char end = toLower ? 
'Z' : 'z'; - uint32_t srcLength = srcFlat->GetLength(); - auto newString = CreateLineString(vm, srcLength, true); - Span data(srcFlat->GetDataUtf8Writable(), srcLength); + uint32_t srcLength = src->GetLength(); + JSHandle newString(vm->GetJSThread(), CreateLineString(vm, srcLength, true)); + auto srcFlat = FlattenAllString(vm, src); + Span data(srcFlat.GetDataUtf8Writable(), srcLength); auto newStringPtr = newString->GetDataUtf8Writable(); if (startIndex > 0) { if (memcpy_s(newStringPtr, startIndex * sizeof(uint8_t), data.data(), startIndex * sizeof(uint8_t)) != EOK) { @@ -814,21 +875,21 @@ EcmaString *EcmaString::ConvertUtf8ToLowerOrUpper(const EcmaVM *vm, const JSHand *(newStringPtr + index) = data[index]; } } - return newString; + return *newString; } /* static */ EcmaString *EcmaString::ToUpper(const EcmaVM *vm, const JSHandle &src) { - auto srcFlat = JSHandle(vm->GetJSThread(), Flatten(vm, src)); - uint32_t srcLength = srcFlat->GetLength(); + FlatStringInfo srcFlat = FlattenAllString(vm, src); + uint32_t srcLength = srcFlat.GetLength(); auto factory = vm->GetFactory(); - if (srcFlat->IsUtf16()) { - std::u16string u16str = base::StringHelper::Utf16ToU16String(srcFlat->GetDataUtf16(), srcLength); + if (srcFlat.IsUtf16()) { + std::u16string u16str = base::StringHelper::Utf16ToU16String(srcFlat.GetDataUtf16(), srcLength); std::string res = base::StringHelper::ToUpper(u16str); return *(factory->NewFromStdString(res)); } else { - return ConvertUtf8ToLowerOrUpper(vm, srcFlat, false); + return ConvertUtf8ToLowerOrUpper(vm, src, false); } } @@ -836,8 +897,8 @@ EcmaString *EcmaString::ToUpper(const EcmaVM *vm, const JSHandle &sr EcmaString *EcmaString::ToLocaleLower(const EcmaVM *vm, const JSHandle &src, const icu::Locale &locale) { auto factory = vm->GetFactory(); - auto srcFlat = JSHandle(vm->GetJSThread(), Flatten(vm, src)); - std::u16string utf16 = srcFlat->ToU16String(); + FlatStringInfo srcFlat = FlattenAllString(vm, src); + std::u16string utf16 = srcFlat.ToU16String(); std::string res = base::StringHelper::ToLocaleLower(utf16, locale); return *(factory->NewFromStdString(res)); } @@ -846,63 +907,87 @@ EcmaString *EcmaString::ToLocaleLower(const EcmaVM *vm, const JSHandle &src, const icu::Locale &locale) { auto factory = vm->GetFactory(); - auto srcFlat = JSHandle(vm->GetJSThread(), Flatten(vm, src)); - std::u16string utf16 = srcFlat->ToU16String(); + FlatStringInfo srcFlat = FlattenAllString(vm, src); + std::u16string utf16 = srcFlat.ToU16String(); std::string res = base::StringHelper::ToLocaleUpper(utf16, locale); return *(factory->NewFromStdString(res)); } EcmaString *EcmaString::Trim(const JSThread *thread, const JSHandle &src, TrimMode mode) { - auto srcFlat = JSHandle(thread, Flatten(thread->GetEcmaVM(), src)); - uint32_t srcLen = srcFlat->GetLength(); + FlatStringInfo srcFlat = FlattenAllString(thread->GetEcmaVM(), src); + uint32_t srcLen = srcFlat.GetLength(); if (UNLIKELY(srcLen == 0)) { return EcmaString::Cast(thread->GlobalConstants()->GetEmptyString().GetTaggedObject()); } - if (srcFlat->IsUtf8()) { - Span data(srcFlat->GetDataUtf8(), srcLen); - return TrimBody(thread, srcFlat, data, mode); + if (srcFlat.IsUtf8()) { + Span data(srcFlat.GetDataUtf8(), srcLen); + return TrimBody(thread, src, data, mode); } else { - Span data(srcFlat->GetDataUtf16(), srcLen); - return TrimBody(thread, srcFlat, data, mode); + Span data(srcFlat.GetDataUtf16(), srcLen); + return TrimBody(thread, src, data, mode); } } -EcmaString *EcmaString::SlowFlatten(const EcmaVM *vm, const JSHandle &string) 
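Review note on the SlowFlatten change below: it now takes a MemSpaceType and also serves sliced strings, but only tree strings cache the flat result back into themselves via SetFirst/SetSecond; a sliced string is left untouched. A simplified standalone model of the tree case follows; `Node` is an illustrative stand-in, not the engine's class.
```cpp
// Sketch, assuming a toy rope: copy both halves into one contiguous buffer,
// then overwrite the tree's children so later reads hit the flat copy.
#include <iostream>
#include <memory>
#include <string>

struct Node {
    std::string flat;                 // non-empty and no children => line string
    std::shared_ptr<Node> first;      // tree string: left child
    std::shared_ptr<Node> second;     // tree string: right child
    bool IsTree() const { return first != nullptr; }
};

std::string Flatten(const std::shared_ptr<Node> &node)
{
    if (!node->IsTree()) {
        return node->flat;            // already flat: nothing to do
    }
    // Recursively materialize both halves into one buffer.
    std::string result = Flatten(node->first) + Flatten(node->second);
    // Cache the result back into the tree, mirroring SetFirst(result) /
    // SetSecond(empty) above, so the copying work happens only once.
    node->flat = result;
    node->first.reset();
    node->second.reset();
    return result;
}

int main()
{
    auto left = std::make_shared<Node>(Node{"Hello, ", nullptr, nullptr});
    auto right = std::make_shared<Node>(Node{"world", nullptr, nullptr});
    auto tree = std::make_shared<Node>(Node{"", left, right});
    std::cout << Flatten(tree) << "\n";   // "Hello, world"
    std::cout << tree->IsTree() << "\n";  // 0: the tree is now a flat node
}
```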
+EcmaString *EcmaString::SlowFlatten(const EcmaVM *vm, const JSHandle &string, MemSpaceType type) { + ASSERT(string->IsTreeString() || string->IsSlicedString()); auto thread = vm->GetJSThread(); - ASSERT(EcmaString::Cast(string->GetSecond())->GetLength() != 0); - uint32_t length = string->GetLength(); EcmaString *result = nullptr; if (string->IsUtf8()) { - result = CreateLineString(vm, length, true); + result = CreateLineStringWithSpaceType(vm, length, true, type); WriteToFlat(*string, result->GetDataUtf8Writable(), length); } else { - result = CreateLineString(vm, length, false); + result = CreateLineStringWithSpaceType(vm, length, false, type); WriteToFlat(*string, result->GetDataUtf16Writable(), length); } - string->SetFirst(thread, JSTaggedValue(result)); - string->SetSecond(thread, JSTaggedValue(*vm->GetFactory()->GetEmptyString())); + if (string->IsTreeString()) { + JSHandle tree(string); + ASSERT(EcmaString::Cast(tree->GetSecond())->GetLength() != 0); + tree->SetFirst(thread, JSTaggedValue(result)); + tree->SetSecond(thread, JSTaggedValue(*vm->GetFactory()->GetEmptyString())); + } return result; } -EcmaString *EcmaString::Flatten(const EcmaVM *vm, const JSHandle &string) +EcmaString *EcmaString::Flatten(const EcmaVM *vm, const JSHandle &string, MemSpaceType type) { EcmaString *s = *string; - if (s->IsLineOrConstantString()) { + if (s->IsLineOrConstantString() || s->IsSlicedString()) { return s; } if (s->IsTreeString()) { JSHandle tree = JSHandle::Cast(string); if (!tree->IsFlat()) { - return SlowFlatten(vm, tree); + return SlowFlatten(vm, string, type); } s = EcmaString::Cast(tree->GetFirst()); } return s; } +FlatStringInfo EcmaString::FlattenAllString(const EcmaVM *vm, const JSHandle &string, MemSpaceType type) +{ + EcmaString *s = *string; + uint32_t startIndex = 0; + if (s->IsLineOrConstantString()) { + return FlatStringInfo(s, startIndex, s->GetLength()); + } + if (string->IsTreeString()) { + JSHandle tree = JSHandle::Cast(string); + if (!tree->IsFlat()) { + s = SlowFlatten(vm, string, type); + } else { + s = EcmaString::Cast(tree->GetFirst()); + } + } else if (string->IsSlicedString()) { + s = EcmaString::Cast(SlicedString::Cast(*string)->GetParent()); + startIndex = SlicedString::Cast(*string)->GetStartIndex(); + } + return FlatStringInfo(s, startIndex, s->GetLength()); +} + EcmaString *EcmaString::FlattenNoGC(const EcmaVM *vm, EcmaString *string) { DISALLOW_GARBAGE_COLLECTION; @@ -927,6 +1012,18 @@ EcmaString *EcmaString::FlattenNoGC(const EcmaVM *vm, EcmaString *string) tree->SetSecond(vm->GetJSThread(), JSTaggedValue(*vm->GetFactory()->GetEmptyString())); return result; } + } else if (string->IsSlicedString()) { + SlicedString *str = SlicedString::Cast(string); + uint32_t length = str->GetLength(); + EcmaString *result = nullptr; + if (str->IsUtf8()) { + result = CreateLineStringNoGC(vm, length, true); + WriteToFlat(str, result->GetDataUtf8Writable(), length); + } else { + result = CreateLineStringNoGC(vm, length, false); + WriteToFlat(str, result->GetDataUtf16Writable(), length); + } + return result; } return string; } @@ -944,6 +1041,9 @@ const uint8_t *EcmaString::GetUtf8DataFlat(const EcmaString *src, CVectorIsSlicedString()) { + SlicedString *str = SlicedString::Cast(string); + return EcmaString::Cast(str->GetParent())->GetDataUtf8() + str->GetStartIndex(); } return string->GetDataUtf8(); } @@ -961,10 +1061,27 @@ const uint16_t *EcmaString::GetUtf16DataFlat(const EcmaString *src, CVectorIsSlicedString()) { + SlicedString *str = SlicedString::Cast(string); + return 
EcmaString::Cast(str->GetParent())->GetDataUtf16() + str->GetStartIndex(); } return string->GetDataUtf16(); } +std::u16string FlatStringInfo::ToU16String(uint32_t len) +{ + uint32_t length = len > 0 ? len : GetLength(); + std::u16string result; + if (IsUtf16()) { + const uint16_t *data = this->GetDataUtf16(); + result = base::StringHelper::Utf16ToU16String(data, length); + } else { + const uint8_t *data = this->GetDataUtf8(); + result = base::StringHelper::Utf8ToU16String(data, length); + } + return result; +} + EcmaStringAccessor::EcmaStringAccessor(EcmaString *string) { ASSERT(string != nullptr); diff --git a/ecmascript/ecma_string.h b/ecmascript/ecma_string.h index de7ae40102888fc4034c8b1899a97fd4e7386d98..920faf57dbbd26559902aca477122f65ab12e487 100644 --- a/ecmascript/ecma_string.h +++ b/ecmascript/ecma_string.h @@ -21,6 +21,7 @@ #include #include "ecmascript/base/utf_helper.h" +#include "ecmascript/common.h" #include "ecmascript/ecma_macros.h" #include "ecmascript/js_hclass.h" #include "ecmascript/js_tagged_value.h" @@ -41,6 +42,14 @@ class EcmaVM; class LineEcmaString; class ConstantString; class TreeEcmaString; +class SlicedString; +class FlatStringInfo; + +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define ECMA_STRING_CHECK_LENGTH_AND_TRHOW(vm, length) \ + if ((length) >= MAX_STRING_LENGTH) { \ + THROW_RANGE_ERROR_AND_RETURN((vm)->GetJSThread(), "Invalid string length", nullptr); \ + } class EcmaString : public TaggedObject { public: @@ -48,6 +57,7 @@ public: static constexpr uint32_t STRING_COMPRESSED_BIT = 0x1; static constexpr uint32_t STRING_INTERN_BIT = 0x2; + static constexpr size_t MAX_STRING_LENGTH = 0x40000000U; // 30 bits for string length, 2 bits for special meaning static constexpr size_t MIX_LENGTH_OFFSET = TaggedObjectSize(); // In last bit of mix_length we store if this string is compressed or not. 
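For reference, the new `ECMA_STRING_CHECK_LENGTH_AND_TRHOW` guard and `MAX_STRING_LENGTH` encode the same invariant as the mix-length layout described above: 30 bits of length shifted left by two, with the low two bits reserved for the compressed/intern flags. A minimal standalone sketch of that packing (helper names are illustrative, not part of the runtime API; the exact flag polarity follows the STRING_COMPRESSED/STRING_UNCOMPRESSED constants, which are not shown here):

```
#include <cassert>
#include <cstdint>

constexpr uint32_t MAX_STRING_LENGTH = 0x40000000U; // 2^30: only 30 bits hold the length

// Pack a 30-bit length together with a 2-bit flag field, as SetLength does.
uint32_t PackMixLength(uint32_t length, uint32_t flags)
{
    assert(length < MAX_STRING_LENGTH); // the length must fit in 30 bits
    assert(flags <= 0x3U);              // compressed/intern flags occupy the low 2 bits
    return (length << 2U) | flags;
}

uint32_t UnpackLength(uint32_t mixLength)
{
    return mixLength >> 2U; // drop the flag bits
}
```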
@@ -70,6 +80,11 @@ private: friend class LineEcmaString; friend class ConstantString; friend class TreeEcmaString; + friend class SlicedString; + friend class FlatStringInfo; + friend class NameDictionary; + + static constexpr int SMALL_STRING_SIZE = 128; static EcmaString *CreateEmptyString(const EcmaVM *vm); static EcmaString *CreateFromUtf8(const EcmaVM *vm, const uint8_t *utf8Data, uint32_t utf8Len, @@ -77,6 +92,7 @@ private: uint32_t idOffset = 0); static EcmaString *CreateFromUtf16(const EcmaVM *vm, const uint16_t *utf16Data, uint32_t utf16Len, bool canBeCompress, MemSpaceType type = MemSpaceType::SEMI_SPACE); + static SlicedString *CreateSlicedString(const EcmaVM *vm, MemSpaceType type = MemSpaceType::SEMI_SPACE); static EcmaString *CreateLineString(const EcmaVM *vm, size_t length, bool compressed); static EcmaString *CreateLineStringNoGC(const EcmaVM *vm, size_t length, bool compressed); static EcmaString *CreateLineStringWithSpaceType(const EcmaVM *vm, @@ -85,10 +101,14 @@ private: const JSHandle &left, const JSHandle &right, uint32_t length, bool compressed); static EcmaString *CreateConstantString(const EcmaVM *vm, const uint8_t *utf8Data, size_t length, bool compressed, MemSpaceType type = MemSpaceType::SEMI_SPACE, uint32_t idOffset = 0); - static EcmaString *Concat(const EcmaVM *vm, - const JSHandle &left, const JSHandle &right); + static EcmaString *Concat(const EcmaVM *vm, const JSHandle &left, + const JSHandle &right, MemSpaceType type = MemSpaceType::SEMI_SPACE); + static EcmaString *CopyStringToOldSpace(const EcmaVM *vm, const JSHandle &original, + uint32_t length, bool compressed); static EcmaString *FastSubString(const EcmaVM *vm, const JSHandle &src, uint32_t start, uint32_t length); + static EcmaString *GetSlicedString(const EcmaVM *vm, + const JSHandle &src, uint32_t start, uint32_t length); // require src is LineString // not change src data structure static inline EcmaString *FastSubUtf8String(const EcmaVM *vm, @@ -124,7 +144,7 @@ private: void SetLength(uint32_t length, bool compressed = false) { - ASSERT(length < 0x40000000U); + ASSERT(length < MAX_STRING_LENGTH); // Use 0u for compressed/utf8 expression SetMixLength((length << 2U) | (compressed ? STRING_COMPRESSED : STRING_UNCOMPRESSED)); } @@ -185,9 +205,26 @@ private: // can change left and right data structure static int32_t Compare(const EcmaVM *vm, const JSHandle &left, const JSHandle &right); - template // Check that two spans are equal. Should have the same length. - static bool StringsAreEquals(Span &str1, Span &str2); + /* static */ + template + static bool StringsAreEquals(Span &str1, Span &str2) + { + ASSERT(str1.Size() <= str2.Size()); + size_t size = str1.Size(); + if (size < SMALL_STRING_SIZE || !std::is_same_v) { + for (size_t i = 0; i < size; i++) { + auto left = static_cast(str1[i]); + auto right = static_cast(str2[i]); + if (left != right) { + return false; + } + } + return true; + } + return memcmp(str1.data(), str2.data(), size * sizeof(T)) == 0; + } + // Converts utf8Data to utf16 and compare it with given utf16_data. static bool IsUtf8EqualsUtf16(const uint8_t *utf8Data, size_t utf8Len, const uint16_t *utf16Data, uint32_t utf16Len); @@ -198,7 +235,8 @@ private: // Compares strings by bytes, It doesn't check canonical unicode equivalence. static bool StringsAreEqual(EcmaString *str1, EcmaString *str2); // Two strings have the same type of utf encoding format. 
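The inlined `StringsAreEquals` above chooses between two strategies: an element-wise loop when the spans are short or their element types differ (UTF-8 versus UTF-16 data), and a single `memcmp` once both sides share a type and the size reaches `SMALL_STRING_SIZE`. A hedged standalone sketch of the same policy (the widening cast type is assumed to be `uint16_t`, and raw pointers replace the runtime's `Span`):

```
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

constexpr size_t SMALL_STRING_SIZE = 128; // threshold below which memcmp does not pay off

// Compare two character buffers of equal logical length; fall back to memcmp
// only when the element types match and the buffers are large enough.
template <typename T, typename T1>
bool SpansAreEqual(const T *a, const T1 *b, size_t size)
{
    if (size < SMALL_STRING_SIZE || !std::is_same_v<T, T1>) {
        for (size_t i = 0; i < size; i++) {
            // widen both sides so uint8_t and uint16_t data compare correctly
            if (static_cast<uint16_t>(a[i]) != static_cast<uint16_t>(b[i])) {
                return false;
            }
        }
        return true;
    }
    return std::memcmp(a, b, size * sizeof(T)) == 0; // same type, large: one memcmp
}
```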
-    static bool StringsAreEqualSameUtfEncoding(EcmaString *str1, EcmaString *str2);
+    static bool StringsAreEqualDiffUtfEncoding(EcmaString *str1, EcmaString *str2);
+    static bool StringsAreEqualDiffUtfEncoding(const FlatStringInfo &str1, const FlatStringInfo &str2);
     // Compares strings by bytes, It doesn't check canonical unicode equivalence.
     // not change str1 data structure.
     // if str1 is not flat, this func has low efficiency.
@@ -447,6 +485,18 @@ private:
     template<typename T>
     static bool MemCopyChars(Span<T> &dst, size_t dstMax, Span<const T> &src, size_t count);
 
+    template<typename T>
+    static uint32_t ComputeHashForData(const T *data, size_t size, uint32_t hashSeed)
+    {
+        uint32_t hash = hashSeed;
+        Span<const T> sp(data, size);
+        for (auto c : sp) {
+            constexpr size_t SHIFT = 5;
+            hash = (hash << SHIFT) - hash + c;
+        }
+        return hash;
+    }
+
     static bool IsASCIICharacter(uint16_t data)
     {
         // \0 is not considered ASCII in Ecma-Modified-UTF8 [only modify '\u0000']
@@ -469,10 +519,18 @@ private:
     {
         return GetClass()->IsConstantString();
     }
+    bool IsSlicedString() const
+    {
+        return GetClass()->IsSlicedString();
+    }
     bool IsTreeString() const
     {
         return GetClass()->IsTreeString();
     }
+    bool NotTreeString() const
+    {
+        return !IsTreeString();
+    }
     bool IsLineOrConstantString() const
     {
         auto hclass = GetClass();
@@ -494,9 +552,13 @@ private:
     static const uint16_t *GetUtf16DataFlat(const EcmaString *src, CVector<uint16_t> &buf);
 
     // string must be not flat
-    static EcmaString *SlowFlatten(const EcmaVM *vm, const JSHandle<EcmaString> &string);
+    static EcmaString *SlowFlatten(const EcmaVM *vm, const JSHandle<EcmaString> &string, MemSpaceType type);
+
+    static EcmaString *Flatten(const EcmaVM *vm, const JSHandle<EcmaString> &string,
+                               MemSpaceType type = MemSpaceType::SEMI_SPACE);
 
-    static EcmaString *Flatten(const EcmaVM *vm, const JSHandle<EcmaString> &string);
+    static FlatStringInfo FlattenAllString(const EcmaVM *vm, const JSHandle<EcmaString> &string,
+                                           MemSpaceType type = MemSpaceType::SEMI_SPACE);
 
     static EcmaString *FlattenNoGC(const EcmaVM *vm, EcmaString *string);
 
@@ -625,6 +687,57 @@ public:
     }
 };
 
+// A SlicedString describes a substring of another string without copying its character data.
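Before the class definition itself, a plain-C++ sketch of that representation may help; `SliceSketch` is purely illustrative and ignores the GC object layout that the `ACCESSORS` macros below actually define:

```
#include <cstdint>
#include <string>

// Illustration only: the runtime stores Parent/StartIndex as fields of a
// managed heap object, not as ordinary C++ members.
struct SliceSketch {
    const std::u16string *parent; // flat parent string owning the characters
    uint32_t startIndex;          // offset of the slice inside the parent
    uint32_t length;              // number of characters in the slice

    char16_t At(uint32_t i) const
    {
        // Reading a slice character is a single indexed load into the
        // parent's data; no characters were copied when the slice was made.
        return (*parent)[startIndex + i];
    }
};
```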
+class SlicedString : public EcmaString {
+public:
+    // Minimum length for a sliced string
+    static constexpr uint32_t MIN_SLICED_ECMASTRING_LENGTH = 13;
+    static constexpr size_t PARENT_OFFSET = EcmaString::SIZE;
+    ACCESSORS(Parent, PARENT_OFFSET, STARTINDEX_OFFSET);
+    ACCESSORS_PRIMITIVE_FIELD(StartIndex, uint32_t, STARTINDEX_OFFSET, SIZE);
+    DECL_VISIT_OBJECT(PARENT_OFFSET, STARTINDEX_OFFSET);
+
+    CAST_CHECK(SlicedString, IsSlicedString);
+private:
+    friend class EcmaString;
+    static SlicedString *Cast(EcmaString *str)
+    {
+        return static_cast<SlicedString *>(str);
+    }
+
+    static SlicedString *Cast(const EcmaString *str)
+    {
+        return SlicedString::Cast(const_cast<EcmaString *>(str));
+    }
+
+    static size_t ObjectSize()
+    {
+        return SlicedString::SIZE;
+    }
+
+    template<bool verify = true>
+    uint16_t Get(int32_t index) const
+    {
+        int32_t length = static_cast<int32_t>(GetLength());
+        if (verify) {
+            if ((index < 0) || (index >= length)) {
+                return 0;
+            }
+        }
+        EcmaString *parent = EcmaString::Cast(GetParent());
+        if (parent->IsLineString()) {
+            if (parent->IsUtf8()) {
+                Span<const uint8_t> sp(parent->GetDataUtf8() + GetStartIndex(), length);
+                return sp[index];
+            }
+            Span<const uint16_t> sp(parent->GetDataUtf16() + GetStartIndex(), length);
+            return sp[index];
+        }
+        Span<const uint8_t> sp(ConstantString::Cast(parent)->GetConstantData() + GetStartIndex(), length);
+        return sp[index];
+    }
+};
+
 class TreeEcmaString : public EcmaString {
 public:
     // Minimum length for a tree string
@@ -683,7 +796,52 @@ public:
         }
     }
     UNREACHABLE();
-    }
+    }
+};
+
+class FlatStringInfo {
+public:
+    FlatStringInfo(EcmaString *string, uint32_t startIndex, uint32_t length) : string_(string),
+                                                                               startIndex_(startIndex),
+                                                                               length_(length) {}
+    bool IsUtf8() const
+    {
+        return string_->IsUtf8();
+    }
+
+    bool IsUtf16() const
+    {
+        return string_->IsUtf16();
+    }
+
+    EcmaString *GetString() const
+    {
+        return string_;
+    }
+
+    void SetString(EcmaString *string)
+    {
+        string_ = string;
+    }
+
+    uint32_t GetStartIndex() const
+    {
+        return startIndex_;
+    }
+
+    uint32_t GetLength() const
+    {
+        return length_;
+    }
+
+    const uint8_t *GetDataUtf8() const;
+    const uint16_t *GetDataUtf16() const;
+    uint8_t *GetDataUtf8Writable() const;
+    std::u16string ToU16String(uint32_t len = 0);
+private:
+    EcmaString *string_ {nullptr};
+    uint32_t startIndex_ {0};
+    uint32_t length_ {0};
 };
 
 // if you want to use functions of EcmaString, please not use directly,
@@ -728,10 +886,16 @@ public:
         return EcmaString::CreateFromUtf16(vm, utf16Data, utf16Len, canBeCompress, type);
     }
 
-    static EcmaString *Concat(const EcmaVM *vm,
-        const JSHandle<EcmaString> &str1Handle, const JSHandle<EcmaString> &str2Handle)
+    static EcmaString *Concat(const EcmaVM *vm, const JSHandle<EcmaString> &str1Handle,
+        const JSHandle<EcmaString> &str2Handle, MemSpaceType type = MemSpaceType::SEMI_SPACE)
     {
-        return EcmaString::Concat(vm, str1Handle, str2Handle);
+        return EcmaString::Concat(vm, str1Handle, str2Handle, type);
+    }
+
+    static EcmaString *CopyStringToOldSpace(const EcmaVM *vm, const JSHandle<EcmaString> &original,
+        uint32_t length, bool compressed)
+    {
+        return EcmaString::CopyStringToOldSpace(vm, original, length, compressed);
     }
 
     // can change src data structure
@@ -741,6 +905,13 @@ public:
         return EcmaString::FastSubString(vm, src, start, length);
     }
 
+    // get a sliced string that shares src's character data instead of copying it
+    static EcmaString *GetSlicedString(const EcmaVM *vm,
+        const JSHandle<EcmaString> &src, uint32_t start, uint32_t length)
+    {
+        return EcmaString::GetSlicedString(vm, src, start, length);
+    }
+
     bool IsUtf8() const
     {
         return string_->IsUtf8();
@@ -932,9 +1103,9 @@ public:
     // not change str1 and str2 data structure.
    // if str1 or str2 is not flat, this func has low efficiency.
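`FlatStringInfo`, defined above, carries the same (string, startIndex, length) triple out of `FlattenAllString`, so callers such as `ToLower` and `Trim` can read line, constant, and sliced strings through one code path. A hedged sketch of that consumption pattern (names and the UTF-8-only view are illustrative simplifications):

```
#include <cstdint>
#include <vector>

// All reads go through data + startIndex, so a slice and a full string
// are handled identically by the caller.
struct FlatView {
    const uint8_t *data;  // the parent string's character data
    uint32_t startIndex;  // 0 for line/constant strings, the slice offset otherwise
    uint32_t length;
};

std::vector<uint8_t> CopyChars(const FlatView &view)
{
    const uint8_t *begin = view.data + view.startIndex;
    return std::vector<uint8_t>(begin, begin + view.length);
}
```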
- static bool StringsAreEqualSameUtfEncoding(EcmaString *str1, EcmaString *str2) + static bool StringsAreEqualDiffUtfEncoding(EcmaString *str1, EcmaString *str2) { - return EcmaString::StringsAreEqualSameUtfEncoding(str1, str2); + return EcmaString::StringsAreEqualDiffUtfEncoding(str1, str2); } // not change str1 data structure. @@ -1046,14 +1217,27 @@ public: return string_->IsTreeString(); } - static EcmaString *Flatten(const EcmaVM *vm, const JSHandle &string) + bool NotTreeString() const + { + return string_->NotTreeString(); + } + + static EcmaString *Flatten(const EcmaVM *vm, const JSHandle &string, + MemSpaceType type = MemSpaceType::SEMI_SPACE) + { + return EcmaString::Flatten(vm, string, type); + } + + static FlatStringInfo FlattenAllString(const EcmaVM *vm, const JSHandle &string, + MemSpaceType type = MemSpaceType::SEMI_SPACE) { - return EcmaString::Flatten(vm, string); + return EcmaString::FlattenAllString(vm, string, type); } - static EcmaString *SlowFlatten(const EcmaVM *vm, const JSHandle &string) + static EcmaString *SlowFlatten(const EcmaVM *vm, const JSHandle &string, + MemSpaceType type = MemSpaceType::SEMI_SPACE) { - return EcmaString::SlowFlatten(vm, string); + return EcmaString::SlowFlatten(vm, string, type); } static EcmaString *FlattenNoGC(const EcmaVM *vm, EcmaString *string) diff --git a/ecmascript/ecma_string_table.cpp b/ecmascript/ecma_string_table.cpp index 462dd02b761094b1a27af579b4959d01d2a9e523..f6131463af0323390f060509ff520e44e8810308 100644 --- a/ecmascript/ecma_string_table.cpp +++ b/ecmascript/ecma_string_table.cpp @@ -27,8 +27,8 @@ EcmaStringTable::EcmaStringTable(const EcmaVM *vm) : vm_(vm) {} EcmaString *EcmaStringTable::GetString(const JSHandle &firstString, const JSHandle &secondString) const { - ASSERT(EcmaStringAccessor(firstString).IsLineOrConstantString()); - ASSERT(EcmaStringAccessor(secondString).IsLineOrConstantString()); + ASSERT(EcmaStringAccessor(firstString).NotTreeString()); + ASSERT(EcmaStringAccessor(secondString).NotTreeString()); uint32_t hashCode = EcmaStringAccessor(firstString).GetHashcode(); hashCode = EcmaStringAccessor(secondString).ComputeHashcode(hashCode); auto range = table_.equal_range(hashCode); @@ -69,7 +69,7 @@ EcmaString *EcmaStringTable::GetString(const uint16_t *utf16Data, uint32_t utf16 EcmaString *EcmaStringTable::GetString(EcmaString *string) const { - ASSERT(EcmaStringAccessor(string).IsLineOrConstantString()); + ASSERT(EcmaStringAccessor(string).NotTreeString()); auto hashcode = EcmaStringAccessor(string).GetHashcode(); auto range = table_.equal_range(hashcode); for (auto item = range.first; item != range.second; ++item) { @@ -86,8 +86,11 @@ void EcmaStringTable::InternString(EcmaString *string) if (EcmaStringAccessor(string).IsInternString()) { return; } - ASSERT(EcmaStringAccessor(string).IsLineOrConstantString()); - table_.emplace(EcmaStringAccessor(string).GetHashcode(), string); + // Strings in string table should not be in the young space. 
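The table is effectively a hash multimap, so every lookup in this file follows one discipline: compute (or chain) the hash code, walk the matching bucket with `equal_range`, byte-compare the candidates, and insert only on a miss. A simplified standalone sketch of that flow (`std::unordered_multimap` and `new` stand in for the runtime's container and its old-space allocation, and the young-space invariant asserted below is omitted):

```
#include <cstdint>
#include <string>
#include <unordered_map>

class InternTable {
public:
    const std::string *GetOrIntern(const std::string &s)
    {
        uint32_t hash = Hash(s);
        auto range = table_.equal_range(hash);
        for (auto it = range.first; it != range.second; ++it) {
            if (*it->second == s) { // byte comparison, as StringsAreEqual does
                return it->second;  // already interned: reuse it
            }
        }
        auto *stored = new std::string(s); // stand-in for old-space allocation
        table_.emplace(hash, stored);
        return stored;
    }

private:
    static uint32_t Hash(const std::string &s)
    {
        uint32_t h = 0;
        for (unsigned char c : s) {
            h = (h << 5U) - h + c; // the same 31-multiplier hash as ComputeHashForData
        }
        return h;
    }

    std::unordered_multimap<uint32_t, const std::string *> table_;
};
```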
+ ASSERT(!Region::ObjectAddressToRange(reinterpret_cast(string))->InYoungSpace()); + ASSERT(EcmaStringAccessor(string).NotTreeString()); + auto hashcode = EcmaStringAccessor(string).GetHashcode(); + table_.emplace(hashcode, string); EcmaStringAccessor(string).SetInternString(); } @@ -105,8 +108,9 @@ EcmaString *EcmaStringTable::GetOrInternString(const JSHandle &first if (concatString != nullptr) { return concatString; } - JSHandle concatHandle(vm_->GetJSThread(), EcmaStringAccessor::Concat(vm_, firstFlat, secondFlat)); - concatString = EcmaStringAccessor::Flatten(vm_, concatHandle); + JSHandle concatHandle(vm_->GetJSThread(), + EcmaStringAccessor::Concat(vm_, firstFlat, secondFlat, MemSpaceType::OLD_SPACE)); + concatString = EcmaStringAccessor::Flatten(vm_, concatHandle, MemSpaceType::OLD_SPACE); InternString(concatString); return concatString; } @@ -118,7 +122,7 @@ EcmaString *EcmaStringTable::GetOrInternString(const uint8_t *utf8Data, uint32_t return result; } - result = EcmaStringAccessor::CreateFromUtf8(vm_, utf8Data, utf8Len, canBeCompress); + result = EcmaStringAccessor::CreateFromUtf8(vm_, utf8Data, utf8Len, canBeCompress, MemSpaceType::OLD_SPACE); InternString(result); return result; } @@ -146,7 +150,7 @@ EcmaString *EcmaStringTable::GetOrInternString(const uint16_t *utf16Data, uint32 return result; } - result = EcmaStringAccessor::CreateFromUtf16(vm_, utf16Data, utf16Len, canBeCompress); + result = EcmaStringAccessor::CreateFromUtf16(vm_, utf16Data, utf16Len, canBeCompress, MemSpaceType::OLD_SPACE); InternString(result); return result; } @@ -158,7 +162,7 @@ EcmaString *EcmaStringTable::GetOrInternString(EcmaString *string) } JSHandle strHandle(vm_->GetJSThread(), string); // may gc - auto strFlat = EcmaStringAccessor::Flatten(vm_, strHandle); + auto strFlat = EcmaStringAccessor::Flatten(vm_, strHandle, MemSpaceType::OLD_SPACE); if (EcmaStringAccessor(strFlat).IsInternString()) { return strFlat; } @@ -166,6 +170,15 @@ EcmaString *EcmaStringTable::GetOrInternString(EcmaString *string) if (result != nullptr) { return result; } + + if (EcmaStringAccessor(strFlat).NotTreeString()) { + Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast(strFlat)); + if (objectRegion->InYoungSpace()) { + JSHandle resultHandle(vm_->GetJSThread(), strFlat); + strFlat = EcmaStringAccessor::CopyStringToOldSpace(vm_, + resultHandle, EcmaStringAccessor(strFlat).GetLength(), EcmaStringAccessor(strFlat).IsUtf8()); + } + } InternString(strFlat); return strFlat; } @@ -178,6 +191,7 @@ EcmaString *EcmaStringTable::GetOrInternStringWithSpaceType(const uint8_t *utf8D if (result != nullptr) { return result; } + type = type == MemSpaceType::NON_MOVABLE ? MemSpaceType::NON_MOVABLE : MemSpaceType::OLD_SPACE; if (canBeCompress) { // Constant string will be created in this branch. result = EcmaStringAccessor::CreateFromUtf8(vm_, utf8Data, utf8Len, canBeCompress, type, isConstantString, @@ -196,7 +210,7 @@ EcmaString *EcmaStringTable::GetOrInternStringWithSpaceType(const uint16_t *utf1 if (result != nullptr) { return result; } - + type = type == MemSpaceType::NON_MOVABLE ? 
MemSpaceType::NON_MOVABLE : MemSpaceType::OLD_SPACE; result = EcmaStringAccessor::CreateFromUtf16(vm_, utf16Data, utf16Len, canBeCompress, type); InternString(result); return result; @@ -205,8 +219,10 @@ EcmaString *EcmaStringTable::GetOrInternStringWithSpaceType(const uint16_t *utf1 void EcmaStringTable::SweepWeakReference(const WeakRootVisitor &visitor) { for (auto it = table_.begin(); it != table_.end();) { + // Strings in string table should not be in the young space. Only old gc will sweep string table. auto *object = it->second; auto fwd = visitor(object); + ASSERT(!Region::ObjectAddressToRange(object)->InYoungSpace()); if (fwd == nullptr) { LOG_ECMA(VERBOSE) << "StringTable: delete string " << std::hex << object; table_.erase(it++); @@ -224,7 +240,7 @@ bool EcmaStringTable::CheckStringTableValidity() { for (auto itemOuter = table_.begin(); itemOuter != table_.end(); ++itemOuter) { auto outerString = itemOuter->second; - if (!EcmaStringAccessor(outerString).IsLineOrConstantString()) { + if (!EcmaStringAccessor(outerString).NotTreeString()) { return false; } int counter = 0; diff --git a/ecmascript/ecma_vm.cpp b/ecmascript/ecma_vm.cpp index f9bd486f91327d305bbbf522f4ae05df78eaba37..3b036a33570ba1095f2ef17cd7c8e13f2391ca85 100644 --- a/ecmascript/ecma_vm.cpp +++ b/ecmascript/ecma_vm.cpp @@ -18,6 +18,7 @@ #include "ecmascript/base/string_helper.h" #include "ecmascript/builtins/builtins.h" #include "ecmascript/builtins/builtins_ark_tools.h" +#include "ecmascript/log.h" #ifdef ARK_SUPPORT_INTL #include "ecmascript/builtins/builtins_collator.h" #include "ecmascript/builtins/builtins_date_time_format.h" @@ -72,7 +73,6 @@ #include "ecmascript/mem/visitor.h" #include "ecmascript/module/js_module_manager.h" #include "ecmascript/module/module_data_extractor.h" -#include "ecmascript/napi/include/dfx_jsnapi.h" #include "ecmascript/object_factory.h" #include "ecmascript/patch/quick_fix_manager.h" #include "ecmascript/pgo_profiler/pgo_profiler_manager.h" @@ -91,6 +91,7 @@ namespace panda::ecmascript { using RandomGenerator = base::RandomGenerator; +using PGOProfilerManager = pgo::PGOProfilerManager; AOTFileManager *JsStackInfo::loader = nullptr; /* static */ EcmaVM *EcmaVM::Create(const JSRuntimeOptions &options, EcmaParamConfiguration &config) @@ -230,6 +231,9 @@ EcmaVM::~EcmaVM() initialized_ = false; #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) DFXJSNApi::StopCpuProfilerForFile(this); +#endif +#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER) + DeleteHeapProfile(); #endif heap_->WaitAllTasksFinished(); Taskpool::GetCurrentTaskpool()->Destroy(thread_->GetThreadId()); @@ -368,32 +372,25 @@ JSHandle EcmaVM::GetEcmaUncaughtException() const void EcmaVM::PrintJSErrorInfo(const JSHandle &exceptionInfo) const { - JSHandle nameKey = thread_->GlobalConstants()->GetHandledNameString(); - JSHandle name(JSObject::GetProperty(thread_, exceptionInfo, nameKey).GetValue()); - JSHandle msgKey = thread_->GlobalConstants()->GetHandledMessageString(); - JSHandle msg(JSObject::GetProperty(thread_, exceptionInfo, msgKey).GetValue()); - JSHandle stackKey = thread_->GlobalConstants()->GetHandledStackString(); - JSHandle stack(JSObject::GetProperty(thread_, exceptionInfo, stackKey).GetValue()); - - CString nameBuffer = ConvertToString(*name); - CString msgBuffer = ConvertToString(*msg); - CString stackBuffer = ConvertToString(*stack); - LOG_NO_TAG(ERROR) << nameBuffer << ": " << msgBuffer << "\n" << stackBuffer; + EcmaContext::PrintJSErrorInfo(thread_, exceptionInfo); } void EcmaVM::ProcessNativeDelete(const WeakRootVisitor 
&visitor) { - auto iter = nativePointerList_.begin(); - while (iter != nativePointerList_.end()) { - JSNativePointer *object = *iter; - auto fwd = visitor(reinterpret_cast(object)); - if (fwd == nullptr) { - object->Destroy(); - iter = nativePointerList_.erase(iter); - } else { - ++iter; + if (!heap_->IsYoungGC()) { + auto iter = nativePointerList_.begin(); + while (iter != nativePointerList_.end()) { + JSNativePointer *object = *iter; + auto fwd = visitor(reinterpret_cast(object)); + if (fwd == nullptr) { + object->Destroy(); + iter = nativePointerList_.erase(iter); + } else { + ++iter; + } } } + thread_->GetCurrentEcmaContext()->ProcessNativeDelete(visitor); } @@ -402,22 +399,24 @@ void EcmaVM::ProcessReferences(const WeakRootVisitor &visitor) if (thread_->GetCurrentEcmaContext()->GetRegExpParserCache() != nullptr) { thread_->GetCurrentEcmaContext()->GetRegExpParserCache()->Clear(); } - heap_->ResetNativeBindingSize(); - // array buffer - auto iter = nativePointerList_.begin(); - while (iter != nativePointerList_.end()) { - JSNativePointer *object = *iter; - auto fwd = visitor(reinterpret_cast(object)); - if (fwd == nullptr) { - object->Destroy(); - iter = nativePointerList_.erase(iter); - continue; - } - heap_->IncreaseNativeBindingSize(JSNativePointer::Cast(fwd)); - if (fwd != reinterpret_cast(object)) { - *iter = JSNativePointer::Cast(fwd); + if (!heap_->IsYoungGC()) { + heap_->ResetNativeBindingSize(); + // array buffer + auto iter = nativePointerList_.begin(); + while (iter != nativePointerList_.end()) { + JSNativePointer *object = *iter; + auto fwd = visitor(reinterpret_cast(object)); + if (fwd == nullptr) { + object->Destroy(); + iter = nativePointerList_.erase(iter); + continue; + } + heap_->IncreaseNativeBindingSize(JSNativePointer::Cast(fwd)); + if (fwd != reinterpret_cast(object)) { + *iter = JSNativePointer::Cast(fwd); + } + ++iter; } - ++iter; } thread_->GetCurrentEcmaContext()->ProcessReferences(visitor); } @@ -437,6 +436,25 @@ void EcmaVM::RemoveFromNativePointerList(JSNativePointer *array) } } +void EcmaVM::PushToDeregisterModuleList(CString module) +{ + deregisterModuleList_.emplace_back(module); +} + +void EcmaVM::RemoveFromDeregisterModuleList(CString module) +{ + auto iter = std::find(deregisterModuleList_.begin(), deregisterModuleList_.end(), module); + if (iter != deregisterModuleList_.end()) { + deregisterModuleList_.erase(iter); + } +} + +bool EcmaVM::ContainInDeregisterModuleList(CString module) +{ + return (std::find(deregisterModuleList_.begin(), deregisterModuleList_.end(), module) + != deregisterModuleList_.end()); +} + void EcmaVM::ClearBufferData() { for (auto iter : nativePointerList_) { @@ -446,6 +464,7 @@ void EcmaVM::ClearBufferData() thread_->GetCurrentEcmaContext()->ClearBufferData(); internalNativeMethods_.clear(); workerList_.clear(); + deregisterModuleList_.clear(); } void EcmaVM::CollectGarbage(TriggerGCType gcType, GCReason reason) const @@ -453,16 +472,6 @@ void EcmaVM::CollectGarbage(TriggerGCType gcType, GCReason reason) const heap_->CollectGarbage(gcType, reason); } -void EcmaVM::StartHeapTracking(HeapTracker *tracker) -{ - heap_->StartHeapTracking(tracker); -} - -void EcmaVM::StopHeapTracking() -{ - heap_->StopHeapTracking(); -} - void EcmaVM::Iterate(const RootVisitor &v, const RootRangeVisitor &rv) { rv(Root::ROOT_VM, ObjectSlot(ToUintPtr(&internalNativeMethods_.front())), @@ -482,6 +491,14 @@ void EcmaVM::DeleteHeapProfile() heapProfile_ = nullptr; } +HeapProfilerInterface *EcmaVM::GetHeapProfile() +{ + if (heapProfile_ != nullptr) { + 
return heapProfile_; + } + return nullptr; +} + HeapProfilerInterface *EcmaVM::GetOrNewHeapProfile() { if (heapProfile_ != nullptr) { @@ -491,6 +508,16 @@ HeapProfilerInterface *EcmaVM::GetOrNewHeapProfile() ASSERT(heapProfile_ != nullptr); return heapProfile_; } + +void EcmaVM::StartHeapTracking() +{ + heap_->StartHeapTracking(); +} + +void EcmaVM::StopHeapTracking() +{ + heap_->StopHeapTracking(); +} #endif // NOLINTNEXTLINE(modernize-avoid-c-arrays) @@ -591,4 +618,80 @@ void EcmaVM::DumpCallTimeInfo() callTimer_->PrintAllStats(); } } + +void EcmaVM::WorkersetInfo(EcmaVM *hostVm, EcmaVM *workerVm) +{ + os::memory::LockHolder lock(mutex_); + auto thread = workerVm->GetJSThread(); + if (thread != nullptr && hostVm != nullptr) { + auto tid = thread->GetThreadId(); + if (tid != 0) { + workerList_.emplace(tid, workerVm); + } + } +} + +EcmaVM *EcmaVM::GetWorkerVm(uint32_t tid) +{ + os::memory::LockHolder lock(mutex_); + EcmaVM *workerVm = nullptr; + if (!workerList_.empty()) { + auto iter = workerList_.find(tid); + if (iter != workerList_.end()) { + workerVm = iter->second; + } + } + return workerVm; +} + +bool EcmaVM::DeleteWorker(EcmaVM *hostVm, EcmaVM *workerVm) +{ + os::memory::LockHolder lock(mutex_); + auto thread = workerVm->GetJSThread(); + if (hostVm != nullptr && thread != nullptr) { + auto tid = thread->GetThreadId(); + if (tid == 0) { + return false; + } + auto iter = workerList_.find(tid); + if (iter != workerList_.end()) { + workerList_.erase(iter); + return true; + } + return false; + } + return false; +} + +bool EcmaVM::SuspendWorkerVm(uint32_t tid) +{ + os::memory::LockHolder lock(mutex_); + if (!workerList_.empty()) { + auto iter = workerList_.find(tid); + if (iter != workerList_.end()) { + return DFXJSNApi::SuspendVM(iter->second); + } + } + return false; +} + +void EcmaVM::ResumeWorkerVm(uint32_t tid) +{ + os::memory::LockHolder lock(mutex_); + if (!workerList_.empty()) { + auto iter = workerList_.find(tid); + if (iter != workerList_.end()) { + DFXJSNApi::ResumeVM(iter->second); + } + } +} + +bool EcmaVM::RequestAot(const std::string &bundleName, const std::string &moduleName, RequestAotMode triggerMode) const +{ + if (requestAotCallback_ == nullptr) { + LOG_ECMA(ERROR) << "Trigger aot failed. 
callback is null."; + return false; + } + return (requestAotCallback_(bundleName, moduleName, static_cast(triggerMode)) == 0); +} } // namespace panda::ecmascript diff --git a/ecmascript/ecma_vm.h b/ecmascript/ecma_vm.h index cd900d6ce96c837d7c0326d66f35c73b18ff5a0b..c4be8eff491f03c19776dcdc7e345c465dadccc8 100644 --- a/ecmascript/ecma_vm.h +++ b/ecmascript/ecma_vm.h @@ -16,6 +16,7 @@ #ifndef ECMASCRIPT_ECMA_VM_H #define ECMASCRIPT_ECMA_VM_H +#include #include #include "ecmascript/base/config.h" @@ -23,10 +24,13 @@ #include "ecmascript/ecma_context.h" #include "ecmascript/js_runtime_options.h" #include "ecmascript/js_thread.h" +#include "ecmascript/log_wrapper.h" #include "ecmascript/mem/c_containers.h" #include "ecmascript/mem/c_string.h" #include "ecmascript/mem/gc_stats.h" +#include "ecmascript/napi/include/dfx_jsnapi.h" #include "ecmascript/napi/include/jsnapi.h" +#include "ecmascript/pgo_profiler/pgo_profiler.h" #include "ecmascript/taskpool/taskpool.h" namespace panda { @@ -55,9 +59,10 @@ class EcmaStringTable; class SnapshotEnv; class SnapshotSerialize; class SnapshotProcessor; -class PGOProfiler; +using PGOProfiler = pgo::PGOProfiler; #if !WIN_OR_MAC_OR_IOS_PLATFORM class HeapProfilerInterface; +class HeapProfiler; #endif namespace job { class MicroJobQueue; @@ -82,10 +87,11 @@ class FunctionCallTimer; class EcmaStringTable; using NativePtrGetter = void* (*)(void* info); - -using ResolvePathCallback = std::function; -using ResolveBufferCallback = std::function(std::string dirPath)>; - +using SourceMapTranslateCallback = std::function; +using ResolveBufferCallback = std::function; +using UnloadNativeModuleCallback = std::function; +using RequestAotCallback = + std::function; class EcmaVM { public: static EcmaVM *Create(const JSRuntimeOptions &options, EcmaParamConfiguration &config); @@ -181,7 +187,9 @@ public: void PushToNativePointerList(JSNativePointer *array); void RemoveFromNativePointerList(JSNativePointer *array); - + void PushToDeregisterModuleList(CString module); + void RemoveFromDeregisterModuleList(CString module); + bool ContainInDeregisterModuleList(CString module); JSHandle GetAndClearEcmaUncaughtException() const; JSHandle GetEcmaUncaughtException() const; bool IsOptionalLogEnabled() const @@ -197,10 +205,6 @@ public: } void CollectGarbage(TriggerGCType gcType, GCReason reason = GCReason::OTHER) const; - void StartHeapTracking(HeapTracker *tracker); - - void StopHeapTracking(); - NativeAreaAllocator *GetNativeAreaAllocator() const { return nativeAreaAllocator_.get(); @@ -243,6 +247,16 @@ public: return nativePtrGetter_; } + void SetSourceMapTranslateCallback(SourceMapTranslateCallback cb) + { + sourceMapTranslateCallback_ = cb; + } + + SourceMapTranslateCallback GetSourceMapTranslateCallback() const + { + return sourceMapTranslateCallback_; + } + size_t GetNativePointerListSize() { return nativePointerList_.size(); @@ -258,57 +272,41 @@ public: return resolveBufferCallback_; } - void SetConcurrentCallback(ConcurrentCallback callback, void *data) + bool RequestAot(const std::string &bundleName, const std::string &moduleName, RequestAotMode triggerMode) const; + + void SetRequestAotCallback(const RequestAotCallback &cb) { - concurrentCallback_ = callback; - concurrentData_ = data; + requestAotCallback_ = cb; } - void TriggerConcurrentCallback(JSTaggedValue result, JSTaggedValue hint); - - void WorkersetInfo(EcmaVM *hostVm, EcmaVM *workerVm) + void SetUnloadNativeModuleCallback(const UnloadNativeModuleCallback &cb) { - os::memory::LockHolder lock(mutex_); - auto thread 
= workerVm->GetJSThread(); - if (thread != nullptr && hostVm != nullptr) { - auto tid = thread->GetThreadId(); - if (tid != 0) { - workerList_.emplace(tid, workerVm); - } - } + unloadNativeModuleCallback_ = cb; } - EcmaVM *GetWorkerVm(uint32_t tid) const + UnloadNativeModuleCallback GetUnloadNativeModuleCallback() const { - EcmaVM *workerVm = nullptr; - if (!workerList_.empty()) { - auto iter = workerList_.find(tid); - if (iter != workerList_.end()) { - workerVm = iter->second; - } - } - return workerVm; + return unloadNativeModuleCallback_; } - bool DeleteWorker(EcmaVM *hostVm, EcmaVM *workerVm) + void SetConcurrentCallback(ConcurrentCallback callback, void *data) { - os::memory::LockHolder lock(mutex_); - auto thread = workerVm->GetJSThread(); - if (hostVm != nullptr && thread != nullptr) { - auto tid = thread->GetThreadId(); - if (tid == 0) { - return false; - } - auto iter = workerList_.find(tid); - if (iter != workerList_.end()) { - workerList_.erase(iter); - return true; - } - return false; - } - return false; + concurrentCallback_ = callback; + concurrentData_ = data; } + void TriggerConcurrentCallback(JSTaggedValue result, JSTaggedValue hint); + + void WorkersetInfo(EcmaVM *hostVm, EcmaVM *workerVm); + + EcmaVM *GetWorkerVm(uint32_t tid); + + bool DeleteWorker(EcmaVM *hostVm, EcmaVM *workerVm); + + bool SuspendWorkerVm(uint32_t tid); + + void ResumeWorkerVm(uint32_t tid); + template void EnumerateWorkerVm(Callback cb) { @@ -336,7 +334,10 @@ public: #if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER) void DeleteHeapProfile(); + HeapProfilerInterface *GetHeapProfile(); HeapProfilerInterface *GetOrNewHeapProfile(); + void StartHeapTracking(); + void StopHeapTracking(); #endif bool EnableReportModuleResolvingFailure() const @@ -417,6 +418,33 @@ public: ASSERT(stringTable_ != nullptr); return stringTable_; } + + void IncreaseCallDepth() + { + callDepth_++; + } + + void DecreaseCallDepth() + { + ASSERT(callDepth_ > 0); + callDepth_--; + } + + bool IsTopLevelCallDepth() + { + return callDepth_ == 0; + } + + void SetProfilerState(bool state) + { + isProfiling_ = state; + } + + bool GetProfilerState() + { + return isProfiling_; + } + protected: void PrintJSErrorInfo(const JSHandle &exceptionInfo) const; @@ -461,14 +489,21 @@ private: CString assetPath_; CString bundleName_; CString moduleName_; + CList deregisterModuleList_; // Registered Callbacks NativePtrGetter nativePtrGetter_ {nullptr}; + SourceMapTranslateCallback sourceMapTranslateCallback_ {nullptr}; void *loop_ {nullptr}; - // CJS resolve path Callbacks - ResolvePathCallback resolvePathCallback_ {nullptr}; + // resolve path to get abc's buffer ResolveBufferCallback resolveBufferCallback_ {nullptr}; + // delete the native module and dlclose so from NativeModuleManager + UnloadNativeModuleCallback unloadNativeModuleCallback_ {nullptr}; + + // trigger local aot + RequestAotCallback requestAotCallback_ {nullptr}; + // Concurrent taskpool callback and data ConcurrentCallback concurrentCallback_ {nullptr}; void *concurrentData_ {nullptr}; @@ -489,6 +524,10 @@ private: // PGO Profiler PGOProfiler *pgoProfiler_ {nullptr}; + // c++ call js + size_t callDepth_ {0}; + + bool isProfiling_ {false}; friend class Snapshot; friend class SnapshotProcessor; diff --git a/ecmascript/elements.cpp b/ecmascript/elements.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1e0d934ab5d331332e188a1145b780b053716589 --- /dev/null +++ b/ecmascript/elements.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/elements.h" +#include "ecmascript/global_env_constants.h" +#include "ecmascript/js_tagged_value-inl.h" + +namespace panda::ecmascript { +CMap Elements::InitializeHClassMap() +{ + CMap result; + result.emplace(ElementsKind::NONE, ConstantIndex::ELEMENT_NONE_HCLASS_INDEX); + result.emplace(ElementsKind::INT, ConstantIndex::ELEMENT_INT_HCLASS_INDEX); + result.emplace(ElementsKind::DOUBLE, ConstantIndex::ELEMENT_DOUBLE_HCLASS_INDEX); + result.emplace(ElementsKind::NUMBER, ConstantIndex::ELEMENT_NUMBER_HCLASS_INDEX); + result.emplace(ElementsKind::STRING, ConstantIndex::ELEMENT_STRING_HCLASS_INDEX); + result.emplace(ElementsKind::OBJECT, ConstantIndex::ELEMENT_OBJECT_HCLASS_INDEX); + result.emplace(ElementsKind::TAGGED, ConstantIndex::ELEMENT_TAGGED_HCLASS_INDEX); + result.emplace(ElementsKind::HOLE_INT, ConstantIndex::ELEMENT_HOLE_INT_HCLASS_INDEX); + result.emplace(ElementsKind::HOLE_DOUBLE, ConstantIndex::ELEMENT_HOLE_DOUBLE_HCLASS_INDEX); + result.emplace(ElementsKind::HOLE_NUMBER, ConstantIndex::ELEMENT_HOLE_NUMBER_HCLASS_INDEX); + result.emplace(ElementsKind::HOLE_STRING, ConstantIndex::ELEMENT_HOLE_STRING_HCLASS_INDEX); + result.emplace(ElementsKind::HOLE_OBJECT, ConstantIndex::ELEMENT_HOLE_OBJECT_HCLASS_INDEX); + result.emplace(ElementsKind::HOLE_TAGGED, ConstantIndex::ELEMENT_HOLE_TAGGED_HCLASS_INDEX); + return result; +} + +std::string Elements::GetString(ElementsKind kind) +{ + return std::to_string(static_cast(kind)); +} + +bool Elements::IsInt(ElementsKind kind) +{ + return kind == ElementsKind::INT; +} + +bool Elements::IsDouble(ElementsKind kind) +{ + return kind == ElementsKind::DOUBLE; +} + +bool Elements::IsObject(ElementsKind kind) +{ + return kind == ElementsKind::OBJECT; +} + +bool Elements::IsHole(ElementsKind kind) +{ + static constexpr uint8_t EVEN_NUMBER = 2; + return static_cast(kind) % EVEN_NUMBER == 1; +} + +ElementsKind Elements::MergeElementsKind(ElementsKind curKind, ElementsKind newKind) +{ + auto result = ElementsKind(static_cast(curKind) | static_cast(newKind)); + ASSERT(result != ElementsKind::NONE); + result = FixElementsKind(result); + return result; +} + +ElementsKind Elements::FixElementsKind(ElementsKind oldKind) +{ + auto result = oldKind; + switch (result) { + case ElementsKind::NONE: + case ElementsKind::INT: + case ElementsKind::DOUBLE: + case ElementsKind::NUMBER: + case ElementsKind::STRING: + case ElementsKind::OBJECT: + case ElementsKind::HOLE_INT: + case ElementsKind::HOLE_DOUBLE: + case ElementsKind::HOLE_NUMBER: + case ElementsKind::HOLE_STRING: + case ElementsKind::HOLE_OBJECT: + break; + default: + if (IsHole(result)) { + result = ElementsKind::HOLE_TAGGED; + } else { + result = ElementsKind::TAGGED; + } + break; + } + return result; +} + +ElementsKind Elements::ToElementsKind(JSTaggedValue value, ElementsKind kind) +{ + ElementsKind valueKind = ElementsKind::NONE; + if (value.IsInt()) { + valueKind = ElementsKind::INT; + } else if (value.IsDouble()) { + 
valueKind = ElementsKind::DOUBLE; + } else if (value.IsString()) { + valueKind = ElementsKind::STRING; + } else if (value.IsHeapObject()) { + valueKind = ElementsKind::OBJECT; + } else if (value.IsHole()) { + valueKind = ElementsKind::HOLE; + } else { + valueKind = ElementsKind::TAGGED; + } + return MergeElementsKind(valueKind, kind); +} +} // namespace panda::ecmascript diff --git a/ecmascript/elements.h b/ecmascript/elements.h new file mode 100644 index 0000000000000000000000000000000000000000..492f7b4e8cd9461a625a6b87964c04cac83806b5 --- /dev/null +++ b/ecmascript/elements.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_ELEMENTS_H +#define ECMASCRIPT_ELEMENTS_H + +#include "ecmascript/global_env_constants.h" +#include "ecmascript/js_tagged_value.h" +#include "ecmascript/mem/c_containers.h" + +namespace panda::ecmascript { +enum class ElementsKind : uint8_t { + NONE = 0x00UL, + HOLE = 0x01UL, + INT = 0x1UL << 1, // 2 + DOUBLE = 0x1UL << 2, // 4 + NUMBER = INT | DOUBLE, // 6 + STRING = 0x1UL << 3, // 8 + OBJECT = 0x1UL << 4, // 16 + TAGGED = 0x1EUL, // 30 + HOLE_INT = HOLE | INT, + HOLE_DOUBLE = HOLE | DOUBLE, + HOLE_NUMBER = HOLE | NUMBER, + HOLE_STRING = HOLE | STRING, + HOLE_OBJECT = HOLE | OBJECT, + HOLE_TAGGED = HOLE | TAGGED, + GENERIC = HOLE_TAGGED, +}; + +class Elements { +public: + static CMap InitializeHClassMap(); + + static std::string GetString(ElementsKind kind); + static bool IsInt(ElementsKind kind); + static bool IsDouble(ElementsKind kind); + static bool IsObject(ElementsKind kind); + static bool IsHole(ElementsKind kind); + static bool IsGeneric(ElementsKind kind) + { + return kind == ElementsKind::GENERIC; + } + + static bool IsNone(ElementsKind kind) + { + return kind == ElementsKind::NONE; + } + + static ElementsKind MergeElementsKind(ElementsKind curKind, ElementsKind newKind); + static ElementsKind FixElementsKind(ElementsKind oldKind); + static ElementsKind ToElementsKind(JSTaggedValue value, ElementsKind kind); +}; +} // namespace panda::ecmascript +#endif // ECMASCRIPT_ELEMENTS_H diff --git a/ecmascript/filter_helper.h b/ecmascript/filter_helper.h index f9f4f6da69dad4c24a020b66898546ea9667e931..f8f7e0b1c5c2e920832eca8b6322307ede66acca 100644 --- a/ecmascript/filter_helper.h +++ b/ecmascript/filter_helper.h @@ -16,8 +16,8 @@ #ifndef ECMASCRIPT_FILTER_HELPER_H #define ECMASCRIPT_FILTER_HELPER_H -#include "ecmascript/property_attributes.h" #include "ecmascript/object_operator.h" +#include "ecmascript/property_attributes.h" #define NATIVE_DEFAULT 0 #define NATIVE_WRITABLE 1 << 0 diff --git a/ecmascript/global_env.cpp b/ecmascript/global_env.cpp index 8157dee2623c8c31f782bc4934c4a468e524ed2f..909a121dc1ada30c13d059c936f1bf7e6e2d3ff0 100644 --- a/ecmascript/global_env.cpp +++ b/ecmascript/global_env.cpp @@ -41,6 +41,7 @@ void GlobalEnv::Init(JSThread *thread) stringTable->InternEmptyString(EcmaString::Cast(emptyStr.GetTaggedObject())); SetTemplateMap(thread, 
TemplateMap::Create(thread)); SetObjectLiteralHClassCache(thread, JSTaggedValue::Hole()); + SetJsonObjectHclassCache(thread, JSTaggedValue::Hole()); } JSHandle GlobalEnv::GetSymbol(JSThread *thread, const JSHandle &string) { diff --git a/ecmascript/global_env.h b/ecmascript/global_env.h index c4c7d2f95086d9a729b4937f91747005659a3f26..88c53a90c8675e7e28601dba34c28699987a898c 100644 --- a/ecmascript/global_env.h +++ b/ecmascript/global_env.h @@ -90,6 +90,7 @@ class JSThread; V(JSTaggedValue, AtomicsFunction, ATOMICS_FUNCTION_INDEX) \ V(JSTaggedValue, JsonFunction, JSON_FUNCTION_INDEX) \ V(JSTaggedValue, StringFunction, STRING_FUNCTION_INDEX) \ + V(JSTaggedValue, StringPrototype, STRING_PROTOTYPE_INDEX) \ V(JSTaggedValue, ProxyFunction, PROXY_FUNCTION_INDEX) \ V(JSTaggedValue, GeneratorFunctionFunction, GENERATOR_FUNCTION_OFFSET) \ V(JSTaggedValue, GeneratorFunctionPrototype, GENERATOR_FUNCTION_PROTOTYPE_OFFSET) \ @@ -196,7 +197,8 @@ class JSThread; V(JSTaggedValue, CjsExportsFunction, CJS_EXPORTS_FUNCTION_INDEX) \ V(JSTaggedValue, CjsRequireFunction, CJS_REQUIRE_FUNCTION_INDEX) \ V(JSTaggedValue, GlobalPatch, GLOBAL_PATCH) \ - V(JSTaggedValue, ExportOfScript, DEFAULT_EXPORT_OF_SCRIPT) + V(JSTaggedValue, ExportOfScript, DEFAULT_EXPORT_OF_SCRIPT) \ + V(JSTaggedValue, JsonObjectHclassCache, JSON_OBJECT_HCLASS_CACHE) class GlobalEnv : public TaggedObject { public: diff --git a/ecmascript/global_env_constants.cpp b/ecmascript/global_env_constants.cpp index 47b2ae39d1a37b318b7be1688da0002ad1fd6b98..c64ceb7f7f22c042a2ce7eb214517a5c36adb868 100644 --- a/ecmascript/global_env_constants.cpp +++ b/ecmascript/global_env_constants.cpp @@ -18,6 +18,7 @@ #include "ecmascript/accessor_data.h" #include "ecmascript/builtins/builtins.h" #include "ecmascript/builtins/builtins_global.h" +#include "ecmascript/ecma_string-inl.h" #include "ecmascript/ecma_vm.h" #include "ecmascript/free_object.h" #include "ecmascript/global_env.h" @@ -95,6 +96,8 @@ void GlobalEnvConstants::InitRootsClass(JSThread *thread, JSHClass *hClass) SetConstant(ConstantIndex::FREE_OBJECT_WITH_TWO_FIELD_CLASS_INDEX, factory->NewEcmaReadOnlyHClass(hClass, FreeObject::SIZE, JSType::FREE_OBJECT_WITH_TWO_FIELD)); SetConstant(ConstantIndex::LINE_STRING_CLASS_INDEX, factory->NewEcmaReadOnlyHClass(hClass, 0, JSType::LINE_STRING)); + SetConstant(ConstantIndex::SLICED_STRING_CLASS_INDEX, + factory->NewEcmaReadOnlyHClass(hClass, 0, JSType::SLICED_STRING)); SetConstant(ConstantIndex::CONSTANT_STRING_CLASS_INDEX, factory->NewEcmaReadOnlyHClass(hClass, 0, JSType::CONSTANT_STRING)); SetConstant(ConstantIndex::TREE_STRING_CLASS_INDEX, factory->NewEcmaReadOnlyHClass(hClass, 0, JSType::TREE_STRING)); @@ -385,6 +388,7 @@ void GlobalEnvConstants::InitGlobalConstant(JSThread *thread) SetConstant(ConstantIndex::TO_JSON_STRING_INDEX, factory->NewFromASCIINonMovable("toJSON")); SetConstant(ConstantIndex::GLOBAL_STRING_INDEX, factory->NewFromASCIINonMovable("global")); SetConstant(ConstantIndex::MESSAGE_STRING_INDEX, factory->NewFromASCIINonMovable("message")); + SetConstant(ConstantIndex::CAUSE_STRING_INDEX, factory->NewFromASCIINonMovable("cause")); SetConstant(ConstantIndex::ERROR_STRING_INDEX, factory->NewFromASCIINonMovable("Error")); SetConstant(ConstantIndex::ERRORS_STRING_INDEX, factory->NewFromASCII("errors")); SetConstant(ConstantIndex::AGGREGATE_ERROR_STRING_INDEX, factory->NewFromASCII("AggregateError")); @@ -537,6 +541,8 @@ void GlobalEnvConstants::InitGlobalConstant(JSThread *thread) SetConstant(ConstantIndex::GREGORY_INDEX, 
factory->NewFromASCIINonMovable("gregory")); SetConstant(ConstantIndex::ETHIOAA_INDEX, factory->NewFromASCIINonMovable("ethioaa")); SetConstant(ConstantIndex::STICKY_INDEX, factory->NewFromASCIINonMovable("sticky")); + SetConstant(ConstantIndex::HAS_INDICES_INDEX, factory->NewFromASCIINonMovable("hasIndices")); + SetConstant(ConstantIndex::INDICES_INDEX, factory->NewFromASCIINonMovable("indices")); SetConstant(ConstantIndex::U_INDEX, factory->NewFromASCIINonMovable("u")); SetConstant(ConstantIndex::INDEX_INDEX, factory->NewFromASCIINonMovable("index")); SetConstant(ConstantIndex::INPUT_INDEX, factory->NewFromASCIINonMovable("input")); @@ -647,4 +653,17 @@ void GlobalEnvConstants::InitClassConstructorOptimizedClass(ObjectFactory *facto fastCall->SetCanFastCall(true); SetConstant(ConstantIndex::CLASS_CONSTRUCTOR_OPTIMIZED_WITH_FAST_CALL_HCLASS_INDEX, fastCall); } + +void GlobalEnvConstants::InitElementKindHClass(const JSThread *thread, JSHandle originHClass) +{ + auto map = thread->GetArrayHClassIndexMap(); + for (auto iter : map) { + JSHandle hclass = originHClass; + if (iter.first != ElementsKind::GENERIC) { + hclass = JSHClass::Clone(thread, originHClass); + hclass->SetElementsKind(iter.first); + } + SetConstant(iter.second, hclass); + } +} } // namespace panda::ecmascript diff --git a/ecmascript/global_env_constants.h b/ecmascript/global_env_constants.h index 22e0cf14b0542fdbe7538c1980afa1cae7183c4a..56a157e776244fe369366cbff4940a324240bbc5 100644 --- a/ecmascript/global_env_constants.h +++ b/ecmascript/global_env_constants.h @@ -37,6 +37,7 @@ class ObjectFactory; V(JSTaggedValue, FreeObjectWithOneFieldClass, FREE_OBJECT_WITH_ONE_FIELD_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, FreeObjectWithTwoFieldClass, FREE_OBJECT_WITH_TWO_FIELD_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, LineStringClass, LINE_STRING_CLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, SlicedStringClass, SLICED_STRING_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, ConstantStringClass, CONSTANT_STRING_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, TreeStringClass, TREE_STRING_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, ArrayClass, ARRAY_CLASS_INDEX, ecma_roots_class) \ @@ -126,6 +127,19 @@ class ObjectFactory; V(JSTaggedValue, AOTLiteralInfoClass, AOT_LITERAL_INFO_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, VTableClass, VTABLE_CLASS_INDEX, ecma_roots_class) \ V(JSTaggedValue, ClassLiteralClass, CLASS_LITERAL_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementNoneClass, ELEMENT_NONE_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementIntClass, ELEMENT_INT_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementDoubleClass, ELEMENT_DOUBLE_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementNumberClass, ELEMENT_NUMBER_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementStringClass, ELEMENT_STRING_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementObjectClass, ELEMENT_OBJECT_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementTaggedClass, ELEMENT_TAGGED_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementHoleIntClass, ELEMENT_HOLE_INT_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementHoleDoubleClass, ELEMENT_HOLE_DOUBLE_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementHoleNumberClass, ELEMENT_HOLE_NUMBER_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementHoleStringClass, ELEMENT_HOLE_STRING_HCLASS_INDEX, ecma_roots_class) \ + V(JSTaggedValue, ElementHoleObjectClass, ELEMENT_HOLE_OBJECT_HCLASS_INDEX, 
ecma_roots_class) \ + V(JSTaggedValue, ElementHoleTaggedClass, ELEMENT_HOLE_TAGGED_HCLASS_INDEX, ecma_roots_class) // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) #define GLOBAL_ENV_CONSTANT_SPECIAL(V) \ @@ -143,7 +157,10 @@ class ObjectFactory; V(JSTaggedValue, MathACosFunction, MATH_ACOS_FUNCTION_INDEX, ecma_roots_special) \ V(JSTaggedValue, MathATanFunction, MATH_ATAN_FUNCTION_INDEX, ecma_roots_special) \ V(JSTaggedValue, MathAbsFunction, MATH_ABS_FUNCTION_INDEX, ecma_roots_special) \ - V(JSTaggedValue, MathFloorFunction, MATH_FLOOR_FUNCTION_INDEX, ecma_roots_special) + V(JSTaggedValue, MathFloorFunction, MATH_FLOOR_FUNCTION_INDEX, ecma_roots_special) \ + V(JSTaggedValue, LocaleCompareFunction, LOCALE_COMPARE_FUNCTION_INDEX, ecma_roots_special) \ + V(JSTaggedValue, ArraySortFunction, ARRAY_SORT_FUNCTION_INDEX, ecma_roots_special) \ + V(JSTaggedValue, JsonStringifyFunction, JSON_STRINGIFY_FUNCTION_INDEX, ecma_roots_special) /* GlobalConstant */ // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) @@ -246,6 +263,7 @@ class ObjectFactory; V(JSTaggedValue, ToJsonString, TO_JSON_STRING_INDEX, toJSON) \ V(JSTaggedValue, GlobalString, GLOBAL_STRING_INDEX, global) \ V(JSTaggedValue, MessageString, MESSAGE_STRING_INDEX, message) \ + V(JSTaggedValue, CauseString, CAUSE_STRING_INDEX, cause) \ V(JSTaggedValue, ErrorString, ERROR_STRING_INDEX, Error) \ V(JSTaggedValue, RangeErrorString, RANGE_ERROR_STRING_INDEX, RangeError) \ V(JSTaggedValue, ReferenceErrorString, REFERENCE_ERROR_STRING_INDEX, ReferenceError) \ @@ -389,6 +407,8 @@ class ObjectFactory; V(JSTaggedValue, GregoryString, GREGORY_INDEX, gregory) \ V(JSTaggedValue, EthioaaString, ETHIOAA_INDEX, ethioaa) \ V(JSTaggedValue, StickyString, STICKY_INDEX, sticky) \ + V(JSTaggedValue, HasIndicesString, HAS_INDICES_INDEX, hasIndices) \ + V(JSTaggedValue, IndicesString, INDICES_INDEX, indices) \ V(JSTaggedValue, UString, U_INDEX, u) \ V(JSTaggedValue, IndexString, INDEX_INDEX, index) \ V(JSTaggedValue, InputString, INPUT_INDEX, input) \ @@ -501,6 +521,8 @@ public: void InitSpecialForSnapshot(); void InitClassConstructorOptimizedClass(ObjectFactory *factory); + void InitElementKindHClass(const JSThread *thread, JSHandle originHClass); + void SetCachedLocales(JSTaggedValue value); void SetConstant(ConstantIndex index, JSTaggedValue value); diff --git a/ecmascript/ic/ic_binary_op.h b/ecmascript/ic/ic_binary_op.h index 1e47bd33e41dadc31853340f3f5097abcb748c60..c204659a3adec5377a28f44fd7394aa0746cc5eb 100644 --- a/ecmascript/ic/ic_binary_op.h +++ b/ecmascript/ic/ic_binary_op.h @@ -68,6 +68,7 @@ public: JSHandle stringA0 = JSHandle(JSHandle(thread, left)); JSHandle stringA1 = JSHandle(JSHandle(thread, right)); EcmaString *ret = EcmaStringAccessor::Concat(thread->GetEcmaVM(), stringA0, stringA1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSTaggedValue(ret); } // Support cases, such as: string + null, string + object, string + boolean, string + number, etc. 
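The hunk that follows hardens exactly the path this comment describes: `ToString` and `Concat` both allocate and can therefore complete abruptly, so each step is now followed by `RETURN_EXCEPTION_IF_ABRUPT_COMPLETION`. A hedged sketch of the control flow, with C++ exceptions standing in for the engine's pending-exception check (all names are illustrative):

```
#include <climits>
#include <stdexcept>
#include <string>

// Stand-in for JSTaggedValue::ToString: conversion may fail (for example a
// user-defined toString throwing), modeled here as a C++ exception.
std::string ToStringChecked(int value)
{
    if (value == INT_MIN) { // pretend this case completes abruptly
        throw std::runtime_error("abrupt completion in ToString");
    }
    return std::to_string(value);
}

// "string + number": convert the non-string operand first, then concatenate.
// An abrupt completion in step 1 must escape before step 2 ever runs, which
// is what the inserted checks guarantee in the IC path.
std::string AddStringAndNumber(const std::string &left, int right)
{
    std::string rhs = ToStringChecked(right); // step 1: may throw
    return left + rhs;                        // step 2: Concat (may also fail on OOM)
}
```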
@@ -77,12 +78,16 @@ public: if (left.IsString()) { JSHandle stringA0 = JSHandle(leftValue); JSHandle stringA1 = JSTaggedValue::ToString(thread, rightValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *ret = EcmaStringAccessor::Concat(thread->GetEcmaVM(), stringA0, stringA1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSTaggedValue(ret); } else { JSHandle stringA0 = JSTaggedValue::ToString(thread, leftValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle stringA1 = JSHandle(rightValue); EcmaString *ret = EcmaStringAccessor::Concat(thread->GetEcmaVM(), stringA0, stringA1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSTaggedValue(ret); } } diff --git a/ecmascript/ic/ic_compare_op.h b/ecmascript/ic/ic_compare_op.h index 770f175da8e40ec016db74278b4744b37083ec6e..aacff9d7f7df4b01f6f6519878c91ebd793d6f41 100644 --- a/ecmascript/ic/ic_compare_op.h +++ b/ecmascript/ic/ic_compare_op.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_IC_IC_COMPARE_H -#define ECMASCRIPT_IC_IC_COMPARE_H +#ifndef ECMASCRIPT_IC_IC_COMPARE_OP_H +#define ECMASCRIPT_IC_IC_COMPARE_OP_H #include "ecmascript/js_function.h" #include "ecmascript/js_thread.h" @@ -75,4 +75,4 @@ public: JSTaggedValue right, CompareOpType operationType); }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_IC_IC_COMPAREOP_H \ No newline at end of file +#endif // ECMASCRIPT_IC_IC_COMPARE_OP_H \ No newline at end of file diff --git a/ecmascript/ic/ic_handler.h b/ecmascript/ic/ic_handler.h index 7e62da108f4ea12683a8bf3ead6cf90c061ed084..9315af7ee79249743cd300e8504cbff768671700 100644 --- a/ecmascript/ic/ic_handler.h +++ b/ecmascript/ic/ic_handler.h @@ -38,7 +38,8 @@ public: using InternalAccessorBit = AccessorBit::NextFlag; using IsJSArrayBit = InternalAccessorBit::NextFlag; using OffsetBit = IsJSArrayBit::NextField; - using AttrIndexBit = OffsetBit::NextField; + using RepresentationBit = OffsetBit::NextField; + using AttrIndexBit = RepresentationBit::NextField; HandlerBase() = default; virtual ~HandlerBase() = default; @@ -122,6 +123,7 @@ public: auto index = holder->GetJSHClass()->GetInlinedPropertiesIndex(op.GetIndex()); OffsetBit::Set(index, &handler); AttrIndexBit::Set(op.GetIndex(), &handler); + RepresentationBit::Set(op.GetRepresentation(), &handler); return JSHandle(thread, JSTaggedValue(handler)); } if (op.IsFastMode()) { @@ -129,6 +131,7 @@ public: uint32_t inlinePropNum = holder->GetJSHClass()->GetInlinedProperties(); AttrIndexBit::Set(op.GetIndex() + inlinePropNum, &handler); OffsetBit::Set(op.GetIndex(), &handler); + RepresentationBit::Set(Representation::TAGGED, &handler); return JSHandle(thread, JSTaggedValue(handler)); } LOG_ECMA(FATAL) << "this branch is unreachable"; @@ -172,6 +175,7 @@ public: } AttrIndexBit::Set(op.GetIndex(), &handler); OffsetBit::Set(index, &handler); + RepresentationBit::Set(op.GetRepresentation(), &handler); return JSHandle(thread, JSTaggedValue(handler)); } ASSERT(op.IsFastMode()); @@ -179,6 +183,7 @@ public: uint32_t inlinePropNum = receiver->GetJSHClass()->GetInlinedProperties(); AttrIndexBit::Set(op.GetIndex() + inlinePropNum, &handler); OffsetBit::Set(op.GetIndex(), &handler); + RepresentationBit::Set(Representation::TAGGED, &handler); 
return JSHandle<JSTaggedValue>(thread, JSTaggedValue(handler)); } diff --git a/ecmascript/ic/ic_runtime.cpp b/ecmascript/ic/ic_runtime.cpp index d860b8a63b73e6a7dc9e353a719409eedbde146e..81b639d6099a816147389d38a1dd7e269b749fab 100644 --- a/ecmascript/ic/ic_runtime.cpp +++ b/ecmascript/ic/ic_runtime.cpp @@ -149,7 +149,9 @@ JSTaggedValue LoadICRuntime::LoadMiss(JSHandle<JSTaggedValue> receiver, JSHandle<JSTaggedValue> { if (!receiver->IsJSObject() || receiver->HasOrdinaryGet()) { icAccessor_.SetAsMega(); - return JSTaggedValue::GetProperty(thread_, receiver, key).GetValue().GetTaggedValue(); + JSHandle<JSTaggedValue> propKey = JSTaggedValue::ToPropertyKey(thread_, key); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + return JSTaggedValue::GetProperty(thread_, receiver, propKey).GetValue().GetTaggedValue(); } ICKind kind = GetICKind(); @@ -213,7 +215,6 @@ JSTaggedValue StoreICRuntime::StoreMiss(JSHandle<JSTaggedValue> receiver, JSHand return JSTaggedValue::Undefined(); } } - UpdateReceiverHClass(JSHandle<JSTaggedValue>(GetThread(), JSHandle<JSObject>::Cast(receiver)->GetClass())); ObjectOperator op(GetThread(), receiver, key); if (!op.IsFound()) { @@ -225,6 +226,7 @@ JSTaggedValue StoreICRuntime::StoreMiss(JSHandle<JSTaggedValue> receiver, JSHand } } bool success = JSObject::SetProperty(&op, value, true); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); // ic-switch if (!GetThread()->GetEcmaVM()->ICEnabled()) { icAccessor_.SetAsMega(); @@ -237,6 +239,7 @@ JSTaggedValue StoreICRuntime::StoreMiss(JSHandle<JSTaggedValue> receiver, JSHand return success ? JSTaggedValue::Undefined() : JSTaggedValue::Exception(); } if (success) { + UpdateReceiverHClass(JSHandle<JSTaggedValue>(GetThread(), JSHandle<JSObject>::Cast(receiver)->GetClass())); UpdateStoreHandler(op, key, receiver); return JSTaggedValue::Undefined(); } diff --git a/ecmascript/ic/ic_runtime_stub-inl.h b/ecmascript/ic/ic_runtime_stub-inl.h index 482c807fd77b37731819655fd9478aac3f6e1bd0..eccb4f21ec32221a256fc0e9a2b6d82760662086 100644 --- a/ecmascript/ic/ic_runtime_stub-inl.h +++ b/ecmascript/ic/ic_runtime_stub-inl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License.
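The `ic_runtime.cpp` hunks above follow one pattern: any operation that can re-enter user JS (`ToPropertyKey`, `SetProperty`, and `ToString` in the compare-op hunks) is now followed by an abrupt-completion check before its result is used, and `UpdateReceiverHClass` records the receiver's hclass only after the store succeeds, so the cache sees the post-transition class. A minimal sketch of the check-after-conversion pattern, with a simplified stand-in for the engine's `RETURN_EXCEPTION_IF_ABRUPT_COMPLETION` macro:

```cpp
// Simplified stand-in for the engine's abrupt-completion macro: conversions
// such as ToPropertyKey may call toString()/valueOf() on user objects, i.e.
// arbitrary JS, which can leave a pending exception on the thread. The
// conversion's result must not be used once that has happened.
#define RETURN_IF_ABRUPT(thread)               \
    do {                                       \
        if ((thread)->HasPendingException()) { \
            return JSTaggedValue::Exception(); \
        }                                      \
    } while (false)

JSTaggedValue LoadMissSketch(JSThread *thread, JSHandle<JSTaggedValue> receiver,
                             JSHandle<JSTaggedValue> key)
{
    JSHandle<JSTaggedValue> propKey = JSTaggedValue::ToPropertyKey(thread, key);
    RETURN_IF_ABRUPT(thread);  // propKey is unusable if a toString/getter threw
    return JSTaggedValue::GetProperty(thread, receiver, propKey).GetValue().GetTaggedValue();
}
```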
*/ -#ifndef ECMASCRIPT_IC_IC_RUNTIME_STUB_INL_H_ -#define ECMASCRIPT_IC_IC_RUNTIME_STUB_INL_H_ +#ifndef ECMASCRIPT_IC_IC_RUNTIME_STUB_INL_H +#define ECMASCRIPT_IC_IC_RUNTIME_STUB_INL_H #include "ecmascript/base/config.h" #include "ecmascript/global_env.h" @@ -283,7 +283,7 @@ void ICRuntimeStub::StoreWithTransition(JSThread *thread, JSObject *receiver, JS handlerInfo = static_cast<uint32_t>(transitionHandler->GetHandlerInfo().GetInt()); } - receiver->SetClass(newHClass); + receiver->SynchronizedSetClass(newHClass); ASSERT(HandlerBase::IsField(handlerInfo)); if (!HandlerBase::IsInlinedProps(handlerInfo)) { @@ -301,8 +301,10 @@ void ICRuntimeStub::StoreWithTransition(JSThread *thread, JSObject *receiver, JS properties = factory->NewTaggedArray(capacity); } else { auto arrayHandle = JSHandle<TaggedArray>(thread, array); - properties = factory->CopyArray(arrayHandle, capacity, - JSObject::ComputePropertyCapacity(capacity)); + uint32_t maxNonInlinedFastPropsCapacity = objHandle->GetNonInlinedFastPropsCapacity(); + uint32_t newLen = JSObject::ComputeNonInlinedFastPropsCapacity(capacity, + maxNonInlinedFastPropsCapacity); + properties = factory->CopyArray(arrayHandle, capacity, newLen); } properties->Set(thread, index, valueHandle); objHandle->SetProperties(thread, properties); @@ -488,7 +490,7 @@ JSTaggedValue ICRuntimeStub::StoreElement(JSThread *thread, JSObject *receiver, return JSTaggedValue::Undefined(); } -ARK_INLINE int32_t ICRuntimeStub::TryToElementsIndex(JSTaggedValue key) +ARK_INLINE int64_t ICRuntimeStub::TryToElementsIndex(JSTaggedValue key) { if (LIKELY(key.IsInt())) { return key.GetInt(); @@ -497,7 +499,7 @@ ARK_INLINE int32_t ICRuntimeStub::TryToElementsIndex(JSTaggedValue key) if (key.IsString()) { uint32_t index = 0; if (JSTaggedValue::StringToElementIndex(key, &index)) { - return static_cast<int32_t>(index); + return static_cast<int64_t>(index); } } @@ -536,4 +538,4 @@ JSTaggedValue ICRuntimeStub::StoreMiss(JSThread *thread, ProfileTypeInfo *profil } } // namespace panda::ecmascript -#endif // ECMASCRIPT_IC_IC_RUNTIME_STUB_INL_H_ +#endif // ECMASCRIPT_IC_IC_RUNTIME_STUB_INL_H diff --git a/ecmascript/ic/ic_runtime_stub.h b/ecmascript/ic/ic_runtime_stub.h index 455146ae9f37f5b38c24e6e55f6e3b7de17c8aec..50b87ce9eea5fc2ce1f46703b434c1d2d5cbd9c9 100644 --- a/ecmascript/ic/ic_runtime_stub.h +++ b/ecmascript/ic/ic_runtime_stub.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License.
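On the `TryToElementsIndex` widening above: an ECMAScript array index is a uint32 in [0, 2^32 − 2], so indices above INT32_MAX cannot round-trip through `int32_t` without going negative, and negative is exactly the "not an element index" signal callers rely on. `int64_t` holds the full range while keeping negative values free for the failure case. A minimal standalone demonstration:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t index = 3000000000U;  // a valid JS array index, > INT32_MAX

    // Old behaviour: the narrowing cast wraps to a negative value, so a
    // legitimate element index is misread as "no index".
    assert(static_cast<int32_t>(index) < 0);

    // New behaviour: int64_t preserves the value exactly.
    assert(static_cast<int64_t>(index) == 3000000000LL);
    return 0;
}
```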
*/ -#ifndef ECMASCRIPT_IC_IC_RUNTIME_STUB_H_ -#define ECMASCRIPT_IC_IC_RUNTIME_STUB_H_ +#ifndef ECMASCRIPT_IC_IC_RUNTIME_STUB_H +#define ECMASCRIPT_IC_IC_RUNTIME_STUB_H #include "ecmascript/ic/profile_type_info.h" #include "ecmascript/js_tagged_value.h" @@ -71,7 +71,7 @@ public: static inline JSTaggedValue LoadElement(JSObject *receiver, JSTaggedValue key); static inline JSTaggedValue StoreElement(JSThread *thread, JSObject *receiver, JSTaggedValue key, JSTaggedValue value, JSTaggedValue handlerInfo); - static inline int32_t TryToElementsIndex(JSTaggedValue key); + static inline int64_t TryToElementsIndex(JSTaggedValue key); static inline JSTaggedValue LoadMiss(JSThread *thread, ProfileTypeInfo *profileTypeInfo, JSTaggedValue receiver, JSTaggedValue key, uint32_t slotId, ICKind kind); static inline JSTaggedValue StoreMiss(JSThread *thread, ProfileTypeInfo *profileTypeInfo, JSTaggedValue receiver, @@ -79,4 +79,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_IC_IC_RUNTIME_STUB_H_ +#endif // ECMASCRIPT_IC_IC_RUNTIME_STUB_H diff --git a/ecmascript/ic/invoke_cache.h b/ecmascript/ic/invoke_cache.h index 82d308a641011f992adadf67f700ab6d22cb0e7c..e533122ca597455c3d2395f0aeae4c8e6f414f10 100644 --- a/ecmascript/ic/invoke_cache.h +++ b/ecmascript/ic/invoke_cache.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_IC_INVOKE_CACHE_H_ -#define ECMASCRIPT_IC_INVOKE_CACHE_H_ +#ifndef ECMASCRIPT_IC_INVOKE_CACHE_H +#define ECMASCRIPT_IC_INVOKE_CACHE_H #include "ecmascript/ic/profile_type_info.h" #include "ecmascript/js_tagged_value.h" @@ -48,4 +48,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_IC_INVOKE_CACHE_H_ +#endif // ECMASCRIPT_IC_INVOKE_CACHE_H diff --git a/ecmascript/interpreter/fast_runtime_stub-inl.h b/ecmascript/interpreter/fast_runtime_stub-inl.h index 9deddc53eea9739dd3de0495e7089d041ffd48c8..0d18dc76a0f9e024c1d9a2bfac623ce341aa26f4 100644 --- a/ecmascript/interpreter/fast_runtime_stub-inl.h +++ b/ecmascript/interpreter/fast_runtime_stub-inl.h @@ -18,6 +18,7 @@ #include "ecmascript/interpreter/fast_runtime_stub.h" +#include "ecmascript/ecma_string-inl.h" #include "ecmascript/global_dictionary-inl.h" #include "ecmascript/global_env.h" #include "ecmascript/interpreter/interpreter.h" diff --git a/ecmascript/interpreter/interpreter-inl.h b/ecmascript/interpreter/interpreter-inl.h index 6cc43d33afa3cebac03ed9a3a33758fc5f3a0ab7..c9bef0cf2ca9f9c7ea6c6f31645ec4a45f986a28 100644 --- a/ecmascript/interpreter/interpreter-inl.h +++ b/ecmascript/interpreter/interpreter-inl.h @@ -198,12 +198,42 @@ using CommonStubCSigns = kungfu::CommonStubCSigns; } \ } while (false) +#define JUMP_IF_ENTRYFRAME_PENDING() \ + do { \ + if (thread->IsEntryFrameDroppedPending()) { \ + thread->ResetEntryFrameDroppedState(); \ + DROPFRAME_JUMP(); \ + } \ + } while (false) + +#define DROPFRAME_JUMP() \ + do { \ + thread->ResetFrameDroppedState(); \ + sp = const_cast(thread->GetCurrentSPFrame()); \ + InterpretedFrame *state = GET_FRAME(sp); \ + pc = state->pc; \ + RESTORE_ACC(); \ + DISPATCH_OFFSET(0); \ + } while (false) + +#define RESET_AND_JUMP_IF_DROPFRAME() \ + do { \ + if (thread->IsFrameDropped()) { \ + if (thread->IsEntryFrameDroppedTrue()) { \ + return; \ + } \ + 
DROPFRAME_JUMP(); \ + } \ + } while (false) + // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) #define NOTIFY_DEBUGGER_EVENT() \ do { \ + JUMP_IF_ENTRYFRAME_PENDING(); \ SAVE_ACC(); \ SAVE_PC(); \ NotifyBytecodePcChanged(thread); \ + RESET_AND_JUMP_IF_DROPFRAME(); \ RESTORE_ACC(); \ } while (false) @@ -712,14 +742,22 @@ JSTaggedValue EcmaInterpreter::Execute(EcmaRuntimeCallInfo *info) thread->CheckSafepoint(); LOG_INST() << "Entry: Runtime Call " << std::hex << reinterpret_cast(newSp) << " " << std::hex << reinterpret_cast(pc); - + MethodEntry(thread); EcmaInterpreter::RunInternal(thread, pc, newSp); // NOLINTNEXTLINE(readability-identifier-naming) const JSTaggedValue resAcc = state->acc; - // pop frame + InterpretedEntryFrame *entryState = GET_ENTRY_FRAME(sp); JSTaggedType *prevSp = entryState->base.prev; + + if (thread->IsEntryFrameDroppedTrue()) { + thread->PendingEntryFrameDroppedState(); + InterpretedFrame *prevState = GET_FRAME(prevSp); + return prevState->acc; + } + + // pop frame thread->SetCurrentSPFrame(prevSp); return resAcc; #else @@ -786,6 +824,7 @@ JSTaggedValue EcmaInterpreter::GeneratorReEnterInterpreter(JSThread *thread, JSH // execute interpreter thread->SetCurrentSPFrame(newSp); + MethodEntry(thread); EcmaInterpreter::RunInternal(thread, resumePc, newSp); JSTaggedValue res = state->acc; @@ -865,6 +904,41 @@ void EcmaInterpreter::NotifyDebuggerStmt(JSThread *thread) } } +void EcmaInterpreter::MethodEntry(JSThread *thread) +{ + FrameHandler frameHandler(thread); + for (; frameHandler.HasFrame(); frameHandler.PrevJSFrame()) { + if (frameHandler.IsEntryFrame()) { + continue; + } + Method *method = frameHandler.GetMethod(); + if (method->IsNativeWithCallField()) { + continue; + } + JSTaggedValue env = frameHandler.GetEnv(); + auto *debuggerMgr = thread->GetEcmaVM()->GetJsDebuggerManager(); + debuggerMgr->GetNotificationManager()->MethodEntryEvent(thread, method, env); + return; + } +} + +void EcmaInterpreter::MethodExit(JSThread *thread) +{ + FrameHandler frameHandler(thread); + for (; frameHandler.HasFrame(); frameHandler.PrevJSFrame()) { + if (frameHandler.IsEntryFrame()) { + continue; + } + Method *method = frameHandler.GetMethod(); + if (method->IsNativeWithCallField()) { + continue; + } + auto *debuggerMgr = thread->GetEcmaVM()->GetJsDebuggerManager(); + debuggerMgr->GetNotificationManager()->MethodExitEvent(thread, method); + return; + } +} + const JSPandaFile *EcmaInterpreter::GetNativeCallPandafile(JSThread *thread) { FrameHandler frameHandler(thread); @@ -1363,6 +1437,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime Call " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -1400,6 +1475,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t DISPATCH(DEPRECATED_CALLSPREAD_PREF_V8_V8_V8); } HANDLE_OPCODE(RETURN) { + MethodExit(thread); LOG_INST() << "return"; InterpretedFrame *state = GET_FRAME(sp); LOG_INST() << "Exit: Runtime Call " << std::hex << reinterpret_cast(sp) << " " @@ -1448,6 +1524,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t INTERPRETER_HANDLE_RETURN(); } HANDLE_OPCODE(RETURNUNDEFINED) { + MethodExit(thread); LOG_INST() << "return.undefined"; InterpretedFrame *state = GET_FRAME(sp); LOG_INST() << "Exit: Runtime Call " << std::hex << reinterpret_cast(sp) << " " @@ -3050,6 +3127,7 @@ NO_UB_SANITIZE void 
EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t DISPATCH(CREATEASYNCGENERATOROBJ_V8); } HANDLE_OPCODE(ASYNCGENERATORRESOLVE_V8_V8_V8) { + MethodExit(thread); uint16_t v0 = READ_INST_8_0(); uint16_t v1 = READ_INST_8_1(); uint16_t v2 = READ_INST_8_2(); @@ -3235,6 +3313,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime SuperCall " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -3370,6 +3449,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime SuperCall " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -3505,6 +3585,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime SuperCall " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -3640,6 +3721,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime SuperCall " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -3728,9 +3810,14 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t pc = method->GetBytecodeArray() + pcOffset; break; } + if (!method->IsNativeWithCallField()) { + auto *debuggerMgr = thread->GetEcmaVM()->GetJsDebuggerManager(); + debuggerMgr->GetNotificationManager()->MethodExitEvent(thread, method); + } } if (pcOffset == INVALID_INDEX) { - return; + LOG_FULL(FATAL) << "EXCEPTION: EntryFrame Not Found"; + UNREACHABLE(); } auto exception = thread->GetException(); @@ -4005,6 +4092,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime New " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -4141,6 +4229,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime New " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -4276,6 +4365,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t thread->SetCurrentSPFrame(newSp); LOG_INST() << "Entry: Runtime New " << std::hex << reinterpret_cast(sp) << " " << std::hex << reinterpret_cast(pc); + MethodEntry(thread); DISPATCH_OFFSET(0); } } @@ -5049,6 +5139,7 @@ NO_UB_SANITIZE void EcmaInterpreter::RunInternal(JSThread *thread, const uint8_t DISPATCH(DEPRECATED_DELOBJPROP_PREF_V8_V8); } HANDLE_OPCODE(SUSPENDGENERATOR_V8) { + MethodExit(thread); uint16_t v0 = READ_INST_8_0(); LOG_INST() << "intrinsics::suspendgenerator" << " v" << v0; @@ -7357,10 +7448,14 @@ JSTaggedType *EcmaInterpreter::GetInterpreterFrameEnd(JSThread *thread, JSTagged } else { if (FrameHandler::GetFrameType(sp) == FrameType::INTERPRETER_FRAME || FrameHandler::GetFrameType(sp) == FrameType::INTERPRETER_FAST_NEW_FRAME) { - newSp = sp - InterpretedFrame::NumOfMembers(); // 
NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) + // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) + newSp = sp - InterpretedFrame::NumOfMembers(); + } else if (FrameHandler::GetFrameType(sp) == FrameType::INTERPRETER_BUILTIN_FRAME) { + // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) + newSp = sp - InterpretedBuiltinFrame::NumOfMembers(); } else { - newSp = - sp - InterpretedEntryFrame::NumOfMembers(); // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) + // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) + newSp = sp - InterpretedEntryFrame::NumOfMembers(); } } return newSp; diff --git a/ecmascript/interpreter/interpreter.h b/ecmascript/interpreter/interpreter.h index b37c6a53e2e5f192d8a03e34b4b90a976b4b106e..73c166e2d79fcca62f13379cabb12977d961f693 100644 --- a/ecmascript/interpreter/interpreter.h +++ b/ecmascript/interpreter/interpreter.h @@ -61,6 +61,8 @@ public: static inline bool UpdateHotnessCounter(JSThread* thread, JSTaggedType *sp, JSTaggedValue acc, int32_t offset); static inline void NotifyBytecodePcChanged(JSThread *thread); static inline void NotifyDebuggerStmt(JSThread *thread); + static inline void MethodEntry(JSThread *thread); + static inline void MethodExit(JSThread *thread); static inline const JSPandaFile *GetNativeCallPandafile(JSThread *thread); static inline JSTaggedValue GetCurrentEntryPoint(JSThread *thread); static inline JSTaggedValue GetFunction(JSTaggedType *sp); diff --git a/ecmascript/interpreter/interpreter_assembly.cpp b/ecmascript/interpreter/interpreter_assembly.cpp index 3f88c46acd5fa2ecd06335c8816a3a3c3c9941fe..4abf58bd612e1bf85c4eaea766db90f73c1e206c 100644 --- a/ecmascript/interpreter/interpreter_assembly.cpp +++ b/ecmascript/interpreter/interpreter_assembly.cpp @@ -32,6 +32,7 @@ #include "ecmascript/mem/concurrent_marker.h" #include "ecmascript/runtime_call_id.h" #include "ecmascript/template_string.h" +#include "ecmascript/debugger/js_debugger_manager.h" #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h" @@ -256,8 +257,19 @@ JSTaggedValue InterpreterAssembly::Execute(EcmaRuntimeCallInfo *info) #if ECMASCRIPT_ENABLE_FUNCTION_CALL_TIMER RuntimeStubs::StartCallTimer(thread->GetGlueAddr(), info->GetFunctionValue().GetRawData(), false); #endif + if (thread->IsDebugMode() && !method->IsNativeWithCallField()) { + JSHandle func(thread, info->GetFunctionValue()); + JSTaggedValue env = func->GetLexicalEnv(); + MethodEntry(thread, method, env); + } auto acc = reinterpret_cast(entry)(thread->GetGlueAddr(), callTarget, method, method->GetCallField(), argc, argv); + + if (thread->IsEntryFrameDroppedTrue()) { + thread->PendingEntryFrameDroppedState(); + return JSTaggedValue::Hole(); + } + auto sp = const_cast(thread->GetCurrentSPFrame()); ASSERT(FrameHandler::GetFrameType(sp) == FrameType::INTERPRETER_ENTRY_FRAME); auto prevEntry = InterpretedEntryFrame::GetFrameFromSp(sp)->GetPrevFrameFp(); @@ -266,11 +278,30 @@ JSTaggedValue InterpreterAssembly::Execute(EcmaRuntimeCallInfo *info) return JSTaggedValue(acc); } +void InterpreterAssembly::MethodEntry(JSThread *thread, Method *method, JSTaggedValue env) +{ + FrameHandler frameHandler(thread); + for (; frameHandler.HasFrame(); frameHandler.PrevJSFrame()) { + if (frameHandler.IsEntryFrame()) { + continue; + } + auto *debuggerMgr = thread->GetEcmaVM()->GetJsDebuggerManager(); + debuggerMgr->GetNotificationManager()->MethodEntryEvent(thread, method, env); + return; + } +} + JSTaggedValue 
InterpreterAssembly::GeneratorReEnterInterpreter(JSThread *thread, JSHandle context) { // check is or not debugger thread->CheckSwitchDebuggerBCStub(); auto entry = thread->GetRTInterface(kungfu::RuntimeStubCSigns::ID_GeneratorReEnterAsmInterp); + JSTaggedValue func = context->GetMethod(); + Method *method = ECMAObject::Cast(func.GetTaggedObject())->GetCallTarget(); + JSTaggedValue env = context->GetLexicalEnv(); + if (thread->IsDebugMode() && !method->IsNativeWithCallField()) { + MethodEntry(thread, method, env); + } auto acc = reinterpret_cast(entry)(thread->GetGlueAddr(), context.GetTaggedType()); return JSTaggedValue(acc); } diff --git a/ecmascript/interpreter/interpreter_assembly.h b/ecmascript/interpreter/interpreter_assembly.h index 5ba6b9f0f591deaedbc2aa7bfff5da97da62af64..b5a6a606529986d371bb8adcb9e4f5bf3bcc4f43 100644 --- a/ecmascript/interpreter/interpreter_assembly.h +++ b/ecmascript/interpreter/interpreter_assembly.h @@ -40,6 +40,7 @@ public: static JSTaggedValue Execute(EcmaRuntimeCallInfo *info); static JSTaggedValue GeneratorReEnterInterpreter(JSThread *thread, JSHandle context); static inline size_t GetJumpSizeAfterCall(const uint8_t *prevPc); + static inline void MethodEntry(JSThread *thread, Method *method, JSTaggedValue env); static inline JSTaggedValue UpdateHotnessCounter(JSThread* thread, JSTaggedType *sp); static inline void InterpreterFrameCopyArgs(JSTaggedType *newSp, uint32_t numVregs, uint32_t numActualArgs, diff --git a/ecmascript/jobs/micro_job_queue.cpp b/ecmascript/jobs/micro_job_queue.cpp index 86ee75fdd8d6045a820b73fd8a7ecfbf19dee008..d598dea14507448157eb13b6d001e52abd2a526a 100644 --- a/ecmascript/jobs/micro_job_queue.cpp +++ b/ecmascript/jobs/micro_job_queue.cpp @@ -27,6 +27,13 @@ #include "ecmascript/tagged_queue.h" namespace panda::ecmascript::job { +uint32_t MicroJobQueue::GetPromiseQueueSize(JSThread *thread, JSHandle jobQueue) +{ + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle promiseQueue(thread, jobQueue->GetPromiseJobQueue()); + return promiseQueue->Size(); +} + void MicroJobQueue::EnqueueJob(JSThread *thread, JSHandle jobQueue, QueueType queueType, const JSHandle &job, const JSHandle &argv) { @@ -75,4 +82,11 @@ void MicroJobQueue::ExecutePendingJob(JSThread *thread, JSHandle } } } + +bool MicroJobQueue::HasPendingJob(JSThread *thread, JSHandle jobQueue) +{ + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle promiseQueue(thread, jobQueue->GetPromiseJobQueue()); + return !promiseQueue->Empty(); +} } // namespace panda::ecmascript::job diff --git a/ecmascript/jobs/micro_job_queue.h b/ecmascript/jobs/micro_job_queue.h index fb2c81a20baa12c62bb1db56bdd27b34becc7db9..35f04c54991088a9a5307f5ca7d30fc58d7cec9c 100644 --- a/ecmascript/jobs/micro_job_queue.h +++ b/ecmascript/jobs/micro_job_queue.h @@ -35,9 +35,11 @@ public: return static_cast(object); } + static uint32_t GetPromiseQueueSize(JSThread *thread, JSHandle jobQueue); static void EnqueueJob(JSThread *thread, JSHandle jobQueue, QueueType queueType, const JSHandle &job, const JSHandle &argv); static void ExecutePendingJob(JSThread *thread, JSHandle jobQueue); + static bool HasPendingJob(JSThread *thread, JSHandle jobQueue); static constexpr size_t PROMISE_JOB_QUEUE_OFFSET = Record::SIZE; ACCESSORS(PromiseJobQueue, PROMISE_JOB_QUEUE_OFFSET, SCRIPT_JOB_QUEUE_OFFSET); diff --git a/ecmascript/jobs/pending_job.h b/ecmascript/jobs/pending_job.h index b3b2249cb76bfb0bda3651d046425494f1cb0ba2..81d5ca0ef5d064aadadef63ef7b207b7b7b44015 100644 --- 
a/ecmascript/jobs/pending_job.h +++ b/ecmascript/jobs/pending_job.h @@ -40,13 +40,11 @@ public: { [[maybe_unused]] EcmaHandleScope handleScope(thread); EXECUTE_JOB_HITRACE(pendingJob); - tooling::JsDebuggerManager *jsDebuggerManager = thread->GetEcmaVM()->GetJsDebuggerManager(); - jsDebuggerManager->GetNotificationManager()->PendingJobEntryEvent(); JSHandle job(thread, pendingJob->GetJob()); ASSERT(job->IsCallable()); JSHandle argv(thread, pendingJob->GetArguments()); - const int32_t argsLength = static_cast(argv->GetLength()); + const uint32_t argsLength = argv->GetLength(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, job, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); diff --git a/ecmascript/js_api/js_api_arraylist.cpp b/ecmascript/js_api/js_api_arraylist.cpp index ac0770a3c18ae6c95e470b9993e60dcd503d062f..8c79ba3d871776d33ab898672434d4543478e394 100644 --- a/ecmascript/js_api/js_api_arraylist.cpp +++ b/ecmascript/js_api/js_api_arraylist.cpp @@ -78,7 +78,7 @@ JSHandle JSAPIArrayList::Clone(JSThread *thread, const JSHandle< JSHandle srcElements(thread, obj->GetElements()); ASSERT(!srcElements->IsDictionaryMode()); - int32_t length = obj->GetSize(); + uint32_t length = obj->GetSize(); auto factory = thread->GetEcmaVM()->GetFactory(); JSHandle newArrayList = factory->NewJSAPIArrayList(0); newArrayList->SetLength(thread, JSTaggedValue(length)); @@ -199,7 +199,7 @@ bool JSAPIArrayList::Remove(JSThread *thread, const JSHandle &ar const JSHandle &value) { int index = GetIndexOf(thread, arrayList, value); - int length = arrayList->GetSize(); + uint32_t length = arrayList->GetSize(); if (index >= 0) { JSHandle elements(thread, arrayList->GetElements()); ASSERT(!elements->IsDictionaryMode()); @@ -253,7 +253,7 @@ JSTaggedValue JSAPIArrayList::ReplaceAllElements(JSThread *thread, const JSHandl const JSHandle &thisArg) { JSHandle arrayList = JSHandle::Cast(thisHandle); - uint32_t length = static_cast(arrayList->GetSize()); + uint32_t length = arrayList->GetSize(); JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); const int32_t argsLength = 3; @@ -334,7 +334,7 @@ JSTaggedValue JSAPIArrayList::ForEach(JSThread *thread, const JSHandle &thisArg) { JSHandle arrayList = JSHandle::Cast(thisHandle); - uint32_t length = static_cast(arrayList->GetSize()); + uint32_t length = arrayList->GetSize(); JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); const int32_t argsLength = 3; @@ -348,8 +348,8 @@ JSTaggedValue JSAPIArrayList::ForEach(JSThread *thread, const JSHandleSetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisHandle.GetTaggedValue()); JSTaggedValue funcResult = JSFunction::Call(info); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, funcResult); - if (static_cast(length) != arrayList->GetSize()) { - length = static_cast(arrayList->GetSize()); + if (length != arrayList->GetSize()) { + length = arrayList->GetSize(); } } @@ -376,12 +376,12 @@ JSHandle JSAPIArrayList::GrowCapacity(const JSThread *thread, const bool JSAPIArrayList::Has(const JSTaggedValue value) const { TaggedArray *elements = TaggedArray::Cast(GetElements().GetTaggedObject()); - int32_t length = GetSize(); + uint32_t length = GetSize(); if (length == 0) { return false; } - for (int32_t i = 0; i < length; i++) { + for (uint32_t i = 0; i < length; i++) { if 
(JSTaggedValue::SameValue(elements->Get(i), value)) { return true; } @@ -405,6 +405,7 @@ bool JSAPIArrayList::GetOwnProperty(JSThread *thread, const JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); CString errorMsg = "The type of \"index\" can not obtain attributes of no-number type. Received value is: " + ConvertToString(*result); diff --git a/ecmascript/js_api/js_api_arraylist.h b/ecmascript/js_api/js_api_arraylist.h index a62ae8c4dbc0b52ac32f45f9f357c5c4b20cdff5..7b2438a6dfdf21c0e6fbe46d7018a6a3bed34977 100644 --- a/ecmascript/js_api/js_api_arraylist.h +++ b/ecmascript/js_api/js_api_arraylist.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_ARRAYLIST_H -#define ECMASCRIPT_JS_API_ARRAYLIST_H +#ifndef ECMASCRIPT_JS_API_JS_API_ARRAYLIST_H +#define ECMASCRIPT_JS_API_JS_API_ARRAYLIST_H #include "ecmascript/js_object.h" #include "ecmascript/js_tagged_value-inl.h" @@ -77,7 +77,7 @@ public: static bool SetProperty(JSThread *thread, const JSHandle &obj, const JSHandle &key, const JSHandle &value); - inline int GetSize() const + inline uint32_t GetSize() const { return GetLength().GetInt(); } @@ -99,4 +99,4 @@ private: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_ARRAYLIST_H +#endif // ECMASCRIPT_JS_API_JS_API_ARRAYLIST_H diff --git a/ecmascript/js_api/js_api_arraylist_iterator.h b/ecmascript/js_api/js_api_arraylist_iterator.h index 192480c6f70c4c4ade98f5e6650b18e0c5e89c1f..067889655f6a62283beb3a89aacf343d0f376406 100644 --- a/ecmascript/js_api/js_api_arraylist_iterator.h +++ b/ecmascript/js_api/js_api_arraylist_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_ARRAYLIST_ITERATOR_H -#define ECMASCRIPT_JS_API_ARRAYLIST_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_ARRAYLIST_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_ARRAYLIST_ITERATOR_H #include "ecmascript/js_iterator.h" #include "ecmascript/js_object.h" @@ -44,4 +44,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_ARRAYLIST_ITERATOR_H +#endif // ECMASCRIPT_JS_API_JS_API_ARRAYLIST_ITERATOR_H diff --git a/ecmascript/js_api/js_api_deque.cpp b/ecmascript/js_api/js_api_deque.cpp index 65f8afbae33b7371d7283b49f72ec308250c5a7b..1ba39fe30b994b17eb43242d492cfca2b51db4c4 100644 --- a/ecmascript/js_api/js_api_deque.cpp +++ b/ecmascript/js_api/js_api_deque.cpp @@ -242,6 +242,7 @@ bool JSAPIDeque::GetOwnProperty(JSThread *thread, const JSHandle &de uint32_t index = 0; if (UNLIKELY(!JSTaggedValue::ToElementIndex(key.GetTaggedValue(), &index))) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); CString errorMsg = "The type of \"key\" can not obtain attributes of no-number type. 
Received value is: " + ConvertToString(*result); diff --git a/ecmascript/js_api/js_api_deque.h b/ecmascript/js_api/js_api_deque.h index 8e5d955325b70e204ebbec6d9f967a0e62d40cc1..d407de9440e04642153555e82b051f44ddfbe202 100644 --- a/ecmascript/js_api/js_api_deque.h +++ b/ecmascript/js_api/js_api_deque.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_DEQUE_H -#define ECMASCRIPT_JS_API_DEQUE_H +#ifndef ECMASCRIPT_JS_API_JS_API_DEQUE_H +#define ECMASCRIPT_JS_API_JS_API_DEQUE_H #include #include @@ -92,4 +92,4 @@ private: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_DEQUE_H +#endif // ECMASCRIPT_JS_API_JS_API_DEQUE_H diff --git a/ecmascript/js_api/js_api_deque_iterator.h b/ecmascript/js_api/js_api_deque_iterator.h index 5bccc32636bb323fb1bf4597433f11f65d8f7564..83972be235e1517908a761b063f049e6325af04d 100644 --- a/ecmascript/js_api/js_api_deque_iterator.h +++ b/ecmascript/js_api/js_api_deque_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_DEQUE_ITERATOR_H -#define ECMASCRIPT_JS_API_DEQUE_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_DEQUE_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_DEQUE_ITERATOR_H #include #include @@ -48,4 +48,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_DEQUE_ITERATOR_H +#endif // ECMASCRIPT_JS_API_JS_API_DEQUE_ITERATOR_H diff --git a/ecmascript/js_api/js_api_hashmap.h b/ecmascript/js_api/js_api_hashmap.h index c79f71fb82ea8e99a712a008c380cf06d39fa0fc..745eff7b2cc3a56977c76a3ee0b11ff6e2e2394e 100644 --- a/ecmascript/js_api/js_api_hashmap.h +++ b/ecmascript/js_api/js_api_hashmap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_HASHMAP_H -#define ECMASCRIPT_JS_API_HASHMAP_H +#ifndef ECMASCRIPT_JS_API_JS_API_HASHMAP_H +#define ECMASCRIPT_JS_API_JS_API_HASHMAP_H #include "ecmascript/js_object-inl.h" #include "ecmascript/js_object.h" @@ -61,4 +61,4 @@ private: static bool HasValueRBTreeNode(JSTaggedValue node, JSTaggedValue value); }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_HASHMAP_H +#endif // ECMASCRIPT_JS_API_JS_API_HASHMAP_H diff --git a/ecmascript/js_api/js_api_hashmap_iterator.h b/ecmascript/js_api/js_api_hashmap_iterator.h index 63d9bc529301a1aa1f35c2466bf8cd48c9fccff4..0bbe1406dd2413e89d23c5db99a7fbe5590471b7 100644 --- a/ecmascript/js_api/js_api_hashmap_iterator.h +++ b/ecmascript/js_api/js_api_hashmap_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_HASHMAP_ITERATOR_H -#define ECMASCRIPT_JS_API_HASHMAP_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_HASHMAP_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_HASHMAP_ITERATOR_H #include "ecmascript/js_iterator.h" #include "ecmascript/js_object.h" @@ -56,4 +56,4 @@ public: DECL_DUMP() }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_HASHMAP_ITERATOR_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_HASHMAP_ITERATOR_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_hashset.cpp b/ecmascript/js_api/js_api_hashset.cpp index a6de4b02423f3161d07e22aa0f893d460e142b4e..1e4b6f8f9337dff8e1f56eabdcbaa9ff95514fce 100644 --- a/ecmascript/js_api/js_api_hashset.cpp +++ b/ecmascript/js_api/js_api_hashset.cpp @@ -32,6 +32,7 @@ JSTaggedValue JSAPIHashSet::Has(JSThread *thread, JSTaggedValue value) { if (!TaggedHashArray::IsKey(value)) { JSHandle result = JSTaggedValue::ToString(thread, value); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"value\" must be Key of JS. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); @@ -80,6 +81,7 @@ JSTaggedValue JSAPIHashSet::Remove(JSThread *thread, JSHandle hash { if (!TaggedHashArray::IsKey(key)) { JSHandle result = JSTaggedValue::ToString(thread, key); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString errorMsg = "The type of \"key\" must be not null. Received value is: " + ConvertToString(*result); JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str()); diff --git a/ecmascript/js_api/js_api_hashset.h b/ecmascript/js_api/js_api_hashset.h index a8ea1b02407a822577f81666641870ec0dbb3a27..59dbaab5f7b3c79619ce659dab25a1e4c00a2557 100644 --- a/ecmascript/js_api/js_api_hashset.h +++ b/ecmascript/js_api/js_api_hashset.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_HASHSET_H -#define ECMASCRIPT_JS_API_HASHSET_H +#ifndef ECMASCRIPT_JS_API_JS_API_HASHSET_H +#define ECMASCRIPT_JS_API_JS_API_HASHSET_H #include "ecmascript/js_object-inl.h" #include "ecmascript/js_tagged_value-inl.h" @@ -47,4 +47,4 @@ public: DECL_DUMP() }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_HASHSET_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_HASHSET_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_hashset_iterator.h b/ecmascript/js_api/js_api_hashset_iterator.h index 669602db440569e67cf6e9678c22f3cf0335c69b..6f060f64c20f38276bf9003ef752d1a454f60033 100644 --- a/ecmascript/js_api/js_api_hashset_iterator.h +++ b/ecmascript/js_api/js_api_hashset_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_HASHSET_ITERATOR_H -#define ECMASCRIPT_JS_API_HASHSET_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_HASHSET_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_HASHSET_ITERATOR_H #include "ecmascript/js_iterator.h" #include "ecmascript/js_object.h" @@ -55,4 +55,4 @@ public: DECL_DUMP() }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_HASHSET_ITERATOR_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_HASHSET_ITERATOR_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_lightweightmap.cpp b/ecmascript/js_api/js_api_lightweightmap.cpp index a13968cbf3120037c4b34b4da5f9697940872c52..f434f61df9c00ed4f38bfe3ca4be67bda35df9a7 100644 --- a/ecmascript/js_api/js_api_lightweightmap.cpp +++ b/ecmascript/js_api/js_api_lightweightmap.cpp @@ -31,8 +31,8 @@ JSTaggedValue JSAPILightWeightMap::IncreaseCapacityTo(JSThread *thread, const JSHandle &lightWeightMap, int32_t index) { - int32_t num = lightWeightMap->GetSize(); - if (index < DEFAULT_CAPACITY_LENGTH || num >= index) { + uint32_t num = lightWeightMap->GetSize(); + if (index < DEFAULT_CAPACITY_LENGTH || static_cast(num) >= index) { return JSTaggedValue::False(); } JSHandle hashArray = GetArrayByKind(thread, lightWeightMap, AccossorsKind::HASH); @@ -51,7 +51,7 @@ void JSAPILightWeightMap::InsertValue(const JSThread *thread, const JSHandle &value, AccossorsKind kind) { JSHandle array = GetArrayByKind(thread, lightWeightMap, kind); - int32_t len = lightWeightMap->GetSize(); + uint32_t len = lightWeightMap->GetSize(); JSHandle newArray = GrowCapacity(thread, array, len + 1); TaggedArray::InsertElementByIndex(thread, newArray, value, index, len); SetArrayByKind(thread, lightWeightMap, newArray, kind); @@ -61,7 +61,7 @@ void JSAPILightWeightMap::ReplaceValue(const JSThread *thread, const JSHandle &value, AccossorsKind kind) { JSHandle array = GetArrayByKind(thread, lightWeightMap, kind); - ASSERT(0 <= index || index < lightWeightMap->GetSize()); + ASSERT(0 <= index || index < static_cast(lightWeightMap->GetSize())); array->Set(thread, index, value.GetTaggedValue()); } @@ -104,8 +104,8 @@ JSTaggedValue JSAPILightWeightMap::Get(JSThread *thread, const JSHandle &lightWeightMap, const JSHandle &newLightWeightMap) { - int32_t length = newLightWeightMap->GetSize(); - int32_t len = lightWeightMap->GetSize(); + uint32_t length = newLightWeightMap->GetSize(); + uint32_t len = lightWeightMap->GetSize(); if (length > len) { return JSTaggedValue::False(); } @@ -118,11 +118,11 @@ JSTaggedValue JSAPILightWeightMap::HasAll(JSThread *thread, const JSHandleGet(num); hash = Hash(dealKey); - index = BinarySearchHashes(oldHashArray, hash, len); - if (index < 0 || index >= len) { + index = BinarySearchHashes(oldHashArray, hash, static_cast(len)); + if (index < 0 || index >= static_cast(len)) { return JSTaggedValue::False(); } HashParams params { oldHashArray, oldKeyArray, &dealKey }; @@ -147,8 +147,8 @@ JSTaggedValue JSAPILightWeightMap::HasValue(JSThread *thread, const JSHandle &value) { JSHandle valueArray = GetArrayByKind(thread, lightWeightMap, AccossorsKind::VALUE); - int32_t length = lightWeightMap->GetSize(); - for (int32_t num = 0; num < length; num++) { + uint32_t length = lightWeightMap->GetSize(); + for (uint32_t num = 0; num < length; num++) { if (JSTaggedValue::SameValue(valueArray->Get(num), value.GetTaggedValue())) { return JSTaggedValue::True(); } @@ -167,7 +167,7 @@ KeyState 
JSAPILightWeightMap::GetStateOfKey(JSThread *thread, const JSHandle &key) { int32_t hash = Hash(key.GetTaggedValue()); - int32_t length = lightWeightMap->GetSize(); + int32_t length = static_cast(lightWeightMap->GetSize()); JSHandle hashArray = GetArrayByKind(thread, lightWeightMap, AccossorsKind::HASH); int32_t index = BinarySearchHashes(hashArray, hash, length); if (index >= 0) { @@ -196,9 +196,9 @@ int32_t JSAPILightWeightMap::GetIndexOfValue(JSThread *thread, const JSHandle &value) { JSHandle valueArray = GetArrayByKind(thread, lightWeightMap, AccossorsKind::VALUE); - int32_t length = lightWeightMap->GetSize(); + uint32_t length = lightWeightMap->GetSize(); JSTaggedValue compValue = value.GetTaggedValue(); - for (int32_t i = 0; i < length; i++) { + for (uint32_t i = 0; i < length; i++) { if (valueArray->Get(i) == compValue) { return i; } @@ -209,7 +209,7 @@ int32_t JSAPILightWeightMap::GetIndexOfValue(JSThread *thread, const JSHandle &lightWeightMap, int32_t index) { - int32_t length = lightWeightMap->GetSize(); + int32_t length = static_cast(lightWeightMap->GetSize()); if (index < 0 || length <= index) { std::ostringstream oss; oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (length - 1) @@ -224,7 +224,7 @@ JSTaggedValue JSAPILightWeightMap::GetKeyAt(JSThread *thread, const JSHandle &lightWeightMap, int32_t index) { - int32_t length = lightWeightMap->GetSize(); + int32_t length = static_cast(lightWeightMap->GetSize()); if (index < 0 || length <= index) { std::ostringstream oss; oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (length - 1) @@ -243,8 +243,8 @@ void JSAPILightWeightMap::SetAll(JSThread *thread, const JSHandle needValueArray = GetArrayByKind(thread, needLightWeightMap, AccossorsKind::VALUE); JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle value(thread, JSTaggedValue::Undefined()); - int32_t length = needLightWeightMap->GetSize(); - for (int32_t num = 0; num < length; num++) { + uint32_t length = needLightWeightMap->GetSize(); + for (uint32_t num = 0; num < length; num++) { key.Update(needKeyArray->Get(num)); value.Update(needValueArray->Get(num)); JSAPILightWeightMap::Set(thread, lightWeightMap, key, value); @@ -271,8 +271,8 @@ JSTaggedValue JSAPILightWeightMap::Remove(JSThread *thread, const JSHandle &lightWeightMap, int32_t index) { - int32_t length = lightWeightMap->GetSize(); - if (index < 0 || length <= index) { + uint32_t length = lightWeightMap->GetSize(); + if (index < 0 || static_cast(length) <= index) { return JSTaggedValue::False(); } RemoveValue(thread, lightWeightMap, index, AccossorsKind::HASH); @@ -306,7 +306,7 @@ void JSAPILightWeightMap::Clear(JSThread *thread, const JSHandle &lightWeightMap, int32_t index, const JSHandle &value) { - int32_t length = lightWeightMap->GetSize(); + int32_t length = static_cast(lightWeightMap->GetSize()); if (index < 0 || length <= index) { std::ostringstream oss; oss << "The value of \"index\" is out of range. 
It must be >= 0 && <= " << (length - 1) @@ -318,10 +318,10 @@ JSTaggedValue JSAPILightWeightMap::SetValueAt(JSThread *thread, const JSHandleGet(right).GetInt() == hash)) { + while ((right < static_cast(size)) && ((params.hashArray)->Get(right).GetInt() == hash)) { if (JSTaggedValue::SameValue((params.keyArray)->Get(right), *(params.key))) { return right; } diff --git a/ecmascript/js_api/js_api_lightweightmap.h b/ecmascript/js_api/js_api_lightweightmap.h index 09891c300968c47123a1a453dc5a5b1671f112d4..62feb9392eb20dec51157ddd83d9f426e79f4e9d 100644 --- a/ecmascript/js_api/js_api_lightweightmap.h +++ b/ecmascript/js_api/js_api_lightweightmap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_LIGHTWEIGHTMAP_H -#define ECMASCRIPT_JS_API_LIGHTWEIGHTMAP_H +#ifndef ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTMAP_H +#define ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTMAP_H #include "ecmascript/js_object.h" #include "ecmascript/js_tagged_value-inl.h" @@ -82,9 +82,9 @@ public: const JSHandle &key, PropertyDescriptor &desc); JSTaggedValue IsEmpty(); - inline int32_t GetSize() const + inline uint32_t GetSize() const { - return static_cast(GetLength()); + return GetLength(); } static constexpr size_t LWP_HASHES_OFFSET = JSObject::SIZE; @@ -118,8 +118,8 @@ private: static JSHandle GetArrayByKind(const JSThread *thread, const JSHandle &lightWeightMap, AccossorsKind kind); - static int32_t AvoidHashCollision(HashParams ¶ms, int32_t index, int32_t size, int32_t hash); + static int32_t AvoidHashCollision(HashParams ¶ms, int32_t index, uint32_t size, int32_t hash); }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_LIGHTWEIGHTMAP_H +#endif // ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTMAP_H diff --git a/ecmascript/js_api/js_api_lightweightmap_iterator.cpp b/ecmascript/js_api/js_api_lightweightmap_iterator.cpp index 14e731d3af9ee1fa73d751a17b09e675dd9d3dcf..16fbca6962d0b22a7d8672c0b3b69dc4e905d886 100644 --- a/ecmascript/js_api/js_api_lightweightmap_iterator.cpp +++ b/ecmascript/js_api/js_api_lightweightmap_iterator.cpp @@ -48,7 +48,7 @@ JSTaggedValue JSAPILightWeightMapIterator::Next(EcmaRuntimeCallInfo *argv) return globalConst->GetUndefinedIterResult(); } int32_t index = iter->GetNextIndex(); - int32_t length = lightWeightMap->GetSize(); + int32_t length = static_cast(lightWeightMap->GetSize()); if (index >= length) { JSHandle undefinedHandle = globalConst->GetHandledUndefined(); iter->SetIteratedLightWeightMap(thread, undefinedHandle); diff --git a/ecmascript/js_api/js_api_lightweightmap_iterator.h b/ecmascript/js_api/js_api_lightweightmap_iterator.h index 40a3ea1ca3d35499dfa613ade0c4a20b8ed37bcf..c8aa2868d5f3c770330454476a5ecd4a01841675 100644 --- a/ecmascript/js_api/js_api_lightweightmap_iterator.h +++ b/ecmascript/js_api/js_api_lightweightmap_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
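For context on `BinarySearchHashes` and `AvoidHashCollision` in the hunks above: a lightweight map keeps entries sorted by key hash, binary-searches for the hash, then walks the run of equal hashes to find the exact key. The sketch below shows that scheme on plain vectors; the names, element types, and the complement-return convention are simplified relative to the real implementation.

```cpp
#include <cstdint>
#include <vector>

// Entries sorted by hash; returns the slot of `key`, or the bitwise
// complement of the insertion point when absent (negative result encodes
// "not found, insert here").
int32_t FindEntrySketch(const std::vector<int32_t> &hashes,
                        const std::vector<int32_t> &keys, int32_t hash, int32_t key)
{
    int32_t size = static_cast<int32_t>(hashes.size());
    int32_t low = 0;
    int32_t high = size - 1;
    while (low <= high) {
        int32_t mid = low + (high - low) / 2;
        if (hashes[mid] < hash) {
            low = mid + 1;
        } else if (hashes[mid] > hash) {
            high = mid - 1;
        } else {
            // Hash collision: probe the run of equal hashes on both sides.
            for (int32_t i = mid; i >= 0 && hashes[i] == hash; --i) {
                if (keys[i] == key) {
                    return i;
                }
            }
            for (int32_t i = mid + 1; i < size && hashes[i] == hash; ++i) {
                if (keys[i] == key) {
                    return i;
                }
            }
            return ~mid;  // hash present, key absent
        }
    }
    return ~low;
}
```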
*/ -#ifndef ECMASCRIPT_JS_API_LIGHT_WEIGHT_MAP_ITERATOR_H -#define ECMASCRIPT_JS_API_LIGHT_WEIGHT_MAP_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_LIGHT_WEIGHT_MAP_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_LIGHT_WEIGHT_MAP_ITERATOR_H #include "ecmascript/js_iterator.h" #include "ecmascript/js_object.h" @@ -48,4 +48,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_LinkedList_ITERATOR_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_LIGHT_WEIGHT_MAP_ITERATOR_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_lightweightset.h b/ecmascript/js_api/js_api_lightweightset.h index 4f28ed2b319cbb1103582a70565e38bb4eab03b8..7118699aaa732343e4375ae602e546ddea72beea 100644 --- a/ecmascript/js_api/js_api_lightweightset.h +++ b/ecmascript/js_api/js_api_lightweightset.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_LIGHTWEIGHTSET_H -#define ECMASCRIPT_JS_API_LIGHTWEIGHTSET_H +#ifndef ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTSET_H +#define ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTSET_H #include "ecmascript/js_object.h" #include "ecmascript/js_tagged_value-inl.h" @@ -77,4 +77,4 @@ public: DECL_DUMP() }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_LIGHTWEIGHTSET_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTSET_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_lightweightset_iterator.h b/ecmascript/js_api/js_api_lightweightset_iterator.h index 4f867cf9df18067e17186f56b177a23e4271a9cf..8eb6f56d33317ee5445284a271ea08085f5a8601 100644 --- a/ecmascript/js_api/js_api_lightweightset_iterator.h +++ b/ecmascript/js_api/js_api_lightweightset_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_JS_API_LIGHTWEIGHTSET_ITERATOR_H -#define ECMASCRIPT_JS_API_LIGHTWEIGHTSET_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTSET_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTSET_ITERATOR_H #include "ecmascript/js_iterator.h" #include "ecmascript/js_object.h" @@ -43,4 +43,4 @@ public: DECL_DUMP() }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_LIGHT_WEIGHT_SET_ITERATOR_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_LIGHTWEIGHTSET_ITERATOR_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_linked_list.cpp b/ecmascript/js_api/js_api_linked_list.cpp index 742a7c5720861c12a2a3a496e3248081d3d68fb8..b0f80707401e18819f047bb6349295dc2ec40301 100644 --- a/ecmascript/js_api/js_api_linked_list.cpp +++ b/ecmascript/js_api/js_api_linked_list.cpp @@ -55,9 +55,9 @@ JSHandle JSAPILinkedList::Clone(JSThread *thread, const JSHandl JSHandle srcDoubleList(thread, doubleListTaggedValue); JSHandle srcTaggedArray(thread, doubleListTaggedValue); ASSERT(!srcDoubleList->IsDictionaryMode()); - int numberOfNodes = srcDoubleList->NumberOfNodes(); - int numberOfDeletedNodes = srcDoubleList->NumberOfDeletedNodes(); - int effectiveCapacity = TaggedDoubleList::ELEMENTS_START_INDEX + + uint32_t numberOfNodes = static_cast(srcDoubleList->NumberOfNodes()); + uint32_t numberOfDeletedNodes = static_cast(srcDoubleList->NumberOfDeletedNodes()); + uint32_t effectiveCapacity = TaggedDoubleList::ELEMENTS_START_INDEX + (numberOfNodes + numberOfDeletedNodes + 1) * TaggedDoubleList::ENTRY_SIZE; ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle newLinkedList = factory->NewJSAPILinkedList(); @@ -234,6 +234,7 @@ bool JSAPILinkedList::GetOwnProperty(JSThread *thread, const JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); CString errorMsg = "The type of \"index\" can not obtain attributes of no-number type. Received value is: " + ConvertToString(*result); diff --git a/ecmascript/js_api/js_api_linked_list.h b/ecmascript/js_api/js_api_linked_list.h index bcbd860cac46e4d27898bc7014816f0eb73367a4..7202c9add8e75ce48dd837bfeb54f7116104003c 100644 --- a/ecmascript/js_api/js_api_linked_list.h +++ b/ecmascript/js_api/js_api_linked_list.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef ECMASCRIPT_JS_API_LINKEDLIST_H -#define ECMASCRIPT_JS_API_LINKEDLIST_H +#ifndef ECMASCRIPT_JS_API_JS_API_LINKEDLIST_H +#define ECMASCRIPT_JS_API_JS_API_LINKEDLIST_H #include "ecmascript/js_object.h" #include "ecmascript/js_tagged_value-inl.h" @@ -23,7 +23,7 @@ namespace panda::ecmascript { class JSAPILinkedList : public JSObject { public: - static constexpr int DEFAULT_CAPACITY_LENGTH = 10; + static constexpr uint32_t DEFAULT_CAPACITY_LENGTH = 10; static JSAPILinkedList *Cast(TaggedObject *object) { ASSERT(JSTaggedValue(object).IsJSAPILinkedList()); @@ -61,7 +61,7 @@ public: JSTaggedValue Remove(JSThread *thread, const JSTaggedValue &element); JSTaggedValue GetIndexOf(const JSTaggedValue &element); JSTaggedValue GetLastIndexOf(const JSTaggedValue &element); - inline int Length() + inline uint32_t Length() { return TaggedDoubleList::Cast(GetDoubleList().GetTaggedObject())->Length(); } @@ -72,4 +72,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_LinkedLIST_H +#endif // ECMASCRIPT_JS_API_JS_API_LinkedLIST_H diff --git a/ecmascript/js_api/js_api_linked_list_iterator.h b/ecmascript/js_api/js_api_linked_list_iterator.h index 6e4fe105f8d1588ffe991145c0f0cf1b3bdd33fd..0c282a81b20d9e40e5af80ccff7453929e3437f7 100644 --- a/ecmascript/js_api/js_api_linked_list_iterator.h +++ b/ecmascript/js_api/js_api_linked_list_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_LINKED_LIST_ITERATOR_H -#define ECMASCRIPT_JS_API_LINKED_LIST_ITERATOR_H +#ifndef ECMASCRIPT_JS_API_JS_API_LINKED_LIST_ITERATOR_H +#define ECMASCRIPT_JS_API_JS_API_LINKED_LIST_ITERATOR_H #include "ecmascript/js_iterator.h" #include "ecmascript/js_object.h" @@ -41,4 +41,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_LinkedList_ITERATOR_H \ No newline at end of file +#endif // ECMASCRIPT_JS_API_JS_API_LINKED_LIST_ITERATOR_H \ No newline at end of file diff --git a/ecmascript/js_api/js_api_list.cpp b/ecmascript/js_api/js_api_list.cpp index ea5475e9b2c3258690c388e72c211dacd09d3e85..6e7dfab0bf352442ab46224de0661c148b6fb3ca 100644 --- a/ecmascript/js_api/js_api_list.cpp +++ b/ecmascript/js_api/js_api_list.cpp @@ -198,7 +198,7 @@ JSTaggedValue JSAPIList::GetSubList(JSThread *thread, const JSHandle JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::RANGE_ERROR, oss.str().c_str()); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, JSTaggedValue::Exception()); } - int len = TaggedSingleList::ELEMENTS_START_INDEX + (toIndex - fromIndex + 1) * TaggedSingleList::ENTRY_SIZE; + uint32_t len = TaggedSingleList::ELEMENTS_START_INDEX + (toIndex - fromIndex + 1) * TaggedSingleList::ENTRY_SIZE; JSHandle newElement = thread->GetEcmaVM()->GetFactory()->NewTaggedArray(len); JSHandle subSingleList = JSHandle::Cast(newElement); JSHandle sublist = thread->GetEcmaVM()->GetFactory()->NewJSAPIList(); @@ -218,6 +218,7 @@ bool JSAPIList::GetOwnProperty(JSThread *thread, const JSHandle &list uint32_t index = 0; if (UNLIKELY(!JSTaggedValue::ToElementIndex(key.GetTaggedValue(), &index))) { JSHandle result = JSTaggedValue::ToString(thread, key.GetTaggedValue()); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); CString errorMsg = "The type of \"index\" can not 
obtain attributes of no-number type. Received value is: " + ConvertToString(*result); diff --git a/ecmascript/js_api/js_api_list.h b/ecmascript/js_api/js_api_list.h index f5aa2027f74bb6ea0a1ded543b751207f2cf15f1..b1e163e6af22e08154ee5292dc9d0a68c99dad03 100644 --- a/ecmascript/js_api/js_api_list.h +++ b/ecmascript/js_api/js_api_list.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. */ -#ifndef ECMASCRIPT_JS_API_LIST_H -#define ECMASCRIPT_JS_API_LIST_H +#ifndef ECMASCRIPT_JS_API_JS_API_LIST_H +#define ECMASCRIPT_JS_API_JS_API_LIST_H #include "ecmascript/js_object.h" #include "ecmascript/js_tagged_value-inl.h" @@ -23,7 +23,7 @@ namespace panda::ecmascript { class JSAPIList : public JSObject { public: - static constexpr int DEFAULT_CAPACITY_LENGTH = 10; + static constexpr uint32_t DEFAULT_CAPACITY_LENGTH = 10; static JSAPIList *Cast(TaggedObject *object) { ASSERT(JSTaggedValue(object).IsJSAPIList()); @@ -63,7 +63,7 @@ public: JSTaggedValue Equal(JSThread *thread, const JSHandle &list); void Clear(JSThread *thread); JSTaggedValue Remove(JSThread *thread, const JSTaggedValue &element); - inline int Length() + inline uint32_t Length() { return TaggedSingleList::Cast(GetSingleList().GetTaggedObject())->Length(); } @@ -75,4 +75,4 @@ public: }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_JS_API_LIST_H +#endif // ECMASCRIPT_JS_API_JS_API_LIST_H diff --git a/ecmascript/js_api/js_api_list_iterator.h b/ecmascript/js_api/js_api_list_iterator.h index 2bc296b2f8d08d4a4b660aa682427e29e1a1ebcc..bbe61d700466d217a2df09dac48f5d873d03a195 100644 --- a/ecmascript/js_api/js_api_list_iterator.h +++ b/ecmascript/js_api/js_api_list_iterator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
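The `int` → `uint32_t` moves running through these container hunks (`GetSize`, `Length`, loop counters) all guard the same trap: when a signed index meets an unsigned size in a comparison, the index is converted to unsigned, so `-1` becomes a huge value and bounds checks misfire. The patch's convention is to keep sizes unsigned, reject negative indices explicitly, and cast once at the comparison site. A minimal sketch of that convention:

```cpp
#include <cstdint>

// Usual arithmetic conversions turn the signed operand unsigned first,
// so -1 < 10u is false: -1 has already become 4294967295.
static_assert(!(-1 < 10u), "mixed comparison converts -1 to UINT32_MAX");

// The convention in the patch: size stays uint32_t, negativity is checked
// first, then one explicit cast keeps the comparison in a single signedness.
// (Safe here because these container sizes stay far below INT32_MAX.)
bool IndexInRange(int32_t index, uint32_t size)
{
    return index >= 0 && index < static_cast<int32_t>(size);
}
```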
diff --git a/ecmascript/js_api/js_api_list_iterator.h b/ecmascript/js_api/js_api_list_iterator.h
index 2bc296b2f8d08d4a4b660aa682427e29e1a1ebcc..bbe61d700466d217a2df09dac48f5d873d03a195 100644
--- a/ecmascript/js_api/js_api_list_iterator.h
+++ b/ecmascript/js_api/js_api_list_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_LIST_ITERATOR_H
-#define ECMASCRIPT_JS_API_LIST_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_LIST_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_LIST_ITERATOR_H

 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_object.h"
@@ -41,4 +41,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_List_ITERATOR_H
\ No newline at end of file
+#endif  // ECMASCRIPT_JS_API_JS_API_LIST_ITERATOR_H
\ No newline at end of file
diff --git a/ecmascript/js_api/js_api_plain_array.cpp b/ecmascript/js_api/js_api_plain_array.cpp
index d8c304c361c7a3d2839d18c07fb899222aa81cf9..d26328010dbee70205aeb317292302a661b3720a 100644
--- a/ecmascript/js_api/js_api_plain_array.cpp
+++ b/ecmascript/js_api/js_api_plain_array.cpp
@@ -30,7 +30,7 @@ void JSAPIPlainArray::Add(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj,
 {
     JSHandle<TaggedArray> keyArray(thread, obj->GetKeys());
     JSHandle<TaggedArray> valueArray(thread, obj->GetValues());
-    int32_t size = obj->GetLength();
+    uint32_t size = obj->GetLength();
     int32_t index = obj->BinarySearch(*keyArray, 0, size, key.GetTaggedValue().GetNumber());
     if (index >= 0) {
         keyArray->Set(thread, index, key);
@@ -38,12 +38,12 @@ void JSAPIPlainArray::Add(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj,
         return;
     }
     index ^= 0xFFFFFFFF;
-    if (index < size) {
+    if (index < static_cast<int32_t>(size)) {
         obj->AdjustArray(thread, *keyArray, index, size, true);
         obj->AdjustArray(thread, *valueArray, index, size, true);
     }
     uint32_t capacity = valueArray->GetLength();
-    if (size + 1 >= static_cast<int32_t>(capacity)) {
+    if (size + 1 >= capacity) {
         uint32_t newCapacity = capacity << 1U;
         keyArray =
             thread->GetEcmaVM()->GetFactory()->CopyArray(keyArray, capacity, newCapacity);
@@ -68,12 +68,12 @@ JSHandle<JSAPIPlainArray> JSAPIPlainArray::CreateSlot(const JSThread *thread, const

 bool JSAPIPlainArray::AdjustForward(JSThread *thread, int32_t index, int32_t forwardSize)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     TaggedArray *keys = TaggedArray::Cast(GetKeys().GetTaggedObject());
     TaggedArray *values = TaggedArray::Cast(GetValues().GetTaggedObject());
     AdjustArray(thread, keys, index + forwardSize, index, false);
     AdjustArray(thread, values, index + forwardSize, index, false);
-    size = size - forwardSize;
+    size = size - static_cast<uint32_t>(forwardSize);
     SetLength(size);
     return true;
 }
@@ -81,8 +81,8 @@ bool JSAPIPlainArray::AdjustForward(JSThread *thread, int32_t index, int32_t forwardSize)
 void JSAPIPlainArray::AdjustArray(JSThread *thread, TaggedArray *srcArray, int32_t fromIndex,
                                   int32_t toIndex, bool direction)
 {
-    int32_t size = GetLength();
-    int32_t idx = size - 1;
+    uint32_t size = GetLength();
+    uint32_t idx = size - 1;
     if (direction) {
         while (fromIndex < toIndex) {
             JSTaggedValue value = srcArray->Get(idx);
@@ -91,9 +91,9 @@ void JSAPIPlainArray::AdjustArray(JSThread *thread, TaggedArray *srcArray, int32_t fromIndex,
             fromIndex++;
         }
     } else {
-        int32_t moveSize = size - fromIndex;
-        for (int32_t i = 0; i < moveSize; i++) {
-            if ((fromIndex + i) < size) {
+        uint32_t moveSize = size - static_cast<uint32_t>(fromIndex);
+        for (uint32_t i = 0; i < moveSize; i++) {
+            if ((fromIndex + static_cast<int32_t>(i)) < static_cast<int32_t>(size)) {
                 JSTaggedValue value = srcArray->Get(fromIndex + i);
                 srcArray->Set(thread, toIndex + i, value);
             } else {
@@ -126,8 +126,8 @@ void JSAPIPlainArray::Clear(JSThread *thread)
 {
     TaggedArray *keys = TaggedArray::Cast(GetKeys().GetTaggedObject());
     TaggedArray *values = TaggedArray::Cast(GetValues().GetTaggedObject());
-    int32_t size = GetLength();
-    for (int32_t index = 0; index < size; index++) {
+    uint32_t size = GetLength();
+    for (uint32_t index = 0; index < size; index++) {
         keys->Set(thread, index, JSTaggedValue::Hole());
         values->Set(thread, index, JSTaggedValue::Hole());
     }
@@ -136,7 +136,7 @@ void JSAPIPlainArray::Clear(JSThread *thread)

 JSTaggedValue JSAPIPlainArray::RemoveRangeFrom(JSThread *thread, int32_t index, int32_t batchSize)
 {
-    int32_t size = GetLength();
+    int32_t size = static_cast<int32_t>(GetLength());
     if (index < 0 || index >= size) {
         std::ostringstream oss;
         oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (size - 1)
@@ -168,9 +168,9 @@ bool JSAPIPlainArray::GetOwnProperty(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj,
                                      const JSHandle<JSTaggedValue> &key)
 {
     TaggedArray *keyArray = TaggedArray::Cast(obj->GetKeys().GetTaggedObject());
-    int32_t size = obj->GetLength();
+    uint32_t size = obj->GetLength();
     int32_t index = obj->BinarySearch(keyArray, 0, size, key.GetTaggedValue().GetInt());
-    if (index < 0 || index >= size) {
+    if (index < 0 || index >= static_cast<int32_t>(size)) {
         std::ostringstream oss;
         oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (size - 1)
             << ". Received value is: " << index;
@@ -186,9 +186,9 @@ OperationResult JSAPIPlainArray::GetProperty(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj,
                                              const JSHandle<JSTaggedValue> &key)
 {
     TaggedArray *keyArray = TaggedArray::Cast(obj->GetKeys().GetTaggedObject());
-    int32_t size = obj->GetLength();
+    uint32_t size = obj->GetLength();
     int32_t index = obj->BinarySearch(keyArray, 0, size, key.GetTaggedValue().GetInt());
-    if (index < 0 || index >= size) {
+    if (index < 0 || index >= static_cast<int32_t>(size)) {
         std::ostringstream oss;
         oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (size - 1)
             << ". Received value is: " << index;
@@ -206,9 +206,9 @@ bool JSAPIPlainArray::SetProperty(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj,
                                   const JSHandle<JSTaggedValue> &key,
                                   const JSHandle<JSTaggedValue> &value)
 {
     TaggedArray *keyArray = TaggedArray::Cast(obj->GetKeys().GetTaggedObject());
-    int32_t size = obj->GetLength();
+    uint32_t size = obj->GetLength();
     int32_t index = obj->BinarySearch(keyArray, 0, size, key.GetTaggedValue().GetInt());
-    if (index < 0 || index >= size) {
+    if (index < 0 || index >= static_cast<int32_t>(size)) {
         return false;
     }
@@ -223,7 +223,7 @@ JSHandle<JSAPIPlainArray> JSAPIPlainArray::Clone(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj)
     auto factory = thread->GetEcmaVM()->GetFactory();
     JSHandle<JSAPIPlainArray> newPlainArray = factory->NewJSAPIPlainArray(0);
-    int32_t length = obj->GetLength();
+    uint32_t length = obj->GetLength();
     newPlainArray->SetLength(length);
     JSHandle<TaggedArray> srcKeyArray(thread, obj->GetKeys());
     JSHandle<TaggedArray> srcValueArray(thread, obj->GetValues());
@@ -238,7 +238,7 @@ JSHandle<JSAPIPlainArray> JSAPIPlainArray::Clone(JSThread *thread, const JSHandle<JSAPIPlainArray> &obj)

 bool JSAPIPlainArray::Has(const int32_t key)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     TaggedArray *keyArray = TaggedArray::Cast(GetKeys().GetTaggedObject());
     int32_t index = BinarySearch(keyArray, 0, size, key);
     if (index < 0) {
@@ -249,7 +249,7 @@ bool JSAPIPlainArray::Has(const int32_t key)

 JSTaggedValue JSAPIPlainArray::Get(const JSTaggedValue key)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     TaggedArray *keyArray = TaggedArray::Cast(GetKeys().GetTaggedObject());
     int32_t index = BinarySearch(keyArray, 0, size, key.GetNumber());
     if (index < 0) {
@@ -273,11 +273,11 @@ JSTaggedValue JSAPIPlainArray::ForEach(JSThread *thread, const JSHandle<JSTaggedValue> &thisHandle,
                                       const JSHandle<JSTaggedValue> &callbackFn, const JSHandle<JSTaggedValue> &thisArg)
 {
     JSAPIPlainArray *plainarray = JSAPIPlainArray::Cast(thisHandle->GetTaggedObject());
-    int32_t length = plainarray->GetLength();
+    uint32_t length = plainarray->GetLength();
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     JSHandle<TaggedArray> keyArray(thread, plainarray->GetKeys());
     JSHandle<TaggedArray> valueArray(thread, plainarray->GetValues());
-    for (int32_t k = 0; k < length; k++) {
+    for (uint32_t k = 0; k < length; k++) {
         JSTaggedValue kValue = valueArray->Get(k);
         JSTaggedValue key = keyArray->Get(k);
         EcmaRuntimeCallInfo *info =
@@ -296,12 +296,12 @@ JSTaggedValue JSAPIPlainArray::ToString(JSThread *thread, const JSHandle<JSAPIPlainArray> &plainarray)
     std::u16string sepStr = std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> {}.from_bytes(",");
     std::u16string colonStr = std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> {}.from_bytes(":");
-    int32_t length = plainarray->GetLength();
+    uint32_t length = plainarray->GetLength();
     std::u16string concatStr = std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> {}.from_bytes("");
     std::u16string concatStrNew = std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> {}.from_bytes("");
     JSMutableHandle<JSTaggedValue> valueHandle(thread, JSTaggedValue::Undefined());
     JSMutableHandle<JSTaggedValue> keyHandle(thread, JSTaggedValue::Undefined());
-    for (int32_t k = 0; k < length; k++) {
+    for (uint32_t k = 0; k < length; k++) {
         std::u16string valueStr;
         valueHandle.Update(plainarray->GetValueAt(thread, k));
         if (!valueHandle->IsUndefined() && !valueHandle->IsNull()) {
@@ -336,7 +336,7 @@ JSTaggedValue JSAPIPlainArray::ToString(JSThread *thread, const JSHandle<JSAPIPlainArray> &plainarray)
-    int32_t size = GetLength();
+    int32_t size = static_cast<int32_t>(GetLength());
     int32_t index = -1;
     for (int32_t i = 0; i < size; i++) {
         if (JSTaggedValue::SameValue(values->Get(i), value)) {
@@ -364,15 +364,15 @@ JSTaggedValue JSAPIPlainArray::GetIndexOfValue(JSTaggedValue value)

 bool JSAPIPlainArray::IsEmpty()
 {
-    int32_t length = GetLength();
+    uint32_t length = GetLength();
     return length == 0;
 }

 JSTaggedValue JSAPIPlainArray::GetKeyAt(int32_t index)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     TaggedArray *keyArray = TaggedArray::Cast(GetKeys().GetTaggedObject());
-    if (index < 0 || index >= size) {
+    if (index < 0 || index >= static_cast<int32_t>(size)) {
         return JSTaggedValue::Undefined();
     }
     return keyArray->Get(index);
@@ -380,8 +380,8 @@ JSTaggedValue JSAPIPlainArray::GetKeyAt(int32_t index)

 JSTaggedValue JSAPIPlainArray::GetValueAt(JSThread *thread, int32_t index)
 {
-    int32_t size = GetLength();
-    if (index < 0 || index >= size) {
+    uint32_t size = GetLength();
+    if (index < 0 || index >= static_cast<int32_t>(size)) {
         std::ostringstream oss;
         oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (size - 1)
             << ". Received value is: " << index;
@@ -394,10 +394,10 @@ JSTaggedValue JSAPIPlainArray::GetValueAt(JSThread *thread, int32_t index)

 JSTaggedValue JSAPIPlainArray::Remove(JSThread *thread, JSTaggedValue key)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     TaggedArray *keyArray = TaggedArray::Cast(GetKeys().GetTaggedObject());
     int32_t index = BinarySearch(keyArray, 0, size, key.GetNumber());
-    if (index < 0 || index >= size) {
+    if (index < 0 || index >= static_cast<int32_t>(size)) {
         return JSTaggedValue::Undefined();
     }
     TaggedArray *values = TaggedArray::Cast(GetValues().GetTaggedObject());
@@ -408,9 +408,9 @@ JSTaggedValue JSAPIPlainArray::Remove(JSThread *thread, JSTaggedValue key)

 JSTaggedValue JSAPIPlainArray::RemoveAt(JSThread *thread, JSTaggedValue index)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     int32_t seat = index.GetNumber();
-    if (seat < 0 || seat >= size) {
+    if (seat < 0 || static_cast<uint32_t>(seat) >= size) {
         return JSTaggedValue::Undefined();
     }
     TaggedArray *values = TaggedArray::Cast(GetValues().GetTaggedObject());
@@ -421,9 +421,9 @@ JSTaggedValue JSAPIPlainArray::RemoveAt(JSThread *thread, JSTaggedValue index)

 bool JSAPIPlainArray::SetValueAt(JSThread *thread, JSTaggedValue index, JSTaggedValue value)
 {
-    int32_t size = GetLength();
+    uint32_t size = GetLength();
     int32_t seat = index.GetNumber();
-    if (seat < 0 || seat >= size) {
+    if (seat < 0 || static_cast<uint32_t>(seat) >= size) {
         std::ostringstream oss;
         oss << "The value of \"index\" is out of range. It must be >= 0 && <= " << (size - 1)
             << ". Received value is: " << seat;
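The `index ^= 0xFFFFFFFF` in `JSAPIPlainArray::Add` relies on a search convention that the hunks above only use, never state: on a miss, `BinarySearch` appears to return the bitwise complement of the insertion point (the same idea as Java's `Arrays.binarySearch`), which is always negative, and XOR-ing with `0xFFFFFFFF` recovers the position. A standalone sketch of that convention (assumed from the call sites, not copied from the engine):

```cpp
#include <cstdint>
#include <vector>

// Miss returns ~insertionPoint (always < 0); hit returns the index.
int32_t BinarySearch(const std::vector<int32_t> &keys, int32_t target)
{
    int32_t lo = 0;
    int32_t hi = static_cast<int32_t>(keys.size()) - 1;
    while (lo <= hi) {
        int32_t mid = lo + (hi - lo) / 2;
        if (keys[mid] == target) {
            return mid;
        }
        if (keys[mid] < target) {
            lo = mid + 1;
        } else {
            hi = mid - 1;
        }
    }
    return ~lo;  // encode the insertion point in the sign
}

int main()
{
    std::vector<int32_t> keys = {1, 3, 5};
    int32_t index = BinarySearch(keys, 4);
    if (index < 0) {
        index ^= 0xFFFFFFFF;  // same as ~index: recover insertion point 2
    }
    return index == 2 ? 0 : 1;
}
```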
diff --git a/ecmascript/js_api/js_api_plain_array.h b/ecmascript/js_api/js_api_plain_array.h
index 4315ad8afae6a361b5157d60ca81b595466fd77b..027a9df0875e900b08a2bfac7a1a4ce6d67f1788 100644
--- a/ecmascript/js_api/js_api_plain_array.h
+++ b/ecmascript/js_api/js_api_plain_array.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_PLAIN_ARRAY_H
-#define ECMASCRIPT_JS_API_PLAIN_ARRAY_H
+#ifndef ECMASCRIPT_JS_API_JS_API_PLAIN_ARRAY_H
+#define ECMASCRIPT_JS_API_JS_API_PLAIN_ARRAY_H

 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value-inl.h"
@@ -62,14 +62,14 @@ public:
     void Clear(JSThread *thread);
     void AdjustArray(JSThread *thread, TaggedArray *srcArray, int32_t fromIndex, int32_t toIndex, bool direction);
-    inline int GetSize() const
+    inline uint32_t GetSize() const
     {
         return GetLength();
     }
     static constexpr size_t KEYS_OFFSET = JSObject::SIZE;
     ACCESSORS(Keys, KEYS_OFFSET, VALUES_OFFSET);
     ACCESSORS(Values, VALUES_OFFSET, LENGTH_OFFSET);
-    ACCESSORS_PRIMITIVE_FIELD(Length, int32_t, LENGTH_OFFSET, LAST_OFFSET);
+    ACCESSORS_PRIMITIVE_FIELD(Length, uint32_t, LENGTH_OFFSET, LAST_OFFSET);
     DEFINE_ALIGN_SIZE(LAST_OFFSET);
     DECL_VISIT_OBJECT_FOR_JS_OBJECT(JSObject, KEYS_OFFSET, LENGTH_OFFSET)
@@ -82,4 +82,4 @@ private:
     }
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_PLAIN_ARRAY_H
+#endif  // ECMASCRIPT_JS_API_JS_API_PLAIN_ARRAY_H
diff --git a/ecmascript/js_api/js_api_plain_array_iterator.cpp b/ecmascript/js_api/js_api_plain_array_iterator.cpp
index 9c9ae9fc29f70e926ba18baf31c196bb7fa59386..66636e3fc3868806475b84024ff9146d1399f5dc 100644
--- a/ecmascript/js_api/js_api_plain_array_iterator.cpp
+++ b/ecmascript/js_api/js_api_plain_array_iterator.cpp
@@ -49,9 +49,9 @@ JSTaggedValue JSAPIPlainArrayIterator::Next(EcmaRuntimeCallInfo *argv)
     JSHandle<JSAPIPlainArray> apiPlainArray(plainArray);
     ASSERT(plainArray->IsJSAPIPlainArray());
-    int32_t length = apiPlainArray->GetLength();
+    uint32_t length = apiPlainArray->GetLength();
     int32_t index = iter->GetNextIndex();
-    if (index >= length) {
+    if (static_cast<uint32_t>(index) >= length) {
         iter->SetIteratedPlainArray(thread, undefinedHandle);
         return JSIterator::CreateIterResultObject(thread, undefinedHandle, true).GetTaggedValue();
     }
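`js_api_plain_array.h` redeclares the inline `Length` field as `uint32_t` through `ACCESSORS_PRIMITIVE_FIELD`. A rough standalone illustration of what an inline primitive-field accessor amounts to (hypothetical macro for exposition, not the engine's actual expansion): the field occupies fixed bytes inside the object, so swapping `int32_t` for `uint32_t` changes only the accessor signatures, not the object layout.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative stand-in: stamp out typed Get/Set over a byte offset and
// publish the offset of the next field.
#define ILLUSTRATIVE_PRIMITIVE_FIELD(name, type, offset, endOffset)                   \
    type Get##name() const                                                            \
    {                                                                                 \
        type v;                                                                       \
        std::memcpy(&v, reinterpret_cast<const char *>(this) + (offset), sizeof(v));  \
        return v;                                                                     \
    }                                                                                 \
    void Set##name(type value)                                                        \
    {                                                                                 \
        std::memcpy(reinterpret_cast<char *>(this) + (offset), &value, sizeof(value)); \
    }                                                                                 \
    static constexpr size_t endOffset = (offset) + sizeof(type);

struct DemoObject {
    static constexpr size_t LENGTH_OFFSET = 0;
    ILLUSTRATIVE_PRIMITIVE_FIELD(Length, uint32_t, LENGTH_OFFSET, LAST_OFFSET)
    char storage[sizeof(uint32_t)] = {};  // backing bytes for the field
};

int main()
{
    DemoObject obj;
    obj.SetLength(5);
    return (obj.GetLength() == 5 && DemoObject::LAST_OFFSET == 4) ? 0 : 1;
}
```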
diff --git a/ecmascript/js_api/js_api_plain_array_iterator.h b/ecmascript/js_api/js_api_plain_array_iterator.h
index 1df2ebe44d68df8356b81b42818da4adf950c313..db8cdba574b841fda24cafdc8876480dcc769496 100644
--- a/ecmascript/js_api/js_api_plain_array_iterator.h
+++ b/ecmascript/js_api/js_api_plain_array_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_PLAIN_ARRAY_ITERATOR_H
-#define ECMASCRIPT_JS_API_PLAIN_ARRAY_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_PLAIN_ARRAY_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_PLAIN_ARRAY_ITERATOR_H

 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_object.h"
@@ -42,4 +42,4 @@ public:
     DECL_DUMP()
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_PLAIN_ARRAY_ITERATOR_H
+#endif  // ECMASCRIPT_JS_API_JS_API_PLAIN_ARRAY_ITERATOR_H
diff --git a/ecmascript/js_api/js_api_queue.cpp b/ecmascript/js_api/js_api_queue.cpp
index f0fc7d4c391c80cfd29862e22a998e294228c52e..67da40b2e2a0d9634dc8ff56085e3172cf043abb 100644
--- a/ecmascript/js_api/js_api_queue.cpp
+++ b/ecmascript/js_api/js_api_queue.cpp
@@ -197,6 +197,7 @@ bool JSAPIQueue::GetOwnProperty(JSThread *thread, const JSHandle<JSAPIQueue> &obj,
     uint32_t index = 0;
     if (UNLIKELY(!JSTaggedValue::ToElementIndex(key.GetTaggedValue(), &index))) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, key.GetTaggedValue());
+        RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
         CString errorMsg = "The type of \"index\" can not obtain attributes of no-number type. Received value is: "
                            + ConvertToString(*result);
diff --git a/ecmascript/js_api/js_api_queue.h b/ecmascript/js_api/js_api_queue.h
index 5c5032105260d6ce8a4ea98243d0ea1a9c0d0ab4..63d07847ca7ad9d9ab10c0f0dda716263dc499ad 100644
--- a/ecmascript/js_api/js_api_queue.h
+++ b/ecmascript/js_api/js_api_queue.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_QUEUE_H
-#define ECMASCRIPT_JS_API_QUEUE_H
+#ifndef ECMASCRIPT_JS_API_JS_API_QUEUE_H
+#define ECMASCRIPT_JS_API_JS_API_QUEUE_H

 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value-inl.h"
@@ -22,7 +22,7 @@ namespace panda::ecmascript {
 class JSAPIQueue : public JSObject {
 public:
-    static constexpr int DEFAULT_CAPACITY_LENGTH = 8;
+    static constexpr uint32_t DEFAULT_CAPACITY_LENGTH = 8;
     static JSAPIQueue *Cast(TaggedObject *object)
     {
         ASSERT(JSTaggedValue(object).IsJSAPIQueue());
@@ -86,4 +86,4 @@ private:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_QUEUE_H
+#endif  // ECMASCRIPT_JS_API_JS_API_QUEUE_H
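The `RETURN_VALUE_IF_ABRUPT_COMPLETION` line added to `JSAPIQueue::GetOwnProperty` (and to the list, stack, tree-map and tree-set containers in the neighboring hunks) guards the same hazard everywhere: `JSTaggedValue::ToString` can call back into user JS (`toString`/`valueOf`) and leave a pending exception, so the result must not be touched before checking. A standalone analogue with simplified stand-in types (not the engine's API) of what the macro does:

```cpp
#include <string>

// Stand-in for the VM thread state.
struct Thread {
    bool hasPendingException = false;
};

// Stand-in for a conversion that may run user code and throw.
std::string ToStringMayThrow(Thread &t, int key)
{
    if (key < 0) {  // pretend the user's toString() threw
        t.hasPendingException = true;
        return {};
    }
    return std::to_string(key);
}

// The macro's job: after any call that can run JS, check the thread
// and propagate failure *before* using the result.
#define RETURN_VALUE_IF_ABRUPT(t, value) \
    do {                                 \
        if ((t).hasPendingException) {   \
            return (value);              \
        }                                \
    } while (0)

bool GetOwnPropertyLike(Thread &t, int key)
{
    std::string result = ToStringMayThrow(t, key);
    RETURN_VALUE_IF_ABRUPT(t, false);  // the line the patch inserts, in spirit
    return !result.empty();
}

int main()
{
    Thread t;
    return (!GetOwnPropertyLike(t, -1) && t.hasPendingException) ? 0 : 1;
}
```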
diff --git a/ecmascript/js_api/js_api_queue_iterator.h b/ecmascript/js_api/js_api_queue_iterator.h
index 3ef96cc8b7c86358294d46ee46f1c6080feeae0e..77d3a5cb06e659bbd616c04a152ffe10639082c2 100644
--- a/ecmascript/js_api/js_api_queue_iterator.h
+++ b/ecmascript/js_api/js_api_queue_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_QUEUE_ITERATOR_H
-#define ECMASCRIPT_JS_API_QUEUE_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_QUEUE_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_QUEUE_ITERATOR_H

 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_object.h"
@@ -40,4 +40,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_QUEUE_ITERATOR_H
+#endif  // ECMASCRIPT_JS_API_JS_API_QUEUE_ITERATOR_H
diff --git a/ecmascript/js_api/js_api_stack.cpp b/ecmascript/js_api/js_api_stack.cpp
index 565982932443637cd5aa7dc5626c912079db3ca7..fea7b157c4757bb05b5d33a66c3b21346521cc01 100644
--- a/ecmascript/js_api/js_api_stack.cpp
+++ b/ecmascript/js_api/js_api_stack.cpp
@@ -17,7 +17,7 @@

 #include "ecmascript/containers/containers_errors.h"
 #include "ecmascript/js_tagged_value.h"
-#include "ecmascript/object_factory.h"
+#include "ecmascript/object_factory-inl.h"

 namespace panda::ecmascript {
 using ContainerError = containers::ContainerError;
@@ -142,6 +142,7 @@ bool JSAPIStack::GetOwnProperty(JSThread *thread, const JSHandle<JSAPIStack> &obj,
     uint32_t index = 0;
     if (UNLIKELY(!JSTaggedValue::ToElementIndex(key.GetTaggedValue(), &index))) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, key.GetTaggedValue());
+        RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
         CString errorMsg = "The type of \"index\" can not obtain attributes of no-number type. Received value is: "
                            + ConvertToString(*result);
diff --git a/ecmascript/js_api/js_api_stack.h b/ecmascript/js_api/js_api_stack.h
index c156cae45c25adc1f2fbea90d40a05917550d9af..c57de1c4b604cab5243578218521a7d0eafb5903 100644
--- a/ecmascript/js_api/js_api_stack.h
+++ b/ecmascript/js_api/js_api_stack.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_STACK_H
-#define ECMASCRIPT_JS_API_STACK_H
+#ifndef ECMASCRIPT_JS_API_JS_API_STACK_H
+#define ECMASCRIPT_JS_API_JS_API_STACK_H

 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value-inl.h"
@@ -22,7 +22,7 @@ namespace panda::ecmascript {
 class JSAPIStack : public JSObject {
 public:
-    static constexpr int DEFAULT_CAPACITY_LENGTH = 10;
+    static constexpr uint32_t DEFAULT_CAPACITY_LENGTH = 10;
     static JSAPIStack *Cast(TaggedObject *object)
     {
         ASSERT(JSTaggedValue(object).IsJSAPIStack());
@@ -48,7 +48,7 @@ public:
     int Search(const JSHandle<JSTaggedValue> &value);

-    inline int GetSize() const
+    inline uint32_t GetSize() const
     {
         return GetTop();
     }
@@ -78,4 +78,4 @@ private:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_STACK_H
+#endif  // ECMASCRIPT_JS_API_JS_API_STACK_H
diff --git a/ecmascript/js_api/js_api_stack_iterator.h b/ecmascript/js_api/js_api_stack_iterator.h
index 884bf7a8d8fff1e2c535b5c5ddb8c98ad2c8ddd1..66964a0513824277dd8fa011fe521ae4456c23d3 100644
--- a/ecmascript/js_api/js_api_stack_iterator.h
+++ b/ecmascript/js_api/js_api_stack_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_STACK_ITERATOR_H
-#define ECMASCRIPT_JS_API_STACK_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_STACK_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_STACK_ITERATOR_H

 #include "ecmascript/js_api/js_api_stack.h"
 #include "ecmascript/js_object.h"
@@ -40,4 +40,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_STACK_ITERATOR_H
+#endif  // ECMASCRIPT_JS_API_JS_API_STACK_ITERATOR_H
diff --git a/ecmascript/js_api/js_api_tree_map.cpp b/ecmascript/js_api/js_api_tree_map.cpp
index 54234b5f32314ccf7c13c998072f041d347b083f..e0eccb6e19ccb991f6a085fa446f8315ac686414 100644
--- a/ecmascript/js_api/js_api_tree_map.cpp
+++ b/ecmascript/js_api/js_api_tree_map.cpp
@@ -27,6 +27,7 @@ void JSAPITreeMap::Set(JSThread *thread, const JSHandle<JSAPITreeMap> &map, const
 {
     if (!TaggedTreeMap::IsKey(key.GetTaggedValue())) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, key.GetTaggedValue());
+        RETURN_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"key\" must be not null. Received value is: "
                            + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
diff --git a/ecmascript/js_api/js_api_tree_map.h b/ecmascript/js_api/js_api_tree_map.h
index 2ef3a26bac98ab3ba82b9c524a8e31b3110cfe54..40e91b8c38233941da4f7475f937b20229fb4b93 100644
--- a/ecmascript/js_api/js_api_tree_map.h
+++ b/ecmascript/js_api/js_api_tree_map.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_TREE_MAP_H
-#define ECMASCRIPT_JS_API_TREE_MAP_H
+#ifndef ECMASCRIPT_JS_API_JS_API_TREE_MAP_H
+#define ECMASCRIPT_JS_API_JS_API_TREE_MAP_H

 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value-inl.h"
@@ -62,4 +62,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_TREE_MAP_H
+#endif  // ECMASCRIPT_JS_API_JS_API_TREE_MAP_H
diff --git a/ecmascript/js_api/js_api_tree_map_iterator.h b/ecmascript/js_api/js_api_tree_map_iterator.h
index b6f3190f92a81cfdb93e54961182c82b8b7f60ad..83ea3880528cfdb152f82f5ee2cf884232d8e1dd 100644
--- a/ecmascript/js_api/js_api_tree_map_iterator.h
+++ b/ecmascript/js_api/js_api_tree_map_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_TREE_MAP_ITERATOR_H
-#define ECMASCRIPT_JS_API_TREE_MAP_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_TREE_MAP_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_TREE_MAP_ITERATOR_H

 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_object.h"
@@ -53,4 +53,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_TREE_MAP_ITERATOR_H
+#endif  // ECMASCRIPT_JS_API_JS_API_TREE_MAP_ITERATOR_H
diff --git a/ecmascript/js_api/js_api_tree_set.cpp b/ecmascript/js_api/js_api_tree_set.cpp
index b8b57be71ae6d615f97f0a1e64715b33c2a7f3d7..39d78fa5756c3d1e1e089f089160683d72a1a266 100644
--- a/ecmascript/js_api/js_api_tree_set.cpp
+++ b/ecmascript/js_api/js_api_tree_set.cpp
@@ -26,6 +26,7 @@ void JSAPITreeSet::Add(JSThread *thread, const JSHandle<JSAPITreeSet> &set, const
 {
     if (!TaggedTreeSet::IsKey(value.GetTaggedValue())) {
         JSHandle<EcmaString> result = JSTaggedValue::ToString(thread, value.GetTaggedValue());
+        RETURN_IF_ABRUPT_COMPLETION(thread);
         CString errorMsg = "The type of \"value\" must be Key of JS. Received value is: "
                            + ConvertToString(*result);
         JSTaggedValue error = ContainerError::BusinessError(thread, ErrorFlag::TYPE_ERROR, errorMsg.c_str());
diff --git a/ecmascript/js_api/js_api_tree_set.h b/ecmascript/js_api/js_api_tree_set.h
index 0d53d1c49039f9297a794cb2561cc87d82b09a8b..7f799562b494ddcebbb15445186e7b5566e35cc1 100644
--- a/ecmascript/js_api/js_api_tree_set.h
+++ b/ecmascript/js_api/js_api_tree_set.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_TREE_SET_H
-#define ECMASCRIPT_JS_API_TREE_SET_H
+#ifndef ECMASCRIPT_JS_API_JS_API_TREE_SET_H
+#define ECMASCRIPT_JS_API_JS_API_TREE_SET_H

 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value-inl.h"
@@ -57,4 +57,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_TREE_SET_H_
+#endif  // ECMASCRIPT_JS_API_JS_API_TREE_SET_H
diff --git a/ecmascript/js_api/js_api_tree_set_iterator.h b/ecmascript/js_api/js_api_tree_set_iterator.h
index 67292b50c2f57f972de483b494477348ab212acf..adfb068a8169c0a586ccb10ca67740572974977c 100644
--- a/ecmascript/js_api/js_api_tree_set_iterator.h
+++ b/ecmascript/js_api/js_api_tree_set_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_TREE_SET_ITERATOR_H
-#define ECMASCRIPT_JS_API_TREE_SET_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_TREE_SET_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_TREE_SET_ITERATOR_H

 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_object.h"
@@ -53,4 +53,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_TREE_SET_ITERATOR_H
+#endif  // ECMASCRIPT_JS_API_JS_API_TREE_SET_ITERATOR_H
diff --git a/ecmascript/js_api/js_api_vector.cpp b/ecmascript/js_api/js_api_vector.cpp
index 956fc4025a29171dea7d2fb0ccc3a995fdda58cd..634d2941e22407122439cb27a783c59c781988c9 100644
--- a/ecmascript/js_api/js_api_vector.cpp
+++ b/ecmascript/js_api/js_api_vector.cpp
@@ -15,6 +15,7 @@

 #include "ecmascript/js_api/js_api_vector.h"

+#include "ecmascript/global_env_constants-inl.h"
 #include "ecmascript/interpreter/interpreter.h"
 #include "ecmascript/js_array.h"
 #include "ecmascript/js_api/js_api_vector_iterator.h"
@@ -29,7 +30,7 @@ static const uint32_t MAX_VALUE = 0x7fffffff;
 static const uint32_t MAX_ARRAY_SIZE = MAX_VALUE - 8;

 bool JSAPIVector::Add(JSThread *thread, const JSHandle<JSAPIVector> &vector, const JSHandle<JSTaggedValue> &value)
 {
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     GrowCapacity(thread, vector, length + 1);

     TaggedArray *elements = TaggedArray::Cast(vector->GetElements().GetTaggedObject());
@@ -43,15 +44,15 @@ void JSAPIVector::Insert(JSThread *thread, const JSHandle<JSAPIVector> &vector,
                          const JSHandle<JSTaggedValue> &value, int32_t index)
 {
-    int32_t length = vector->GetSize();
-    if (index < 0 || index > length) {
+    uint32_t length = vector->GetSize();
+    if (index < 0 || index > static_cast<int32_t>(length)) {
         THROW_ERROR(thread, ErrorType::RANGE_ERROR, "the index is out-of-bounds");
     }
     GrowCapacity(thread, vector, length + 1);

     TaggedArray *elements = TaggedArray::Cast(vector->GetElements().GetTaggedObject());
     ASSERT(!elements->IsDictionaryMode());
-    for (int32_t i = length - 1; i >= index; i--) {
+    for (int32_t i = static_cast<int32_t>(length) - 1; i >= index; i--) {
         elements->Set(thread, i + 1, elements->Get(i));
     }

@@ -61,7 +62,7 @@ void JSAPIVector::Insert(JSThread *thread, const JSHandle<JSAPIVector> &vector,

 void JSAPIVector::SetLength(JSThread *thread, const JSHandle<JSAPIVector> &vector, uint32_t newSize)
 {
-    uint32_t len = static_cast<uint32_t>(vector->GetSize());
+    uint32_t len = vector->GetSize();
     if (newSize > len) {
         GrowCapacity(thread, vector, newSize);
     }
@@ -103,15 +104,15 @@ int32_t JSAPIVector::GetIndexFrom(JSThread *thread, const JSHandle<JSAPIVector> &vector,
 {
     TaggedArray *elements = TaggedArray::Cast(vector->GetElements().GetTaggedObject());
     ASSERT(!elements->IsDictionaryMode());
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     if (index < 0) {
         index = 0;
-    } else if (index >= length) {
+    } else if (index >= static_cast<int32_t>(length)) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "no-such-element", -1);
     }
     JSMutableHandle<JSTaggedValue> value(thread, JSTaggedValue::Undefined());
-    for (int32_t i = index; i < length; i++) {
+    for (uint32_t i = static_cast<uint32_t>(index); i < length; i++) {
         value.Update(JSTaggedValue(elements->Get(i)));
         if (JSTaggedValue::StrictEqual(thread, obj, value)) {
             return i;
@@ -127,7 +128,7 @@ bool JSAPIVector::IsEmpty() const

 JSTaggedValue JSAPIVector::GetLastElement()
 {
-    int32_t length = GetSize();
+    uint32_t length = GetSize();
     if (length == 0) {
         return JSTaggedValue::Undefined();
     }
@@ -139,7 +140,7 @@ JSTaggedValue JSAPIVector::GetLastElement()
 int32_t JSAPIVector::GetLastIndexOf(JSThread *thread, const JSHandle<JSAPIVector> &vector,
                                     const JSHandle<JSTaggedValue> &obj)
 {
-    int32_t index = vector->GetSize() - 1;
+    int32_t index = static_cast<int32_t>(vector->GetSize()) - 1;
     if (index < 0) {
         return -1; // vector isEmpty, defalut return -1
     }
@@ -149,8 +150,8 @@ int32_t JSAPIVector::GetLastIndexFrom(JSThread *thread, const JSHandle<JSAPIVector> &vector,
                                       const JSHandle<JSTaggedValue> &obj, int32_t index)
 {
-    int32_t length = vector->GetSize();
-    if (index >= length) {
+    uint32_t length = vector->GetSize();
+    if (index >= static_cast<int32_t>(length)) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "index-out-of-bounds", -1);
     } else if (index < 0) {
         index = 0;
@@ -170,7 +171,7 @@ int32_t JSAPIVector::GetLastIndexFrom(JSThread *thread, const JSHandle<JSAPIVector> &vector,
 bool JSAPIVector::Remove(JSThread *thread, const JSHandle<JSAPIVector> &vector, const JSHandle<JSTaggedValue> &obj)
 {
     int32_t index = GetIndexOf(thread, vector, obj);
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     if (index >= 0) {
         JSHandle<TaggedArray> elements(thread, vector->GetElements());
         ASSERT(!elements->IsDictionaryMode());
@@ -184,8 +185,8 @@ bool JSAPIVector::Remove(JSThread *thread, const JSHandle<JSAPIVector> &vector, const JSHandle<JSTaggedValue> &obj)
 JSTaggedValue JSAPIVector::RemoveByIndex(JSThread *thread, const JSHandle<JSAPIVector> &vector, int32_t index)
 {
-    int32_t length = vector->GetSize();
-    if (index < 0 || index >= length) {
+    uint32_t length = vector->GetSize();
+    if (index < 0 || index >= static_cast<int32_t>(length)) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "the index is out-of-bounds", JSTaggedValue::Exception());
     }
     TaggedArray *resElements = TaggedArray::Cast(vector->GetElements().GetTaggedObject());
@@ -206,7 +207,7 @@ JSTaggedValue JSAPIVector::RemoveByIndex(JSThread *thread, const JSHandle<JSAPIVector> &vector, int32_t index)
 JSTaggedValue JSAPIVector::RemoveByRange(JSThread *thread, const JSHandle<JSAPIVector> &vector,
                                          int32_t fromIndex, int32_t toIndex)
 {
-    int32_t length = vector->GetSize();
+    int32_t length = static_cast<int32_t>(vector->GetSize());
     if (toIndex <= fromIndex) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "the fromIndex cannot be less than or equal to toIndex",
                                      JSTaggedValue::Exception());
@@ -233,8 +234,9 @@ JSHandle<JSAPIVector> JSAPIVector::SubVector(JSThread *thread, const JSHandle<JSAPIVector> &vector,
                                              int32_t fromIndex, int32_t toIndex)
 {
-    int32_t length = vector->GetSize();
-    if (fromIndex < 0 || toIndex < 0 || fromIndex >= length || toIndex >= length) {
+    int32_t length = static_cast<int32_t>(vector->GetSize());
+    if (fromIndex < 0 || toIndex < 0 ||
+        fromIndex >= length || toIndex >= length) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "the fromIndex or the toIndex is out-of-bounds",
                                      JSHandle<JSAPIVector>());
     }
@@ -243,12 +245,12 @@ JSHandle<JSAPIVector> JSAPIVector::SubVector(JSThread *thread, const JSHandle<JSAPIVector> &vector,
                                      JSHandle<JSAPIVector>());
     }

-    int32_t newLength = toIndex - fromIndex;
+    uint32_t newLength = static_cast<uint32_t>(toIndex - fromIndex);
     JSHandle<JSAPIVector> subVector = thread->GetEcmaVM()->GetFactory()->NewJSAPIVector(newLength);

     TaggedArray *elements = TaggedArray::Cast(vector->GetElements().GetTaggedObject());
     subVector->SetLength(newLength);
-    for (int32_t i = 0; i < newLength; i++) {
+    for (uint32_t i = 0; i < newLength; i++) {
         subVector->Set(thread, i, elements->Get(fromIndex + i));
     }
@@ -260,10 +262,10 @@ JSTaggedValue JSAPIVector::ToString(JSThread *thread, const JSHandle<JSAPIVector> &vector)
 {
     auto factory = thread->GetEcmaVM()->GetFactory();
     std::u16string sepHandle = std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> {}.from_bytes(",");
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     std::u16string concatStr;
     JSMutableHandle<JSTaggedValue> element(thread, JSTaggedValue::Undefined());
-    for (int32_t k = 0; k < length; k++) {
+    for (uint32_t k = 0; k < length; k++) {
         std::u16string nextStr;
         element.Update(Get(thread, vector, k));
         if (!element->IsUndefined() && !element->IsNull()) {
@@ -290,13 +292,13 @@ JSTaggedValue JSAPIVector::ForEach(JSThread *thread, const JSHandle<JSTaggedValue> &thisHandle,
                                   const JSHandle<JSTaggedValue> &callbackFn, const JSHandle<JSTaggedValue> &thisArg)
 {
     JSHandle<JSAPIVector> vector = JSHandle<JSAPIVector>::Cast(thisHandle);
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     JSTaggedValue key = JSTaggedValue::Undefined();
     JSMutableHandle<JSTaggedValue> kValue(thread, JSTaggedValue::Undefined());
-    const int32_t argsLength = NUM_MANDATORY_JSFUNC_ARGS;
+    const uint32_t argsLength = NUM_MANDATORY_JSFUNC_ARGS;
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
-    for (int32_t k = 0; k < length; k++) {
+    for (uint32_t k = 0; k < length; k++) {
         kValue.Update(Get(thread, vector, k));
         key = JSTaggedValue(k);
         EcmaRuntimeCallInfo *info =
@@ -318,13 +320,13 @@ JSTaggedValue JSAPIVector::ReplaceAllElements(JSThread *thread, const JSHandle<JSTaggedValue> &thisHandle,
                                              const JSHandle<JSTaggedValue> &callbackFn,
                                              const JSHandle<JSTaggedValue> &thisArg)
 {
     JSHandle<JSAPIVector> vector = JSHandle<JSAPIVector>::Cast(thisHandle);
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     JSTaggedValue key = JSTaggedValue::Undefined();
     JSMutableHandle<JSTaggedValue> kValue(thread, JSTaggedValue::Undefined());
-    const int32_t argsLength = NUM_MANDATORY_JSFUNC_ARGS;
+    const uint32_t argsLength = NUM_MANDATORY_JSFUNC_ARGS;
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
-    for (int32_t k = 0; k < length; k++) {
+    for (uint32_t k = 0; k < length; k++) {
         kValue.Update(Get(thread, vector, k));
         key = JSTaggedValue(k);
         EcmaRuntimeCallInfo *info =
@@ -367,8 +369,8 @@ void JSAPIVector::GrowCapacity(JSThread *thread, const JSHandle<JSAPIVector> &vector, uint32_t newCapacity)

 JSTaggedValue JSAPIVector::Get(JSThread *thread, const JSHandle<JSAPIVector> &vector, int32_t index)
 {
-    int32_t len = vector->GetSize();
-    if (index < 0 || index >= len) {
+    uint32_t len = vector->GetSize();
+    if (index < 0 || index >= static_cast<int32_t>(len)) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "the index is out-of-bounds", JSTaggedValue::Exception());
     }
@@ -386,12 +388,12 @@ JSTaggedValue JSAPIVector::Set(JSThread *thread, int32_t index, const JSTaggedValue &value)
 bool JSAPIVector::Has(const JSTaggedValue &value) const
 {
     TaggedArray *elements = TaggedArray::Cast(GetElements().GetTaggedObject());
-    int32_t length = GetSize();
+    uint32_t length = GetSize();
     if (length == 0) {
         return false;
     }
-    for (int32_t i = 0; i < length; i++) {
+    for (uint32_t i = 0; i < length; i++) {
         if (JSTaggedValue::SameValue(elements->Get(i), value)) {
             return true;
         }
@@ -417,7 +419,7 @@ bool JSAPIVector::GetOwnProperty(JSThread *thread, const JSHandle<JSAPIVector> &obj,
         THROW_TYPE_ERROR_AND_RETURN(thread, "Can not obtain attributes of no-number type", false);
     }

-    uint32_t length = static_cast<uint32_t>(obj->GetSize());
+    uint32_t length = obj->GetSize();
     if (index >= length) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "GetOwnProperty index out-of-bounds", false);
     }
@@ -429,21 +431,21 @@ bool JSAPIVector::GetOwnProperty(JSThread *thread, const JSHandle<JSAPIVector> &obj,

 void JSAPIVector::TrimToCurrentLength(JSThread *thread, const JSHandle<JSAPIVector> &obj)
 {
-    int32_t length = obj->GetSize();
+    uint32_t length = obj->GetSize();
     uint32_t capacity = obj->GetCapacity();
     TaggedArray *elements = TaggedArray::Cast(obj->GetElements().GetTaggedObject());
     ASSERT(!elements->IsDictionaryMode());
-    if (capacity > static_cast<uint32_t>(length)) {
+    if (capacity > length) {
         elements->Trim(thread, length);
     }
 }

 void JSAPIVector::Clear(JSThread *thread, const JSHandle<JSAPIVector> &obj)
 {
-    int length = obj->GetLength();
+    uint32_t length = obj->GetLength();
     JSHandle<TaggedArray> elements(thread, obj->GetElements());
     ASSERT(!elements->IsDictionaryMode());
-    for (int i = 0; i <= length; ++i) {
+    for (uint32_t i = 0; i <= length; ++i) {
         elements->Set(thread, i, JSTaggedValue::Hole());
     }
     obj->SetLength(0);
@@ -455,7 +457,7 @@ JSHandle<JSAPIVector> JSAPIVector::Clone(JSThread *thread, const JSHandle<JSAPIVector> &obj)
 {
     auto factory = thread->GetEcmaVM()->GetFactory();
     JSHandle<JSAPIVector> newVector = factory->NewJSAPIVector(0);
-    int32_t length = obj->GetSize();
+    uint32_t length = obj->GetSize();
     newVector->SetLength(length);

     JSHandle<TaggedArray> dstElements = factory->NewAndCopyTaggedArray(srcElements, length, length);
@@ -465,7 +467,7 @@ JSHandle<JSAPIVector> JSAPIVector::Clone(JSThread *thread, const JSHandle<JSAPIVector> &obj)

 JSTaggedValue JSAPIVector::GetFirstElement(const JSHandle<JSAPIVector> &vector)
 {
-    int32_t length = vector->GetSize();
+    uint32_t length = vector->GetSize();
     if (length == 0) {
         return JSTaggedValue::Undefined();
     }
@@ -484,9 +486,9 @@ JSTaggedValue JSAPIVector::GetIteratorObj(JSThread *thread, const JSHandle<JSAPIVector> &obj)
 OperationResult JSAPIVector::GetProperty(JSThread *thread, const JSHandle<JSAPIVector> &obj,
                                          const JSHandle<JSTaggedValue> &key)
 {
-    int length = obj->GetSize();
+    uint32_t length = obj->GetSize();
     int index = key->GetInt();
-    if (index < 0 || index >= length) {
+    if (index < 0 || index >= static_cast<int>(length)) {
         THROW_RANGE_ERROR_AND_RETURN(thread, "GetProperty index out-of-bounds",
                                      OperationResult(thread, JSTaggedValue::Exception(), PropertyMetaData(false)));
     }
@@ -498,9 +500,9 @@ bool JSAPIVector::SetProperty(JSThread *thread, const JSHandle<JSAPIVector> &obj,
                               const JSHandle<JSTaggedValue> &key,
                               const JSHandle<JSTaggedValue> &value)
 {
-    int length = obj->GetSize();
+    uint32_t length = obj->GetSize();
     int index = key->GetInt();
-    if (index < 0 || index >= length) {
+    if (index < 0 || index >= static_cast<int>(length)) {
         return false;
     }
diff --git a/ecmascript/js_api/js_api_vector.h b/ecmascript/js_api/js_api_vector.h
index 28edd7d20e2cb7b27bb579c3f5e67fc474bb3239..68a05bd24b182138ca895c6fb1b1502e2453a2aa 100644
--- a/ecmascript/js_api/js_api_vector.h
+++ b/ecmascript/js_api/js_api_vector.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_VECTOR_H
-#define ECMASCRIPT_JS_API_VECTOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_VECTOR_H
+#define ECMASCRIPT_JS_API_JS_API_VECTOR_H

 #include "ecmascript/js_object.h"
 #include "ecmascript/js_tagged_value-inl.h"
@@ -103,13 +103,13 @@ public:
                               const JSHandle<JSTaggedValue> &key, const JSHandle<JSTaggedValue> &value);

-    inline int32_t GetSize() const
+    inline uint32_t GetSize() const
     {
         return GetLength();
     }

     static constexpr size_t ELEMENT_COUNT_OFFSET = JSObject::SIZE;
-    ACCESSORS_PRIMITIVE_FIELD(Length, int32_t, ELEMENT_COUNT_OFFSET, ENDL_OFFSET)
+    ACCESSORS_PRIMITIVE_FIELD(Length, uint32_t, ELEMENT_COUNT_OFFSET, ENDL_OFFSET)
     DEFINE_ALIGN_SIZE(ENDL_OFFSET);

     DECL_VISIT_OBJECT_FOR_JS_OBJECT(JSObject, ELEMENT_COUNT_OFFSET, ELEMENT_COUNT_OFFSET)
@@ -119,4 +119,4 @@ private:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JSAPIVECTOR_H
+#endif  // ECMASCRIPT_JS_API_JS_API_VECTOR_H
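Most loop counters in `js_api_vector.cpp` switch to `uint32_t`, but the element-shifting loop in `JSAPIVector::Insert` deliberately stays signed and only casts its starting bound. A standalone sketch of the wraparound that choice avoids (illustrative functions, not engine code):

```cpp
#include <cstdint>

// The hazard: an unsigned induction variable counting down never goes
// negative. With index == 0, `i >= index` is always true, so the loop
// wraps from 0 to UINT32_MAX and indexes far out of bounds.
void ShiftRightWrapsAround(uint32_t *data, uint32_t length, uint32_t index)
{
    for (uint32_t i = length - 1; i >= index; i--) {  // never terminates when index == 0
        data[i + 1] = data[i];
    }
}

// The form the patch keeps: cast the bound once, stay signed, and let i
// legitimately drop below index (possibly to -1) to end the loop.
void ShiftRight(uint32_t *data, uint32_t length, int32_t index)
{
    for (int32_t i = static_cast<int32_t>(length) - 1; i >= index; i--) {
        data[i + 1] = data[i];
    }
}

int main()
{
    uint32_t buf[4] = {1, 2, 3, 0};
    ShiftRight(buf, 3, 0);  // buf becomes {1, 1, 2, 3}
    return buf[3] == 3 ? 0 : 1;
}
```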
diff --git a/ecmascript/js_api/js_api_vector_iterator.h b/ecmascript/js_api/js_api_vector_iterator.h
index d4d5c975401c4bd95fac7aa2c2a1d7d79000e21d..e52371d76a6325a7d3a71739ff48c09a9d64814a 100644
--- a/ecmascript/js_api/js_api_vector_iterator.h
+++ b/ecmascript/js_api/js_api_vector_iterator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Huawei Device Co., Ltd.
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
@@ -13,8 +13,8 @@
  * limitations under the License.
  */

-#ifndef ECMASCRIPT_JS_API_VECTOR_ITERATOR_H
-#define ECMASCRIPT_JS_API_VECTOR_ITERATOR_H
+#ifndef ECMASCRIPT_JS_API_JS_API_VECTOR_ITERATOR_H
+#define ECMASCRIPT_JS_API_JS_API_VECTOR_ITERATOR_H

 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_object.h"
@@ -40,4 +40,4 @@ public:
 };
 }  // namespace panda::ecmascript
-#endif  // ECMASCRIPT_JS_API_VECTOR_ITERATOR_H
\ No newline at end of file
+#endif  // ECMASCRIPT_JS_API_JS_API_VECTOR_ITERATOR_H
\ No newline at end of file
diff --git a/ecmascript/js_array.cpp b/ecmascript/js_array.cpp
index 60b2b31a553c724535a77ed23e46e2c138c94ba8..a9730532b7f454e6f233c8b9294bf65b19f9167b 100644
--- a/ecmascript/js_array.cpp
+++ b/ecmascript/js_array.cpp
@@ -26,7 +26,7 @@ namespace panda::ecmascript {
 JSTaggedValue JSArray::LengthGetter([[maybe_unused]] JSThread *thread, const JSHandle<JSObject> &self)
 {
-    return JSArray::Cast(*self)->GetLength();
+    return JSTaggedValue(JSArray::Cast(*self)->GetLength());
 }

 bool JSArray::LengthSetter(JSThread *thread, const JSHandle<JSObject> &self, const JSHandle<JSTaggedValue> &value,
@@ -74,7 +74,7 @@ JSHandle<JSTaggedValue> JSArray::ArrayCreate(JSThread *thread, JSTaggedNumber length,
     // 8. Set the [[Prototype]] internal slot of A to proto.
     JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
     JSHandle<JSFunction> arrayFunc(env->GetArrayFunction());
-    JSHandle<JSObject> obj = factory->NewJSObjectByConstructor(JSHandle<JSFunction>(arrayFunc), newTarget);
+    JSHandle<JSObject> obj = factory->NewJSObjectByConstructor(arrayFunc, newTarget);
     RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     // 9. Set the [[Extensible]] internal slot of A to true.
     obj->GetJSHClass()->SetExtensible(true);
@@ -84,7 +84,7 @@ JSHandle<JSTaggedValue> JSArray::ArrayCreate(JSThread *thread, JSTaggedNumber length,
     if (mode == ArrayMode::LITERAL) {
         JSArray::Cast(*obj)->SetArrayLength(thread, normalArrayLength);
     } else {
-        JSArray::SetCapacity(thread, obj, 0, normalArrayLength);
+        JSArray::SetCapacity(thread, obj, 0, normalArrayLength, true);
     }

     return JSHandle<JSTaggedValue>(obj);
@@ -115,7 +115,7 @@ JSTaggedValue JSArray::ArraySpeciesCreate(JSThread *thread, const JSHandle<JSObject> &originalArray,
         JSHClass *hclass = originalArray->GetJSHClass();
         if (hclass->IsJSArray() && !hclass->HasConstructor()) {
-            return JSArray::ArrayCreate(thread, length).GetTaggedValue();
+            return JSArray::ArrayCreate(thread, length, ArrayMode::LITERAL).GetTaggedValue();
         }
         JSHandle<JSTaggedValue> constructorKey = globalConst->GetHandledConstructorString();
         constructor = JSTaggedValue::GetProperty(thread, originalValue, constructorKey).GetValue();
@@ -133,7 +133,7 @@ JSTaggedValue JSArray::ArraySpeciesCreate(JSThread *thread, const JSHandle<JSObject> &originalArray,
             JSTaggedValue realmArrayConstructor = realm->GetArrayFunction().GetTaggedValue();
             // If SameValue(C, realmC.[[intrinsics]].[[%Array%]]) is true, let C be undefined.
             if (JSTaggedValue::SameValue(constructor.GetTaggedValue(), realmArrayConstructor)) {
-                return JSArray::ArrayCreate(thread, length).GetTaggedValue();
+                return JSArray::ArrayCreate(thread, length, ArrayMode::LITERAL).GetTaggedValue();
             }
         }
     }
@@ -147,14 +147,14 @@ JSTaggedValue JSArray::ArraySpeciesCreate(JSThread *thread, const JSHandle<JSObject> &originalArray,
             if (constructor->IsNull()) {
-                return JSArray::ArrayCreate(thread, length).GetTaggedValue();
+                return JSArray::ArrayCreate(thread, length, ArrayMode::LITERAL).GetTaggedValue();
             }
         }
     }

     // If C is undefined, return ArrayCreate(length).
     if (constructor->IsUndefined()) {
-        return JSArray::ArrayCreate(thread, length).GetTaggedValue();
+        return JSArray::ArrayCreate(thread, length, ArrayMode::LITERAL).GetTaggedValue();
     }
     // If IsConstructor(C) is false, throw a TypeError exception.
     if (!constructor->IsConstructor()) {
@@ -167,6 +167,7 @@ JSTaggedValue JSArray::ArraySpeciesCreate(JSThread *thread, const JSHandle<JSObject> &originalArray,
     info->SetCallArg(JSTaggedValue(arrayLength));
     JSTaggedValue result = JSFunction::Construct(info);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);

     // NOTE: If originalArray was created using the standard built-in Array constructor for
     // a Realm that is not the Realm of the running execution context, then a new Array is
@@ -176,7 +177,8 @@ JSTaggedValue JSArray::ArraySpeciesCreate(JSThread *thread, const JSHandle<JSObject> &originalArray,
 }

-void JSArray::SetCapacity(JSThread *thread, const JSHandle<JSObject> &array, uint32_t oldLen, uint32_t newLen)
+void JSArray::SetCapacity(JSThread *thread, const JSHandle<JSObject> &array, uint32_t oldLen, uint32_t newLen,
+                          bool isNew)
 {
     TaggedArray *element = TaggedArray::Cast(array->GetElements().GetTaggedObject());
@@ -221,7 +223,7 @@ void JSArray::SetCapacity(JSThread *thread, const JSHandle<JSObject> &array, uint32_t oldLen, uint32_t newLen,
     if (JSObject::ShouldTransToDict(oldLen, newLen)) {
         JSObject::ElementsToDictionary(thread, array);
     } else if (newLen > capacity) {
-        JSObject::GrowElementsCapacity(thread, array, newLen);
+        JSObject::GrowElementsCapacity(thread, array, newLen, isNew);
     }
     JSArray::Cast(*array)->SetArrayLength(thread, newLen);
 }
@@ -479,15 +481,16 @@ void JSArray::CheckAndCopyArray(const JSThread *thread, JSHandle<JSArray> obj)
     JSHandle<TaggedArray> arr(thread, obj->GetElements());
     // Check whether array is shared in the nonmovable space before set properties and elements.
     // If true, then really copy array in the semi space.
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
     if (arr.GetTaggedValue().IsCOWArray()) {
-        auto newArray = thread->GetEcmaVM()->GetFactory()->CopyArray(arr, arr->GetLength(), arr->GetLength(),
+        auto newArray = factory->CopyArray(arr, arr->GetLength(), arr->GetLength(),
             JSTaggedValue::Hole(), MemSpaceType::SEMI_SPACE);
         obj->SetElements(thread, newArray.GetTaggedValue());
     }
     JSHandle<TaggedArray> prop(thread, obj->GetProperties());
     if (prop.GetTaggedValue().IsCOWArray()) {
-        auto newProps = thread->GetEcmaVM()->GetFactory()->CopyArray(prop,
-            prop->GetLength(), prop->GetLength(), JSTaggedValue::Hole(), MemSpaceType::SEMI_SPACE);
+        auto newProps = factory->CopyArray(prop, prop->GetLength(), prop->GetLength(),
+            JSTaggedValue::Hole(), MemSpaceType::SEMI_SPACE);
         obj->SetProperties(thread, newProps.GetTaggedValue());
     }
 }
diff --git a/ecmascript/js_array.h b/ecmascript/js_array.h
index 0215afb537f061095c8fca351784ded1e3ce6464..d2d3b88af7438f11684897214d648632ed916956 100644
--- a/ecmascript/js_array.h
+++ b/ecmascript/js_array.h
@@ -49,18 +49,19 @@ public:
     // use first inlined property slot for array length
     inline uint32_t GetArrayLength() const
     {
-        return GetLength().GetArrayLength();
+        return GetLength();
     }

-    inline void SetArrayLength(const JSThread *thread, uint32_t length)
+    inline void SetArrayLength([[maybe_unused]] const JSThread *thread, uint32_t length)
     {
-        SetLength(thread, JSTaggedValue(length));
+        SetLength(length);
     }

     static constexpr size_t LENGTH_OFFSET = JSObject::SIZE;
-    ACCESSORS(Length, LENGTH_OFFSET, SIZE);
+    ACCESSORS_PRIMITIVE_FIELD(Length, uint32_t, LENGTH_OFFSET, TRACE_INDEX_OFFSET)
+    ACCESSORS_PRIMITIVE_FIELD(TraceIndex, uint32_t, TRACE_INDEX_OFFSET, SIZE)

-    DECL_VISIT_OBJECT_FOR_JS_OBJECT(JSObject, LENGTH_OFFSET, SIZE)
+    DECL_VISIT_OBJECT_FOR_JS_OBJECT(JSObject, SIZE, SIZE)
     static const uint32_t MAX_ARRAY_INDEX = MAX_ELEMENT_INDEX;
     DECL_DUMP()
@@ -94,7 +95,8 @@ public:
                                 const JSHandle<JSTaggedValue> &value);
     static JSHandle<TaggedArray> ToTaggedArray(JSThread *thread, const JSHandle<JSTaggedValue> &obj);
     static void CheckAndCopyArray(const JSThread *thread, JSHandle<JSArray> obj);
-    static void SetCapacity(JSThread *thread, const JSHandle<JSObject> &array, uint32_t oldLen, uint32_t newLen);
+    static void SetCapacity(JSThread *thread, const JSHandle<JSObject> &array, uint32_t oldLen, uint32_t newLen,
+                            bool isNew = false);
 };
 }  // namespace panda::ecmascript
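With `js_array.h` now storing the length as a raw `uint32_t` inline field (plus the new `TraceIndex` field) instead of a tagged value, `LengthGetter` has to re-box the number before handing it back to JS, which is why it gains the `JSTaggedValue(...)` wrapper. A sketch of the boxing decision (simplified stand-in type; the real tagged representation is the engine's own):

```cpp
#include <cstdint>
#include <limits>

// Sketch: a raw uint32_t array length must be re-boxed for JS, because
// lengths above INT32_MAX cannot be a tagged 32-bit integer and commonly
// fall back to a double representation.
struct TaggedValue {
    bool isInt;
    int32_t asInt;
    double asDouble;
};

TaggedValue BoxLength(uint32_t len)
{
    if (len <= static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
        return {true, static_cast<int32_t>(len), 0.0};  // fast path: tagged int
    }
    return {false, 0, static_cast<double>(len)};        // lengths up to 2^32 - 1
}

int main()
{
    TaggedValue small = BoxLength(42);
    TaggedValue large = BoxLength(3000000000U);  // > INT32_MAX, needs the double path
    return (small.isInt && !large.isInt) ? 0 : 1;
}
```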
diff --git a/ecmascript/js_arraybuffer.cpp b/ecmascript/js_arraybuffer.cpp
index 6b6beb39052e658977b5670e1343951ae90c57cd..178d0e14a0cb9245fb9a981efe550a151a214312 100644
--- a/ecmascript/js_arraybuffer.cpp
+++ b/ecmascript/js_arraybuffer.cpp
@@ -18,6 +18,7 @@
 #include "ecmascript/base/builtins_base.h"
 #include "ecmascript/ecma_macros.h"
 #include "ecmascript/ecma_vm.h"
+#include "ecmascript/mem/barriers-inl.h"
 #include "ecmascript/object_factory.h"
 #include "ecmascript/platform/os.h"
 #include "ecmascript/tagged_array.h"
diff --git a/ecmascript/js_async_from_sync_iterator.cpp b/ecmascript/js_async_from_sync_iterator.cpp
index 821dc47e6caec7a1b18f70d28dd5c854a2b7a6c6..ec7cd891d44202ceb6dd2abdf5cf8adb3733973d 100644
--- a/ecmascript/js_async_from_sync_iterator.cpp
+++ b/ecmascript/js_async_from_sync_iterator.cpp
@@ -45,6 +45,7 @@ JSHandle<JSTaggedValue> JSAsyncFromSyncIterator::CreateAsyncFromSyncIterator(JSThread *thread,
     JSHandle<JSTaggedValue> nextStr = thread->GlobalConstants()->GetHandledNextString();
     JSHandle<JSTaggedValue> tmpAsyncIterator(thread, asyncIterator.GetTaggedValue());
     JSHandle<JSTaggedValue> nextMethod = JSTaggedValue::GetProperty(thread, tmpAsyncIterator, nextStr).GetValue();
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);

     // 4.Let iteratorRecord be the Record {[[Iterator]]: asyncIterator, [[NextMethod]]: nextMethod, [[Done]]: false}.
     JSHandle<AsyncIteratorRecord> iteratorRecord = factory->NewAsyncIteratorRecord(tmpAsyncIterator, nextMethod, false);
diff --git a/ecmascript/js_async_function.cpp b/ecmascript/js_async_function.cpp
index 93b85197d45bcdf466ff8a9fc522bec10197b360..af1ea467950f4052892462012c8ce19e455ec71d 100644
--- a/ecmascript/js_async_function.cpp
+++ b/ecmascript/js_async_function.cpp
@@ -57,6 +57,7 @@ void JSAsyncFunction::AsyncFunctionAwait(JSThread *thread, const JSHandle<JSTaggedValue> &asyncFuncObj,
     info->SetCallArg(value.GetTaggedValue());
     [[maybe_unused]] JSTaggedValue res = JSFunction::Call(info);
+    RETURN_IF_ABRUPT_COMPLETION(thread);

     // 4.Let onFulfilled be a new built-in function object as defined in AsyncFunction Awaited Fulfilled.
     JSHandle<JSAsyncAwaitStatusFunction> fulFunc = factory->NewJSAsyncAwaitStatusFunction(
@@ -100,10 +101,12 @@ void JSAsyncFunction::AsyncFunctionAwait(JSThread *thread, const JSHandle<JSTaggedValue> &asyncFuncObj,
     JSHandle<GeneratorContext> asyncCtxt;
     if (asyncFuncObj->IsAsyncGeneratorObject()) {
         JSHandle<JSObject> obj = JSTaggedValue::ToObject(thread, asyncFuncObj);
+        RETURN_IF_ABRUPT_COMPLETION(thread);
         JSHandle<JSAsyncGeneratorObject> asyncGen = JSHandle<JSAsyncGeneratorObject>::Cast(obj);
         asyncCtxt = JSHandle<GeneratorContext>(thread, asyncGen->GetGeneratorContext());
     } else {
         JSHandle<JSObject> obj = JSTaggedValue::ToObject(thread, asyncFuncObj);
+        RETURN_IF_ABRUPT_COMPLETION(thread);
         JSHandle<JSAsyncFuncObject> asyncFun = JSHandle<JSAsyncFuncObject>::Cast(obj);
         asyncCtxt = JSHandle<GeneratorContext>(thread, asyncFun->GetGeneratorContext());
     }
diff --git a/ecmascript/js_async_generator_object.cpp b/ecmascript/js_async_generator_object.cpp
index 505299ddd7c5eee6483a3fa4ce0c648a4254998f..16cd628b02998c74752d27de1dfe9a151ae4b32e 100644
--- a/ecmascript/js_async_generator_object.cpp
+++ b/ecmascript/js_async_generator_object.cpp
@@ -45,6 +45,7 @@ void JSAsyncGeneratorObject::AsyncGeneratorValidate(JSThread *thread, const JSHandle<JSTaggedValue> &gen,
     }
     // 4. If generator.[[GeneratorBrand]] is not the same value as generatorBrand, throw a TypeError exception.
     JSHandle<JSObject> obj = JSTaggedValue::ToObject(thread, gen);
+    RETURN_IF_ABRUPT_COMPLETION(thread);
     JSHandle<JSAsyncGeneratorObject> generator = JSHandle<JSAsyncGeneratorObject>::Cast(obj);
     if (!JSTaggedValue::SameValue(generator->GetGeneratorBrand(), val)) {
         THROW_TYPE_ERROR(thread, "Results are not equal");
@@ -75,8 +76,10 @@ JSTaggedValue JSAsyncGeneratorObject::AsyncGeneratorResolve(JSThread *thread,
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     EcmaRuntimeCallInfo* info =
         EcmaInterpreter::NewRuntimeCallInfo(thread, resolve, undefined, undefined, 1);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     info->SetCallArg(its.GetTaggedValue());
     [[maybe_unused]] JSTaggedValue res = JSFunction::Call(info);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);

     // 9. Perform ! AsyncGeneratorResumeNext(generator).
     AsyncGeneratorResumeNext(thread, generator);
@@ -108,8 +111,10 @@ JSTaggedValue JSAsyncGeneratorObject::AsyncGeneratorReject(JSThread *thread,
     const JSHandle<JSTaggedValue> undefined = constants->GetHandledUndefined();
     EcmaRuntimeCallInfo* info = EcmaInterpreter::NewRuntimeCallInfo(thread, reject, thisArg, undefined, 1);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     info->SetCallArg(value.GetTaggedValue());
     [[maybe_unused]] JSTaggedValue res = JSFunction::Call(info);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);

     // 8. Perform ! AsyncGeneratorResumeNext(generator).
     AsyncGeneratorResumeNext(thread, generator);
     // 9. Return undefined.
@@ -187,6 +192,7 @@ JSTaggedValue JSAsyncGeneratorObject::AsyncGeneratorResumeNext(JSThread *thread,
     // 11. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected).
     JSHandle<PromiseCapability> tcap =
         JSPromise::NewPromiseCapability(thread, JSHandle<JSTaggedValue>::Cast(env->GetPromiseFunction()));
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     [[maybe_unused]] JSTaggedValue pres = BuiltinsPromise::PerformPromiseThen(
         thread, handPromise, JSHandle<JSTaggedValue>::Cast(onFulfilled),
         JSHandle<JSTaggedValue>::Cast(onFulRejected), tcap);
@@ -246,6 +252,7 @@ JSTaggedValue JSAsyncGeneratorObject::AsyncGeneratorEnqueue(JSThread *thread, const JSHandle<JSTaggedValue> &gen,
     ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
     JSHandle<PromiseCapability> pcap =
         JSPromise::NewPromiseCapability(thread, JSHandle<JSTaggedValue>::Cast(env->GetPromiseFunction()));
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     // 2. Let check be AsyncGeneratorValidate(generator, generatorBrand).
     AsyncGeneratorValidate(thread, gen, JSTaggedValue::Undefined());
     // 3. If check is an abrupt completion, then
@@ -262,8 +269,10 @@ JSTaggedValue JSAsyncGeneratorObject::AsyncGeneratorEnqueue(JSThread *thread, const JSHandle<JSTaggedValue> &gen,
         JSHandle<JSTaggedValue> undefined = constants->GetHandledUndefined();
         EcmaRuntimeCallInfo* info = EcmaInterpreter::NewRuntimeCallInfo(thread, reject, thisArg, undefined, 1);
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
         info->SetCallArg(rstErr.GetTaggedValue());
         [[maybe_unused]] JSTaggedValue res = JSFunction::Call(info);
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);

         // c. Return promiseCapability.[[Promise]].
         JSHandle<JSTaggedValue> promise(thread, pcap->GetPromise());
@@ -271,6 +280,7 @@ JSTaggedValue JSAsyncGeneratorObject::AsyncGeneratorEnqueue(JSThread *thread, const JSHandle<JSTaggedValue> &gen,
     }
     // 4. Let queue be generator.[[AsyncGeneratorQueue]].
     JSHandle<JSObject> obj = JSTaggedValue::ToObject(thread, gen);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     JSHandle<JSAsyncGeneratorObject> generator = JSHandle<JSAsyncGeneratorObject>::Cast(obj);
     JSHandle<TaggedQueue> queue(thread, generator->GetAsyncGeneratorQueue());
     // 5. Let request be AsyncGeneratorRequest { [[Completion]]: completion, [[Capability]]: promiseCapability }.
@@ -316,6 +326,7 @@ JSTaggedValue JSAsyncGeneratorObject::PromiseResolve(JSThread *thread, const JSHandle<JSTaggedValue> &value)
     JSHandle<JSTaggedValue> thisArg = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo* info = EcmaInterpreter::NewRuntimeCallInfo(thread, resolve, thisArg, undefined, 1);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     info->SetCallArg(value.GetTaggedValue());
     [[maybe_unused]] JSTaggedValue res = JSFunction::Call(info);
+ JSHandle bigint(thread, JSTaggedValue::Exception()); + THROW_RANGE_ERROR_AND_RETURN(thread, "Maximum BigInt size exceeded", bigint); + } + if (base->GetLength() == 1 && base->GetDigit(0) == 2) { // 2 : We use fast path processing 2 ^ n uint32_t needLength = expValue / DATEBITS + 1; JSHandle bigint = CreateBigint(thread, needLength); @@ -1032,11 +1044,13 @@ JSHandle BigInt::Exponentiate(JSThread *thread, JSHandle base, J expValue >>= 1; for (; expValue; expValue >>= 1) { temp.Update(BigInt::Multiply(thread, temp, temp)); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(BigInt, thread); if (expValue & 1) { if (result.GetTaggedValue().IsNull()) { result.Update(temp); } else { result.Update(BigInt::Multiply(thread, result, temp)); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(BigInt, thread); } } } @@ -1450,6 +1464,7 @@ JSHandle BigInt::Remainder(JSThread *thread, JSHandle n, JSHandl JSHandle BigInt::FloorMod(JSThread *thread, JSHandle leftVal, JSHandle rightVal) { JSHandle remainder = Remainder(thread, leftVal, rightVal); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(BigInt, thread); if (leftVal->GetSign() && !remainder->IsZero()) { return Add(thread, remainder, rightVal); } diff --git a/ecmascript/js_collator.cpp b/ecmascript/js_collator.cpp index 363316fa5e0ab12521ca667aeaed9022e377f6ba..4691cfb60bdeada3205892a3141ca7557aef5c06 100644 --- a/ecmascript/js_collator.cpp +++ b/ecmascript/js_collator.cpp @@ -19,6 +19,7 @@ #include "ecmascript/global_env.h" #include "ecmascript/mem/c_string.h" #include "ecmascript/mem/barriers-inl.h" +#include "ecmascript/object_factory-inl.h" #include "unicode/udata.h" @@ -159,8 +160,10 @@ JSHandle JSCollator::InitializeCollator(JSThread *thread, } ResolvedLocale r = JSLocale::ResolveLocale(thread, availableLocales, requestedLocales, matcher, relevantExtensionKeys); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSCollator, thread); icu::Locale icuLocale = r.localeData; JSHandle localeStr = intl::LocaleHelper::ToLanguageTag(thread, icuLocale); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSCollator, thread); collator->SetLocale(thread, localeStr.GetTaggedValue()); ASSERT_PRINT(!icuLocale.isBogus(), "icuLocale is bogus"); @@ -415,6 +418,7 @@ JSHandle JSCollator::ResolvedOptions(JSThread *thread, const JSHandle< JSHandle property = globalConst->GetHandledLocaleString(); JSHandle locale(thread, collator->GetLocale()); JSObject::CreateDataPropertyOrThrow(thread, options, property, locale); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSObject, thread); // [[Usage]] UsageOption usageOption = collator->GetUsage(); diff --git a/ecmascript/js_date_time_format.cpp b/ecmascript/js_date_time_format.cpp index 3cac3e066e3fcef489c4a63d1750375dd23675aa..8bc273c1a08d172496c518f8886610457b102fde 100644 --- a/ecmascript/js_date_time_format.cpp +++ b/ecmascript/js_date_time_format.cpp @@ -23,7 +23,7 @@ #include "ecmascript/js_intl.h" #include "ecmascript/js_locale.h" #include "ecmascript/js_object-inl.h" -#include "ecmascript/object_factory.h" +#include "ecmascript/object_factory-inl.h" namespace panda::ecmascript { struct CommonDateFormatPart { @@ -492,6 +492,7 @@ JSHandle JSDateTimeFormat::InitializeDateTimeFormat(JSThread * } } JSHandle localeStr = intl::LocaleHelper::ToLanguageTag(thread, resolvedIcuLocaleCopy); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSDateTimeFormat, thread); dateTimeFormat->SetLocale(thread, localeStr.GetTaggedValue()); // Set dateTimeFormat.[[boundFormat]]. 
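The js_bigint.cpp hunks above tighten several edge cases: LeftShiftHelper now handles a zero operand up front instead of asserting on the shift width, BigIntToInt64/BigIntToUint64 assign *lossless on both branches so callers never read an uninitialized flag, and Exponentiate rejects any exponent wider than one digit. That last guard is pure arithmetic: a digit holds 32 bits, so a two-digit exponent value is at least 2 ** 32, and since 2 ** e needs e + 1 bits even the smallest base yields a result of at least 2 ** (2 ** 32). A standalone sketch of the check, with illustrative names that are not the engine's:

```
#include <cstddef>
#include <cstdint>
#include <stdexcept>

constexpr uint32_t kDigitBits = 32;  // stand-in for DATEBITS

// digitCount > 1 means the exponent is >= 2^kDigitBits, and since
// bits(2^e) == e + 1 the result would need more than 2^32 bits of storage.
void CheckExponentDigitCount(size_t digitCount)
{
    if (digitCount * kDigitBits > kDigitBits) {  // i.e. digitCount > 1
        throw std::range_error("Maximum BigInt size exceeded");
    }
}
```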
@@ -844,6 +845,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandle locale(thread, dateTimeFormat->GetLocale()); JSHandle property = globalConst->GetHandledLocaleString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, locale); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[Calendar]] JSMutableHandle calendarValue(thread, dateTimeFormat->GetCalendar()); icu::SimpleDateFormat *icuSimpleDateFormat = dateTimeFormat->GetIcuSimpleDateFormat(); @@ -860,6 +862,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledCalendarString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, calendarValue); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[NumberingSystem]] JSHandle numberingSystem(thread, dateTimeFormat->GetNumberingSystem()); if (numberingSystem->IsUndefined()) { @@ -867,6 +870,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledNumberingSystemString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, numberingSystem); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[TimeZone]] JSMutableHandle timezoneValue(thread, dateTimeFormat->GetTimeZone()); const icu::TimeZone &icuTZ = calendar->getTimeZone(); @@ -885,6 +889,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledTimeZoneString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, timezoneValue); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[HourCycle]] // For web compatibility reasons, if the property "hourCycle" is set, the "hour12" property should be set to true // when "hourCycle" is "h11" or "h12", or to false when "hourCycle" is "h23" or "h24". @@ -895,6 +900,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledHourCycleString(); hcValue = ToHourCycleEcmaString(thread, dateTimeFormat->GetHourCycle()); JSObject::CreateDataPropertyOrThrow(thread, options, property, hcValue); + RETURN_IF_ABRUPT_COMPLETION(thread); if (hc == HourCycleOption::H11 || hc == HourCycleOption::H12) { JSHandle trueValue(thread, JSTaggedValue::True()); hcValue = trueValue; @@ -904,6 +910,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledHour12String(); JSObject::CreateDataPropertyOrThrow(thread, options, property, hcValue); + RETURN_IF_ABRUPT_COMPLETION(thread); } // [[DateStyle]], [[TimeStyle]]. 
icu::UnicodeString patternUnicode; @@ -921,6 +928,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandle fsdValue(thread, JSTaggedValue(fsd)); property = globalConst->GetHandledFractionalSecondDigitsString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, fsdValue); + RETURN_IF_ABRUPT_COMPLETION(thread); } } property = JSHandle::Cast(factory->NewFromStdString(item.property)); @@ -928,6 +936,7 @@ void JSDateTimeFormat::ResolvedOptions(JSThread *thread, const JSHandle::Cast(factory->NewFromStdString(pair.second)); JSObject::CreateDataPropertyOrThrow(thread, options, property, hcValue); + RETURN_IF_ABRUPT_COMPLETION(thread); break; } } @@ -1120,6 +1129,7 @@ JSHandle JSDateTimeFormat::ConstructFDateIntervalToJSArray(JSThread *th JSHandle value = JSHandle::Cast( ToValueString(thread, TrackValue(part.fBeginIndex, part.fEndIndex, begin, end))); JSObject::SetProperty(thread, element, thread->GlobalConstants()->GetHandledSourceString(), value, true); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSArray, thread); } return array; } diff --git a/ecmascript/js_displaynames.cpp b/ecmascript/js_displaynames.cpp index c43377262606dcfb03e5c4d7be44dd76dd65bb57..364b79b0192061b5330fa0763a8a1c14dbdd79d5 100644 --- a/ecmascript/js_displaynames.cpp +++ b/ecmascript/js_displaynames.cpp @@ -20,6 +20,7 @@ #include "ecmascript/intl/locale_helper.h" #include "ecmascript/global_env.h" #include "ecmascript/global_env_constants.h" +#include "ecmascript/object_factory-inl.h" #include "unicode/errorcode.h" #include "unicode/locdspnm.h" @@ -193,6 +194,7 @@ JSHandle JSDisplayNames::InitializeDisplayNames(JSThread *thread // 18. Set displayNames.[[Locale]] to the value of r.[[Locale]]. JSHandle localeStr = intl::LocaleHelper::ToLanguageTag(thread, icuLocale); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSDisplayNames, thread); displayNames->SetLocale(thread, localeStr.GetTaggedValue()); // 19. Let dataLocale be r.[[dataLocale]]. // 20. Let dataLocaleData be localeData.[[]]. 
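The Intl ResolvedOptions hunks in this patch (collator and date-time format above, display names, list format and number format further down) repeat one fix: JSObject::CreateDataPropertyOrThrow can complete abruptly, so each property definition is now followed by an abrupt-completion guard. Condensed into a single hypothetical helper (DEFINE_OR_RETURN is not part of the codebase; it only names the idiom):

```
// Hypothetical condensation of the repeated define-then-check idiom.
#define DEFINE_OR_RETURN(thread, options, key, value)             \
    do {                                                          \
        JSObject::CreateDataPropertyOrThrow((thread), (options),  \
                                            (key), (value));      \
        RETURN_IF_ABRUPT_COMPLETION(thread);                      \
    } while (false)
```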
@@ -256,6 +258,7 @@ JSHandle JSDisplayNames::CanonicalCodeForDisplayNames(JSThread *thre THROW_TYPE_ERROR_AND_RETURN(thread, "not a structurally valid", code); } JSHandle codeStr = intl::LocaleHelper::CanonicalizeUnicodeLocaleId(thread, code); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(EcmaString, thread); icu::LocaleDisplayNames *icuLocaldisplaynames = displayNames->GetIcuLocaleDisplayNames(); icu::UnicodeString result; std::string codeString = intl::LocaleHelper::ConvertToStdString(codeStr); @@ -378,23 +381,27 @@ void JSDisplayNames::ResolvedOptions(JSThread *thread, const JSHandle propertyKey = globalConst->GetHandledLocaleString(); JSHandle locale(thread, displayNames->GetLocale()); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, locale); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[Style]] StyOption style = displayNames->GetStyle(); propertyKey = globalConst->GetHandledStyleString(); JSHandle styleString = StyOptionToEcmaString(thread, style); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, styleString); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[type]] TypednsOption type = displayNames->GetType(); propertyKey = globalConst->GetHandledTypeString(); JSHandle typeString = TypeOptionToEcmaString(thread, type); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, typeString); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[fallback]] FallbackOption fallback = displayNames->GetFallback(); propertyKey = globalConst->GetHandledFallbackString(); JSHandle fallbackString = FallbackOptionToEcmaString(thread, fallback); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, fallbackString); + RETURN_IF_ABRUPT_COMPLETION(thread); } } // namespace panda::ecmascript diff --git a/ecmascript/js_for_in_iterator.cpp b/ecmascript/js_for_in_iterator.cpp index a39828abe89bfe2cb0210b7c17abddcfb5d072fe..cbb3477de877e94460c613d6514cb66f5e4b5fe6 100644 --- a/ecmascript/js_for_in_iterator.cpp +++ b/ecmascript/js_for_in_iterator.cpp @@ -143,6 +143,7 @@ std::pair JSForInIterator::NextInternal(JSThread *thread, c visited.Update(JSTaggedValue(newQueue)); it->SetVisitedObjs(thread, visited); JSTaggedValue proto = JSTaggedValue::GetPrototype(thread, object); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, std::make_pair(JSTaggedValue::Exception(), false)); it->SetObject(thread, proto); it->SetWasVisited(false); it->SetHasVisitObjs(true); diff --git a/ecmascript/js_function.cpp b/ecmascript/js_function.cpp index 74837e9b0747050701f8339ebe2e001a4b56b4b4..17a24e0d0a316be9fca0cd299f9d69023b9ed616 100644 --- a/ecmascript/js_function.cpp +++ b/ecmascript/js_function.cpp @@ -24,7 +24,6 @@ #include "ecmascript/jspandafile/class_info_extractor.h" #include "ecmascript/js_handle.h" #include "ecmascript/js_promise.h" -#include "ecmascript/js_proxy.h" #include "ecmascript/js_tagged_value-inl.h" #include "ecmascript/mem/c_containers.h" #include "ecmascript/module/js_module_source_text.h" @@ -238,6 +237,7 @@ bool JSFunction::MakeConstructor(JSThread *thread, const JSHandle &f PropertyDescriptor constructorDesc(thread, JSHandle::Cast(func), writable, false, true); status = JSTaggedValue::DefinePropertyOrThrow(thread, proto, constructorKey, constructorDesc); } + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); ASSERT_PRINT(status, "DefineProperty construct failed"); // func.prototype = proto @@ -402,6 +402,7 @@ JSTaggedValue JSFunction::InvokeOptimizedEntrypoint(JSThread *thread, JSHandleIsFastCall()) { if (needPushUndefined) { info = EcmaInterpreter::ReBuildRuntimeCallInfo(thread, info, 
numArgs); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } JSTaggedType *stackArgs = info->GetArgs(); stackArgs[1] = stackArgs[0]; @@ -590,6 +591,7 @@ void JSProxyRevocFunction::ProxyRevocFunctions(const JSThread *thread, const JSH // 5 ~ 6 Set internal slot of p to null. proxyHandle->SetTarget(thread, JSTaggedValue::Null()); proxyHandle->SetHandler(thread, JSTaggedValue::Null()); + proxyHandle->SetIsRevoked(true); } JSTaggedValue JSFunction::AccessCallerArgumentsThrowTypeError(EcmaRuntimeCallInfo *argv) diff --git a/ecmascript/js_function.h b/ecmascript/js_function.h index 52dbd2ba63cfcfa217657dbb85c8484e6e99f717..695058748a8a97c19ae2731ae990b73264f343f1 100644 --- a/ecmascript/js_function.h +++ b/ecmascript/js_function.h @@ -20,6 +20,7 @@ #include "ecmascript/ecma_macros.h" #include "ecmascript/js_object-inl.h" #include "ecmascript/lexical_env.h" +#include "ecmascript/js_proxy.h" namespace panda::ecmascript { class JSThread; @@ -66,6 +67,7 @@ public: }; static_assert((JSFunctionBase::SIZE % static_cast(MemAlignment::MEM_ALIGN_OBJECT)) == 0); +static_assert(JSFunctionBase::METHOD_OFFSET == JSProxy::METHOD_OFFSET); class JSFunction : public JSFunctionBase { public: diff --git a/ecmascript/js_hclass-inl.h b/ecmascript/js_hclass-inl.h index dae4710f10de62f91bf3a935b5d895de3b130aea..bb1d10d67f83b60b785edb0335defba9ae19e58a 100644 --- a/ecmascript/js_hclass-inl.h +++ b/ecmascript/js_hclass-inl.h @@ -206,6 +206,10 @@ inline size_t JSHClass::SizeFromJSHClass(TaggedObject *header) size = TreeEcmaString::SIZE; size = AlignUp(size, static_cast(MemAlignment::MEM_ALIGN_OBJECT)); break; + case JSType::SLICED_STRING: + size = SlicedString::SIZE; + size = AlignUp(size, static_cast(MemAlignment::MEM_ALIGN_OBJECT)); + break; case JSType::MACHINE_CODE_OBJECT: size = reinterpret_cast(header)->GetMachineCodeObjectSize(); size = AlignUp(size, static_cast(MemAlignment::MEM_ALIGN_OBJECT)); @@ -230,6 +234,7 @@ inline void JSHClass::Copy(const JSThread *thread, const JSHClass *jshclass) // copy jshclass SetPrototype(thread, jshclass->GetPrototype()); SetBitField(jshclass->GetBitField()); + SetIsAllTaggedProp(jshclass->IsAllTaggedProp()); SetNumberOfProps(jshclass->NumberOfProps()); } diff --git a/ecmascript/js_hclass.cpp b/ecmascript/js_hclass.cpp index ffede585755d33e51b7dbcecb40ae0b417707d4b..b38809f179ce92c487b1e2d563f5f9bf7f511f1c 100644 --- a/ecmascript/js_hclass.cpp +++ b/ecmascript/js_hclass.cpp @@ -13,12 +13,14 @@ * limitations under the License. 
*/ +#include "ecmascript/elements.h" #include "ecmascript/js_hclass-inl.h" #include #include "ecmascript/base/config.h" #include "ecmascript/global_env.h" +#include "ecmascript/tagged_array.h" #include "ecmascript/vtable.h" #include "ecmascript/ic/proto_change_details.h" #include "ecmascript/js_object-inl.h" @@ -148,7 +150,8 @@ void JSHClass::Initialize(const JSThread *thread, uint32_t size, JSType type, ui SetExtensible(true); SetIsPrototype(false); SetHasDeleteProperty(false); - SetElementRepresentation(Representation::NONE); + SetIsAllTaggedProp(true); + SetElementsKind(ElementsKind::GENERIC); SetTransitions(thread, JSTaggedValue::Undefined()); SetProtoChangeMarker(thread, JSTaggedValue::Null()); SetProtoChangeDetails(thread, JSTaggedValue::Null()); @@ -207,6 +210,7 @@ void JSHClass::TransitionElementsToDictionary(const JSThread *thread, const JSHa } obj->GetJSHClass()->SetIsDictionaryElement(true); obj->GetJSHClass()->SetIsStableElements(false); + obj->GetJSHClass()->SetElementsKind(ElementsKind::GENERIC); } JSHandle JSHClass::SetPropertyOfObjHClass(const JSThread *thread, JSHandle &jshclass, @@ -245,7 +249,7 @@ void JSHClass::AddProperty(const JSThread *thread, const JSHandle &obj JSHandle jshclass(thread, obj->GetJSHClass()); JSHClass *newClass = jshclass->FindTransitions(key.GetTaggedValue(), JSTaggedValue(attr.GetPropertyMetaData())); if (newClass != nullptr) { - obj->SetClass(newClass); + obj->SynchronizedSetClass(newClass); #if ECMASCRIPT_ENABLE_IC JSHClass::NotifyHclassChanged(thread, jshclass, JSHandle(thread, newClass), key.GetTaggedValue()); #endif @@ -279,7 +283,7 @@ void JSHClass::AddProperty(const JSThread *thread, const JSHandle &obj #if ECMASCRIPT_ENABLE_IC JSHClass::NotifyHclassChanged(thread, jshclass, newJsHClass, key.GetTaggedValue()); #endif - obj->SetClass(*newJsHClass); + obj->SynchronizedSetClass(*newJsHClass); // Maintaining subtyping is no longer required when transition succeeds. if (jshclass->HasTSSubtyping()) { @@ -406,7 +410,7 @@ void JSHClass::ShouldUpdateProtoClass(const JSThread *thread, const JSHandleGetTaggedObject())->SetClass(*newProtoClass); + JSObject::Cast(proto->GetTaggedObject())->SynchronizedSetClass(*newProtoClass); newProtoClass->SetIsPrototype(true); } } @@ -428,7 +432,86 @@ void JSHClass::TransitionToDictionary(const JSThread *thread, const JSHandle(thread, obj->GetJSHClass()), newJsHClass); #endif - obj->SetClass(newJsHClass); + obj->SynchronizedSetClass(*newJsHClass); + } +} + +void JSHClass::TransitionForRepChange(const JSThread *thread, const JSHandle &receiver, + const JSHandle &key, PropertyAttributes attr) +{ + JSHandle oldHClass(thread, receiver->GetJSHClass()); + + // 1. Create hclass and copy layout + JSHandle newHClass = JSHClass::Clone(thread, oldHClass); + + JSHandle oldLayout(thread, newHClass->GetLayout()); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle newLayout(factory->CopyLayoutInfo(oldLayout)); + newHClass->SetLayout(thread, newLayout); + + // 2. update attr + auto hclass = JSHClass::Cast(newHClass.GetTaggedValue().GetTaggedObject()); + int entry = JSHClass::FindPropertyEntry(thread, hclass, key.GetTaggedValue()); + ASSERT(entry != -1); + newLayout->SetNormalAttr(thread, entry, attr); + + // 3. update hclass in object. +#if ECMASCRIPT_ENABLE_IC + JSHClass::NotifyHclassChanged(thread, oldHClass, newHClass, key.GetTaggedValue()); +#endif + + receiver->SynchronizedSetClass(*newHClass); + // 4. 
Maybe transition and maintain subtyping check +} + +void JSHClass::TransitToElementsKind(const JSThread *thread, const JSHandle &array) +{ + JSTaggedValue elements = array->GetElements(); + if (!elements.IsTaggedArray()) { + return; + } + ElementsKind newKind = ElementsKind::NONE; + auto elementArray = TaggedArray::Cast(elements); + uint32_t length = elementArray->GetLength(); + for (uint32_t i = 0; i < length; i++) { + JSTaggedValue value = elementArray->Get(i); + newKind = Elements::ToElementsKind(value, newKind); + } + ElementsKind current = array->GetJSHClass()->GetElementsKind(); + if (newKind == current) { + return; + } + auto arrayHClassIndexMap = thread->GetArrayHClassIndexMap(); + if (arrayHClassIndexMap.find(newKind) != arrayHClassIndexMap.end()) { + auto index = static_cast(thread->GetArrayHClassIndexMap().at(newKind)); + auto hclassVal = thread->GlobalConstants()->GetGlobalConstantObject(index); + JSHClass *hclass = JSHClass::Cast(hclassVal.GetTaggedObject()); + array->SetClass(hclass); + } +} + +void JSHClass::TransitToElementsKind( + const JSThread *thread, const JSHandle &object, const JSHandle &value, ElementsKind kind) +{ + if (!object->IsJSArray()) { + return; + } + ElementsKind current = object->GetJSHClass()->GetElementsKind(); + if (Elements::IsGeneric(current)) { + return; + } + auto newKind = Elements::ToElementsKind(value.GetTaggedValue(), kind); + // Merge current kind and new kind + newKind = Elements::MergeElementsKind(current, newKind); + if (newKind == current) { + return; + } + auto arrayHClassIndexMap = thread->GetArrayHClassIndexMap(); + if (arrayHClassIndexMap.find(newKind) != arrayHClassIndexMap.end()) { + auto index = static_cast(thread->GetArrayHClassIndexMap().at(newKind)); + auto hclassVal = thread->GlobalConstants()->GetGlobalConstantObject(index); + JSHClass *hclass = JSHClass::Cast(hclassVal.GetTaggedObject()); + object->SetClass(hclass); } } @@ -614,7 +697,10 @@ void JSHClass::RefreshUsers(const JSThread *thread, const JSHandle &ol ASSERT(newHclass->IsPrototype()); bool onceRegistered = UnregisterOnProtoChain(thread, oldHclass); - newHclass->SetProtoChangeDetails(thread, oldHclass->GetProtoChangeDetails()); + // oldHclass is already marked. Only update newHclass.protoChangeDetails if it doesn't already exist, so it can be used later. + if (!newHclass->GetProtoChangeDetails().IsProtoChangeDetails()) { + newHclass->SetProtoChangeDetails(thread, oldHclass->GetProtoChangeDetails()); + } oldHclass->SetProtoChangeDetails(thread, JSTaggedValue::Undefined()); if (onceRegistered) { if (newHclass->GetProtoChangeDetails().IsProtoChangeDetails()) { @@ -657,6 +743,7 @@ PropertyLookupResult JSHClass::LookupPropertyInAotHClass(const JSThread *thread, if (attr.IsAccessor()) { result.SetIsAccessor(true); } + result.SetRepresentation(attr.GetRepresentation()); result.SetIsWritable(attr.IsWritable()); return result; } @@ -683,6 +770,36 @@ PropertyLookupResult JSHClass::LookupPropertyInAotHClass(const JSThread *thread, return result; } +PropertyLookupResult JSHClass::LookupPropertyInBuiltinPrototypeHClass(const JSThread *thread, JSHClass *hclass, + JSTaggedValue key) +{ + DISALLOW_GARBAGE_COLLECTION; + ASSERT(hclass->IsPrototype()); + + PropertyLookupResult result; + int entry = JSHClass::FindPropertyEntry(thread, hclass, key); + // When the property is not found, the value of 'entry' is -1. + // Currently, not all methods on builtin prototypes have been converted to inlined properties. + // Therefore, a non-inlined method is likewise treated as not found.
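Stepping back to the TransitToElementsKind pair above (the lookup helper's body continues right after this note): both overloads compute a widened kind and only swap the array's hclass when the merged kind actually changes. Below is a toy model of that widening lattice; the real enum lives in ecmascript/elements.h, and these bit values are assumptions:

```
#include <cstdint>

enum class Kind : uint8_t {
    NONE = 0x00,
    INT = 0x01,
    NUMBER = 0x03,   // INT | 0x02: ints widen into numbers
    GENERIC = 0x1F,  // top of the lattice
};

// Widening merge as a bitwise union: once a bit is set it never clears,
// mirroring how a merged kind only ever generalizes.
constexpr Kind Merge(Kind a, Kind b)
{
    return static_cast<Kind>(static_cast<uint8_t>(a) | static_cast<uint8_t>(b));
}

static_assert(Merge(Kind::INT, Kind::NUMBER) == Kind::NUMBER, "int + double stays numeric");
static_assert(Merge(Kind::NUMBER, Kind::GENERIC) == Kind::GENERIC, "GENERIC absorbs everything");
```

The early return when newKind == current keeps hot stores from touching the hclass at all.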
+ if (entry == -1 || static_cast(entry) >= hclass->GetInlinedProperties()) { + result.SetIsFound(false); + return result; + } + + result.SetIsFound(true); + result.SetIsLocal(true); + uint32_t offset = hclass->GetInlinedPropertiesOffset(entry); + result.SetOffset(offset); + PropertyAttributes attr = LayoutInfo::Cast(hclass->GetLayout().GetTaggedObject())->GetAttr(entry); + result.SetIsNotHole(true); + if (attr.IsAccessor()) { + result.SetIsAccessor(true); + } + result.SetRepresentation(attr.GetRepresentation()); + result.SetIsWritable(attr.IsWritable()); + return result; +} + void JSHClass::CopyTSInheritInfo(const JSThread *thread, const JSHandle &oldHClass, JSHandle &newHClass) { @@ -704,6 +821,9 @@ bool JSHClass::DumpForProfile(const JSHClass *hclass, PGOHClassLayoutDesc &desc, if (hclass->IsDictionaryMode()) { return false; } + if (kind == PGOObjKind::ELEMENT) { + desc.UpdateElementKind(hclass->GetElementsKind()); + } LayoutInfo *layout = LayoutInfo::Cast(hclass->GetLayout().GetTaggedObject()); int element = static_cast(hclass->NumberOfProps()); diff --git a/ecmascript/js_hclass.h b/ecmascript/js_hclass.h index 66f30431e8db03d1a0aff4cc1422087eba8eaf0c..1963e4a9d36e8681db6be2a74e095ce4f12a0b52 100644 --- a/ecmascript/js_hclass.h +++ b/ecmascript/js_hclass.h @@ -17,12 +17,12 @@ #define ECMASCRIPT_JS_HCLASS_H #include "ecmascript/ecma_macros.h" +#include "ecmascript/elements.h" #include "ecmascript/js_tagged_value.h" #include "ecmascript/mem/tagged_object.h" #include "ecmascript/mem/barriers.h" #include "ecmascript/mem/slots.h" #include "ecmascript/mem/visitor.h" -#include "ecmascript/pgo_profiler/pgo_profiler_layout.h" #include "ecmascript/property_attributes.h" #include "libpandabase/utils/bit_field.h" @@ -62,6 +62,14 @@ namespace panda::ecmascript { class ProtoChangeDetails; class PropertyLookupResult; +namespace pgo { + class PGOHClassLayoutDesc; + enum class PGOObjKind; +} // namespace pgo +using PGOHClassLayoutDesc = pgo::PGOHClassLayoutDesc; +using PGOObjKind = pgo::PGOObjKind; + +struct Reference; // NOLINTNEXTLINE(cppcoreguidelines-macro-usage) #define JSTYPE_DECL /* //////////////////////////////////////////////////////////////////////////////-PADDING */ \ @@ -186,6 +194,7 @@ class PropertyLookupResult; HCLASS, /* //////////////////////////////////////////////////////////////////////////////////-PADDING */ \ LINE_STRING, /* //////////////////////////////////////////////////////////////////////////////////-PADDING */\ CONSTANT_STRING, /* ///////////////////////////////////////////////////////////////////////////////-PADDING */\ + SLICED_STRING, /* ////////////////////////////////////////////////////////////////////////////////-PADDING */ \ TREE_STRING, /* //////////////////////////////////////////////////////////////////////////////////-PADDING */ \ BIGINT, /* //////////////////////////////////////////////////////////////////////////////////-PADDING */ \ TAGGED_ARRAY, /* //////////////////////////////////////////////////////////////////////////////////-PADDING */ \ @@ -290,39 +299,45 @@ class JSHClass : public TaggedObject { public: static constexpr int TYPE_BITFIELD_NUM = 8; static constexpr int LEVEL_BTTFIELD_NUM = 5; - using ObjectTypeBits = BitField; // 8 - using CallableBit = ObjectTypeBits::NextFlag; - using ConstructorBit = CallableBit::NextFlag; // 10 - using ExtensibleBit = ConstructorBit::NextFlag; - using IsPrototypeBit = ExtensibleBit::NextFlag; - using ElementRepresentationBits = IsPrototypeBit::NextField; // 3 means next 3 bit - using DictionaryElementBits = 
ElementRepresentationBits::NextFlag; // 16 - using IsDictionaryBit = DictionaryElementBits::NextFlag; // 17 - using IsStableElementsBit = IsDictionaryBit::NextFlag; // 18 - using HasConstructorBits = IsStableElementsBit::NextFlag; // 19 - using IsLiteralBit = HasConstructorBits::NextFlag; // 20 - using ClassConstructorBit = IsLiteralBit::NextFlag; // 21 - using ClassPrototypeBit = ClassConstructorBit::NextFlag; // 22 - using GlobalConstOrBuiltinsObjectBit = ClassPrototypeBit::NextFlag; // 23 - using IsTSBit = GlobalConstOrBuiltinsObjectBit::NextFlag; // 24 - using LevelBit = IsTSBit::NextField; // 29 - using IsJSFunctionBit = LevelBit::NextFlag; // 30 - using IsOptimizedBit = IsJSFunctionBit::NextFlag; // 31 - using CanFastCallBit = IsOptimizedBit::NextFlag; // 32 + static constexpr int ELEMENTS_KIND_BITFIELD_NUM = 5; + static constexpr unsigned BITS_PER_BYTE = 8; + using ObjectTypeBits = BitField; // 8 + using CallableBit = ObjectTypeBits::NextFlag; // 9 + using ConstructorBit = CallableBit::NextFlag; // 10 + using ExtensibleBit = ConstructorBit::NextFlag; // 11 + using IsPrototypeBit = ExtensibleBit::NextFlag; // 12 + using ElementsKindBits = IsPrototypeBit::NextField; // 13-17 + using DictionaryElementBits = ElementsKindBits::NextFlag; // 18 + using IsDictionaryBit = DictionaryElementBits::NextFlag; // 19 + using IsStableElementsBit = IsDictionaryBit::NextFlag; // 20 + using HasConstructorBits = IsStableElementsBit::NextFlag; // 21 + using IsClassConstructorOrPrototypeBit = HasConstructorBits::NextFlag; // 22 + using GlobalConstOrBuiltinsObjectBit = IsClassConstructorOrPrototypeBit::NextFlag; // 23 + using IsTSBit = GlobalConstOrBuiltinsObjectBit::NextFlag; // 24 + using LevelBit = IsTSBit::NextField; // 25-29 + using IsJSFunctionBit = LevelBit::NextFlag; // 30 + using IsOptimizedBit = IsJSFunctionBit::NextFlag; // 31 + using CanFastCallBit = IsOptimizedBit::NextFlag; // 32 + using BitFieldLastBit = CanFastCallBit; + static_assert(BitFieldLastBit::START_BIT + BitFieldLastBit::SIZE <= sizeof(uint32_t) * BITS_PER_BYTE, "Invalid"); static constexpr int DEFAULT_CAPACITY_OF_IN_OBJECTS = 4; - static constexpr int MAX_CAPACITY_OF_OUT_OBJECTS = - PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES - DEFAULT_CAPACITY_OF_IN_OBJECTS; static constexpr int OFFSET_MAX_OBJECT_SIZE_IN_WORDS_WITHOUT_INLINED = 5; static constexpr int OFFSET_MAX_OBJECT_SIZE_IN_WORDS = PropertyAttributes::OFFSET_BITFIELD_NUM + OFFSET_MAX_OBJECT_SIZE_IN_WORDS_WITHOUT_INLINED; static constexpr int MAX_OBJECT_SIZE_IN_WORDS = (1U << OFFSET_MAX_OBJECT_SIZE_IN_WORDS) - 1; + static constexpr uint64_t OPTIMIZED_BIT = 1LU << IsOptimizedBit::START_BIT; + static constexpr uint64_t FASTCALL_BIT = 1LU << CanFastCallBit::START_BIT; + static constexpr uint64_t OPTIMIZED_FASTCALL_BITS = OPTIMIZED_BIT | FASTCALL_BIT; - using NumberOfPropsBits = BitField; // 10 + using NumberOfPropsBits = BitField; // 10 using InlinedPropsStartBits = NumberOfPropsBits::NextField; // 15 + OFFSET_MAX_OBJECT_SIZE_IN_WORDS_WITHOUT_INLINED>; // 15 using ObjectSizeInWordsBits = InlinedPropsStartBits::NextField; // 30 - using HasDeletePropertyBit = ObjectSizeInWordsBits::NextFlag; + using HasDeletePropertyBit = ObjectSizeInWordsBits::NextFlag; // + using IsAllTaggedPropBit = HasDeletePropertyBit::NextFlag; // 32 + using BitField1LastBit = IsAllTaggedPropBit; + static_assert(BitField1LastBit::START_BIT + BitField1LastBit::SIZE <= sizeof(uint32_t) * BITS_PER_BYTE, "Invalid"); static JSHClass *Cast(const TaggedObject *object); @@ -350,6 +365,11 @@ public: static JSHandle 
TransProtoWithoutLayout(const JSThread *thread, const JSHandle &jshclass, const JSHandle &proto); static void TransitionToDictionary(const JSThread *thread, const JSHandle &obj); + static void TransitionForRepChange(const JSThread *thread, const JSHandle &receiver, + const JSHandle &key, PropertyAttributes attr); + static void TransitToElementsKind(const JSThread *thread, const JSHandle &array); + static void TransitToElementsKind(const JSThread *thread, const JSHandle &object, + const JSHandle &value, ElementsKind kind = ElementsKind::NONE); static JSHandle EnableProtoChangeMarker(const JSThread *thread, const JSHandle &jshclass); @@ -425,19 +445,16 @@ public: IsPrototypeBit::Set(flag, GetBitFieldAddr()); } - inline void SetIsLiteral(bool flag) const - { - IsLiteralBit::Set(flag, GetBitFieldAddr()); - } - inline void SetClassConstructor(bool flag) const { - ClassConstructorBit::Set(flag, GetBitFieldAddr()); + IsClassConstructorOrPrototypeBit::Set(flag, GetBitFieldAddr()); + SetConstructor(flag); } inline void SetClassPrototype(bool flag) const { - ClassPrototypeBit::Set(flag, GetBitFieldAddr()); + IsClassConstructorOrPrototypeBit::Set(flag, GetBitFieldAddr()); + SetIsPrototype(flag); } inline void SetGlobalConstOrBuiltinsObject(bool flag) const @@ -514,6 +531,11 @@ public: return GetObjectType() == JSType::CONSTANT_STRING; } + inline bool IsSlicedString() const + { + return GetObjectType() == JSType::SLICED_STRING; + } + inline bool IsTreeString() const { return GetObjectType() == JSType::TREE_STRING; @@ -1153,16 +1175,10 @@ public: return IsPrototypeBit::Decode(bits); } - inline bool IsLiteral() const - { - uint32_t bits = GetBitField(); - return IsLiteralBit::Decode(bits); - } - inline bool IsClassConstructor() const { uint32_t bits = GetBitField(); - return ClassConstructorBit::Decode(bits); + return IsClassConstructorOrPrototypeBit::Decode(bits) && IsConstructor(); } inline bool IsJSGlobalObject() const @@ -1173,7 +1189,7 @@ public: inline bool IsClassPrototype() const { uint32_t bits = GetBitField(); - return ClassPrototypeBit::Decode(bits); + return IsClassConstructorOrPrototypeBit::Decode(bits) && IsPrototype(); } inline bool IsGlobalConstOrBuiltinsObject() const @@ -1467,17 +1483,17 @@ public: return GetObjectType() == JSType::JS_MODULE_NAMESPACE; } - inline void SetElementRepresentation(Representation representation) + inline void SetElementsKind(ElementsKind kind) { uint32_t bits = GetBitField(); - uint32_t newVal = ElementRepresentationBits::Update(bits, representation); + uint32_t newVal = ElementsKindBits::Update(bits, kind); SetBitField(newVal); } - inline Representation GetElementRepresentation() const + inline ElementsKind GetElementsKind() const { uint32_t bits = GetBitField(); - return ElementRepresentationBits::Decode(bits); + return ElementsKindBits::Decode(bits); } inline void SetLevel(uint8_t level) @@ -1493,12 +1509,6 @@ public: return LevelBit::Decode(bits); } - inline void UpdateRepresentation(JSTaggedValue value) - { - Representation rep = PropertyAttributes::UpdateRepresentation(GetElementRepresentation(), value); - SetElementRepresentation(rep); - } - inline void SetIsDictionaryElement(bool value) { uint32_t newVal = DictionaryElementBits::Update(GetBitField(), value); @@ -1548,7 +1558,7 @@ public: inline void IncNumberOfProps() { - ASSERT(NumberOfProps() < PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES); + ASSERT(NumberOfProps() < PropertyAttributes::MAX_FAST_PROPS_CAPACITY); SetNumberOfProps(NumberOfProps() + 1); } @@ -1640,9 +1650,22 @@ public: return 
HasDeletePropertyBit::Decode(bits); } + inline void SetIsAllTaggedProp(bool flag) const + { + IsAllTaggedPropBit::Set(flag, GetBitField1Addr()); + } + + inline bool IsAllTaggedProp() const + { + uint32_t bits = GetBitField1(); + return IsAllTaggedPropBit::Decode(bits); + } + inline static int FindPropertyEntry(const JSThread *thread, JSHClass *hclass, JSTaggedValue key); static PropertyLookupResult LookupPropertyInAotHClass(const JSThread *thread, JSHClass *hclass, JSTaggedValue key); + static PropertyLookupResult LookupPropertyInBuiltinPrototypeHClass(const JSThread *thread, JSHClass *hclass, + JSTaggedValue key); static constexpr size_t PROTOTYPE_OFFSET = TaggedObjectSize(); ACCESSORS(Proto, PROTOTYPE_OFFSET, LAYOUT_OFFSET); @@ -1711,6 +1734,7 @@ public: using IsAccessorBit = IsNotHoleBit::NextFlag; using OffsetBits = IsAccessorBit::NextField; using WritableField = OffsetBits::NextFlag; + using RepresentationBits = WritableField::NextField; explicit PropertyLookupResult(uint32_t data = 0) : data_(data) {} ~PropertyLookupResult() = default; @@ -1793,6 +1817,16 @@ public: OffsetBits::Set(offset, &data_); } + inline void SetRepresentation(Representation rep) + { + RepresentationBits::Set(rep, &data_); + } + + inline Representation GetRepresentation() + { + return RepresentationBits::Get(data_); + } + inline uint32_t GetData() const { return data_; @@ -1802,7 +1836,7 @@ private: uint32_t data_ {0}; }; static_assert(PropertyLookupResult::OffsetBits::MaxValue() > - (PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES * JSTaggedValue::TaggedTypeSize())); + (PropertyAttributes::MAX_FAST_PROPS_CAPACITY * JSTaggedValue::TaggedTypeSize())); } // namespace panda::ecmascript #endif // ECMASCRIPT_JS_HCLASS_H diff --git a/ecmascript/js_iterator.cpp b/ecmascript/js_iterator.cpp index 751e89a9da7222e4665c9fd2269fd4650b87f7a9..cd92e32a52604c04bcba49e70b416f4d3c86da93 100644 --- a/ecmascript/js_iterator.cpp +++ b/ecmascript/js_iterator.cpp @@ -34,6 +34,7 @@ JSTaggedValue JSIterator::IteratorCloseAndReturn(JSThread *thread, const JSHandl JSHandle record = JSHandle(factory->NewCompletionRecord(CompletionRecordType::THROW, JSHandle(thread, exception))); JSHandle result = JSIterator::IteratorClose(thread, iter, record); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (result->IsCompletionRecord()) { return CompletionRecord::Cast(result->GetTaggedObject())->GetValue(); } @@ -88,6 +89,7 @@ JSHandle JSIterator::GetAsyncIterator(JSThread *thread, const JSH JSHandle syncIterator = GetIterator(thread, obj, func); JSHandle nextStr = thread->GlobalConstants()->GetHandledNextString(); JSHandle nextMethod = JSTaggedValue::GetProperty(thread, syncIterator, nextStr).GetValue(); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); JSHandle syncIteratorRecord = factory->NewAsyncIteratorRecord(syncIterator, nextMethod, false); JSHandle asyncIterator = @@ -99,6 +101,7 @@ JSHandle JSIterator::GetAsyncIterator(JSThread *thread, const JSH JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, method, obj, undefined, 0); JSTaggedValue ret = JSFunction::Call(info); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); JSHandle iterator(thread, ret); // 5.If Type(iterator) is not Object, throw a TypeError exception if (!iterator->IsECMAObject()) { @@ -116,6 +119,7 @@ JSHandle JSIterator::IteratorNext(JSThread *thread, const JSHandl // 1.If value was not passed, then Let result be Invoke(iterator, "next", «‍ »). 
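The PropertyLookupResult extension in js_hclass.h above (the IteratorNext hunk continues right below) appends a two-bit RepresentationBits field after WritableField, letting AOT-compiled code recover whether a slot stores a tagged value, a raw int, or a raw double. A toy 32-bit packing in the same spirit; field positions and widths here are assumptions, not the engine's layout:

```
#include <cstdint>

class LookupResult {
    static constexpr uint32_t OFFSET_SHIFT = 4;  // after four one-bit flags
    static constexpr uint32_t OFFSET_BITS = 12;
    static constexpr uint32_t REP_SHIFT = OFFSET_SHIFT + OFFSET_BITS + 1;  // +1 writable bit
    static constexpr uint32_t REP_BITS = 2;

public:
    void SetOffset(uint32_t offset)
    {
        data_ = (data_ & ~(((1U << OFFSET_BITS) - 1) << OFFSET_SHIFT)) | (offset << OFFSET_SHIFT);
    }

    void SetRepresentation(uint32_t rep)
    {
        data_ = (data_ & ~(((1U << REP_BITS) - 1) << REP_SHIFT)) | (rep << REP_SHIFT);
    }

    uint32_t GetRepresentation() const
    {
        return (data_ >> REP_SHIFT) & ((1U << REP_BITS) - 1);
    }

private:
    uint32_t data_ {0};
};
```

The updated static_assert pairs with this change: OffsetBits must stay wide enough to address MAX_FAST_PROPS_CAPACITY inlined slots measured in bytes.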
JSHandle key(globalConst->GetHandledNextString()); JSHandle next(JSObject::GetMethod(thread, iter, key)); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, next, iter, undefined, 0); JSTaggedValue ret = JSFunction::Call(info); @@ -136,8 +140,10 @@ JSHandle JSIterator::IteratorNext(JSThread *thread, const JSHandl // 2.Let result be Invoke(iterator, "next", «‍value»). JSHandle key(globalConst->GetHandledNextString()); JSHandle next(JSObject::GetMethod(thread, iter, key)); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, next, iter, undefined, 1); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, undefined); info->SetCallArg(value.GetTaggedValue()); JSTaggedValue ret = JSFunction::Call(info); // 3.ReturnIfAbrupt(result) @@ -159,6 +165,7 @@ JSHandle JSIterator::IteratorNext(JSThread *thread, const JSHandl JSHandle next(thread, iter->GetNextMethod()); JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, next, iterator, undefined, 1); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, undefined); info->SetCallArg(value.GetTaggedValue()); JSTaggedValue ret = JSFunction::Call(info); // 3.ReturnIfAbrupt(result) @@ -206,6 +213,7 @@ JSHandle JSIterator::IteratorValue(JSThread *thread, const JSHand // Return Get(iterResult, "value"). JSHandle valueStr = thread->GlobalConstants()->GetHandledValueString(); JSHandle value = JSTaggedValue::GetProperty(thread, iterResult, valueStr).GetValue(); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); return value; } // 7.4.5 @@ -261,6 +269,7 @@ JSHandle JSIterator::IteratorClose(JSThread *thread, const JSHand JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, returnFunc, iter, undefined, 0); JSTaggedValue ret = JSFunction::Call(info); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); if (!exceptionOnThread.IsEmpty()) { thread->SetException(exceptionOnThread.GetTaggedValue()); } diff --git a/ecmascript/js_list_format.cpp b/ecmascript/js_list_format.cpp index a0c35845dc56812ccadaee5d54d2280344677e3b..80dfbf467349342622230cf65d74f5421c202fd7 100644 --- a/ecmascript/js_list_format.cpp +++ b/ecmascript/js_list_format.cpp @@ -25,6 +25,7 @@ #include "ecmascript/js_array.h" #include "ecmascript/js_locale.h" #include "ecmascript/js_iterator.h" +#include "ecmascript/object_factory-inl.h" #include "unicode/fieldpos.h" #include "unicode/fpositer.h" @@ -134,6 +135,7 @@ JSHandle JSListFormat::InitializeListFormat(JSThread *thread, // 10. Set listFormat.[[Locale]] to r.[[locale]]. icu::Locale icuLocale = r.localeData; JSHandle localeStr = intl::LocaleHelper::ToLanguageTag(thread, icuLocale); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSListFormat, thread); listFormat->SetLocale(thread, localeStr.GetTaggedValue()); // 11. Let type be ? GetOption(options, "type", "string", « "conjunction", "disjunction", "unit" », "conjunction"). 
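A few files further down, js_map_iterator.cpp gains MapIteratorToList, which drains a possibly part-consumed map iterator into a dense JSArray, skipping the hole entries that deletions leave in the linked hash table. A toy model of that traversal with stand-in types:

```
#include <cstddef>
#include <optional>
#include <utility>
#include <vector>

struct Entry { std::optional<int> key; int value; };  // nullopt plays the role of a hole

// Walk from the iterator's next index; count emitted items separately so
// the output stays dense even when the backing store is sparse.
std::vector<std::pair<int, int>> ToList(const std::vector<Entry> &table, size_t nextIndex)
{
    std::vector<std::pair<int, int>> out;
    for (size_t i = nextIndex; i < table.size(); ++i) {
        if (table[i].key.has_value()) {  // mirrors !key.IsHole()
            out.emplace_back(*table[i].key, table[i].value);
        }
    }
    return out;
}
```

In the real helper, the separate counter k is also written back as the new array's length once the loop finishes.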
@@ -410,17 +412,20 @@ void JSListFormat::ResolvedOptions(JSThread *thread, const JSHandle propertyKey = globalConst->GetHandledLocaleString(); JSHandle locale(thread, listFormat->GetLocale()); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, locale); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[type]] ListTypeOption type = listFormat->GetType(); propertyKey = globalConst->GetHandledTypeString(); JSHandle typeString = ListOptionTypeToEcmaString(thread, type); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, typeString); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[Style]] ListStyleOption style = listFormat->GetStyle(); propertyKey = globalConst->GetHandledStyleString(); JSHandle styleString = ListOptionStyleToEcmaString(thread, style); JSObject::CreateDataPropertyOrThrow(thread, options, propertyKey, styleString); + RETURN_IF_ABRUPT_COMPLETION(thread); } } // namespace panda::ecmascript diff --git a/ecmascript/js_locale.cpp b/ecmascript/js_locale.cpp index c0d3c8f819c0d219fdec4d5bf7d3ac98cec52a97..1a1269af744f5eca92983d89a45598e5efb85017 100644 --- a/ecmascript/js_locale.cpp +++ b/ecmascript/js_locale.cpp @@ -20,7 +20,7 @@ #include "ecmascript/ecma_macros.h" #include "ecmascript/ecma_vm.h" #include "ecmascript/global_env.h" -#include "ecmascript/object_factory.h" +#include "ecmascript/object_factory-inl.h" #if defined(__clang__) #pragma clang diagnostic push @@ -642,11 +642,14 @@ JSHandle JSLocale::PutElement(JSThread *thread, int index, const JSHan auto globalConst = thread->GlobalConstants(); // obj.type = field_type_string JSObject::CreateDataPropertyOrThrow(thread, record, globalConst->GetHandledTypeString(), fieldTypeString); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSObject, thread); // obj.value = value JSObject::CreateDataPropertyOrThrow(thread, record, globalConst->GetHandledValueString(), value); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSObject, thread); JSTaggedValue::SetProperty(thread, JSHandle::Cast(array), index, JSHandle::Cast(record), true); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSObject, thread); return record; } diff --git a/ecmascript/js_locale.h b/ecmascript/js_locale.h index b3125463b603eafb972a214b648005e95e2eb2f5..896d7c8b2fe1347de3b74571b2d7b714660073ac 100644 --- a/ecmascript/js_locale.h +++ b/ecmascript/js_locale.h @@ -524,18 +524,22 @@ public: // 6. Let mnfd be ? Get(options, "minimumFractionDigits"). JSHandle mnfdKey = globalConst->GetHandledMinimumFractionDigitsString(); JSHandle mnfd = JSTaggedValue::GetProperty(thread, options, mnfdKey).GetValue(); + intlObj->SetMinimumIntegerDigits(thread, JSTaggedValue(mnid)); + RETURN_IF_ABRUPT_COMPLETION(thread); // 7. Let mxfd be ? Get(options, "maximumFractionDigits"). JSHandle mxfdKey = globalConst->GetHandledMaximumFractionDigitsString(); JSHandle mxfd = JSTaggedValue::GetProperty(thread, options, mxfdKey).GetValue(); + RETURN_IF_ABRUPT_COMPLETION(thread); // 8. Let mnsd be ? Get(options, "minimumSignificantDigits"). JSHandle mnsdKey = globalConst->GetHandledMinimumSignificantDigitsString(); JSHandle mnsd = JSTaggedValue::GetProperty(thread, options, mnsdKey).GetValue(); + RETURN_IF_ABRUPT_COMPLETION(thread); // 9. Let mxsd be ? Get(options, "maximumSignificantDigits"). JSHandle mxsdKey = globalConst->GetHandledMaximumSignificantDigitsString(); JSHandle mxsd = JSTaggedValue::GetProperty(thread, options, mxsdKey).GetValue(); + RETURN_IF_ABRUPT_COMPLETION(thread); // 10. Set intlObj.[[MinimumIntegerDigits]] to mnid. - intlObj->SetMinimumIntegerDigits(thread, JSTaggedValue(mnid)); // 11. 
If mnsd is not undefined or mxsd is not undefined, then if (!mnsd->IsUndefined() || !mxsd->IsUndefined()) { // a. Set intlObj.[[RoundingType]] to significantDigits. diff --git a/ecmascript/js_map.cpp b/ecmascript/js_map.cpp index 7ac825873cce5e3745cff5fa5c26556534610934..b10502cae9cda41e09afac5ae1d5055b09b52add 100644 --- a/ecmascript/js_map.cpp +++ b/ecmascript/js_map.cpp @@ -61,18 +61,18 @@ JSTaggedValue JSMap::Get(JSTaggedValue key) const return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->Get(key); } -int JSMap::GetSize() const +uint32_t JSMap::GetSize() const { return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->NumberOfElements(); } -JSTaggedValue JSMap::GetKey(int entry) const +JSTaggedValue JSMap::GetKey(uint32_t entry) const { ASSERT_PRINT(entry >= 0 && entry < GetSize(), "entry must be non-negative integer less than capacity"); return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->GetKey(entry); } -JSTaggedValue JSMap::GetValue(int entry) const +JSTaggedValue JSMap::GetValue(uint32_t entry) const { ASSERT_PRINT(entry >= 0 && entry < GetSize(), "entry must be non-negative integer less than capacity"); return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->GetValue(entry); diff --git a/ecmascript/js_map.h b/ecmascript/js_map.h index 3fb8c1dc5e0fa9ef58b13a815145e4699ded3f9c..57f7420f35c62a4a13e266e39e6b1588260966c7 100644 --- a/ecmascript/js_map.h +++ b/ecmascript/js_map.h @@ -34,11 +34,11 @@ public: JSTaggedValue Get(JSTaggedValue key) const; - int GetSize() const; + uint32_t GetSize() const; - JSTaggedValue GetKey(int entry) const; + JSTaggedValue GetKey(uint32_t entry) const; - JSTaggedValue GetValue(int entry) const; + JSTaggedValue GetValue(uint32_t entry) const; static constexpr size_t LINKED_MAP_OFFSET = JSObject::SIZE; ACCESSORS(LinkedMap, LINKED_MAP_OFFSET, SIZE) diff --git a/ecmascript/js_map_iterator.cpp b/ecmascript/js_map_iterator.cpp index 0307114bc50cf52bda03ad1cffc8a4630fe98b75..dd45f4cf569826d77cdeeb9f50b55d19ee7894a1 100644 --- a/ecmascript/js_map_iterator.cpp +++ b/ecmascript/js_map_iterator.cpp @@ -115,4 +115,57 @@ JSHandle JSMapIterator::CreateMapIterator(JSThread *thread, const JSHandle iter(factory->NewJSMapIterator(JSHandle(obj), kind)); return iter; } + +JSTaggedValue JSMapIterator::MapIteratorToList(JSThread *thread, JSHandle &items, + JSHandle &method) +{ + JSTaggedValue newArray = JSArray::ArrayCreate(thread, JSTaggedNumber(0)).GetTaggedValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + JSHandle newArrayHandle(thread, newArray); + JSHandle iterator = JSIterator::GetIterator(thread, items, method); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle iter(iterator); + JSHandle iteratedMap(thread, iter->GetIteratedMap()); + if (iteratedMap->IsUndefined()) { + return newArrayHandle.GetTaggedValue(); + } + IterationKind itemKind = iter->GetIterationKind(); + JSHandle map(iteratedMap); + int totalElements = map->NumberOfElements() + map->NumberOfDeletedElements(); + int index = static_cast(iter->GetNextIndex()); + int k = 0; + + JSMutableHandle keyHandle(thread, JSTaggedValue::Undefined()); + JSMutableHandle valueHandle(thread, JSTaggedValue::Undefined()); + JSMutableHandle indexHandle(thread, JSTaggedValue::Undefined()); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + while (index < totalElements) { + JSTaggedValue key = map->GetKey(index); + indexHandle.Update(JSTaggedValue(k)); + if (!key.IsHole()) { + 
keyHandle.Update(key); + valueHandle.Update(map->GetValue(index)); + if (itemKind == IterationKind::KEY) { + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, indexHandle, keyHandle); + } else if (itemKind == IterationKind::VALUE) { + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, indexHandle, valueHandle); + } else { + JSHandle array(factory->NewTaggedArray(2)); // 2 means the length of array + array->Set(thread, 0, keyHandle); + array->Set(thread, 1, valueHandle); + JSHandle keyAndValue(JSArray::CreateArrayFromList(thread, array)); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, indexHandle, keyAndValue); + } + k++; + } + index++; + } + + indexHandle.Update(JSTaggedValue(k)); + JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), lengthKey, indexHandle, true); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return newArrayHandle.GetTaggedValue(); +} } // namespace panda::ecmascript diff --git a/ecmascript/js_map_iterator.h b/ecmascript/js_map_iterator.h index 9e39ee68ad7b582a2581911f0991aac14fb9d64b..b005c15c3420236c711acadbf8a7cc10b201760c 100644 --- a/ecmascript/js_map_iterator.h +++ b/ecmascript/js_map_iterator.h @@ -32,6 +32,8 @@ public: static JSTaggedValue Next(EcmaRuntimeCallInfo *argv); void Update(const JSThread *thread); + static JSTaggedValue MapIteratorToList(JSThread *thread, JSHandle &items, + JSHandle &method); static constexpr size_t ITERATED_MAP_OFFSET = JSObject::SIZE; ACCESSORS(IteratedMap, ITERATED_MAP_OFFSET, NEXT_INDEX_OFFSET); diff --git a/ecmascript/js_number_format.cpp b/ecmascript/js_number_format.cpp index 756f99ab02bfcd1fb890998821ea137b2c752df0..3e533fe963dd0456155ecec8ea5303f39a1a98bf 100644 --- a/ecmascript/js_number_format.cpp +++ b/ecmascript/js_number_format.cpp @@ -14,6 +14,7 @@ */ #include "ecmascript/js_number_format.h" +#include "ecmascript/object_factory-inl.h" namespace panda::ecmascript { constexpr uint32_t DEFAULT_FRACTION_DIGITS = 2; @@ -504,6 +505,7 @@ void JSNumberFormat::InitializeNumberFormat(JSThread *thread, const JSHandle localeStr = intl::LocaleHelper::ToLanguageTag(thread, icuLocale); + RETURN_IF_ABRUPT_COMPLETION(thread); numberFormat->SetLocale(thread, localeStr.GetTaggedValue()); // Set numberingSystemStr to UnicodeKeyWord "nu" @@ -936,6 +938,7 @@ void JSNumberFormat::ResolvedOptions(JSThread *thread, const JSHandle property = globalConst->GetHandledLocaleString(); JSHandle locale(thread, numberFormat->GetLocale()); JSObject::CreateDataPropertyOrThrow(thread, options, property, locale); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[NumberingSystem]] JSHandle numberingSystem(thread, numberFormat->GetNumberingSystem()); @@ -944,12 +947,14 @@ void JSNumberFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledNumberingSystemString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, numberingSystem); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[Style]] StyleOption style = numberFormat->GetStyle(); property = globalConst->GetHandledStyleString(); JSHandle styleString = OptionToEcmaString(thread, style); JSObject::CreateDataPropertyOrThrow(thread, options, property, styleString); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[currency]] JSHandle currency(thread, JSTaggedValue::Undefined()); @@ -960,18 +965,21 @@ void JSNumberFormat::ResolvedOptions(JSThread *thread, const JSHandleIsUndefined()) { // NOLINT(readability-implicit-bool-conversion) property = globalConst->GetHandledCurrencyString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, currency); 
+ RETURN_IF_ABRUPT_COMPLETION(thread); // [[CurrencyDisplay]] property = globalConst->GetHandledCurrencyDisplayString(); CurrencyDisplayOption currencyDisplay = numberFormat->GetCurrencyDisplay(); JSHandle currencyDisplayString = OptionToEcmaString(thread, currencyDisplay); JSObject::CreateDataPropertyOrThrow(thread, options, property, currencyDisplayString); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[CurrencySign]] property = globalConst->GetHandledCurrencySignString(); CurrencySignOption currencySign = numberFormat->GetCurrencySign(); JSHandle currencySignString = OptionToEcmaString(thread, currencySign); JSObject::CreateDataPropertyOrThrow(thread, options, property, currencySignString); + RETURN_IF_ABRUPT_COMPLETION(thread); } if (style == StyleOption::UNIT) { @@ -980,17 +988,20 @@ void JSNumberFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledUnitString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, unit); + RETURN_IF_ABRUPT_COMPLETION(thread); } // [[UnitDisplay]] property = globalConst->GetHandledUnitDisplayString(); UnitDisplayOption unitDisplay = numberFormat->GetUnitDisplay(); JSHandle unitDisplayString = OptionToEcmaString(thread, unitDisplay); JSObject::CreateDataPropertyOrThrow(thread, options, property, unitDisplayString); + RETURN_IF_ABRUPT_COMPLETION(thread); } // [[MinimumIntegerDigits]] property = globalConst->GetHandledMinimumIntegerDigitsString(); JSHandle minimumIntegerDigits(thread, numberFormat->GetMinimumIntegerDigits()); JSObject::CreateDataPropertyOrThrow(thread, options, property, minimumIntegerDigits); + RETURN_IF_ABRUPT_COMPLETION(thread); RoundingType roundingType = numberFormat->GetRoundingType(); if (roundingType == RoundingType::SIGNIFICANTDIGITS) { @@ -998,31 +1009,37 @@ void JSNumberFormat::ResolvedOptions(JSThread *thread, const JSHandleGetHandledMinimumSignificantDigitsString(); JSHandle minimumSignificantDigits(thread, numberFormat->GetMinimumSignificantDigits()); JSObject::CreateDataPropertyOrThrow(thread, options, property, minimumSignificantDigits); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[MaximumSignificantDigits]] property = globalConst->GetHandledMaximumSignificantDigitsString(); JSHandle maximumSignificantDigits(thread, numberFormat->GetMaximumSignificantDigits()); JSObject::CreateDataPropertyOrThrow(thread, options, property, maximumSignificantDigits); + RETURN_IF_ABRUPT_COMPLETION(thread); } else { // [[MinimumFractionDigits]] property = globalConst->GetHandledMinimumFractionDigitsString(); JSHandle minimumFractionDigits(thread, numberFormat->GetMinimumFractionDigits()); JSObject::CreateDataPropertyOrThrow(thread, options, property, minimumFractionDigits); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[MaximumFractionDigits]] property = globalConst->GetHandledMaximumFractionDigitsString(); JSHandle maximumFractionDigits(thread, numberFormat->GetMaximumFractionDigits()); JSObject::CreateDataPropertyOrThrow(thread, options, property, maximumFractionDigits); + RETURN_IF_ABRUPT_COMPLETION(thread); } // [[UseGrouping]] property = globalConst->GetHandledUserGroupingString(); JSObject::CreateDataPropertyOrThrow(thread, options, property, JSHandle(thread, numberFormat->GetUseGrouping())); + RETURN_IF_ABRUPT_COMPLETION(thread); // [[Notation]] property = globalConst->GetHandledNotationString(); NotationOption notation = numberFormat->GetNotation(); JSHandle notationString = OptionToEcmaString(thread, notation); JSObject::CreateDataPropertyOrThrow(thread, options, property, notationString); + 
RETURN_IF_ABRUPT_COMPLETION(thread); // Only output compactDisplay when notation is compact. if (notation == NotationOption::COMPACT) { @@ -1031,6 +1048,7 @@ void JSNumberFormat::ResolvedOptions(JSThread *thread, const JSHandleGetCompactDisplay(); JSHandle compactDisplayString = OptionToEcmaString(thread, compactDisplay); JSObject::CreateDataPropertyOrThrow(thread, options, property, compactDisplayString); + RETURN_IF_ABRUPT_COMPLETION(thread); } // [[SignDisplay]] diff --git a/ecmascript/js_object-inl.h b/ecmascript/js_object-inl.h index a95529a45fa7aa021827497b7a534ed53053ec57..d63ba411b7cbc2f0ca08e078d9059f5f5070644b 100644 --- a/ecmascript/js_object-inl.h +++ b/ecmascript/js_object-inl.h @@ -58,6 +58,15 @@ inline JSHClass *JSObject::GetJSHClass() const return GetClass(); } +inline uint32_t JSObject::GetNonInlinedFastPropsCapacity() const +{ + uint32_t inlineProps = GetJSHClass()->GetInlinedProperties(); + if (inlineProps < JSHClass::DEFAULT_CAPACITY_OF_IN_OBJECTS) { + return PropertyAttributes::MAX_FAST_PROPS_CAPACITY - JSHClass::DEFAULT_CAPACITY_OF_IN_OBJECTS; + } + return PropertyAttributes::MAX_FAST_PROPS_CAPACITY - inlineProps; +} + inline bool JSObject::IsJSGlobalObject() const { return GetJSHClass()->IsJSGlobalObject(); @@ -223,9 +232,62 @@ inline bool JSObject::IsTypedArray() const return GetJSHClass()->IsTypedArray(); } +std::pair JSObject::ConvertValueWithRep(uint32_t index, JSTaggedValue value) +{ + auto layout = LayoutInfo::Cast(GetJSHClass()->GetLayout().GetTaggedObject()); + auto attr = layout->GetAttr(index); + if (attr.IsDoubleRep()) { + if (value.IsInt()) { + double doubleValue = value.GetInt(); + return std::pair(true, JSTaggedValue(bit_cast(doubleValue))); + } else if (value.IsDouble()) { + return std::pair(true, JSTaggedValue(bit_cast(value.GetDouble()))); + } else { + return std::pair(false, value); + } + } else if (attr.IsIntRep()) { + if (value.IsInt()) { + int intValue = value.GetInt(); + return std::pair(true, JSTaggedValue(static_cast(intValue))); + } else { + return std::pair(false, value); + } + } + return std::pair(true, value); +} + +void JSObject::SetPropertyInlinedPropsWithRep(const JSThread *thread, uint32_t index, JSTaggedValue value) +{ + auto layout = LayoutInfo::Cast(GetJSHClass()->GetLayout().GetTaggedObject()); + auto attr = layout->GetAttr(index); + if (attr.IsTaggedRep()) { + SetPropertyInlinedProps(thread, index, value); + } else { + SetPropertyInlinedProps(thread, index, value); + } +} + +template void JSObject::SetPropertyInlinedProps(const JSThread *thread, uint32_t index, JSTaggedValue value) { - SetPropertyInlinedProps(thread, GetJSHClass(), index, value); + SetPropertyInlinedProps(thread, GetJSHClass(), index, value); +} + +JSTaggedValue JSObject::GetPropertyInlinedPropsWithRep(uint32_t index, PropertyAttributes attr) const +{ + return GetPropertyInlinedPropsWithRep(GetJSHClass(), index, attr); +} + +JSTaggedValue JSObject::GetPropertyInlinedPropsWithRep(const JSHClass *hclass, uint32_t index, + PropertyAttributes attr) const +{ + auto value = GetPropertyInlinedProps(hclass, index); + if (attr.IsDoubleRep()) { + value = JSTaggedValue(bit_cast(value.GetRawData())); + } else if (attr.IsIntRep()) { + value = JSTaggedValue(static_cast(value.GetRawData())); + } + return value; } JSTaggedValue JSObject::GetPropertyInlinedProps(uint32_t index) const @@ -233,11 +295,16 @@ JSTaggedValue JSObject::GetPropertyInlinedProps(uint32_t index) const return GetPropertyInlinedProps(GetJSHClass(), index); } +template void 
JSObject::SetPropertyInlinedProps(const JSThread *thread, const JSHClass *hclass, uint32_t index, JSTaggedValue value) { uint32_t offset = hclass->GetInlinedPropertiesOffset(index); - SET_VALUE_WITH_BARRIER(thread, this, offset, value); + if (needBarrier) { + SET_VALUE_WITH_BARRIER(thread, this, offset, value); + } else { + SET_VALUE_PRIMITIVE(this, offset, value); + } } JSTaggedValue JSObject::GetPropertyInlinedProps(const JSHClass *hclass, uint32_t index) const @@ -249,19 +316,20 @@ JSTaggedValue JSObject::GetPropertyInlinedProps(const JSHClass *hclass, uint32_t JSTaggedValue JSObject::GetProperty(const JSHClass *hclass, PropertyAttributes attr) const { if (attr.IsInlinedProps()) { - return GetPropertyInlinedProps(hclass, attr.GetOffset()); + return GetPropertyInlinedPropsWithRep(hclass, attr.GetOffset(), attr); } TaggedArray *array = TaggedArray::Cast(GetProperties().GetTaggedObject()); return array->Get(attr.GetOffset() - hclass->GetInlinedProperties()); } +template void JSObject::SetProperty(const JSThread *thread, const JSHClass *hclass, PropertyAttributes attr, JSTaggedValue value) { if (attr.IsInlinedProps()) { - SetPropertyInlinedProps(thread, hclass, attr.GetOffset(), value); + SetPropertyInlinedProps(thread, hclass, attr.GetOffset(), value); } else { TaggedArray *array = TaggedArray::Cast(GetProperties().GetTaggedObject()); - array->Set(thread, attr.GetOffset() - hclass->GetInlinedProperties(), value); + array->Set(thread, attr.GetOffset() - hclass->GetInlinedProperties(), value); } } @@ -270,7 +338,7 @@ inline bool JSObject::ShouldTransToDict(uint32_t capacity, uint32_t index) if (index < capacity) { return false; } - + if (index - capacity > MAX_GAP) { return true; } @@ -286,17 +354,23 @@ inline bool JSObject::ShouldTransToDict(uint32_t capacity, uint32_t index) return false; } -inline uint32_t JSObject::ComputeElementCapacity(uint32_t oldCapacity) +inline uint32_t JSObject::ComputeElementCapacity(uint32_t oldCapacity, bool isNew) +{ + uint32_t newCapacity = isNew ? oldCapacity : (oldCapacity + (oldCapacity >> 1U)); + return newCapacity > MIN_ELEMENTS_LENGTH ? newCapacity : MIN_ELEMENTS_LENGTH; +} + +inline uint32_t JSObject::ComputeElementCapacityHighGrowth(uint32_t oldCapacity) { - uint32_t newCapacity = oldCapacity + (oldCapacity >> 1U); + uint32_t newCapacity = oldCapacity * 2; return newCapacity > MIN_ELEMENTS_LENGTH ? newCapacity : MIN_ELEMENTS_LENGTH; } -inline uint32_t JSObject::ComputePropertyCapacity(uint32_t oldCapacity) +inline uint32_t JSObject::ComputeNonInlinedFastPropsCapacity(uint32_t oldCapacity, + uint32_t maxNonInlinedFastPropsCapacity) { uint32_t newCapacity = static_cast(oldCapacity + PROPERTIES_GROW_SIZE); - return newCapacity > JSHClass::MAX_CAPACITY_OF_OUT_OBJECTS ? JSHClass::MAX_CAPACITY_OF_OUT_OBJECTS - : newCapacity; + return newCapacity > maxNonInlinedFastPropsCapacity ? 
diff --git a/ecmascript/js_object.cpp b/ecmascript/js_object.cpp
index a04f07a63be1cc6d74a404b0c0f5863b03682de2..30527283f97593c21d616e6ba64096ca57e3cba1 100644
--- a/ecmascript/js_object.cpp
+++ b/ecmascript/js_object.cpp
@@ -26,12 +26,14 @@
 #include "ecmascript/js_iterator.h"
 #include "ecmascript/js_primitive_ref.h"
 #include "ecmascript/js_thread.h"
-#include "ecmascript/object_factory.h"
+#include "ecmascript/object_factory-inl.h"
 #include "ecmascript/object_fast_operator-inl.h"
+#include "ecmascript/pgo_profiler/pgo_profiler.h"
 #include "ecmascript/property_attributes.h"
 #include "ecmascript/tagged_array-inl.h"
 
 namespace panda::ecmascript {
+using PGOProfiler = pgo::PGOProfiler;
 PropertyAttributes::PropertyAttributes(const PropertyDescriptor &desc)
 {
     DISALLOW_GARBAGE_COLLECTION;
@@ -71,9 +73,14 @@ Method *ECMAObject::GetCallTarget() const
 }
 
 JSHandle<TaggedArray> JSObject::GrowElementsCapacity(const JSThread *thread, const JSHandle<JSObject> &obj,
-                                                     uint32_t capacity)
+                                                     uint32_t capacity, bool highGrowth, bool isNew)
 {
-    uint32_t newCapacity = ComputeElementCapacity(capacity);
+    uint32_t newCapacity;
+    if (highGrowth) {
+        newCapacity = ComputeElementCapacityHighGrowth(capacity);
+    } else {
+        newCapacity = ComputeElementCapacity(capacity, isNew);
+    }
     ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
     JSHandle<TaggedArray> oldElements(thread, obj->GetElements());
     uint32_t oldLength = oldElements->GetLength();
@@ -163,7 +170,7 @@ JSHandle<NameDictionary> JSObject::TransitionToDictionary(const JSThread *thread, const JSHandle<JSObject> &receiver)
 
         JSTaggedValue value;
         if (i < numberInlinedProps) {
-            value = receiver->GetPropertyInlinedProps(i);
+            value = receiver->GetPropertyInlinedPropsWithRep(i, attr);
             // If delete a property in hclass which has subtyping info and not prototype, only set value as hole and
             // not remove. When transition to dictionary, exclude it.
             if (value.IsHole()) {
@@ -236,6 +243,7 @@ bool JSObject::AddElementInternal(JSThread *thread, const JSHandle<JSObject> &receiver, uint32_t index,
                                   const JSHandle<JSTaggedValue> &value, PropertyAttributes attr)
 {
     bool isDictionary = receiver->GetJSHClass()->IsDictionaryElement();
+    ElementsKind kind = ElementsKind::NONE;
     if (receiver->IsJSArray()) {
         DISALLOW_GARBAGE_COLLECTION;
         JSArray *arr = JSArray::Cast(*receiver);
@@ -245,9 +253,12 @@ bool JSObject::AddElementInternal(JSThread *thread, const JSHandle<JSObject> &receiver, uint32_t index,
             return false;
         }
         arr->SetArrayLength(thread, index + 1);
+        if (index > oldLength) {
+            kind = ElementsKind::HOLE;
+        }
     }
-    thread->NotifyStableArrayElementsGuardians(receiver);
+    thread->NotifyStableArrayElementsGuardians(receiver, StableArrayChangeKind::NOT_PROTO);
     TaggedArray *elements = TaggedArray::Cast(receiver->GetElements().GetTaggedObject());
 
     if (isDictionary) {
@@ -272,7 +283,7 @@ bool JSObject::AddElementInternal(JSThread *thread, const JSHandle<JSObject> &receiver, uint32_t index,
         elements = *JSObject::GrowElementsCapacity(thread, receiver, index + 1);
     }
     elements->Set(thread, index, value);
-    receiver->GetJSHClass()->UpdateRepresentation(value.GetTaggedValue());
+    JSHClass::TransitToElementsKind(thread, receiver, value, kind);
    return true;
 }
 
@@ -332,7 +343,7 @@ void JSObject::GetAllKeys(const JSThread *thread, const JSHandle<JSObject> &obj, int offset,
 }
 
 void JSObject::GetAllKeysByFilter(const JSThread *thread, const JSHandle<JSObject> &obj,
-                                  uint32_t& keyArrayEffectivelength,
+                                  uint32_t &keyArrayEffectivelength,
                                   const JSHandle<TaggedArray> &keyArray,
                                   uint32_t filter)
 {
@@ -340,8 +351,8 @@ void JSObject::GetAllKeysByFilter(const JSThread *thread, const JSHandle<JSObject> &obj,
     if (!obj->IsDictionaryMode()) {
         uint32_t numberOfProps = obj->GetJSHClass()->NumberOfProps();
         if (numberOfProps > 0) {
-            LayoutInfo::Cast(obj->GetJSHClass()->GetLayout().GetTaggedObject())
-                ->GetAllKeysByFilter(thread, numberOfProps, keyArrayEffectivelength, *keyArray, obj, filter);
+            LayoutInfo::Cast(obj->GetJSHClass()->GetLayout().GetTaggedObject())->
+                GetAllKeysByFilter(thread, numberOfProps, keyArrayEffectivelength, *keyArray, obj, filter);
         }
         return;
     }
@@ -388,8 +399,9 @@ JSHandle<TaggedArray> JSObject::GetAllEnumKeys(const JSThread *thread, const JSHandle<JSObject> &obj, int offset,
     JSHClass *jsHclass = obj->GetJSHClass();
     JSTaggedValue enumCache = jsHclass->GetEnumCache();
     if (!enumCache.IsNull()) {
-        auto keyArray = JSHandle<TaggedArray>(thread, enumCache);
-        *keys = keyArray->GetLength();
+        JSHandle<TaggedArray> cacheArray = JSHandle<TaggedArray>(thread, enumCache);
+        *keys = cacheArray->GetLength();
+        JSHandle<TaggedArray> keyArray = factory->CopyArray(cacheArray, *keys, *keys);
         return keyArray;
     }
     JSHandle<TaggedArray> keyArray = factory->NewTaggedArray(numOfKeys);
@@ -399,6 +411,8 @@ JSHandle<TaggedArray> JSObject::GetAllEnumKeys(const JSThread *thread, const JSHandle<JSObject> &obj, int offset,
             ->GetAllEnumKeys(thread, end, offset, *keyArray, keys, obj);
         if (*keys == keyArray->GetLength()) {
             jsHclass->SetEnumCache(thread, keyArray.GetTaggedValue());
+            JSHandle<TaggedArray> newkeyArray = factory->CopyArray(keyArray, *keys, *keys);
+            return newkeyArray;
         }
     }
     return keyArray;
@@ -438,11 +452,10 @@ void JSObject::GetAllElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
                                  const JSHandle<TaggedArray> &keyArray)
 {
     uint32_t elementIndex = 0;
-
     if (obj->IsJSPrimitiveRef() && JSPrimitiveRef::Cast(*obj)->IsString()) {
         elementIndex = JSPrimitiveRef::Cast(*obj)->GetStringLength() + static_cast<uint32_t>(offset);
         for (uint32_t i = static_cast<uint32_t>(offset); i < elementIndex; ++i) {
-            auto key = base::NumberHelper::NumberToString(thread, JSTaggedValue(i));
+            auto key = base::NumberHelper::IntToEcmaString(thread, i);
             keyArray->Set(thread, i, key);
         }
     }
@@ -452,7 +465,7 @@ void JSObject::GetAllElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
     uint32_t elementsLen = elements->GetLength();
     for (uint32_t i = 0, j = elementIndex; i < elementsLen; ++i) {
         if (!elements->Get(i).IsHole()) {
-            auto key = base::NumberHelper::NumberToString(thread, JSTaggedValue(i));
+            auto key = base::NumberHelper::IntToEcmaString(thread, i);
             keyArray->Set(thread, j++, key);
         }
     }
@@ -464,7 +477,7 @@ void JSObject::GetAllElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
 
 void JSObject::GetAllElementKeysByFilter(JSThread *thread,
                                          const JSHandle<JSObject> &obj,
                                          const JSHandle<TaggedArray> &keyArray,
-                                         uint32_t &keyArrayEffectivelength,
+                                         uint32_t &keyArrayEffectiveLength,
                                          uint32_t filter)
 {
     ASSERT_PRINT(obj->IsECMAObject(), "obj is not object");
@@ -474,8 +487,8 @@ void JSObject::GetAllElementKeysByFilter(JSThread *thread,
     if ((filter & NATIVE_ENUMERABLE) && obj->IsJSPrimitiveRef() && JSPrimitiveRef::Cast(*obj)->IsString()) {
         elementIndex = JSPrimitiveRef::Cast(*obj)->GetStringLength();
         for (uint32_t i = 0; i < elementIndex; ++i) {
-            keyArray->Set(thread, keyArrayEffectivelength, JSTaggedValue(i));
-            keyArrayEffectivelength++;
+            keyArray->Set(thread, keyArrayEffectiveLength, JSTaggedValue(i));
+            keyArrayEffectiveLength++;
         }
     }
 
@@ -491,13 +504,13 @@ void JSObject::GetAllElementKeysByFilter(JSThread *thread,
             if (bIgnore) {
                 continue;
             }
-            keyArray->Set(thread, keyArrayEffectivelength, JSTaggedValue(i));
-            keyArrayEffectivelength++;
+            keyArray->Set(thread, keyArrayEffectiveLength, JSTaggedValue(i));
+            keyArrayEffectiveLength++;
         }
     } else {
         NumberDictionary::GetAllKeysByFilter(thread, JSHandle<NumberDictionary>(elements),
-                                             keyArrayEffectivelength, keyArray, filter);
+                                             keyArrayEffectiveLength, keyArray, filter);
     }
 }
 
@@ -531,9 +544,8 @@ JSHandle<TaggedArray> JSObject::GetEnumElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
         *keys += elementIndex;
         elementIndex += static_cast<uint32_t>(offset);
         for (uint32_t i = static_cast<uint32_t>(offset); i < elementIndex; ++i) {
-            keyHandle.Update(JSTaggedValue(i));
-            auto key = JSTaggedValue::ToString(thread, keyHandle);
-            elementArray->Set(thread, i, key);
+            keyHandle.Update(base::NumberHelper::IntToEcmaString(thread, i));
+            elementArray->Set(thread, i, keyHandle);
         }
     }
 
@@ -543,7 +555,7 @@ JSHandle<TaggedArray> JSObject::GetEnumElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
     uint32_t preElementIndex = elementIndex;
     for (uint32_t i = 0; i < elementsLen; ++i) {
         if (!arr->Get(i).IsHole()) {
-            keyHandle.Update(factory->NewFromASCII(ToCString(i)).GetTaggedValue());
+            keyHandle.Update(base::NumberHelper::IntToEcmaString(thread, i));
             elementArray->Set(thread, elementIndex++, keyHandle);
         }
     }
@@ -561,7 +573,7 @@ void JSObject::GetEnumElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
     if (obj->IsJSPrimitiveRef() && JSPrimitiveRef::Cast(*obj)->IsString()) {
         elementIndex = JSPrimitiveRef::Cast(*obj)->GetStringLength() + static_cast<uint32_t>(offset);
         for (uint32_t i = static_cast<uint32_t>(offset); i < elementIndex; ++i) {
-            auto key = base::NumberHelper::NumberToString(thread, JSTaggedValue(i));
+            auto key = base::NumberHelper::IntToEcmaString(thread, i);
             keyArray->Set(thread, i, key);
         }
     }
@@ -571,7 +583,7 @@ void JSObject::GetEnumElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
     uint32_t elementsLen = elements->GetLength();
     for (uint32_t i = 0, j = elementIndex; i < elementsLen; ++i) {
         if (!elements->Get(i).IsHole()) {
-            auto key = base::NumberHelper::NumberToString(thread, JSTaggedValue(i));
+            auto key = base::NumberHelper::IntToEcmaString(thread, i);
             keyArray->Set(thread, j++, key);
         }
     }
@@ -800,6 +812,10 @@ bool JSObject::CallSetter(JSThread *thread, const AccessorData &accessor, const JSHandle<JSTaggedValue> &receiver,
     }
 
     JSHandle<JSTaggedValue> func(thread, setter);
+    if (thread->IsPGOProfilerEnable()) {
+        auto profiler = thread->GetEcmaVM()->GetPGOProfiler();
+        profiler->ProfileCall(JSTaggedValue::VALUE_UNDEFINED, func.GetTaggedType());
+    }
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, func, receiver, undefined, 1);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -822,6 +838,10 @@ JSTaggedValue JSObject::CallGetter(JSThread *thread, const AccessorData *accessor,
     }
 
     JSHandle<JSTaggedValue> func(thread, getter);
+    if (thread->IsPGOProfilerEnable()) {
+        auto profiler = thread->GetEcmaVM()->GetPGOProfiler();
+        profiler->ProfileCall(JSTaggedValue::VALUE_UNDEFINED, func.GetTaggedType());
+    }
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, func, receiver, undefined, 0);
     JSTaggedValue res = JSFunction::Call(info);
@@ -974,9 +994,12 @@ bool JSObject::OrdinaryGetOwnProperty(JSThread *thread, const JSHandle<JSObject> &obj,
     op.ToPropertyDescriptor(desc);
 
     if (desc.HasValue() && obj->IsJSGlobalObject()) {
-        PropertyBox *cell = PropertyBox::Cast(desc.GetValue().GetTaggedValue().GetTaggedObject());
-        JSHandle<JSTaggedValue> valueHandle(thread, cell->GetValue());
-        desc.SetValue(valueHandle);
+        JSTaggedValue val = desc.GetValue().GetTaggedValue();
+        if (val.IsPropertyBox()) {
+            PropertyBox *cell = PropertyBox::Cast(val.GetTaggedObject());
+            JSHandle<JSTaggedValue> valueHandle(thread, cell->GetValue());
+            desc.SetValue(valueHandle);
+        }
     }
 
     return true;
@@ -1188,8 +1211,8 @@ bool JSObject::SetPrototype(JSThread *thread, const JSHandle<JSObject> &obj, const JSHandle<JSTaggedValue> &proto)
     JSHandle<JSHClass> hclass(thread, obj->GetJSHClass());
     JSHandle<JSHClass> newClass = JSHClass::TransitionProto(thread, hclass, proto);
     JSHClass::NotifyHclassChanged(thread, hclass, newClass);
-    obj->SetClass(newClass);
-    thread->NotifyStableArrayElementsGuardians(obj);
+    obj->SynchronizedSetClass(*newClass);
+    thread->NotifyStableArrayElementsGuardians(obj, StableArrayChangeKind::PROTO);
     return true;
 }
 
@@ -1241,7 +1264,7 @@ bool JSObject::PreventExtensions(JSThread *thread, const JSHandle<JSObject> &obj)
     if (obj->IsExtensible()) {
         JSHandle<JSHClass> jshclass(thread, obj->GetJSHClass());
         JSHandle<JSHClass> newHclass = JSHClass::TransitionExtension(thread, jshclass);
-        obj->SetClass(newHclass);
+        obj->SynchronizedSetClass(*newHclass);
     }
 
     return true;
@@ -1264,16 +1287,15 @@ JSHandle<TaggedArray> JSObject::GetOwnPropertyKeys(JSThread *thread, const JSHandle<JSObject> &obj)
 JSHandle<TaggedArray> JSObject::GetAllPropertyKeys(JSThread *thread, const JSHandle<JSObject> &obj, uint32_t filter)
 {
-    bool isInculdePrototypes = (filter & NATIVE_KEY_INCLUDE_PROTOTYPES);
     JSMutableHandle<JSObject> currentObj(thread, obj);
     JSMutableHandle<JSTaggedValue> currentObjValue(thread, currentObj);
 
     uint32_t curObjNumberOfElements = currentObj->GetNumberOfElements();
     uint32_t curObjNumberOfKeys = currentObj->GetNumberOfKeys();
     uint32_t curObjectKeysLength = curObjNumberOfElements + curObjNumberOfKeys;
-    uint32_t retArraylength = curObjectKeysLength;
+    uint32_t retArrayLength = curObjectKeysLength;
     ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
-    JSMutableHandle<TaggedArray> retArray(thread, factory->NewTaggedArray(retArraylength));
+    JSMutableHandle<TaggedArray> retArray(thread, factory->NewTaggedArray(retArrayLength));
     uint32_t retArrayEffectivelength = 0;
 
     do {
@@ -1281,16 +1303,20 @@ JSHandle<TaggedArray> JSObject::GetAllPropertyKeys(JSThread *thread, const JSHandle<JSObject> &obj, uint32_t filter)
         curObjNumberOfKeys = currentObj->GetNumberOfKeys();
         curObjectKeysLength = curObjNumberOfElements + curObjNumberOfKeys;
         uint32_t minRequireLength = curObjectKeysLength + retArrayEffectivelength;
-        if (retArraylength < minRequireLength) {
+        if (retArrayLength < minRequireLength) {
             // expand retArray
-            retArray.Update(factory->NewAndCopyTaggedArray(retArray, minRequireLength, retArraylength));
-            retArraylength = minRequireLength;
+            if (retArrayLength != 0) {
+                retArray.Update(factory->NewAndCopyTaggedArray(retArray, minRequireLength, retArrayLength));
+            } else {
+                retArray.Update(factory->NewTaggedArray(minRequireLength));
+            }
+            retArrayLength = minRequireLength;
         }
 
         GetAllElementKeysByFilter(thread, currentObj, retArray, retArrayEffectivelength, filter);
         GetAllKeysByFilter(thread, currentObj, retArrayEffectivelength, retArray, filter);
-
+        bool isInculdePrototypes = (filter & NATIVE_KEY_INCLUDE_PROTOTYPES);
         if (!isInculdePrototypes) {
             break;
         }
@@ -1298,6 +1324,9 @@ JSHandle<TaggedArray> JSObject::GetAllPropertyKeys(JSThread *thread, const JSHandle<JSObject> &obj, uint32_t filter)
         currentObjValue.Update(currentObj);
     } while (currentObjValue->IsHeapObject());
 
+    if (retArrayEffectivelength == 0 && (filter & NATIVE_KEY_OWN_ONLY)) {
+        return retArray;
+    }
     JSMutableHandle<JSTaggedValue> element(thread, JSTaggedValue::Undefined());
     if (filter & NATIVE_KEY_NUMBERS_TO_STRINGS) {
         for (uint32_t i = 0; i < retArrayEffectivelength; i++) {
@@ -1545,9 +1574,11 @@ JSHandle<TaggedArray> JSObject::EnumerableOwnNames(JSThread *thread, const JSHandle<JSObject> &obj)
     if (copyLengthOfKeys != 0 && copyLengthOfElements != 0) {
         keys = TaggedArray::AppendSkipHole(thread, elementArray, keyArray, copyLengthOfKeys + copyLengthOfElements);
     } else if (copyLengthOfKeys != 0) {
-        keys = factory->CopyArray(keyArray, copyLengthOfKeys, copyLengthOfKeys);
+        keyArray->SetLength(copyLengthOfKeys);  // keyArray will skip nonEnumerable properties, need re-set length.
+        return keyArray;
     } else if (copyLengthOfElements != 0) {
-        keys = factory->CopyArray(elementArray, copyLengthOfElements, copyLengthOfElements);
+        elementArray->SetLength(copyLengthOfElements);  // elementArray will skip hole value, need re-set length.
+        return elementArray;
     } else {
         keys = factory->EmptyArray();
     }
@@ -1748,6 +1779,7 @@ bool JSObject::InstanceOf(JSThread *thread, const JSHandle<JSTaggedValue> &object, const JSHandle<JSTaggedValue> &target)
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     EcmaRuntimeCallInfo *info =
         EcmaInterpreter::NewRuntimeCallInfo(thread, instOfHandler, target, undefined, 1);
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(object.GetTaggedValue());
     JSTaggedValue tagged = JSFunction::Call(info);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -2101,7 +2133,8 @@ void JSObject::DefineGetter(JSThread *thread, const JSHandle<JSTaggedValue> &obj, const JSHandle<JSTaggedValue> &key,
     op.DefineGetter(value);
 }
 
-JSHandle<JSObject> JSObject::CreateObjectFromProperties(const JSThread *thread, const JSHandle<TaggedArray> &properties)
+JSHandle<JSObject> JSObject::CreateObjectFromProperties(const JSThread *thread, const JSHandle<TaggedArray> &properties,
+                                                        JSTaggedValue ihcVal)
 {
     ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
     size_t length = properties->GetLength();
@@ -2112,13 +2145,10 @@ JSHandle<JSObject> JSObject::CreateObjectFromProperties(const JSThread *thread, const JSHandle<TaggedArray> &properties,
         }
         propsLen++;
     }
-    if (propsLen <= PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES) {
+    if (propsLen <= PropertyAttributes::MAX_FAST_PROPS_CAPACITY) {
         JSHandle<JSObject> obj = factory->NewOldSpaceObjLiteralByHClass(properties, propsLen);
         ASSERT_PRINT(obj->IsECMAObject(), "Obj is not a valid object");
-        for (size_t i = 0; i < propsLen; i++) {
-            // 2: literal contains a pair of key-value
-            obj->SetPropertyInlinedProps(thread, i, properties->Get(i * 2 + 1));
-        }
+        SetAllPropertys(thread, obj, properties, propsLen, ihcVal);
         return obj;
     } else {
         JSHandle<JSObject> obj = factory->NewEmptyJSObject();
@@ -2142,6 +2172,52 @@ JSHandle<JSObject> JSObject::CreateObjectFromProperties(const JSThread *thread, const JSHandle<TaggedArray> &properties,
     }
 }
 
+void JSObject::SetAllPropertys(const JSThread *thread, JSHandle<JSObject> &obj, const JSHandle<TaggedArray> &properties,
+                               uint32_t propsLen, JSTaggedValue ihcVal)
+{
+    // AOT runtime
+    if (ihcVal.IsJSHClass()) {
+        bool isSuccess = true;
+        JSHClass *ihc = JSHClass::Cast(ihcVal.GetTaggedObject());
+        JSHClass *oldHC = obj->GetJSHClass();
+        ihc->SetPrototype(thread, oldHC->GetPrototype());
+        obj->SetClass(ihc);
+        for (size_t i = 0; i < propsLen; i++) {
+            auto value = obj->ConvertValueWithRep(i, properties->Get(i * 2 + 1));
+            // If value.first is false, indicating that value cannot be converted to the expected value of
+            // representation. For example, the representation is INT, but the value type is string.
+            if (!value.first) {
+                isSuccess = false;
+                break;
+            }
+            obj->SetPropertyInlinedPropsWithRep(thread, i, value.second);
+        }
+        if (isSuccess) {
+            return;
+        }
+        // If conversion fails, it needs to be rolled back to the old HClass and reset the value.
+        obj->SetClass(oldHC);
+    } else if (thread->IsPGOProfilerEnable()) {
+        // PGO need to track TrackType
+        JSHClass *oldHC = obj->GetJSHClass();
+        LayoutInfo *layoutInfo = LayoutInfo::Cast(oldHC->GetLayout().GetTaggedObject());
+        for (size_t i = 0; i < propsLen; i++) {
+            auto value = properties->Get(i * 2 + 1);
+            auto attr = layoutInfo->GetAttr(i);
+            if (attr.UpdateTrackType(value)) {
+                layoutInfo->SetNormalAttr(thread, i, attr);
+            }
+            obj->SetPropertyInlinedProps(thread, i, value);
+        }
+        return;
+    }
+    // Interpreter runtime or track field initialized fail.
+    for (size_t i = 0; i < propsLen; i++) {
+        // 2: literal contains a pair of key-value
+        obj->SetPropertyInlinedProps(thread, i, properties->Get(i * 2 + 1));
+    }
+}
+
 void JSObject::AddAccessor(JSThread *thread, const JSHandle<JSTaggedValue> &obj, const JSHandle<JSTaggedValue> &key,
                            const JSHandle<JSTaggedValue> &value, PropertyAttributes attr)
 {
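Review note on `SetAllPropertys`: it picks one of three initialization paths — AOT (an inferred hclass `ihcVal` with fixed slot representations), PGO profiling (update each attribute's track type), or the plain interpreter loop — and the AOT path is all-or-nothing. A compact sketch of that rollback shape, using hypothetical local types rather than engine classes:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

enum class Rep { Tagged, Int, Double };

struct Value {
    bool isInt = false;
    double num = 0.0;
};

// Mirrors ConvertValueWithRep: refuse values that do not fit the slot's rep.
std::optional<uint64_t> ConvertWithRep(Rep rep, const Value &v)
{
    if (rep == Rep::Int && !v.isInt) {
        return std::nullopt;  // an IntRep slot cannot take a double or string
    }
    return static_cast<uint64_t>(v.num);  // placeholder raw encoding
}

// Mirrors the AOT branch: on the first mismatch the caller abandons the
// optimized layout and re-runs the slow, fully tagged initialization loop.
bool TryFastInit(const std::vector<Rep> &layout, const std::vector<Value> &values,
                 std::vector<uint64_t> &slots)
{
    for (size_t i = 0; i < values.size(); i++) {
        auto raw = ConvertWithRep(layout[i], values[i]);
        if (!raw) {
            return false;  // caller restores the old class, as the patch does
        }
        slots[i] = *raw;
    }
    return true;
}

int main()
{
    std::vector<Rep> layout = {Rep::Int, Rep::Double};
    std::vector<Value> values = {{true, 1}, {false, 2.5}};
    std::vector<uint64_t> slots(2);
    return TryFastInit(layout, values, slots) ? 0 : 1;
}
```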
diff --git a/ecmascript/js_object.h b/ecmascript/js_object.h
index ef5855a3b270f6da07820714ce7eaf752fc87e68..d56a3fc88eb515048c204d87b6ddf4bc4f045d64 100644
--- a/ecmascript/js_object.h
+++ b/ecmascript/js_object.h
@@ -527,6 +527,7 @@ public:
     void FillElementsWithHoles(const JSThread *thread, uint32_t start, uint32_t end);
 
     JSHClass *GetJSHClass() const;
+    uint32_t GetNonInlinedFastPropsCapacity() const;
     bool IsJSGlobalObject() const;
     bool IsConstructor() const;
     bool IsECMAObject() const;
@@ -569,13 +570,14 @@ public:
     static void DefineGetter(JSThread *thread, const JSHandle<JSTaggedValue> &obj, const JSHandle<JSTaggedValue> &key,
                              const JSHandle<JSTaggedValue> &value);
     static JSHandle<JSObject> CreateObjectFromProperties(const JSThread *thread,
-                                                         const JSHandle<TaggedArray> &properties);
+                                                         const JSHandle<TaggedArray> &properties,
+                                                         JSTaggedValue ihc = JSTaggedValue::Undefined());
     static void GetAllKeys(const JSThread *thread, const JSHandle<JSObject> &obj, int offset,
                            const JSHandle<TaggedArray> &keyArray);
     static void GetAllKeys(const JSHandle<JSObject> &obj, std::vector<JSTaggedValue> &keyVector);
     static void GetAllKeysByFilter(const JSThread *thread, const JSHandle<JSObject> &obj,
-                                   uint32_t& keyArrayEffectivelength,
+                                   uint32_t &keyArrayEffectivelength,
                                    const JSHandle<TaggedArray> &keyArray, uint32_t filter);
     static void GetAllElementKeys(JSThread *thread, const JSHandle<JSObject> &obj, int offset,
@@ -583,7 +585,7 @@ public:
     static void GetAllElementKeysByFilter(JSThread *thread,
                                           const JSHandle<JSObject> &obj,
                                           const JSHandle<TaggedArray> &keyArray,
-                                          uint32_t &keyArrayEffectivelength,
+                                          uint32_t &keyArrayEffectiveLength,
                                           uint32_t filter);
 
     static void GetALLElementKeysIntoVector(const JSThread *thread, const JSHandle<JSObject> &obj,
@@ -614,12 +616,20 @@ public:
     static JSHandle<NameDictionary> TransitionToDictionary(const JSThread *thread, const JSHandle<JSObject> &receiver);
 
+    inline std::pair<bool, JSTaggedValue> ConvertValueWithRep(uint32_t index, JSTaggedValue value);
+    inline void SetPropertyInlinedPropsWithRep(const JSThread *thread, uint32_t index, JSTaggedValue value);
+    template <bool needBarrier = true>
     inline void SetPropertyInlinedProps(const JSThread *thread, uint32_t index, JSTaggedValue value);
+    template <bool needBarrier = true>
     inline void SetPropertyInlinedProps(const JSThread *thread, const JSHClass *hclass, uint32_t index,
                                         JSTaggedValue value);
+    inline JSTaggedValue GetPropertyInlinedPropsWithRep(uint32_t index, PropertyAttributes attr) const;
+    inline JSTaggedValue GetPropertyInlinedPropsWithRep(const JSHClass *hclass, uint32_t index,
+                                                        PropertyAttributes attr) const;
     inline JSTaggedValue GetPropertyInlinedProps(uint32_t index) const;
     inline JSTaggedValue GetPropertyInlinedProps(const JSHClass *hclass, uint32_t index) const;
     inline JSTaggedValue GetProperty(const JSHClass *hclass, PropertyAttributes attr) const;
+    template <bool needBarrier = true>
     inline void SetProperty(const JSThread *thread, const JSHClass *hclass, PropertyAttributes attr,
                             JSTaggedValue value);
 
@@ -627,7 +637,7 @@ public:
     bool UpdatePropertyInDictionary(const JSThread *thread, JSTaggedValue key, JSTaggedValue value);
     static bool ShouldTransToDict(uint32_t capacity, uint32_t index);
     static JSHandle<TaggedArray> GrowElementsCapacity(const JSThread *thread, const JSHandle<JSObject> &obj,
-                                                      uint32_t capacity);
+                                                      uint32_t capacity, bool highGrowth = false, bool isNew = false);
 
     static JSHandle<JSTaggedValue> IterableToList(JSThread *thread, const JSHandle<JSTaggedValue> &items,
                                                   JSTaggedValue method = JSTaggedValue::Undefined());
@@ -651,10 +661,13 @@ private:
     static bool SetProperty(ObjectOperator *op, const JSHandle<JSTaggedValue> &value, bool mayThrow);
     static void DeletePropertyInternal(JSThread *thread, const JSHandle<JSObject> &obj,
                                        const JSHandle<JSTaggedValue> &key, uint32_t index);
+    static void SetAllPropertys(const JSThread *thread, JSHandle<JSObject> &obj,
+                                const JSHandle<TaggedArray> &properties, uint32_t propsLen, JSTaggedValue ihcVal);
     int FindProperty(const JSHandle<JSTaggedValue> &key);
 
-    static uint32_t ComputeElementCapacity(uint32_t oldCapacity);
-    static uint32_t ComputePropertyCapacity(uint32_t oldCapacity);
+    static uint32_t ComputeElementCapacity(uint32_t oldCapacity, bool isNew = false);
+    static uint32_t ComputeElementCapacityHighGrowth(uint32_t oldCapacity);
+    static uint32_t ComputeNonInlinedFastPropsCapacity(uint32_t oldCapacity, uint32_t maxNonInlinedFastPropsCapacity);
     static JSTaggedValue ShouldGetValueFromBox(ObjectOperator *op);
 
     static std::pair<JSHandle<TaggedArray>, JSHandle<TaggedArray>> GetOwnEnumerableNamesInFastMode(
diff --git a/ecmascript/js_plural_rules.cpp b/ecmascript/js_plural_rules.cpp
index f95e0d28adab9492ec3a64522728630fed169842..bbf14ec131f0a893f7bd745fe14feeeb22efb979 100644
--- a/ecmascript/js_plural_rules.cpp
+++ b/ecmascript/js_plural_rules.cpp
@@ -19,6 +19,7 @@
 #include "ecmascript/ecma_macros.h"
 #include "ecmascript/global_env.h"
 #include "ecmascript/global_env_constants.h"
+#include "ecmascript/object_factory-inl.h"
 #include "ecmascript/js_number_format.h"
 
 namespace panda::ecmascript {
@@ -256,6 +257,7 @@ JSHandle<JSPluralRules> JSPluralRules::InitializePluralRules(JSThread *thread,
 
     // 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
     JSHandle<EcmaString> localeStr = intl::LocaleHelper::ToLanguageTag(thread, icuLocale);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSPluralRules, thread);
     pluralRules->SetLocale(thread, localeStr.GetTaggedValue());
 
     // 13. Return pluralRules.
@@ -325,6 +327,7 @@ void JSPluralRules::ResolvedOptions(JSThread *thread, const JSHandle<JSPluralRules> &pluralRules,
     property = JSHandle<JSTaggedValue>::Cast(globalConst->GetHandledMinimumIntegerDigitsString());
     JSHandle<JSTaggedValue> minimumIntegerDigits(thread, pluralRules->GetMinimumIntegerDigits());
     JSObject::CreateDataPropertyOrThrow(thread, options, property, minimumIntegerDigits);
+    RETURN_IF_ABRUPT_COMPLETION(thread);
 
     RoundingType roundingType = pluralRules->GetRoundingType();
     if (roundingType == RoundingType::SIGNIFICANTDIGITS) {
@@ -332,19 +335,23 @@ void JSPluralRules::ResolvedOptions(JSThread *thread, const JSHandle<JSPluralRules> &pluralRules,
         property = globalConst->GetHandledMinimumSignificantDigitsString();
         JSHandle<JSTaggedValue> minimumSignificantDigits(thread, pluralRules->GetMinimumSignificantDigits());
         JSObject::CreateDataPropertyOrThrow(thread, options, property, minimumSignificantDigits);
+        RETURN_IF_ABRUPT_COMPLETION(thread);
         // [[MaximumSignificantDigits]]
         property = globalConst->GetHandledMaximumSignificantDigitsString();
         JSHandle<JSTaggedValue> maximumSignificantDigits(thread, pluralRules->GetMaximumSignificantDigits());
         JSObject::CreateDataPropertyOrThrow(thread, options, property, maximumSignificantDigits);
+        RETURN_IF_ABRUPT_COMPLETION(thread);
     } else {
         // [[MinimumFractionDigits]]
         property = globalConst->GetHandledMinimumFractionDigitsString();
         JSHandle<JSTaggedValue> minimumFractionDigits(thread, pluralRules->GetMinimumFractionDigits());
         JSObject::CreateDataPropertyOrThrow(thread, options, property, minimumFractionDigits);
+        RETURN_IF_ABRUPT_COMPLETION(thread);
         // [[MaximumFractionDigits]]
         property = globalConst->GetHandledMaximumFractionDigitsString();
         JSHandle<JSTaggedValue> maximumFractionDigits(thread, pluralRules->GetMaximumFractionDigits());
         JSObject::CreateDataPropertyOrThrow(thread, options, property, maximumFractionDigits);
+        RETURN_IF_ABRUPT_COMPLETION(thread);
     }
 
     // 5. Let pluralCategories be a List of Strings representing the possible results of PluralRuleSelect
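Review note: the js_plural_rules.cpp hunks all follow one pattern — `JSObject::CreateDataPropertyOrThrow` can leave a pending exception on the thread, so each call is now followed by `RETURN_IF_ABRUPT_COMPLETION(thread)`. A simplified sketch of what such a macro does; `Thread` and the macro body here are illustrative, not the engine's definitions:

```cpp
struct Thread {
    bool hasPendingException = false;
};

#define RETURN_IF_ABRUPT(thread)             \
    do {                                     \
        if ((thread)->hasPendingException) { \
            return;                          \
        }                                    \
    } while (0)

// Stand-in for a property definition that may throw.
void DefineProperty(Thread *thread) { thread->hasPendingException = true; }

void ResolvedOptionsShape(Thread *thread)
{
    DefineProperty(thread);    // may throw
    RETURN_IF_ABRUPT(thread);  // stop before touching the next property
    DefineProperty(thread);    // never reached once unwinding has started
    RETURN_IF_ABRUPT(thread);
}

int main()
{
    Thread t;
    ResolvedOptionsShape(&t);
    return t.hasPendingException ? 0 : 1;
}
```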
diff --git a/ecmascript/js_primitive_ref.cpp b/ecmascript/js_primitive_ref.cpp
index df91649167afaf577521e42da45860548de50180..3a867c91da0a2e810a18f9e418dc126ca05bb339 100644
--- a/ecmascript/js_primitive_ref.cpp
+++ b/ecmascript/js_primitive_ref.cpp
@@ -42,6 +42,7 @@ JSHandle<JSPrimitiveRef> JSPrimitiveRef::StringCreate(JSThread *thread, const JSHandle<JSTaggedValue> &value,
     //     [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }).
     PropertyDescriptor desc(thread, JSHandle<JSTaggedValue>(thread, JSTaggedValue(length)), false, false, false);
     [[maybe_unused]] bool status = JSTaggedValue::DefinePropertyOrThrow(thread, str, lengthStr, desc);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSPrimitiveRef, thread);
     ASSERT(status);
     // 9. Return S.
     return JSHandle<JSPrimitiveRef>(str);
diff --git a/ecmascript/js_proxy.cpp b/ecmascript/js_proxy.cpp
index f9d841e659880984602f24524108cd58d71c40bc..6c6398918dcd286cfed1a622dc630c6ede91ce56 100644
--- a/ecmascript/js_proxy.cpp
+++ b/ecmascript/js_proxy.cpp
@@ -132,13 +132,13 @@ bool JSProxy::SetPrototype(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &proto)
         return JSTaggedValue::SetPrototype(thread, targetHandle, proto);
     }
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 2;  // 2: target and proto
+    const uint32_t argsLength = 2;  // 2: target and proto
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue(), proto.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     // 9. Let booleanTrapResult be ToBoolean(Call(trap, handler, «target, V»)).
     // If booleanTrapResult is false, return false
     bool booleanTrapResult = trapResult.ToBoolean();
@@ -199,7 +199,7 @@ bool JSProxy::IsExtensible(JSThread *thread, const JSHandle<JSProxy> &proxy)
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     bool booleanTrapResult = trapResult.ToBoolean();
     // 9. ReturnIfAbrupt(booleanTrapResult).
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -249,7 +249,7 @@ bool JSProxy::PreventExtensions(JSThread *thread, const JSHandle<JSProxy> &proxy)
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     bool booleanTrapResult = trapResult.ToBoolean();
     // 9. ReturnIfAbrupt(booleanTrapResult).
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -297,12 +297,13 @@ bool JSProxy::GetOwnProperty(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &key,
         return JSTaggedValue::GetOwnProperty(thread, targetHandle, key, desc);
     }
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 2;  // 2: target and key
+    const uint32_t argsLength = 2;  // 2: target and key
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue(), key.GetTaggedValue());
     JSTaggedValue trapResultObj = JSFunction::Call(info);
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     JSHandle<JSTaggedValue> resultHandle(thread, trapResultObj);
 
@@ -395,13 +396,13 @@ bool JSProxy::DefineOwnProperty(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &key,
     // 9. Let descObj be FromPropertyDescriptor(Desc).
     JSHandle<JSTaggedValue> descObj = JSObject::FromPropertyDescriptor(thread, desc);
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 3;  // 3: target, key and desc
+    const uint32_t argsLength = 3;  // 3: target, key and desc
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue(), key.GetTaggedValue(), descObj.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     bool booleanTrapResult = trapResult.ToBoolean();
     // 11. ReturnIfAbrupt(booleanTrapResult).
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -483,13 +484,13 @@ bool JSProxy::HasProperty(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &key)
 
     // 9. Let booleanTrapResult be ToBoolean(Call(trap, handler, «target, P»)).
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 2;  // 2: target and key
+    const uint32_t argsLength = 2;  // 2: target and key
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue(), key.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     bool booleanTrapResult = trapResult.ToBoolean();
     // 10. ReturnIfAbrupt(booleanTrapResult).
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -544,7 +545,7 @@ OperationResult JSProxy::GetProperty(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &key,
     }
     // 9. Let trapResult be Call(trap, handler, «target, P, Receiver»).
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 3;  // 3: «target, P, Receiver»
+    const uint32_t argsLength = 3;  // 3: «target, P, Receiver»
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(
@@ -615,14 +616,14 @@ bool JSProxy::SetProperty(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &key,
 
     // 9. Let booleanTrapResult be ToBoolean(Call(trap, handler, «target, P, V, Receiver»))
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 4;  // 4: «target, P, V, Receiver»
+    const uint32_t argsLength = 4;  // 4: «target, P, V, Receiver»
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(
         targetHandle.GetTaggedValue(), key.GetTaggedValue(), value.GetTaggedValue(), receiver.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     bool booleanTrapResult = trapResult.ToBoolean();
     // 11. ReturnIfAbrupt(booleanTrapResult).
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -675,13 +676,13 @@ bool JSProxy::DeleteProperty(JSThread *thread, const JSHandle<JSProxy> &proxy, const JSHandle<JSTaggedValue> &key)
     // 9. Let booleanTrapResult be ToBoolean(Call(trap, handler, «target, P»)).
     JSHandle<JSTaggedValue> newTgt(thread, JSTaggedValue::Undefined());
     JSHandle<JSTaggedValue> handlerTag(thread, proxy->GetHandler());
-    const int32_t argsLength = 2;  // 2: target and key
+    const uint32_t argsLength = 2;  // 2: target and key
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerTag, undefined, argsLength);
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     info->SetCallArg(targetHandle.GetTaggedValue(), key.GetTaggedValue());
     JSTaggedValue trapResult = JSFunction::Call(info);
-
+    RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
     bool booleanTrapResult = trapResult.ToBoolean();
     // 11. ReturnIfAbrupt(booleanTrapResult).
     RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false);
@@ -856,6 +857,72 @@ JSHandle<TaggedArray> JSProxy::OwnPropertyKeys(JSThread *thread, const JSHandle<JSProxy> &proxy)
     return trapRes;
 }
 
+JSHandle<TaggedArray> JSProxy::GetAllPropertyKeys(JSThread *thread, const JSHandle<JSProxy> &proxy, uint32_t filter)
+{
+    const GlobalEnvConstants *globalConst = thread->GlobalConstants();
+
+    JSTaggedValue handler = proxy->GetHandler();
+    if (handler.IsNull()) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "OwnPropertyKeys: handler is null",
+                                    JSHandle<TaggedArray>(thread, JSTaggedValue::Exception()));
+    }
+
+    ASSERT(handler.IsECMAObject());
+    JSHandle<JSTaggedValue> targetHandle(thread, proxy->GetTarget());
+
+    JSHandle<JSTaggedValue> key = globalConst->GetHandledOwnKeysString();
+    JSHandle<JSTaggedValue> handlerHandle(thread, handler);
+    JSHandle<JSTaggedValue> trap(JSObject::GetMethod(thread, handlerHandle, key));
+
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(TaggedArray, thread);
+
+    if (trap->IsUndefined()) {
+        return JSTaggedValue::GetAllPropertyKeys(thread, targetHandle, filter);
+    }
+
+    JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
+    EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, trap, handlerHandle, undefined, 1);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(TaggedArray, thread);
+    info->SetCallArg(targetHandle.GetTaggedValue());
+    JSTaggedValue res = JSFunction::Call(info);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(TaggedArray, thread);
+    JSHandle<JSTaggedValue> trapResArr(thread, res);
+
+    JSHandle<TaggedArray> trapRes(
+        JSObject::CreateListFromArrayLike<ElementTypes::STRING_AND_SYMBOL>(thread, trapResArr));
+    JSHandle<TaggedArray> ownKeys = JSTaggedValue::GetOwnPropertyKeys(thread, targetHandle);
+    JSHandle<TaggedArray> reciveArray = JSTaggedValue::GetAllPropertyKeys(thread, targetHandle, filter);
+
+    uint32_t trapResLength = trapRes->GetLength();
+    uint32_t ownKeysLength = ownKeys->GetLength();
+    uint32_t reciveArrayLength = reciveArray->GetLength();
+    uint32_t newArrayLength = reciveArrayLength + trapResLength - ownKeysLength;
+
+    JSHandle<TaggedArray> resArray = thread->GetEcmaVM()->GetFactory()->NewTaggedArray(newArrayLength);
+
+    uint32_t elementIndex = 0;
+    if (filter & NATIVE_KEY_SKIP_SYMBOLS) {
+        for (uint32_t index = 0; index < reciveArrayLength; index++) {
+            if (!ownKeys->Get(index).IsSymbol()) {
+                resArray->Set(thread, elementIndex, reciveArray->Get(index));
+                elementIndex++;
+            }
+        }
+        return resArray;
+    }
+
+    for (uint32_t i = 0; i < trapResLength; i++) {
+        resArray->Set(thread, i, trapRes->Get(i));
+    }
+
+    uint32_t index = ownKeysLength;
+    for (uint32_t j = 0; j < reciveArrayLength - ownKeysLength; j++) {
+        resArray->Set(thread, trapResLength + j, reciveArray->Get(index));
+        index++;
+    }
+    return resArray;
+}
+
 // ES6 9.5.13 [[Call]] (thisArgument, argumentsList)
 JSTaggedValue JSProxy::CallInternal(EcmaRuntimeCallInfo *info)
 {
@@ -953,7 +1020,7 @@ JSTaggedValue JSProxy::ConstructInternal(EcmaRuntimeCallInfo *info)
     // step 8 ~ 9 Call(trap, handler, «target, argArray, newTarget »).
     JSHandle<JSTaggedValue> newTarget(info->GetNewTarget());
-    const int32_t argsLength = 3;  // 3: «target, argArray, newTarget »
+    const uint32_t argsLength = 3;  // 3: «target, argArray, newTarget »
     JSHandle<JSTaggedValue> undefined = globalConst->GetHandledUndefined();
     EcmaRuntimeCallInfo *runtimeInfo =
         EcmaInterpreter::NewRuntimeCallInfo(thread, method, handler, undefined, argsLength);
diff --git a/ecmascript/js_proxy.h b/ecmascript/js_proxy.h
index 6745a2f15161b1e052e9890376ca845a76df9acd..15cad4bb1cb22a0cf6df33a0d2d2b2a4a11ea216 100644
--- a/ecmascript/js_proxy.h
+++ b/ecmascript/js_proxy.h
@@ -67,6 +67,8 @@ public:
     // ES6 9.5.12 [[OwnPropertyKeys]] ()
     static JSHandle<TaggedArray> OwnPropertyKeys(JSThread *thread, const JSHandle<JSProxy> &proxy);
 
+    static JSHandle<TaggedArray> GetAllPropertyKeys(JSThread *thread, const JSHandle<JSProxy> &proxy, uint32_t filter);
+
     void SetCallable(bool callable) const
     {
         GetClass()->SetCallable(callable);
@@ -89,12 +91,17 @@ public:
     static constexpr size_t TARGET_OFFSET = ECMAObject::SIZE;
     ACCESSORS(Target, TARGET_OFFSET, HANDLER_OFFSET)
     ACCESSORS(Handler, HANDLER_OFFSET, METHOD_OFFSET)
-    ACCESSORS(Method, METHOD_OFFSET, LAST_OFFSET)
+    ACCESSORS(Method, METHOD_OFFSET, BIT_FIELD_OFFSET)
+    ACCESSORS_BIT_FIELD(BitField, BIT_FIELD_OFFSET, LAST_OFFSET)
     DEFINE_ALIGN_SIZE(LAST_OFFSET);
 
+    // define BitField
+    static constexpr size_t IS_REVOKED_BITS = 1;
+    FIRST_BIT_FIELD(BitField, IsRevoked, bool, IS_REVOKED_BITS)
+
     DECL_DUMP()
 
-    DECL_VISIT_OBJECT(TARGET_OFFSET, LAST_OFFSET)
+    DECL_VISIT_OBJECT(TARGET_OFFSET, BIT_FIELD_OFFSET)
 };
 }  // namespace panda::ecmascript
diff --git a/ecmascript/js_relative_time_format.cpp b/ecmascript/js_relative_time_format.cpp
index 992209fbddd02c17f5ba2c0905cafe3b611682c8..8821aa2fcb31260710c87703ac4afff05b1cce3c 100644
--- a/ecmascript/js_relative_time_format.cpp
+++ b/ecmascript/js_relative_time_format.cpp
@@ -14,6 +14,7 @@
  */
 
 #include "ecmascript/js_relative_time_format.h"
+#include "ecmascript/object_factory-inl.h"
 
 #include "unicode/decimfmt.h"
 #include "unicode/numfmt.h"
@@ -88,6 +89,7 @@ JSHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::InitializeRelativeTimeFormat(JSThread *thread,
 
     // 12. Let locale be r.[[Locale]].
     JSHandle<EcmaString> localeStr = intl::LocaleHelper::ToLanguageTag(thread, icuLocale);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSRelativeTimeFormat, thread);
 
     // 13. Set relativeTimeFormat.[[Locale]] to locale.
     relativeTimeFormat->SetLocale(thread, localeStr.GetTaggedValue());
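Review note: besides the new `GetAllPropertyKeys` trap path, js_proxy.h repurposes the tail of the object layout for a one-bit `IsRevoked` flag via `ACCESSORS_BIT_FIELD`/`FIRST_BIT_FIELD`. A hand-written sketch of the accessor pair such a macro expands to — the class and mask below are illustrative, not the generated code:

```cpp
#include <cassert>
#include <cstdint>

// Roughly what FIRST_BIT_FIELD(BitField, IsRevoked, bool, 1) provides.
class ProxyBits {
public:
    bool IsRevoked() const { return (bitField_ & IS_REVOKED_MASK) != 0; }
    void SetIsRevoked(bool v)
    {
        bitField_ = v ? (bitField_ | IS_REVOKED_MASK) : (bitField_ & ~IS_REVOKED_MASK);
    }

private:
    static constexpr uint32_t IS_REVOKED_MASK = 1U << 0;  // IS_REVOKED_BITS = 1
    uint32_t bitField_ = 0;
};

int main()
{
    ProxyBits p;
    assert(!p.IsRevoked());
    p.SetIsRevoked(true);
    assert(p.IsRevoked());
    return 0;
}
```

Note that `DECL_VISIT_OBJECT` now stops at `BIT_FIELD_OFFSET`, so the GC no longer scans the raw bitfield word as if it were a tagged slot.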
diff --git a/ecmascript/js_runtime_options.cpp b/ecmascript/js_runtime_options.cpp
index 5a7cc3ab5721957a47050639b6ba707890802d05..d2e0e88d95a9ba4263b5e3b18a79c7a4c1ce3dfe 100644
--- a/ecmascript/js_runtime_options.cpp
+++ b/ecmascript/js_runtime_options.cpp
@@ -76,12 +76,14 @@ const std::string PUBLIC_API HELP_OPTION_MSG =
     "--enable-force-gc: Enable force gc when allocating object. Default: 'true'\n"
    "--enable-ic: Switch of inline cache. Default: 'true'\n"
    "--enable-runtime-stat: Enable statistics of runtime state. Default: 'false'\n"
+    "--compiler-opt-array-bounds-check-elimination: Enable Index Check elimination. Default: 'false'\n"
    "--compiler-opt-type-lowering: Enable all type optimization pass for aot compiler. Default: 'true'\n"
    "--compiler-opt-early-elimination: Enable EarlyElimination for aot compiler. Default: 'true'\n"
    "--compiler-opt-later-elimination: Enable LaterElimination for aot compiler. Default: 'true'\n"
    "--compiler-opt-value-numbering: Enable ValueNumbering for aot compiler. Default: 'true'\n"
    "--compiler-opt-inlining: Enable inlining function for aot compiler: Default: 'true'\n"
    "--compiler-opt-pgotype: Enable pgo type for aot compiler: Default: 'true'\n"
+    "--compiler-opt-track-field: Enable track field for aot compiler: Default: 'false'\n"
    "--entry-point: Full name of entrypoint function. Default: '_GLOBAL::func_main_0'\n"
    "--force-full-gc: If true trigger full gc, else trigger semi and old gc. Default: 'true'\n"
    "--framework-abc-file: Snapshot file. Default: 'strip.native.min.abc'\n"
@@ -137,10 +139,15 @@ const std::string PUBLIC_API HELP_OPTION_MSG =
     "                      Format:--compile-skip-methods=record1:m1,m2,record2:m3\n"
     "--target-compiler-mode The compilation mode at the device side, including partial, full and none."
     " Default: ''\n"
-    "--hap-path The path of the app hap. Default: ''\n"
-    "--hap-abc-offset The offset of the abc file in app hap. Default: '0'\n"
-    "--hap-abc-size The size of the abc file in app hap. Default: '0'\n"
-    "--compiler-no-check Enable remove checks for aot compiler. Default: 'false'\n\n";
+    "--hap-path(Deprecated) The path of the app hap. Default: ''\n"
+    "--hap-abc-offset(Deprecated) The offset of the abc file in app hap. Default: '0'\n"
+    "--hap-abc-size(Deprecated) The size of the abc file in app hap. Default: '0'\n"
+    "--compiler-fast-compile Disable some time-consuming pass. Default: 'true'\n"
+    "--compiler-no-check Enable remove checks for aot compiler. Default: 'false'\n"
+    "--compiler-opt-loop-peeling: Enable loop peeling for aot compiler: Default: 'false'\n"
+    "--compiler-pkg-info Specify the package json info for ark aot compiler\n"
+    "--compiler-external-pkg-info Specify the external package json info for ark aot compiler\n"
+    "--compiler-opt-array-onheap-check: Enable TypedArray on heap check for aot compiler: Default: 'false'\n\n";
 
 bool JSRuntimeOptions::ParseCommand(const int argc, const char **argv)
 {
@@ -170,12 +177,15 @@ bool JSRuntimeOptions::ParseCommand(const int argc, const char **argv)
         {"enable-force-gc", required_argument, nullptr, OPTION_ENABLE_FORCE_GC},
         {"enable-ic", required_argument, nullptr, OPTION_ENABLE_IC},
         {"enable-runtime-stat", required_argument, nullptr, OPTION_ENABLE_RUNTIME_STAT},
+        {"compiler-opt-array-bounds-check-elimination", required_argument, nullptr,
+            OPTION_COMPILER_OPT_ARRAY_BOUNDS_CHECK_ELIMINATION},
         {"compiler-opt-type-lowering", required_argument, nullptr, OPTION_COMPILER_OPT_TYPE_LOWERING},
         {"compiler-opt-early-elimination", required_argument, nullptr, OPTION_COMPILER_OPT_EARLY_ELIMINATION},
         {"compiler-opt-later-elimination", required_argument, nullptr, OPTION_COMPILER_OPT_LATER_ELIMINATION},
         {"compiler-opt-value-numbering", required_argument, nullptr, OPTION_COMPILER_OPT_VALUE_NUMBERING},
         {"compiler-opt-inlining", required_argument, nullptr, OPTION_COMPILER_OPT_INLINING},
         {"compiler-opt-pgotype", required_argument, nullptr, OPTION_COMPILER_OPT_PGOTYPE},
+        {"compiler-opt-track-field", required_argument, nullptr, OPTION_COMPILER_OPT_TRACK_FIELD},
         {"entry-point", required_argument, nullptr, OPTION_ENTRY_POINT},
         {"force-full-gc", required_argument, nullptr, OPTION_FORCE_FULL_GC},
         {"gc-thread-num", required_argument, nullptr, OPTION_GC_THREADNUM},
@@ -215,6 +225,11 @@ bool JSRuntimeOptions::ParseCommand(const int argc, const char **argv)
         {"hap-abc-offset", required_argument, nullptr, OPTION_HAP_ABC_OFFSET},
         {"hap-abc-size", required_argument, nullptr, OPTION_HAP_ABC_SIZE},
         {"compiler-no-check", required_argument, nullptr, OPTION_COMPILER_NOCHECK},
+        {"compiler-fast-compile", required_argument, nullptr, OPTION_FAST_AOT_COMPILE_MODE},
+        {"compiler-opt-loop-peeling", required_argument, nullptr, OPTION_COMPILER_OPT_LOOP_PEELING},
+        {"compiler-opt-array-onheap-check", required_argument, nullptr, OPTION_COMPILER_OPT_ON_HEAP_CHECK},
+        {"compiler-pkg-info", required_argument, nullptr, OPTION_COMPILER_PKG_INFO},
+        {"compiler-external-pkg-info", required_argument, nullptr, OPTION_COMPILER_EXTERNAL_PKG_INFO},
         {nullptr, 0, nullptr, 0},
     };
 
@@ -610,6 +625,14 @@ bool JSRuntimeOptions::ParseCommand(const int argc, const char **argv)
                     return false;
                 }
                 break;
+            case OPTION_COMPILER_OPT_ARRAY_BOUNDS_CHECK_ELIMINATION:
+                ret = ParseBoolParam(&argBool);
+                if (ret) {
+                    SetEnableArrayBoundsCheckElimination(argBool);
+                } else {
+                    return false;
+                }
+                break;
             case OPTION_COMPILER_OPT_EARLY_ELIMINATION:
                 ret = ParseBoolParam(&argBool);
                 if (ret) {
@@ -650,6 +673,14 @@ bool JSRuntimeOptions::ParseCommand(const int argc, const char **argv)
                     return false;
                 }
                 break;
+            case OPTION_COMPILER_OPT_TRACK_FIELD:
+                ret = ParseBoolParam(&argBool);
+                if (ret) {
+                    SetEnableOptTrackField(argBool);
+                } else {
+                    return false;
+                }
+                break;
             case OPTION_COMPILER_OPT_GLOBAL_TYPEINFER:
                 ret = ParseBoolParam(&argBool);
                 if (ret) {
@@ -699,6 +730,35 @@ bool JSRuntimeOptions::ParseCommand(const int argc, const char **argv)
                 }
                 SetCompilerNoCheck(argBool);
                 break;
+            case OPTION_FAST_AOT_COMPILE_MODE:
+                ret = ParseBoolParam(&argBool);
+                if (!ret) {
+                    return false;
+                }
+                SetFastAOTCompileMode(argBool);
+                break;
+            case OPTION_COMPILER_OPT_LOOP_PEELING:
+                ret = ParseBoolParam(&argBool);
+                if (ret) {
+                    SetEnableOptLoopPeeling(argBool);
+                } else {
+                    return false;
+                }
+                break;
+            case OPTION_COMPILER_OPT_ON_HEAP_CHECK:
+                ret = ParseBoolParam(&argBool);
+                if (ret) {
+                    SetEnableOptOnHeapCheck(argBool);
+                } else {
+                    return false;
+                }
+                break;
+            case OPTION_COMPILER_PKG_INFO:
+                SetCompilerPkgJsonInfo(optarg);
+                break;
+            case OPTION_COMPILER_EXTERNAL_PKG_INFO:
+                SetCompilerExternalPkgJsonInfo(optarg);
+                break;
             default:
                 LOG_ECMA(ERROR) << "Invalid option\n";
                 return false;
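Review note: every new flag rides on the same `getopt_long` table plus `switch` dispatch that `ParseCommand` already uses; boolean flags go through `ParseBoolParam`, while the two pkg-info options store `optarg` verbatim. A stripped-down sketch of that pattern — the option names come from the patch, but the enum values and handling below are local stand-ins:

```cpp
#include <getopt.h>
#include <cstdio>
#include <cstring>

enum { OPT_PKG_INFO = 1000, OPT_FAST_COMPILE };

int main(int argc, char **argv)
{
    static const struct option longOptions[] = {
        {"compiler-pkg-info", required_argument, nullptr, OPT_PKG_INFO},
        {"compiler-fast-compile", required_argument, nullptr, OPT_FAST_COMPILE},
        {nullptr, 0, nullptr, 0},
    };
    int opt;
    while ((opt = getopt_long(argc, argv, "", longOptions, nullptr)) != -1) {
        switch (opt) {
            case OPT_PKG_INFO:
                printf("pkg info: %s\n", optarg);  // string option: taken verbatim
                break;
            case OPT_FAST_COMPILE:
                // boolean option: the engine parses "true"/"false" via ParseBoolParam
                printf("fast compile: %s\n", strcmp(optarg, "true") == 0 ? "on" : "off");
                break;
            default:
                return 1;  // unknown option: mirrors ParseCommand's failure path
        }
    }
    return 0;
}
```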
diff --git a/ecmascript/js_runtime_options.h b/ecmascript/js_runtime_options.h
index 1086bd451db530a3ec15a1ed47060d43c6ec94dd..4ac2126a727a4bf3d2cc43684ef6a068b480d01d 100644
--- a/ecmascript/js_runtime_options.h
+++ b/ecmascript/js_runtime_options.h
@@ -119,6 +119,7 @@ enum CommandValues {
     OPTION_COMPILER_OPT_VALUE_NUMBERING,
     OPTION_COMPILER_OPT_INLINING,
     OPTION_COMPILER_OPT_PGOTYPE,
+    OPTION_COMPILER_OPT_TRACK_FIELD,
     OPTION_COMPILER_OPT_GLOBAL_TYPEINFER,
     OPTION_HELP,
     OPTION_COMPILER_PGO_PROFILER_PATH,
@@ -133,7 +134,13 @@ enum CommandValues {
     OPTION_HAP_PATH,
     OPTION_HAP_ABC_OFFSET,
     OPTION_HAP_ABC_SIZE,
-    OPTION_COMPILER_NOCHECK
+    OPTION_COMPILER_NOCHECK,
+    OPTION_FAST_AOT_COMPILE_MODE,
+    OPTION_COMPILER_OPT_LOOP_PEELING,
+    OPTION_COMPILER_OPT_ON_HEAP_CHECK,
+    OPTION_COMPILER_PKG_INFO,
+    OPTION_COMPILER_EXTERNAL_PKG_INFO,
+    OPTION_COMPILER_OPT_ARRAY_BOUNDS_CHECK_ELIMINATION,
 };
 
 class PUBLIC_API JSRuntimeOptions {
@@ -186,6 +193,26 @@ public:
         stubFile_ = std::move(value);
     }
 
+    void SetCompilerPkgJsonInfo(std::string pkgJsonInfo)
+    {
+        compilerPkgInfo_ = std::move(pkgJsonInfo);
+    }
+
+    const std::string &GetCompilerPkgJsonInfo() const
+    {
+        return compilerPkgInfo_;
+    }
+
+    void SetCompilerExternalPkgJsonInfo(std::string pkgJsonInfo)
+    {
+        compilerExternalPkgInfo_ = std::move(pkgJsonInfo);
+    }
+
+    const std::string &GetCompilerExternalPkgJsonInfo() const
+    {
+        return compilerExternalPkgInfo_;
+    }
+
     bool WasStubFileSet() const
     {
         return WasOptionSet(OPTION_STUB_FILE);
@@ -897,6 +924,16 @@ public:
         enableTypeLowering_ = value;
     }
 
+    bool IsEnableArrayBoundsCheckElimination() const
+    {
+        return enableArrayBoundsCheckElimination_;
+    }
+
+    void SetEnableArrayBoundsCheckElimination(bool value)
+    {
+        enableArrayBoundsCheckElimination_ = value;
+    }
+
     bool IsEnableTypeLowering() const
     {
         return enableTypeLowering_;
@@ -952,6 +989,16 @@ public:
         return enableOptPGOType_;
     }
 
+    void SetEnableOptTrackField(bool value)
+    {
+        enableOptTrackField_ = value;
+    }
+
+    bool IsEnableOptTrackField() const
+    {
+        return enableOptTrackField_;
+    }
+
     void SetEnableGlobalTypeInfer(bool value)
     {
         enableGlobalTypeInfer_ = value;
@@ -1136,6 +1183,36 @@ public:
 
     void SetOptionsForTargetCompilation();
 
+    void SetFastAOTCompileMode(bool value)
+    {
+        fastAOTCompileMode_ = value;
+    }
+
+    bool GetFastAOTCompileMode() const
+    {
+        return fastAOTCompileMode_;
+    }
+
+    void SetEnableOptLoopPeeling(bool value)
+    {
+        enableOptLoopPeeling_ = value;
+    }
+
+    bool IsEnableOptLoopPeeling() const
+    {
+        return enableOptLoopPeeling_;
+    }
+
+    void SetEnableOptOnHeapCheck(bool value)
+    {
+        enableOptOnHeapCheck_ = value;
+    }
+
+    bool IsEnableOptOnHeapCheck() const
+    {
+        return enableOptOnHeapCheck_;
+    }
+
 private:
     static bool StartsWith(const std::string &haystack, const std::string &needle)
     {
@@ -1156,15 +1233,17 @@ private:
     bool enableArkTools_ {true};
     std::string stubFile_ {"stub.an"};
-    bool enableForceGc_ {true};
-    bool forceFullGc_ {true};
+    std::string compilerPkgInfo_ {};
+    std::string compilerExternalPkgInfo_ {};
+    bool enableForceGc_ {false};
+    bool forceFullGc_ {false};
     int arkProperties_ = GetDefaultProperties();
     std::string arkBundleName_ = {""};
     uint32_t gcThreadNum_ {7};  // 7: default thread num
     uint32_t longPauseTime_ {40};  // 40: default pause time
     std::string aotOutputFile_ {""};
     std::string targetTriple_ {TARGET_X64};
-    uint32_t asmOptLevel_ {3};  // 3: default opt level
+    uint32_t asmOptLevel_ {2};
     uint32_t relocationMode_ {2};  // 2: default relocation mode
     uint32_t maxNonmovableSpaceCapacity_ {4_MB};
     bool enableAsmInterpreter_ {true};
@@ -1198,13 +1277,15 @@ private:
     double typeThreshold_ {-1};
     std::string entryPoint_ {"_GLOBAL::func_main_0"};
     bool mergeAbc_ {false};
+    bool enableArrayBoundsCheckElimination_ {false};
     bool enableTypeLowering_ {true};
     bool enableEarlyElimination_ {true};
     bool enableLaterElimination_ {true};
     bool enableValueNumbering_ {true};
-    bool enableOptInlining_ {false};
+    bool enableOptInlining_ {true};
     bool enableOptPGOType_ {true};
     bool enableGlobalTypeInfer_ {false};
+    bool enableOptTrackField_ {true};
     uint32_t compilerModuleMethods_ {100};
     uint64_t wasSet_ {0};
     bool enablePrintExecuteTime_ {false};
@@ -1221,12 +1302,15 @@ private:
     std::string compilerSelectMethods_ {""};
     std::string compilerSkipMethods_ {""};
     bool traceInline_ {false};
-    size_t maxInlineBytecodes_ {25};
+    size_t maxInlineBytecodes_ {45};
     std::string targetCompilerMode_ {""};
     std::string hapPath_ {""};
     uint32_t hapAbcOffset_ {0};
     uint32_t hapAbcSize_ {0};
     bool compilerNoCheck_ {false};
+    bool fastAOTCompileMode_ {false};
+    bool enableOptLoopPeeling_ {true};
+    bool enableOptOnHeapCheck_ {true};
 };
 }  // namespace panda::ecmascript
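Review note: two things worth flagging in the header. First, several defaults change silently (`enableForceGc_`/`forceFullGc_` flip to `false`, `asmOptLevel_` drops to 2, `enableOptInlining_` turns on, `maxInlineBytecodes_` grows to 45). Second, the new string options use the usual take-by-value/`std::move` setter shape. A minimal sketch of that shape, with `Options` as a local stand-in for the engine class:

```cpp
#include <cassert>
#include <string>
#include <utility>

class Options {
public:
    // Taking by value lets callers pass either an lvalue (copied once) or an
    // rvalue (moved all the way through); the member then steals the buffer.
    void SetCompilerPkgJsonInfo(std::string pkgJsonInfo) { pkgInfo_ = std::move(pkgJsonInfo); }
    const std::string &GetCompilerPkgJsonInfo() const { return pkgInfo_; }

private:
    std::string pkgInfo_;
};

int main()
{
    Options opts;
    opts.SetCompilerPkgJsonInfo(R"({"name":"demo"})");  // JSON passed through verbatim
    assert(opts.GetCompilerPkgJsonInfo().find("demo") != std::string::npos);
    return 0;
}
```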
diff --git a/ecmascript/js_serializer.cpp b/ecmascript/js_serializer.cpp
index b1f218fd719b20aa79df1b4ea6a418f8cb21f4ac..64cf662c58677022137673b361cc2688fb927361 100644
--- a/ecmascript/js_serializer.cpp
+++ b/ecmascript/js_serializer.cpp
@@ -26,6 +26,7 @@
 #include "ecmascript/base/array_helper.h"
 #include "ecmascript/base/typed_array_helper-inl.h"
 #include "ecmascript/base/typed_array_helper.h"
+#include "ecmascript/ecma_string-inl.h"
 #include "ecmascript/global_env.h"
 #include "ecmascript/js_array.h"
 #include "ecmascript/js_arraybuffer.h"
@@ -34,6 +35,7 @@
 #include "ecmascript/js_set.h"
 #include "ecmascript/js_typed_array.h"
 #include "ecmascript/linked_hash_table.h"
+#include "ecmascript/object_factory-inl.h"
 #include "ecmascript/shared_mm/shared_mm.h"
 
 #include "securec.h"
@@ -329,6 +331,7 @@ bool JSSerializer::WriteTaggedObject(const JSHandle<JSTaggedValue> &value)
         case JSType::LINE_STRING:
         case JSType::CONSTANT_STRING:
         case JSType::TREE_STRING:
+        case JSType::SLICED_STRING:
             return WriteEcmaString(value);
         case JSType::JS_OBJECT:
             return WritePlainObject(value);
@@ -485,6 +488,12 @@ bool JSSerializer::WriteMethod(const JSHandle<JSTaggedValue> &value)
         if (!WriteString(desc)) {
             return false;
         }
+        if (method->IsAotWithCallField()) {
+            uintptr_t codeEntry = method->GetCodeEntryOrLiteral();
+            if (!WriteRawData(&codeEntry, sizeof(uintptr_t))) {
+                return false;
+            }
+        }
     }
     return true;
 }
@@ -581,7 +590,7 @@ bool JSSerializer::WriteJSArray(const JSHandle<JSTaggedValue> &value)
     if (!WritePlainObject(value)) {
         return false;
     }
-    uint32_t arrayLength = static_cast<uint32_t>(array->GetLength().GetInt());
+    uint32_t arrayLength = array->GetLength();
     if (!WriteInt(arrayLength)) {
         return false;
     }
@@ -591,12 +600,12 @@ bool JSSerializer::WriteJSArray(const JSHandle<JSTaggedValue> &value)
 bool JSSerializer::WriteEcmaString(const JSHandle<JSTaggedValue> &value)
 {
     JSHandle<EcmaString> strHandle = JSHandle<EcmaString>::Cast(value);
-    auto string = JSHandle<EcmaString>(thread_, EcmaStringAccessor::Flatten(thread_->GetEcmaVM(), strHandle));
+    auto string = EcmaStringAccessor::FlattenAllString(thread_->GetEcmaVM(), strHandle);
     if (!WriteType(SerializationUID::ECMASTRING)) {
         return false;
     }
-    size_t length = EcmaStringAccessor(string).GetLength();
+    size_t length = string.GetLength();
     if (!WriteInt(static_cast<int32_t>(length))) {
         return false;
     }
@@ -605,19 +614,19 @@ bool JSSerializer::WriteEcmaString(const JSHandle<JSTaggedValue> &value)
         return true;
     }
 
-    bool isUtf8 = EcmaStringAccessor(string).IsUtf8();
+    bool isUtf8 = string.IsUtf8();
     // write utf encode flag
     if (!WriteBoolean(isUtf8)) {
         return false;
     }
     if (isUtf8) {
-        const uint8_t *data = EcmaStringAccessor(string).GetDataUtf8();
+        const uint8_t *data = string.GetDataUtf8();
         const uint8_t strEnd = '\0';
         if (!WriteRawData(data, length) || !WriteRawData(&strEnd, sizeof(uint8_t))) {
             return false;
         }
     } else {
-        const uint16_t *data = EcmaStringAccessor(string).GetDataUtf16();
+        const uint16_t *data = string.GetDataUtf16();
         if (!WriteRawData(data, length * sizeof(uint16_t))) {
             return false;
         }
@@ -634,13 +643,13 @@ bool JSSerializer::WriteJSMap(const JSHandle<JSTaggedValue> &value)
     if (!WritePlainObject(value)) {
         return false;
     }
-    int size = map->GetSize();
-    if (!WriteInt(size)) {
+    uint32_t size = map->GetSize();
+    if (!WriteInt(static_cast<int32_t>(size))) {
         return false;
     }
     JSMutableHandle<JSTaggedValue> key(thread_, JSTaggedValue::Undefined());
     JSMutableHandle<JSTaggedValue> val(thread_, JSTaggedValue::Undefined());
-    for (int i = 0; i < size; i++) {
+    for (uint32_t i = 0; i < size; i++) {
         key.Update(map->GetKey(i));
         if (!SerializeJSTaggedValue(key)) {
             return false;
@@ -662,12 +671,12 @@ bool JSSerializer::WriteJSSet(const JSHandle<JSTaggedValue> &value)
     if (!WritePlainObject(value)) {
         return false;
     }
-    int size = set->GetSize();
-    if (!WriteInt(size)) {
+    uint32_t size = set->GetSize();
+    if (!WriteInt(static_cast<int32_t>(size))) {
         return false;
     }
     JSMutableHandle<JSTaggedValue> val(thread_, JSTaggedValue::Undefined());
-    for (int i = 0; i < size; i++) {
+    for (uint32_t i = 0; i < size; i++) {
         val.Update(set->GetValue(i));
         if (!SerializeJSTaggedValue(val)) {
             return false;
@@ -797,15 +806,15 @@ bool JSSerializer::WriteJSArrayBuffer(const JSHandle<JSTaggedValue> &value)
     }
     bool shared = arrayBuffer->GetShared();
     bool transfer = transferDataSet_.find(static_cast<uintptr_t>(value.GetTaggedType())) != transferDataSet_.end();
-    if (shared && transfer) {
-        LOG_ECMA(ERROR) << "Can't transfer a shared JSArrayBuffer";
-        return false;
-    }
     if (shared) {
+        if (transfer) {
+            LOG_ECMA(ERROR) << "Can't transfer a shared JSArrayBuffer";
+            return false;
+        }
         if (!WriteType(SerializationUID::JS_SHARED_ARRAY_BUFFER)) {
             return false;
         }
-    } else if (transfer) {
+    } else if (defaultTransfer_ || transfer) {
         if (!WriteType(SerializationUID::JS_TRANSFER_ARRAY_BUFFER)) {
             return false;
         }
@@ -836,7 +845,7 @@ bool JSSerializer::WriteJSArrayBuffer(const JSHandle<JSTaggedValue> &value)
         if (!WriteRawData(&bufferAddr, sizeof(uint64_t))) {
             return false;
         }
-    } else if (transfer) {
+    } else if (defaultTransfer_ || transfer) {
         // Write Accessors(ArrayBufferData) which is a pointer to a Buffer
         if (!WriteJSNativePointer(np)) {
             return false;
@@ -1088,16 +1097,15 @@ bool JSDeserializer::ReadDouble(double *value)
 
 JSDeserializer::~JSDeserializer()
 {
     referenceMap_.clear();
-    free(begin_);
-    begin_ = nullptr;
 }
 
 JSHandle<JSTaggedValue> JSDeserializer::Deserialize()
 {
     size_t maxSerializerSize = thread_->GetEcmaVM()->GetEcmaParamConfiguration().GetMaxJSSerializerSize();
-    uint8_t dataSize = end_ - begin_;
+    size_t dataSize = end_ - begin_;
     if (dataSize > maxSerializerSize) {
-        LOG_ECMA(ERROR) << "The Serialization data size exceed limit Size";
+        LOG_ECMA(ERROR) << "The Serialization data size has exceed limit Size, current size is: " << dataSize <<
+            " max size is: " << maxSerializerSize;
         return JSHandle<JSTaggedValue>();
     }
     JSHandle<JSTaggedValue> res = DeserializeJSTaggedValue();
@@ -1329,6 +1337,17 @@ JSHandle<JSTaggedValue> JSDeserializer::ReadMethod()
     JSHandle<ConstantPool> constPool =
         thread_->GetCurrentEcmaContext()->FindOrCreateConstPool(jsPandaFile.get(), method->GetMethodId());
     method->SetConstantPool(thread_, constPool.GetTaggedValue());
+
+    if (method->IsAotWithCallField()) {
+        uintptr_t codeEntry;
+        if (!ReadNativePointer(&codeEntry)) {
+            return JSHandle<JSTaggedValue>();
+        }
+        method->SetCodeEntryAndMarkAOT(codeEntry);
+
+        uint8_t deoptThreshold = thread_->GetEcmaVM()->GetJSOptions().GetDeoptThreshold();
+        method->SetDeoptThreshold(deoptThreshold);
+    }
     return methodTag;
 }
 
@@ -1445,7 +1464,7 @@ JSHandle<JSTaggedValue> JSDeserializer::ReadJSArray()
     if (!JudgeType(SerializationUID::INT32) || !ReadInt(&arrLength)) {
         return JSHandle<JSTaggedValue>();
     }
-    jsArray->SetLength(thread_, JSTaggedValue(arrLength));
+    jsArray->SetLength(arrLength);
     return arrayTag;
 }
 
@@ -1974,11 +1993,14 @@ bool JSDeserializer::ReadBoolean(bool *value)
 bool Serializer::WriteValue(
     JSThread *thread, const JSHandle<JSTaggedValue> &value, const JSHandle<JSTaggedValue> &transfer)
 {
+    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Serializer::WriteValue");
     if (data_ != nullptr) {
         return false;
     }
     data_.reset(new SerializationData);
-    if (!PrepareTransfer(thread, transfer)) {
+    if (value.GetTaggedValue() == transfer.GetTaggedValue()) {
+        valueSerializer_.SetDefaultTransfer();
+    } else if (!PrepareTransfer(thread, transfer)) {
        return false;
    }
    if (!valueSerializer_.SerializeJSTaggedValue(value)) {
@@ -2025,6 +2047,7 @@ bool Serializer::PrepareTransfer(JSThread *thread, const JSHandle<JSTaggedValue> &transfer)
 
 JSHandle<JSTaggedValue> Deserializer::ReadValue()
 {
+    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Deserializer::ReadValue");
     return valueDeserializer_.Deserialize();
 }
 }  // namespace panda::ecmascript
diff --git a/ecmascript/js_serializer.h b/ecmascript/js_serializer.h
index 52ed82f74c262496da492f42ae54a63564c9c884..e153768e7476cafd689f3490d3f567ce03709a96 100644
--- a/ecmascript/js_serializer.h
+++ b/ecmascript/js_serializer.h
@@ -115,6 +115,11 @@ public:
     // Return pointer to the buffer and its length, should not use this Serializer anymore after Release
     std::pair<uint8_t *, size_t> ReleaseBuffer();
 
+    void SetDefaultTransfer()
+    {
+        defaultTransfer_ = true;
+    }
+
 private:
     bool WriteJSFunction(const JSHandle<JSTaggedValue> &value);
     bool WriteMethod(const JSHandle<JSTaggedValue> &value);
@@ -165,6 +170,7 @@ private:
     CUnorderedMap<uintptr_t, uint64_t> referenceMap_;
     CUnorderedSet<uintptr_t> transferDataSet_;
     uint64_t objectId_ = 0;
+    bool defaultTransfer_ = false;
 };
 
 class JSDeserializer {
@@ -279,13 +285,17 @@ private:
 class Deserializer {
 public:
     Deserializer(JSThread *thread, SerializationData *data, void *hint)
-        : valueDeserializer_(thread, data->GetData(), data->GetSize(), hint) {}
-    ~Deserializer() = default;
+        : valueDeserializer_(thread, data->GetData(), data->GetSize(), hint), data_(data) {}
+    ~Deserializer()
+    {
+        data_.reset(nullptr);
+    }
 
     JSHandle<JSTaggedValue> ReadValue();
 
 private:
     ecmascript::JSDeserializer valueDeserializer_;
+    std::unique_ptr<SerializationData> data_;
 
     NO_COPY_SEMANTIC(Deserializer);
 };
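Review note: the serializer changes also shift buffer ownership. `JSDeserializer`'s destructor no longer `free()`s `begin_`; instead `Deserializer` owns the whole `SerializationData` through a `std::unique_ptr`, so the buffer is released exactly once when the wrapper dies. The same idea in isolation — types below are stand-ins, not the engine classes:

```cpp
#include <cstdint>
#include <memory>
#include <vector>

struct SerializationData {
    std::vector<uint8_t> buffer;  // stands in for the raw serialized bytes
};

class Deserializer {
public:
    explicit Deserializer(SerializationData *data) : data_(data) {}
    // No manual free(): unique_ptr releases data_ exactly once, even if the
    // read path returns early or throws.
private:
    std::unique_ptr<SerializationData> data_;
};

int main()
{
    Deserializer d(new SerializationData{{1, 2, 3}});
    return 0;  // data_ destroyed here, no double free, no leak
}
```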
const +uint32_t JSSet::GetSize() const { return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->NumberOfElements(); } JSTaggedValue JSSet::GetValue(int entry) const { - ASSERT_PRINT(entry >= 0 && entry < GetSize(), "entry must be non-negative integer less than capacity"); + ASSERT_PRINT(entry >= 0 && static_cast(entry) < GetSize(), "entry must be non-negative integer less than capacity"); return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->GetValue(entry); } } // namespace panda::ecmascript diff --git a/ecmascript/js_set.h b/ecmascript/js_set.h index 50d5ee8a8a58523ed71918c1a0a88449d9153dcc..d02372ec4023037650095fd960028f578647280b 100644 --- a/ecmascript/js_set.h +++ b/ecmascript/js_set.h @@ -32,7 +32,7 @@ public: bool Has(JSTaggedValue value) const; - int GetSize() const; + uint32_t GetSize() const; JSTaggedValue GetValue(int entry) const; diff --git a/ecmascript/js_stable_array.cpp b/ecmascript/js_stable_array.cpp index 94f85eb807d4ed665c11391c47047eafddb90a41..16d181854776acbc3a49da733fd5bc80e9410696 100644 --- a/ecmascript/js_stable_array.cpp +++ b/ecmascript/js_stable_array.cpp @@ -19,13 +19,17 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/base/typed_array_helper-inl.h" #include "ecmascript/builtins/builtins_arraybuffer.h" +#include "ecmascript/ecma_macros.h" #include "ecmascript/ecma_vm.h" +#include "ecmascript/ecma_string-inl.h" #include "ecmascript/global_env.h" #include "ecmascript/interpreter/fast_runtime_stub-inl.h" #include "ecmascript/js_array.h" #include "ecmascript/js_tagged_value-inl.h" +#include "ecmascript/js_tagged_value.h" #include "ecmascript/object_factory.h" #include "ecmascript/tagged_array.h" +#include "macros.h" namespace panda::ecmascript { using TypedArrayHelper = base::TypedArrayHelper; @@ -40,7 +44,7 @@ JSTaggedValue JSStableArray::Push(JSHandle receiver, EcmaRuntimeCallInf TaggedArray *elements = TaggedArray::Cast(receiver->GetElements().GetTaggedObject()); if (newLength > elements->GetLength()) { - elements = *JSObject::GrowElementsCapacity(thread, JSHandle::Cast(receiver), newLength); + elements = *JSObject::GrowElementsCapacity(thread, JSHandle::Cast(receiver), newLength, true); } for (uint32_t k = 0; k < argc; k++) { @@ -124,6 +128,7 @@ JSTaggedValue JSStableArray::Splice(JSHandle receiver, EcmaRuntimeCallI toKey.Update(JSTaggedValue(k)); if (newArrayHandle->IsJSProxy()) { toKey.Update(JSTaggedValue::ToString(thread, toKey).GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValue); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -219,12 +224,14 @@ JSTaggedValue JSStableArray::Join(JSHandle receiver, EcmaRuntimeCallInf int sep = ','; uint32_t sepLength = 1; JSHandle sepStringHandle; + auto context = thread->GetCurrentEcmaContext(); + JSHandle receiverValue = JSHandle::Cast(receiver); if (!sepHandle->IsUndefined()) { if (sepHandle->IsString()) { sepStringHandle = JSHandle::Cast(sepHandle); } else { sepStringHandle = JSTaggedValue::ToString(thread, sepHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, receiverValue); } if (EcmaStringAccessor(sepStringHandle).IsUtf8() && EcmaStringAccessor(sepStringHandle).GetLength() == 1) { // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic) @@ -239,6 +246,7 @@ JSTaggedValue JSStableArray::Join(JSHandle receiver, EcmaRuntimeCallInf } if (length == 0) { const GlobalEnvConstants *globalConst = thread->GlobalConstants(); + 
context->JoinStackPopFastPath(receiverValue); return globalConst->GetEmptyString(); } TaggedArray *elements = TaggedArray::Cast(receiver->GetElements().GetTaggedObject()); @@ -261,7 +269,7 @@ JSTaggedValue JSStableArray::Join(JSHandle receiver, EcmaRuntimeCallInf if (!element.IsString()) { elementHandle.Update(element); JSHandle strElement = JSTaggedValue::ToString(thread, elementHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, receiverValue); element = strElement.GetTaggedValue(); elements = TaggedArray::Cast(receiver->GetElements().GetTaggedObject()); } @@ -298,6 +306,7 @@ JSTaggedValue JSStableArray::Join(JSHandle receiver, EcmaRuntimeCallInf } ASSERT_PRINT( isOneByte == EcmaStringAccessor::CanBeCompressed(newString), "isOneByte does not match the real value!"); + context->JoinStackPopFastPath(receiverValue); return JSTaggedValue(newString); } @@ -339,6 +348,40 @@ JSTaggedValue JSStableArray::HandleFindIndexOfStable(JSThread *thread, JSHandle< return callResult; } +JSTaggedValue JSStableArray::HandleFindLastIndexOfStable(JSThread *thread, JSHandle thisObjHandle, + JSHandle callbackFnHandle, + JSHandle thisArgHandle, int64_t &k) +{ + JSHandle thisObjVal(thisObjHandle); + JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); + JSTaggedValue callResult = base::BuiltinsBase::GetTaggedBoolean(false); + const int32_t argsLength = 3; // 3: «kValue, k, O» + JSMutableHandle array(thread, thisObjHandle->GetElements()); + JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); + while (k >= 0) { + // Elements of thisObjHandle may change. + array.Update(thisObjHandle->GetElements()); + kValue.Update(array->Get(k)); + EcmaRuntimeCallInfo *info = + EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + info->SetCallArg(kValue.GetTaggedValue(), JSTaggedValue(k), thisObjVal.GetTaggedValue()); + callResult = JSFunction::Call(info); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, callResult); + if (callResult.ToBoolean()) { + return callResult; + } + k--; + if (base::ArrayHelper::GetArrayLength(thread, thisObjVal) - 1 < k) { + return callResult; + } + if (!thisObjVal->IsStableJSArray(thread)) { + return callResult; + } + } + return callResult; +} + JSTaggedValue JSStableArray::HandleEveryOfStable(JSThread *thread, JSHandle thisObjHandle, JSHandle callbackFnHandle, JSHandle thisArgHandle, uint32_t &k) @@ -357,6 +400,7 @@ JSTaggedValue JSStableArray::HandleEveryOfStable(JSThread *thread, JSHandleSetCallArg(kValue.GetTaggedValue(), JSTaggedValue(k), thisObjVal.GetTaggedValue()); callResult = JSFunction::Call(info); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -368,6 +412,7 @@ JSTaggedValue JSStableArray::HandleEveryOfStable(JSThread *thread, JSHandleSetCallArg(kValue1.GetTaggedValue(), JSTaggedValue(k), thisObjVal.GetTaggedValue()); callResult = JSFunction::Call(info); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -385,13 +430,12 @@ JSTaggedValue JSStableArray::HandleEveryOfStable(JSThread *thread, JSHandle thisObjHandle, JSHandle callbackFnHandle, - JSHandle thisArgHandle, uint32_t &k) + JSHandle thisArgHandle, uint32_t len, uint32_t &k) { JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSHandle thisObjVal(thisObjHandle); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); JSMutableHandle array(thread, thisObjHandle->GetElements()); - uint64_t len = static_cast(base::ArrayHelper::GetArrayLength(thread, 
thisObjVal)); const int32_t argsLength = 3; // 3: «kValue, k, O» JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); if (array->GetLength() <= k) { @@ -431,33 +475,205 @@ JSTaggedValue JSStableArray::HandleforEachOfStable(JSThread *thread, JSHandle receiver, - JSHandle searchElement, uint32_t from, uint32_t len) +template +JSTaggedValue JSStableArray::FindRawData(IndexOfContext &ctx, Predicate &&predicate) { - JSHandle elements(thread, JSHandle::Cast(receiver)->GetElements()); - while (from < len) { - JSTaggedValue value = JSTaggedValue::Hole(); - if (elements->GetLength() > from) { - value = elements->Get(from); - } - if (!value.IsUndefined() && !value.IsHole()) { - if (JSTaggedValue::StrictEqual(searchElement.GetTaggedValue(), value)) { - return JSTaggedValue(from); + DISALLOW_GARBAGE_COLLECTION; + JSHandle elements(ctx.thread, JSHandle::Cast(ctx.receiver)->GetElements()); + // Note: GC is guaranteed not to happen since no new object is created during the searching process. + JSTaggedType *data = elements->GetData(); + JSTaggedType *first = data + ctx.fromIndex; + JSTaggedType *last = data + ctx.length; + // Note: for stable arrays, elements->GetLength() returns the CAPACITY, not the actual length! + + JSMutableHandle indexHandle(ctx.thread, JSTaggedValue::Undefined()); + for (JSTaggedType *cur = first; cur < last; ++cur) { + if (LIKELY(*cur != JSTaggedValue::VALUE_HOLE)) { + if (UNLIKELY(std::invoke(predicate, *cur))) { + return base::BuiltinsBase::GetTaggedInt64(cur - data); } - } else { - bool exist = JSTaggedValue::HasProperty(thread, receiver, from); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (exist) { - JSHandle kValueHandle = JSArray::FastGetPropertyByValue(thread, receiver, from); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (JSTaggedValue::StrictEqual(thread, searchElement, kValueHandle)) { - return JSTaggedValue(from); - } + continue; + } + // Fallback slow path + indexHandle.Update(base::BuiltinsBase::GetTaggedInt64(cur - data)); + bool found = base::ArrayHelper::ElementIsStrictEqualTo( + ctx.thread, ctx.receiver, indexHandle, ctx.searchElement); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(ctx.thread); + if (found) { + return indexHandle.GetTaggedValue(); + } + } + return JSTaggedValue(-1); // Not found +} + +template +JSTaggedValue JSStableArray::FindLastRawData(IndexOfContext &ctx, Predicate &&predicate) +{ + DISALLOW_GARBAGE_COLLECTION; + JSHandle elements(ctx.thread, JSHandle::Cast(ctx.receiver)->GetElements()); + // Note: GC is guaranteed not to happen since no new object is created during the searching process.
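// [editor's sketch] Both FindRawData and FindLastRawData walk the raw backing store and
// test each JSTaggedType word with the caller-supplied predicate; only hole slots fall
// back to the ElementIsStrictEqualTo slow path. A hypothetical predicate matching the
// tagged int 42 would look like:
//     auto pred = [](JSTaggedType cur) { return cur == JSTaggedValue(42).GetRawData(); };
// FindRawDataDispatch(IndexOfType::IndexOf, ctx, pred) then yields the first matching
// index as a tagged integer, or JSTaggedValue(-1) when nothing matches.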
+ JSTaggedType *data = elements->GetData(); + JSTaggedType *beforeFirst = data - 1; + JSTaggedType *beforeLast = data + ctx.fromIndex; + + JSMutableHandle indexHandle(ctx.thread, JSTaggedValue::Undefined()); + for (JSTaggedType *cur = beforeLast; cur > beforeFirst; --cur) { + if (LIKELY(*cur != JSTaggedValue::VALUE_HOLE)) { + if (UNLIKELY(std::invoke(predicate, *cur))) { + return base::BuiltinsBase::GetTaggedInt64(cur - data); } + continue; + } + // Fallback slow path + indexHandle.Update(base::BuiltinsBase::GetTaggedInt64(cur - data)); + bool found = base::ArrayHelper::ElementIsStrictEqualTo( + ctx.thread, ctx.receiver, indexHandle, ctx.searchElement); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(ctx.thread); + if (found) { + return indexHandle.GetTaggedValue(); + } + } + return JSTaggedValue(-1); // Not found +} + +template +JSTaggedValue JSStableArray::FindRawDataDispatch(IndexOfType type, IndexOfContext &ctx, Predicate &&predicate) +{ + switch (type) { + case IndexOfType::IndexOf: + return FindRawData(ctx, std::forward(predicate)); + case IndexOfType::LastIndexOf: + return FindLastRawData(ctx, std::forward(predicate)); + default: + UNREACHABLE(); + } +} + +// Zeros need special judgment: int 0, +0.0 and -0.0 must all compare equal +JSTaggedValue JSStableArray::IndexOfZero(IndexOfType type, IndexOfContext &ctx) +{ + return FindRawDataDispatch(type, ctx, [](JSTaggedType cur) { + return JSTaggedValue(cur).IsExactlyZero(); + }); +} + +JSTaggedValue JSStableArray::IndexOfInt32(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement) +{ + ASSERT(searchElement.IsInt()); + int32_t untagged = searchElement.GetInt(); + if (untagged == 0) { + return IndexOfZero(type, ctx); + } + JSTaggedType targetInt32 = searchElement.GetRawData(); + JSTaggedType targetDouble = JSTaggedValue(static_cast(untagged)).GetRawData(); + return FindRawDataDispatch(type, ctx, [targetInt32, targetDouble](JSTaggedType cur) { + return cur == targetInt32 || cur == targetDouble; + }); +} + +JSTaggedValue JSStableArray::IndexOfDouble(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement) +{ + ASSERT(searchElement.IsDouble()); + double untagged = searchElement.GetDouble(); + if (std::isnan(untagged)) { + return JSTaggedValue(-1); + } + if (untagged == 0.0) { + return IndexOfZero(type, ctx); + } + JSTaggedType targetDouble = searchElement.GetRawData(); + if (searchElement.WithinInt32()) { + JSTaggedType targetInt32 = JSTaggedValue(static_cast(untagged)).GetRawData(); + return FindRawDataDispatch(type, ctx, [targetDouble, targetInt32](JSTaggedType cur) { + return cur == targetDouble || cur == targetInt32; + }); + } else { + return FindRawDataDispatch(type, ctx, [targetDouble](JSTaggedType cur) { + return cur == targetDouble; + }); + } +} + +JSTaggedValue JSStableArray::IndexOfObjectAddress(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement) +{ + ASSERT(searchElement.IsObject()); + JSTaggedType targetAddress = searchElement.GetRawData(); + return FindRawDataDispatch(type, ctx, [targetAddress](JSTaggedType cur) { + return cur == targetAddress; + }); +} + +JSTaggedValue JSStableArray::IndexOfString(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement) +{ + ASSERT(searchElement.IsString()); + JSTaggedType targetAddress = searchElement.GetRawData(); + return FindRawDataDispatch(type, ctx, [searchElement, targetAddress](JSTaggedType cur) { + if (targetAddress == cur) { + return true; + } + JSTaggedValue curValue(cur); + if (!curValue.IsString()) { + return false; + } + return JSTaggedValue::StringCompare( 
EcmaString::Cast(curValue.GetTaggedObject()), + EcmaString::Cast(searchElement.GetTaggedObject())); + }); +} + +JSTaggedValue JSStableArray::IndexOfBigInt(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement) +{ + ASSERT(searchElement.IsBigInt()); + JSTaggedType targetAddress = searchElement.GetRawData(); + return FindRawDataDispatch(type, ctx, [searchElement, targetAddress](JSTaggedType cur) { + if (cur == targetAddress) { + return true; + } + JSTaggedValue curValue(cur); + if (!curValue.IsBigInt()) { + return false; } - from++; + return BigInt::Equal(curValue, searchElement); + }); +} + +JSTaggedValue JSStableArray::IndexOfDispatch(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement) +{ + if (searchElement.IsInt()) { + return IndexOfInt32(type, ctx, searchElement); + } else if (searchElement.IsDouble()) { + return IndexOfDouble(type, ctx, searchElement); + } else if (searchElement.IsString()) { + return IndexOfString(type, ctx, searchElement); + } else if (searchElement.IsBigInt()) { + return IndexOfBigInt(type, ctx, searchElement); + } else { + return IndexOfObjectAddress(type, ctx, searchElement); } - return JSTaggedValue(-1); +} + +JSTaggedValue JSStableArray::IndexOf(JSThread *thread, JSHandle receiver, + JSHandle searchElement, uint32_t from, uint32_t len) +{ + IndexOfContext ctx; + ctx.thread = thread; + ctx.receiver = receiver; + ctx.searchElement = searchElement; + ctx.fromIndex = from; + ctx.length = len; + return IndexOfDispatch(IndexOfType::IndexOf, ctx, searchElement.GetTaggedValue()); +} + +JSTaggedValue JSStableArray::LastIndexOf(JSThread *thread, JSHandle receiver, + JSHandle searchElement, uint32_t from, uint32_t len) +{ + IndexOfContext ctx; + ctx.thread = thread; + ctx.receiver = receiver; + ctx.searchElement = searchElement; + ctx.fromIndex = from; + ctx.length = len; + return IndexOfDispatch(IndexOfType::LastIndexOf, ctx, searchElement.GetTaggedValue()); } JSTaggedValue JSStableArray::Filter(JSHandle newArrayHandle, JSHandle thisObjHandle, @@ -545,53 +761,19 @@ JSTaggedValue JSStableArray::Map(JSHandle newArrayHandle, JSHandle thisObjHandle, - JSHandle thisHandle, int64_t &lower, uint32_t len) +JSTaggedValue JSStableArray::Reverse(JSThread *thread, JSHandle thisObjHandle, uint32_t len) { - JSHandle thisObjVal(thisObjHandle); if (thisObjHandle->IsJSArray()) { JSArray::CheckAndCopyArray(thread, JSHandle::Cast(thisObjHandle)); } - JSHandle array(thread, thisObjHandle->GetElements()); - JSMutableHandle lowerP(thread, JSTaggedValue::Undefined()); - JSMutableHandle upperP(thread, JSTaggedValue::Undefined()); - JSMutableHandle lowerValueHandle(thread, JSTaggedValue::Undefined()); - JSMutableHandle upperValueHandle(thread, JSTaggedValue::Undefined()); - int64_t middle = std::floor(len / 2); - while (lower != middle) { - if (array->GetLength() != len) { - break; - } - int64_t upper = static_cast(len) - lower - 1; - lowerP.Update(JSTaggedValue(lower)); - upperP.Update(JSTaggedValue(upper)); - bool lowerExists = (thisHandle->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, lowerP)); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (lowerExists) { - lowerValueHandle.Update(array->Get(lower)); - } - bool upperExists = (thisHandle->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, upperP)); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (upperExists) { - upperValueHandle.Update(array->Get(upper)); - } - if (lowerExists && upperExists) { - array->Set(thread, lower, upperValueHandle.GetTaggedValue()); - 
array->Set(thread, upper, lowerValueHandle.GetTaggedValue()); - } else if (upperExists) { - array->Set(thread, lower, upperValueHandle.GetTaggedValue()); - JSTaggedValue::SetProperty(thread, thisObjVal, lowerP, upperValueHandle, true); - JSTaggedValue::DeletePropertyOrThrow(thread, thisObjVal, upperP); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - } else if (lowerExists) { - array->Set(thread, upper, lowerValueHandle.GetTaggedValue()); - JSTaggedValue::SetProperty(thread, thisObjVal, upperP, lowerValueHandle, true); - JSTaggedValue::DeletePropertyOrThrow(thread, thisObjVal, lowerP); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - } - lower++; - } - return base::BuiltinsBase::GetTaggedDouble(true); + DISALLOW_GARBAGE_COLLECTION; + JSHandle elements(thread, thisObjHandle->GetElements()); + JSTaggedType *data = elements->GetData(); + ASSERT_PRINT(len <= elements->GetLength(), "Length exceeds capacity of contiguous array container."); + // Reversing raw data in-place is OK since no object is created or deleted, + // only pointer swaps for objects and value swaps for primitive types. + std::reverse(data, data + len); + return thisObjHandle.GetTaggedValue(); // Returns the address of thisValue } JSTaggedValue JSStableArray::Concat(JSThread *thread, JSHandle newArrayHandle, @@ -618,7 +800,7 @@ JSTaggedValue JSStableArray::Concat(JSThread *thread, JSHandle newArra } JSTaggedValue JSStableArray::FastCopyFromArrayToTypedArray(JSThread *thread, JSHandle &targetArray, - DataViewType targetType, uint32_t targetOffset, + DataViewType targetType, uint64_t targetOffset, uint32_t srcLength, JSHandle &elements) { JSHandle targetBuffer(thread, targetArray->GetViewedArrayBufferOrByteArray()); @@ -658,4 +840,200 @@ JSTaggedValue JSStableArray::FastCopyFromArrayToTypedArray(JSThread *thread, JSH } return JSTaggedValue::Undefined(); } + +JSTaggedValue JSStableArray::At(JSHandle receiver, EcmaRuntimeCallInfo *argv) +{ + JSThread *thread = argv->GetThread(); + uint32_t thisLen = receiver->GetArrayLength(); + if (thisLen == 0) { + return JSTaggedValue::Undefined(); + } + JSTaggedNumber index = JSTaggedValue::ToInteger(thread, base::BuiltinsBase::GetCallArg(argv, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t relativeIndex = index.GetNumber(); + int64_t k = 0; + if (relativeIndex >= 0) { + k = relativeIndex; + } else { + k = static_cast(thisLen) + relativeIndex; + } + if (k < 0 || k >= thisLen) { + return JSTaggedValue::Undefined(); + } + + TaggedArray *elements = TaggedArray::Cast(receiver->GetElements().GetTaggedObject()); + auto result = JSTaggedValue::Hole(); + result = elements->Get(k); + return result.IsHole() ? 
JSTaggedValue::Undefined() : result; +} + +JSTaggedValue JSStableArray::With(JSThread *thread, JSHandle receiver, + int64_t insertCount, int64_t index, JSHandle value) +{ + JSHandle thisObjHandle(receiver); + JSTaggedValue newArray = JSArray::ArraySpeciesCreate(thread, thisObjHandle, + JSTaggedNumber(static_cast(insertCount))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + JSHandle thisObjVal(thisObjHandle); + TaggedArray *srcElements = TaggedArray::Cast(thisObjHandle->GetElements().GetTaggedObject()); + JSMutableHandle srcElementsHandle(thread, srcElements); + TaggedArray *destElements = TaggedArray::Cast(newArrayHandle->GetElements().GetTaggedObject()); + + if (insertCount > destElements->GetLength()) { + destElements = *JSObject::GrowElementsCapacity(thread, newArrayHandle, insertCount); + } + + for (uint32_t idx = 0; idx < insertCount; idx++) { + if (idx == index) { + destElements->Set(thread, idx, value.GetTaggedValue()); + } else { + auto kValue = srcElementsHandle->Get(idx); + if (kValue.IsHole()) { + destElements->Set(thread, idx, JSTaggedValue::Undefined()); + } else { + destElements->Set(thread, idx, kValue); + } + } + } + JSHandle::Cast(newArrayHandle)->SetArrayLength(thread, insertCount); + + return newArrayHandle.GetTaggedValue(); +} + +JSTaggedValue JSStableArray::ToSpliced(JSHandle receiver, EcmaRuntimeCallInfo *argv, + int64_t argc, int64_t actualStart, int64_t actualSkipCount, int64_t insertCount) +{ + JSThread *thread = argv->GetThread(); + + JSHandle thisObjHandle(receiver); + JSTaggedValue newArray = JSArray::ArraySpeciesCreate(thread, thisObjHandle, + JSTaggedNumber(static_cast(insertCount))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + JSHandle thisObjVal(thisObjHandle); + TaggedArray *srcElements = TaggedArray::Cast(thisObjHandle->GetElements().GetTaggedObject()); + JSMutableHandle srcElementsHandle(thread, srcElements); + TaggedArray *destElements = TaggedArray::Cast(newArrayHandle->GetElements().GetTaggedObject()); + + if (insertCount > destElements->GetLength()) { + destElements = *JSObject::GrowElementsCapacity(thread, newArrayHandle, insertCount); + } + + uint32_t i = 0, r = actualStart + actualSkipCount; + + for (uint32_t idx = 0; idx < actualStart; idx++, i++) { + auto kValue = srcElementsHandle->Get(idx); + if (kValue.IsHole()) { + destElements->Set(thread, i, JSTaggedValue::Undefined()); + } else { + destElements->Set(thread, i, kValue); + } + } + + for (uint32_t pos = 2; pos < argc; ++pos) { // 2: there are two arguments before the inserted items. 
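// [editor's note] ToSpliced assembles the result in three phases: indices [0, actualStart)
// are copied from the source, the call arguments (positions 2..argc-1) are inserted next,
// and the tail is copied starting at r = actualStart + actualSkipCount. Worked example
// (hypothetical values): [1, 2, 3, 4].toSpliced(1, 2, 'x') copies [1], inserts 'x', then
// copies [4], yielding [1, 'x', 4]; insertCount, the length of the new array, is 3 here.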
+ auto element = base::BuiltinsBase::GetCallArg(argv, pos); + destElements->Set(thread, i, element.GetTaggedValue()); + ++i; + } + + while (i < insertCount) { + auto kValue = srcElementsHandle->Get(r); + if (kValue.IsHole()) { + destElements->Set(thread, i, JSTaggedValue::Undefined()); + } else { + destElements->Set(thread, i, kValue); + } + ++i; + ++r; + } + + JSHandle::Cast(newArrayHandle)->SetArrayLength(thread, insertCount); + + return newArrayHandle.GetTaggedValue(); +} + +JSTaggedValue JSStableArray::ToReversed(JSThread *thread, JSHandle receiver, + int64_t insertCount) +{ + JSHandle thisObjHandle(receiver); + JSTaggedValue newArray = JSArray::ArraySpeciesCreate(thread, thisObjHandle, + JSTaggedNumber(static_cast(insertCount))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + JSHandle thisObjVal(thisObjHandle); + TaggedArray *srcElements = TaggedArray::Cast(thisObjHandle->GetElements().GetTaggedObject()); + JSMutableHandle srcElementsHandle(thread, srcElements); + TaggedArray *destElements = TaggedArray::Cast(newArrayHandle->GetElements().GetTaggedObject()); + + if (insertCount > destElements->GetLength()) { + destElements = *JSObject::GrowElementsCapacity(thread, newArrayHandle, insertCount); + } + + for (uint32_t idx = 0; idx < insertCount; idx++) { + auto kValue = srcElementsHandle->Get(idx); + if (kValue.IsHole()) { + destElements->Set(thread, insertCount - idx - 1, JSTaggedValue::Undefined()); + } else { + destElements->Set(thread, insertCount - idx - 1, kValue); + } + } + JSHandle::Cast(newArrayHandle)->SetArrayLength(thread, insertCount); + + return newArrayHandle.GetTaggedValue(); +} + +JSTaggedValue JSStableArray::Reduce(JSThread *thread, JSHandle thisObjHandle, + JSHandle callbackFnHandle, + JSMutableHandle accumulator, int64_t &k, int64_t &len) +{ + const GlobalEnvConstants *globalConst = thread->GlobalConstants(); + JSMutableHandle array(thread, thisObjHandle->GetElements()); + JSHandle thisObjVal(thisObjHandle); + JSTaggedValue callResult = JSTaggedValue::Undefined(); + while (k < len) { + array.Update(thisObjHandle->GetElements()); + JSTaggedValue kValue(array->Get(k)); + if (!kValue.IsHole()) { + JSHandle undefined = globalConst->GetHandledUndefined(); + const uint32_t argsLength = 4; // 4: «accumulator, kValue, k, O» + EcmaRuntimeCallInfo *info = + EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + info->SetCallArg(accumulator.GetTaggedValue(), kValue, JSTaggedValue(k), + thisObjVal.GetTaggedValue()); + callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (array->GetLength() < len) { + len = array->GetLength(); + } + accumulator.Update(callResult); + } + k++; + if (!thisObjVal->IsStableJSArray(thread)) { + break; + } + } + return base::BuiltinsBase::GetTaggedDouble(true); +} + +JSTaggedValue JSStableArray::Slice(JSThread *thread, JSHandle thisObjHandle, + int64_t &k, int64_t &count) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle srcElements(thread, thisObjHandle->GetElements()); + int64_t len = static_cast(srcElements->GetLength()); + int64_t oldLen; + if (len > k + count) { + oldLen = count; + } else { + oldLen = len - k; + } + JSHandle dstElements = factory->NewAndCopyTaggedArray(srcElements, count, oldLen, k); + return factory->NewJSStableArrayWithElements(dstElements).GetTaggedValue(); +} + } // namespace panda::ecmascript diff --git 
a/ecmascript/js_stable_array.h b/ecmascript/js_stable_array.h index 48da8cb0ff57d460aeaf7a7f811238b708347d64..c56b3891b490125e87853e83d7068fc34fb6ee1e 100644 --- a/ecmascript/js_stable_array.h +++ b/ecmascript/js_stable_array.h @@ -34,25 +34,68 @@ public: static JSTaggedValue HandleFindIndexOfStable(JSThread *thread, JSHandle thisObjHandle, JSHandle callbackFnHandle, JSHandle thisArgHandle, uint32_t &k); + static JSTaggedValue HandleFindLastIndexOfStable(JSThread *thread, JSHandle thisObjHandle, + JSHandle callbackFnHandle, + JSHandle thisArgHandle, int64_t &k); static JSTaggedValue HandleEveryOfStable(JSThread *thread, JSHandle thisObjHandle, JSHandle callbackFnHandle, JSHandle thisArgHandle, uint32_t &k); static JSTaggedValue HandleforEachOfStable(JSThread *thread, JSHandle thisObjHandle, JSHandle callbackFnHandle, - JSHandle thisArgHandle, uint32_t &k); + JSHandle thisArgHandle, uint32_t len, uint32_t &k); static JSTaggedValue IndexOf(JSThread *thread, JSHandle receiver, JSHandle searchElement, uint32_t from, uint32_t len); + static JSTaggedValue LastIndexOf(JSThread *thread, JSHandle receiver, + JSHandle searchElement, uint32_t from, uint32_t len); static JSTaggedValue Filter(JSHandle newArrayHandle, JSHandle thisObjHandle, EcmaRuntimeCallInfo *argv, uint32_t &k, uint32_t &toIndex); static JSTaggedValue Map(JSHandle newArrayHandle, JSHandle thisObjHandle, EcmaRuntimeCallInfo *argv, uint32_t &k, uint32_t len); - static JSTaggedValue Reverse(JSThread *thread, JSHandle thisObjHandle, - JSHandle thisHandle, int64_t &lower, uint32_t len); + static JSTaggedValue Reverse(JSThread *thread, JSHandle thisObjHandle, uint32_t len); static JSTaggedValue Concat(JSThread *thread, JSHandle newArrayHandle, JSHandle thisObjHandle, int64_t &k, int64_t &n); static JSTaggedValue FastCopyFromArrayToTypedArray(JSThread *thread, JSHandle &target, - DataViewType targetType, uint32_t targetOffset, + DataViewType targetType, uint64_t targetOffset, uint32_t srcLength, JSHandle &elements); + static JSTaggedValue At(JSHandle receiver, EcmaRuntimeCallInfo *argv); + static JSTaggedValue With(JSThread *thread, JSHandle receiver, + int64_t insertCount, int64_t index, JSHandle value); + static JSTaggedValue ToSpliced(JSHandle receiver, EcmaRuntimeCallInfo *argv, + int64_t argc, int64_t actualStart, int64_t actualSkipCount, int64_t insertCount); + static JSTaggedValue ToReversed(JSThread *thread, JSHandle receiver, int64_t insertCount); + static JSTaggedValue Reduce(JSThread *thread, JSHandle thisObjHandle, + JSHandle callbackFnHandle, + JSMutableHandle accumulator, int64_t &k, int64_t &len); + static JSTaggedValue Slice(JSThread *thread, JSHandle thisObjHandle, int64_t &k, int64_t &count); + +private: + enum class IndexOfType { + IndexOf, + LastIndexOf + }; + + struct IndexOfContext { + JSThread *thread; + JSHandle receiver; + JSHandle searchElement; + uint32_t fromIndex; + uint32_t length; + }; + + template + static JSTaggedValue FindRawData(IndexOfContext &ctx, Predicate &&predicate); + template + static JSTaggedValue FindLastRawData(IndexOfContext &ctx, Predicate &&predicate); + template + static JSTaggedValue FindRawDataDispatch(IndexOfType type, IndexOfContext &ctx, Predicate &&predicate); + + static JSTaggedValue IndexOfZero(IndexOfType type, IndexOfContext &ctx); + static JSTaggedValue IndexOfInt32(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement); + static JSTaggedValue IndexOfDouble(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement); + static JSTaggedValue 
IndexOfObjectAddress(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement); + static JSTaggedValue IndexOfString(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement); + static JSTaggedValue IndexOfBigInt(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement); + static JSTaggedValue IndexOfDispatch(IndexOfType type, IndexOfContext &ctx, JSTaggedValue searchElement); }; } // namespace panda::ecmascript #endif // ECMASCRIPT_JS_STABLE_ARRAY_H diff --git a/ecmascript/js_tagged_value-inl.h b/ecmascript/js_tagged_value-inl.h index 0d5be6ffa8a67d885388e16d3e275ddfda664201..231b85dd2c80c2a8bb3b111d4a5c994e16dd0f97 100644 --- a/ecmascript/js_tagged_value-inl.h +++ b/ecmascript/js_tagged_value-inl.h @@ -24,9 +24,7 @@ #include "ecmascript/base/string_helper.h" #include "ecmascript/ecma_macros.h" #include "ecmascript/ecma_runtime_call_info.h" -#include "ecmascript/ecma_string-inl.h" #include "ecmascript/js_bigint.h" -#include "ecmascript/js_hclass-inl.h" #include "ecmascript/js_object.h" #include "ecmascript/js_proxy.h" #include "ecmascript/js_symbol.h" @@ -305,6 +303,11 @@ inline bool JSTaggedValue::IsExtensible(JSThread *thread) const return IsHeapObject() && GetTaggedObject()->GetClass()->IsExtensible(); } +inline bool JSTaggedValue::IsExactlyZero() const +{ + return value_ == VALUE_ZERO || value_ == VALUE_POSITIVE_ZERO || value_ == VALUE_NEGATIVE_ZERO; +} + inline bool JSTaggedValue::IsClassConstructor() const { return IsHeapObject() && GetTaggedObject()->GetClass()->IsClassConstructor(); @@ -414,23 +417,17 @@ inline bool JSTaggedValue::StrictEqual(const JSTaggedValue &x, const JSTaggedVal if (x.IsInt() && y.IsInt()) { return StrictIntEquals(x.GetInt(), y.GetInt()); } - - if (x.IsDouble() && y.IsDouble()) { - return StrictNumberEquals(x.GetDouble(), y.GetDouble()); - } - if (x.IsNumber() && y.IsNumber()) { return StrictNumberEquals(x.GetNumber(), y.GetNumber()); } - + // Note: x == y must be put after number comparison + // in case of NaN (whose comparison result is always false even with another NaN) if (x == y) { return true; } - if (x.IsString() && y.IsString()) { return StringCompare(EcmaString::Cast(x.GetTaggedObject()), EcmaString::Cast(y.GetTaggedObject())); } - if (x.IsBigInt() && y.IsBigInt()) { return BigInt::Equal(x, y); } @@ -476,6 +473,11 @@ inline bool JSTaggedValue::IsTreeString() const return IsHeapObject() && GetTaggedObject()->GetClass()->IsTreeString(); } +inline bool JSTaggedValue::IsSlicedString() const +{ + return IsHeapObject() && GetTaggedObject()->GetClass()->IsSlicedString(); +} + inline bool JSTaggedValue::IsBigInt() const { return IsHeapObject() && GetTaggedObject()->GetClass()->IsBigInt(); @@ -491,6 +493,11 @@ inline bool JSTaggedValue::IsTaggedArray() const return IsHeapObject() && GetTaggedObject()->GetClass()->IsTaggedArray(); } +inline bool JSTaggedValue::IsDictionary() const +{ + return IsHeapObject() && GetTaggedObject()->GetClass()->IsDictionary(); +} + inline bool JSTaggedValue::IsByteArray() const { return IsHeapObject() && GetTaggedObject()->GetClass()->IsByteArray(); @@ -533,7 +540,13 @@ inline bool JSTaggedValue::IsJSNativePointer() const inline bool JSTaggedValue::CheckIsJSNativePointer() const { - return IsHeapObject() && !IsInvalidValue() && GetTaggedObject()->GetClass()->IsJSNativePointer(); + if (IsHeapObject() && !IsInvalidValue()) { + auto hclass = GetTaggedObject()->GetClass(); + if (hclass != nullptr) { + return hclass->IsJSNativePointer(); + } + } + return false; } inline bool JSTaggedValue::IsSymbol() 
const @@ -548,7 +561,13 @@ inline bool JSTaggedValue::IsJSProxy() const inline bool JSTaggedValue::CheckIsJSProxy() const { - return IsHeapObject() && !IsInvalidValue() && GetTaggedObject()->GetClass()->IsJSProxy(); + if (IsHeapObject() && !IsInvalidValue()) { + auto hclass = GetTaggedObject()->GetClass(); + if (hclass != nullptr) { + return hclass->IsJSProxy(); + } + } + return false; } inline bool JSTaggedValue::IsBoolean() const @@ -992,7 +1011,13 @@ inline bool JSTaggedValue::IsJSFunctionBase() const inline bool JSTaggedValue::CheckIsJSFunctionBase() const { - return IsHeapObject() && !IsInvalidValue() && GetTaggedObject()->GetClass()->IsJSFunctionBase(); + if (IsHeapObject() && !IsInvalidValue()) { + auto hclass = GetTaggedObject()->GetClass(); + if (hclass != nullptr) { + return hclass->IsJSFunctionBase(); + } + } + return false; } inline bool JSTaggedValue::IsBoundFunction() const diff --git a/ecmascript/js_tagged_value.cpp b/ecmascript/js_tagged_value.cpp index 9c3bcaa9fc4b171ce7845eff94c3758a7f8ec89d..4c2b1f943e0fea0695b82b94329dc778f23b31da 100644 --- a/ecmascript/js_tagged_value.cpp +++ b/ecmascript/js_tagged_value.cpp @@ -41,6 +41,7 @@ #include "ecmascript/module/js_module_namespace.h" #include "ecmascript/tagged_array.h" #include "ecmascript/object_factory.h" +#include "ecmascript/symbol_table.h" namespace panda::ecmascript { JSHandle GetTypeString(JSThread *thread, PreferredPrimitiveType type) @@ -387,6 +388,7 @@ JSTaggedValue JSTaggedValue::OrdinaryToPrimitive(JSThread *thread, const JSHandl keyString = globalConst->GetHandledValueOfString(); } JSHandle entryfunc = GetProperty(thread, tagged, keyString).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); if (entryfunc->IsCallable()) { JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = @@ -810,6 +812,16 @@ bool JSTaggedValue::SetPrototype(JSThread *thread, const JSHandle if (obj->IsSpecialContainer() || !obj->IsECMAObject()) { THROW_TYPE_ERROR_AND_RETURN(thread, "Can not set Prototype on Container or non ECMA Object", false); } + if (obj->IsJSFunction() && proto->IsJSFunction()) { + JSHandle objFunc = JSHandle::Cast(obj); + JSHandle protoFunc = JSHandle::Cast(proto); + JSTaggedValue objProtoOrHClass(objFunc->GetProtoOrHClass()); + JSTaggedValue protoOrHClass(protoFunc->GetProtoOrHClass()); + if (objProtoOrHClass.IsJSHClass() && protoOrHClass.IsJSHClass() && objProtoOrHClass != protoOrHClass) { + JSHandle cachedJSHClass = JSHandle(thread, objProtoOrHClass); + objFunc->SetProtoOrHClass(thread, cachedJSHClass->GetPrototype()); + } + } return JSObject::SetPrototype(thread, JSHandle(obj), proto); } @@ -824,6 +836,7 @@ JSTaggedValue JSTaggedValue::GetPrototype(JSThread *thread, const JSHandle(obj)); } + bool JSTaggedValue::PreventExtensions(JSThread *thread, const JSHandle &obj) { if (obj->IsJSProxy()) { @@ -853,11 +866,10 @@ JSHandle JSTaggedValue::GetOwnPropertyKeys(JSThread *thread, const } JSHandle JSTaggedValue::GetAllPropertyKeys(JSThread *thread, - const JSHandle &obj, uint32_t filter) + const JSHandle &obj, uint32_t filter) { if (obj->IsJSProxy()) { - LOG_ECMA(WARN) << "GetAllPropertyKeys do not support JSProxy yet"; - return thread->GetEcmaVM()->GetFactory()->EmptyArray(); + return JSProxy::GetAllPropertyKeys(thread, JSHandle(obj), filter); } if (obj->IsTypedArray()) { LOG_ECMA(WARN) << "GetAllPropertyKeys do not support TypedArray yet"; @@ -942,6 +954,25 @@ bool JSTaggedValue::GlobalHasOwnProperty(JSThread *thread, const JSHandle &tagged) +{ + // 1. 
If v is an Object, return true. + if (tagged->IsECMAObject()) { + return true; + } + // 2. If v is a Symbol and KeyForSymbol(v) is undefined, return true. + if (tagged->IsSymbol()) { + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + auto *table = env->GetRegisterSymbols().GetObject(); + JSTaggedValue key = table->FindSymbol(tagged.GetTaggedValue()); + if (key.IsUndefined()) { + return true; + } + } + // 3. Return false. + return false; +} + JSTaggedNumber JSTaggedValue::ToIndex(JSThread *thread, const JSHandle &tagged) { if (tagged->IsInt() && tagged->GetInt() >= 0) { @@ -1170,6 +1201,7 @@ bool JSTaggedValue::GetContainerProperty(JSThread *thread, const JSHandle JSTaggedValue::ToNumeric(JSThread *thread, JSHandle tagged) { // 1. Let primValue be ? ToPrimitive(value, number) @@ -1185,6 +1217,7 @@ JSHandle JSTaggedValue::ToNumeric(JSThread *thread, JSHandle value(thread, number); return value; } + OperationResult JSTaggedValue::GetJSAPIProperty(JSThread *thread, const JSHandle &obj, const JSHandle &key) { diff --git a/ecmascript/js_tagged_value.h b/ecmascript/js_tagged_value.h index afe68e793aa366c03794a6cda1a9cbadf04a899a..bb4d6b5e71841df67138600a873f972aa0e76e37 100644 --- a/ecmascript/js_tagged_value.h +++ b/ecmascript/js_tagged_value.h @@ -34,6 +34,7 @@ class PropertyDescriptor; class OperationResult; class EcmaString; class JSThread; +struct Reference; static constexpr double SAFE_NUMBER = 9007199254740991LL; @@ -112,6 +113,10 @@ public: static constexpr size_t INT_SIGN_BIT_OFFSET = 31; static constexpr size_t DOUBLE_ENCODE_OFFSET_BIT = 48; static constexpr JSTaggedType DOUBLE_ENCODE_OFFSET = 1ULL << DOUBLE_ENCODE_OFFSET_BIT; + // Tagged +0.0 = IEEE754 representation of +0.0 + offset + static constexpr JSTaggedType VALUE_POSITIVE_ZERO = 0x0000'0000'0000'0000uLL + DOUBLE_ENCODE_OFFSET; + // Tagged -0.0 = IEEE754 representation of -0.0 + offset + static constexpr JSTaggedType VALUE_NEGATIVE_ZERO = 0x8000'0000'0000'0000uLL + DOUBLE_ENCODE_OFFSET; static JSTaggedValue Cast(TaggedObject *object) { @@ -443,6 +448,7 @@ public: bool IsInteger() const; bool WithinInt32() const; bool IsZero() const; + bool IsExactlyZero() const; static bool IsPropertyKey(const JSHandle &key); static JSHandle RequireObjectCoercible(JSThread *thread, const JSHandle &tagged, const char *message = "RequireObjectCoercible throw Error"); @@ -458,6 +464,9 @@ public: // ES6 7.4 Operations on Iterator Objects static JSObject *CreateIterResultObject(JSThread *thread, const JSHandle &value, bool done); + // ECMAScript 2023 allow the use of most Symbols as keys in weak collections + static bool CanBeHeldWeakly(JSThread *thread, const JSHandle &tagged); + // ecma6 7.3 static OperationResult GetProperty(JSThread *thread, const JSHandle &obj, const JSHandle &key); @@ -513,8 +522,10 @@ public: bool IsLineString() const; bool IsConstantString() const; bool IsTreeString() const; + bool IsSlicedString() const; bool IsStringOrSymbol() const; bool IsTaggedArray() const; + bool IsDictionary() const; bool IsByteArray() const; bool IsConstantPool() const; bool IsAOTLiteralInfo() const; @@ -705,8 +716,7 @@ public: void DumpTaggedValue(std::ostream &os) const DUMP_API_ATTR; void Dump(std::ostream &os) const DUMP_API_ATTR; void D() const DUMP_API_ATTR; - void DumpForSnapshot(std::vector> &vec, - bool isVmMode = true) const; + void DumpForSnapshot(std::vector &vec, bool isVmMode = true) const; static void DV(JSTaggedType val) DUMP_API_ATTR; private: diff --git a/ecmascript/js_thread.cpp b/ecmascript/js_thread.cpp index 
f78f68a70e9247aa7908706f965f1cbf274d11e4..0cdb857b3b5a8427f7dd56812c444585353ccc07 100644 --- a/ecmascript/js_thread.cpp +++ b/ecmascript/js_thread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Huawei Device Co., Ltd. + * Copyright (c) 2021-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -25,6 +25,7 @@ #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) #include "ecmascript/dfx/cpu_profiler/cpu_profiler.h" #endif +#include "ecmascript/dfx/vm_thread_control.h" #include "ecmascript/ecma_global_storage.h" #include "ecmascript/ecma_param_configuration.h" #include "ecmascript/global_env_constants-inl.h" @@ -361,18 +362,22 @@ void JSThread::ShrinkHandleStorage(int prevIndex) GetCurrentEcmaContext()->ShrinkHandleStorage(prevIndex); } -void JSThread::NotifyStableArrayElementsGuardians(JSHandle receiver) +void JSThread::NotifyStableArrayElementsGuardians(JSHandle receiver, StableArrayChangeKind changeKind) { if (!glueData_.stableArrayElementsGuardians_) { return; } - if (!receiver->GetJSHClass()->IsPrototype()) { + if (!receiver->GetJSHClass()->IsPrototype() && !receiver->IsJSArray()) { return; } auto env = GetEcmaVM()->GetGlobalEnv(); if (receiver.GetTaggedValue() == env->GetObjectFunctionPrototype().GetTaggedValue() || receiver.GetTaggedValue() == env->GetArrayPrototype().GetTaggedValue()) { glueData_.stableArrayElementsGuardians_ = false; + return; + } + if (changeKind == StableArrayChangeKind::PROTO && receiver->IsJSArray()) { + glueData_.stableArrayElementsGuardians_ = false; } } @@ -443,8 +448,8 @@ bool JSThread::CheckSafepoint() bool gcTriggered = false; #ifndef NDEBUG if (vm_->GetJSOptions().EnableForceGC()) { - GetEcmaVM()->CollectGarbage(TriggerGCType::FULL_GC); - gcTriggered = true; +// GetEcmaVM()->CollectGarbage(TriggerGCType::FULL_GC); +// gcTriggered = true; } #endif if (IsMarkFinished() && GetEcmaVM()->GetHeap()->GetConcurrentMarker()->IsTriggeredConcurrentMark()) { @@ -657,4 +662,14 @@ const GlobalEnvConstants *JSThread::GetFirstGlobalConst() const { return contexts_[0]->GlobalConstants(); } + +bool JSThread::IsAllContextsInitialized() const +{ + for (auto item : contexts_) { + if (!item->IsInitialized()) { + return false; + } + } + return true; +} } // namespace panda::ecmascript diff --git a/ecmascript/js_thread.h b/ecmascript/js_thread.h index 6010e0ccf7891dcda20acff9e46f4e9292fd3f06..43c09df9322283f8060870d50798249520f70107 100644 --- a/ecmascript/js_thread.h +++ b/ecmascript/js_thread.h @@ -24,7 +24,7 @@ #include "ecmascript/compiler/common_stubs.h" #include "ecmascript/compiler/interpreter_stub.h" #include "ecmascript/compiler/rt_call_signature.h" -#include "ecmascript/dfx/vm_thread_control.h" +#include "ecmascript/elements.h" #include "ecmascript/frames.h" #include "ecmascript/global_env_constants.h" #include "ecmascript/mem/visitor.h" @@ -58,6 +58,8 @@ enum class BCStubStatus: uint8_t { PROFILE_BC_STUB, }; +enum class StableArrayChangeKind { PROTO, NOT_PROTO }; + struct BCStubEntries { static constexpr size_t EXISTING_BC_HANDLER_STUB_ENTRIES_COUNT = kungfu::BytecodeStubCSigns::NUM_OF_ALL_NORMAL_STUBS; @@ -197,6 +199,12 @@ public: using BCStubStatusBits = PGOStatusBits::NextField; using ThreadId = uint32_t; + enum FrameDroppedState { + StateFalse = 0, + StateTrue, + StatePending + }; + explicit JSThread(EcmaVM *vm); PUBLIC_API ~JSThread(); @@ -316,7 +324,12 @@ public: return glueData_.globalConst_; } - void 
NotifyStableArrayElementsGuardians(JSHandle receiver); + const CMap &GetArrayHClassIndexMap() const + { + return arrayHClassIndexMap_; + } + + void NotifyStableArrayElementsGuardians(JSHandle receiver, StableArrayChangeKind changeKind); bool IsStableArrayElementsGuardiansInvalid() const { @@ -640,6 +653,61 @@ public: return glueData_.allowCrossThreadExecution_; } + bool IsFrameDropped() + { + return glueData_.isFrameDropped_; + } + + void SetFrameDroppedState() + { + glueData_.isFrameDropped_ = true; + } + + void ResetFrameDroppedState() + { + glueData_.isFrameDropped_ = false; + } + + bool IsEntryFrameDroppedTrue() + { + return glueData_.entryFrameDroppedState_ == FrameDroppedState::StateTrue; + } + + bool IsEntryFrameDroppedPending() + { + return glueData_.entryFrameDroppedState_ == FrameDroppedState::StatePending; + } + + void SetEntryFrameDroppedState() + { + glueData_.entryFrameDroppedState_ = FrameDroppedState::StateTrue; + } + + void ResetEntryFrameDroppedState() + { + glueData_.entryFrameDroppedState_ = FrameDroppedState::StateFalse; + } + + void PendingEntryFrameDroppedState() + { + glueData_.entryFrameDroppedState_ = FrameDroppedState::StatePending; + } + + bool IsDebugMode() + { + return glueData_.isDebugMode_; + } + + void SetDebugModeState() + { + glueData_.isDebugMode_ = true; + } + + void ResetDebugModeState() + { + glueData_.isDebugMode_ = false; + } + bool IsStartGlobalLeakCheck() const; bool EnableGlobalObjectLeakCheck() const; bool EnableGlobalPrimitiveLeakCheck() const; @@ -673,6 +741,9 @@ public: base::AlignedPointer, base::AlignedUint64, base::AlignedUint64, + JSTaggedValue, + base::AlignedBool, + base::AlignedBool, JSTaggedValue> { enum class Index : size_t { BCStubEntriesIndex = 0, @@ -697,6 +768,9 @@ public: AllowCrossThreadExecutionIndex, InterruptVectorIndex, IsStartHeapSamplingIndex, + IsDebugModeIndex, + IsFrameDroppedIndex, + EntryFrameDroppedStateIndex, NumOfMembers }; static_assert(static_cast(Index::NumOfMembers) == NumOfTypes); @@ -806,6 +880,21 @@ public: return GetOffset(Index::IsStartHeapSamplingIndex)>(isArch32); } + static size_t GetIsDebugModeOffset(bool isArch32) + { + return GetOffset(Index::IsDebugModeIndex)>(isArch32); + } + + static size_t GetIsFrameDroppedOffset(bool isArch32) + { + return GetOffset(Index::IsFrameDroppedIndex)>(isArch32); + } + + static size_t GetEntryFrameDroppedStateOffset(bool isArch32) + { + return GetOffset(Index::EntryFrameDroppedStateIndex)>(isArch32); + } + alignas(EAS) BCStubEntries bcStubEntries_; alignas(EAS) JSTaggedValue exception_ {JSTaggedValue::Hole()}; alignas(EAS) JSTaggedValue globalObject_ {JSTaggedValue::Hole()}; @@ -828,6 +917,9 @@ public: alignas(EAS) bool allowCrossThreadExecution_ {false}; alignas(EAS) volatile uint64_t interruptVector_ {0}; alignas(EAS) JSTaggedValue isStartHeapSampling_ {JSTaggedValue::False()}; + alignas(EAS) bool isDebugMode_ {false}; + alignas(EAS) bool isFrameDropped_ {false}; + alignas(EAS) uint64_t entryFrameDroppedState_ {FrameDroppedState::StateFalse}; }; STATIC_ASSERT_EQ_ARCH(sizeof(GlueData), GlueData::SizeArch32, GlueData::SizeArch64); @@ -848,6 +940,7 @@ public: bool EraseContext(EcmaContext *context); const GlobalEnvConstants *GetFirstGlobalConst() const; + bool IsAllContextsInitialized() const; private: NO_COPY_SEMANTIC(JSThread); NO_MOVE_SEMANTIC(JSThread); @@ -860,6 +953,11 @@ private: currentContext_ = context; } + void SetArrayHClassIndexMap(const CMap &map) + { + arrayHClassIndexMap_ = map; + } + void DumpStack() DUMP_API_ATTR; static size_t GetAsmStackLimit(); @@ 
-904,6 +1002,8 @@ private: bool finalizationCheckState_ {false}; + CMap arrayHClassIndexMap_; + CVector contexts_; EcmaContext *currentContext_ {nullptr}; friend class GlobalHandleCollection; diff --git a/ecmascript/js_typed_array.cpp b/ecmascript/js_typed_array.cpp index 624a44a861f112c78f200b0b0d49f613dffb873b..50cce14f50606fa38f7100f8a493013e54abf11f 100644 --- a/ecmascript/js_typed_array.cpp +++ b/ecmascript/js_typed_array.cpp @@ -99,6 +99,7 @@ bool JSTypedArray::HasProperty(JSThread *thread, const JSHandle & } JSHandle numericIndexHandle(thread, numericIndex); JSTaggedNumber numericIndexNumber = JSTaggedValue::ToNumber(thread, numericIndexHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); double tNegZero = -0.0; auto eZero = JSTaggedNumber(tNegZero); JSHandle zero(thread, JSTaggedValue(0)); @@ -120,6 +121,7 @@ bool JSTypedArray::HasProperty(JSThread *thread, const JSHandle & return true; } JSTaggedValue parent = JSTaggedValue::GetPrototype(thread, JSHandle::Cast(typedarrayObj)); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); if (!parent.IsNull()) { return JSTaggedValue::HasProperty(thread, JSHandle(thread, parent), key); } @@ -161,6 +163,7 @@ bool JSTypedArray::DefineOwnProperty(JSThread *thread, const JSHandle numericIndexHandle(thread, numericIndex); JSTaggedNumber numericIndexNumber = JSTaggedValue::ToNumber(thread, numericIndexHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); double tNegZero = -0.0; auto eZero = JSTaggedNumber(tNegZero); JSHandle zero(thread, JSTaggedValue(0)); @@ -293,6 +296,7 @@ JSHandle JSTypedArray::OwnPropertyKeys(JSThread *thread, const JSHa for (uint32_t k = 0; k < bufferKeysLen; k++) { tKey.Update(JSTaggedValue(k)); JSHandle sKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(TaggedArray, thread); nameList->Set(thread, copyLength, sKey.GetTaggedValue()); copyLength++; } @@ -340,7 +344,7 @@ JSHandle JSTypedArray::OwnEnumPropertyKeys(JSThread *thread, const // a. Add ToString(i) as the last element of keys. uint32_t copyLength = 0; for (uint32_t k = 0; k < bufferKeysLen; k++) { - auto key = base::NumberHelper::NumberToString(thread, JSTaggedValue(k)); + auto key = base::NumberHelper::IntToEcmaString(thread, k); nameList->Set(thread, copyLength, key); copyLength++; } @@ -724,4 +728,41 @@ JSTaggedValue JSTypedArray::GetOffHeapBuffer(JSThread *thread, JSHandle &typedArray, + const JSHandle &value, uint32_t start, uint32_t end) +{ + // Assert: O is an Object that has [[ViewedArrayBuffer]], [[ArrayLength]], [[ByteOffset]], and + // [[TypedArrayName]] internal slots. + ASSERT(typedArray->IsTypedArray()); + // If O.[[ContentType]] is BigInt, let numValue be ? ToBigInt(value). + JSHandle typedArrayObj = JSHandle::Cast(typedArray); + if (UNLIKELY(typedArrayObj->GetContentType() == ContentType::BigInt || value->IsECMAObject())) { + return false; + } + JSTaggedNumber numValue = JSTypedArray::NonEcmaObjectToNumber(thread, value.GetTaggedValue()); + // ReturnIfAbrupt(numValue). + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, true); + JSTaggedValue buffer = typedArrayObj->GetViewedArrayBufferOrByteArray(); + // If ℝ(index) < 0 or ℝ(index) ≥ O.[[ArrayLength]], return false. + uint32_t arrLen = typedArrayObj->GetArrayLength(); + // Let offset be the value of O’s [[ByteOffset]] internal slot. + uint32_t offset = typedArrayObj->GetByteOffset(); + // Let arrayTypeName be the String value of O’s [[TypedArrayName]] + // Let elementSize be the Number value of the Element Size value specified in Table 49 for arrayTypeName. 
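// [editor's note] Worked example of the indexedPosition formula used below (hypothetical
// values): for an Int32Array view with byteOffset 8, elementSize is 4, so filling k = 2
// writes numValue at byteIndex = 2 * 4 + 8 = 16 in the viewed buffer; the loop repeats
// this for every k in [start, min(end, arrLen)).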
+ JSType jsType = typedArrayObj->GetClass()->GetObjectType(); + uint32_t elementSize = TypedArrayHelper::GetElementSize(jsType); + // Let elementType be the String value of the Element Type value in Table 49 for arrayTypeName. + DataViewType elementType = TypedArrayHelper::GetType(jsType); + uint64_t byteIndex = 0; + uint32_t k = start; + while (k < end && k < arrLen) { + // Let indexedPosition = (index × elementSize) + offset. + byteIndex = k * elementSize + offset; + // Perform SetValueInBuffer(buffer, indexedPosition, elementType, numValue). + BuiltinsArrayBuffer::FastSetValueInBuffer(thread, buffer, byteIndex, elementType, numValue.GetNumber(), true); + k++; + } + return true; +} } // namespace panda::ecmascript diff --git a/ecmascript/js_typed_array.h b/ecmascript/js_typed_array.h index ea9391472bdf2066e261edea7e03db9bcd5bbb53..39eb78065f2f488e730e5f873e10a63e97f54b2b 100644 --- a/ecmascript/js_typed_array.h +++ b/ecmascript/js_typed_array.h @@ -98,6 +98,8 @@ public: // only use in TypeArray fast set property static JSTaggedNumber NonEcmaObjectToNumber(JSThread *thread, const JSTaggedValue tagged); static JSTaggedValue GetOffHeapBuffer(JSThread *thread, JSHandle &typedArray); + static bool FastTypedArrayFill(JSThread *thread, const JSHandle &typedArray, + const JSHandle &value, uint32_t start, uint32_t end); static constexpr size_t VIEWED_ARRAY_BUFFER_OFFSET = JSObject::SIZE; static DataViewType GetTypeFromName(JSThread *thread, const JSHandle &typeName); ACCESSORS(ViewedArrayBufferOrByteArray, VIEWED_ARRAY_BUFFER_OFFSET, TYPED_ARRAY_NAME_OFFSET) diff --git a/ecmascript/js_vm/context_main.cpp b/ecmascript/js_vm/context_main.cpp index fe4caf213c0b0661ae26ece314caff15f07f563f..6b75f7c3e7bd442ae61e9851f03c2d94a627f49e 100644 --- a/ecmascript/js_vm/context_main.cpp +++ b/ecmascript/js_vm/context_main.cpp @@ -17,7 +17,7 @@ #include #include #include -#include // NOLINTNEXTLINE(modernize-deprecated-headers) +#include #include #include "ecmascript/base/string_helper.h" @@ -100,8 +100,6 @@ int Main(const int argc, const char **argv) arg_list_t fileNames = base::StringHelper::SplitString(files, ":"); #endif EcmaContext *context1 = JSNApi::CreateJSContext(vm); - EcmaContext *context2 = JSNApi::CreateJSContext(vm); - JSNApi::SwitchCurrentContext(vm, context1); ClockScope execute; for (const auto &fileName : fileNames) { auto res = JSNApi::Execute(vm, fileName, entry); @@ -112,7 +110,6 @@ int Main(const int argc, const char **argv) } } auto totalTime = execute.TotalSpentTime(); - JSNApi::DestroyJSContext(vm, context2); JSNApi::DestroyJSContext(vm, context1); if (runtimeOptions.IsEnablePrintExecuteTime()) { diff --git a/ecmascript/js_weak_container.cpp b/ecmascript/js_weak_container.cpp index 8874c7ba8e3a32d025ab38eb0f10900037771073..6cb6bfeebe0a151b27a7875b2e51ab0eb44c1856 100644 --- a/ecmascript/js_weak_container.cpp +++ b/ecmascript/js_weak_container.cpp @@ -25,7 +25,7 @@ void JSWeakMap::Set(JSThread *thread, const JSHandle &map, const JSHa { [[maybe_unused]] EcmaHandleScope handleScope(thread); if (!LinkedHashMap::IsKey(JSTaggedValue(key.GetTaggedValue().CreateAndGetWeakRef()))) { - THROW_TYPE_ERROR(thread, "the value must be Key of JSMap"); + THROW_TYPE_ERROR(thread, "the value must be Key of JSWeakMap"); } JSHandle mapHandle(thread, LinkedHashMap::Cast(map->GetLinkedMap().GetTaggedObject())); @@ -62,6 +62,18 @@ int JSWeakMap::GetSize() const return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->NumberOfElements(); } +JSTaggedValue JSWeakMap::GetKey(int entry) const +{ + 
ASSERT_PRINT(entry >= 0 && entry < GetSize(), "entry must be non-negative integer less than capacity"); + return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->GetKey(entry); +} + +JSTaggedValue JSWeakMap::GetValue(int entry) const +{ + ASSERT_PRINT(entry >= 0 && entry < GetSize(), "entry must be non-negative integer less than capacity"); + return LinkedHashMap::Cast(GetLinkedMap().GetTaggedObject())->GetValue(entry); +} + void JSWeakSet::Add(JSThread *thread, const JSHandle &weakSet, const JSHandle &value) { if (!LinkedHashSet::IsKey(value.GetTaggedValue())) { @@ -95,4 +107,10 @@ int JSWeakSet::GetSize() const { return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->NumberOfElements(); } + +JSTaggedValue JSWeakSet::GetValue(int entry) const +{ + ASSERT_PRINT(entry >= 0 && entry < GetSize(), "entry must be non-negative integer less than capacity"); + return LinkedHashSet::Cast(GetLinkedSet().GetTaggedObject())->GetValue(entry); +} } // namespace panda::ecmascript diff --git a/ecmascript/js_weak_container.h b/ecmascript/js_weak_container.h index 48e9e41df2a9debb03fe68fe922894fb13ba8300..badfb7f4e7aefc6955c0dc7733746a18f64deb34 100644 --- a/ecmascript/js_weak_container.h +++ b/ecmascript/js_weak_container.h @@ -38,6 +38,10 @@ public: int GetSize() const; + JSTaggedValue GetKey(int entry) const; + + JSTaggedValue GetValue(int entry) const; + static constexpr size_t LINKED_MAP_OFFSET = JSObject::SIZE; ACCESSORS(LinkedMap, LINKED_MAP_OFFSET, SIZE) @@ -60,6 +64,8 @@ public: int GetSize() const; + JSTaggedValue GetValue(int entry) const; + static constexpr size_t LINKED_SET_OFFSET = JSObject::SIZE; ACCESSORS(LinkedSet, LINKED_SET_OFFSET, SIZE) diff --git a/ecmascript/jspandafile/accessor/module_data_accessor.cpp b/ecmascript/jspandafile/accessor/module_data_accessor.cpp index d833fa44c3eca856c7628a87c28089ed818dd3be..da6ec423d4d16857126997525e388c7d477ddb37 100644 --- a/ecmascript/jspandafile/accessor/module_data_accessor.cpp +++ b/ecmascript/jspandafile/accessor/module_data_accessor.cpp @@ -14,6 +14,7 @@ */ #include "ecmascript/jspandafile/accessor/module_data_accessor.h" +#include "ecmascript/global_env_constants-inl.h" #include "ecmascript/jspandafile/js_pandafile_manager.h" namespace panda::ecmascript { diff --git a/ecmascript/jspandafile/bytecode_inst/old_instruction_enum.h b/ecmascript/jspandafile/bytecode_inst/old_instruction_enum.h index ca507445e06387dfb13b24419d9022adcc435881..c814aa41f3a8ef4d0d69c3a809a08de6b03b5443 100644 --- a/ecmascript/jspandafile/bytecode_inst/old_instruction_enum.h +++ b/ecmascript/jspandafile/bytecode_inst/old_instruction_enum.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Huawei Device Co., Ltd. + * Copyright (c) 2022-2023 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +13,8 @@ * limitations under the License. 
*/ -#ifndef OLD_INSTRUCTION_ENUM_H -#define OLD_INSTRUCTION_ENUM_H +#ifndef ECMASCRIPT_JSPANDAFILE_BYTECODE_INST_OLD_INSTRUCTION_ENUM_H +#define ECMASCRIPT_JSPANDAFILE_BYTECODE_INST_OLD_INSTRUCTION_ENUM_H enum class Format : uint8_t { ID16, @@ -478,4 +478,4 @@ enum Flags : uint32_t { ACC_WRITE = 0x40000, }; -#endif // OLD_INSTRUCTION_ENUM_H +#endif // ECMASCRIPT_JSPANDAFILE_BYTECODE_INST_OLD_INSTRUCTION_ENUM_H diff --git a/ecmascript/jspandafile/class_info_extractor.cpp b/ecmascript/jspandafile/class_info_extractor.cpp index 2e03bcb2cd7abf84cca6c6cd674b9a667dce8ae7..8031331ca11d88685ba5dd468e99a2ddbe682058 100644 --- a/ecmascript/jspandafile/class_info_extractor.cpp +++ b/ecmascript/jspandafile/class_info_extractor.cpp @@ -185,7 +185,7 @@ JSHandle ClassInfoExtractor::CreatePrototypeHClass(JSThread *thread, c return JSHandle(globalConst->GetHandledClassPrototypeClass()); } JSHandle hclass; - if (LIKELY(length <= PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES)) { + if (LIKELY(length <= PropertyAttributes::MAX_FAST_PROPS_CAPACITY)) { JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSHandle layout = factory->CreateLayoutInfo(length, MemSpaceType::OLD_SPACE, GrowMode::KEEP); for (uint32_t index = 0; index < length; ++index) { @@ -198,7 +198,7 @@ JSHandle ClassInfoExtractor::CreatePrototypeHClass(JSThread *thread, c } attributes.SetIsInlinedProps(true); - attributes.SetRepresentation(Representation::MIXED); + attributes.SetRepresentation(Representation::TAGGED); attributes.SetOffset(index); layout->AddKey(thread, index, key.GetTaggedValue(), attributes); } @@ -241,7 +241,7 @@ JSHandle ClassInfoExtractor::CreateConstructorHClass(JSThread *thread, } } JSHandle hclass; - if (LIKELY(length <= PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES)) { + if (LIKELY(length <= PropertyAttributes::MAX_FAST_PROPS_CAPACITY)) { JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSHandle layout = factory->CreateLayoutInfo(length, MemSpaceType::OLD_SPACE, GrowMode::KEEP); for (uint32_t index = 0; index < length; ++index) { @@ -273,7 +273,7 @@ JSHandle ClassInfoExtractor::CreateConstructorHClass(JSThread *thread, } attributes.SetIsInlinedProps(true); - attributes.SetRepresentation(Representation::MIXED); + attributes.SetRepresentation(Representation::TAGGED); attributes.SetOffset(index); layout->AddKey(thread, index, key.GetTaggedValue(), attributes); } @@ -407,7 +407,7 @@ JSHandle ClassHelper::DefineClassFromExtractor(JSThread *thread, con const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSTaggedValue::DefinePropertyOrThrow(thread, JSHandle(prototype), globalConst->GetHandledConstructorString(), ctorDesc); - + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSFunction, thread); constructor->SetHomeObject(thread, prototype); constructor->SetProtoOrHClass(thread, prototype); @@ -415,7 +415,6 @@ JSHandle ClassHelper::DefineClassFromExtractor(JSThread *thread, con } JSHandle ClassHelper::DefineClassWithIHClass(JSThread *thread, - [[maybe_unused]] const JSHandle &base, JSHandle &extractor, const JSHandle &lexenv, const JSHandle &ihclass, @@ -467,7 +466,8 @@ JSHandle ClassHelper::DefineClassWithIHClass(JSThread *thread, // static uint32_t staticLength = staticProperties->GetLength(); - + JSMutableHandle key(thread, JSTaggedValue::Undefined()); + int correntIndex = 0; if (LIKELY(!constructorHClass->IsDictionaryMode())) { for (uint32_t index = 0; index < staticLength; ++index) { propValue.Update(staticProperties->Get(index)); @@ -477,7 +477,13 @@ JSHandle ClassHelper::DefineClassWithIHClass(JSThread 
*thread, propFunc->SetLexicalEnv(thread, lexenv); propValue.Update(propFunc); } - JSHandle::Cast(constructor)->SetPropertyInlinedProps(thread, index, propValue.GetTaggedValue()); + bool needCorrentIndex = index >= ClassInfoExtractor::STATIC_RESERVED_LENGTH; + if (needCorrentIndex) { + key.Update(staticKeys->Get(index)); + correntIndex = JSHClass::FindPropertyEntry(thread, *constructorHClass, key.GetTaggedValue()); + } + JSHandle::Cast(constructor)->SetPropertyInlinedProps(thread, + needCorrentIndex ? static_cast(correntIndex) : index, propValue.GetTaggedValue()); } } else { JSHandle dict = BuildDictionaryProperties(thread, JSHandle(constructor), staticKeys, @@ -495,7 +501,7 @@ JSHandle ClassHelper::DefineClassWithIHClass(JSThread *thread, const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSTaggedValue::DefinePropertyOrThrow(thread, JSHandle(prototype), globalConst->GetHandledConstructorString(), ctorDesc); - + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSFunction, thread); constructor->SetHomeObject(thread, prototype); constructor->SetProtoOrHClass(thread, ihclass); @@ -510,7 +516,7 @@ JSHandle ClassHelper::BuildDictionaryProperties(JSThread *thread { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); uint32_t length = keys->GetLength(); - ASSERT(length > PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES); + ASSERT(length > PropertyAttributes::MAX_FAST_PROPS_CAPACITY); ASSERT(keys->GetLength() == properties->GetLength()); JSMutableHandle dict( diff --git a/ecmascript/jspandafile/class_info_extractor.h b/ecmascript/jspandafile/class_info_extractor.h index 2aed38c8817b11bd0c7d7fdcede08c7711a1501f..8428df920329eb55ea2174e3d2be32ddd2ad4821 100644 --- a/ecmascript/jspandafile/class_info_extractor.h +++ b/ecmascript/jspandafile/class_info_extractor.h @@ -98,7 +98,6 @@ public: const JSHandle &lexenv); static JSHandle DefineClassWithIHClass(JSThread *thread, - const JSHandle &base, JSHandle &extractor, const JSHandle &lexenv, const JSHandle &ihclass, diff --git a/ecmascript/jspandafile/debug_info_extractor.cpp b/ecmascript/jspandafile/debug_info_extractor.cpp index 01176395b919e319f945138c70b9283b148dfaa6..81138076c5cdbb2dbce15587e3fc668f070db6c0 100644 --- a/ecmascript/jspandafile/debug_info_extractor.cpp +++ b/ecmascript/jspandafile/debug_info_extractor.cpp @@ -55,6 +55,13 @@ public: void ProcessEnd() { + // When processing ends, any variableInfo whose end_offset is still 0 + // is updated to the current state address. + for (auto iter = lvt_.begin(); iter != lvt_.end(); iter++) { + if (iter->endOffset == 0) { + iter->endOffset = state_->GetAddress(); + } + } } bool HandleAdvanceLine(int32_t lineDiff) const { @@ -93,21 +100,34 @@ public: bool HandleStartLocal(int32_t regNumber, uint32_t nameId, [[maybe_unused]] uint32_t typeId) { + // start_offset is the current state address; end_offset is temporarily 0 here + // and is updated later in HandleEndLocal. 
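+ // For example, a local that becomes live at bytecode address S is first recorded as + // {name, regNumber, S, 0}; HandleEndLocal (or ProcessEnd) later replaces the 0 with the + // address where the variable goes out of scope (addresses here are illustrative).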
+ uint32_t startOffset = state_->GetAddress(); + uint32_t endOffset = 0; const char *name = GetStringFromConstantPool(state_->GetPandaFile(), nameId); - lvt_.emplace(name, regNumber); + lvt_.push_back({name, regNumber, startOffset, endOffset}); return true; } bool HandleStartLocalExtended(int32_t regNumber, uint32_t nameId, [[maybe_unused]] uint32_t typeId, [[maybe_unused]] uint32_t typeSignatureId) { + uint32_t startOffset = state_->GetAddress(); + uint32_t endOffset = 0; const char *name = GetStringFromConstantPool(state_->GetPandaFile(), nameId); - lvt_.emplace(name, regNumber); + lvt_.push_back({name, regNumber, startOffset, endOffset}); return true; } bool HandleEndLocal([[maybe_unused]] int32_t regNumber) { + for (auto iter = lvt_.rbegin(); iter != lvt_.rend(); iter++) { + // search backwards for the matching variable and update its end_offset to the current state address + if (iter->regNumber == regNumber && iter->endOffset == 0) { + iter->endOffset = state_->GetAddress(); + break; + } + } return true; } diff --git a/ecmascript/jspandafile/debug_info_extractor.h b/ecmascript/jspandafile/debug_info_extractor.h index 8f1375c582022135430f6ebe5ebc8b1a66926e62..c70ae26c3f0aa8adc8fde9940b9b54c9b34c09c8 100644 --- a/ecmascript/jspandafile/debug_info_extractor.h +++ b/ecmascript/jspandafile/debug_info_extractor.h @@ -56,7 +56,8 @@ using ColumnNumberTable = CVector; using JSPtLocation = tooling::JSPtLocation; /* - * LocalVariableInfo define in frontend, now only use name and regNumber: + * The full LocalVariableInfo is defined in the frontend; + * only name, reg_number, start_offset, and end_offset are used here: * std::string name * std::string type * std::string typeSignature @@ -64,7 +65,13 @@ using JSPtLocation = tooling::JSPtLocation; * uint32_t startOffset * uint32_t endOffset */ -using LocalVariableTable = CUnorderedMap; // name, regNumber +struct LocalVariableInfo { + std::string name; + int32_t regNumber; + uint32_t startOffset; + uint32_t endOffset; +}; +using LocalVariableTable = CVector; // public for debugger class PUBLIC_API DebugInfoExtractor { diff --git a/ecmascript/jspandafile/js_pandafile.cpp b/ecmascript/jspandafile/js_pandafile.cpp index 35b07aaaf970af48ca628478fb8d681d70a6a3a4..4f6080630818cff8538a60091fb73a5ea34d47bd 100644 --- a/ecmascript/jspandafile/js_pandafile.cpp +++ b/ecmascript/jspandafile/js_pandafile.cpp @@ -25,15 +25,15 @@ JSPandaFile::JSPandaFile(const panda_file::File *pf, const CString &descriptor) : pf_(pf), desc_(descriptor) { ASSERT(pf_ != nullptr); - CheckIsBundlePack(); - if (isBundlePack_) { + CheckIsMergedPF(); + if (!IsMergedPF()) { InitializeUnMergedPF(); } else { InitializeMergedPF(); } checksum_ = pf->GetHeader()->checksum; isNewVersion_ = pf_->GetHeader()->version > OLD_VERSION; - if (!loadedFirstPandaFile && !isBundlePack_) { + if (!loadedFirstPandaFile && IsMergedPF()) { // Tag the first merged abc to use constant string. The lifetime of this first panda file is the same // as the vm. And make sure the first pandafile is the same at the compile time and runtime. 
isFirstPandafile_ = true; @@ -41,7 +41,7 @@ JSPandaFile::JSPandaFile(const panda_file::File *pf, const CString &descriptor) } } -void JSPandaFile::CheckIsBundlePack() +void JSPandaFile::CheckIsMergedPF() { Span classIndexes = pf_->GetClasses(); for (const uint32_t index : classIndexes) { @@ -55,10 +55,10 @@ void JSPandaFile::CheckIsBundlePack() panda_file::File::StringData sd = GetStringData(fieldNameId); const char *fieldName = utf::Mutf8AsCString(sd.data); if (std::strcmp(IS_COMMON_JS, fieldName) == 0 || std::strcmp(MODULE_RECORD_IDX, fieldName) == 0) { - isBundlePack_ = false; + isMergedPF_ = true; } }); - if (!isBundlePack_) { + if (isMergedPF_) { return; } } @@ -108,7 +108,7 @@ uint32_t JSPandaFile::GetOrInsertConstantPool(ConstPoolType type, uint32_t offse const CUnorderedMap *constpoolMap) { CUnorderedMap *map = nullptr; - if (constpoolMap != nullptr && !IsBundlePack()) { + if (constpoolMap != nullptr && IsMergedPF()) { map = const_cast *>(constpoolMap); } else { map = &constpoolMap_; @@ -213,68 +213,30 @@ MethodLiteral *JSPandaFile::FindMethodLiteral(uint32_t offset) const bool JSPandaFile::IsFirstMergedAbc() const { - if (isFirstPandafile_ && !IsBundlePack()) { + if (isFirstPandafile_ && IsMergedPF()) { return true; } return false; } -bool JSPandaFile::IsModule(JSThread *thread, const CString &recordName, CString fullRecordName) const +bool JSPandaFile::CheckAndGetRecordInfo(const CString &recordName, JSRecordInfo &recordInfo) const { - if (IsBundlePack()) { - return jsRecordInfo_.begin()->second.moduleRecordIdx != -1; - } - auto info = jsRecordInfo_.find(recordName); - if (info != jsRecordInfo_.end()) { - return info->second.moduleRecordIdx != -1; - } - if (fullRecordName.empty()) { - fullRecordName = recordName; - } - CString msg = "cannot find record '" + fullRecordName + "', please check the request path."; - THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), false); -} - -bool JSPandaFile::IsCjs(JSThread *thread, const CString &recordName) const -{ - if (IsBundlePack()) { - return jsRecordInfo_.begin()->second.isCjs; - } - auto info = jsRecordInfo_.find(recordName); - if (info != jsRecordInfo_.end()) { - return info->second.isCjs; - } - CString msg = "cannot find record '" + recordName + "', please check the request path."; - THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), false); -} - -bool JSPandaFile::IsJson(JSThread *thread, const CString &recordName) const -{ - if (IsBundlePack()) { - return jsRecordInfo_.begin()->second.isJson; + if (!IsMergedPF()) { + recordInfo = jsRecordInfo_.begin()->second; + return true; } auto info = jsRecordInfo_.find(recordName); if (info != jsRecordInfo_.end()) { - return info->second.isJson; + recordInfo = info->second; + return true; } - CString msg = "cannot find record '" + recordName + "', please check the request path."; - THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), false); + return false; } -CString JSPandaFile::GetJsonStringId(JSThread *thread, const CString &recordName) const +CString JSPandaFile::GetJsonStringId(const JSRecordInfo &jsRecordInfo) const { - if (IsBundlePack()) { - StringData sd = GetStringData(EntityId(jsRecordInfo_.begin()->second.jsonStringId)); - return utf::Mutf8AsCString(sd.data); - } - - auto info = jsRecordInfo_.find(recordName); - if (info != jsRecordInfo_.end()) { - StringData sd = GetStringData(EntityId(info->second.jsonStringId)); - return utf::Mutf8AsCString(sd.data); - } - CString msg = "cannot find record '" + recordName + "', please check the request path."; - 
THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), ""); + StringData sd = GetStringData(EntityId(jsRecordInfo.jsonStringId)); + return utf::Mutf8AsCString(sd.data); } CString JSPandaFile::GetEntryPoint(const CString &recordName) const diff --git a/ecmascript/jspandafile/js_pandafile.h b/ecmascript/jspandafile/js_pandafile.h index 73aad318cbab59a38f2f61ded866185ab78a6a7a..306bafbf549e630f79e5451576d4430478615504 100644 --- a/ecmascript/jspandafile/js_pandafile.h +++ b/ecmascript/jspandafile/js_pandafile.h @@ -77,6 +77,7 @@ public: static constexpr char BUNDLE_INSTALL_PATH[] = "/data/storage/el1/bundle/"; static constexpr int PACKAGE_NAME_LEN = 8; static constexpr int TYPE_SUMMARY_OFFSET_NOT_FOUND = 0; + static constexpr int32_t PF_OFFSET = 0; JSPandaFile(const panda_file::File *pf, const CString &descriptor); ~JSPandaFile(); @@ -138,7 +139,7 @@ public: uint32_t GetMainMethodIndex(const CString &recordName = ENTRY_FUNCTION_NAME) const { - if (IsBundlePack()) { + if (!IsMergedPF()) { return jsRecordInfo_.begin()->second.mainMethodIndex; } auto info = jsRecordInfo_.find(recordName); @@ -167,15 +168,18 @@ public: uint32_t PUBLIC_API GetOrInsertConstantPool(ConstPoolType type, uint32_t offset, const CUnorderedMap *constpoolMap = nullptr); - void UpdateMainMethodIndex(uint32_t mainMethodIndex, const CString &recordName = ENTRY_FUNCTION_NAME) + // Only for unmerged abc + void UpdateMainMethodIndex(uint32_t mainMethodIndex) { - if (IsBundlePack()) { - jsRecordInfo_.begin()->second.mainMethodIndex = mainMethodIndex; - } else { - auto info = jsRecordInfo_.find(recordName); - if (info != jsRecordInfo_.end()) { - info->second.mainMethodIndex = mainMethodIndex; - } + jsRecordInfo_.begin()->second.mainMethodIndex = mainMethodIndex; + } + + // merged abc + void UpdateMainMethodIndex(uint32_t mainMethodIndex, const CString &recordName) + { + auto info = jsRecordInfo_.find(recordName); + if (info != jsRecordInfo_.end()) { + info->second.mainMethodIndex = mainMethodIndex; } } @@ -183,7 +187,7 @@ public: int GetModuleRecordIdx(const CString &recordName = ENTRY_FUNCTION_NAME) const { - if (IsBundlePack()) { + if (!IsMergedPF()) { return jsRecordInfo_.begin()->second.moduleRecordIdx; } auto info = jsRecordInfo_.find(recordName); @@ -241,18 +245,28 @@ public: return pf_->GetHeader()->file_size; } - bool PUBLIC_API IsModule(JSThread *thread, const CString &recordName = ENTRY_FUNCTION_NAME, - CString fullRecordName = "") const; + bool CheckAndGetRecordInfo(const CString &recordName, JSRecordInfo &recordInfo) const; + + CString GetJsonStringId(const JSRecordInfo &jsRecordInfo) const; - bool IsCjs(JSThread *thread, const CString &recordName = ENTRY_FUNCTION_NAME) const; + bool PUBLIC_API IsModule(const JSRecordInfo &jsRecordInfo) const + { + return jsRecordInfo.moduleRecordIdx != -1; + } - bool IsJson(JSThread *thread, const CString &recordName = ENTRY_FUNCTION_NAME) const; + bool IsCjs(const JSRecordInfo &jsRecordInfo) const + { + return jsRecordInfo.isCjs; + } - CString GetJsonStringId(JSThread *thread, const CString &recordName = ENTRY_FUNCTION_NAME) const; + bool IsJson(const JSRecordInfo &jsRecordInfo) const + { + return jsRecordInfo.isJson; + } - bool IsBundlePack() const + inline bool IsMergedPF() const { - return isBundlePack_; + return isMergedPF_; } bool IsLoadedAOT() const @@ -302,7 +316,7 @@ public: return desc.substr(1, desc.size() - 2); // 2 : skip symbol "L" and ";" } - void CheckIsBundlePack(); + void CheckIsMergedPF(); void CheckIsRecordWithBundleName(const CString &entry); bool 
IsRecordWithBundleName() const { @@ -334,6 +348,11 @@ public: return false; } + bool HasTSTypes(const JSRecordInfo &recordInfo) const + { + return recordInfo.hasTSTypes; + } + uint32_t GetTypeSummaryOffset(const CString &recordName) const { auto it = jsRecordInfo_.find(recordName); @@ -370,19 +389,18 @@ private: static constexpr size_t VERSION_SIZE = 4; static constexpr std::array OLD_VERSION {0, 0, 0, 2}; + const panda_file::File *pf_ {nullptr}; uint32_t constpoolIndex_ {0}; uint32_t checksum_ {0}; CUnorderedMap methodLiteralMap_; CUnorderedMap constpoolMap_; uint32_t numMethods_ {0}; MethodLiteral *methodLiterals_ {nullptr}; - const panda_file::File *pf_ {nullptr}; CString desc_; uint32_t anFileInfoIndex_ {INVALID_INDEX}; bool isNewVersion_ {false}; - // marge abc - bool isBundlePack_ {true}; // isBundlePack means app compile mode is JSBundle + bool isMergedPF_ {false}; // merged abc CUnorderedMap jsRecordInfo_; bool isRecordWithBundleName_ {true}; static bool loadedFirstPandaFile; diff --git a/ecmascript/jspandafile/js_pandafile_executor.cpp b/ecmascript/jspandafile/js_pandafile_executor.cpp index fb3dc1cac6c5262df10ee61d202b4b5b1e20b1eb..420b90c1f88ab717f1b8e6e4c73a5efe6e322527 100644 --- a/ecmascript/jspandafile/js_pandafile_executor.cpp +++ b/ecmascript/jspandafile/js_pandafile_executor.cpp @@ -24,6 +24,7 @@ #include "ecmascript/mem/c_string.h" #include "ecmascript/mem/c_containers.h" #include "ecmascript/module/js_module_manager.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/patch/quick_fix_manager.h" namespace panda::ecmascript { @@ -32,16 +33,17 @@ Expected JSPandaFileExecutor::ExecuteFromFile(JSThread *thr std::string_view entryPoint, bool needUpdate, bool excuteFromJob) { LOG_ECMA(DEBUG) << "JSPandaFileExecutor::ExecuteFromFile filename " << filename; + ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "JSPandaFileExecutor::ExecuteFromFile"); CString entry; CString name; - CString normalName = PathHelper::NormalizePath(filename); EcmaVM *vm = thread->GetEcmaVM(); if (!vm->IsBundlePack() && !excuteFromJob) { #if defined(PANDA_TARGET_LINUX) || defined(OHOS_UNIT_TEST) name = filename; entry = entryPoint.data(); #else - entry = PathHelper::ParseOhmUrl(vm, normalName, name); + CString normalName = PathHelper::NormalizePath(filename); + ModulePathHelper::ParseOhmUrl(vm, normalName, name, entry); #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) if (name.empty()) { name = vm->GetAssetPath(); @@ -70,28 +72,30 @@ Expected JSPandaFileExecutor::ExecuteFromFile(JSThread *thr // realEntry is used to record the original record, which is easy to throw when there are exceptions const CString realEntry = entry; // If it is an old record, delete the bundleName and moduleName - if (!jsPandaFile->IsBundlePack() && !excuteFromJob && !vm->GetBundleName().empty()) { + if (jsPandaFile->IsMergedPF() && !excuteFromJob && !vm->GetBundleName().empty()) { jsPandaFile->CheckIsRecordWithBundleName(entry); if (!jsPandaFile->IsRecordWithBundleName()) { - PathHelper::CroppingRecord(entry); + PathHelper::AdaptOldIsaRecord(entry); } } - bool isModule = jsPandaFile->IsModule(thread, entry, realEntry); - if (thread->HasPendingException()) { - thread->GetCurrentEcmaContext()->HandleUncaughtException(thread->GetException()); - return Unexpected(false); + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << realEntry <<"' in baseFileName " << name << "."; + CString msg = "cannot 
find record '" + realEntry + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), Unexpected(false)); } - if (isModule) { + if (jsPandaFile->IsModule(recordInfo)) { [[maybe_unused]] EcmaHandleScope scope(thread); ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); JSHandle moduleRecord(thread->GlobalConstants()->GetHandledUndefined()); - if (jsPandaFile->IsBundlePack()) { - moduleRecord = moduleManager->HostResolveImportedModule(name); + if (!jsPandaFile->IsMergedPF()) { + moduleRecord = moduleManager->HostResolveImportedModule(name, excuteFromJob); } else { - moduleRecord = moduleManager->HostResolveImportedModuleWithMerge(name, entry); + moduleRecord = moduleManager->HostResolveImportedModuleWithMerge(name, entry, excuteFromJob); } - SourceTextModule::Instantiate(thread, moduleRecord); + SourceTextModule::Instantiate(thread, moduleRecord, excuteFromJob); if (thread->HasPendingException()) { if (!excuteFromJob) { thread->GetCurrentEcmaContext()->HandleUncaughtException(thread->GetException()); @@ -111,6 +115,7 @@ Expected JSPandaFileExecutor::ExecuteFromBuffer(JSThread *t const void *buffer, size_t size, std::string_view entryPoint, const CString &filename, bool needUpdate) { LOG_ECMA(DEBUG) << "JSPandaFileExecutor::ExecuteFromBuffer filename " << filename; + ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "JSPandaFileExecutor::ExecuteFromBuffer"); CString normalName = PathHelper::NormalizePath(filename); std::shared_ptr jsPandaFile = JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, normalName, entryPoint, buffer, size, needUpdate); @@ -122,10 +127,16 @@ Expected JSPandaFileExecutor::ExecuteFromBuffer(JSThread *t LoadAOTFilesForFile(vm, jsPandaFile.get()); CString entry = entryPoint.data(); - bool isModule = jsPandaFile->IsModule(thread, entry); - if (isModule) { - bool isBundle = jsPandaFile->IsBundlePack(); - return CommonExecuteBuffer(thread, isBundle, normalName, entry, buffer, size); + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << entry <<"' in baseFileName " << normalName << "."; + CString msg = "cannot find record '" + entry + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), Unexpected(false)); + } + if (jsPandaFile->IsModule(recordInfo)) { + bool isMergedPF = jsPandaFile->IsMergedPF(); + return CommonExecuteBuffer(thread, isMergedPF, normalName, entry, buffer, size); } return JSPandaFileExecutor::Execute(thread, jsPandaFile.get(), entry); } @@ -135,6 +146,7 @@ Expected JSPandaFileExecutor::ExecuteModuleBuffer( JSThread *thread, const void *buffer, size_t size, const CString &filename, bool needUpdate) { LOG_ECMA(DEBUG) << "JSPandaFileExecutor::ExecuteModuleBuffer filename " << filename; + ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "JSPandaFileExecutor::ExecuteModuleBuffer"); CString name; EcmaVM *vm = thread->GetEcmaVM(); #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) @@ -147,7 +159,8 @@ Expected JSPandaFileExecutor::ExecuteModuleBuffer( name = assetPath + "/" + JSPandaFile::MERGE_ABC_NAME; #endif CString normalName = PathHelper::NormalizePath(filename); - CString entry = PathHelper::ParseOhmUrl(vm, normalName, name); + CString entry; + ModulePathHelper::ParseOhmUrl(vm, normalName, name, entry); std::shared_ptr jsPandaFile = JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, name, entry, buffer, size, needUpdate); if (jsPandaFile == 
nullptr) { @@ -156,35 +169,38 @@ Expected JSPandaFileExecutor::ExecuteModuleBuffer( } LoadAOTFilesForFile(vm, jsPandaFile.get()); - bool isBundle = jsPandaFile->IsBundlePack(); + bool isMergedPF = jsPandaFile->IsMergedPF(); // realEntry is used to record the original record, which is easy to throw when there are exceptions const CString realEntry = entry; - if (!isBundle) { + if (isMergedPF) { jsPandaFile->CheckIsRecordWithBundleName(entry); if (!jsPandaFile->IsRecordWithBundleName()) { - PathHelper::CroppingRecord(entry); + PathHelper::AdaptOldIsaRecord(entry); } } - // will be refactored, temporarily use the function IsModule to verify realEntry - [[maybe_unused]] bool isModule = jsPandaFile->IsModule(thread, entry, realEntry); - if (thread->HasPendingException()) { - thread->GetCurrentEcmaContext()->HandleUncaughtException(thread->GetException()); - return Unexpected(false); + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << realEntry <<"' in baseFileName " << name << "."; + CString msg = "cannot find record '" + realEntry + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), Unexpected(false)); + } + if (!jsPandaFile->IsModule(recordInfo)) { + LOG_ECMA(FATAL) << "Input file is not esmodule"; } - ASSERT(isModule); - return CommonExecuteBuffer(thread, isBundle, name, entry, buffer, size); + return CommonExecuteBuffer(thread, isMergedPF, name, entry, buffer, size); } // The security interface needs to be modified accordingly. Expected JSPandaFileExecutor::CommonExecuteBuffer(JSThread *thread, - bool isBundle, const CString &filename, const CString &entry, const void *buffer, size_t size) + bool isMergedPF, const CString &filename, const CString &entry, const void *buffer, size_t size) { [[maybe_unused]] EcmaHandleScope scope(thread); ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); moduleManager->SetExecuteMode(true); JSMutableHandle moduleRecord(thread, thread->GlobalConstants()->GetUndefined()); - if (isBundle) { + if (!isMergedPF) { moduleRecord.Update(moduleManager->HostResolveImportedModule(buffer, size, filename)); } else { moduleRecord.Update(moduleManager->HostResolveImportedModuleWithMerge(filename, entry)); @@ -235,6 +251,7 @@ Expected JSPandaFileExecutor::ExecuteFromBufferSecure(JSThr size_t size, std::string_view entryPoint, const CString &filename, bool needUpdate) { LOG_ECMA(DEBUG) << "JSPandaFileExecutor::ExecuteFromBufferSecure with secure buffer filename " << filename; + ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "JSPandaFileExecutor::ExecuteFromBufferSecure"); CString normalName = PathHelper::NormalizePath(filename); std::shared_ptr jsPandaFile = JSPandaFileManager::GetInstance()-> LoadJSPandaFileSecure(thread, normalName, entryPoint, buffer, size, needUpdate); @@ -246,8 +263,14 @@ Expected JSPandaFileExecutor::ExecuteFromBufferSecure(JSThr LoadAOTFilesForFile(vm, jsPandaFile.get()); CString entry = entryPoint.data(); - bool isModule = jsPandaFile->IsModule(thread, entry); - if (isModule) { + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << entry <<"' in baseFileName " << normalName << "."; + CString msg = "cannot find record '" + entry + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), Unexpected(false)); + } + if 
(jsPandaFile->IsModule(recordInfo)) { return CommonExecuteBuffer(thread, normalName, entry, jsPandaFile.get()); } return JSPandaFileExecutor::Execute(thread, jsPandaFile.get(), entry); @@ -260,7 +283,7 @@ Expected JSPandaFileExecutor::CommonExecuteBuffer(JSThread ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); moduleManager->SetExecuteMode(true); JSMutableHandle moduleRecord(thread, thread->GlobalConstants()->GetUndefined()); - if (jsPandaFile->IsBundlePack()) { + if (!jsPandaFile->IsMergedPF()) { moduleRecord.Update(moduleManager->HostResolveImportedModule(jsPandaFile, filename)); } else { moduleRecord.Update(moduleManager->HostResolveImportedModuleWithMerge(filename, entry)); @@ -282,6 +305,7 @@ Expected JSPandaFileExecutor::ExecuteModuleBufferSecure(JST size_t size, const CString &filename, bool needUpdate) { LOG_ECMA(DEBUG) << "JSPandaFileExecutor::ExecuteModuleBufferSecure with secure buffer filename " << filename; + ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "JSPandaFileExecutor::ExecuteModuleBufferSecure"); CString name; EcmaVM *vm = thread->GetEcmaVM(); #if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) @@ -294,7 +318,8 @@ Expected JSPandaFileExecutor::ExecuteModuleBufferSecure(JST name = assetPath + "/" + JSPandaFile::MERGE_ABC_NAME; #endif CString normalName = PathHelper::NormalizePath(filename); - CString entry = PathHelper::ParseOhmUrl(vm, normalName, name); + CString entry; + ModulePathHelper::ParseOhmUrl(vm, normalName, name, entry); std::shared_ptr jsPandaFile = JSPandaFileManager::GetInstance()-> LoadJSPandaFileSecure(thread, name, entry, buffer, size, needUpdate); if (jsPandaFile == nullptr) { @@ -305,20 +330,24 @@ Expected JSPandaFileExecutor::ExecuteModuleBufferSecure(JST // realEntry is used to record the original record, which is easy to throw when there are exceptions const CString realEntry = entry; - if (!jsPandaFile->IsBundlePack()) { + if (jsPandaFile->IsMergedPF()) { jsPandaFile->CheckIsRecordWithBundleName(entry); if (!jsPandaFile->IsRecordWithBundleName()) { - PathHelper::CroppingRecord(entry); + PathHelper::AdaptOldIsaRecord(entry); } } // will be refactored, temporarily use the function IsModule to verify realEntry - [[maybe_unused]] bool isModule = jsPandaFile->IsModule(thread, entry, realEntry); - if (thread->HasPendingException()) { - thread->GetCurrentEcmaContext()->HandleUncaughtException(thread->GetException()); - return Unexpected(false); + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << realEntry <<"' in baseFileName " << name << "."; + CString msg = "cannot find record '" + realEntry + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), Unexpected(false)); + } + if (!jsPandaFile->IsModule(recordInfo)) { + LOG_ECMA(FATAL) << "Input file is not esmodule"; } - ASSERT(isModule); return CommonExecuteBuffer(thread, name, entry, jsPandaFile.get()); } } // namespace panda::ecmascript diff --git a/ecmascript/jspandafile/js_pandafile_manager.cpp b/ecmascript/jspandafile/js_pandafile_manager.cpp index bb9a0aff00df3eadc4b71ef5f3356bd5c3075089..9d5a24d74462fc634c8bd2b933c531f8a3577f07 100644 --- a/ecmascript/jspandafile/js_pandafile_manager.cpp +++ b/ecmascript/jspandafile/js_pandafile_manager.cpp @@ -15,15 +15,18 @@ #include "ecmascript/jspandafile/js_pandafile_manager.h" -#include "ecmascript/base/path_helper.h" #include 
"ecmascript/compiler/aot_file/an_file_data_manager.h" #include "ecmascript/compiler/aot_file/aot_file_manager.h" #include "ecmascript/js_file_path.h" #include "ecmascript/jspandafile/program_object.h" +#include "ecmascript/module/js_module_manager.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/pgo_profiler/pgo_profiler_manager.h" #include "file.h" +#include "jsnapi.h" namespace panda::ecmascript { +using PGOProfilerManager = pgo::PGOProfilerManager; static const size_t MALLOC_SIZE_LIMIT = 2147483648; // Max internal memory used by the VM declared in options JSPandaFileManager *JSPandaFileManager::GetInstance() @@ -58,11 +61,6 @@ std::shared_ptr JSPandaFileManager::LoadJSPandaFile(JSThread *threa } if (jsPandaFile != nullptr) { InsertJSPandaFileVmUnlocked(thread->GetEcmaVM(), jsPandaFile); -#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) - if (thread->GetIsProfiling()) { - GetJSPtExtractorAndExtract(jsPandaFile.get()); - } -#endif return jsPandaFile; } } @@ -76,12 +74,18 @@ std::shared_ptr JSPandaFileManager::LoadJSPandaFile(JSThread *threa LOG_ECMA(ERROR) << "resolveBufferCallback is nullptr"; return nullptr; } - std::vector data = resolveBufferCallback(base::PathHelper::ParseHapPath(filename)); - if (data.empty()) { + uint8_t *data = nullptr; + size_t dataSize = 0; + bool getBuffer = resolveBufferCallback(ModulePathHelper::ParseHapPath(filename), &data, &dataSize); + if (!getBuffer) { LOG_ECMA(ERROR) << "resolveBufferCallback get buffer failed"; return nullptr; } - pf = panda_file::OpenPandaFileFromMemory(data.data(), data.size()); + if (!JSNApi::CheckSecureMem(reinterpret_cast(data))) { + LOG_ECMA(ERROR) << "Hsp secure memory check failed, please execute in secure memory."; + return nullptr; + } + pf = panda_file::OpenPandaFileFromSecureMemory(data, dataSize); } else { pf = panda_file::OpenPandaFileOrZip(filename, panda_file::File::READ_WRITE); } @@ -105,6 +109,7 @@ std::shared_ptr JSPandaFileManager::LoadJSPandaFile(JSThread *threa std::string_view entryPoint, const void *buffer, size_t size, bool needUpdate) { if (buffer == nullptr || size == 0) { + LOG_FULL(ERROR) << "Input buffer is empty"; return nullptr; } { @@ -122,11 +127,6 @@ std::shared_ptr JSPandaFileManager::LoadJSPandaFile(JSThread *threa } if (jsPandaFile != nullptr) { InsertJSPandaFileVmUnlocked(thread->GetEcmaVM(), jsPandaFile); -#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) - if (thread->GetIsProfiling()) { - GetJSPtExtractorAndExtract(jsPandaFile.get()); - } -#endif return jsPandaFile; } } @@ -153,6 +153,7 @@ std::shared_ptr JSPandaFileManager::LoadJSPandaFileSecure(JSThread std::string_view entryPoint, uint8_t *buffer, size_t size, bool needUpdate) { if (buffer == nullptr || size == 0) { + LOG_FULL(ERROR) << "Input buffer is empty"; return nullptr; } { @@ -170,11 +171,6 @@ std::shared_ptr JSPandaFileManager::LoadJSPandaFileSecure(JSThread } if (jsPandaFile != nullptr) { InsertJSPandaFileVmUnlocked(thread->GetEcmaVM(), jsPandaFile); -#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) - if (thread->GetIsProfiling()) { - GetJSPtExtractorAndExtract(jsPandaFile.get()); - } -#endif return jsPandaFile; } } @@ -201,10 +197,6 @@ JSHandle JSPandaFileManager::GenerateProgram(EcmaVM *vm, const JSPandaF std::string_view entryPoint) { ASSERT(GetJSPandaFile(jsPandaFile->GetPandaFile()) != nullptr); - if (AnFileDataManager::GetInstance()->IsEnable()) { - vm->GetJSThread()->GetCurrentEcmaContext()->GetAOTFileManager()->LoadAiFile(jsPandaFile); - } - return PandaFileTranslator::GenerateProgram(vm, jsPandaFile, 
entryPoint); } @@ -416,35 +408,43 @@ DebugInfoExtractor *JSPandaFileManager::GetJSPtExtractorAndExtract(const JSPanda return extractor; } - DebugInfoExtractor *extractor = iter->second.get(); - extractor->Extract(); - return extractor; + return iter->second.get(); } DebugInfoExtractor *JSPandaFileManager::CpuProfilerGetJSPtExtractor(const JSPandaFile *jsPandaFile) { - auto const &filename = jsPandaFile->GetJSPandaFileDesc(); + LOG_ECMA_IF(jsPandaFile == nullptr, FATAL) << "GetJSPtExtractor error, js pandafile is nullptr"; + + os::memory::LockHolder lock(jsPandaFileLock_); + const auto &filename = jsPandaFile->GetJSPandaFileDesc(); if (loadedJSPandaFiles_.find(filename) == loadedJSPandaFiles_.end()) { - return nullptr; + LOG_ECMA(FATAL) << "get extractor failed, file not exist: " << filename; + UNREACHABLE(); } + DebugInfoExtractor *extractor = nullptr; auto iter = extractors_.find(jsPandaFile); if (iter == extractors_.end()) { - return nullptr; + auto extractorPtr = std::make_unique(jsPandaFile); + extractor = extractorPtr.get(); + extractors_[jsPandaFile] = std::move(extractorPtr); + } else { + extractor = iter->second.get(); } - return iter->second.get(); + extractor->Extract(); + return extractor; } -std::shared_ptr JSPandaFileManager::GenerateJSPandaFile(JSThread *thread, const panda_file::File *pf, - const CString &desc, std::string_view entryPoint) +std::shared_ptr JSPandaFileManager::GenerateJSPandaFile(JSThread *thread, + const panda_file::File *pf, const CString &desc, std::string_view entryPoint) { ASSERT(GetJSPandaFile(pf) == nullptr); std::shared_ptr newJsPandaFile = NewJSPandaFile(pf, desc); EcmaVM *vm = thread->GetEcmaVM(); CString methodName = entryPoint.data(); - if (newJsPandaFile->IsBundlePack()) { + if (!newJsPandaFile->IsMergedPF()) { // entryPoint maybe is _GLOBAL::func_main_watch to execute func_main_watch auto pos = entryPoint.find_last_of("::"); if (pos != std::string_view::npos) { diff --git a/ecmascript/jspandafile/js_pandafile_manager.h b/ecmascript/jspandafile/js_pandafile_manager.h index 77cf2e414a1c424a5220496abeb421791a9d0e02..40447b56372b853ad76f89b0797a1586859ec145 100644 --- a/ecmascript/jspandafile/js_pandafile_manager.h +++ b/ecmascript/jspandafile/js_pandafile_manager.h @@ -87,7 +87,7 @@ private: }; std::shared_ptr GenerateJSPandaFile(JSThread *thread, const panda_file::File *pf, const CString &desc, - std::string_view entryPoint); + std::string_view entryPoint = JSPandaFile::ENTRY_FUNCTION_NAME); std::shared_ptr GetJSPandaFile(const panda_file::File *pf); std::shared_ptr FindJSPandaFileWithChecksum(const CString &filename, uint32_t checksum); std::shared_ptr FindJSPandaFileUnlocked(const CString &filename); diff --git a/ecmascript/jspandafile/literal_data_extractor.cpp b/ecmascript/jspandafile/literal_data_extractor.cpp index 3f13336bb902368f669ae10713873554d8275622..ab6465ebe5bda0494f9458ab6d79c2ccfc819ca0 100644 --- a/ecmascript/jspandafile/literal_data_extractor.cpp +++ b/ecmascript/jspandafile/literal_data_extractor.cpp @@ -233,7 +233,12 @@ JSHandle LiteralDataExtractor::DefineMethodInLiteral(JSThread *threa moduleName = entryPoint; entry = entryPoint; } - if (jsPandaFile->IsModule(thread, entry)) { + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entry, recordInfo); + if (!hasRecord) { + LOG_ECMA(FATAL) << "cannot find record '" + entry + "', please check the request path."; + } + if (jsPandaFile->IsModule(recordInfo)) { JSHandle module = thread->GetCurrentEcmaContext()->GetModuleManager()->HostGetImportedModule( 
moduleName); jsFunc->SetModule(thread, module.GetTaggedValue()); diff --git a/ecmascript/jspandafile/method_literal.cpp b/ecmascript/jspandafile/method_literal.cpp index 55ef561333463eb5a0282fcd0fa3c588143ff632..b1003af7b73166681c567834346a2eab8d2a7c5f 100644 --- a/ecmascript/jspandafile/method_literal.cpp +++ b/ecmascript/jspandafile/method_literal.cpp @@ -121,6 +121,18 @@ CString MethodLiteral::GetRecordName(const JSPandaFile *jsPandaFile, EntityId me return JSPandaFile::ParseEntryPoint(desc); } +const char *MethodLiteral::GetRecordNameWithSymbol(const JSPandaFile *jsPandaFile, EntityId methodId) +{ + if (jsPandaFile == nullptr) { + return ""; + } + + const panda_file::File *pf = jsPandaFile->GetPandaFile(); + panda_file::MethodDataAccessor mda(*pf, methodId); + panda_file::ClassDataAccessor cda(*pf, mda.GetClassId()); + return utf::Mutf8AsCString(cda.GetDescriptor()); +} + uint32_t MethodLiteral::GetCodeSize(const JSPandaFile *jsPandaFile, EntityId methodId) { if (jsPandaFile == nullptr) { diff --git a/ecmascript/jspandafile/method_literal.h b/ecmascript/jspandafile/method_literal.h index 9cdd6c12e595141c93d34cbb4ad65c69a4d5e2a0..0fb63b76498956b04e72d68932b63a340bdadd4a 100644 --- a/ecmascript/jspandafile/method_literal.h +++ b/ecmascript/jspandafile/method_literal.h @@ -216,6 +216,7 @@ public: static constexpr size_t FUNCTION_KIND_NUM_BITS = 4; using BuiltinIdBits = BitField; // offset 0-7 using FunctionKindBits = BuiltinIdBits::NextField; // offset 8-11 + using IsNoGCBit = FunctionKindBits::NextFlag; // offset 12 inline NO_THREAD_SANITIZE void SetHotnessCounter(int16_t counter) { @@ -253,6 +254,16 @@ public: extraLiteralInfo_ = FunctionKindBits::Update(extraLiteralInfo_, kind); } + void SetNoGCBit(bool isNoGC) + { + extraLiteralInfo_ = IsNoGCBit::Update(extraLiteralInfo_, isNoGC); + } + + bool IsNoGC() const + { + return IsNoGCBit::Decode(extraLiteralInfo_); + } + FunctionKind GetFunctionKind() const { return static_cast(FunctionKindBits::Decode(extraLiteralInfo_)); @@ -297,6 +308,7 @@ public: static std::string PUBLIC_API ParseFunctionName(const JSPandaFile *jsPandaFile, EntityId methodId); static uint32_t GetCodeSize(const JSPandaFile *jsPandaFile, EntityId methodId); static CString GetRecordName(const JSPandaFile *jsPandaFile, EntityId methodId); + static const char PUBLIC_API *GetRecordNameWithSymbol(const JSPandaFile *jsPandaFile, EntityId methodId); const uint8_t *GetBytecodeArray() const { diff --git a/ecmascript/jspandafile/panda_file_translator.cpp b/ecmascript/jspandafile/panda_file_translator.cpp index 78fccd7a040a6c9f8173c0ebdf43a8051945ee39..109b7ed3b77443ec3d391a5122d366acf4318496 100644 --- a/ecmascript/jspandafile/panda_file_translator.cpp +++ b/ecmascript/jspandafile/panda_file_translator.cpp @@ -59,7 +59,7 @@ void PandaFileTranslator::TranslateClasses(JSPandaFile *jsPandaFile, const CStri auto methodId = mda.GetMethodId(); CString name = reinterpret_cast(jsPandaFile->GetStringData(mda.GetNameId()).data); auto methodOffset = methodId.GetOffset(); - if (jsPandaFile->IsBundlePack()) { + if (!jsPandaFile->IsMergedPF()) { if (!isUpdateMainMethodIndex && name == methodName) { jsPandaFile->UpdateMainMethodIndex(methodOffset); isUpdateMainMethodIndex = true; @@ -88,7 +88,7 @@ void PandaFileTranslator::TranslateClasses(JSPandaFile *jsPandaFile, const CStri const uint8_t *insns = codeDataAccessor.GetInstructions(); if (translatedCode.find(insns) == translatedCode.end()) { translatedCode.insert(insns); - if (jsPandaFile->IsBundlePack()) { + if (!jsPandaFile->IsMergedPF()) { 
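+ // unmerged abc holds a single record, so no recordName is passed here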
TranslateBytecode(jsPandaFile, codeSize, insns, methodLiteral); } else { TranslateBytecode(jsPandaFile, codeSize, insns, methodLiteral, recordName); @@ -119,7 +119,7 @@ JSHandle PandaFileTranslator::GenerateProgram(EcmaVM *vm, const JSPanda constpool = JSHandle(vm->GetJSThread(), constpoolVal); } - if (!jsPandaFile->IsBundlePack()) { + if (jsPandaFile->IsMergedPF()) { ParseFuncAndLiteralConstPool(vm, jsPandaFile, entryPoint.data(), constpool); } } diff --git a/ecmascript/jspandafile/program_object.h b/ecmascript/jspandafile/program_object.h index ae7473d259fc042785ea4b03bd3fdb95b530a8ae..0daf53e714f7693a67f658106fc15e4eaa026054 100644 --- a/ecmascript/jspandafile/program_object.h +++ b/ecmascript/jspandafile/program_object.h @@ -24,7 +24,7 @@ #include "ecmascript/jspandafile/constpool_value.h" #include "ecmascript/jspandafile/js_pandafile_manager.h" #include "ecmascript/jspandafile/literal_data_extractor.h" -#include "ecmascript/module/js_module_manager.h" +#include "ecmascript/module/js_module_source_text.h" #include "ecmascript/patch/quick_fix_manager.h" #include "ecmascript/pgo_profiler/pgo_profiler.h" @@ -56,6 +63,13 @@ public: * | object literal(JSObject) | | * | class literal(ClassLiteral) | v * +--------------------------------+---- + * | ... | ^ + * | InstanceTSHClass | | + * | ConstructorTSHClass | snapshotCPList (the positions of each part are random) + * | ArrayTSElements | | + * | ArrayTSElementsKind | v + * | constIndexInfo(TaggedArray) | at the end of snapshotCPList + * +--------------------------------+---- * | IndexHeader | * +--------------------------------+ * | JSPandaFile | @@ -63,9 +70,10 @@ public: */ class ConstantPool : public TaggedArray { public: - static constexpr size_t JS_PANDA_FILE_INDEX = 1; - static constexpr size_t INDEX_HEADER_INDEX = 2; - static constexpr size_t RESERVED_POOL_LENGTH = INDEX_HEADER_INDEX; + static constexpr size_t JS_PANDA_FILE_INDEX = 1; // no GC scan needed + static constexpr size_t INDEX_HEADER_INDEX = 2; // no GC scan needed + static constexpr size_t CONSTANT_INDEX_INFO_INDEX = 3; + static constexpr size_t RESERVED_POOL_LENGTH = INDEX_HEADER_INDEX; // boundary of the GC-scanned area static ConstantPool *Cast(TaggedObject *object) { @@ -137,18 +145,22 @@ public: static size_t ComputeSize(uint32_t cacheSize) { - return TaggedArray::ComputeSize(JSTaggedValue::TaggedTypeSize(), cacheSize + RESERVED_POOL_LENGTH); + // 1 : constIndexInfo is a TaggedArray and takes up an extra slot + return TaggedArray::ComputeSize(JSTaggedValue::TaggedTypeSize(), cacheSize + 1 + RESERVED_POOL_LENGTH); } - inline void InitializeWithSpecialValue(JSTaggedValue initValue, uint32_t capacity, uint32_t extraLength = 0) + inline void InitializeWithSpecialValue(JSThread *thread, JSTaggedValue initValue, + uint32_t capacity, uint32_t extraLength = 0) { ASSERT(initValue.IsSpecial()); - SetLength(capacity + RESERVED_POOL_LENGTH); + // 1 : constIndexInfo is a TaggedArray and takes up an extra slot + SetLength(capacity + 1 + RESERVED_POOL_LENGTH); SetExtraLength(extraLength); for (uint32_t i = 0; i < capacity; i++) { size_t offset = JSTaggedValue::TaggedTypeSize() * i; Barriers::SetPrimitive(GetData(), offset, initValue.GetRawData()); } + SetConstantIndexInfo(thread); SetJSPandaFile(nullptr); SetIndexHeader(nullptr); } @@ -168,6 +180,12 @@ public: return Barriers::GetValue(GetData(), GetJSPandaFileOffset()); } + inline void SetConstantIndexInfo(JSThread *thread) + { + JSHandle array(thread->GlobalConstants()->GetHandledEmptyArray()); + Barriers::SetPrimitive(GetData(), GetConstantIndexInfoOffset(), 
array.GetTaggedValue().GetRawData()); + } + inline void SetObjectToCache(JSThread *thread, uint32_t index, JSTaggedValue value) { Set(thread, index, value); @@ -275,7 +293,11 @@ public: JSMutableHandle properties(thread, JSTaggedValue::Undefined()); LiteralDataExtractor::ExtractObjectDatas(thread, jsPandaFile, id, elements, properties, constpoolHandle, entry, needSetAotFlag, entryIndexes); - JSHandle obj = JSObject::CreateObjectFromProperties(thread, properties); + JSTaggedValue ihcVal = JSTaggedValue::Undefined(); + if (needSetAotFlag) { + ihcVal = entryIndexes->GetIhc(); + } + JSHandle obj = JSObject::CreateObjectFromProperties(thread, properties, ihcVal); JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle valueHandle(thread, JSTaggedValue::Undefined()); size_t elementsLen = elements->GetLength(); @@ -287,18 +309,7 @@ public: valueHandle.Update(elements->Get(i + 1)); JSObject::DefinePropertyByLiteral(thread, obj, key, valueHandle); } - if (needSetAotFlag) { - JSTaggedValue ihcVal = entryIndexes->GetIhc(); - if (!ihcVal.IsUndefined()) { - JSHClass *ihc = JSHClass::Cast(ihcVal.GetTaggedObject()); - JSHClass *oldHC = obj->GetJSHClass(); - ihc->SetPrototype(thread, oldHC->GetPrototype()); - obj->SetClass(ihc); - } - } - PGOProfiler *profiler = thread->GetEcmaVM()->GetPGOProfiler(); - profiler->InsertLiteralId(JSTaggedType(obj->GetJSHClass()), id); val = obj.GetTaggedValue(); break; } @@ -308,6 +319,9 @@ public: uint32_t length = literal->GetLength(); JSHandle arr(JSArray::ArrayCreate(thread, JSTaggedNumber(length), ArrayMode::LITERAL)); arr->SetElements(thread, literal); + if (thread->GetEcmaVM()->IsEnablePGOProfiler()) { + JSHClass::TransitToElementsKind(thread, arr); + } val = arr.GetTaggedValue(); break; } @@ -321,11 +335,18 @@ public: return val; } + static panda_file::File::EntityId GetIdFromCache(JSTaggedValue constpool, uint32_t index) + { + const ConstantPool *taggedPool = ConstantPool::Cast(constpool.GetTaggedObject()); + panda_file::File::EntityId id = taggedPool->GetEntityId(index); + return id; + } + template static JSTaggedValue GetLiteralFromCache(JSThread *thread, JSTaggedValue constpool, uint32_t index, JSTaggedValue module) { - CString entry = ModuleManager::GetRecordName(module); + CString entry = SourceTextModule::GetRecordName(module); return GetLiteralFromCache(thread, constpool, index, entry); } @@ -369,6 +390,11 @@ private: return JSTaggedValue::TaggedTypeSize() * (GetLength() - INDEX_HEADER_INDEX); } + inline size_t GetConstantIndexInfoOffset() const + { + return JSTaggedValue::TaggedTypeSize() * (GetLength() - CONSTANT_INDEX_INFO_INDEX); + } + inline size_t GetLastOffset() const { return JSTaggedValue::TaggedTypeSize() * GetLength() + DATA_OFFSET; diff --git a/ecmascript/jspandafile/scope_info_extractor.cpp b/ecmascript/jspandafile/scope_info_extractor.cpp index 3d5e748af39766d47c27d5d49dd73e5d209301e9..e4b33e291ea7b35bd7653b122ea40d066e9369ec 100644 --- a/ecmascript/jspandafile/scope_info_extractor.cpp +++ b/ecmascript/jspandafile/scope_info_extractor.cpp @@ -18,6 +18,7 @@ #include "ecmascript/interpreter/frame_handler.h" #include "ecmascript/jspandafile/literal_data_extractor.h" #include "ecmascript/jspandafile/program_object.h" +#include "ecmascript/object_factory-inl.h" #include "ecmascript/tagged_array-inl.h" namespace panda::ecmascript { diff --git a/ecmascript/jspandafile/tests/js_pandafile_test.cpp b/ecmascript/jspandafile/tests/js_pandafile_test.cpp index c9eec87b6e1b5640c96f7155e87459e2a78fd6ac..f922ec3a68e69642b466906c1121b57ecef3151d 
100644 --- a/ecmascript/jspandafile/tests/js_pandafile_test.cpp +++ b/ecmascript/jspandafile/tests/js_pandafile_test.cpp @@ -263,8 +263,10 @@ HWTEST_F_L0(JSPandaFileTest, IsModule_IsCjs) )"; const CString fileName1 = "test1.pa"; std::shared_ptr pf1 = CreateJSPandaFile(source1, fileName1); - EXPECT_EQ(pf1->IsModule(thread), false); - EXPECT_EQ(pf1->IsCjs(thread), false); + JSPandaFile::JSRecordInfo info = + const_cast(pf1.get())-> FindRecordInfo(JSPandaFile::ENTRY_FUNCTION_NAME); + EXPECT_EQ(pf1->IsModule(info), false); + EXPECT_EQ(pf1->IsCjs(info), false); } HWTEST_F_L0(JSPandaFileTest, SetLoadedAOTStatus_IsLoadedAOT) diff --git a/ecmascript/layout_info.cpp b/ecmascript/layout_info.cpp index 478f1edeb96438e16a51fced3a0a993ed8302595..3ce99afba68e04ea17af2faa018b230a86a9d6de 100644 --- a/ecmascript/layout_info.cpp +++ b/ecmascript/layout_info.cpp @@ -20,8 +20,20 @@ #include "ecmascript/js_object-inl.h" #include "ecmascript/js_symbol.h" #include "ecmascript/mem/assert_scope.h" +#include "pgo_profiler/pgo_profiler_layout.h" namespace panda::ecmascript { +using PGOHandler = pgo::PGOHandler; +void LayoutInfo::Initialize(const JSThread *thread, int num) +{ + SetExtraLength(num); + int propNum = GetPropertiesCapacity(); + auto attr = PropertyAttributes(); + for (int i = 0; i < propNum; i++) { + SetPropertyInit(thread, i, JSTaggedValue::Hole(), attr); + } +} + void LayoutInfo::AddKey(const JSThread *thread, [[maybe_unused]] int index, const JSTaggedValue &key, const PropertyAttributes &attr) { @@ -76,7 +88,7 @@ void LayoutInfo::GetAllKeys(const JSThread *thread, int end, int offset, TaggedA } } void LayoutInfo::GetAllKeysByFilter(const JSThread *thread, uint32_t numberOfProps, uint32_t &keyArrayEffectivelength, - TaggedArray *keyArray, const JSHandle object, uint32_t filter) + TaggedArray *keyArray, const JSHandle object, uint32_t filter) { ASSERT(numberOfProps <= static_cast(NumberOfElements())); ASSERT_PRINT(keyArrayEffectivelength + numberOfProps <= keyArray->GetLength(), @@ -172,7 +184,7 @@ void LayoutInfo::DumpFieldIndexForProfile(int index, PGOHClassLayoutDesc &desc, TrackType type = attr.GetTrackType(); bool isAccessor = attr.IsAccessor(); auto keyString = EcmaStringAccessor(key).ToCString(); - desc.UpdateKeyAndDesc(keyString.c_str(), PGOHandler(type, isAccessor), kind); + desc.UpdateKeyAndDesc(keyString, PGOHandler(type, isAccessor), kind); } } } // namespace panda::ecmascript diff --git a/ecmascript/layout_info.h b/ecmascript/layout_info.h index ec350c646a634be3e054cb9b5cd2cea2b126de30..3dd9b020f39e2fd94406bc435a7486e0790b77e3 100644 --- a/ecmascript/layout_info.h +++ b/ecmascript/layout_info.h @@ -29,7 +29,7 @@ struct Properties { class LayoutInfo : private TaggedArray { public: static constexpr int MIN_PROPERTIES_LENGTH = JSObject::MIN_PROPERTIES_LENGTH; - static constexpr int MAX_PROPERTIES_LENGTH = PropertyAttributes::MAX_CAPACITY_OF_PROPERTIES; + static constexpr int MAX_PROPERTIES_LENGTH = PropertyAttributes::MAX_FAST_PROPS_CAPACITY; static constexpr uint32_t ELEMENTS_INDEX_LOG2 = 1; static constexpr uint32_t ATTR_INDEX_OFFSET = 1; @@ -39,6 +39,7 @@ public: return reinterpret_cast(obj); } + void Initialize(const JSThread *thread, int num = 0); int GetPropertiesCapacity() const; int NumberOfElements() const; void SetNumberOfElements(const JSThread *thread, int properties); diff --git a/ecmascript/mapleall/BUILD.gn b/ecmascript/mapleall/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..e35cf53430890f818dbd420c1ee5bfa9fc949de6 --- /dev/null +++ 
b/ecmascript/mapleall/BUILD.gn @@ -0,0 +1,242 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# C/CXX Build flags +config("mapleallcompilecfg") { + cflags_cc = [] + + cflags = [] + cflags_cc = [] + cflags_c = [] + if (GN_BUILD_TYPE == "RELEASE") { + cflags_cc += [ + "-O2", + "-fno-strict-aliasing", + "-D_FORTIFY_SOURCE=2", + ] + cflags_c += [ + "-O2", + "-fno-strict-aliasing", + "-D_FORTIFY_SOURCE=2", + ] + } else if (GN_BUILD_TYPE == "DEBUG") { + cflags_cc += [ + "-O0", + "-g3", + "-ftrapv", + "-fstack-check", + ] + cflags_c += [ + "-O0", + "-g3", + "-ftrapv", + "-fstack-check", + ] + } else { + cflags_cc += [ + "-O2", + "-fno-strict-aliasing", + "-g", + ] + cflags_c += [ + "-O2", + "-fno-strict-aliasing", + "-g", + ] + } + + cflags_c += [ + "-Wall", + "-fstack-protector-strong", + "-fPIC", + "-fPIE", + "-fvisibility=hidden", + "-pipe", + "-Werror", + "-Wdate-time", + "-Wfloat-equal", + "${DESIGNATOR}", + ] + + cflags_cc += [ + "-Wall", + "-fstack-protector-strong", + "-fPIC", + "-fPIE", + "-fvisibility=hidden", + "-pipe", + "-Wno-c99-designator", + "-Wno-range-loop-construct", + "-Werror", + "-Wdate-time", + "-Wfloat-equal", + "${DESIGNATOR}", + ] + + if (HOST_ARCH == 64) { + cflags_c += [ "-m64" ] + cflags_cc += [ "-m64" ] + } else { + cflags_c += [ "-m32" ] + cflags_cc += [ "-m32" ] + } + + if (DYNAMICLANG) { + cflags_cc += [ "-DDYNAMICLANG" ] + } + + if (RC_V2) { + cflags_cc += [ "-DRC_NO_MMAP" ] + } + + if (TEST_BENCHMARK) { + cflags_cc += [ "-DTEST_BENCHMARK" ] + } + + if (MEMORY_LEAK_CHECK) { + cflags_cc += [ "-DMEMORY_LEAK_CHECK" ] + } + + if (MARK_CYCLE_ROOTS) { + cflags_cc += [ "-DMARK_CYCLE_ROOTS" ] + } + + cflags_cc += [ "-DMIR_FEATURE_FULL=1" ] + + if (MIR_JAVA == 1) { + cflags_cc += [ "-DMIR_JAVA=1" ] + } else { + TARGET = "vm" + cflags_cc += [ "-DMIR_JAVA=0" ] + } + + cflags_cc += [ + "-std=c++17", + "-fno-common", + ] + + +# if (ASAN == 1) { +# cflags_cc += [ +# "-fsanitize=address" +# ] +# libs = [ +# "${LLVMLIBDIR}/libclang_rt.asan-x86_64.a" +# ] +# } + + if (TARGET == "aarch64") { + cflags_cc += [ + "-DTARGAARCH64", +# "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (TARGET == "x86_64") { + cflags_cc += [ + "-DTARGX86_64", +# "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (TARGET == "riscv64") { + cflags_cc += [ + "-DTARGRISCV64", +# "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (TARGET == "ark") { + cflags_cc += [ + "-DTARGARK", +# "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (GN_BUILD_TYPE == "DEBUG") { + cflags_c += [ "-DDEBUG" ] + cflags_cc += [ "-DDEBUG" ] + cflags_cc += [ "-g" ] + } + + if (HOST_ARCH == 64) { + ldflags = [] + ldflags += [ + "-fPIC", + "-rdynamic", + "-lpthread", + "-Wl,-z,relro", + "-Wl,-z,now", + "-Wl,-z,noexecstack", + "-pie", + ] + } + +# if (ASAN == 1) { +# ldflags += ["-ldl"] +# } + if (COV == 1) { + ldflags += ["--coverage"] + cflags_cc += [ + "-fprofile-arcs", + "-ftest-coverage", + "-Xclang", + "-coverage-version=A75*" + ] + } + + if (MAJOR_VERSION != "") { + cflags_cc += [ "-DMAJOR_VERSION=${MAJOR_VERSION}", ] + } + 
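+  # Each version define below is emitted only when the corresponding GN arg is non-empty.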
+ if (MINOR_VERSION != "") { + cflags_cc += [ "-DMINOR_VERSION=${MINOR_VERSION}", ] + } + + if (RELEASE_VERSION != "") { + cflags_cc += [ "-DRELEASE_VERSION=\"${RELEASE_VERSION}\"", ] + } + + if (BUILD_VERSION != "") { + cflags_cc += [ "-DBUILD_VERSION=${BUILD_VERSION}", ] + } + + if (GIT_REVISION != "") { + cflags_cc += [ "-DGIT_REVISION=\"${GIT_REVISION}\"", ] + } + + cflags_cc += [ + "-Wno-bitwise-instead-of-logical", + "-Wno-error=xor-used-as-pow", + "-Wno-error=return-stack-address", + "-Wno-error=dangling-gsl", + "-Wno-unused-but-set-variable", + "-Wno-deprecated-declarations", + "-Wno-unused-but-set-parameter", + "-Wno-null-pointer-subtraction", + "-Wno-unqualified-std-cast-call", + ] +} + +#group("maple") { +# deps = [ "${MAPLEALL_ROOT}/maple_driver:maple" ] +#} + +#group("irbuild") { +# deps = [ "${MAPLEALL_ROOT}/maple_ir:irbuild" ] +#} + +#group("maplegen") { +# deps = [ "${MAPLEALL_ROOT}/maple_be:maplegen" ] +#} diff --git a/ecmascript/mapleall/bin/java2jar b/ecmascript/mapleall/bin/java2jar new file mode 100755 index 0000000000000000000000000000000000000000..c8d50f6f0e8a207ff57b0eb23442f6015c6dc97d --- /dev/null +++ b/ecmascript/mapleall/bin/java2jar @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +OUTPUT=$1 +CORE_ALL_JAR=$2 +shift 2 +javac -g -d . -bootclasspath ${CORE_ALL_JAR} $@ +jar -cvf ${OUTPUT} *.class diff --git a/ecmascript/mapleall/maple_be/BUILD.gn b/ecmascript/mapleall/maple_be/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..d4d10adc6546881536e23a945804d25671e763d0 --- /dev/null +++ b/ecmascript/mapleall/maple_be/BUILD.gn @@ -0,0 +1,366 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +include_directories = [ + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/litecg", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLE_BUILD_OUTPUT}/common/target", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_phase/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", +] + +deps_libcg = [] + +deps_libmplbe = [ ":libcglowerer" ] + +if (TARGET == "aarch64") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/aarch64", + "${MAPLEALL_ROOT}/maple_be/include/be/aarch64", + ] + deps_libcg += [ ":libcgaarch64", + ":libcgphases", + ] +} + +if (TARGET == "x86_64") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/x86_64", + "${MAPLEALL_ROOT}/maple_be/include/be/x86_64", + ] + deps_libcg += [ ":libcgx8664", + ":libcgx86phases" + ] +} + +if (TARGET == "riscv64") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/riscv64", + "${MAPLEALL_ROOT}/maple_be/include/be/riscv64", + ] + deps_libcg += [ ":libcgriscv64" ] +} + +if (TARGET == "ark") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/ark", + "${MAPLEALL_ROOT}/maple_be/include/be/ark", + ] + deps_libcg += [ ":libcgark" ] +} + +src_libmplad = [ "src/ad/mad.cpp" ] + +src_libcglowerer = [ + "src/be/bbt.cpp", + "src/be/trycatchblockslower.cpp", + "src/be/lower.cpp", +] + +src_libmplbe = [ + "src/be/becommon.cpp", + "src/be/switch_lowerer.cpp", + "src/be/rt.cpp", +] + +src_libcgaarch64 = [ + "src/cg/aarch64/aarch64_abi.cpp", + "src/cg/aarch64/aarch64_call_conv.cpp", + "src/cg/aarch64/mpl_atomic.cpp", + "src/cg/aarch64/aarch64_cgfunc.cpp", + "src/cg/aarch64/aarch64_dependence.cpp", + "src/cg/aarch64/aarch64_ebo.cpp", + "src/cg/aarch64/aarch64_emitter.cpp", + "src/cg/aarch64/aarch64_obj_emitter.cpp", + "src/cg/aarch64/aarch64_fixshortbranch.cpp", + "src/cg/aarch64/aarch64_global.cpp", + "src/cg/aarch64/aarch64_proepilog.cpp", + "src/cg/aarch64/aarch64_operand.cpp", + "src/cg/aarch64/aarch64_color_ra.cpp", + "src/cg/aarch64/aarch64_reg_info.cpp", + "src/cg/aarch64/aarch64_ssa.cpp", + "src/cg/aarch64/aarch64_prop.cpp", + "src/cg/aarch64/aarch64_dce.cpp", + "src/cg/aarch64/aarch64_phi_elimination.cpp", + "src/cg/aarch64/aarch64_reg_coalesce.cpp", + "src/cg/aarch64/aarch64_ico.cpp", + "src/cg/aarch64/aarch64_insn.cpp", + "src/cg/aarch64/aarch64_isa.cpp", + "src/cg/aarch64/aarch64_memlayout.cpp", + "src/cg/aarch64/aarch64_args.cpp", + "src/cg/aarch64/aarch64_live.cpp", + "src/cg/aarch64/aarch64_yieldpoint.cpp", + "src/cg/aarch64/aarch64_offset_adjust.cpp", + "src/cg/aarch64/aarch64_optimize_common.cpp", + "src/cg/aarch64/aarch64_peep.cpp", + "src/cg/aarch64/aarch64_reaching.cpp", + "src/cg/aarch64/aarch64_schedule.cpp", + "src/cg/aarch64/aarch64_strldr.cpp", + "src/cg/aarch64/aarch64_ra_opt.cpp", + "src/cg/aarch64/aarch64_alignment.cpp", + "src/cg/aarch64/aarch64_regsaves.cpp", + "src/cg/aarch64/aarch64_utils.cpp", + "src/cg/aarch64/aarch64_cg.cpp", + "src/cg/aarch64/aarch64_validbit_opt.cpp", + "src/cg/aarch64/aarch64_cfgo.cpp", +] + +src_libcgx86phases = [ + "src/cg/peep.cpp", + "src/cg/alignment.cpp", + "src/cg/reaching.cpp", + "src/cg/local_opt.cpp", + 
"src/cg/cfgo.cpp", +] + +src_libcgx8664 = [ + "src/cg/x86_64/x64_cg.cpp", + "src/cg/x86_64/x64_MPIsel.cpp", + "src/cg/x86_64/x64_cgfunc.cpp", + "src/cg/x86_64/x64_memlayout.cpp", + "src/cg/x86_64/x64_emitter.cpp", + "src/cg/x86_64/x64_abi.cpp", + "src/cg/x86_64/x64_call_conv.cpp", + "src/cg/x86_64/x64_standardize.cpp", + "src/cg/x86_64/x64_live.cpp", + "src/cg/x86_64/x64_reg_info.cpp", + "src/cg/x86_64/x64_proepilog.cpp", + "src/cg/x86_64/x64_args.cpp", + "src/cg/x86_64/x64_peep.cpp", + "src/cg/x86_64/x64_reaching.cpp", + "src/cg/x86_64/x64_local_opt.cpp", + "src/cg/x86_64/x64_cfgo.cpp", + "src/cg/x86_64/x64_isa.cpp", + "src/cg/x86_64/x64_optimize_common.cpp", + "src/cg/x86_64/elf_assembler.cpp", + "src/cg/x86_64/asm_assembler.cpp", +] + +src_libcgriscv64 = [ + "src/cg/riscv64/mpl_atomic.cpp", + "src/cg/riscv64/riscv64_abi.cpp", + "src/cg/riscv64/riscv64_args.cpp", + "src/cg/riscv64/riscv64_cg.cpp", + "src/cg/riscv64/riscv64_cgfunc.cpp", + "src/cg/riscv64/riscv64_color_ra.cpp", + "src/cg/riscv64/riscv64_dependence.cpp", + "src/cg/riscv64/riscv64_ebo.cpp", + "src/cg/riscv64/riscv64_emitter.cpp", + "src/cg/riscv64/riscv64_fixshortbranch.cpp", + "src/cg/riscv64/riscv64_global.cpp", + "src/cg/riscv64/riscv64_ico.cpp", + "src/cg/riscv64/riscv64_immediate.cpp", + "src/cg/riscv64/riscv64_insn.cpp", + "src/cg/riscv64/riscv64_isa.cpp", + "src/cg/riscv64/riscv64_live.cpp", + "src/cg/riscv64/riscv64_lsra.cpp", + "src/cg/riscv64/riscv64_memlayout.cpp", + "src/cg/riscv64/riscv64_offset_adjust.cpp", + "src/cg/riscv64/riscv64_operand.cpp", + "src/cg/riscv64/riscv64_optimize_common.cpp", + "src/cg/riscv64/riscv64_peep.cpp", + "src/cg/riscv64/riscv64_proepilog.cpp", + "src/cg/riscv64/riscv64_reaching.cpp", + "src/cg/riscv64/riscv64_reg_alloc.cpp", + "src/cg/riscv64/riscv64_schedule.cpp", + "src/cg/riscv64/riscv64_strldr.cpp", + "src/cg/riscv64/riscv64_yieldpoint.cpp", + "src/cg/riscv64/riscv64_ra_opt.cpp", +] + +src_libcgark = [ "src/cg/ark/foo.cpp" ] + +src_libcgphases = [ + "src/cg/cfgo.cpp", + "src/cg/local_opt.cpp", + "src/cg/ebo.cpp", + "src/cg/ra_opt.cpp", + "src/cg/cg_ssa.cpp", + "src/cg/cg_prop.cpp", + "src/cg/cg_dce.cpp", + "src/cg/cg_phi_elimination.cpp", + "src/cg/reg_coalesce.cpp", + "src/cg/global.cpp", + "src/cg/ico.cpp", + "src/cg/peep.cpp", + "src/cg/pressure.cpp", + "src/cg/reaching.cpp", + "src/cg/schedule.cpp", + "src/cg/strldr.cpp", + "src/cg/cg_dominance.cpp", + "src/cg/cg_pre.cpp", + "src/cg/cg_occur.cpp", + "src/cg/cg_ssu_pre.cpp", + "src/cg/cg_ssa_pre.cpp", + "src/cg/regsaves.cpp", + "src/cg/cg_critical_edge.cpp", + "src/cg/alignment.cpp", + "src/cg/cg_validbit_opt.cpp", +] + +src_libcg = [ + "src/cg/args.cpp", + "src/cg/cg_irbuilder.cpp", + "src/cg/cfi.cpp", + "src/cg/cgbb.cpp", + "src/cg/operand.cpp", + "src/cg/cgfunc.cpp", + "src/cg/cg_cfg.cpp", + "src/cg/cg_option.cpp", + "src/cg/cg_options.cpp", + "src/cg/dbg.cpp", + "src/cg/optimize_common.cpp", + "src/cg/eh_func.cpp", + "src/cg/emit.cpp", + "src/cg/obj_emit.cpp", + "src/cg/ifile.cpp", + "src/cg/live.cpp", + "src/cg/loop.cpp", + "src/cg/isel.cpp", + "src/cg/standardize.cpp", + "src/cg/memlayout.cpp", + "src/cg/yieldpoint.cpp", + "src/cg/label_creation.cpp", + "src/cg/offset_adjust.cpp", + "src/cg/reg_alloc.cpp", + "src/cg/reg_alloc_basic.cpp", + "src/cg/reg_alloc_lsra.cpp", + "src/cg/proepilog.cpp", + "src/cg/cg.cpp", + "src/cg/isa.cpp", + "src/cg/insn.cpp", + "src/cg/cg_phasemanager.cpp", + "src/litecg/litecg.cpp", + "src/litecg/lmir_builder.cpp", +] + +#cflags_cc -= [ "-DRC_NO_MMAP" ] +#cflags_cc -= [ "-DMIR_JAVA=1" ] 
+#cflags_cc += [ "-fPIC" ] + +#configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +static_library("libmplad") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libmplad + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + +# cflags_cc += [ "-DRC_NO_MMAP" ] +} + +source_set("libcglowerer") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcglowerer + include_dirs = include_directories +} + +static_library("libmplbe") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libmplbe + deps = deps_libmplbe + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +source_set("libcgaarch64") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcgaarch64 + include_dirs = include_directories +} + +source_set("libcgx8664") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcgx8664 + include_dirs = include_directories +} + +source_set("libcgx86phases") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcgx86phases + include_dirs = include_directories +} + +source_set("libcgriscv64") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcgriscv64 + include_dirs = include_directories +} + +static_library("libcgark") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcgark + include_dirs = include_directories +} + +source_set("libcgphases") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcgphases + include_dirs = include_directories +} + +static_library("libcg") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libcg + include_dirs = include_directories + deps = deps_libcg + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +executable("maplegen") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = [ + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdgenerator.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdlexer.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdmain.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdparser.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdrecord.cpp", + ] + deps = [ + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libsec_static", + ] + + include_dirs = [ + "${MAPLEALL_ROOT}/maple_be/mdgen/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + ] +} diff --git a/ecmascript/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td b/ecmascript/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td new file mode 100644 index 0000000000000000000000000000000000000000..bb1127d737296c7a1e95732956f677fc8495b33f --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td @@ -0,0 +1,171 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +DefType UnitType = Primary, And, Or; +DefType BypassType = Accumulator, Store, AluShift; + +// Architecture name +Class ArchitectureName ; +// Parallelism number +Class Parallelism ; + +Def ArchitectureName {cortex_a55}; +Def Parallelism {2}; + +// class parameters can be set as default. +// default parameters can only be placed at the end +// Class Unit :Name +Class Unit :string ; +// Class Reservation :Name +Class Reservation : string ; +// AnonClass Bypass : BypassNum, fromTypeReservation, toTypeReservation, BypassType +Class Bypass ; + +Def Unit : kUnitIdSlot0 {Primary}; +Def Unit : kUnitIdSlot1 {Primary}; +Def Unit : kUnitIdAgen {Primary}; +Def Unit : kUnitIdHazard {Primary}; +Def Unit : kUnitIdCrypto {Primary}; +Def Unit : kUnitIdMul {Primary}; +Def Unit : kUnitIdDiv {Primary}; +Def Unit : kUnitIdBranch {Primary}; +Def Unit : kUnitIdStAgu {Primary}; +Def Unit : kUnitIdLdAgu {Primary}; +Def Unit : kUnitIdFpAluLo {Primary}; +Def Unit : kUnitIdFpAluHi {Primary}; +Def Unit : kUnitIdFpMulLo {Primary}; +Def Unit : kUnitIdFpMulHi {Primary}; +Def Unit : kUnitIdFpDivLo {Primary}; +Def Unit : kUnitIdFpDivHi {Primary}; + +Def Unit : kUnitIdSlotS {Or, [kUnitIdSlot0, kUnitIdSlot1]}; +Def Unit : kUnitIdFpAluS {Or, [kUnitIdFpAluLo, kUnitIdFpAluHi]}; +Def Unit : kUnitIdFpMulS {Or, [kUnitIdFpMulLo, kUnitIdFpMulHi]}; +Def Unit : kUnitIdFpDivS {Or, [kUnitIdFpDivLo, kUnitIdFpDivHi]}; + +Def Unit : kUnitIdSlotD {And, [kUnitIdSlot0, kUnitIdSlot1]}; +Def Unit : kUnitIdFpAluD {And, [kUnitIdFpAluLo, kUnitIdFpAluHi]}; +Def Unit : kUnitIdFpMulD {And, [kUnitIdFpMulLo, kUnitIdFpMulHi]}; +Def Unit : kUnitIdFpDivD {And, [kUnitIdFpDivLo, kUnitIdFpDivHi]}; +Def Unit : kUnitIdSlotSHazard {And, [kUnitIdSlotS, kUnitIdHazard]}; +Def Unit : kUnitIdSlotSMul {And, [kUnitIdSlotS, kUnitIdMul]}; +Def Unit : kUnitIdSlotSBranch {And, [kUnitIdSlotS, kUnitIdBranch]}; +Def Unit : kUnitIdSlotSAgen {And, [kUnitIdSlotS, kUnitIdAgen]}; +Def Unit : kUnitIdSlotDAgen {And, [kUnitIdSlot0, kUnitIdSlot1, kUnitIdAgen]}; +Def Unit : kUnitIdSlot0LdAgu {And, [kUnitIdSlot0, kUnitIdLdAgu]}; +Def Unit : kUnitIdSlot0StAgu {And, [kUnitIdSlot0, kUnitIdStAgu]}; +Def Unit : nothing {}; + +Def Reservation : kLtUndef {0}; +Def Reservation : kLtShift {2, [kUnitIdSlotS]}; +Def Reservation : kLtShiftReg {2, [ kUnitIdSlotS, kUnitIdHazard]}; +Def Reservation : kLtAlu {3, [kUnitIdSlotS]}; +Def Reservation : kLtAluShift {3, [kUnitIdSlotS]}; +Def Reservation : kLtAluShiftReg {3, [kUnitIdSlotS, kUnitIdHazard]}; +Def Reservation : kLtAluExtr {3, [kUnitIdSlot1]}; +Def Reservation : kLtMul {4, [kUnitIdSlotS, kUnitIdMul]}; +Def Reservation : kLtDiv {4, [kUnitIdSlot0, kUnitIdDiv, kUnitIdDiv]}; +Def Reservation : kLtLoad1 {4, [kUnitIdSlotSAgen, kUnitIdLdAgu]}; +Def Reservation : kLtStore1 {2, [kUnitIdSlotSAgen, kUnitIdStAgu]}; +Def Reservation : kLtLoad2 {4, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtStore2 {2, [ kUnitIdSlotSAgen, kUnitIdStAgu]}; +Def Reservation : kLtLoad3plus {6, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtStore3plus {2, [kUnitIdSlotDAgen, kUnitIdSlot0StAgu, kUnitIdStAgu]}; +Def Reservation : kLtBranch {0, [kUnitIdSlotSBranch]}; +Def 
Reservation : kLtFpalu {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtFconst {2, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtFpmul {4, [kUnitIdSlotS, kUnitIdFpMulS]}; +Def Reservation : kLtFpmac {8, [kUnitIdSlotS, kUnitIdFpMulS, nothing, nothing, nothing, kUnitIdFpAluS]}; +Def Reservation : kLtR2f {2, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtF2r {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtR2fCvt {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtF2rCvt {5, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtFFlags {5, [kUnitIdSlotS]}; +Def Reservation : kLtFLoad64 {3, [kUnitIdSlotSAgen, kUnitIdLdAgu]}; +Def Reservation : kLtFLoadMany {4, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtFStore64 {0, [kUnitIdSlotSAgen, kUnitIdStAgu]}; +Def Reservation : kLtFStoreMany {0, [kUnitIdSlotSAgen, kUnitIdSlot0StAgu, kUnitIdStAgu]}; +Def Reservation : kLtAdvsimdAlu {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtAdvsimdAluQ {4, [kUnitIdSlot0, kUnitIdFpAluD]}; +Def Reservation : kLtAdvsimdMul {4, [kUnitIdSlotS, kUnitIdFpMulS]}; +Def Reservation : kLtAdvsimdMulQ {4, [kUnitIdSlot0, kUnitIdFpMulD]}; +Def Reservation : kLtAdvsimdDivS {14, [kUnitIdSlot0, kUnitIdFpMulS, kUnitIdFpDivS]}; +Def Reservation : kLtAdvsimdDivD {29, [kUnitIdSlot0, kUnitIdFpMulS, kUnitIdFpDivS]}; +Def Reservation : kLtAdvsimdDivSQ {14, [kUnitIdSlotD, kUnitIdFpMulD, kUnitIdFpDivD]}; +Def Reservation : kLtAdvsimdDivdQ {29, [kUnitIdSlotD, kUnitIdFpMulD, kUnitIdFpDivD]}; +Def Reservation : kLtCryptoAese {3, [kUnitIdSlot0]}; +Def Reservation : kLtCryptoAesmc {3, [kUnitIdSlotS]}; +Def Reservation : kLtClinit {14, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu]}; +Def Reservation : kLtAdrpLdr {6, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu]}; +Def Reservation : kLtClinitTail {8, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu, nothing, + kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; + +Def Bypass {0, [kLtShift, kLtShiftReg], [kLtAlu]}; +Def Bypass {1, [kLtShift], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]}; +Def Bypass {1, [kLtShiftReg], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]}; +Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg], [kLtAlu]}; +Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShift], AluShift}; +Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShiftReg], AluShift}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShift]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShiftReg]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluExtr]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtShift]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtShiftReg]}; +Def Bypass {2, [kLtMul], [kLtMul], Accumulator}; +Def Bypass {2, [kLtMul], [kLtAlu]}; +Def Bypass {3, [kLtMul], [kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg]}; +Def Bypass {2, [kLtLoad1], [kLtAlu]}; +Def Bypass {3, [kLtLoad1], [kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg]}; +Def Bypass {3, [kLtLoad2], [kLtAlu]}; +Def Bypass {0, [kLtAlu], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtAluShift], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def 
Bypass {0, [kLtAluShiftReg], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtAluExtr], [ kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtShift], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtShiftReg], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtMul], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtLoad1], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtLoad2], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtLoad3plus], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg], [kLtR2f]}; +Def Bypass {1, [kLtMul, kLtLoad1, kLtLoad2], [kLtR2f]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtR2fCvt]}; +Def Bypass {3, [kLtMul, kLtLoad1, kLtLoad2], [kLtR2fCvt]}; +Def Bypass {0, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg], [kLtBranch]}; +Def Bypass {1, [kLtFpalu, kLtFpmul, kLtR2f, kLtR2fCvt, kLtFconst], [kLtFpmac], Accumulator}; +Def Bypass {1, [kLtFLoad64, kLtFLoadMany], [kLtFpmac]}; +Def Bypass {4, [kLtFpmac], [kLtFpmac], Accumulator}; +Def Bypass {0, [kLtCryptoAese], [kLtCryptoAesmc]}; +Def Bypass {1, [kLtShiftReg], [kLtClinit]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluExtr], [kLtClinit]}; +Def Bypass {3, [kLtMul, kLtLoad1], [kLtClinit]}; +Def Bypass {13, [kLtAlu], [kLtClinit]}; +Def Bypass {11, [kLtClinit], [kLtStore1, kLtStore3plus], Store}; +Def Bypass {11, [kLtClinit], [kLtR2f]}; +Def Bypass {13, [kLtClinit], [kLtR2fCvt]}; +Def Bypass {1, [kLtShiftReg], [kLtAdrpLdr]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluExtr], [kLtAdrpLdr]}; +Def Bypass {3, [kLtMul, kLtLoad1], [kLtAdrpLdr]}; +Def Bypass {5, [kLtAdrpLdr], [kLtAlu]}; +Def Bypass {3, [kLtAdrpLdr], [kLtStore1, kLtStore3plus], Store}; +Def Bypass {3, [kLtAdrpLdr], [kLtR2f]}; +Def Bypass {5, [kLtAdrpLdr], [kLtR2fCvt]}; +Def Bypass {7, [kLtClinitTail], [kLtAlu]}; +Def Bypass {5, [kLtClinitTail], [kLtStore1, kLtStore3plus], Store}; +Def Bypass {5, [kLtClinitTail], [kLtR2f]}; +Def Bypass {7, [kLtClinitTail], [kLtR2fCvt]}; diff --git a/ecmascript/mapleall/maple_be/include/ad/mad.h b/ecmascript/mapleall/maple_be/include/ad/mad.h new file mode 100644 index 0000000000000000000000000000000000000000..cde5b98da84edcd30df1cc938832a91962bfdc6d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/ad/mad.h @@ -0,0 +1,249 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_INCLUDE_AD_MAD_H
+#define MAPLEBE_INCLUDE_AD_MAD_H
+#include <vector>
+#include "types_def.h"
+#include "mpl_logging.h"
+#include "insn.h"
+
+namespace maplebe {
+enum UnitId : maple::uint32 {
+#include "mplad_unit_id.def"
+  kUnitIdLast
+};
+
+enum UnitType : maple::uint8 {
+  kUnitTypePrimart,
+  kUnitTypeOr,
+  kUnitTypeAnd,
+  KUnitTypeNone
+};
+
+enum RealUnitKind : maple::uint32 {
+  kUnitKindUndef,
+#include "mplad_unit_kind.def"
+  kUnitKindLast = 13
+};
+
+enum SlotType : maple::uint8 {
+  kSlotNone,
+  kSlot0,
+  kSlot1,
+  kSlotAny,
+  kSlots,
+};
+
+/* machine model */
+enum LatencyType : maple::uint32 {
+  /* LT: latency */
+#include "mplad_latency_type.def"
+  kLtLast,
+};
+
+class Unit {
+ public:
+  explicit Unit(enum UnitId theUnitId);
+  Unit(enum UnitType theUnitType, enum UnitId theUnitId, int numOfUnits, ...);
+  ~Unit() = default;
+
+  enum UnitType GetUnitType() const {
+    return unitType;
+  }
+
+  enum UnitId GetUnitId() const {
+    return unitId;
+  };
+
+  const std::vector<Unit*> &GetCompositeUnits() const;
+
+  std::string GetName() const;
+  bool IsFree(maple::uint32 cycle) const;
+  void Occupy(const Insn &insn, maple::uint32 cycle);
+  void Release();
+  void AdvanceCycle();
+  void Dump(int indent = 0) const;
+  maple::uint32 GetOccupancyTable() const;
+
+  void SetOccupancyTable(maple::uint32 table) {
+    occupancyTable = table;
+  }
+
+ private:
+  void PrintIndent(int indent) const;
+
+  enum UnitId unitId;
+  enum UnitType unitType;
+  maple::uint32 occupancyTable;
+  std::vector<Unit*> compositeUnits;
+};
+
+class Reservation {
+ public:
+  Reservation(LatencyType t, int l, int n, ...);
+  ~Reservation() = default;
+
+  bool IsEqual(maple::uint32 typ) const {
+    return typ == type;
+  }
+
+  int GetLatency() const {
+    return latency;
+  }
+
+  uint32 GetUnitNum() const {
+    return unitNum;
+  }
+
+  enum SlotType GetSlot() const {
+    return slot;
+  }
+
+  const std::string &GetSlotName() const;
+
+  Unit * const *GetUnit() const {
+    return units;
+  }
+
+ private:
+  static const int kMaxUnit = 13;
+  LatencyType type;
+  int latency;
+  uint32 unitNum;
+  Unit *units[kMaxUnit];
+  enum SlotType slot = kSlotNone;
+
+  SlotType GetSlotType(UnitId unitID) const;
+};
+
+class Bypass {
+ public:
+  Bypass(LatencyType d, LatencyType u, int l) : def(d), use(u), latency(l) {}
+  virtual ~Bypass() = default;
+
+  virtual bool CanBypass(const Insn &defInsn, const Insn &useInsn) const;
+
+  int GetLatency() const {
+    return latency;
+  }
+
+  LatencyType GetDefType() const {
+    return def;
+  }
+
+  LatencyType GetUseType() const {
+    return use;
+  }
+
+ private:
+  LatencyType def;
+  LatencyType use;
+  int latency;
+};
+
+class MAD {
+ public:
+  MAD() {
+    InitUnits();
+    InitParallelism();
+    InitReservation();
+    InitBypass();
+  }
+
+  ~MAD();
+
+  using BypassVector = std::vector<Bypass*>;
+
+  void InitUnits() const;
+  void InitParallelism() const;
+  void InitReservation() const;
+  void InitBypass() const;
+  bool IsSlot0Free() const;
+  bool IsFullIssued() const;
+  int GetLatency(const Insn &def, const Insn &use) const;
+  int DefaultLatency(const Insn &insn) const;
+  Reservation *FindReservation(const Insn &insn) const;
+  void AdvanceCycle() const;
+  void ReleaseAllUnits() const;
+  void SaveStates(std::vector<maple::uint32> &occupyTable, int size) const;
+  void RestoreStates(std::vector<maple::uint32> &occupyTable, int size) const;
+
+  int GetMaxParallelism() const {
+    return parallelism;
+  }
+
+  const Unit *GetUnitByUnitId(enum UnitId uId) const {
+    CHECK_FATAL(!allUnits.empty(), "CHECK_CONTAINER_EMPTY");
+    return allUnits[uId];
+  }
+
+  static void AddUnit(Unit &u) {
+    allUnits.emplace_back(&u);
+  }
+
+  static maple::uint32 GetAllUnitsSize() {
+    return allUnits.size();
+  }
+
+  static void AddReservation(Reservation &rev) {
+    allReservations.emplace_back(&rev);
+  }
+
+  static void AddBypass(Bypass &bp) {
+    DEBUG_ASSERT(bp.GetDefType() < kLtLast, "out of range");
+    DEBUG_ASSERT(bp.GetUseType() < kLtLast, "out of range");
+    (bypassArrays[bp.GetDefType()][bp.GetUseType()]).push_back(&bp);
+  }
+
+ protected:
+  static void SetMaxParallelism(int num) {
+    parallelism = num;
+  }
+
+  int BypassLatency(const Insn &def, const Insn &use) const;
+
+ private:
+  static int parallelism;
+  static std::vector<Unit*> allUnits;
+  static std::vector<Reservation*> allReservations;
+  static std::array<std::array<BypassVector, kLtLast>, kLtLast> bypassArrays;
+};
+
+class AluShiftBypass : public Bypass {
+ public:
+  AluShiftBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {}
+  ~AluShiftBypass() override = default;
+
+  bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override;
+};
+
+class AccumulatorBypass : public Bypass {
+ public:
+  AccumulatorBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {}
+  ~AccumulatorBypass() override = default;
+
+  bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override;
+};
+
+class StoreBypass : public Bypass {
+ public:
+  StoreBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {}
+  ~StoreBypass() override = default;
+
+  bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_AD_MAD_H */
diff --git a/ecmascript/mapleall/maple_be/include/ad/target/mplad_unit_kind.def b/ecmascript/mapleall/maple_be/include/ad/target/mplad_unit_kind.def
new file mode 100644
index 0000000000000000000000000000000000000000..0c56044925a600296d4fbf96bbd2944e8d1d7780
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/ad/target/mplad_unit_kind.def
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/* cortex_a55 function unit ID definition: */
+kUnitKindSlot0 = 1,
+kUnitKindAgen = 2,
+kUnitKindHazard = 4,
+kUnitKindCrypto = 8,
+kUnitKindMul = 16,
+kUnitKindDiv = 32,
+kUnitKindBranch = 64,
+kUnitKindStAgu = 128,
+kUnitKindLdAgu = 256,
+kUnitKindFpAlu = 512,
+kUnitKindFpMul = 1024,
+kUnitKindFpDiv = 2048,
+
diff --git a/ecmascript/mapleall/maple_be/include/be/array_base_name.def b/ecmascript/mapleall/maple_be/include/be/array_base_name.def
new file mode 100644
index 0000000000000000000000000000000000000000..42c8905f3c212fffdd8d3b8bc0b9582b9ceaffe6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/be/array_base_name.def
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +"ALjava_2Flang_2FObject_3B", +"ALjava_2Flang_2FClass_3B", +"ALjava_2Flang_2FString_3B" diff --git a/ecmascript/mapleall/maple_be/include/be/array_klass_name.def b/ecmascript/mapleall/maple_be/include/be/array_klass_name.def new file mode 100644 index 0000000000000000000000000000000000000000..3812b8c1224c82c5ea1c73995bfc9bbdeb916063 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/be/array_klass_name.def @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +"ALjava_2Flang_2FObject_3B", +"ALjava_2Flang_2FClass_3B", +"ALjava_2Flang_2FString_3B", +"ALjava_2Futil_2FFormatter_24Flags_3B", +"ALjava_2Futil_2FHashMap_24Node_3B", +"ALjava_2Futil_2FFormatter_24FormatString_3B", +"ALjava_2Flang_2FCharSequence_3B", +"ALjava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B", +"ALjava_2Futil_2FHashtable_24HashtableEntry_3B", +"ALlibcore_2Freflect_2FAnnotationMember_3B", +"ALsun_2Fsecurity_2Futil_2FDerValue_3B", +"ALsun_2Fsecurity_2Fx509_2FAVA_3B" \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/be/bbt.h b/ecmascript/mapleall/maple_be/include/be/bbt.h new file mode 100644 index 0000000000000000000000000000000000000000..cdae745afdd06eb56d8a279ebf6aa9bb416b1a56 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/be/bbt.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_BBT_H +#define MAPLEBE_INCLUDE_BE_BBT_H +/* MapleIR headers. */ +#include "mir_nodes.h" +#include "mir_lower.h" +namespace maplebe { +using namespace maple; + +class BBT { + /* + * if stmt is a switch/rangegoto, succs gets defined, and condJumpBranch == fallthruBranch == nullptr. + * otherwise, succs.size() ==0 && + * 1. for cond br stmt, both condJumpBranch and fallthruBranch are defined. + * 2. if bb ends with 'throw', both fields get nullptr. + * 3. 
for the others, condJumpBranch == nullptr && only fallthruBranch is defined
+   */
+ public:
+  enum BBTType : uint8 {
+    kBBPlain,
+    kBBTry,
+    kBBEndTry,
+    kBBCatch
+  };
+
+  BBT(StmtNode *s, StmtNode *e, MemPool *memPool)
+      : alloc(memPool),
+        type(kBBPlain),
+        succs(alloc.Adapter()),
+        labelIdx(MIRLabelTable::GetDummyLabel()),
+        firstStmt(s != nullptr ? s : e),
+        lastStmt(e) {}
+
+  ~BBT() = default;
+
+  void Extend(const StmtNode *sNode, StmtNode *eNode) {
+    CHECK_FATAL(lastStmt != nullptr, "nullptr check");
+    CHECK_FATAL(sNode != nullptr ? lastStmt->GetNext() == sNode : lastStmt->GetNext() == eNode, "Extend fail");
+    lastStmt = eNode;
+  }
+
+  void SetLabelIdx(LabelIdx li) {
+    labelIdx = li;
+  }
+
+  bool IsLabeled() const {
+    return labelIdx != MIRLabelTable::GetDummyLabel();
+  }
+
+  LabelIdx GetLabelIdx() const {
+    return labelIdx;
+  }
+
+  void SetType(BBTType t, StmtNode &k) {
+    type = t;
+    keyStmt = &k;
+  }
+
+  bool IsTry() const {
+    return type == kBBTry;
+  }
+
+  bool IsEndTry() const {
+    return type == kBBEndTry;
+  }
+
+  bool IsCatch() const {
+    return type == kBBCatch;
+  }
+
+  void AddSuccs(BBT *bb) {
+    succs.emplace_back(bb);
+  }
+
+  void SetCondJumpBranch(BBT *bb) {
+    condJumpBranch = bb;
+  }
+
+  BBT *GetCondJumpBranch() {
+    return condJumpBranch;
+  }
+  void SetFallthruBranch(BBT *bb) {
+    fallthruBranch = bb;
+  }
+
+  BBT *GetFallthruBranch() {
+    return fallthruBranch;
+  }
+
+  StmtNode *GetFirstStmt() {
+    return firstStmt;
+  }
+
+  void SetFirstStmt(StmtNode &stmt) {
+    firstStmt = &stmt;
+  }
+
+  StmtNode *GetLastStmt() {
+    return lastStmt;
+  }
+
+  void SetLastStmt(StmtNode &stmt) {
+    lastStmt = &stmt;
+  }
+
+  StmtNode *GetKeyStmt() {
+    return keyStmt;
+  }
+
+#if DEBUG
+  void Dump(const MIRModule &mod) const;
+  static void ValidateStmtList(StmtNode *head, StmtNode *detached = nullptr);
+#endif
+ private:
+  MapleAllocator alloc;
+  BBTType type;
+  BBT *condJumpBranch = nullptr;
+  BBT *fallthruBranch = nullptr;
+  MapleVector<BBT*> succs;
+  LabelIdx labelIdx;
+  StmtNode *firstStmt;
+  StmtNode *lastStmt;
+  StmtNode *keyStmt = nullptr;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_BE_BBT_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/be/becommon.h b/ecmascript/mapleall/maple_be/include/be/becommon.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba4a6e8a7d163e6ede5614dc11ba90fbc0f7ae56
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/be/becommon.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_BE_BECOMMON_H
+#define MAPLEBE_INCLUDE_BE_BECOMMON_H
+/* C++ headers. */
+#include <cstddef>
+#include <utility>
+/* Basic Maple-independent utility functions */
+#include "common_utils.h"
+/* MapleIR headers. */
+#include "mir_nodes.h" /* maple_ir/include, for BaseNode */
+#include "mir_type.h" /* maple_ir/include, for MIRType */
+#include "mir_module.h" /* maple_ir/include, for mirModule */
+
+namespace maplebe {
+using namespace maple;
+
+enum BitsPerByte : uint8 {
+  kBitsPerByte = 8,
+  kLog2BitsPerByte = 3
+};
+
+class JClassFieldInfo { /* common java class field info */
+ public:
+  /* constructors */
+  JClassFieldInfo() : isRef(false), isUnowned(false), isWeak(false), offset(0) {}
+
+  JClassFieldInfo(bool isRef, bool isUnowned, bool isWeak, uint32 offset)
+      : isRef(isRef), isUnowned(isUnowned), isWeak(isWeak), offset(offset) {}
+
+  ~JClassFieldInfo() = default;
+
+  bool IsRef() const {
+    return isRef;
+  }
+
+  bool IsUnowned() const {
+    return isUnowned;
+  }
+
+  bool IsWeak() const {
+    return isWeak;
+  }
+
+  uint32 GetOffset() const {
+    return offset;
+  }
+
+ private:
+  bool isRef;     /* used to generate object-map */
+  bool isUnowned; /* used to mark unowned fields for RC */
+  bool isWeak;    /* used to mark weak fields for RC */
+  uint32 offset;  /* offset from the start of the java object */
+};
+
+using JClassLayout = MapleVector<JClassFieldInfo>; /* java class layout info */
+
+class BECommon {
+ public:
+  explicit BECommon(MIRModule &mod);
+
+  ~BECommon() = default;
+
+  void LowerTypeAttribute(MIRType &ty);
+
+  void LowerJavaTypeAttribute(MIRType &ty);
+
+  void LowerJavaVolatileInClassType(MIRClassType &ty);
+
+  void LowerJavaVolatileForSymbol(MIRSymbol &sym) const;
+
+  void ComputeTypeSizesAligns(MIRType &type, uint8 align = 0);
+
+  void GenFieldOffsetMap(const std::string &className);
+
+  void GenFieldOffsetMap(MIRClassType &classType, FILE &outFile);
+
+  void GenObjSize(const MIRClassType &classType, FILE &outFile);
+
+  std::pair<int32, int32> GetFieldOffset(MIRStructType &structType, FieldID fieldID);
+
+  bool IsRefField(MIRStructType &structType, FieldID fieldID) const;
+
+  /* some class may has incomplete type definition. provide an interface to check them. */
+  bool HasJClassLayout(MIRClassType &klass) const {
+    return (jClassLayoutTable.find(&klass) != jClassLayoutTable.end());
+  }
+
+  const JClassLayout &GetJClassLayout(MIRClassType &klass) const {
+    return *(jClassLayoutTable.at(&klass));
+  }
+
+  void AddNewTypeAfterBecommon(uint32 oldTypeTableSize, uint32 newTypeTableSize);
+
+  void AddElementToJClassLayout(MIRClassType &klass, JClassFieldInfo info);
+
+  bool HasFuncReturnType(MIRFunction &func) const {
+    return (funcReturnType.find(&func) != funcReturnType.end());
+  }
+
+  const TyIdx GetFuncReturnType(MIRFunction &func) const {
+    return (funcReturnType.at(&func));
+  }
+
+  void AddElementToFuncReturnType(MIRFunction &func, const TyIdx tyIdx);
+
+  MIRType *BeGetOrCreatePointerType(const MIRType &pointedType);
+
+  MIRType *BeGetOrCreateFunctionType(TyIdx tyIdx, const std::vector<TyIdx> &vecTy,
+                                     const std::vector<TypeAttrs> &vecAt);
+
+  BaseNode *GetAddressOfNode(const BaseNode &node);
+
+  bool CallIsOfAttr(FuncAttrKind attr, const StmtNode *narynode) const;
+
+  PrimType GetAddressPrimType() const {
+    return GetLoweredPtrType();
+  }
+
+  /* update typeSizeTable and typeAlignTable when new type is created */
+  void UpdateTypeTable(MIRType &ty) {
+    if (!TyIsInSizeAlignTable(ty)) {
+      AddAndComputeSizeAlign(ty);
+    }
+  }
+
+  /* Global type table might be updated during lowering for C/C++. */
+  void FinalizeTypeTable(const MIRType &ty);
+
+  uint32 GetFieldIdxIncrement(const MIRType &ty) const {
+    if (ty.GetKind() == kTypeClass) {
+      /* number of fields + 2 */
+      return static_cast<const MIRClassType&>(ty).GetFieldsSize() + 2;
+    } else if (ty.GetKind() == kTypeStruct) {
+      /* number of fields + 1 */
+      return static_cast<const MIRStructType&>(ty).GetFieldsSize() + 1;
+    }
+    return 1;
+  }
+
+  MIRModule &GetMIRModule() const {
+    return mirModule;
+  }
+
+  uint64 GetTypeSize(uint32 idx) const {
+    return typeSizeTable.at(idx);
+  }
+  uint32 GetSizeOfTypeSizeTable() const {
+    return typeSizeTable.size();
+  }
+  bool IsEmptyOfTypeSizeTable() const {
+    return typeSizeTable.empty();
+  }
+  void SetTypeSize(uint32 idx, uint64 value) {
+    typeSizeTable.at(idx) = value;
+  }
+  void AddTypeSize(uint64 value) {
+    typeSizeTable.emplace_back(value);
+  }
+
+  void AddTypeSizeAndAlign(const TyIdx tyIdx, uint64 value) {
+    if (typeSizeTable.size() == tyIdx) {
+      typeSizeTable.emplace_back(value);
+      typeAlignTable.emplace_back(value);
+    } else {
+      CHECK_FATAL(typeSizeTable.size() > tyIdx, "there are some types haven't set type size and align, %d");
+    }
+  }
+
+  uint8 GetTypeAlign(uint32 idx) const {
+    return typeAlignTable.at(idx);
+  }
+  size_t GetSizeOfTypeAlignTable() const {
+    return typeAlignTable.size();
+  }
+  bool IsEmptyOfTypeAlignTable() const {
+    return typeAlignTable.empty();
+  }
+  void SetTypeAlign(uint32 idx, uint8 value) {
+    typeAlignTable.at(idx) = value;
+  }
+  void AddTypeAlign(uint8 value) {
+    typeAlignTable.emplace_back(value);
+  }
+
+  bool GetHasFlexibleArray(uint32 idx) const {
+    return typeHasFlexibleArray.at(idx);
+  }
+  void SetHasFlexibleArray(uint32 idx, bool value) {
+    typeHasFlexibleArray.at(idx) = value;
+  }
+
+  FieldID GetStructFieldCount(uint32 idx) const {
+    return structFieldCountTable.at(idx);
+  }
+  uint32 GetSizeOfStructFieldCountTable() const {
+    return structFieldCountTable.size();
+  }
+  void SetStructFieldCount(uint32 idx, FieldID value) {
+    structFieldCountTable.at(idx) = value;
+  }
+  void AppendStructFieldCount(uint32 idx, FieldID value) {
+    structFieldCountTable.at(idx) += value;
+  }
+
+ private:
+  bool TyIsInSizeAlignTable(const MIRType&) const;
+  void AddAndComputeSizeAlign(MIRType&);
+  void ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx);
+  void ComputeClassTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx, uint8 align = 0);
+  void ComputeArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx);
+  void ComputeFArrayOrJArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx);
+
+  MIRModule &mirModule;
+  MapleVector<uint64> typeSizeTable;      /* index is TyIdx */
+  MapleVector<uint8> typeAlignTable;      /* index is TyIdx */
+  MapleVector<bool> typeHasFlexibleArray; /* struct with flexible array */
+  /*
+   * gives number of fields inside
+   * each struct inclusive of nested structs, for speeding up
+   * traversal for locating the field for a given fieldID
+   */
+  MapleVector<FieldID> structFieldCountTable;
+  /*
+   * a lookup table for class layout. the vector is indexed by field-id
+   * Note: currently only for java class types.
+   */
+  MapleUnorderedMap<MIRClassType*, JClassLayout*> jClassLayoutTable;
+  MapleUnorderedMap<MIRFunction*, TyIdx> funcReturnType;
+}; /* class BECommon */
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_BE_BECOMMON_H */
diff --git a/ecmascript/mapleall/maple_be/include/be/common_utils.h b/ecmascript/mapleall/maple_be/include/be/common_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..6317f1d1560be103fbbb32f0e2cad5106e67a038
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/be/common_utils.h
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_BE_COMMON_UTILS_H
+#define MAPLEBE_INCLUDE_BE_COMMON_UTILS_H
+#include <cstdint>
+#include "types_def.h"
+#include "mpl_logging.h"
+
+namespace maplebe {
+using namespace maple;
+constexpr uint32 kOffsetAlignmentOf8Bit = 0;
+constexpr uint32 kOffsetAlignmentOf16Bit = 1;
+constexpr uint32 kOffsetAlignmentOf32Bit = 2;
+constexpr uint32 kOffsetAlignmentOf64Bit = 3;
+constexpr uint32 kOffsetAlignmentOf128Bit = 4;
+constexpr uint32 kBaseOffsetAlignment = 3;
+/*
+ * The constexpr implementations, without assertions. Suitable for using in
+ * constants.
+ */
+constexpr uint32 k1FConst = 31;
+constexpr uint32 k0BitSize = 0;
+constexpr uint32 k1BitSize = 1;
+constexpr uint32 k2BitSize = 2;
+constexpr uint32 k3BitSize = 3;
+constexpr uint32 k4BitSize = 4;
+constexpr uint32 k5BitSize = 5;
+constexpr uint32 k6BitSize = 6;
+constexpr uint32 k7BitSize = 7;
+constexpr uint32 k8BitSize = 8;
+constexpr uint32 k16BitSize = 16;
+constexpr uint32 k24BitSize = 24;
+constexpr uint32 k32BitSize = 32;
+constexpr uint32 k48BitSize = 48;
+constexpr uint32 k56BitSize = 56;
+constexpr uint32 k64BitSize = 64;
+constexpr uint32 k128BitSize = 128;
+constexpr uint32 k256BitSize = 256;
+constexpr uint32 k512BitSize = 512;
+constexpr uint32 k1024BitSize = 1024;
+constexpr uint32 k2048BitSize = 2048;
+
+constexpr int32 k1FConstInt = 31;
+constexpr int32 k0BitSizeInt = 0;
+constexpr int32 k1BitSizeInt = 1;
+constexpr int32 k2BitSizeInt = 2;
+constexpr int32 k3BitSizeInt = 3;
+constexpr int32 k4BitSizeInt = 4;
+constexpr int32 k5BitSizeInt = 5;
+constexpr int32 k6BitSizeInt = 6;
+constexpr int32 k7BitSizeInt = 7;
+constexpr int32 k8BitSizeInt = 8;
+constexpr int32 k16BitSizeInt = 16;
+constexpr int32 k24BitSizeInt = 24;
+constexpr int32 k32BitSizeInt = 32;
+constexpr int32 k48BitSizeInt = 48;
+constexpr int32 k56BitSizeInt = 56;
+constexpr int32 k64BitSizeInt = 64;
+constexpr int32 k128BitSizeInt = 128;
+constexpr int32 k256BitSizeInt = 256;
+constexpr int32 k512BitSizeInt = 512;
+constexpr int32 k1024BitSizeInt = 1024;
+
+constexpr int32 kNegative256BitSize = -256;
+constexpr int32 kNegative512BitSize = -512;
+constexpr int32 kNegative1024BitSize = -1024;
+
+constexpr uint32 k1ByteSize = 1;
+constexpr uint32 k2ByteSize = 2;
+constexpr uint32 k3ByteSize = 3;
+constexpr uint32 k4ByteSize = 4;
+constexpr uint32 k8ByteSize = 8;
+constexpr uint32 k9ByteSize = 9;
+constexpr uint32 k12ByteSize = 12;
+constexpr uint32 k14ByteSize = 14;
+constexpr 
uint32 k15ByteSize = 15; +constexpr uint32 k16ByteSize = 16; +constexpr uint32 k32ByteSize = 32; + +constexpr int32 k1ByteSizeInt = 1; +constexpr int32 k2ByteSizeInt = 2; +constexpr int32 k3ByteSizeInt = 3; +constexpr int32 k4ByteSizeInt = 4; +constexpr int32 k8ByteSizeInt = 8; +constexpr int32 k9ByteSizeInt = 9; +constexpr int32 k12ByteSizeInt = 12; +constexpr int32 k14ByteSizeInt = 14; +constexpr int32 k15ByteSizeInt = 15; +constexpr int32 k16ByteSizeInt = 16; +constexpr int32 k32ByteSizeInt = 32; + +constexpr uint32 k1EightBytesSize = 8; +constexpr uint32 k2EightBytesSize = 16; +constexpr uint32 k3EightBytesSize = 24; +constexpr uint32 k4EightBytesSize = 32; + +constexpr uint32 k4BitShift = 2; /* 4 is 1 << 2; */ +constexpr uint32 k8BitShift = 3; /* 8 is 1 << 3; */ +constexpr uint32 k16BitShift = 4; /* 16 is 1 << 4 */ + +constexpr uint32 kDwordSizeTwo = 2; + +constexpr uint32 k4ByteFloatSize = 4; +constexpr uint32 k8ByteDoubleSize = 8; + +/* Storage location of operands in one insn */ +constexpr int32 kInsnFirstOpnd = 0; +constexpr int32 kInsnSecondOpnd = 1; +constexpr int32 kInsnThirdOpnd = 2; +constexpr int32 kInsnFourthOpnd = 3; +constexpr int32 kInsnFifthOpnd = 4; +constexpr int32 kInsnSixthOpnd = 5; +constexpr int32 kInsnSeventhOpnd = 6; +constexpr int32 kInsnEighthOpnd = 7; +constexpr int32 kInsnMaxOpnd = 8; + +/* Reg of CCLocInfo */ +constexpr uint32 kFirstReg = 0; +constexpr uint32 kSecondReg = 1; +constexpr uint32 kThirdReg = 2; +constexpr uint32 kFourthReg = 3; + +/* inline asm operand designations */ +constexpr uint32 kAsmStringOpnd = 0; +constexpr uint32 kAsmOutputListOpnd = 1; +constexpr uint32 kAsmClobberListOpnd = 2; +constexpr uint32 kAsmInputListOpnd = 3; +constexpr uint32 kAsmOutputConstraintOpnd = 4; +constexpr uint32 kAsmInputConstraintOpnd = 5; +constexpr uint32 kAsmOutputRegPrefixOpnd = 6; +constexpr uint32 kAsmInputRegPrefixOpnd = 7; + +/* Number of registers */ +constexpr uint32 kOneRegister = 1; +constexpr uint32 kTwoRegister = 2; +constexpr uint32 kThreeRegister = 3; +constexpr uint32 kFourRegister = 4; + +/* position of an operand within an instruction */ +constexpr uint32 kOperandPosition0 = 0; +constexpr uint32 kOperandPosition1 = 1; +constexpr uint32 kOperandPosition2 = 2; + +/* Size of struct for memcpy */ +constexpr uint32 kParmMemcpySize = 40; + +/* Check whether the value is an even number. */ +constexpr int32 kDivide2 = 2; +constexpr int32 kRegNum2 = 2; +constexpr int32 kStepNum2 = 2; +constexpr int32 kSign4ByteSize = 4; + +/* alignment in bytes of uint8 */ +constexpr uint8 kAlignOfU8 = 3; + +/* + * if the number of local refvar is less than 12, use stp or str to init local refvar + * else call function MCC_InitializeLocalStackRef to init. 
+ */ +constexpr int32 kRefNum12 = 12; + +/* mod function max argument size */ +constexpr uint32 kMaxModFuncArgSize = 8; + +/* string length of spacial name "__EARetTemp__" */ +constexpr int32 kEARetTempNameSize = 10; + +/* + * Aarch64 data processing instructions have 12 bits of space for values in their instuction word + * This is arranged as a four-bit rotate value and an eight-bit immediate value: + */ +constexpr uint32 kMaxImmVal5Bits = 5; +constexpr uint32 kMaxImmVal6Bits = 6; +constexpr uint32 kMaxImmVal8Bits = 8; +constexpr uint32 kMaxImmVal12Bits = 12; +constexpr uint32 kMaxImmVal13Bits = 13; +constexpr uint32 kMaxImmVal16Bits = 16; + +constexpr int32 kMaxPimm8 = 4095; +constexpr int32 kMaxPimm16 = 8190; +constexpr int32 kMaxPimm32 = 16380; +constexpr int32 kMaxPimm64 = 32760; +constexpr int32 kMaxPimm128 = 65520; + +constexpr int32 kMaxPimm[k5BitSize] = {kMaxPimm8, kMaxPimm16, kMaxPimm32, kMaxPimm64, kMaxPimm128}; +constexpr int32 kMaxPairPimm[k3BitSize] = {k256BitSize, k512BitSize, k512BitSize}; + +constexpr int32 kMaxSimm32 = 255; +constexpr int32 kMaxSimm32Pair = 252; +constexpr int32 kMinSimm32 = kNegative256BitSize; +constexpr int32 kMaxSimm64Pair = 504; +constexpr int32 kMinSimm64 = kNegative512BitSize; + +constexpr int32 kMax12UnsignedImm = 4096; +constexpr int32 kMax13UnsignedImm = 8192; +constexpr int32 kMax16UnsignedImm = 65535; + +/* Dedicated for Vector */ +constexpr int32 kMinImmVal = -128; +constexpr int32 kMaxImmVal = 255; + +/* aarch64 assembly takes up to 24-bits */ +constexpr uint32 kMaxImmVal24Bits = 24; + +constexpr uint32 kDecimalMax = 10; + +constexpr double kMicroSecPerMilliSec = 1000.0; + +constexpr double kPercent = 100.0; + +enum ConditionCode : uint8 { + CC_EQ, /* equal */ + CC_NE, /* not equal */ + CC_CS, /* carry set (== HS) */ + CC_HS, /* unsigned higher or same (== CS) */ + CC_CC, /* carry clear (== LO) */ + CC_LO, /* Unsigned lower (== CC) */ + CC_MI, /* Minus or negative result */ + CC_PL, /* positive or zero result */ + CC_VS, /* overflow */ + CC_VC, /* no overflow */ + CC_HI, /* unsigned higher */ + CC_LS, /* unsigned lower or same */ + CC_GE, /* signed greater than or equal */ + CC_LT, /* signed less than */ + CC_GT, /* signed greater than */ + CC_LE, /* signed less than or equal */ + CC_AL, /* always, this is the default. usually omitted. 
*/ + kCcLast +}; + +inline ConditionCode GetReverseCC(ConditionCode cc) { + switch (cc) { + case CC_NE: + return CC_EQ; + case CC_EQ: + return CC_NE; + case CC_HS: + return CC_LO; + case CC_LO: + return CC_HS; + case CC_MI: + return CC_PL; + case CC_PL: + return CC_MI; + case CC_VS: + return CC_VC; + case CC_VC: + return CC_VS; + case CC_HI: + return CC_LS; + case CC_LS: + return CC_HI; + case CC_LT: + return CC_GE; + case CC_GE: + return CC_LT; + case CC_GT: + return CC_LE; + case CC_LE: + return CC_GT; + default: + CHECK_FATAL(0, "unknown condition code"); + } + return kCcLast; +} +inline ConditionCode GetReverseBasicCC(ConditionCode cc) { + switch (cc) { + case CC_NE: + return CC_EQ; + case CC_EQ: + return CC_NE; + case CC_LT: + return CC_GE; + case CC_GE: + return CC_LT; + case CC_GT: + return CC_LE; + case CC_LE: + return CC_GT; + default: + CHECK_FATAL(false, "Not support yet."); + } + return kCcLast; +} + +inline bool IsPowerOf2Const(uint64 i) { + return (i & (i - 1)) == 0; +} + +inline uint64 RoundUpConst(uint64 offset, uint64 align) { + return (-align) & (offset + align - 1); +} + +inline bool IsPowerOf2(uint64 i) { + return IsPowerOf2Const(i); +} + +/* align must be a power of 2 */ +inline uint64 RoundUp(uint64 offset, uint64 align) { + if (align == 0) { + return offset; + } + DEBUG_ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundUpConst(offset, align); +} + +inline int64 RoundDownConst(int64 offset, int64 align) { + return (-align) & offset; +} + +// align must be a power of 2 +inline int64 RoundDown(int64 offset, int64 align) { + if (align == 0) { + return offset; + } + DEBUG_ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundDownConst(offset, align); +} + +inline bool IsAlignedTo(uint64 offset, uint64 align) { + DEBUG_ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return (offset & (align - 1)) == 0; +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_COMMON_UTILS_H */ diff --git a/ecmascript/mapleall/maple_be/include/be/lower.h b/ecmascript/mapleall/maple_be/include/be/lower.h new file mode 100644 index 0000000000000000000000000000000000000000..9e342f439dd472126586e2e07c686af1e65d7899 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/be/lower.h @@ -0,0 +1,332 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_LOWERER_H +#define MAPLEBE_INCLUDE_BE_LOWERER_H +/* C++ headers. */ +#include +#include +#include +#include +#include +#include +#include "intrinsics.h" /* For IntrinDesc. This includes 'intrinsic_op.h' as well */ +#include "becommon.h" +#include "cg.h" +#include "bbt.h" +/* MapleIR headers. 
*/ +#include "mir_nodes.h" +#include "mir_module.h" +#include "mir_function.h" +#include "mir_lower.h" +#include "simplify.h" + +namespace maplebe { +class CGLowerer { + enum Option : uint64 { + kUndefined = 0, + kGenEh = 1ULL << 0, + kVerboseCG = 1ULL << 1, + }; + + using BuiltinFunctionID = uint32; + using OptionFlag = uint64; + public: + CGLowerer(MIRModule &mod, BECommon &common, MIRFunction *func = nullptr) + : mirModule(mod), + beCommon(common) { + SetOptions(kGenEh); + mirBuilder = mod.GetMIRBuilder(); + SetCurrentFunc(func); + } + + CGLowerer(MIRModule &mod, BECommon &common, bool genEh, bool verboseCG) + : mirModule(mod), + beCommon(common) { + OptionFlag option = 0; + if (genEh) { + option |= kGenEh; + } + if (verboseCG) { + option |= kVerboseCG; + } + SetOptions(option); + mirBuilder = mod.GetMIRBuilder(); + SetCurrentFunc(nullptr); + } + + ~CGLowerer() { + mirBuilder = nullptr; + currentBlock = nullptr; + } + + MIRFunction *RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, const std::string &name, + const std::string ¶mName); + + void RegisterBuiltIns(); + + void LowerFunc(MIRFunction &func); + + BaseNode *LowerIntrinsicop(const BaseNode&, IntrinsicopNode&, BlockNode&); + + BaseNode *LowerIntrinsicopwithtype(const BaseNode&, IntrinsicopNode&, BlockNode&); + + StmtNode *LowerIntrinsicMplClearStack(const IntrinsiccallNode &intrinCall, BlockNode &newBlk); + + StmtNode *LowerIntrinsicRCCall(const IntrinsiccallNode &intrinCall); + + void LowerArrayStore(const IntrinsiccallNode &intrinCall, BlockNode &newBlk); + + StmtNode *LowerDefaultIntrinsicCall(IntrinsiccallNode &intrinCall, MIRSymbol &st, MIRFunction &fn); + + StmtNode *LowerIntrinsicMplCleanupLocalRefVarsSkip(IntrinsiccallNode &intrinCall); + + StmtNode *LowerIntrinsiccall(IntrinsiccallNode &intrinCall, BlockNode&); + + StmtNode *LowerSyncEnterSyncExit(StmtNode &stmt); + + MIRFunction *GetCurrentFunc() const { + return mirModule.CurFunction(); + } + + BaseNode *LowerExpr(BaseNode&, BaseNode&, BlockNode&); + + BaseNode *LowerDread(DreadNode &dread, const BlockNode& block); + + BaseNode *LowerIread(IreadNode &iread) { + /* use PTY_u8 for boolean type in dread/iread */ + if (iread.GetPrimType() == PTY_u1) { + iread.SetPrimType(PTY_u8); + } + return (iread.GetFieldID() == 0 ? &iread : LowerIreadBitfield(iread)); + } + + BaseNode *LowerCastExpr(BaseNode &expr); + + BaseNode *ExtractSymbolAddress(const StIdx &stIdx); + BaseNode *LowerDreadToThreadLocal(BaseNode &expr, const BlockNode &block); + StmtNode *LowerDassignToThreadLocal(StmtNode &stmt, const BlockNode &block); + + void LowerDassign(DassignNode &dassign, BlockNode &block); + + void LowerResetStmt(StmtNode &stmt, BlockNode &block); + + void LowerIassign(IassignNode &iassign, BlockNode &block); + + void LowerRegassign(RegassignNode ®Assign, BlockNode &block); + + void AddElemToPrintf(MapleVector &argsPrintf, int num, ...) 
const; + + std::string AssertBoundaryGetFileName(StmtNode &stmt) { + size_t pos = mirModule.GetFileNameFromFileNum(stmt.GetSrcPos().FileNum()).rfind('/'); + return mirModule.GetFileNameFromFileNum(stmt.GetSrcPos().FileNum()).substr(pos + 1); + } + + std::string GetFileNameSymbolName(const std::string &fileName) const; + + void SwitchAssertBoundary(StmtNode &stmt, MapleVector &argsPrintf); + + void LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode &newBlk, std::vector &abortNode); + + StmtNode *LowerIntrinsicopDassign(const DassignNode &dassign, IntrinsicopNode &intrinsic, BlockNode &block); + + void LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcNode, BlockNode &blkNode, bool perm = false); + + std::string GetNewArrayFuncName(const uint32 elemSize, const bool perm) const; + + void LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode &node, BlockNode &block, bool perm = false); + + BaseNode *LowerAddrof(AddrofNode &addrof) const { + return &addrof; + } + + BaseNode *LowerIaddrof(const IreadNode &iaddrof); + BaseNode *SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode); + BaseNode *SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode); + bool IsComplexSelect(const TernaryNode &tNode) const; + int32 FindTheCurrentStmtFreq(const StmtNode *stmt) const; + BaseNode *LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode); + BaseNode *LowerFarray(ArrayNode &array); + BaseNode *LowerArrayDim(ArrayNode &array, int32 dim); + BaseNode *LowerArrayForLazyBiding(BaseNode &baseNode, BaseNode &offsetNode, const BaseNode &parent); + BaseNode *LowerArray(ArrayNode &array, const BaseNode &parent); + BaseNode *LowerCArray(ArrayNode &array); + + DassignNode *SaveReturnValueInLocal(StIdx, uint16); + void LowerCallStmt(StmtNode&, StmtNode*&, BlockNode&, MIRType *retty = nullptr, bool uselvar = false, + bool isIntrinAssign = false); + BlockNode *LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall); + BlockNode *LowerCallAssignedStmt(StmtNode &stmt, bool uselvar = false); + bool LowerStructReturn(BlockNode &blk, StmtNode *stmt, StmtNode *&nextStmt, bool &lvar, BlockNode *oldblk); + BlockNode *LowerMemop(StmtNode&); + + BaseNode *LowerRem(BaseNode &rem, BlockNode &block); + + void LowerStmt(StmtNode &stmt, BlockNode &block); + + void LowerSwitchOpnd(StmtNode &stmt, BlockNode &block); + + MIRSymbol *CreateNewRetVar(const MIRType &ty, const std::string &prefix); + + void RegisterExternalLibraryFunctions(); + + BlockNode *LowerBlock(BlockNode &block); + + void SimplifyBlock(BlockNode &block) const; + + void LowerTryCatchBlocks(BlockNode &body); + +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 + BlockNode *LowerReturnStructUsingFakeParm(NaryStmtNode &retNode); +#endif + BlockNode *LowerReturn(NaryStmtNode &retNode); + void LowerEntry(MIRFunction &func); + + StmtNode *LowerCall( + CallNode &call, StmtNode *&stmt, BlockNode &block, MIRType *retty = nullptr, bool uselvar = false); + void SplitCallArg(CallNode &callNode, BaseNode *newOpnd, size_t i, BlockNode &newBlk); + + void CleanupBranches(MIRFunction &func) const; + + void LowerTypePtr(BaseNode &expr) const; + + BaseNode *GetBitField(int32 byteOffset, BaseNode *baseAddr, PrimType fieldPrimType); + StmtNode *WriteBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr, BaseNode *rhs, BlockNode *block); + BaseNode *ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + 
BaseNode *baseAddr); + BaseNode *LowerDreadBitfield(DreadNode &dread); + BaseNode *LowerIreadBitfield(IreadNode &iread); + StmtNode *LowerDassignBitfield(DassignNode &dassign, BlockNode &block); + StmtNode *LowerIassignBitfield(IassignNode &iassign, BlockNode &block); + + void LowerAsmStmt(AsmNode *asmNode, BlockNode *blk); + + bool ShouldOptarray() const { + DEBUG_ASSERT(mirModule.CurFunction() != nullptr, "nullptr check"); + return MIRLower::ShouldOptArrayMrt(*mirModule.CurFunction()); + } + + BaseNode *NodeConvert(PrimType mtype, BaseNode &expr); + /* Lower pointer/reference types if found in pseudo registers. */ + void LowerPseudoRegs(const MIRFunction &func) const; + + /* A pseudo register refers to a symbol when a DreadNode is converted to a RegreadNode. */ + StIdx GetSymbolReferredToByPseudoRegister(PregIdx regNO) const { + (void)regNO; + return StIdx(); + } + + void SetOptions(OptionFlag option) { + options = option; + } + + void SetCheckLoadStore(bool value) { + checkLoadStore = value; + } + + /* If a built-in function is registered for the given intrinsic, return its PUIdx; otherwise return kFuncNotFound. */ + PUIdx GetBuiltinToUse(BuiltinFunctionID id) const; + void InitArrayClassCacheTableIndex(); + + MIRModule &mirModule; + BECommon &beCommon; + BlockNode *currentBlock = nullptr; /* current block into which lowered statements are inserted */ + bool checkLoadStore = false; + int64 seed = 0; + SimplifyMemOp simplifyMemOp; + static const std::string kIntrnRetValPrefix; + static const std::string kUserRetValPrefix; + + static constexpr PUIdx kFuncNotFound = PUIdx(-1); + static constexpr int kThreeDimArray = 3; + static constexpr int kNodeThirdOpnd = 2; + static constexpr int kMCCSyncEnterFast0 = 0; + static constexpr int kMCCSyncEnterFast1 = 1; + static constexpr int kMCCSyncEnterFast2 = 2; + static constexpr int kMCCSyncEnterFast3 = 3; + + protected: + /* + * True if the lower level (e.g. mplcg) can handle the intrinsic directly. + * For example, INTRN_MPL_ATOMIC_EXCHANGE_PTR can be handled directly by mplcg, + * generating a machine code sequence that contains no function calls. + * Such intrinsics bypass the lowering of "assigned" + * and let mplcg handle the intrinsic results that are not return values.
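+ * An illustrative C equivalent of that intrinsic (the memory ordering here
+ * is an assumption; mplcg emits the sequence inline, call-free):
+ *
+ *   void *MplAtomicExchangePtr(void **slot, void *newVal) {
+ *     return __atomic_exchange_n(slot, newVal, __ATOMIC_SEQ_CST);
+ *   }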
+ */ + bool IsIntrinsicCallHandledAtLowerLevel(MIRIntrinsicID intrinsic) const; + + bool IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const; + + private: + + void SetCurrentFunc(MIRFunction *func) { + mirModule.SetCurFunction(func); + simplifyMemOp.SetFunction(func); + if (func != nullptr) { + const std::string &dumpFunc = CGOptions::GetDumpFunc(); + const bool debug = CGOptions::GetDumpPhases().find("cglower") != CGOptions::GetDumpPhases().end() && + (dumpFunc == "*" || dumpFunc == func->GetName()); + simplifyMemOp.SetDebug(debug); + } + } + + bool ShouldAddAdditionalComment() const { + return (options & kVerboseCG) != 0; + } + + bool GenerateExceptionHandlingCode() const { + return (options & kGenEh) != 0; + } + + BaseNode *MergeToCvtType(PrimType dtyp, PrimType styp, BaseNode &src) const; + BaseNode *LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, const IntrinDesc &desc); + StmtNode *CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, const MIRSymbol &ret, PUIdx bFunc, + BaseNode *extraInfo = nullptr) const; + StmtNode *CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, PregIdx retPregIdx, PUIdx bFunc, + BaseNode *extraInfo = nullptr) const; + BaseNode *LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode); + BaseNode *LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNode &intrinNode); + BaseNode *LowerIntrinJavaArrayLength(const BaseNode &parent, IntrinsicopNode &intrinNode); + BaseNode *LowerIntrinsicopWithType(const BaseNode &parent, IntrinsicopNode &intrinNode); + + MIRType *GetArrayNodeType(BaseNode &baseNode); + IreadNode &GetLenNode(BaseNode &opnd0); + LabelIdx GetLabelIdx(MIRFunction &curFunc) const; + void ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode); + void ProcessClassInfo(MIRType &classType, bool &classInfoFromRt, std::string &classInfo) const; + StmtNode *GenCallNode(const StmtNode &stmt, PUIdx &funcCalled, CallNode& origCall); + StmtNode *GenIntrinsiccallNode(const StmtNode &stmt, PUIdx &funcCalled, bool &handledAtLowerLevel, + IntrinsiccallNode &origCall); + StmtNode *GenIcallNode(PUIdx &funcCalled, IcallNode &origCall); + BlockNode *GenBlockNode(StmtNode &newCall, const CallReturnVector &p2nRets, const Opcode &opcode, + const PUIdx &funcCalled, bool handledAtLowerLevel, bool uselvar); + BaseNode *GetClassInfoExprFromRuntime(const std::string &classInfo); + BaseNode *GetClassInfoExprFromArrayClassCache(const std::string &classInfo); + BaseNode *GetClassInfoExpr(const std::string &classInfo) const; + BaseNode *GetBaseNodeFromCurFunc(MIRFunction &curFunc, bool isJarray); + + OptionFlag options = 0; + bool needBranchCleanup = false; + bool hasTry = false; + + static std::vector> builtinFuncIDs; + MIRBuilder *mirBuilder = nullptr; + uint32 labelIdx = 0; + static std::unordered_map intrinFuncIDs; + static std::unordered_map arrayClassCacheIndex; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_LOWERER_H */ diff --git a/ecmascript/mapleall/maple_be/include/be/rt.h b/ecmascript/mapleall/maple_be/include/be/rt.h new file mode 100644 index 0000000000000000000000000000000000000000..8977b12dc1d8e6a5cf13b3141b9781e8fe3ae942 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/be/rt.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_RT_H +#define MAPLEBE_INCLUDE_BE_RT_H + +#include <cstdint> +#include <string> + +namespace maplebe { +/* + * This class contains constants about the ABI of the runtime, such as symbols + * for GC-related metadata in generated binary files. + */ +class RTSupport { + public: + static RTSupport &GetRTSupportInstance() { + static RTSupport RtSupport; + return RtSupport; + } + uint64_t GetObjectAlignment() const { + return kObjectAlignment; + } + int64_t GetArrayContentOffset() const { + return kArrayContentOffset; + } + int64_t GetArrayLengthOffset() const { + return kArrayLengthOffset; + } + uint64_t GetFieldSize() const { + return kRefFieldSize; + } + uint64_t GetFieldAlign() const { + return kRefFieldAlign; + } + + protected: + uint64_t kObjectAlignment; /* Word size. Suitable for all Java types. */ + uint64_t kObjectHeaderSize; /* java object header used by MM. */ + +#ifdef USE_32BIT_REF + uint32_t kRefFieldSize; /* reference field in java object */ + uint32_t kRefFieldAlign; +#else + uint32_t kRefFieldSize; /* reference field in java object */ + uint32_t kRefFieldAlign; +#endif /* USE_32BIT_REF */ + /* The array length offset is fixed since CONTENT_OFFSET is fixed to simplify code */ + int64_t kArrayLengthOffset; /* shadow + monitor + [padding] */ + /* The array content offset is aligned to 8B to allow hosting of size-8B elements */ + int64_t kArrayContentOffset; /* fixed */ + int64_t kGcTibOffset; + int64_t kGcTibOffsetAbs; + + private: + RTSupport() { + kObjectAlignment = 8; + kObjectHeaderSize = 8; +#ifdef USE_32BIT_REF + kRefFieldSize = 4; + kRefFieldAlign = 4; +#else + kRefFieldSize = 8; + kRefFieldAlign = 8; +#endif /* USE_32BIT_REF */ + kArrayLengthOffset = 12; + kArrayContentOffset = 16; + kGcTibOffset = -8; + kGcTibOffsetAbs = -kGcTibOffset; + } + static const std::string kObjectMapSectionName; + static const std::string kGctibLabelArrayOfObject; + static const std::string kGctibLabelJavaObject; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_RT_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/be/switch_lowerer.h b/ecmascript/mapleall/maple_be/include/be/switch_lowerer.h new file mode 100644 index 0000000000000000000000000000000000000000..5f674c8483255ca084597e8b17d9deb9a5f58e3a --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/be/switch_lowerer.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H +#define MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H +#include "mir_nodes.h" +#include "mir_module.h" + +namespace maplebe { +class BELowerer; + +class SwitchLowerer { + public: + SwitchLowerer(maple::MIRModule &mod, maple::SwitchNode &stmt, + maple::MapleAllocator &allocator) + : mirModule(mod), + stmt(&stmt), + switchItems(allocator.Adapter()), + ownAllocator(&allocator) {} + + ~SwitchLowerer() = default; + + maple::BlockNode *LowerSwitch(); + + private: + using Cluster = std::pair; + using SwitchItem = std::pair; + + maple::MIRModule &mirModule; + maple::SwitchNode *stmt; + /* + * the original switch table is sorted and then each dense (in terms of the + * case tags) region is condensed into 1 switch item; in the switchItems + * table, each item either corresponds to an original entry in the original + * switch table (pair's second is 0), or to a dense region (pair's second + * gives the upper limit of the dense range) + */ + maple::MapleVector switchItems; /* uint32 is index in switchTable */ + maple::MapleAllocator *ownAllocator; + const maple::int32 kClusterSwitchCutoff = 5; + const float kClusterSwitchDensityHigh = 0.4; + const float kClusterSwitchDensityLow = 0.2; + const maple::int32 kMaxRangeGotoTableSize = 127; + bool jumpToDefaultBlockGenerated = false; + + void FindClusters(maple::MapleVector &clusters) const; + void InitSwitchItems(maple::MapleVector &clusters); + maple::RangeGotoNode *BuildRangeGotoNode(maple::int32 startIdx, maple::int32 endIdx); + maple::CompareNode *BuildCmpNode(maple::Opcode opCode, maple::uint32 idx); + maple::GotoNode *BuildGotoNode(maple::int32 idx); + maple::CondGotoNode *BuildCondGotoNode(maple::int32 idx, maple::Opcode opCode, maple::BaseNode &cond); + maple::BlockNode *BuildCodeForSwitchItems(maple::int32 start, maple::int32 end, bool lowBNdChecked, + bool highBNdChecked); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H */ diff --git a/ecmascript/mapleall/maple_be/include/be/try_catch.h b/ecmascript/mapleall/maple_be/include/be/try_catch.h new file mode 100644 index 0000000000000000000000000000000000000000..8f02298afb9bf276d9c2aa549c47873ca30699e8 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/be/try_catch.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_TRY_CATCH_H +#define MAPLEBE_INCLUDE_BE_TRY_CATCH_H +#include "bbt.h" +/* MapleIR headers. 
*/ +#include "mir_nodes.h" +#include "mir_lower.h" + +namespace maplebe { +using namespace maple; +class TryEndTryBlock { + public: + explicit TryEndTryBlock(MemPool &memPool) + : allocator(&memPool), enclosedBBs(allocator.Adapter()), + labeledBBsInTry(allocator.Adapter()), bbsToRelocate(allocator.Adapter()) {} + + ~TryEndTryBlock() = default; + + void Init() { + startTryBB = nullptr; + endTryBB = nullptr; + tryStmt = nullptr; + enclosedBBs.clear(); + labeledBBsInTry.clear(); + bbsToRelocate.clear(); + } + + void Reset(BBT &startBB) { + startTryBB = &startBB; + CHECK_NULL_FATAL(startTryBB->GetKeyStmt()); + tryStmt = startTryBB->GetKeyStmt(); + CHECK_FATAL(tryStmt->GetOpCode() == OP_try, "expect OPT_try"); + endTryBB = nullptr; + enclosedBBs.clear(); + labeledBBsInTry.clear(); + bbsToRelocate.clear(); + } + + void SetStartTryBB(BBT *bb) { + startTryBB = bb; + } + + BBT *GetStartTryBB() { + return startTryBB; + } + + void SetEndTryBB(BBT *bb) { + endTryBB = bb; + } + + BBT *GetEndTryBB() { + return endTryBB; + } + + StmtNode *GetTryStmtNode() { + return tryStmt; + } + + MapleVector &GetEnclosedBBs() { + return enclosedBBs; + } + + size_t GetEnclosedBBsSize() const { + return enclosedBBs.size(); + } + + const BBT *GetEnclosedBBsElem(size_t index) const{ + DEBUG_ASSERT(index < enclosedBBs.size(), "out of range"); + return enclosedBBs[index]; + } + + void PushToEnclosedBBs(BBT &bb) { + enclosedBBs.emplace_back(&bb); + } + + MapleVector &GetLabeledBBsInTry() { + return labeledBBsInTry; + } + + MapleVector &GetBBsToRelocate() { + return bbsToRelocate; + } + + private: + MapleAllocator allocator; + BBT *startTryBB = nullptr; + BBT *endTryBB = nullptr; + StmtNode *tryStmt = nullptr; + MapleVector enclosedBBs; + MapleVector labeledBBsInTry; + MapleVector bbsToRelocate; +}; + +class TryCatchBlocksLower { + public: + TryCatchBlocksLower(MemPool &memPool, BlockNode &body, MIRModule &mirModule) + : memPool(memPool), allocator(&memPool), body(body), mirModule(mirModule), + tryEndTryBlock(memPool), bbList(allocator.Adapter()), prevBBOfTry(allocator.Adapter()), + firstStmtToBBMap(allocator.Adapter()), catchesSeenSoFar(allocator.Adapter()) {} + + ~TryCatchBlocksLower() = default; + void RecoverBasicBlock(); + void TraverseBBList(); + void CheckTryCatchPattern() const; + + void SetGenerateEHCode(bool val) { + generateEHCode = val; + } + + private: + MemPool &memPool; + MapleAllocator allocator; + BlockNode &body; + MIRModule &mirModule; + TryEndTryBlock tryEndTryBlock; + StmtNode *bodyFirst = nullptr; + bool bodyEndWithEndTry = false; + bool generateEHCode = false; + MapleVector bbList; + MapleUnorderedMap prevBBOfTry; + MapleUnorderedMap firstStmtToBBMap; + MapleVector catchesSeenSoFar; + + void ProcessEnclosedBBBetweenTryEndTry(); + void ConnectRemainBB(); + BBT *FindInsertAfterBB(); + void PlaceRelocatedBB(BBT &insertAfter); + void PalceCatchSeenSofar(BBT &insertAfter); + BBT *CreateNewBB(StmtNode *first, StmtNode *last); + bool CheckAndProcessCatchNodeInCurrTryBlock(BBT &ebb, LabelIdx ebbLabel, uint32 index); + BBT *CollectCatchAndFallthruUntilNextCatchBB(BBT *&ebb, uint32 &nextEnclosedIdx, + std::vector &currBBThread); + void WrapCatchWithTryEndTryBlock(std::vector &currBBThread, BBT *&nextBBThreadHead, + uint32 &nextEnclosedIdx, bool hasMoveEndTry); + void SwapEndTryBBAndCurrBBThread(const std::vector &currBBThread, + bool &hasMoveEndTry, const BBT *nextBBThreadHead); + void ProcessThreadTail(BBT &threadTail, BBT * const &nextBBThreadHead, bool hasMoveEndTry); + static StmtNode *MoveCondGotoIntoTry(BBT 
&jtBB, BBT &condbrBB, const MapleVector &labeledBBsInTry); + static BBT *FindTargetBBlock(LabelIdx idx, const std::vector &bbs); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_TRY_CATCH_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h new file mode 100644 index 0000000000000000000000000000000000000000..aac9a9bd877bf032971be7dfea369b828e65b5c2 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -0,0 +1,14 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_abi.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..3a2385dc72d3ce8f22bb92d92e33b985e56a28f8 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_abi.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H + +#include "aarch64_isa.h" +#include "types_def.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +namespace AArch64Abi { +constexpr int32 kNumIntParmRegs = 8; +constexpr int32 kNumFloatParmRegs = 8; +constexpr int32 kYieldPointReservedReg = 19; +constexpr uint32 kNormalUseOperandNum = 3; +constexpr uint32 kMaxInstrForCondBr = 260000; // approximately less than (2^18); + +constexpr AArch64reg intReturnRegs[kNumIntParmRegs] = { R0, R1, R2, R3, R4, R5, R6, R7 }; +constexpr AArch64reg floatReturnRegs[kNumFloatParmRegs] = { V0, V1, V2, V3, V4, V5, V6, V7 }; +constexpr AArch64reg intParmRegs[kNumIntParmRegs] = { R0, R1, R2, R3, R4, R5, R6, R7 }; +constexpr AArch64reg floatParmRegs[kNumFloatParmRegs] = { V0, V1, V2, V3, V4, V5, V6, V7 }; + +/* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * ARM 64-bit Architecture. 
Section 5.5 + */ +bool IsAvailableReg(AArch64reg reg); +bool IsCalleeSavedReg(AArch64reg reg); +bool IsCallerSaveReg(AArch64reg reg); +bool IsParamReg(AArch64reg reg); +bool IsSpillReg(AArch64reg reg); +bool IsExtraSpillReg(AArch64reg reg); +bool IsSpillRegInRA(AArch64reg regNO, bool has3RegOpnd); +PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize); +} /* namespace AArch64Abi */ + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..34ce4f7e0bbf8adf4ee6b1d984a19c6248221195 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H + +#include "alignment.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +constexpr uint32 kAlignRegionPower = 4; +constexpr uint32 kAlignInsnLength = 4; +constexpr uint32 kAlignMaxNopNum = 1; + +struct AArch64AlignInfo { + /* if bb size in (16byte, 96byte) , the bb need align */ + uint32 alignMinBBSize = 16; + uint32 alignMaxBBSize = 96; + /* default loop & jump align power, related to the target machine. eg. 2^5 */ + uint32 loopAlign = 4; + uint32 jumpAlign = 5; + /* record func_align_power in CGFunc */ +}; + +class AArch64AlignAnalysis : public AlignAnalysis { + public: + AArch64AlignAnalysis(CGFunc &func, MemPool &memPool) : AlignAnalysis(func, memPool) { + aarFunc = static_cast(&func); + } + ~AArch64AlignAnalysis() override = default; + + void FindLoopHeader() override; + void FindJumpTarget() override; + void ComputeLoopAlign() override; + void ComputeJumpAlign() override; + void ComputeCondBranchAlign() override; + bool MarkCondBranchAlign(); + bool MarkShortBranchSplit(); + void AddNopAfterMark(); + void UpdateInsnId(); + uint32 GetAlignRange(uint32 alignedVal, uint32 addr) const; + + /* filter condition */ + bool IsIncludeCall(BB &bb) override; + bool IsInSizeRange(BB &bb) override; + bool HasFallthruEdge(BB &bb) override; + bool IsInSameAlignedRegion(uint32 addr1, uint32 addr2, uint32 alignedRegionSize) const; + + private: + AArch64CGFunc *aarFunc = nullptr; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_args.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_args.h new file mode 100644 index 0000000000000000000000000000000000000000..13c43659f98dba206e85a1cafb088b9a75193b60 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_args.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H + +#include "args.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +struct ArgInfo { + AArch64reg reg; + MIRType *mirTy; + uint32 symSize; + uint32 stkSize; + RegType regType; + MIRSymbol *sym; + const AArch64SymbolAlloc *symLoc; + uint8 memPairSecondRegSize; /* struct arg requiring two regs, size of 2nd reg */ + bool doMemPairOpt; + bool createTwoStores; + bool isTwoRegParm; +}; + +class AArch64MoveRegArgs : public MoveRegArgs { + public: + explicit AArch64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) {} + ~AArch64MoveRegArgs() override = default; + void Run() override; + + private: + RegOperand *baseReg = nullptr; + const MemSegment *lastSegment = nullptr; + void CollectRegisterArgs(std::map &argsList, std::vector &indexList, + std::map &pairReg, std::vector &numFpRegs, + std::vector &fpSize) const; + ArgInfo GetArgInfo(std::map &argsList, std::vector &numFpRegs, + std::vector &fpSize, uint32 argIndex) const; + bool IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const; + void GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, int32 offset) const; + void GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo); + void GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize); + void MoveRegisterArgs(); + void MoveVRegisterArgs(); + void MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const; + void LoadStackArgsToVReg(MIRSymbol &mirSym) const; + void MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..721dd877af211cd16826074a3c36ae7a842242f7 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H + +#include "types_def.h" +#include "becommon.h" +#include "call_conv.h" + +namespace maplebe { +using namespace maple; + +/* + * We use the names used in ARM IHI 0055C_beta. $ 5.4.2. 
+ * nextGeneralRegNO (= _int_parm_num) : Next General-purpose Register number + * nextFloatRegNO (= _float_parm_num): Next SIMD and Floating-point Register Number + * nextStackArgAdress (= _last_memOffset): Next Stacked Argument Address + * for processing an incoming or outgoing parameter list + */ +class AArch64CallConvImpl { + public: + explicit AArch64CallConvImpl(BECommon &be) : beCommon(be) {} + + ~AArch64CallConvImpl() = default; + + /* Return size of aggregate structure copy on stack. */ + int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr); + + int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc); + + void InitCCLocInfo(CCLocInfo &pLoc) const; + + /* for lmbc */ + uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize); + + /* return value related */ + void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc); + + void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const; + + void SetupToReturnThroughMemory(CCLocInfo &pLoc) const { + pLoc.regCount = 1; + pLoc.reg0 = R8; + pLoc.primTypeOfReg0 = PTY_u64; + } + + private: + BECommon &beCommon; + uint64 paramNum = 0; /* number of all types of parameters processed so far */ + int32 nextGeneralRegNO = 0; /* number of integer parameters processed so far */ + uint32 nextFloatRegNO = 0; /* number of float parameters processed so far */ + int32 nextStackArgAdress = 0; + + AArch64reg AllocateGPRegister() { + DEBUG_ASSERT(nextGeneralRegNO >= 0, "nextGeneralRegNO can not be neg"); + return (nextGeneralRegNO < AArch64Abi::kNumIntParmRegs) ? AArch64Abi::intParmRegs[nextGeneralRegNO++] : kRinvalid; + } + + void AllocateTwoGPRegisters(CCLocInfo &pLoc) { + if ((nextGeneralRegNO + 1) < AArch64Abi::kNumIntParmRegs) { + pLoc.reg0 = AArch64Abi::intParmRegs[nextGeneralRegNO++]; + pLoc.reg1 = AArch64Abi::intParmRegs[nextGeneralRegNO++]; + } else { + pLoc.reg0 = kRinvalid; + } + } + + AArch64reg AllocateSIMDFPRegister() { + return (nextFloatRegNO < AArch64Abi::kNumFloatParmRegs) ? 
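+      /*
+       * Allocation walks the parameter list in order (sketch; names as declared
+       * in AArch64Abi). E.g. for f(int a, double d, void *p):
+       *   a -> AllocateGPRegister()     == R0  (nextGeneralRegNO 0 -> 1)
+       *   d -> AllocateSIMDFPRegister() == V0  (nextFloatRegNO   0 -> 1)
+       *   p -> AllocateGPRegister()     == R1  (nextGeneralRegNO 1 -> 2)
+       * Once a pool is exhausted the allocators return kRinvalid and the
+       * argument is instead given a slot at nextStackArgAdress.
+       */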
AArch64Abi::floatParmRegs[nextFloatRegNO++] : kRinvalid; + } + + void AllocateNSIMDFPRegisters(CCLocInfo &ploc, uint32 num) { + if ((nextFloatRegNO + num - 1) < AArch64Abi::kNumFloatParmRegs) { + switch (num) { + case kOneRegister: + ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + break; + case kTwoRegister: + ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + ploc.reg1 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + break; + case kThreeRegister: + ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + ploc.reg1 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + ploc.reg2 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + break; + case kFourRegister: + ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + ploc.reg1 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + ploc.reg2 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + ploc.reg3 = AArch64Abi::floatParmRegs[nextFloatRegNO++]; + break; + default: + CHECK_FATAL(0, "AllocateNSIMDFPRegisters: unsupported"); + } + } else { + ploc.reg0 = kRinvalid; + } + } + + void RoundNGRNUpToNextEven() { + nextGeneralRegNO = static_cast((nextGeneralRegNO + 1) & ~static_cast(1)); + } + + int32 ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, int32 typeAlign); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h new file mode 100644 index 0000000000000000000000000000000000000000..9eff8f9cd2d504753cc02bb7132a0fc17d9e7b3f --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H + +#include "cfgo.h" + +namespace maplebe { +class AArch64CFGOptimizer : public CFGOptimizer { + public: + AArch64CFGOptimizer(CGFunc &func, MemPool &memPool) + : CFGOptimizer(func, memPool) {} + ~AArch64CFGOptimizer() = default; + void InitOptimizePatterns() override; +}; + +class AArch64FlipBRPattern : public FlipBRPattern { + public: + explicit AArch64FlipBRPattern(CGFunc &func) : FlipBRPattern(func) {} + ~AArch64FlipBRPattern() = default; + + private: + uint32 GetJumpTargetIdx(const Insn &insn) override; + MOperator FlipConditionOp(MOperator flippedOp) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h new file mode 100644 index 0000000000000000000000000000000000000000..750a48063c50cb878dbbf60391864377e1f72640 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H + +#include "cg.h" +#include "aarch64_cgfunc.h" +#include "aarch64_ssa.h" +#include "aarch64_phi_elimination.h" +#include "aarch64_prop.h" +#include "aarch64_dce.h" +#include "aarch64_live.h" +#include "aarch64_reaching.h" +#include "aarch64_args.h" +#include "aarch64_alignment.h" +#include "aarch64_validbit_opt.h" +#include "aarch64_reg_coalesce.h" +#include "aarch64_cfgo.h" + +namespace maplebe { +constexpr int64 kShortBRDistance = (8 * 1024); +constexpr int64 kNegativeImmLowerLimit = -4096; +constexpr int32 kIntRegTypeNum = 5; +constexpr uint32 kAlignPseudoSize = 3; +constexpr uint32 kInsnSize = 4; +constexpr uint32 kAlignMovedFlag = 31; + +/* Supporting classes for GCTIB merging */ +class GCTIBKey { + public: + GCTIBKey(MapleAllocator &allocator, uint32 rcHeader, std::vector &patternWords) + : header(rcHeader), bitMapWords(allocator.Adapter()) { + (void)bitMapWords.insert(bitMapWords.begin(), patternWords.begin(), patternWords.end()); + } + + ~GCTIBKey() = default; + + uint32 GetHeader() const { + return header; + } + + const MapleVector &GetBitmapWords() const { + return bitMapWords; + } + + private: + uint32 header; + MapleVector bitMapWords; +}; + +class Hasher { + public: + size_t operator()(const GCTIBKey *key) const { + CHECK_NULL_FATAL(key); + size_t hash = key->GetHeader(); + return hash; + } +}; + +class EqualFn { + public: + bool operator()(const GCTIBKey *firstKey, const GCTIBKey *secondKey) const { + CHECK_NULL_FATAL(firstKey); + CHECK_NULL_FATAL(secondKey); + const MapleVector &firstWords = firstKey->GetBitmapWords(); + const MapleVector &secondWords = secondKey->GetBitmapWords(); + + if ((firstKey->GetHeader() != secondKey->GetHeader()) || (firstWords.size() != secondWords.size())) { + return false; + } + + for (size_t i = 0; i < firstWords.size(); ++i) { + if (firstWords[i] != secondWords[i]) { + return false; + } + } + return true; + } +}; + +class GCTIBPattern { + public: + GCTIBPattern(GCTIBKey &patternKey, MemPool &mp) : name(&mp) { + key = &patternKey; + id = GetId(); + name = GCTIB_PREFIX_STR + std::string("PTN_") + std::to_string(id); + } + + ~GCTIBPattern() = default; + + int GetId() const { + static int id = 0; + return id++; + } + + std::string GetName() const { + DEBUG_ASSERT(!name.empty(), "null name check!"); + return std::string(name.c_str()); + } + + void SetName(const std::string &ptnName) { + name = ptnName; + } + + private: + int id; + MapleString name; + GCTIBKey *key; +}; + +/* sub Target info & implement */ +class AArch64CG : public CG { + public: + AArch64CG(MIRModule &mod, const CGOptions &opts, const std::vector &nameVec, + const std::unordered_map> &patternMap) + : CG(mod, opts), + ehExclusiveNameVec(nameVec), + cyclePatternMap(patternMap), + keyPatternMap(allocator.Adapter()), + symbolPatternMap(allocator.Adapter()) {} + + ~AArch64CG() override = default; + + CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &mallocator, uint32 
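+  /*
+   * Hasher/EqualFn above let GCTIB patterns be deduplicated by content rather
+   * than by pointer identity. Sketch of the container shape they serve (this
+   * standalone map is illustrative; the real members are the Maple maps
+   * declared at the bottom of this class):
+   *
+   *   std::unordered_map<GCTIBKey*, GCTIBPattern*, Hasher, EqualFn> dedup;
+   *   // keys with an equal header and equal bitmap words share one pattern
+   */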
funcId) override { + return memPool.New(mod, *this, mirFunc, bec, memPool, stackMp, mallocator, funcId); + } + + void EnrollTargetPhases(MaplePhaseManager *pm) const override; + + const std::unordered_map> &GetCyclePatternMap() const { + return cyclePatternMap; + } + + void GenerateObjectMaps(BECommon &beCommon) override; + + bool IsExclusiveFunc(MIRFunction&) override; + + void FindOrCreateRepresentiveSym(std::vector &bitmapWords, uint32 rcHeader, const std::string &name); + + void CreateRefSymForGlobalPtn(GCTIBPattern &ptn) const; + + Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) override; + + PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) override; + + std::string FindGCTIBPatternName(const std::string &name) const override; + + LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } + AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const override { + return mp.New(f, da, mp, tmp); + } + LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + }; + PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(f, ssaInfo, mp); + } + CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override { + return mp.New(mp, f, ssaInfo, ll); + } + CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(mp, f, ssaInfo); + } + ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(f, ssaInfo); + } + CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + + /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. + * i. mov reg2, reg1 + * ii. add/sub reg2, reg1, 0/zero register + * iii. mul reg2, reg1, 1 + */ + bool IsEffectiveCopy(Insn &insn) const final; + bool IsTargetInsn(MOperator mOp) const final; + bool IsClinitInsn(MOperator mOp) const final; + bool IsPseudoInsn(MOperator mOp) const final; + void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const final; + const InsnDesc &GetTargetMd(MOperator mOp) const final { + return kMd[mOp]; + } + + static const InsnDesc kMd[kMopLast]; + enum : uint8 { + kR8List, + kR16List, + kR32List, + kR64List, + kV64List + }; + static std::array, kIntRegTypeNum> intRegNames; + static std::array vectorRegNames; + + private: + const std::vector &ehExclusiveNameVec; + const std::unordered_map> &cyclePatternMap; + MapleUnorderedMap keyPatternMap; + MapleUnorderedMap symbolPatternMap; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h new file mode 100644 index 0000000000000000000000000000000000000000..6743caad038b827216c17630dd4833bf9770a1dd --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -0,0 +1,959 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CGFUNC_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CGFUNC_H + +#include "cgfunc.h" +#include "call_conv.h" +#include "mpl_atomic.h" +#include "aarch64_abi.h" +#include "aarch64_operand.h" +#include "aarch64_insn.h" +#include "aarch64_memlayout.h" +#include "aarch64_reg_info.h" +#include "aarch64_optimize_common.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +class LmbcArgInfo { + public: + explicit LmbcArgInfo(MapleAllocator &mallocator) + : lmbcCallArgs(mallocator.Adapter()), + lmbcCallArgTypes(mallocator.Adapter()), + lmbcCallArgOffsets(mallocator.Adapter()), + lmbcCallArgNumOfRegs(mallocator.Adapter()) {} + MapleVector lmbcCallArgs; + MapleVector lmbcCallArgTypes; + MapleVector lmbcCallArgOffsets; + MapleVector lmbcCallArgNumOfRegs; // # of regs needed to complete struct + uint32 lmbcTotalStkUsed = -1; // remove when explicit addr for large agg is available +}; + +class AArch64CGFunc : public CGFunc { + public: + AArch64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b, + MemPool &memPool, StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) + : CGFunc(mod, c, f, b, memPool, stackMp, mallocator, funcId), + calleeSavedRegs(mallocator.Adapter()), + proEpilogSavedRegs(mallocator.Adapter()), + phyRegOperandTable(mallocator.Adapter()), + hashLabelOpndTable(mallocator.Adapter()), + hashOfstOpndTable(mallocator.Adapter()), + hashMemOpndTable(mallocator.Adapter()), + memOpndsRequiringOffsetAdjustment(mallocator.Adapter()), + memOpndsForStkPassedArguments(mallocator.Adapter()), + immOpndsRequiringOffsetAdjustment(mallocator.Adapter()), + immOpndsRequiringOffsetAdjustmentForRefloc(mallocator.Adapter()) { + uCatch.regNOCatch = 0; + CGFunc::SetMemlayout(*memPool.New(b, f, mallocator)); + CGFunc::GetMemlayout()->SetCurrFunction(*this); + CGFunc::SetTargetRegInfo(*memPool.New(mallocator)); + CGFunc::GetTargetRegInfo()->SetCurrFunction(*this); + if (f.GetAttr(FUNCATTR_varargs) || f.HasVlaOrAlloca()) { + SetHasVLAOrAlloca(true); + } + SetHasAlloca(f.HasVlaOrAlloca()); + SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule() || + f.GetModule()->GetFlavor() == MIRFlavor::kFlavorLmbc); + } + + ~AArch64CGFunc() override = default; + + uint32 GetRefCount() const { + return refCount; + } + + int32 GetBeginOffset() const { + return beginOffset; + } + + MOperator PickMovBetweenRegs(PrimType destType, PrimType srcType) const; + MOperator PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const; + + regno_t NewVRflag() override { + DEBUG_ASSERT(maxRegCount > kRFLAG, "CG internal error."); + constexpr uint8 size = 4; + if (maxRegCount <= kRFLAG) { + maxRegCount += (kRFLAG + kVRegisterNumber); + vRegTable.resize(maxRegCount); + } + new (&vRegTable[kRFLAG]) VirtualRegNode(kRegTyCc, size); + return kRFLAG; + } + + MIRType *LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector **parmList) const; + RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); + + void 
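+  /*
+   * Frame-pointer policy from the constructor above, spelled out (sketch):
+   *   - UseFramePointer() on, VLA/alloca present, a non-C module, or the
+   *     lmbc flavor  -> keep the frame pointer (RFP);
+   *   - otherwise    -> frames are addressed off SP only.
+   */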
IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym); + void GenSaveMethodInfoCode(BB &bb) override; + void DetermineReturnTypeofCall() override; + void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) override; + bool GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool forEA = false); + void HandleRetCleanup(NaryStmtNode &retNode) override; + void MergeReturn() override; + RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); + void SelectDassign(DassignNode &stmt, Operand &opnd0) override; + void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) override; + void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; + void SelectAbort() override; + void SelectAssertNull(UnaryStmtNode &stmt) override; + void SelectAsm(AsmNode &stmt) override; + MemOperand *GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 alignUsed, int64 offset, + bool needLow12 = false); + MemOperand *FixLargeMemOpnd(MemOperand &memOpnd, uint32 align); + MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); + uint32 LmbcFindTotalStkUsed(std::vector* paramList); + uint32 LmbcTotalRegsUsed(); + void LmbcSelectParmList(ListOperand *srcOpnds, bool isArgReturn); + bool LmbcSmallAggForRet(const BlkassignoffNode &bNode, const Operand *src); + bool LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList); + void SelectAggDassign(DassignNode &stmt) override; + void SelectIassign(IassignNode &stmt) override; + void SelectIassignoff(IassignoffNode &stmt) override; + void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; + void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; + void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override; + void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) override; + void SelectReturnSendOfStructInRegs(BaseNode *x) override; + void SelectReturn(Operand *opnd0) override; + void SelectIgoto(Operand *opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; + void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &opnd0, Operand &opnd1, + PrimType primType, bool signedCond); + void SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) override; + void SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) override; + void SelectGoto(GotoNode &stmt) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) override; + Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinsicopNode, std::string name) override; + Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinsicopNode, PrimType retType, + const std::string &name) override; + Operand *SelectCclz(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCctz(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCpopcount(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCparity(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCclrsb(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCisaligned(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCalignup(IntrinsicopNode 
&intrinsicopNode) override; + Operand *SelectCaligndown(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinsicopNode, PrimType pty) override; + Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) override; + AArch64isa::MemoryOrdering PickMemOrder(std::memory_order memOrder, bool isLdr) const; + Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + Operand *SelectCReturnAddress(IntrinsicopNode &intrinsicopNode) override; + void SelectMembar(StmtNode &membar) override; + void SelectComment(CommentNode &comment) override; + + void HandleCatch() override; + Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) override; + RegOperand *SelectRegread(RegreadNode &expr) override; + + void SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field = 0); + void SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field = 0); + Operand *SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, bool retBool = false); + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff = false) override; + Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) override; + Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + + PrimType GetDestTypeFromAggSize(uint32 bitSize) const; + + Operand *SelectIread(const BaseNode &parent, IreadNode &expr, + int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) override; + Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override; + Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) override; + Operand *SelectIntConst(MIRIntConst &intConst) override; + Operand *HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent); + Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override; + Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) override; + Operand *SelectStrConst(MIRStrConst &strConst) override; + Operand *SelectStr16Const(MIRStr16Const &str16Const) override; + + void SelectAdd(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + Operand *SelectAdd(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + Operand &SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) override; + void SelectMadd(Operand &resOpnd, Operand &oM0, Operand &oM1, Operand &o1, PrimType primeType) override; + Operand *SelectMadd(BinaryNode &node, Operand &oM0, Operand &oM1, Operand &o1, const BaseNode &parent) override; + Operand *SelectRor(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + Operand *SelectShift(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + Operand *SelectSub(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectSub(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + Operand 
*SelectBand(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectBand(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + Operand *SelectBior(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectBior(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + Operand *SelectBxor(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectBxor(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + + void SelectBxorShift(Operand &resOpnd, Operand *o0, Operand *o1, Operand &o2, PrimType primType); + Operand *SelectLand(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + Operand *SelectLor(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent, + bool parentIsBr = false) override; + Operand *SelectMin(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectMin(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + Operand *SelectMax(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectMax(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + void SelectFMinFMax(Operand &resOpnd, Operand &o0, Operand &o1, bool is64Bits, bool isMin); + void SelectCmpOp(Operand &resOpnd, Operand &o0, Operand &o1, Opcode opCode, PrimType primType, + const BaseNode &parent); + + Operand *SelectCmpOp(CompareNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + + void SelectAArch64Cmp(Operand &o, Operand &i, bool isIntType, uint32 dsize); + void SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize); + void SelectAArch64CCmp(Operand &o, Operand &i, Operand &nzcv, CondOperand &cond, bool is64Bits); + void SelectAArch64CSet(Operand &o, CondOperand &cond, bool is64Bits); + void SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits); + void SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits); + void SelectShift(Operand &resOpnd, Operand &o0, Operand &o1, ShiftDirection direct, PrimType primType); + Operand *SelectMpy(BinaryNode &node, Operand &o0, Operand &o1, const BaseNode &parent) override; + void SelectMpy(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType) override; + /* method description contains method information which is metadata for reflection. 
*/ + MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t regNO, bool isDest, Insn &insn, + AArch64reg regNum, bool &isOutOfRange); + void SelectAddAfterInsn(Operand &resOpnd, Operand &o0, Operand &o1, PrimType primType, bool isDest, Insn &insn); + bool IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen); + bool IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx); + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0); + Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override; + Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) override; + Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectNeg(Operand &dest, Operand &opnd0, PrimType primType); + void SelectMvn(Operand &dest, Operand &opnd0, PrimType primType); + Operand *SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &node, Operand &opnd0, Operand &opnd1, Operand &opnd2, + const BaseNode &parent, bool hasCompare = false) override; + Operand *SelectMalloc(UnaryNode &call, Operand &opnd0) override; + Operand *SelectAlloca(UnaryNode &call, Operand &opnd0) override; + Operand *SelectGCMalloc(GCMallocNode &call) override; + Operand *SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) override; + void SelectSelect(Operand &resOpnd, Operand &condOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType dtype, + PrimType ctype, bool hasCompare = false, ConditionCode cc = CC_NE); + void SelectAArch64Select(Operand &dest, Operand &opnd0, Operand &opnd1, CondOperand &cond, bool isIntType, + uint32 is64bits); + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) override; + Operand *SelectLazyLoad(Operand &opnd0, PrimType primType) override; + Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) override; + Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) override; + RegOperand &SelectCopy(Operand &src, PrimType stype, PrimType dtype) override; + void 
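+  /*
+   * Sketch (hypothetical call site): copying between virtual registers goes
+   * through SelectCopy; e.g. widening an i32 operand into a fresh i64 one:
+   *
+   *   RegOperand &r64 = SelectCopy(r32, PTY_i32, PTY_i64);
+   *
+   * The concrete mov variant is presumably chosen via PickMovBetweenRegs.
+   */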
SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype); + void SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType); + void SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype); + void SelectLibCall(const std::string&, std::vector&, PrimType, PrimType, bool is2ndRet = false); + void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, std::vector pt, + PrimType retPrimType, bool is2ndRet); + bool IsRegRematCand(const RegOperand ®) const; + void ClearRegRematInfo(const RegOperand ®) const; + bool IsRegSameRematInfo(const RegOperand ®Dest, const RegOperand ®Src) const; + void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) override; + void CleanupDeadMov(bool dump = false) override; + void GetRealCallerSaveRegs(const Insn &insn, std::set &realSaveRegs) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand &GetOrCreateRflag() override; + const Operand *GetRflag() const override; + Operand &GetOrCreatevaryreg(); + RegOperand &CreateRegisterOperandOfType(PrimType primType); + RegOperand &CreateRegisterOperandOfType(RegType regType, uint32 byteLen); + RegOperand &CreateRflagOperand(); + RegOperand &GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType); + MemOperand *GetOrCreatSpillMem(regno_t vrNum); + void FreeSpillRegMem(regno_t vrNum); + RegOperand &GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType type, uint32 flag = 0); + RegOperand &GetOrCreatePhysicalRegisterOperand(std::string &asmAttr); + RegOperand *CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg = 0) const; + RegOperand &CreateVirtualRegisterOperand(regno_t vregNO) override; + RegOperand &GetOrCreateVirtualRegisterOperand(regno_t vregNO) override; + RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) override; + const LabelOperand *GetLabelOperand(LabelIdx labIdx) const override; + LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; + LabelOperand &GetOrCreateLabelOperand(BB &bb) override; + uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; + + RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) override; + RegOperand *SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) override; + RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Opcode opc) override; + RegOperand *SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *opnd2, + PrimType oTyp2, Opcode opc) override; + RegOperand *SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) override; + RegOperand *SelectOneElementVectorCopy(Operand *opnd, PrimType sType); + RegOperand *SelectVectorImmMov(PrimType rType, Operand *src, PrimType sType); + RegOperand *SelectVectorRegMov(PrimType rType, Operand *src, PrimType sType); + RegOperand *SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) override; + RegOperand *SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) override; + RegOperand *SelectVectorDup(PrimType rType, Operand *src, bool getLow) override; + RegOperand *SelectVectorAbsSubL(PrimType rType, Operand *o1, 
Operand *o2, PrimType oTy, bool isLow) override; + RegOperand *SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + Operand *o3, PrimType oTyp3) override; + RegOperand *SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) override; + RegOperand *SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, + Operand *o2, PrimType oTyp2, bool isLow) override; + RegOperand *SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) override; + RegOperand *SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) override; + RegOperand *SelectVectorNeg(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorNot(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) override; + RegOperand *SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) override; + RegOperand *SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) override; + RegOperand *SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp, int32 lane) override; + RegOperand *SelectVectorSelect(Operand &cond, PrimType rType, Operand &o0, Operand &o1); + RegOperand *SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) override; + RegOperand *SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oTyp, Operand *o2, bool isLow) override; + RegOperand *SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, + Operand *o2, PrimType otyp2, bool isLow, bool isWide) override; + RegOperand *SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) override; + RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) override; + RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; + + void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); + void SelectVectorZip(PrimType rType, Operand *o1, Operand *o2); + void PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2); + RegOperand *AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd); + bool DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId) const; + + PrimType FilterOneElementVectorType(PrimType origTyp) const { + PrimType nType = origTyp; + if (origTyp == PTY_i64 || origTyp == PTY_u64) { + nType = PTY_f64; + } + return nType; + } + + ImmOperand &CreateImmOperand(PrimType ptyp, int64 val) override { + return CreateImmOperand(val, GetPrimTypeBitSize(ptyp), IsSignedInteger(ptyp)); + } + + + const Operand *GetFloatRflag() const override { + return nullptr; + } + /* create an integer immediate operand */ + ImmOperand &CreateImmOperand(int64 val, uint32 size, bool isSigned, VaryType varyType = kNotVary, + bool isFmov = false) const { + return *memPool->New<ImmOperand>(val, size, isSigned, varyType, isFmov); + } + + ImmOperand &CreateImmOperand(Operand::OperandType type, int64 val, uint32 size, bool isSigned) { + return *memPool->New<ImmOperand>(type, val, size, isSigned); + } + + ListOperand *CreateListOpnd(MapleAllocator &allocator) { + return memPool->New<ListOperand>(allocator); + } + + OfstOperand &GetOrCreateOfstOpnd(uint64 offset, uint32 size); + + OfstOperand &CreateOfstOpnd(uint64 offset, uint32 size) const { + return *memPool->New<OfstOperand>(offset, size); + } + + OfstOperand
&CreateOfstOpnd(const MIRSymbol &mirSymbol, int32 relocs) const { + return *memPool->New<OfstOperand>(mirSymbol, 0, relocs); + } + + OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) const { + return *memPool->New<OfstOperand>(mirSymbol, 0, offset, relocs); + } + + StImmOperand &CreateStImmOperand(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) const { + return *memPool->New<StImmOperand>(mirSymbol, offset, relocs); + } + + RegOperand &GetOrCreateFramePointerRegOperand() override { + return GetOrCreateStackBaseRegOperand(); + } + + RegOperand &GetOrCreateStackBaseRegOperand() override { + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + return GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, kRegTyInt); + } + + RegOperand &GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, PrimType baseType, + PrimType targetType); + void SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, std::vector<Insn*>& rematInsns); + MemOperand &GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, + bool needLow12, RegOperand *regOp, std::vector<Insn*>& rematInsns); + + MemOperand &GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef = false, + bool needLow12 = false, RegOperand *regOp = nullptr); + + MemOperand &HashMemOpnd(MemOperand &tMemOpnd); + + MemOperand &GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base, + RegOperand *index, ImmOperand *offset, const MIRSymbol *st); + + MemOperand &GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode, uint32 size, RegOperand *base, + RegOperand *index, int32 shift, bool isSigned = false); + + MemOperand &GetOrCreateMemOpnd(MemOperand &oldMem); + + MemOperand &CreateMemOpnd(AArch64reg reg, int64 offset, uint32 size) { + RegOperand &baseOpnd = GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, kRegTyInt); + return CreateMemOpnd(baseOpnd, offset, size); + } + + MemOperand &CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size); + + MemOperand &CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym); + + MemOperand &CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone); + + MemOperand *CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone); + + CondOperand &GetCondOperand(ConditionCode op) const { + return ccOperands[op]; + } + + BitShiftOperand *GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const; + + BitShiftOperand &CreateBitShiftOperand(BitShiftOperand::ShiftOp op, uint32 amount, int32 bitLen) const { + return *memPool->New<BitShiftOperand>(op, amount, bitLen); + } + + ExtendShiftOperand &CreateExtendShiftOperand(ExtendShiftOperand::ExtendOp op, uint32 amount, int32 bitLen) const { + return *memPool->New<ExtendShiftOperand>(op, amount, bitLen); + } + + void SplitMovImmOpndInstruction(int64 immVal, RegOperand &destReg, Insn *curInsn = nullptr); + + Operand &GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const; + void GenerateYieldpoint(BB &bb) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + void GenerateCleanupCode(BB &bb) override; + bool NeedCleanup() override; + void GenerateCleanupCodeForExtEpilog(BB &bb) override; + uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize)
override; + void AssignLmbcFormalParams() override; + void LmbcGenSaveSpForAlloca() override; + MemOperand *GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg base = RFP); + RegOperand *GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, + PrimType primType, AArch64reg baseRegno = RFP); + RegOperand *LmbcStructReturnLoad(int32 offset); + Operand *GetBaseReg(const AArch64SymbolAlloc &symAlloc); + int32 GetBaseOffset(const SymbolAlloc &symAlloc) override; + + Operand &CreateCommentOperand(const std::string &s) const { + return *memPool->New<CommentOperand>(s, *memPool); + } + + Operand &CreateCommentOperand(const MapleString &s) const { + return *memPool->New<CommentOperand>(s.c_str(), *memPool); + } + + Operand &CreateStringOperand(const std::string &s) const { + return *memPool->New<StringOperand>(s, *memPool); + } + + Operand &CreateStringOperand(const MapleString &s) const { + return *memPool->New<StringOperand>(s.c_str(), *memPool); + } + + void AddtoCalleeSaved(regno_t reg) override { + if (!UseFP() && reg == R29) { + reg = RFP; + } + if (find(calleeSavedRegs.begin(), calleeSavedRegs.end(), reg) != calleeSavedRegs.end()) { + return; + } + calleeSavedRegs.emplace_back(static_cast<AArch64reg>(reg)); + DEBUG_ASSERT((AArch64isa::IsGPRegister(static_cast<AArch64reg>(reg)) || + AArch64isa::IsFPSIMDRegister(static_cast<AArch64reg>(reg))), + "Int or FP registers are expected"); + if (AArch64isa::IsGPRegister(static_cast<AArch64reg>(reg))) { + ++numIntregToCalleeSave; + } else { + ++numFpregToCalleeSave; + } + } + + uint32 SizeOfCalleeSaved() const { + /* npairs = num / 2 + num % 2 */ + uint32 nPairs = (numIntregToCalleeSave >> 1) + (numIntregToCalleeSave & 0x1); + nPairs += (numFpregToCalleeSave >> 1) + (numFpregToCalleeSave & 0x1); + return (nPairs * (kIntregBytelen << 1)); + } + + void DBGFixCallFrameLocationOffsets() override; + + void NoteFPLRAddedToCalleeSavedList() { + fplrAddedToCalleeSaved = true; + } + + bool IsFPLRAddedToCalleeSavedList() const { + return fplrAddedToCalleeSaved; + } + + bool IsIntrnCallForC() const { + return isIntrnCallForC; + } + + bool UsedStpSubPairForCallFrameAllocation() const { + return usedStpSubPairToAllocateCallFrame; + } + void SetUsedStpSubPairForCallFrameAllocation(bool val) { + usedStpSubPairToAllocateCallFrame = val; + } + + const MapleVector<AArch64reg> &GetCalleeSavedRegs() const { + return calleeSavedRegs; + } + + Insn *GetYieldPointInsn() { + return yieldPointInsn; + } + + const Insn *GetYieldPointInsn() const { + return yieldPointInsn; + } + + IntrinsiccallNode *GetCleanEANode() { + return cleanEANode; + } + + MemOperand &CreateStkTopOpnd(uint32 offset, uint32 size); + MemOperand *CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size); + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand *index, ImmOperand *offset, const MIRSymbol *symbol) const; + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand &index, ImmOperand *offset, const MIRSymbol &symbol, bool noExtend); + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + RegOperand &base, RegOperand &indexOpnd, uint32 shift, bool isSigned = false) const; + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym); + + /* if offset < 0, allocation; otherwise, deallocation */ + MemOperand &CreateCallFrameOperand(int32 offset, uint32 size); + + void AppendCall(const MIRSymbol &func); + Insn &AppendCall(const MIRSymbol &func, ListOperand &srcOpnds); + + static constexpr uint32 kDwarfFpRegBegin
= 64; + static constexpr int32 kBitLenOfShift64Bits = 6; /* for 64 bits register, shift amount is 0~63, use 6 bits to store */ + static constexpr int32 kBitLenOfShift32Bits = 5; /* for 32 bits register, shift amount is 0~31, use 5 bits to store */ + static constexpr int32 kHighestBitOf64Bits = 63; /* 63 is highest bit of a 64 bits number */ + static constexpr int32 kHighestBitOf32Bits = 31; /* 31 is highest bit of a 32 bits number */ + static constexpr int32 k16ValidBit = 16; + + /* CFI directives related stuffs */ + Operand &CreateCfiRegOperand(uint32 reg, uint32 size) override { + /* + * DWARF for ARM Architecture (ARM IHI 0040B) 3.1 Table 1 + * Having kRinvalid=0 (see arm32_isa.h) means + * each register gets assigned an id number one greater than + * its physical number + */ + if (reg < V0) { + return *memPool->New<cfi::RegOperand>((reg - R0), size); + } else { + return *memPool->New<cfi::RegOperand>((reg - V0) + kDwarfFpRegBegin, size); + } + } + + void SetCatchRegno(regno_t regNO) { + uCatch.regNOCatch = regNO; + } + + regno_t GetCatchRegno() const { + return uCatch.regNOCatch; + } + + void SetCatchOpnd(Operand &opnd) { + uCatch.opndCatch = &opnd; + } + + AArch64reg GetReturnRegisterNumber(); + + MOperator PickStInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone) const; + MOperator PickLdInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone) const; + MOperator PickExtInsn(PrimType dtype, PrimType stype) const; + + bool CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const; + RegOperand *GetBaseRegForSplit(uint32 baseRegNum); + + MemOperand &ConstraintOffsetToSafeRegion(uint32 bitLen, const MemOperand &memOpnd); + MemOperand &SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, + uint32 baseRegNum = AArch64reg::kRinvalid, bool isDest = false, + Insn *insn = nullptr, bool forPair = false); + ImmOperand &SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, RegOperand *resOpnd, int64 ofstVal, + bool isDest = false, Insn *insn = nullptr, bool forPair = false); + MemOperand &CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset); + + bool HasStackLoadStore(); + + MemOperand &LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int datasize); + + int32 GetSplitBaseOffset() const { + return splitStpldpBaseOffset; + } + void SetSplitBaseOffset(int32 val) { + splitStpldpBaseOffset = val; + } + + Insn &CreateCommentInsn(const std::string &comment) { + Insn &insn = GetInsnBuilder()->BuildInsn(abstract::MOP_comment, InsnDesc::GetAbstractId(abstract::MOP_comment)); + insn.AddOperand(CreateCommentOperand(comment)); + return insn; + } + + Insn &CreateCommentInsn(const MapleString &comment) { + Insn &insn = GetInsnBuilder()->BuildInsn(abstract::MOP_comment, InsnDesc::GetAbstractId(abstract::MOP_comment)); + insn.AddOperand(CreateCommentOperand(comment)); + return insn; + } + + Insn &CreateCfiRestoreInsn(uint32 reg, uint32 size) { + return GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_restore).AddOpndChain(CreateCfiRegOperand(reg, size)); + } + + Insn &CreateCfiOffsetInsn(uint32 reg, int64 val, uint32 size) { + return GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_offset). + AddOpndChain(CreateCfiRegOperand(reg, size)). + AddOpndChain(CreateCfiImmOperand(val, size)); + } + Insn &CreateCfiDefCfaInsn(uint32 reg, int64 val, uint32 size) { + return GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa). + AddOpndChain(CreateCfiRegOperand(reg, size)).
+ AddOpndChain(CreateCfiImmOperand(val, size)); + } + + InsnVisitor *NewInsnModifier() override { + return memPool->New<AArch64InsnVisitor>(*this); + } + + RegType GetRegisterType(regno_t reg) const override; + + uint32 MaxCondBranchDistance() override { + return AArch64Abi::kMaxInstrForCondBr; + } + + void InsertJumpPad(Insn *insn) override; + + MIRPreg *GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA = false) const; + + MapleVector<AArch64reg> &GetProEpilogSavedRegs() { + return proEpilogSavedRegs; + } + + uint32 GetDefaultAlignPow() const { + return alignPow; + } + + LmbcArgInfo *GetLmbcArgInfo() { + return lmbcArgInfo; + } + + void SetLmbcArgInfo(LmbcArgInfo *p) { + lmbcArgInfo = p; + } + + void SetLmbcArgInfo(RegOperand *reg, PrimType pTy, int32 ofst, int32 regs) { + (void)GetLmbcCallArgs().emplace_back(reg); + (void)GetLmbcCallArgTypes().emplace_back(pTy); + (void)GetLmbcCallArgOffsets().emplace_back(ofst); + (void)GetLmbcCallArgNumOfRegs().emplace_back(regs); + } + + void ResetLmbcArgInfo() { + GetLmbcCallArgs().clear(); + GetLmbcCallArgTypes().clear(); + GetLmbcCallArgOffsets().clear(); + GetLmbcCallArgNumOfRegs().clear(); + } + + MapleVector<RegOperand*> &GetLmbcCallArgs() const { + return lmbcArgInfo->lmbcCallArgs; + } + + MapleVector<PrimType> &GetLmbcCallArgTypes() const { + return lmbcArgInfo->lmbcCallArgTypes; + } + + MapleVector<int32> &GetLmbcCallArgOffsets() const { + return lmbcArgInfo->lmbcCallArgOffsets; + } + + MapleVector<int32> &GetLmbcCallArgNumOfRegs() const { + return lmbcArgInfo->lmbcCallArgNumOfRegs; + } + + int32 GetLmbcTotalStkUsed() const { + return lmbcArgInfo->lmbcTotalStkUsed; + } + + void SetLmbcTotalStkUsed(int32 offset) { + lmbcArgInfo->lmbcTotalStkUsed = offset; + } + + void SetLmbcCallReturnType(MIRType *ty) { + lmbcCallReturnType = ty; + } + + MIRType *GetLmbcCallReturnType() { + return lmbcCallReturnType; + } + + bool IsSPOrFP(const RegOperand &opnd) const override; + bool IsReturnReg(const RegOperand &opnd) const override; + bool IsSaveReg(const RegOperand &reg, MIRType &mirType, BECommon &cgBeCommon) const override; + + RegOperand &GetZeroOpnd(uint32 size) override; + + private: + enum RelationOperator : uint8 { + kAND, + kIOR, + kEOR + }; + + enum RelationOperatorOpndPattern : uint8 { + kRegReg, + kRegImm + }; + + enum RoundType : uint8 { + kCeil, + kFloor, + kRound + }; + + static constexpr int32 kMaxMovkLslEntries = 8; + using MovkLslOperandArray = std::array<BitShiftOperand, kMaxMovkLslEntries>; + + MapleVector<AArch64reg> calleeSavedRegs; + MapleVector<AArch64reg> proEpilogSavedRegs; + uint32 refCount = 0; /* Ref count number. 0 if function don't have "bl MCC_InitializeLocalStackRef" */ + int32 beginOffset = 0; /* Begin offset based x29. */ + Insn *yieldPointInsn = nullptr; /* The insn of yield point at the entry of the func. */ + IntrinsiccallNode *cleanEANode = nullptr; + + MapleUnorderedMap phyRegOperandTable; /* machine register operand table */ + MapleUnorderedMap hashLabelOpndTable; + MapleUnorderedMap hashOfstOpndTable; + MapleUnorderedMap hashMemOpndTable; + /* + * Local variables, formal parameters that are passed via registers + * need offset adjustment after callee-saved registers are known. + */ + MapleUnorderedMap memOpndsRequiringOffsetAdjustment; + MapleUnorderedMap memOpndsForStkPassedArguments; + MapleUnorderedMap immOpndsRequiringOffsetAdjustment; + MapleUnorderedMap immOpndsRequiringOffsetAdjustmentForRefloc; + union { + regno_t regNOCatch; /* For O2. */ + Operand *opndCatch; /* For O0-O1.
*/ + } uCatch; + enum fpParamState { + kNotFp, + kFp32Bit, + kFp64Bit, + kStateUnknown, + }; + Operand *rcc = nullptr; + Operand *vary = nullptr; + Operand *fsp = nullptr; /* used to point the address of local variables and formal parameters */ + + static CondOperand ccOperands[kCcLast]; + static MovkLslOperandArray movkLslOperands; + uint32 numIntregToCalleeSave = 0; + uint32 numFpregToCalleeSave = 0; + bool fplrAddedToCalleeSaved = false; + bool isIntrnCallForC = false; + bool usedStpSubPairToAllocateCallFrame = false; + int32 splitStpldpBaseOffset = 0; + regno_t methodHandleVreg = -1; + uint32 alignPow = 5; /* function align pow defaults to 5 i.e. 2^5*/ + LmbcArgInfo *lmbcArgInfo = nullptr; + MIRType *lmbcCallReturnType = nullptr; + + void SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect); + void SelectStoreRelease(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect); + MOperator PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const; + bool IsFrameReg(const RegOperand &opnd) const override; + + PrimType GetOperandTy(bool isIntty, uint32 dsize, bool isSigned) const { + DEBUG_ASSERT(!isSigned || isIntty, ""); + return (isIntty ? ((dsize == k64BitSize) ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)) + : ((dsize == k64BitSize) ? PTY_f64 : PTY_f32)); + } + + RegOperand &LoadIntoRegister(Operand &o, bool isIntty, uint32 dsize, bool asSigned = false) { + PrimType pTy; + if (o.GetKind() == Operand::kOpdRegister && static_cast(o).GetRegisterType() == kRegTyFloat) { + // f128 is a vector placeholder, no use for now + pTy = dsize == k32BitSize ? PTY_f32 : (dsize == k64BitSize ? PTY_f64 : PTY_f128); + } else { + pTy = GetOperandTy(isIntty, dsize, asSigned); + } + return LoadIntoRegister(o, pTy); + } + + RegOperand &LoadIntoRegister(Operand &o, PrimType oty) { + return (o.IsRegister() ? static_cast(o) : SelectCopy(o, oty, oty)); + } + + RegOperand &LoadIntoRegister(Operand &o, PrimType dty, PrimType sty) { + return (o.IsRegister() ? 
static_cast(o) : SelectCopy(o, sty, dty)); + } + + void CreateCallStructParamPassByStack(int32 symSize, const MIRSymbol *sym, RegOperand *addrOpnd, int32 baseOffset); + RegOperand *SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, const CCLocInfo &ploc, + int32 offset, uint32 parmNum); + void CreateCallStructParamPassByReg(regno_t reg, MemOperand &memOpnd, ListOperand &srcOpnds, + fpParamState state); + void CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand *addropnd, + uint32 structSize, int32 copyOffset, int32 fromOffset); + RegOperand *CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, RegOperand *addrOpd, + int32 copyOffset, int32 fromOffset, const CCLocInfo &ploc); + void SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID); + void SelectParmListIreadSmallAggregate(const IreadNode &iread, MIRType &structType, ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator); + void SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); + void SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); + void CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, AArch64CallConvImpl &parmLocator, + ListOperand &srcOpnds); + void SelectParmListForAggregate(BaseNode &argExpr, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + int32 &structCopyOffset); + size_t SelectParmListGetStructReturnSize(StmtNode &naryNode); + bool MarkParmListCall(BaseNode &expr); + void SelectParmListPreprocessLargeStruct(BaseNode &argExpr, int32 &structCopyOffset); + void SelectParmListPreprocess(const StmtNode &naryNode, size_t start, std::set &specialArgs); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); + Operand *SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue); + void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, + std::vector &stackPostion); + void SelectRem(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isSigned, bool is64Bits); + void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); + void SelectCvtFloat2Float(Operand &resOpnd, Operand &opnd0, PrimType fromType, PrimType toType); + void SelectCvtFloat2Int(Operand &resOpnd, Operand &opnd0, PrimType itype, PrimType ftype); + void SelectCvtInt2Float(Operand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); + Operand *SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent); + void SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, Operand &opnd1, + PrimType primType); + MOperator SelectRelationMop(RelationOperator operatorType, RelationOperatorOpndPattern opndPattern, + bool is64Bits, bool IsBitmaskImmediate, bool isBitNumLessThan16) const; + Operand *SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + Operand *SelectRoundLibCall(RoundType 
roundType, const TypeCvtNode &node, Operand &opnd0); + Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); + Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); + Operand *SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp /* false for align down */); + int64 GetOrCreatSpillRegLocation(regno_t vrNum) { + AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetLocOfSpillRegister(vrNum)); + return static_cast(GetBaseOffset(*symLoc)); + } + void SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype); + void SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, uint32 dsize, Operand &src, + PrimType stype); + bool GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType, + LabelOperand &targetOpnd, Operand &opnd0); + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode); + void SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccallNode, PrimType primType); + void SelectAtomicStore(Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + void SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm); + void SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm); + void SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm); + void SelectMPLClinitCheck(const IntrinsiccallNode&); + void SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode); + void SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, Opcode op); + + Operand *SelectAArch64CSyncFetch(const maple::IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore); + /* Helper functions for translating complex Maple IR instructions/inrinsics */ + void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0); + LabelIdx CreateLabeledBB(StmtNode &stmt); + void SaveReturnValueInLocal(CallReturnVector &retVals, size_t index, PrimType primType, Operand &value, + StmtNode &parentStmt); + /* Translation for load-link store-conditional, and atomic RMW operations. 
*/ + MemOrd OperandToMemOrd(Operand &opnd) const; + MOperator PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bool acqRel) const; + RegOperand *SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire); + RegOperand *SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release); + + MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx i) override; + void ProcessLazyBinding() override; + bool CanLazyBinding(const Insn &insn) const; + void ConvertAdrpl12LdrToLdr(); + void ConvertAdrpLdrToIntrisic(); + bool IsStoreMop(MOperator mOp) const; + bool IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, + bool isIntactIndexed, bool isPostIndexed, bool isPreIndexed) const; + Insn &GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds); + Insn &GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds); + bool IsDuplicateAsmList(const MIRSymbol &sym) const; + RegOperand *CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, + LabelIdx jumpLabIdx); + RegOperand *CheckStringLengthLessThanEight(BB &bb, RegOperand &countOpnd, PrimType countPty, LabelIdx jumpLabIdx); + void GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString, RegOperand &patternString, + RegOperand &srcCountOpnd, RegOperand &patternLengthOpnd, + PrimType countPty, LabelIdx jumpLabIdx); + MemOperand *CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd); + MemOperand &CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset); + std::string GenerateMemOpndVerbose(const Operand &src) const; + RegOperand *PrepareMemcpyParamOpnd(bool isLo12, const MIRSymbol &symbol, int64 offsetVal, RegOperand &BaseReg); + RegOperand *PrepareMemcpyParamOpnd(int64 offset, Operand &exprOpnd); + RegOperand *PrepareMemcpyParamOpnd(uint64 copySize); + Insn *AggtStrLdrInsert(bool bothUnion, Insn *lastStrLdr, Insn &newStrLdr); + LabelIdx GetLabelInInsn(Insn &insn) override { + return static_cast(insn.GetOperand(AArch64isa::GetJumpTargetIdx(insn))).GetLabelIndex(); + } +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CGFUNC_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h new file mode 100644 index 0000000000000000000000000000000000000000..62792d0a7da6c5d99dac8a4b0797e2daa3380585 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h @@ -0,0 +1,1572 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_COLOR_RA_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_COLOR_RA_H +#include "reg_alloc.h" +#include "aarch64_operand.h" +#include "aarch64_insn.h" +#include "aarch64_abi.h" +#include "aarch64_cgfunc.h" +#include "loop.h" +#include "cg_dominance.h" +#include "cg_pre.h" + +namespace maplebe { +#define RESERVED_REGS + +#define USE_LRA +#define USE_SPLIT +#undef USE_BB_FREQUENCY +#define OPTIMIZE_FOR_PROLOG +#define REUSE_SPILLMEM +#undef COLOR_SPLIT +#define MOVE_COALESCE + +/* for robust test */ +#undef CONSISTENT_MEMOPND +#undef RANDOM_PRIORITY + +constexpr uint32 k32 = sizeof(int) * CHAR_BIT; +constexpr uint32 k64 = sizeof(int64) * CHAR_BIT; +constexpr uint32 kU64 = sizeof(uint64) * CHAR_BIT; + +enum RematLevel { + rematOff = 0, + rematConst = 1, + rematAddr = 2, + rematDreadLocal = 3, + rematDreadGlobal = 4 +}; + +template <typename T, typename Comparator = std::less<T>> +inline bool FindNotIn(const std::set<T, Comparator> &set, const T &item) { + return set.find(item) == set.end(); +} + +template <typename T, typename Hasher = std::hash<T>> +inline bool FindNotIn(const std::unordered_set<T, Hasher> &set, const T &item) { + return set.find(item) == set.end(); +} + +template <typename T> +inline bool FindNotIn(const MapleSet<T> &set, const T &item) { + return set.find(item) == set.end(); +} + +template <typename T> +inline bool FindNotIn(const MapleUnorderedSet<T> &set, const T &item) { + return set.find(item) == set.end(); +} + +template <typename T> +inline bool FindNotIn(const MapleList<T> &list, const T &item) { + return std::find(list.begin(), list.end(), item) == list.end(); +} + +template <typename T, typename Comparator = std::less<T>> +inline bool FindIn(const std::set<T, Comparator> &set, const T &item) { + return set.find(item) != set.end(); +} + +template <typename T, typename Hasher = std::hash<T>> +inline bool FindIn(const std::unordered_set<T, Hasher> &set, const T &item) { + return set.find(item) != set.end(); +} + +template <typename T> +inline bool FindIn(const MapleSet<T> &set, const T &item) { + return set.find(item) != set.end(); +} + +template <typename T> +inline bool FindIn(const MapleUnorderedSet<T> &set, const T &item) { + return set.find(item) != set.end(); +} + +template <typename T> +inline bool FindIn(const MapleList<T> &list, const T &item) { + return std::find(list.begin(), list.end(), item) != list.end(); +} + +inline bool IsBitArrElemSet(const uint64 *vec, const uint32 num) { + size_t index = num / kU64; + uint64 bit = num % kU64; + return vec[index] & (1ULL << bit); +} + +inline bool IsBBsetOverlap(const uint64 *vec1, const uint64 *vec2, uint32 bbBuckets) { + for (uint32 i = 0; i < bbBuckets; ++i) { + if ((vec1[i] & vec2[i]) != 0) { + return true; + } + } + return false; +} + +/* For each bb, record info pertain to allocation */ +/* + * This is per bb per LR. + * LU info is particular to a bb in a LR.
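+ * For example, a LR live through several bbs keeps one LiveUnit per member bb, + * recording the first/last reference positions (begin/end), its def/use counts, + * and whether a reload or restore is needed for that bb.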
+ */ +class LiveUnit { + public: + LiveUnit() = default; + ~LiveUnit() = default; + + void PrintLiveUnit() const; + + uint32 GetBegin() const { + return begin; + } + + void SetBegin(uint32 val) { + begin = val; + } + + uint32 GetEnd() const { + return end; + } + + void SetEnd(uint32 end) { + this->end = end; + } + + bool HasCall() const { + return hasCall; + } + + void SetHasCall(bool hasCall) { + this->hasCall = hasCall; + } + + uint32 GetDefNum() const { + return defNum; + } + + void SetDefNum(uint32 defNum) { + this->defNum = defNum; + } + + void IncDefNum() { + ++defNum; + } + + uint32 GetUseNum() const { + return useNum; + } + + void SetUseNum(uint32 useNum) { + this->useNum = useNum; + } + + void IncUseNum() { + ++useNum; + } + + bool NeedReload() const { + return needReload; + } + + void SetNeedReload(bool needReload) { + this->needReload = needReload; + } + + bool NeedRestore() const { + return needRestore; + } + + void SetNeedRestore(bool needRestore) { + this->needRestore = needRestore; + } + + private: + uint32 begin = 0; /* first encounter in bb */ + uint32 end = 0; /* last encounter in bb */ + bool hasCall = false; /* bb has a call */ + uint32 defNum = 0; + uint32 useNum = 0; /* used for priority calculation */ + bool needReload = false; + bool needRestore = false; +}; + +struct SortedBBCmpFunc { + bool operator()(const BB *lhs, const BB *rhs) const { + return (lhs->GetLevel() < rhs->GetLevel()); + } +}; + +enum refType : uint8 { + kIsUse = 0x1, + kIsDef = 0x2, + kIsCall = 0x4, +}; + +/* LR is for each global vreg. */ +class LiveRange { + public: + explicit LiveRange(MapleAllocator &allocator) + : lrAlloca(&allocator), + pregveto(allocator.Adapter()), + callDef(allocator.Adapter()), + forbidden(allocator.Adapter()), + prefs(allocator.Adapter()), + refMap(allocator.Adapter()), + luMap(allocator.Adapter()) {} + + ~LiveRange() = default; + + regno_t GetRegNO() const { + return regNO; + } + + void SetRegNO(regno_t val) { + regNO = val; + } + + uint32 GetID() const { + return id; + } + + void SetID(uint32 id) { + this->id = id; + } + + regno_t GetAssignedRegNO() const { + return assignedRegNO; + } + + void SetAssignedRegNO(regno_t val) { + assignedRegNO = val; + } + + uint32 GetNumCall() const { + return numCall; + } + + void SetNumCall(uint32 num) { + numCall = num; + } + + void IncNumCall() { + ++numCall; + } + + RegType GetRegType() const { + return regType; + } + + void SetRegType(RegType regType) { + this->regType = regType; + } + + float GetPriority() const { + return priority; + } + + void SetPriority(float priority) { + this->priority = priority; + } + + bool IsMustAssigned() const { + return mustAssigned; + } + + void SetMustAssigned() { + mustAssigned = true; + } + + void SetBBBuckets(uint32 bucketNum) { + bbBuckets = bucketNum; + } + + void SetRegBuckets(uint32 bucketNum) { + regBuckets = bucketNum; + } + + uint32 GetNumBBMembers() const { + return numBBMembers; + } + + void IncNumBBMembers() { + ++numBBMembers; + } + + void DecNumBBMembers() { + --numBBMembers; + } + + void InitBBMember(MemPool &memPool, size_t size) { + bbMember = memPool.NewArray<uint64>(size); + errno_t ret = memset_s(bbMember, size * sizeof(uint64), 0, size * sizeof(uint64)); + CHECK_FATAL(ret == EOK, "call memset_s failed"); + } + + uint64 *GetBBMember() { + return bbMember; + } + + const uint64 *GetBBMember() const { + return bbMember; + } + + uint64 GetBBMemberElem(int32 index) const { + return bbMember[index]; + } + + void SetBBMemberElem(int32 index, uint64 elem) { + bbMember[index] = elem; + } + + 
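/* + * bbMember is a bit array of bbBuckets 64-bit words: bb n lives at word (n / kU64), + * bit (n % kU64), e.g. bb 70 maps to bbMember[1], bit 6. The helpers below flip that + * bit and adjust the cached numBBMembers count only on a real 0 <-> 1 transition. + */ + 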
void SetMemberBitArrElem(uint32 bbID) { + uint32 index = bbID / kU64; + uint64 bit = bbID % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBMemberElem(index) & mask) == 0) { + IncNumBBMembers(); + SetBBMemberElem(index, GetBBMemberElem(index) | mask); + } + } + + void UnsetMemberBitArrElem(uint32 bbID) { + uint32 index = bbID / kU64; + uint64 bit = bbID % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBMemberElem(index) & mask) != 0) { + DecNumBBMembers(); + SetBBMemberElem(index, GetBBMemberElem(index) & (~mask)); + } + } + + void SetConflictBitArrElem(regno_t regNO) { + uint32 index = regNO / kU64; + uint64 bit = regNO % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBConflictElem(index) & mask) == 0) { + IncNumBBConflicts(); + SetBBConflictElem(index, GetBBConflictElem(index) | mask); + } + } + + void UnsetConflictBitArrElem(regno_t regNO) { + uint32 index = regNO / kU64; + uint64 bit = regNO % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBConflictElem(index) & mask) != 0) { + DecNumBBConflicts(); + SetBBConflictElem(index, GetBBConflictElem(index) & (~mask)); + } + } + + void InitPregveto() { + pregveto.clear(); + pregveto.resize(kMaxRegNum); + callDef.clear(); + callDef.resize(kMaxRegNum); + } + + bool GetPregveto(regno_t regno) const { + return pregveto[regno]; + } + + size_t GetPregvetoSize() const { + return numPregveto; + } + + void InsertElemToPregveto(regno_t regno) { + if (!pregveto[regno]) { + pregveto[regno] = true; + ++numPregveto; + } + } + + bool GetCallDef(regno_t regno) const { + return callDef[regno]; + } + + void InsertElemToCallDef(regno_t regno) { + if (!callDef[regno]) { + callDef[regno] = true; + ++numCallDef; + } + } + + void SetCrossCall() { + crossCall = true; + } + + bool GetCrossCall() const { + return crossCall; + } + + void InitForbidden() { + forbidden.clear(); + forbidden.resize(kMaxRegNum); + } + + const MapleVector<bool> &GetForbidden() const { + return forbidden; + } + + bool GetForbidden(regno_t regno) const { + return forbidden[regno]; + } + + size_t GetForbiddenSize() const { + return numForbidden; + } + + void InsertElemToForbidden(regno_t regno) { + if (!forbidden[regno]) { + forbidden[regno] = true; + ++numForbidden; + } + } + + void EraseElemFromForbidden(regno_t regno) { + if (forbidden[regno]) { + forbidden[regno] = false; + --numForbidden; + } + } + + void ClearForbidden() { + forbidden.clear(); + } + + uint32 GetNumBBConflicts() const { + return numBBConflicts; + } + + void IncNumBBConflicts() { + ++numBBConflicts; + } + + void DecNumBBConflicts() { + --numBBConflicts; + } + + void InitBBConflict(MemPool &memPool, size_t size) { + bbConflict = memPool.NewArray<uint64>(size); + errno_t ret = memset_s(bbConflict, size * sizeof(uint64), 0, size * sizeof(uint64)); + CHECK_FATAL(ret == EOK, "call memset_s failed"); + } + + const uint64 *GetBBConflict() const { + return bbConflict; + } + + uint64 GetBBConflictElem(int32 index) const { + DEBUG_ASSERT(index < regBuckets, "out of bbConflict"); + return bbConflict[index]; + } + + void SetBBConflictElem(int32 index, uint64 elem) { + DEBUG_ASSERT(index < regBuckets, "out of bbConflict"); + bbConflict[index] = elem; + } + + void SetOldConflict(uint64 *conflict) { + oldConflict = conflict; + } + + const uint64 *GetOldConflict() const { + return oldConflict; + } + + const MapleSet<regno_t> &GetPrefs() const { + return prefs; + } + + void InsertElemToPrefs(regno_t regNO) { + (void)prefs.insert(regNO); + } + + const MapleMap<uint32, MapleMap<uint32, uint32>*> GetRefs() const { + return refMap; + } + + const MapleMap<uint32, uint32> GetRefs(uint32 bbId) const { + return
*(refMap.find(bbId)->second); + } + + void AddRef(uint32 bbId, uint32 pos, uint32 mark) { + if (refMap.find(bbId) == refMap.end()) { + auto point = lrAlloca->New<MapleMap<uint32, uint32>>(lrAlloca->Adapter()); + (void)point->emplace(std::pair<uint32, uint32>(pos, mark)); + (void)refMap.emplace(std::pair<uint32, MapleMap<uint32, uint32>*>(bbId, point)); + } else { + auto &bbPoint = (refMap.find(bbId))->second; + if (bbPoint->find(pos) == bbPoint->end()) { + (void)bbPoint->emplace(std::pair<uint32, uint32>(pos, mark)); + } else { + auto posVal = bbPoint->find(pos)->second; + (void)bbPoint->erase(bbPoint->find(pos)); + (void)bbPoint->emplace(std::pair<uint32, uint32>(pos, posVal | mark)); + } + } + } + + const MapleMap<uint32, LiveUnit*> &GetLuMap() const { + return luMap; + } + + MapleMap<uint32, LiveUnit*>::iterator FindInLuMap(uint32 index) { + return luMap.find(index); + } + + MapleMap<uint32, LiveUnit*>::iterator EndOfLuMap() { + return luMap.end(); + } + + MapleMap<uint32, LiveUnit*>::iterator EraseLuMap(MapleMap<uint32, LiveUnit*>::iterator it) { + return luMap.erase(it); + } + + void SetElemToLuMap(uint32 key, LiveUnit &value) { + luMap[key] = &value; + } + + LiveUnit *GetLiveUnitFromLuMap(uint32 key) { + return luMap[key]; + } + + const LiveUnit *GetLiveUnitFromLuMap(uint32 key) const { + auto it = luMap.find(key); + DEBUG_ASSERT(it != luMap.end(), "can't find live unit"); + return it->second; + } + + const LiveRange *GetSplitLr() const { + return splitLr; + } + + void SetSplitLr(LiveRange &lr) { + splitLr = &lr; + } + +#ifdef OPTIMIZE_FOR_PROLOG + uint32 GetNumDefs() const { + return numDefs; + } + + void IncNumDefs() { + ++numDefs; + } + + void SetNumDefs(uint32 val) { + numDefs = val; + } + + uint32 GetNumUses() const { + return numUses; + } + + void IncNumUses() { + ++numUses; + } + + void SetNumUses(uint32 val) { + numUses = val; + } + + uint32 GetFrequency() const { + return frequency; + } + + void SetFrequency(uint32 frequency) { + this->frequency = frequency; + } +#endif /* OPTIMIZE_FOR_PROLOG */ + + MemOperand *GetSpillMem() { + return spillMem; + } + + const MemOperand *GetSpillMem() const { + return spillMem; + } + + void SetSpillMem(MemOperand& memOpnd) { + spillMem = &memOpnd; + } + + regno_t GetSpillReg() const { + return spillReg; + } + + void SetSpillReg(regno_t spillReg) { + this->spillReg = spillReg; + } + + uint32 GetSpillSize() const { + return spillSize; + } + + void SetSpillSize(uint32 size) { + spillSize = size; + } + + bool IsSpilled() const { + return spilled; + } + + void SetSpilled(bool spill) { + spilled = spill; + } + + bool HasDefUse() const { + return hasDefUse; + } + + void SetDefUse() { + hasDefUse = true; + } + + bool GetProcessed() const { + return proccessed; + } + + void SetProcessed() { + proccessed = true; + } + + bool IsNonLocal() const { + return isNonLocal; + } + + void SetIsNonLocal(bool isNonLocal) { + this->isNonLocal = isNonLocal; + } + + void SetRematLevel(uint32 val) { + this->rematLevel = val; + } + + uint32 GetRematLevel() const { + return this->rematLevel; + } + + Opcode GetOp() const { + return op; + } + + const MIRSymbol *GetRematSymbol() const { + DEBUG_ASSERT(op == OP_dread || op == OP_addrof, "Remat symbol is invalid"); + return rematInfo.sym; + } + + FieldID GetRematFieldID() const { + DEBUG_ASSERT(op == OP_dread || op == OP_addrof, "Remat field ID is invalid"); + return fieldID; + } + + void SetRematerializable(const MIRConst *c) { + op = OP_constval; + rematInfo.mirConst = c; + } + + void SetRematerializable(Opcode opcode, const MIRSymbol *symbol, FieldID fieldId, bool addrUp) { + this->op = opcode; + rematInfo.sym = symbol; + this->fieldID = fieldId; + this->addrUpper = addrUp; + } + + void CopyRematerialization(const
LiveRange &lr) { + op = lr.op; + rematInfo = lr.rematInfo; + fieldID = lr.fieldID; + } + + bool IsRematerializable(AArch64CGFunc &cgFunc, uint8 rematLevel) const; + std::vector<Insn*> Rematerialize(AArch64CGFunc *cgFunc, RegOperand &regOp); + + private: + MapleAllocator *lrAlloca; + regno_t regNO = 0; + uint32 id = 0; /* for priority tie breaker */ + regno_t assignedRegNO = 0; /* color assigned */ + uint32 numCall = 0; + RegType regType = kRegTyUndef; + float priority = 0.0; + bool mustAssigned = false; + uint32 bbBuckets = 0; /* size of bit array for bb (each bucket == 64 bits) */ + uint32 regBuckets = 0; /* size of bit array for reg (each bucket == 64 bits) */ + uint32 numBBMembers = 0; /* number of bits set in bbMember */ + uint64 *bbMember = nullptr; /* Same as smember, but use bit array */ + + MapleVector<bool> pregveto; /* pregs cannot be assigned -- SplitLr may clear forbidden */ + MapleVector<bool> callDef; /* pregs cannot be assigned -- SplitLr may clear forbidden */ + MapleVector<bool> forbidden; /* pregs cannot be assigned */ + uint32 numPregveto = 0; + uint32 numCallDef = 0; + uint32 numForbidden = 0; + bool crossCall = false; + + uint32 numBBConflicts = 0; /* number of bits set in bbConflict */ + uint64 *bbConflict = nullptr; /* vreg interference from graph neighbors (bit) */ + uint64 *oldConflict = nullptr; + MapleSet<regno_t> prefs; /* pregs that prefer */ + MapleMap<uint32, MapleMap<uint32, uint32>*> refMap; + MapleMap<uint32, LiveUnit*> luMap; /* info for each bb */ + LiveRange *splitLr = nullptr; /* The 1st part of the split */ +#ifdef OPTIMIZE_FOR_PROLOG + uint32 numDefs = 0; + uint32 numUses = 0; + uint32 frequency = 0; +#endif /* OPTIMIZE_FOR_PROLOG */ + MemOperand *spillMem = nullptr; /* memory operand used for spill, if any */ + regno_t spillReg = 0; /* register operand for spill at current point */ + uint32 spillSize = 0; /* 32 or 64 bit spill */ + bool spilled = false; /* color assigned */ + bool hasDefUse = false; /* has regDS */ + bool proccessed = false; + bool isNonLocal = false; + uint32 rematLevel = 0; + Opcode op = OP_undef; /* OP_constval, OP_addrof or OP_dread if rematerializable */ + union RematInfo { + const MIRConst *mirConst; + const MIRSymbol *sym; + } rematInfo; /* info for rematerializing value */ + FieldID fieldID = 0; /* used only when op is OP_addrof or OP_dread */ + bool addrUpper = false; /* indicates the upper bits of an addrof */ +}; + +/* One per bb, to communicate local usage to global RA */ +class LocalRaInfo { + public: + explicit LocalRaInfo(MapleAllocator &allocator) + : defCnt(allocator.Adapter()), + useCnt(allocator.Adapter()) {} + + ~LocalRaInfo() = default; + + const MapleMap<regno_t, uint16> &GetDefCnt() const { + return defCnt; + } + + uint16 GetDefCntElem(regno_t regNO) { + return defCnt[regNO]; + } + + void SetDefCntElem(regno_t key, uint16 value) { + defCnt[key] = value; + } + + const MapleMap<regno_t, uint16> &GetUseCnt() const { + return useCnt; + } + + uint16 GetUseCntElem(regno_t regNO) { + return useCnt[regNO]; + } + + void SetUseCntElem(regno_t key, uint16 value) { + useCnt[key] = value; + } + + private: + MapleMap<regno_t, uint16> defCnt; + MapleMap<regno_t, uint16> useCnt; +}; + +/* For each bb, record info pertain to allocation */ +class BBAssignInfo { + public: + explicit BBAssignInfo(MapleAllocator &allocator) + : globalsAssigned(allocator.Adapter()), + regMap(allocator.Adapter()) {} + + ~BBAssignInfo() = default; + + uint32 GetIntLocalRegsNeeded() const { + return intLocalRegsNeeded; + } + + void SetIntLocalRegsNeeded(uint32 num) { + intLocalRegsNeeded = num; + } + + uint32 GetFpLocalRegsNeeded() const { + return fpLocalRegsNeeded; + } + + void
SetFpLocalRegsNeeded(uint32 num) { + fpLocalRegsNeeded = num; + } + + void InitGlobalAssigned() { + globalsAssigned.clear(); + globalsAssigned.resize(kMaxRegNum); + } + + bool GetGlobalsAssigned(regno_t regNO) const { + return globalsAssigned[regNO]; + } + + void InsertElemToGlobalsAssigned(regno_t regNO) { + globalsAssigned[regNO] = true; + } + + void EraseElemToGlobalsAssigned(regno_t regNO) { + globalsAssigned[regNO] = false; + } + + const MapleMap<regno_t, regno_t> &GetRegMap() const { + return regMap; + } + + bool HasRegMap(regno_t regNOKey) const { + return (regMap.find(regNOKey) != regMap.end()); + } + + regno_t GetRegMapElem(regno_t regNO) { + return regMap[regNO]; + } + + void SetRegMapElem(regno_t regNOKey, regno_t regNOValue) { + regMap[regNOKey] = regNOValue; + } + + private: + uint32 intLocalRegsNeeded = 0; /* num local reg needs for each bb */ + uint32 fpLocalRegsNeeded = 0; /* num local reg needs for each bb */ + MapleVector<bool> globalsAssigned; /* globals used in a bb */ + MapleMap<regno_t, regno_t> regMap; /* local vreg to preg mapping */ +}; + +class FinalizeRegisterInfo { + public: + explicit FinalizeRegisterInfo(MapleAllocator &allocator) + : defOperands(allocator.Adapter()), + defIdx(allocator.Adapter()), + useOperands(allocator.Adapter()), + useIdx(allocator.Adapter()) {} + + ~FinalizeRegisterInfo() = default; + void ClearInfo() { + memOperandIdx = 0; + baseOperand = nullptr; + offsetOperand = nullptr; + defOperands.clear(); + defIdx.clear(); + useOperands.clear(); + useIdx.clear(); + } + + void SetBaseOperand(Operand &opnd, const int32 idx) { + baseOperand = &opnd; + memOperandIdx = idx; + } + + void SetOffsetOperand(Operand &opnd) { + offsetOperand = &opnd; + } + + void SetDefOperand(Operand &opnd, const int32 idx) { + defOperands.emplace_back(&opnd); + defIdx.emplace_back(idx); + } + + void SetUseOperand(Operand &opnd, const int32 idx) { + useOperands.emplace_back(&opnd); + useIdx.emplace_back(idx); + } + + int32 GetMemOperandIdx() const { + return memOperandIdx; + } + + const Operand *GetBaseOperand() const { + return baseOperand; + } + + const Operand *GetOffsetOperand() const { + return offsetOperand; + } + + size_t GetDefOperandsSize() const { + return defOperands.size(); + } + + const Operand *GetDefOperandsElem(size_t index) const { + return defOperands[index]; + } + + int32 GetDefIdxElem(size_t index) const { + return defIdx[index]; + } + + size_t GetUseOperandsSize() const { + return useOperands.size(); + } + + const Operand *GetUseOperandsElem(size_t index) const { + return useOperands[index]; + } + + int32 GetUseIdxElem(size_t index) const { + return useIdx[index]; + } + + private: + int32 memOperandIdx = 0; + Operand *baseOperand = nullptr; + Operand *offsetOperand = nullptr; + MapleVector<Operand*> defOperands; + MapleVector<int32> defIdx; + MapleVector<Operand*> useOperands; + MapleVector<int32> useIdx; +}; + +class LocalRegAllocator { + public: + LocalRegAllocator(CGFunc &cgFunc, MapleAllocator &allocator) + : intRegAssignmentMap(allocator.Adapter()), + fpRegAssignmentMap(allocator.Adapter()), + useInfo(allocator.Adapter()), + defInfo(allocator.Adapter()) { + buckets = (cgFunc.GetMaxRegNum() / kU64) + 1; + intRegAssigned = cgFunc.GetMemoryPool()->NewArray<uint64>(buckets); + fpRegAssigned = cgFunc.GetMemoryPool()->NewArray<uint64>(buckets); + intRegSpilled = cgFunc.GetMemoryPool()->NewArray<uint64>(buckets); + fpRegSpilled = cgFunc.GetMemoryPool()->NewArray<uint64>(buckets); + } + + ~LocalRegAllocator() = default; + + void ClearLocalRaInfo() { + ClearBitArrElement(intRegAssigned); + ClearBitArrElement(fpRegAssigned); + intRegAssignmentMap.clear(); + 
fpRegAssignmentMap.clear(); + intPregUsed = 0; + fpPregUsed = 0; + ClearBitArrElement(intRegSpilled); + ClearBitArrElement(fpRegSpilled); + numIntPregUsed = 0; + numFpPregUsed = 0; + } + + regno_t RegBaseUpdate(bool isInt) const { + return isInt ? 0 : V0 - R0; + } + + bool IsInRegAssigned(regno_t regNO, bool isInt) const { + uint64 *regAssigned = nullptr; + if (isInt) { + regAssigned = intRegAssigned; + } else { + regAssigned = fpRegAssigned; + } + return IsBitArrElemSet(regAssigned, regNO); + } + + void SetRegAssigned(regno_t regNO, bool isInt) const { + if (isInt) { + SetBitArrElement(intRegAssigned, regNO); + } else { + SetBitArrElement(fpRegAssigned, regNO); + } + } + + regno_t GetRegAssignmentItem(bool isInt, regno_t regKey) { + return isInt ? intRegAssignmentMap[regKey] : fpRegAssignmentMap[regKey]; + } + + void SetRegAssignmentMap(bool isInt, regno_t regKey, regno_t regValue) { + if (isInt) { + intRegAssignmentMap[regKey] = regValue; + } else { + fpRegAssignmentMap[regKey] = regValue; + } + } + + /* only for HandleLocalRaDebug */ + uint64 GetPregUsed(bool isInt) const { + if (isInt) { + return intPregUsed; + } else { + return fpPregUsed; + } + } + + void SetPregUsed(regno_t regNO, bool isInt) { + uint64 mask = 0; + if (isInt) { + mask = 1ULL << (regNO - R0); + if ((intPregUsed & mask) == 0) { + ++numIntPregUsed; + intPregUsed |= mask; + } + } else { + mask = 1ULL << (regNO - V0); + if ((fpPregUsed & mask) == 0) { + ++numFpPregUsed; + fpPregUsed |= mask; + } + } + } + + bool isInRegSpilled(regno_t regNO, bool isInt) const { + bool isSet; + if (isInt) { + isSet = IsBitArrElemSet(intRegSpilled, regNO); + } else { + isSet = IsBitArrElemSet(fpRegSpilled, regNO); + } + return isSet; + } + + void SetRegSpilled(regno_t regNO, bool isInt) const { + if (isInt) { + SetBitArrElement(intRegSpilled, regNO); + } else { + SetBitArrElement(fpRegSpilled, regNO); + } + } + + uint64 GetPregs(bool isInt) const { + if (isInt) { + return intPregs; + } else { + return fpPregs; + } + } + + void SetPregs(regno_t regNO, bool isInt) { + if (isInt) { + intPregs |= 1ULL << (regNO - RegBaseUpdate(true)); + } else { + fpPregs |= 1ULL << (regNO - RegBaseUpdate(false)); + } + } + + void ClearPregs(regno_t regNO, bool isInt) { + if (isInt) { + intPregs &= ~(1ULL << (regNO - RegBaseUpdate(true))); + } else { + fpPregs &= ~(1ULL << (regNO - RegBaseUpdate(false))); + } + } + + bool IsPregAvailable(regno_t regNO, bool isInt) const { + bool isAvailable; + if (isInt) { + isAvailable = intPregs & (1ULL << (regNO - RegBaseUpdate(true))); + } else { + isAvailable = fpPregs & (1ULL << (regNO - RegBaseUpdate(false))); + } + return isAvailable; + } + + void InitPregs(uint32 intMax, uint32 fpMax, bool hasYield, const MapleSet<uint32> &intSpillRegSet, + const MapleSet<uint32> &fpSpillRegSet) { + uint32 intBase = R0; + uint32 fpBase = V0; + intPregs = (1ULL << (intMax + 1)) - 1; + fpPregs = (1ULL << (((fpMax + 1) + fpBase) - RegBaseUpdate(false))) - 1; + for (uint32 regNO : intSpillRegSet) { + ClearPregs(regNO + intBase, true); + } + for (uint32 regNO : fpSpillRegSet) { + ClearPregs(regNO + fpBase, false); + } + if (hasYield) { + ClearPregs(RYP, true); + } +#ifdef RESERVED_REGS + intPregs &= ~(1ULL << R16); + intPregs &= ~(1ULL << R17); +#endif /* RESERVED_REGS */ + } + + const MapleMap<regno_t, regno_t> &GetIntRegAssignmentMap() const { + return intRegAssignmentMap; + } + + const MapleMap<regno_t, regno_t> &GetFpRegAssignmentMap() const { + return fpRegAssignmentMap; + } + + const MapleMap<regno_t, uint16> &GetUseInfo() const { + return useInfo; + } + + void SetUseInfoElem(regno_t regNO, uint16
info) { + useInfo[regNO] = info; + } + + void IncUseInfoElem(regno_t regNO) { + if (useInfo.find(regNO) != useInfo.end()) { + ++useInfo[regNO]; + } + } + + uint16 GetUseInfoElem(regno_t regNO) { + return useInfo[regNO]; + } + + void ClearUseInfo() { + useInfo.clear(); + } + + const MapleMap<regno_t, uint16> &GetDefInfo() const { + return defInfo; + } + + void SetDefInfoElem(regno_t regNO, uint16 info) { + defInfo[regNO] = info; + } + + uint16 GetDefInfoElem(regno_t regNO) { + return defInfo[regNO]; + } + + void IncDefInfoElem(regno_t regNO) { + if (defInfo.find(regNO) != defInfo.end()) { + ++defInfo[regNO]; + } + } + + void ClearDefInfo() { + defInfo.clear(); + } + + uint32 GetNumIntPregUsed() const { + return numIntPregUsed; + } + + uint32 GetNumFpPregUsed() const { + return numFpPregUsed; + } + + private: + void ClearBitArrElement(uint64 *vec) const { + for (uint32 i = 0; i < buckets; ++i) { + vec[i] = 0UL; + } + } + + void SetBitArrElement(uint64 *vec, regno_t regNO) const { + uint32 index = regNO / kU64; + uint64 bit = regNO % kU64; + vec[index] |= 1ULL << bit; + } + + /* The following local vars keeps track of allocation information in bb. */ + uint64 *intRegAssigned; /* in this set if vreg is assigned */ + uint64 *fpRegAssigned; + MapleMap<regno_t, regno_t> intRegAssignmentMap; /* vreg -> preg map, which preg is the vreg assigned */ + MapleMap<regno_t, regno_t> fpRegAssignmentMap; + uint64 intPregUsed = 0; /* pregs used in bb */ + uint64 fpPregUsed = 0; + uint64 *intRegSpilled; /* on this list if vreg is spilled */ + uint64 *fpRegSpilled; + + uint64 intPregs = 0; /* available regs for assignement */ + uint64 fpPregs = 0; + MapleMap<regno_t, uint16> useInfo; /* copy of local ra info for useCnt */ + MapleMap<regno_t, uint16> defInfo; /* copy of local ra info for defCnt */ + + uint32 numIntPregUsed = 0; + uint32 numFpPregUsed = 0; + uint32 buckets; +}; + +class SplitBBInfo { + public: + SplitBBInfo() = default; + + ~SplitBBInfo() = default; + + BB *GetCandidateBB() { + return candidateBB; + } + + const BB *GetCandidateBB() const { + return candidateBB; + } + + const BB *GetStartBB() const { + return startBB; + } + + void SetCandidateBB(BB &bb) { + candidateBB = &bb; + } + + void SetStartBB(BB &bb) { + startBB = &bb; + } + + private: + BB *candidateBB = nullptr; + BB *startBB = nullptr; +}; + +class GraphColorRegAllocator : public RegAllocator { + public: + GraphColorRegAllocator(CGFunc &cgFunc, MemPool &memPool, DomAnalysis &dom) + : RegAllocator(cgFunc, memPool), + domInfo(dom), + bbVec(alloc.Adapter()), + vregLive(alloc.Adapter()), + pregLive(alloc.Adapter()), + lrMap(alloc.Adapter()), + localRegVec(alloc.Adapter()), + bbRegInfo(alloc.Adapter()), + unconstrained(alloc.Adapter()), + unconstrainedPref(alloc.Adapter()), + constrained(alloc.Adapter()), + mustAssigned(alloc.Adapter()), +#ifdef OPTIMIZE_FOR_PROLOG + intDelayed(alloc.Adapter()), + fpDelayed(alloc.Adapter()), +#endif /* OPTIMIZE_FOR_PROLOG */ + intCallerRegSet(alloc.Adapter()), + intCalleeRegSet(alloc.Adapter()), + intSpillRegSet(alloc.Adapter()), + fpCallerRegSet(alloc.Adapter()), + fpCalleeRegSet(alloc.Adapter()), + fpSpillRegSet(alloc.Adapter()), + intCalleeUsed(alloc.Adapter()), + fpCalleeUsed(alloc.Adapter()) { + constexpr uint32 kNumInsnThreashold = 30000; + numVregs = cgFunc.GetMaxVReg(); + localRegVec.resize(cgFunc.NumBBs()); + bbRegInfo.resize(cgFunc.NumBBs()); + if (CGOptions::DoMultiPassColorRA() && cgFunc.GetMirModule().IsCModule()) { + uint32 cnt = 0; + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + ++cnt; + } + } + DEBUG_ASSERT(cnt <= cgFunc.GetTotalNumberOfInstructions(), "Incorrect insn
count"); + if (cnt <= kNumInsnThreashold) { + doMultiPass = true; + doLRA = false; + doOptProlog = false; + } + } + } + + ~GraphColorRegAllocator() override = default; + + bool AllocateRegisters() override; + + enum SpillMemCheck : uint8 { + kSpillMemPre, + kSpillMemPost, + }; + + LiveRange *GetLiveRange(regno_t regNO) { + auto it = lrMap.find(regNO); + if (it != lrMap.end()) { + return it->second; + } else { + return nullptr; + } + } + LiveRange *GetLiveRange(regno_t regNO) const { + auto it = lrMap.find(regNO); + if (it != lrMap.end()) { + return it->second; + } else { + return nullptr; + } + } + const MapleMap &GetLrMap() const { + return lrMap; + } + Insn *SpillOperand(Insn &insn, const Operand &opnd, bool isDef, RegOperand &phyOpnd, bool forCall = false); + private: + struct SetLiveRangeCmpFunc { + bool operator()(const LiveRange *lhs, const LiveRange *rhs) const { + if (fabs(lhs->GetPriority() - rhs->GetPriority()) <= 1e-6) { + /* + * This is to ensure the ordering is consistent as the reg# + * differs going through VtableImpl.mpl file. + */ + if (lhs->GetID() == rhs->GetID()) { + return lhs->GetRegNO() < rhs->GetRegNO(); + } else { + return lhs->GetID() < rhs->GetID(); + } + } + return (lhs->GetPriority() > rhs->GetPriority()); + } + }; + + template + void ForEachBBArrElem(const uint64 *vec, Func functor) const; + + template + void ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const; + + template + void ForEachRegArrElem(const uint64 *vec, Func functor) const; + + void PrintLiveUnitMap(const LiveRange &lr) const; + void PrintLiveRangeConflicts(const LiveRange &lr) const; + void PrintLiveBBBit(const LiveRange &li) const; + void PrintLiveRange(const LiveRange &li, const std::string &str) const; + void PrintLiveRanges() const; + void PrintLocalRAInfo(const std::string &str) const; + void PrintBBAssignInfo() const; + void PrintBBs() const; + + uint32 MaxIntPhysRegNum() const; + uint32 MaxFloatPhysRegNum() const; + bool IsReservedReg(AArch64reg regNO) const; + void InitFreeRegPool(); + void InitCCReg(); + bool IsYieldPointReg(regno_t regNO) const; + bool IsUnconcernedReg(regno_t regNO) const; + bool IsUnconcernedReg(const RegOperand ®Opnd) const; + LiveRange *NewLiveRange(); + void CalculatePriority(LiveRange &lr) const; + bool CreateLiveRangeHandleLocal(regno_t regNO, const BB &bb, bool isDef); + LiveRange *CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, uint32 currId); + void CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currPoint, bool updateCount); + bool SetupLiveRangeByOpHandlePhysicalReg(const RegOperand &op, Insn &insn, regno_t regNO, bool isDef); + void SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses); + void SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint); + bool UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const; + void UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn); + void ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, + const Operand &opnd) const; + void SetOpndConflict(const Insn &insn, bool onlyDef); + void UpdateOpndConflict(const Insn &insn, bool multiDef); + void SetLrMustAssign(const RegOperand *regOpnd); + void SetupMustAssignedLiveRanges(const Insn &insn); + void ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef); + void ComputeLiveRangesForEachUseOperand(Insn &insn); + void ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn); + void ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint); 
+ void ComputeLiveRanges(); + MemOperand *CreateSpillMem(uint32 spillIdx, SpillMemCheck check); + bool CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const; + void CheckInterference(LiveRange &lr1, LiveRange &lr2) const; + void BuildInterferenceGraphSeparateIntFp(std::vector &intLrVec, std::vector &fpLrVec); + void BuildInterferenceGraph(); + void SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO); + bool HaveAvailableColor(const LiveRange &lr, uint32 num) const; + void Separate(); + void SplitAndColorForEachLr(MapleVector &targetLrVec); + void SplitAndColor(); + void ColorForOptPrologEpilog(); + bool IsLocalReg(regno_t regNO) const; + bool IsLocalReg(const LiveRange &lr) const; + void HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const; + void HandleLocalRegAssignment(regno_t regNO, LocalRegAllocator &localRa, bool isInt); + void UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAllocator &localRa, bool isDef, bool isInt) const; + void UpdateLocalRegConflict(regno_t regNO, LocalRegAllocator &localRa, bool isInt); + void HandleLocalReg(Operand &op, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo, bool isDef, bool isInt); + void LocalRaRegSetEraseReg(LocalRegAllocator &localRa, regno_t regNO) const; + bool LocalRaInitRegSet(LocalRegAllocator &localRa, uint32 bbId); + void LocalRaInitAllocatableRegs(LocalRegAllocator &localRa, uint32 bbId); + void LocalRaForEachDefOperand(const Insn &insn, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo); + void LocalRaForEachUseOperand(const Insn &insn, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo); + void LocalRaPrepareBB(BB &bb, LocalRegAllocator &localRa); + void LocalRaFinalAssignment(const LocalRegAllocator &localRa, BBAssignInfo &bbInfo); + void LocalRaDebug(const BB &bb, const LocalRegAllocator &localRa) const; + void LocalRegisterAllocator(bool allocate); + MemOperand *GetSpillOrReuseMem(LiveRange &lr, uint32 regSize, bool &isOutOfRange, Insn &insn, bool isDef); + void SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); + void SpillOperandForSpillPost(Insn &insn, const Operand &opnd, + RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); + MemOperand *GetConsistentReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, + RegType regType); + MemOperand *GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, + RegType regType); + MemOperand *GetReuseMem(uint32 vregNO, uint32 size, RegType regType); + MemOperand *GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, AArch64reg regNO, bool &isOutOfRange) const; + bool SetAvailableSpillReg(std::unordered_set &cannotUseReg, LiveRange &lr, uint64 &usedRegMask); + void CollectCannotUseReg(std::unordered_set &cannotUseReg, const LiveRange &lr, Insn &insn); + regno_t PickRegForSpill(uint64 &usedRegMask, RegType regType, uint32 spillIdx, bool &needSpillLr); + bool SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, uint64 &usedRegMask, bool isDef); + bool GetSpillReg(Insn &insn, LiveRange &lr, const uint32 &spillIdx, uint64 &usedRegMask, bool isDef); + RegOperand *GetReplaceOpndForLRA(Insn &insn, const Operand &opnd, uint32 &spillIdx, uint64 &usedRegMask, bool isDef); + bool EncountPrevRef(const BB &pred, LiveRange &lr, bool isDef, std::vector& visitedMap); + bool FoundPrevBeforeCall(Insn &insn, LiveRange &lr, bool isDef); + bool EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector& visitedMap); + bool 
FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef); + bool HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const; + bool HaveNextDefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const; + bool NeedCallerSave(Insn &insn, LiveRange &lr, bool isDef); + RegOperand *GetReplaceOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, uint64 &usedRegMask, bool isDef); + void MarkCalleeSaveRegs(); + void MarkUsedRegs(Operand &opnd, uint64 &usedRegMask); + uint64 FinalizeRegisterPreprocess(FinalizeRegisterInfo &fInfo, const Insn &insn, bool &needProcess); + void SplitVregAroundLoop(const CGFuncLoops &loop, const std::vector &lrs, + BB &headerPred, BB &exitSucc, const std::set &cands); + bool LoopNeedSplit(const CGFuncLoops &loop, std::set &cands); + bool LrGetBadReg(const LiveRange &lr) const; + void AnalysisLoopPressureAndSplit(const CGFuncLoops &loop); + void AnalysisLoop(const CGFuncLoops &); + void OptCallerSave(); + void FinalizeRegisters(); + void GenerateSpillFillRegs(const Insn &insn); + RegOperand *CreateSpillFillCode(const RegOperand &opnd, Insn &insn, uint32 spillCnt, bool isdef = false); + bool SpillLiveRangeForSpills(); + + MapleVector::iterator GetHighPriorityLr(MapleVector &lrSet) const; + void UpdateForbiddenForNeighbors(const LiveRange &lr) const; + void UpdatePregvetoForNeighbors(const LiveRange &lr) const; + regno_t FindColorForLr(const LiveRange &lr) const; + regno_t TryToAssignCallerSave(const LiveRange &lr) const; + bool ShouldUseCallee(LiveRange &lr, const MapleSet &calleeUsed, + const MapleVector &delayed) const; + void AddCalleeUsed(regno_t regNO, RegType regType); + bool AssignColorToLr(LiveRange &lr, bool isDelayed = false); + void PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, std::set &candidateInLoop, + std::set &defInLoop); + bool UseIsUncovered(const BB &bb, const BB &startBB, std::vector &visitedBB); + void FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, bool &remove, + std::set &candidateInLoop, + std::set &defInLoop); + void FindBBSharedInSplit(LiveRange &lr, + const std::set &candidateInLoop, + std::set &defInLoop); + void ComputeBBForNewSplit(LiveRange &newLr, LiveRange &oldLr); + void ClearLrBBFlags(const std::set &member) const; + void ComputeBBForOldSplit(LiveRange &newLr, LiveRange &oldLr); + bool LrCanBeColored(const LiveRange &lr, const BB &bbAdded, std::unordered_set &conflictRegs); + void MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const; + bool ContainsLoop(const CGFuncLoops &loop, const std::set &loops) const; + void GetAllLrMemberLoops(LiveRange &lr, std::set &loop); + bool SplitLrShouldSplit(LiveRange &lr); + bool SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, std::unordered_set &conflictRegs); + void SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, const std::set &oldLoops, + const std::set &newLoops); + void SplitLrFixNewLrCallsAndRlod(LiveRange &newLr, const std::set &origLoops); + void SplitLrFixOrigLrCalls(LiveRange &lr) const; + void SplitLrUpdateInterference(LiveRange &lr); + void SplitLrUpdateRegInfo(const LiveRange &origLr, LiveRange &newLr, + std::unordered_set &conflictRegs) const ; + void SplitLrErrorCheckAndDebug(const LiveRange &origLr) const; + void SplitLr(LiveRange &lr); + + static constexpr uint16 kMaxUint16 = 0x7fff; + + DomAnalysis &domInfo; + MapleVector bbVec; + MapleUnorderedSet vregLive; + MapleUnorderedSet pregLive; + MapleMap lrMap; + MapleVector localRegVec; /* local reg info for each bb, no local reg if null */ + MapleVector bbRegInfo; /* register assignment 
info for each bb */ + MapleVector unconstrained; + MapleVector unconstrainedPref; + MapleVector constrained; + MapleVector mustAssigned; +#ifdef OPTIMIZE_FOR_PROLOG + MapleVector intDelayed; + MapleVector fpDelayed; +#endif /* OPTIMIZE_FOR_PROLOG */ + MapleSet intCallerRegSet; /* integer caller saved */ + MapleSet intCalleeRegSet; /* callee */ + MapleSet intSpillRegSet; /* spill */ + MapleSet fpCallerRegSet; /* float caller saved */ + MapleSet fpCalleeRegSet; /* callee */ + MapleSet fpSpillRegSet; /* spill */ + MapleSet intCalleeUsed; + MapleSet fpCalleeUsed; + Bfs *bfs = nullptr; + + uint32 bbBuckets = 0; /* size of bit array for bb (each bucket == 64 bits) */ + uint32 regBuckets = 0; /* size of bit array for reg (each bucket == 64 bits) */ + uint32 intRegNum = 0; /* total available int preg */ + uint32 fpRegNum = 0; /* total available fp preg */ + uint32 numVregs = 0; /* number of vregs when starting */ + regno_t ccReg = 0; + /* For spilling of spill register if there are none available + * Example, all 3 operands spilled + * sp_reg1 -> [spillMemOpnds[1]] + * sp_reg2 -> [spillMemOpnds[2]] + * ld sp_reg1 <- [addr-reg2] + * ld sp_reg2 <- [addr-reg3] + * reg1 <- reg2, reg3 sp_reg1 <- sp_reg1, sp_reg2 + * st sp_reg1 -> [addr-reg1] + * sp_reg1 <- [spillMemOpnds[1]] + * sp_reg2 <- [spillMemOpnds[2]] + */ + static constexpr size_t kSpillMemOpndNum = 4; + std::array spillMemOpnds = { nullptr }; + bool operandSpilled[kSpillMemOpndNum]; + bool needExtraSpillReg = false; +#ifdef USE_LRA + bool doLRA = true; +#else + bool doLRA = false; +#endif +#ifdef OPTIMIZE_FOR_PROLOG + bool doOptProlog = true; +#else + bool doOptProlog = false; +#endif + bool hasSpill = false; + bool doMultiPass = false; + bool seenFP = false; +}; + +class CallerSavePre: public CGPre { + public: + CallerSavePre(GraphColorRegAllocator * regAlloc, CGFunc &cgfunc, DomAnalysis &currDom, + MemPool &memPool, MemPool &mp2, PreKind kind, uint32 limit) + : CGPre(currDom, memPool, mp2, kind, limit), + func(&cgfunc), + regAllocator(regAlloc), + loopHeadBBs(ssaPreAllocator.Adapter()) {} + + ~CallerSavePre() = default; + + void ApplySSAPRE(); + void SetDump(bool val) { + dump = val; + } + private: + void CodeMotion() ; + void UpdateLoadSite(CgOccur *occ); + void CalLoadSites(); + void ComputeAvail(); + void Rename1(); + void ComputeVarAndDfPhis() override; + void BuildWorkList() override; + void DumpWorkCandAndOcc(); + + BB *GetBB(uint32 id) const override { + return func->GetBBFromID(id); + } + + PUIdx GetPUIdx() const override { + return func->GetFunction().GetPuidx(); + } + + bool IsLoopHeadBB(uint32 bbId) const { + return loopHeadBBs.find(bbId) != loopHeadBBs.end(); + } + CGFunc *func; + bool dump = false; + LiveRange *workLr = nullptr; + GraphColorRegAllocator *regAllocator; + MapleSet loopHeadBBs; + bool beyondLimit = false; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_COLOR_RA_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_dce.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_dce.h new file mode 100644 index 0000000000000000000000000000000000000000..9016e50dcbb11773e2ad2ecc6636cfb3cdc3e239 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_dce.h @@ -0,0 +1,42 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef MAPLEBE_INCLUDE_AARCH64_DCE_H +#define MAPLEBE_INCLUDE_AARCH64_DCE_H + +#include "cg_dce.h" +namespace maplebe { +class AArch64Dce : public CGDce { + public: + AArch64Dce(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) : CGDce(mp, f, sInfo) {} + ~AArch64Dce() override = default; + + private: + bool RemoveUnuseDef(VRegVersion &defVersion) override; +}; + +class A64DeleteRegUseVisitor : public DeleteRegUseVisitor { + public: + A64DeleteRegUseVisitor(CGSSAInfo &cgSSAInfo, uint32 dInsnID) : DeleteRegUseVisitor(cgSSAInfo, dInsnID) {} + ~A64DeleteRegUseVisitor() override = default; + + private: + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(PhiOperand *v) final; +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_DCE_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h new file mode 100644 index 0000000000000000000000000000000000000000..0b686d0309a06f514e210c272fac90c160554e37 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H + +#include "dependence.h" +#include "cgfunc.h" +#include "aarch64_operand.h" + +namespace maplebe { +class AArch64DepAnalysis : public DepAnalysis { + public: + AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA); + + ~AArch64DepAnalysis() override = default; + + void Run(BB &bb, MapleVector &nodes) override; + const std::string &GetDepTypeName(DepType depType) const override; + void DumpDepNode(DepNode &node) const override; + void DumpDepLink(DepLink &link, const DepNode *node) const override; + + protected: + void Init(BB &bb, MapleVector &nodes) override; + void ClearAllDepData() override; + void AnalysisAmbiInsns(BB &bb) override; + void AppendRegUseList(Insn &insn, regno_t regNO) override; + void AddDependence(DepNode& fromNode, DepNode &toNode, DepType depType) override; + void RemoveSelfDeps(Insn &insn) override; + void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) override; + void CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine = false) override; + void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) override; + void BuildDepsUseReg(Insn &insn, regno_t regNO) override; + void BuildDepsDefReg(Insn &insn, regno_t regNO) override; + void BuildDepsAmbiInsn(Insn &insn) override; + void BuildDepsMayThrowInsn(Insn &insn) override; + bool NeedBuildDepsMem(const MemOperand &memOpnd, + const MemOperand *nextMemOpnd, const Insn &memInsn) const; + void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) override; + void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) override; + void BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); + void BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); + void BuildDepsMemBar(Insn &insn) override; + void BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) override; + void BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) override; + void BuildDepsAccessStImmMem(Insn &insn, bool isDest) override; + void BuildCallerSavedDeps(Insn &insn) override; + void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) override; + void BuildStackPassArgsDeps(Insn &insn) override; + void BuildDepsDirtyStack(Insn &insn) override; + void BuildDepsUseStack(Insn &insn) override; + void BuildDepsDirtyHeap(Insn &insn) override; + DepNode *BuildSeparatorNode() override; + bool IfInAmbiRegs(regno_t regNO) const override; + bool IsFrameReg(const RegOperand&) const override; + + private: + MemOperand *GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const; + void BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop); + void BuildOpndDependency(Insn &insn); + void BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector &nodes); + void SeperateDependenceGraph(MapleVector &nodes, uint32 &nodeSum); + DepNode *GenerateDepNode(Insn &insn, MapleVector &nodes, int32 nodeSum, const MapleVector &comments); + void BuildAmbiInsnDependency(Insn &insn); + void BuildMayThrowInsnDependency(Insn &insn); + void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes); + void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn); + MemOperand *BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const; + void 
AddDependence4InsnInVectorByType(MapleVector<Insn*> &insns, Insn &insn, const DepType &type);
+  void AddDependence4InsnInVectorByTypeAndCmp(MapleVector<Insn*> &insns, Insn &insn, const DepType &type);
+  void ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, bool isFromClinit) const;
+  void ClearDepNodeInfo(DepNode &depNode) const;
+  void AddEndSeparatorNode(MapleVector<DepNode*> &nodes);
+
+  Insn **regDefs = nullptr;
+  RegList **regUses = nullptr;
+  Insn *memBarInsn = nullptr;
+  bool hasAmbiRegs = false;
+  Insn *lastCallInsn = nullptr;
+  uint32 separatorIndex = 0;
+  Insn *lastFrameDef = nullptr;
+  MapleVector<Insn*> stackUses;
+  MapleVector<Insn*> stackDefs;
+  MapleVector<Insn*> heapUses;
+  MapleVector<Insn*> heapDefs;
+  MapleVector<Insn*> mayThrows;
+  /* instructions that cannot be moved across may-throw instructions. */
+  MapleVector<Insn*> ambiInsns;
+  /* register numbers that the catch bb and cleanup bb use. */
+  MapleSet<regno_t> ehInRegs;
+  /* the bb currently being scheduled */
+  BB *curBB = nullptr;
+};
+}
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ac72e8bf929858f1a68801d0f275922dbdf5e6d
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
 */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H
+
+#include "ebo.h"
+#include "aarch64_operand.h"
+#include "aarch64_cgfunc.h"
+
+namespace maplebe {
+
+class AArch64Ebo : public Ebo {
+ public:
+  AArch64Ebo(CGFunc &func, MemPool &memPool, LiveAnalysis *live, bool before, const std::string &phase)
+      : Ebo(func, memPool, live, before, phase),
+        callerSaveRegTable(eboAllocator.Adapter()) {
+    a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+  }
+
+  enum ExtOpTable : uint8;
+
+  ~AArch64Ebo() override = default;
+
+ protected:
+  MapleVector<AArch64reg> callerSaveRegTable;
+  AArch64CGFunc *a64CGFunc;
+  int32 GetOffsetVal(const MemOperand &mem) const override;
+  OpndInfo *OperandInfoDef(BB &currentBB, Insn &currentInsn, Operand &localOpnd) override;
+  const RegOperand &GetRegOperand(const Operand &opnd) const override;
+  bool IsGlobalNeeded(Insn &insn) const override;
+  bool IsDecoupleStaticOp(Insn &insn) const override;
+  bool OperandEqSpecial(const Operand &op1, const Operand &op2) const override;
+  bool DoConstProp(Insn &insn, uint32 i, Operand &opnd) override;
+  bool Csel2Cset(Insn &insn, const MapleVector<Operand*> &opnds) override;
+  bool SimplifyConstOperand(Insn &insn, const MapleVector<Operand*> &opnds,
+                            const MapleVector<OpndInfo*> &opndInfo) override;
+  void BuildCallerSaveRegisters() override;
+  void DefineAsmRegisters(InsnInfo &insnInfo) override;
+  void DefineCallerSaveRegisters(InsnInfo &insnInfo) override;
+  void DefineReturnUseRegister(Insn &insn) override;
+  void DefineCallUseSpecialRegister(Insn &insn) override;
+  void DefineClinitSpecialRegisters(InsnInfo &insnInfo) override;
+  bool CombineExtensionAndLoad(Insn *insn, const MapleVector<OpndInfo*> &origInfos, ExtOpTable idx, bool is64Bits);
+  bool SpecialSequence(Insn &insn, const MapleVector<OpndInfo*> &origInfos) override;
+  bool IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const override;
+  bool IsPseudoRet(Insn &insn) const override;
+  bool ChangeLdrMop(Insn &insn, const Operand &opnd) const override;
+  bool IsAdd(const Insn &insn) const override;
+  bool IsFmov(const Insn &insn) const override;
+  bool IsClinitCheck(const Insn &insn) const override;
+  bool IsLastAndBranch(BB &bb, Insn &insn) const override;
+  bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const override;
+  bool ResIsNotDefAndUse(Insn &insn) const override;
+  bool LiveOutOfBB(const Operand &opnd, const BB &bb) const override;
+  bool IsInvalidReg(const RegOperand &opnd) const override;
+  bool IsZeroRegister(const Operand &opnd) const override;
+  bool IsConstantImmOrReg(const Operand &opnd) const override;
+  bool OperandLiveAfterInsn(const RegOperand &regOpnd, Insn &insn) const;
+  bool ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, MOperator oldMop,
+                                        const RegOperand& opnd);
+
+ private:
+  /* The number of elements in callerSaveRegTable must be less than 45.
*/ + static constexpr int32 kMaxCallerSaveReg = 45; + MOperator ExtLoadSwitchBitSize(MOperator lowMop) const; + bool CheckCondCode(const CondOperand &cond) const; + bool CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, + bool is64bits, bool isFp) const; + bool CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp); + bool CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const; + bool CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const; + bool SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, const ImmOperand &immOperand1, + uint32 opndSize) const; + ConditionCode GetReverseCond(const CondOperand &cond) const; + bool CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h new file mode 100644 index 0000000000000000000000000000000000000000..6c0fe4f989f105a028599942abff3598cdd4ef9d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H + +#include "asm_emit.h" + +namespace maplebe { +using namespace maple; + +class AArch64AsmEmitter : public AsmEmitter { + public: + AArch64AsmEmitter(CG &cg, const std::string &asmFileName) : AsmEmitter(cg, asmFileName) {} + ~AArch64AsmEmitter() = default; + + void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) override; + void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) override; + void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) override; + void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) override; + void RecordRegInfo(FuncEmitInfo &funcEmitInfo) const; + void Run(FuncEmitInfo &funcEmitInfo) override; + + private: + /* cfi & dbg need target info ? 
*/ + void EmitAArch64CfiInsn(Emitter &emitter, const Insn &insn) const; + void EmitAArch64DbgInsn(Emitter &emitter, const Insn &insn) const; + + void EmitAArch64Insn(Emitter &emitter, Insn &insn) const; + void EmitClinit(Emitter &emitter, const Insn &insn) const; + void EmitAdrpLdr(Emitter &emitter, const Insn &insn) const; + void EmitCounter(Emitter &emitter, const Insn &insn) const; + void EmitInlineAsm(Emitter &emitter, const Insn &insn) const; + void EmitClinitTail(Emitter &emitter, const Insn &insn) const; + void EmitLazyLoad(Emitter &emitter, const Insn &insn) const; + void EmitAdrpLabel(Emitter &emitter, const Insn &insn) const; + void EmitLazyLoadStatic(Emitter &emitter, const Insn &insn) const; + void EmitArrayClassCacheLoad(Emitter &emitter, const Insn &insn) const; + void EmitGetAndAddInt(Emitter &emitter, const Insn &insn) const; + void EmitGetAndSetInt(Emitter &emitter, const Insn &insn) const; + void EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const; + void EmitStringIndexOf(Emitter &emitter, const Insn &insn) const; + void EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const; + void EmitCheckThrowPendingException(Emitter &emitter, Insn &insn) const; + void EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const; + void EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const; + void EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const; + + void PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const; + bool CheckInsnRefField(const Insn &insn, size_t opndIndex) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_fixshortbranch.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_fixshortbranch.h new file mode 100644 index 0000000000000000000000000000000000000000..40c62f1868a739a9477004645a7e845ce29b0c7a --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_fixshortbranch.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
 */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H
+
+#include <vector>
+#include "aarch64_cg.h"
+#include "optimize_common.h"
+#include "mir_builder.h"
+
+namespace maplebe {
+class AArch64FixShortBranch {
+ public:
+  explicit AArch64FixShortBranch(CGFunc *cf) : cgFunc(cf) {}
+  ~AArch64FixShortBranch() = default;
+  void FixShortBranches();
+
+ private:
+  CGFunc *cgFunc;
+  uint32 CalculateAlignRange(const BB &bb, uint32 addr) const;
+  void SetInsnId() const;
+}; /* class AArch64ShortBranch */
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixShortBranch, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_fp_simd_regs.def b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_fp_simd_regs.def
new file mode 100644
index 0000000000000000000000000000000000000000..c630b95c7775a55522b66f291463b7133a0007cd
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_fp_simd_regs.def
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/*
+ * ARM Compiler armasm User Guide version 6.6.
+ * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html
+ * (retrieved on 3/24/2017)
+ */
+/*
+ * ID, 128 bit vector prefix, followed by scalar prefixes
+ * scalar prefixes: 8-bit, 16-bit, 32-bit, 64-bit, 128-bit, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill
+ * (e.g., we use D0 when V0 contains a 64-bit scalar FP number (aka, double))
+ */
+FP_SIMD_REG(0 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(1 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(2 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(3 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(4 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(5 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(6 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(7 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(8 , "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(9 , "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(10, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(11, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(12, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(13, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(14, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(15, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
+FP_SIMD_REG(16, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(17, "V", 
"B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(18, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(19, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(20, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(21, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(22, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(23, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(24, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(25, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(26, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(27, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(28, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(29, "V", "B", "H", "S", "D", "Q", true, false, false, false, true) +FP_SIMD_REG(30, "V", "B", "H", "S", "D", "Q", true, false, false, true, false) +FP_SIMD_REG(31, "V", "B", "H", "S", "D", "Q", true, false, false, true, false) + +/* Alias ID */ +FP_SIMD_REG_ALIAS(0) +FP_SIMD_REG_ALIAS(1) +FP_SIMD_REG_ALIAS(2) +FP_SIMD_REG_ALIAS(3) +FP_SIMD_REG_ALIAS(4) +FP_SIMD_REG_ALIAS(5) +FP_SIMD_REG_ALIAS(6) +FP_SIMD_REG_ALIAS(7) + +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(0) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(1) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(2) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(3) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(4) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(5) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(6) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(7) */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_global.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_global.h new file mode 100644 index 0000000000000000000000000000000000000000..3ff3e08d4f7f62c21cba15d833cfc59a6fbf66e0 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_global.h @@ -0,0 +1,490 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
 */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H
+
+#include "global.h"
+#include "aarch64_operand.h"
+
+namespace maplebe {
+using namespace maple;
+
+class AArch64GlobalOpt : public GlobalOpt {
+ public:
+  explicit AArch64GlobalOpt(CGFunc &func) : GlobalOpt(func) {}
+  ~AArch64GlobalOpt() override = default;
+  void Run() override;
+};
+
+class OptimizeManager {
+ public:
+  explicit OptimizeManager(CGFunc &cgFunc) : cgFunc(cgFunc) {}
+  ~OptimizeManager() = default;
+  template <typename OptimizePattern>
+  void Optimize() {
+    OptimizePattern optPattern(cgFunc);
+    optPattern.Run();
+  }
+ private:
+  CGFunc &cgFunc;
+};
+
+class OptimizePattern {
+ public:
+  explicit OptimizePattern(CGFunc &cgFunc) : cgFunc(cgFunc) {}
+  virtual ~OptimizePattern() = default;
+  virtual bool CheckCondition(Insn &insn) = 0;
+  virtual void Optimize(Insn &insn) = 0;
+  virtual void Run() = 0;
+  bool OpndDefByOne(Insn &insn, int32 useIdx) const;
+  bool OpndDefByZero(Insn &insn, int32 useIdx) const;
+  bool OpndDefByOneOrZero(Insn &insn, int32 useIdx) const;
+  void ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, uint32 regNO,
+                                     Operand &newOpnd, bool updateInfo) const;
+
+  static bool InsnDefOne(const Insn &insn);
+  static bool InsnDefZero(const Insn &insn);
+  static bool InsnDefOneOrZero(const Insn &insn);
+
+  std::string PhaseName() const {
+    return "globalopt";
+  }
+ protected:
+  virtual void Init() = 0;
+  CGFunc &cgFunc;
+};
+
+/*
+ * Do forward prop when insn is mov:
+ * mov xx, x1
+ * ...  // BBs and x1 is live
+ * mOp yy, xx
+ *
+ * =>
+ * mov x1, x1
+ * ...  // BBs and x1 is live
+ * mOp yy, x1
+ */
+class ForwardPropPattern : public OptimizePattern {
+ public:
+  explicit ForwardPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
+  ~ForwardPropPattern() override = default;
+  bool CheckCondition(Insn &insn) final;
+  void Optimize(Insn &insn) final;
+  void Run() final;
+
+ protected:
+  void Init() final;
+ private:
+  InsnSet firstRegUseInsnSet;
+  void RemoveMopUxtwToMov(Insn &insn);
+  std::set<BB*> modifiedBB;
+};
+
+/*
+ * Do backward propagation of a vreg/preg when encountering the following insn:
+ *
+ * mov vreg/preg1, vreg2
+ *
+ * Back-propagate reg1 to all of vreg2's use points and def points, when all of them are in the same bb.
+ */
+class BackPropPattern : public OptimizePattern {
+ public:
+  explicit BackPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
+  ~BackPropPattern() override {
+    firstRegOpnd = nullptr;
+    secondRegOpnd = nullptr;
+    defInsnForSecondOpnd = nullptr;
+  }
+  bool CheckCondition(Insn &insn) final;
+  void Optimize(Insn &insn) final;
+  void Run() final;
+
+ protected:
+  void Init() final;
+
+ private:
+  bool CheckAndGetOpnd(const Insn &insn);
+  bool DestOpndHasUseInsns(Insn &insn);
+  bool DestOpndLiveOutToEHSuccs(Insn &insn) const;
+  bool CheckSrcOpndDefAndUseInsns(Insn &insn);
+  bool CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn);
+  bool CheckPredefineInsn(Insn &insn);
+  bool CheckRedefineInsn(Insn &insn);
+  bool CheckReplacedUseInsn(Insn &insn);
+  RegOperand *firstRegOpnd = nullptr;
+  RegOperand *secondRegOpnd = nullptr;
+  uint32 firstRegNO = 0;
+  uint32 secondRegNO = 0;
+  InsnSet srcOpndUseInsnSet;
+  Insn *defInsnForSecondOpnd = nullptr;
+  bool globalProp = false;
+};
+
+/*
+ * When w0 has only one valid bit, these transformations will be done:
+ * cmp w0, #0
+ * cset w1, NE --> mov w1, w0
+ *
+ * cmp w0, #0
+ * cset w1, EQ --> eor w1, w0, 1
+ *
+ * cmp w0, #1
+ * cset w1, NE --> eor w1, w0, 1
+ *
+ * cmp w0, #1
+ * cset w1, EQ --> mov w1, w0
+ *
+ * cmp w0, #0
+ * cset w0, NE --> null
+ *
+ * cmp w0, #1
+ * cset w0, EQ --> null
+ *
+ * condition:
+ * 1. the first operand of the cmp instruction must have only one valid bit
+ * 2. the second operand of the cmp instruction must be 0 or 1
+ * 3. the flag register of the cmp instruction must not be used later
+ */
+class CmpCsetPattern : public OptimizePattern {
+ public:
+  explicit CmpCsetPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
+  ~CmpCsetPattern() override {
+    nextInsn = nullptr;
+    cmpFirstOpnd = nullptr;
+    cmpSecondOpnd = nullptr;
+    csetFirstOpnd = nullptr;
+  }
+  bool CheckCondition(Insn &insn) final;
+  void Optimize(Insn &insn) final;
+  void Run() final;
+
+ protected:
+  void Init() final;
+
+ private:
+  Insn *nextInsn = nullptr;
+  int64 cmpConstVal = 0;
+  Operand *cmpFirstOpnd = nullptr;
+  Operand *cmpSecondOpnd = nullptr;
+  Operand *csetFirstOpnd = nullptr;
+};
+
+/*
+ * mov w5, #1
+ * ...          --> cset w5, NE
+ * mov w0, #0
+ * csel w5, w5, w0, NE
+ *
+ * mov w5, #0
+ * ...          --> cset w5, EQ
+ * mov w0, #1
+ * csel w5, w5, w0, NE
+ *
+ * condition:
+ * 1. all define points of w5 are defined by: mov w5, #1(#0)
+ * 2. all define points of w0 are defined by: mov w0, #0(#1)
+ * 3. w0 will not be used after: csel w5, w5, w0, NE(EQ)
+ */
+class CselPattern : public OptimizePattern {
+ public:
+  explicit CselPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
+  ~CselPattern() override = default;
+  bool CheckCondition(Insn &insn) final;
+  void Optimize(Insn &insn) final;
+  void Run() final;
+
+ protected:
+  void Init() final {}
+};
+
+/*
+ * uxtb w0, w0 --> null
+ * uxth w0, w0 --> null
+ *
+ * condition:
+ * 1. validbits(w0) <= 8,16,32
+ * 2. the first operand is the same as the second operand
+ *
+ * uxtb w0, w1 --> null
+ * uxth w0, w1 --> null
+ *
+ * condition:
+ * 1. validbits(w1) <= 8,16,32
+ * 2. the use points of w0 have only one define point, which is uxt w0, w1
+ */
+class RedundantUxtPattern : public OptimizePattern {
+ public:
+  explicit RedundantUxtPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
+  ~RedundantUxtPattern() override {
+    secondOpnd = nullptr;
+  }
+  bool CheckCondition(Insn &insn) final;
+  void Optimize(Insn &insn) final;
+  void Run() final;
+
+ protected:
+  void Init() final;
+
+ private:
+  uint32 GetMaximumValidBit(Insn &insn, uint8 udIdx, InsnSet &insnChecked) const;
+  static uint32 GetInsnValidBit(const Insn &insn);
+  InsnSet useInsnSet;
+  uint32 firstRegNO = 0;
+  Operand *secondOpnd = nullptr;
+};
+
+/*
+ * bl MCC_NewObj_flexible_cname                          bl MCC_NewObj_flexible_cname
+ * mov x21, x0  // [R203]
+ * str x0, [x29,#16]  // local var: Reg0_R6340 [R203]    -->    str x0, [x29,#16]  // local var: Reg0_R6340 [R203]
+ * ... (has call)                                        ... 
(has call) + * mov x2, x21 // use of x21 ldr x2, [x29, #16] + * bl *** bl *** + */ +class LocalVarSaveInsnPattern : public OptimizePattern { + public: + explicit LocalVarSaveInsnPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~LocalVarSaveInsnPattern() override { + firstInsnSrcOpnd = nullptr; + firstInsnDestOpnd = nullptr; + secondInsnSrcOpnd = nullptr; + secondInsnDestOpnd = nullptr; + useInsn = nullptr; + secondInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool CheckFirstInsn(const Insn &firstInsn); + bool CheckSecondInsn(); + bool CheckAndGetUseInsn(Insn &firstInsn); + bool CheckLiveRange(const Insn &firstInsn); + Operand *firstInsnSrcOpnd = nullptr; + Operand *firstInsnDestOpnd = nullptr; + Operand *secondInsnSrcOpnd = nullptr; + Operand *secondInsnDestOpnd = nullptr; + Insn *useInsn = nullptr; + Insn *secondInsn = nullptr; +}; + +class ExtendShiftOptPattern : public OptimizePattern { + public: + explicit ExtendShiftOptPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~ExtendShiftOptPattern() override { + defInsn = nullptr; + newInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + void DoExtendShiftOpt(Insn &insn); + + enum ExMOpType : uint8 { + kExUndef, + kExAdd, /* MOP_xaddrrr | MOP_xxwaddrrre | MOP_xaddrrrs */ + kEwAdd, /* MOP_waddrrr | MOP_wwwaddrrre | MOP_waddrrrs */ + kExSub, /* MOP_xsubrrr | MOP_xxwsubrrre | MOP_xsubrrrs */ + kEwSub, /* MOP_wsubrrr | MOP_wwwsubrrre | MOP_wsubrrrs */ + kExCmn, /* MOP_xcmnrr | MOP_xwcmnrre | MOP_xcmnrrs */ + kEwCmn, /* MOP_wcmnrr | MOP_wwcmnrre | MOP_wcmnrrs */ + kExCmp, /* MOP_xcmprr | MOP_xwcmprre | MOP_xcmprrs */ + kEwCmp, /* MOP_wcmprr | MOP_wwcmprre | MOP_wcmprrs */ + }; + + enum LsMOpType : uint8 { + kLsUndef, + kLxAdd, /* MOP_xaddrrr | MOP_xaddrrrs */ + kLwAdd, /* MOP_waddrrr | MOP_waddrrrs */ + kLxSub, /* MOP_xsubrrr | MOP_xsubrrrs */ + kLwSub, /* MOP_wsubrrr | MOP_wsubrrrs */ + kLxCmn, /* MOP_xcmnrr | MOP_xcmnrrs */ + kLwCmn, /* MOP_wcmnrr | MOP_wcmnrrs */ + kLxCmp, /* MOP_xcmprr | MOP_xcmprrs */ + kLwCmp, /* MOP_wcmprr | MOP_wcmprrs */ + kLxEor, /* MOP_xeorrrr | MOP_xeorrrrs */ + kLwEor, /* MOP_weorrrr | MOP_weorrrrs */ + kLxNeg, /* MOP_xinegrr | MOP_xinegrrs */ + kLwNeg, /* MOP_winegrr | MOP_winegrrs */ + kLxIor, /* MOP_xiorrrr | MOP_xiorrrrs */ + kLwIor, /* MOP_wiorrrr | MOP_wiorrrrs */ + }; + + enum SuffixType : uint8 { + kNoSuffix, /* no suffix or do not perform the optimization. */ + kLSL, /* logical shift left */ + kLSR, /* logical shift right */ + kASR, /* arithmetic shift right */ + kExten /* ExtendOp */ + }; + + protected: + void Init() final; + + private: + void SelectExtendOrShift(const Insn &def); + bool CheckDefUseInfo(Insn &use, uint32 size); + SuffixType CheckOpType(const Operand &lastOpnd) const; + void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount); + void SetExMOpType(const Insn &use); + void SetLsMOpType(const Insn &use); + + MOperator replaceOp; + uint32 replaceIdx; + ExtendShiftOperand::ExtendOp extendOp; + BitShiftOperand::ShiftOp shiftOp; + Insn *defInsn = nullptr; + Insn *newInsn = nullptr; + bool optSuccess; + bool removeDefInsn; + ExMOpType exMOpType; + LsMOpType lsMOpType; +}; + +/* + * This pattern do: + * 1) + * uxtw vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * ------> + * mov vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * 2) + * ldrh R201, [...] 
+ * and R202, R201, #65520 + * uxth R203, R202 + * -------> + * ldrh R201, [...] + * and R202, R201, #65520 + * mov R203, R202 + */ +class ExtenToMovPattern : public OptimizePattern { + public: + explicit ExtenToMovPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~ExtenToMovPattern() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool CheckHideUxtw(const Insn &insn, regno_t regno) const; + bool CheckUxtw(Insn &insn); + bool BitNotAffected(Insn &insn, uint32 validNum); /* check whether significant bits are affected */ + bool CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNum); + + MOperator replaceMop = MOP_undef; +}; + +class SameDefPattern : public OptimizePattern { + public: + explicit SameDefPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~SameDefPattern() override { + currInsn = nullptr; + sameInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool IsSameDef(); + bool SrcRegIsRedefined(regno_t regNo); + bool IsSameOperand(Operand &opnd0, Operand &opnd1); + + Insn *currInsn = nullptr; + Insn *sameInsn = nullptr; +}; + +/* + * and r0, r0, #4 (the imm is n power of 2) + * ... (r0 is not used) + * cbz r0, .Label + * ===> tbz r0, #2, .Label + * + * and r0, r0, #4 (the imm is n power of 2) + * ... (r0 is not used) + * cbnz r0, .Label + * ===> tbnz r0, #2, .Label + */ +class AndCbzPattern : public OptimizePattern { + public: + explicit AndCbzPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~AndCbzPattern() override { + prevInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + int64 CalculateLogValue(int64 val) const; + bool IsAdjacentArea(Insn &prev, Insn &curr) const; + Insn *prevInsn = nullptr; +}; + +/* + * [arithmetic operation] + * add/sub/ R202, R201, #1 add/sub/ R202, R201, #1 + * ... ... + * add/sub/ R203, R201, #1 ---> mov R203, R202 + * + * [copy operation] + * mov R201, #1 mov R201, #1 + * ... ... + * mov R202, #1 ---> mov R202, R201 + * + * The pattern finds the insn with the same rvalue as the current insn, + * then prop its lvalue, and replaces the current insn with movrr insn. + * The mov can be prop in forwardprop or backprop. + * + * conditions: + * 1. in same BB + * 2. rvalue is not defined between two insns + * 3. 
lvalue is not defined between two insns + */ +class SameRHSPropPattern : public OptimizePattern { + public: + explicit SameRHSPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~SameRHSPropPattern() override { + prevInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool IsSameOperand(Operand *opnd1, Operand *opnd2) const; + bool FindSameRHSInsnInBB(Insn &insn); + Insn *prevInsn = nullptr; + std::vector candidates; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h new file mode 100644 index 0000000000000000000000000000000000000000..9edd861cfefeb6a5f0fef46f6bfb8bb7386c5409 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H +#include "ico.h" +#include "aarch64_isa.h" +#include "optimize_common.h" +#include "live.h" + +namespace maplebe { +class AArch64IfConversionOptimizer : public IfConversionOptimizer { + public: + AArch64IfConversionOptimizer(CGFunc &func, MemPool &memPool) : IfConversionOptimizer(func, memPool) {} + + ~AArch64IfConversionOptimizer() override = default; + void InitOptimizePatterns() override; +}; + +class AArch64ICOPattern : public ICOPattern { + public: + explicit AArch64ICOPattern(CGFunc &func) : ICOPattern(func) {} + ~AArch64ICOPattern() override = default; + protected: + ConditionCode Encode(MOperator mOp, bool inverse) const; + Insn *BuildCmpInsn(const Insn &condBr) const; + Insn *BuildCcmpInsn(ConditionCode ccCode, const Insn *cmpInsn) const; + Insn *BuildCondSet(const Insn &branch, RegOperand ®, bool inverse) const; + Insn *BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1, RegOperand &src2) const; + bool IsSetInsn(const Insn &insn, Operand *&dest, std::vector &src) const; + static uint32 GetNZCV(ConditionCode ccCode, bool inverse); + bool CheckMop(MOperator mOperator) const; +}; + +/* If-Then-Else pattern */ +class AArch64ICOIfThenElsePattern : public AArch64ICOPattern { + public: + explicit AArch64ICOIfThenElsePattern(CGFunc &func) : AArch64ICOPattern(func) {} + ~AArch64ICOIfThenElsePattern() override = default; + bool Optimize(BB &curBB) override; + protected: + bool BuildCondMovInsn(BB &cmpBB, const BB &bb, const std::map> &ifDestSrcMap, + const std::map> &elseDestSrcMap, bool elseBBIsProcessed, + std::vector &generateInsn); + bool DoOpt(BB &cmpBB, BB *ifBB, BB *elseBB, BB &joinBB); + void GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg, + std::vector &generateInsn); + Operand *GetDestReg(const std::map> &destSrcMap, + const RegOperand &destReg) const; + void 
GenerateInsnForReg(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg, + std::vector &generateInsn); + RegOperand *GenerateRegAndTempInsn(Operand &dest, const RegOperand &destReg, std::vector &generateInsn) const; + bool CheckHasSameDest(std::vector &lInsn, std::vector &rInsn) const; + bool CheckModifiedRegister(Insn &insn, std::map> &destSrcMap, + std::vector &src, Operand &dest, + const Insn *cmpInsn, const Operand *flagOpnd) const; + bool CheckCondMoveBB(BB *bb, std::map> &destSrcMap, std::vector &destRegs, + std::vector &setInsn, Operand *flagReg, Insn *cmpInsn) const; +}; + +/* If( cmp || cmp ) then or If( cmp && cmp ) then + * cmp w4, #1 + * beq .L.886__1(branch1) cmp w4, #1 + * .L.886__2: => ccmp w4, #4, #4, NE + * cmp w4, #4 beq .L.886__1 + * beq .L.886__1(branch2) + * */ +class AArch64ICOSameCondPattern : public AArch64ICOPattern { + public: + explicit AArch64ICOSameCondPattern(CGFunc &func) : AArch64ICOPattern(func) {} + ~AArch64ICOSameCondPattern() override = default; + bool Optimize(BB &curBB) override; + protected: + bool DoOpt(BB *firstIfBB, BB &secondIfBB); +}; + +/* If-Then MorePreds pattern + * + * .L.891__92: .L.891__92: + * cmp x4, w0, UXTW cmp x4, w0, UXTW + * bls .L.891__41 csel x0, x2, x0, LS + * .L.891__42: bls .L.891__94 + * sub x0, x4, w0, UXTW =====> .L.891__42: + * cmp x0, x2 sub x0, x4, w0, UXTW + * bls .L.891__41 cmp x0, x2 + * ...... csel x0, x2, x0, LS + * .L.891__41: bls .L.891__94 + * mov x0, x2 + * b .L.891__94 + * */ +class AArch64ICOMorePredsPattern : public AArch64ICOPattern { + public: + explicit AArch64ICOMorePredsPattern(CGFunc &func) : AArch64ICOPattern(func) {} + ~AArch64ICOMorePredsPattern() override = default; + bool Optimize(BB &curBB) override; + protected: + bool DoOpt(BB &gotoBB); + bool CheckGotoBB(BB &gotoBB, std::vector &movInsn) const; + bool MovToCsel(std::vector &movInsn, std::vector &cselInsn, const Insn &branchInsn) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h new file mode 100644 index 0000000000000000000000000000000000000000..7ab6827a82361b1cc7b7027fcf6a741646c8a5a1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H + +#include "aarch64_isa.h" +#include "insn.h" +#include "string_utils.h" +#include "aarch64_operand.h" +#include "common_utils.h" +namespace maplebe { +class A64OpndEmitVisitor : public OpndEmitVisitor { + public: + A64OpndEmitVisitor(Emitter &emitter, const OpndDesc *operandProp) + : OpndEmitVisitor(emitter), + opndProp(operandProp) {} + ~A64OpndEmitVisitor() override { + opndProp = nullptr; + } + + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(CommentOperand *v) final; + void Visit(OfstOperand *v) final; + void Visit(ListOperand *v) final; + + private: + void EmitVectorOperand(const RegOperand &v); + void EmitIntReg(const RegOperand &v, uint8 opndSz = kMaxSimm32); + + const OpndDesc *opndProp; +}; + +class A64OpndDumpVisitor : public OpndDumpVisitor { + public: + A64OpndDumpVisitor(const OpndDesc &operandDesc) : OpndDumpVisitor(operandDesc) {} + ~A64OpndDumpVisitor() override = default; + + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(PhiOperand *v) final; + void Visit(CommentOperand *v) final; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_int_regs.def b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_int_regs.def new file mode 100644 index 0000000000000000000000000000000000000000..d4b00c71a43c654d89cc34a46e8583d7e864b8c4 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_int_regs.def @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* + * ARM Compiler armasm User Guide version 6.6. + * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html + * (retrieved on 3/24/2017) + * + * $ 4.1 Registers in AArch64 state + * + * There is no register named W31 or X31. + * Depending on the instruction, register 31 is either the stack + * pointer or the zero register. When used as the stack pointer, + * you refer to it as "SP". When used as the zero register, you refer + * to it as WZR in a 32-bit context or XZR in a 64-bit context. + * The zero register returns 0 when read and discards data when + * written (e.g., when setting the status register for testing). 
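+ *
+ * Illustrative examples (an assumption added for clarity, not part of the
+ * original comment):
+ *   mov w0, wzr         // read of the zero register: w0 := 0
+ *   str xzr, [sp, #8]   // store 64-bit zero without a scratch register
+ *   add sp, sp, #16     // here register 31 acts as the stack pointer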
+ */ +/* ID, 32-bit prefix, 64-bit prefix, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill */ +INT_REG(0 , "W", "X", true, false, true, false, false) +INT_REG(1 , "W", "X", true, false, true, false, false) +INT_REG(2 , "W", "X", true, false, true, false, false) +INT_REG(3 , "W", "X", true, false, true, false, false) +INT_REG(4 , "W", "X", true, false, true, false, false) +INT_REG(5 , "W", "X", true, false, true, false, false) +INT_REG(6 , "W", "X", true, false, true, false, false) +INT_REG(7 , "W", "X", true, false, true, false, false) +INT_REG(8 , "W", "X", true, false, false, false, false) +INT_REG(9 , "W", "X", true, false, false, false, false) +INT_REG(10, "W", "X", true, false, false, false, false) +INT_REG(11, "W", "X", true, false, false, false, false) +INT_REG(12, "W", "X", true, false, false, false, false) +INT_REG(13, "W", "X", true, false, false, false, false) +INT_REG(14, "W", "X", true, false, false, false, false) +INT_REG(15, "W", "X", true, false, false, false, true) +INT_REG(16, "W", "X", true, false, false, true, false) +INT_REG(17, "W", "X", true, false, false, true, false) +INT_REG(18, "W", "X", true, false, false, false, false) +INT_REG(19, "W", "X", true, true, false, false, false) +INT_REG(20, "W", "X", true, true, false, false, false) +INT_REG(21, "W", "X", true, true, false, false, false) +INT_REG(22, "W", "X", true, true, false, false, false) +INT_REG(23, "W", "X", true, true, false, false, false) +INT_REG(24, "W", "X", true, true, false, false, false) +INT_REG(25, "W", "X", true, true, false, false, false) +INT_REG(26, "W", "X", true, true, false, false, false) +INT_REG(27, "W", "X", true, true, false, false, false) +INT_REG(28, "W", "X", true, true, false, false, false) +INT_REG(29, "W", "X", true, true, false, false, false) +INT_REG(30, "W", "X", false, true, false, false, false) +INT_REG(31, "W", "X", false, true, false, false, false) +/* + * Refer to ARM Compiler armasm User Guide version 6.6. $4.5 Predeclared core register names in AArch64 state + * We should not use "W" prefix in 64-bit context, though!! + */ +INT_REG(SP, "W", "" , false, false, false, false, false) +INT_REG(ZR, "W", "X", false, false, false, false, false) + +/* Alias ID, ID, 32-bit prefix, 64-bit prefix */ +INT_REG_ALIAS(FP, 31, "", "" ) +INT_REG_ALIAS(LR, 30, "", "" ) + +/* R19 is reserved for yieldpoint */ +INT_REG_ALIAS(YP, 19, "", "" ) + +INT_REG_ALIAS(LAST_INT_REG, 31, "", "" ) diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h new file mode 100644 index 0000000000000000000000000000000000000000..6b045ee166f07faa25444dd35aa30fe8115dfc85 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H + +#include "isa.h" + +#define DEFINE_MOP(op, ...) 
op, +enum AArch64MOP_t : maple::uint32 { +#include "aarch64_md.def" + kMopLast +}; +#undef DEFINE_MOP + +namespace maplebe { +/* + * ARM Architecture Reference Manual (for ARMv8) + * D1.8.2 + */ +constexpr int kAarch64StackPtrAlignment = 16; + +constexpr int32 kOffsetAlign = 8; +constexpr uint32 kIntregBytelen = 8; /* 64-bit */ +constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ +constexpr int kSizeOfFplr = 16; + +enum StpLdpImmBound : int { + kStpLdpImm64LowerBound = -512, + kStpLdpImm64UpperBound = 504, + kStpLdpImm32LowerBound = -256, + kStpLdpImm32UpperBound = 252 +}; + +enum StrLdrPerPostBound : int64 { + kStrLdrPerPostLowerBound = -256, + kStrLdrPerPostUpperBound = 255 +}; + +constexpr int64 kStrAllLdrAllImmLowerBound = 0; +enum StrLdrImmUpperBound : int64 { + kStrLdrImm32UpperBound = 16380, /* must be a multiple of 4 */ + kStrLdrImm64UpperBound = 32760, /* must be a multiple of 8 */ + kStrbLdrbImmUpperBound = 4095, + kStrhLdrhImmUpperBound = 8190 +}; + +/* + * ARM Compiler armasm User Guide version 6.6. + * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html + * (retrieved on 3/24/2017) + * + * $ 4.1 Registers in AArch64 state + * ...When you use the 32-bit form of an instruction, the upper + * 32 bits of the source registers are ignored and + * the upper 32 bits of the destination register are set to zero. + * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * + * There is no register named W31 or X31. + * Depending on the instruction, register 31 is either the stack + * pointer or the zero register. When used as the stack pointer, + * you refer to it as "SP". When used as the zero register, you refer + * to it as WZR in a 32-bit context or XZR in a 64-bit context. + * The zero register returns 0 when read and discards data when + * written (e.g., when setting the status register for testing). 
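+ *
+ * A short sketch of the zero-extension rule (illustrative assembly only):
+ *   mov w0, w1        // writes the low 32 bits of x0, zeroes the upper 32
+ *   add x2, x0, x3    // a later 64-bit read sees the zero-extended value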
+ */
+enum AArch64reg : uint32 {
+  kRinvalid = kInvalidRegNO,
+/* integer registers */
+#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) R##ID,
+#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64)
+#include "aarch64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) V##ID,
+#define FP_SIMD_REG_ALIAS(ID)
+#include "aarch64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+#undef FP_SIMD_REG_ALIAS
+  kMaxRegNum,
+  kRFLAG,
+  kAllRegNum,
+/* alias */
+#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill)
+#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) R##ALIAS = R##ID,
+#include "aarch64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill)
+#define FP_SIMD_REG_ALIAS(ID) S##ID = V##ID,
+#include "aarch64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+#undef FP_SIMD_REG_ALIAS
+#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill)
+#define FP_SIMD_REG_ALIAS(ID) D##ID = V##ID,
+#include "aarch64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+#undef FP_SIMD_REG_ALIAS
+};
+
+class Insn;
+
+namespace AArch64isa {
+static inline bool IsGPRegister(AArch64reg r) {
+  return R0 <= r && r <= RZR;
+}
+
+static inline bool IsFPSIMDRegister(AArch64reg r) {
+  return V0 <= r && r <= V31;
+}
+
+static inline bool IsPhysicalRegister(regno_t r) {
+  return r < kMaxRegNum;
+}
+
+static inline RegType GetRegType(AArch64reg r) {
+  if (IsGPRegister(r)) {
+    return kRegTyInt;
+  }
+  if (IsFPSIMDRegister(r)) {
+    return kRegTyFloat;
+  }
+  DEBUG_ASSERT(false, "No suitable register type to return?");
+  return kRegTyUndef;
+}
+
+enum MemoryOrdering : uint32 {
+  kMoNone = 0,
+  kMoAcquire = 1ULL, /* ARMv8 */
+  kMoAcquireRcpc = (1ULL << 1), /* ARMv8.3 */
+  kMoLoacquire = (1ULL << 2), /* ARMv8.1 */
+  kMoRelease = (1ULL << 3), /* ARMv8 */
+  kMoLorelease = (1ULL << 4) /* ARMv8.1 */
+};
+
+static inline bool IsPseudoInstruction(MOperator mOp) {
+  return (mOp >= MOP_pseudo_param_def_x && mOp <= MOP_pseudo_eh_def_x);
+}
+
+/*
+ * Precondition: The given insn is a jump instruction.
+ * Get the index of the jump target label operand of the given instruction.
+ * Note: MOP_xbr is also a jump instruction, but its target is unknown at compile
+ * time because it branches to a register rather than a label, so we do not treat
+ * it as a branching instruction here. For the special long-range branch patch,
+ * however, a label operand is installed and this case is handled as well.
+ */
+uint32 GetJumpTargetIdx(const Insn &insn);
+
+MOperator FlipConditionOp(MOperator flippedOp);
+} /* namespace AArch64isa */
+
+/*
+ * We save callee-saved registers from the lower stack area to the upper stack area.
+ * If possible, we store a pair of registers (int/int and fp/fp) in the stack.
+ * The Stack Pointer has to be aligned at a 16-byte boundary.
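+ * For example (a sketch; the actual offsets depend on the final frame layout):
+ *   stp x19, x20, [sp, #16]   // one pair per GetNextOffsetCalleeSaved() step
+ *   stp x21, x22, [sp, #32]   // offset advanced by kIntregBytelen << 1 == 16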
+ * On AArch64, kIntregBytelen == 8 (see the above) + */ +inline void GetNextOffsetCalleeSaved(int &offset) { + offset += (kIntregBytelen << 1); +} + +MOperator GetMopPair(MOperator mop); +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_live.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_live.h new file mode 100644 index 0000000000000000000000000000000000000000..681c94831d808194784a89d6580067e241c2cbb5 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_live.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H + +#include "live.h" + +namespace maplebe { +class AArch64LiveAnalysis : public LiveAnalysis { + public: + AArch64LiveAnalysis(CGFunc &func, MemPool &memPool) : LiveAnalysis(func, memPool) {} + ~AArch64LiveAnalysis() override = default; + bool CleanupBBIgnoreReg(regno_t reg) override; + void InitEhDefine(BB &bb) override; + void GenerateReturnBBDefUse(BB &bb) const override; + void ProcessCallInsnParam(BB &bb, const Insn &insn) const override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_md.def new file mode 100644 index 0000000000000000000000000000000000000000..51005b8687dcdd7d5109df8320295c79497eb3dc --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -0,0 +1,1194 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */
+/* InsnDesc format:
+ * {mop, opndMD, properties, latency, name, format, atomicNum,
+ *  validFunc (optional), encodeType (optional), binary encoding (optional)}
+ */
+/* MOP_undef, */
+DEFINE_MOP(MOP_undef, {},0,kLtUndef,"","",0,kUnknownEncodeType,0x00000000)
+
+/* AARCH64 MOVES */
+/* MOP_xmovrr */
+DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mov","0,1",1,kMovReg,0x00000000)
+/* MOP_wmovrr */
+DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1,kMovReg,0x00000000)
+/* MOP_wmovri32 */
+DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1,kMovImm,0x00000000)
+/* MOP_xmovri64 */
+DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1,kMovImm,0x00000000)
+/* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */
+DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1)
+
+/* MOP_xvmovsr */
+DEFINE_MOP(MOP_xvmovsr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISMOVE,kLtR2f,"fmov","0,1",1,kFloatIntConversions,0x1e270000)
+/* MOP_xvmovdr */
+DEFINE_MOP(MOP_xvmovdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISMOVE,kLtR2f,"fmov","0,1",1,kFloatIntConversions,0x9e670000)
+/* MOP_xvmovrs */
+DEFINE_MOP(MOP_xvmovrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISMOVE,kLtF2r,"fmov","0,1",1,kFloatIntConversions,0x1e260000)
+/* MOP_xvmovrd */
+DEFINE_MOP(MOP_xvmovrd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISMOVE,kLtF2r,"fmov","0,1",1,kFloatIntConversions,0x9e660000)
+/* MOP_xvmovs */
+DEFINE_MOP(MOP_xvmovs, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISMOVE,kLtFpalu,"fmov","0,1",1,kFloatDataProcessing1,0x1e204000)
+/* MOP_xvmovd */
+DEFINE_MOP(MOP_xvmovd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISMOVE,kLtFpalu,"fmov","0,1",1,kFloatDataProcessing1,0x1e604000)
+
+/* Vector SIMD mov */
+/* MOP_xvmovrv */
+DEFINE_MOP(MOP_xvmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISMOVE,kLtF2r,"mov","0,1",1)
+
+/* MOP_xadrp */
+DEFINE_MOP(MOP_xadrp, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},0,kLtShift,"adrp","0,1",1,kPCRelAddr,0x90000000)
+/* MOP_xadri64 */
+DEFINE_MOP(MOP_xadri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},0,kLtShift,"adr","0,1",1,kPCRelAddr,0x10000000)
+/* MOP_xadrpl12 */
+DEFINE_MOP(MOP_xadrpl12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Literal12Src},0,kLtAlu,"add","0,1,2",1,kAddPCRelAddr,0x91000000)
+
+/* AARCH64 Arithmetic: add */
+/* MOP_xaddrrr */
+DEFINE_MOP(MOP_xaddrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"add","0,1,2",1,kAddSubReg,0x8B000000)
+/* MOP_xaddrrrs */
+DEFINE_MOP(MOP_xaddrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"add","0,1,2,3",1,kAddSubShiftReg,0x8B000000)
+/* MOP_xxwaddrrre */
+DEFINE_MOP(MOP_xxwaddrrre, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"add","0,1,2,3",1,kAddSubExtendReg,0x8B200000)
+/* MOP_xaddrri24 */
+DEFINE_MOP(MOP_xaddrri24, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtShift,"add","0,1,2,3",1,Imm12BitValid,kAddSubShiftImm,0x91000000)
+/* MOP_xaddrri12 */
+DEFINE_MOP(MOP_xaddrri12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"add","0,1,2",1,Imm12BitValid,kAddSubImm,0x91000000)
+/* MOP_waddrrr */
+DEFINE_MOP(MOP_waddrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"add","0,1,2",1,kAddSubReg,0xb000000)
+/* MOP_waddrrrs */
+DEFINE_MOP(MOP_waddrrrs,
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"add","0,1,2,3",1,kAddSubShiftReg,0xb000000) +/* MOP_xxwaddrrre */ +DEFINE_MOP(MOP_wwwaddrrre, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"add","0,1,2,3",1,kAddSubExtendReg, 0xB200000) +/* MOP_waddrri24 */ +DEFINE_MOP(MOP_waddrri24, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"add","0,1,2,3",1,Imm12BitValid,kAddSubShiftImm,0x11000000) +/* MOP_waddrri12 */ +DEFINE_MOP(MOP_waddrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"add","0,1,2",1,Imm12BitValid,kAddSubImm,0x11000000) +/* MOP_dadd */ +DEFINE_MOP(MOP_dadd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fadd","0,1,2",1,kFloatDataProcessing2,0x1e602800) +/* MOP_sadd */ +DEFINE_MOP(MOP_sadd, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fadd","0,1,2",1,kFloatDataProcessing2,0x1e202800) + +/* AARCH64 Arithmetic: sub */ +/* MOP_xsubrrr */ +DEFINE_MOP(MOP_xsubrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"sub","0,1,2",1,kAddSubReg,0xcb000000) +/* MOP_xsubsrrr */ +DEFINE_MOP(MOP_xsubsrrr, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"subs","1,2,3",1, kAddSubReg, 0xeb000000) +/* MOP_xsubrrrs */ +DEFINE_MOP(MOP_xsubrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"sub","0,1,2,3",1,kAddSubShiftReg, 0xcb000000) +/* MOP_xxwsubrrre */ +DEFINE_MOP(MOP_xxwsubrrre, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"sub","0,1,2,3",1, kAddSubExtendReg, 0xcb200000) +/* MOP_xsubsrrrs */ +DEFINE_MOP(MOP_xsubsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"subs","1,2,3,4",1, kAddSubShiftReg, 0xeb000000) +/* MOP_xsubrri24 */ +DEFINE_MOP(MOP_xsubrri24, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"sub","0,1,2,3",1,Imm12BitValid,kAddSubShiftImm,0xd1000000) +/* MOP_xsubsrri24 */ +DEFINE_MOP(MOP_xsubsrri24, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"subs","1,2,3,4",1,Imm12BitValid, kAddSubShiftImm, 0xf1000000) +/* MOP_xsubrri12 */ +DEFINE_MOP(MOP_xsubrri12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"sub","0,1,2",1,Imm12BitValid,kAddSubImm,0xd1000000) +/* MOP_xsubsrri12 */ +DEFINE_MOP(MOP_xsubsrri12, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"subs","1,2,3",1,Imm12BitValid, kAddSubImm, 0xf1000000) +/* MOP_wsubrrr */ +DEFINE_MOP(MOP_wsubrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"sub","0,1,2",1,kAddSubReg,0x4b000000) +/* MOP_wsubsrrr */ +DEFINE_MOP(MOP_wsubsrrr, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"subs","1,2,3",1, kAddSubReg, 0x6b000000) +/* MOP_wsubrrrs */ +DEFINE_MOP(MOP_wsubrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"sub","0,1,2,3",1, kAddSubShiftReg, 0x4b000000) +/* MOP_wwwsubrrre */ +DEFINE_MOP(MOP_wwwsubrrre, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"sub","0,1,2,3",1, kAddSubExtendReg, 0x4b200000) +/* MOP_wsubsrrrs */ +DEFINE_MOP(MOP_wsubsrrrs, 
{&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"subs","1,2,3,4",1, kAddSubShiftReg, 0x6b000000)
+/* MOP_wsubrri24 */
+DEFINE_MOP(MOP_wsubrri24, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"sub","0,1,2,3",1,Imm12BitValid,kAddSubShiftImm,0x51000000)
+/* MOP_wsubsrri24 */
+DEFINE_MOP(MOP_wsubsrri24, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"subs","1,2,3,4",1,Imm12BitValid, kAddSubShiftImm, 0x71000000)
+/* MOP_wsubrri12 */
+DEFINE_MOP(MOP_wsubrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"sub","0,1,2",1,Imm12BitValid,kAddSubImm,0x51000000)
+/* MOP_wsubsrri12 */
+DEFINE_MOP(MOP_wsubsrri12, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"subs","1,2,3",1,Imm12BitValid, kAddSubImm, 0x71000000)
+/* MOP_dsub */
+DEFINE_MOP(MOP_dsub, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fsub","0,1,2",1,kFloatDataProcessing2,0x1e603800)
+/* MOP_ssub */
+DEFINE_MOP(MOP_ssub, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fsub","0,1,2",1,kFloatDataProcessing2,0x1e203800)
+
+/* AARCH64 Arithmetic: multiply */
+/* MOP_xmulrrr */
+DEFINE_MOP(MOP_xmulrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"mul","0,1,2",1,kDataProcess3Src,0x9b007c00)
+/* MOP_wmulrrr */
+DEFINE_MOP(MOP_wmulrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"mul","0,1,2",1,kDataProcess3Src,0x1b007c00)
+/* MOP_xvmuls */
+DEFINE_MOP(MOP_xvmuls, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpmul,"fmul","0,1,2",1,kFloatDataProcessing2,0x1e200800)
+/* MOP_xvmuld */
+DEFINE_MOP(MOP_xvmuld, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpmul,"fmul","0,1,2",1,kFloatDataProcessing2,0x1e600800)
+/* MOP_xsmullrrr */
+DEFINE_MOP(MOP_xsmullrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"smull","0,1,2",1,kDataProcess3Src,0x9b207c00)
+
+/* AARCH64 Arithmetic: multiply first then add */
+/* MOP_xmaddrrrr */
+DEFINE_MOP(MOP_xmaddrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"madd","0,1,2,3",1, kDataProcess3Src, 0x9b000000)
+/* MOP_wmaddrrrr */
+DEFINE_MOP(MOP_wmaddrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"madd","0,1,2,3",1, kDataProcess3Src, 0x1b000000)
+
+/* AARCH64 leading zeros, reverse bits (for trailing zeros) */
+/* MOP_wclz */
+DEFINE_MOP(MOP_wclz, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"clz","0,1", 1, kDataProcess1Src, 0x5ac01000)
+/* MOP_xclz */
+DEFINE_MOP(MOP_xclz, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"clz","0,1", 1, kDataProcess1Src, 0xdac01000)
+/* MOP_wcls */
+DEFINE_MOP(MOP_wcls, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"cls","0,1", 1, kDataProcess1Src, 0x5ac01400)
+/* MOP_xcls */
+DEFINE_MOP(MOP_xcls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"cls","0,1", 1, kDataProcess1Src, 0xdac01400)
+/* MOP_wrbit */
+DEFINE_MOP(MOP_wrbit, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rbit","0,1", 1, kDataProcess1Src, 0x5ac00000)
+/* MOP_xrbit */
+DEFINE_MOP(MOP_xrbit, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rbit","0,1", 1, kDataProcess1Src, 0xdac00000)
+/* MOP_xrevrr */
+DEFINE_MOP(MOP_xrevrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rev","0,1",1, kDataProcess1Src, 0xdac00c00)
+/* MOP_wrevrr */
+DEFINE_MOP(MOP_wrevrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rev","0,1",1, kDataProcess1Src, 0x5ac00800) +/* MOP_xrevrr */ +DEFINE_MOP(MOP_wrevrr16, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rev16","0,1",1, kDataProcess1Src, 0x5ac00400) +DEFINE_MOP(MOP_xrevrr16, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rev16","0,1",1, kDataProcess1Src, 0xdac00400) + +/* AARCH64 Conversions */ +/* MOP_xsxtb32 */ +DEFINE_MOP(MOP_xsxtb32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtb","0,1",1,kBitfield,0x13001c00) +/* MOP_xsxtb64 */ +DEFINE_MOP(MOP_xsxtb64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtb","0,1",1,kBitfield,0x93401c00) +/* MOP_xsxth32 */ +DEFINE_MOP(MOP_xsxth32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxth","0,1",1,kBitfield,0x13003c00) +/* MOP_xsxth64 */ +DEFINE_MOP(MOP_xsxth64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxth","0,1",1,kBitfield,0x93403c00) +/* MOP_xsxtw64 */ +DEFINE_MOP(MOP_xsxtw64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtw","0,1",1,kBitfield,0x93407c00) + +/* MOP_xuxtb32 */ +DEFINE_MOP(MOP_xuxtb32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxtb","0,1",1,kBitfield,0x53001c00) +/* MOP_xuxth32 */ +DEFINE_MOP(MOP_xuxth32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxth","0,1",1,kBitfield,0x53003c00) +/* MOP_xuxtw64 Same as mov w0,w0 */ +DEFINE_MOP(MOP_xuxtw64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxtw","0,1",1,kBitfield,0xd3407c00) + +/* MOP_xvcvtfd */ +DEFINE_MOP(MOP_xvcvtfd, {&OpndDesc::Reg32FD,&OpndDesc::Reg64FS},ISCONVERSION,kLtFpalu,"fcvt","0,1",1,kFloatDataProcessing1,0x1e624000) +/* MOP_xvcvtdf */ +DEFINE_MOP(MOP_xvcvtdf, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISCONVERSION,kLtFpalu,"fcvt","0,1",1,kFloatDataProcessing1,0x1e22c000) + +/* MOP_vcvtrf fcvtzs w,s */ +DEFINE_MOP(MOP_vcvtrf, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1,kFloatIntConversions,0x1e380000) +/* MOP_xvcvtrf fcvtzs x,s */ +DEFINE_MOP(MOP_xvcvtrf, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1,kFloatIntConversions,0x9e380000) +/* MOP_vcvturf fcvtzu w,s */ +DEFINE_MOP(MOP_vcvturf, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1,kFloatIntConversions,0x1e390000) +/* MOP_xvcvturf fcvtzu x,s */ +DEFINE_MOP(MOP_xvcvturf, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1,kFloatIntConversions,0x9e390000) + +/* MOP_vcvtas fcvtas w,s (for round) */ +DEFINE_MOP(MOP_vcvtas, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtas","0,1",1,kFloatIntConversions,0x1e240000) +/* MOP_xvcvtas fcvtas x,s */ +DEFINE_MOP(MOP_xvcvtas, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtas","0,1",1,kFloatIntConversions,0x9e640000) +/* MOP_vcvtms fcvtms w,s (for floor) */ +DEFINE_MOP(MOP_vcvtms, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtms","0,1",1,kFloatIntConversions,0x1e300000) +/* MOP_xvcvtms fcvtms x,s */ +DEFINE_MOP(MOP_xvcvtms, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtms","0,1",1,kFloatIntConversions,0x9e700000) +/* MOP_vcvtps fcvtps w,s (for ceil) */ +DEFINE_MOP(MOP_vcvtps, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtps","0,1",1,kFloatIntConversions,0x1e280000) +/* MOP_xvcvtps fcvtps x,d */ +DEFINE_MOP(MOP_xvcvtps, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtps","0,1",1,kFloatIntConversions,0x9e680000) + +/* MOP_vcvtrd fcvtzs w,d */ +DEFINE_MOP(MOP_vcvtrd, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1,kFloatIntConversions,0x1e780000) +/* MOP_xvcvtrd fcvtzs x,d */ +DEFINE_MOP(MOP_xvcvtrd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1,kFloatIntConversions,0x9e780000) +/* MOP_vcvturd fcvtzu w,d */ +DEFINE_MOP(MOP_vcvturd, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1,kFloatIntConversions,0x1e790000) +/* MOP_xvcvturd fcvtzu x,d */ +DEFINE_MOP(MOP_xvcvturd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1,kFloatIntConversions,0x9e790000) + +/* MOP_vcvtfr scvtf s,w */ +DEFINE_MOP(MOP_vcvtfr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1,kFloatIntConversions,0x1e220000) +/* MOP_xvcvtfr scvtf s,x */ +DEFINE_MOP(MOP_xvcvtfr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1,kFloatIntConversions,0x9e220000) +/* MOP_vcvtufr ucvtf s,w */ +DEFINE_MOP(MOP_vcvtufr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1,kFloatIntConversions,0x1e230000) +/* MOP_xvcvtufr ucvtf s,x */ +DEFINE_MOP(MOP_xvcvtufr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1,kFloatIntConversions,0x9e230000) + +/* MOP_vcvtdr scvtf d,w */ +DEFINE_MOP(MOP_vcvtdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1,kFloatIntConversions,0x1e620000) +/* MOP_xvcvtdr scvtf d,x */ +DEFINE_MOP(MOP_xvcvtdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1,kFloatIntConversions,0x9e620000) +/* MOP_vcvtudr ucvtf d,w */ +DEFINE_MOP(MOP_vcvtudr, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1,kFloatIntConversions,0x1e630000) +/* MOP_xvcvtudr ucvtf d,x */ +DEFINE_MOP(MOP_xvcvtudr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1,kFloatIntConversions,0x9e630000) + +/* MOP_xcsel */ +DEFINE_MOP(MOP_wcselrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csel","0,1,2,3",1,kConditionalSelect,0x1a800000) +DEFINE_MOP(MOP_xcselrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csel","0,1,2,3",1,kConditionalSelect,0x9A800000) + +/* MOP_xcset -- all conditions minus AL & NV */ +DEFINE_MOP(MOP_wcsetrc, {&OpndDesc::Reg32ID,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cset","0,1",1,kConditionalSelect,0x1a9f07e0) +DEFINE_MOP(MOP_xcsetrc, {&OpndDesc::Reg64ID,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cset","0,1",1,kConditionalSelect,0x9a9f07e0) + +/* MOP_xcsinc */ +DEFINE_MOP(MOP_wcsincrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinc","0,1,2,3",1,kConditionalSelect,0x1a800400) +DEFINE_MOP(MOP_xcsincrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinc","0,1,2,3",1,kConditionalSelect,0x9a800400) + +/* MOP_xcsinv */ +DEFINE_MOP(MOP_wcsinvrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinv","0,1,2,3",1,kConditionalSelect,0x5a800000) +DEFINE_MOP(MOP_xcsinvrrrc, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinv","0,1,2,3",1,kConditionalSelect,0xda800000) + +/* MOP_xandrrr */ +DEFINE_MOP(MOP_xandrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"and","0,1,2",1,kLogicalReg,0x8a000000) +/* MOP_xandrrrs */ +DEFINE_MOP(MOP_xandrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"and","0,1,2,3",1,kLogicalReg,0x8a000000) +/* MOP_xandrri13 */ +DEFINE_MOP(MOP_xandrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"and","0,1,2",1,Imm13BitMaskValid,kLogicalImm,0x92000000) +/* MOP_wandrrr */ +DEFINE_MOP(MOP_wandrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"and","0,1,2",1,kLogicalReg,0xa000000) +/* MOP_wandrrrs */ +DEFINE_MOP(MOP_wandrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"and","0,1,2,3",1,kLogicalReg,0xa000000) +/* MOP_wandrri12 */ +DEFINE_MOP(MOP_wandrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"and","0,1,2",1,Imm12BitMaskValid,kLogicalImm,0x12000000) + +/* MOP_xbicrrr */ +DEFINE_MOP(MOP_xbicrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"bic","0,1,2",1, kLogicalReg, 0x8a200000) +/* MOP_wbicrrr */ +DEFINE_MOP(MOP_wbicrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"bic","0,1,2",1, kLogicalReg, 0xa200000) + +/* MOP_xiorrrr */ +DEFINE_MOP(MOP_xiorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"orr","0,1,2",1,kLogicalReg,0xaa000000) +/* MOP_xiorrrrs */ +DEFINE_MOP(MOP_xiorrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"orr","0,1,2,3",1,kLogicalReg,0xaa000000) +/* MOP_xiorrri13 */ +DEFINE_MOP(MOP_xiorrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"orr","0,1,2",1,Imm13BitMaskValid,kLogicalImm,0xb2000000) +/* MOP_wiorrrr */ +DEFINE_MOP(MOP_wiorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"orr","0,1,2",1,kLogicalReg,0x2a000000) +/* MOP_wiorrrrs */ +DEFINE_MOP(MOP_wiorrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"orr","0,1,2,3",1,kLogicalReg,0x2a000000) +/* MOP_wiorrri12 */ +DEFINE_MOP(MOP_wiorrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"orr","0,1,2",1,Imm12BitMaskValid,kLogicalImm,0x32000000) + +/* MOP_xeorrrr */ +DEFINE_MOP(MOP_xeorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"eor","0,1,2",1,kLogicalReg,0xca000000) +/* MOP_xeorrrrs */ +DEFINE_MOP(MOP_xeorrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"eor","0,1,2,3",1,kLogicalReg,0xca000000) +/* MOP_xeorrri13 */ +DEFINE_MOP(MOP_xeorrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"eor","0,1,2",1,Imm13BitMaskValid,kLogicalImm,0xd2000000) +/* MOP_weorrrr */ +DEFINE_MOP(MOP_weorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"eor","0,1,2",1,kLogicalReg,0x4a000000) +/* MOP_weorrrrs */ +DEFINE_MOP(MOP_weorrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"eor","0,1,2,3",1,kLogicalReg,0x4a000000) +/* MOP_weorrri12 */ +DEFINE_MOP(MOP_weorrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"eor","0,1,2",1,Imm12BitMaskValid,kLogicalImm,0x52000000) + +/* MOP_xnotrr */ +DEFINE_MOP(MOP_xnotrr, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"mvn","0,1",1,kLogicalReg,0xaa2003e0) +/* MOP_wnotrr */ +DEFINE_MOP(MOP_wnotrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"mvn","0,1",1,kLogicalReg,0x2a2003e0) +/* MOP_vnotui */ +DEFINE_MOP(MOP_vnotui, {&OpndDesc::Reg64VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) +/* MOP_vnotvi */ +DEFINE_MOP(MOP_vnotvi, {&OpndDesc::Reg128VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) + +/* MOP_wfmaxrrr */ +DEFINE_MOP(MOP_wfmaxrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fmax","0,1,2",1,kFloatDataProcessing2,0x1e204800) +/* MOP_xfmaxrrr */ +DEFINE_MOP(MOP_xfmaxrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fmax","0,1,2",1,kFloatDataProcessing2,0x1e604800) +/* MOP_wfminrrr */ +DEFINE_MOP(MOP_wfminrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fmin","0,1,2",1,kFloatDataProcessing2,0x1e205800) +/* MOP_xfminrrr */ +DEFINE_MOP(MOP_xfminrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fmin","0,1,2",1,kFloatDataProcessing2,0x1e605800) + +/* MOP_wsdivrrr */ +DEFINE_MOP(MOP_wsdivrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},CANTHROW,kLtDiv,"sdiv","0,1,2",1,kDataProcess2Src,0x1ac00c00) +/* MOP_xsdivrrr */ +DEFINE_MOP(MOP_xsdivrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},CANTHROW,kLtDiv,"sdiv","0,1,2",1,kDataProcess2Src,0x9ac00c00) +/* MOP_wudivrrr */ +DEFINE_MOP(MOP_wudivrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},CANTHROW,kLtDiv,"udiv","0,1,2",1,kDataProcess2Src,0x1ac00800) +/* MOP_xudivrrr */ +DEFINE_MOP(MOP_xudivrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},CANTHROW,kLtDiv,"udiv","0,1,2",1,kDataProcess2Src,0x9ac00800) + +/* MOP_wmsubrrrr */ +DEFINE_MOP(MOP_wmsubrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"msub","0,1,2,3",1,kDataProcess3Src,0x1b008000) +/* MOP_xmsubrrrr */ +DEFINE_MOP(MOP_xmsubrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"msub","0,1,2,3",1,kDataProcess3Src,0x9b008000) + +/* MOP_wmnegrrr */ +DEFINE_MOP(MOP_wmnegrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"mneg","0,1,2",1, kDataProcess3Src, 0x1b00fc00) +/* MOP_xmnegrrr */ +DEFINE_MOP(MOP_xmnegrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"mneg","0,1,2",1, kDataProcess3Src, 0x9b00fc00) + +/* MOP_wubfxrri5i5 */ +DEFINE_MOP(MOP_wubfxrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"ubfx","0,1,2,3",1,kBitfield,0x53000000) +/* MOP_xubfxrri6i6 */ +DEFINE_MOP(MOP_xubfxrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"ubfx","0,1,2,3",1,kBitfield,0xd3400000) + +/* MOP_wsbfxrri5i5 -- Signed Bitfield Extract */ +DEFINE_MOP(MOP_wsbfxrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"sbfx","0,1,2,3",1,kBitfield,0x13000000) +/* MOP_xsbfxrri6i6 */ +DEFINE_MOP(MOP_xsbfxrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"sbfx","0,1,2,3",1,kBitfield,0x93400000) + +/* MOP_wubfizrri5i5 -- Unsigned Bitfield Insert in Zero */ +DEFINE_MOP(MOP_wubfizrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"ubfiz","0,1,2,3",1,kBitfield,0x53000000) +/* MOP_xubfizrri6i6 */ +DEFINE_MOP(MOP_xubfizrri6i6, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"ubfiz","0,1,2,3",1,kBitfield,0xd3400000) + +/* MOP_xsbfizrri6i6 Signed Bitfield Insert in Zero */ +DEFINE_MOP(MOP_xsbfizrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"sbfiz","0,1,2,3",1, kBitfield, 0x13000000) + +/* MOP_wbfirri5i5 -- Bitfield Insert */ +DEFINE_MOP(MOP_wbfirri5i5, {&OpndDesc::Reg32IDS,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},ISMOVE,kLtAluShift,"bfi","0,1,2,3",1,kBitfield,0x33000000) +/* MOP_xbfirri6i6 */ +DEFINE_MOP(MOP_xbfirri6i6, {&OpndDesc::Reg64IDS,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},ISMOVE,kLtAluShift,"bfi","0,1,2,3",1,kBitfield,0xb3400000) + +/* MOP_xlslrri6,--- Logical Shift Left */ +DEFINE_MOP(MOP_xlslrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"lsl","0,1,2",1,kBitfield,0xd3400000) +/* MOP_wlslrri5 */ +DEFINE_MOP(MOP_wlslrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"lsl","0,1,2",1,kBitfield,0x53000000) +/* MOP_xasrrri6, */ +DEFINE_MOP(MOP_xasrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"asr","0,1,2",1,kBitfield,0x9340fc00) +/* MOP_wasrrri5 */ +DEFINE_MOP(MOP_wasrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"asr","0,1,2",1,kBitfield,0x13007c00) +/* MOP_xlsrrri6, */ +DEFINE_MOP(MOP_xlsrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"lsr","0,1,2",1,kBitfield,0xd340fc00) +/* MOP_wlsrrri5 */ +DEFINE_MOP(MOP_wlsrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"lsr","0,1,2",1,kBitfield,0x53007c00) +/* MOP_xlslrrr, */ +DEFINE_MOP(MOP_xlslrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"lsl","0,1,2",1,kDataProcess2Src,0x9ac02000) +/* MOP_wlslrrr */ +DEFINE_MOP(MOP_wlslrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"lsl","0,1,2",1,kDataProcess2Src,0x1ac02000) +/* MOP_xasrrrr, */ +DEFINE_MOP(MOP_xasrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"asr","0,1,2",1,kDataProcess2Src,0x9ac02800) +/* MOP_wasrrrr */ +DEFINE_MOP(MOP_wasrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"asr","0,1,2",1,kDataProcess2Src,0x1ac02800) +/* MOP_xlsrrrr, */ +DEFINE_MOP(MOP_xlsrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"lsr","0,1,2",1,kDataProcess2Src,0x9ac02400) +/* MOP_wlsrrrr */ +DEFINE_MOP(MOP_wlsrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"lsr","0,1,2",1,kDataProcess2Src,0x1ac02400) + +/* MOP_xrorrrr */ +DEFINE_MOP(MOP_xrorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"ror","0,1,2",1, kDataProcess2Src, 0x9ac02c00) +/* MOP_wrorrrr */ +DEFINE_MOP(MOP_wrorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"ror","0,1,2",1, kDataProcess2Src, 0x1ac02c00) +/* MOP_wtstri32 */ +DEFINE_MOP(MOP_wtstri32, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Imm32},0,kLtAlu,"tst","1,2",1, kLogicalImm, 0x7200001f) +/* MOP_xtstri64 */ +DEFINE_MOP(MOP_xtstri64, {&OpndDesc::CCD,&OpndDesc::Reg64ID,&OpndDesc::Imm64},0,kLtAlu,"tst","1,2",1, kLogicalImm, 0xf200001f) +/* MOP_wtstrr */ +DEFINE_MOP(MOP_wtstrr, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"tst","1,2",1, kLogicalReg, 0x6a00001f) +/* MOP_xtstrr */ +DEFINE_MOP(MOP_xtstrr, 
{&OpndDesc::CCD,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"tst","1,2",1, kLogicalReg, 0xea00001f) +/* MOP_wextrrrri5 -- Extracts a register from a pair of registers */ +DEFINE_MOP(MOP_wextrrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Imm5},0,kLtAluShift,"extr","0,1,2,3",1, kExtract, 0x13800000) +/* MOP_xextrrrri6 */ +DEFINE_MOP(MOP_xextrrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"extr","0,1,2,3",1, kExtract, 0x93c00000) + +/* MOP_wsfmovri imm8->s */ +DEFINE_MOP(MOP_wsfmovri, {&OpndDesc::Reg32FD,&OpndDesc::Imm8},ISMOVE,kLtFconst,"fmov","0,1",1,kFloatImm,0x1e201000) +/* MOP_xdfmovri imm8->d */ +DEFINE_MOP(MOP_xdfmovri, {&OpndDesc::Reg64FD,&OpndDesc::Imm8},ISMOVE,kLtFconst,"fmov","0,1",1,kFloatImm,0x1e601000) + +/* MOP_xcsneg -- Conditional Select Negation */ +DEFINE_MOP(MOP_wcsnegrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csneg","0,1,2,3",1,kConditionalSelect,0x5a800400) +DEFINE_MOP(MOP_xcsnegrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csneg","0,1,2,3",1,kConditionalSelect,0xda800400) +DEFINE_MOP(MOP_wcnegrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cneg","0,1,2",1, kConditionalSelect, 0x5a800400) +DEFINE_MOP(MOP_xcnegrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cneg","0,1,2",1, kConditionalSelect, 0xda800400) + +/* MOP_sabsrr */ +DEFINE_MOP(MOP_sabsrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},0,kLtFpalu,"fabs","0,1",1,kFloatDataProcessing1,0x1e20c000) +/* MOP_dabsrr */ +DEFINE_MOP(MOP_dabsrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},0,kLtFpalu,"fabs","0,1",1,kFloatDataProcessing1,0x1e60c000) + +/* MOP_winegrr */ +DEFINE_MOP(MOP_winegrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"neg","0,1",1,kAddSubReg,0x4b0003e0) +/* MOP_winegrre */ +DEFINE_MOP(MOP_winegrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"neg","0,1,2",1, kAddSubShiftReg, 0x4b0003e0) +/* neg MOP_xinegrr */ +DEFINE_MOP(MOP_xinegrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"neg","0,1",1,kAddSubReg,0xcb0003e0) +/* neg MOP_xinegrrs */ +DEFINE_MOP(MOP_xinegrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"neg","0,1,2",1, kAddSubShiftReg, 0xcb0003e0) +/* neg f32 */ +DEFINE_MOP(MOP_wfnegrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},0,kLtFpalu,"fneg","0,1",1,kFloatDataProcessing1,0x1e214000) +/* neg f64 */ +DEFINE_MOP(MOP_xfnegrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},0,kLtFpalu,"fneg","0,1",1,kFloatDataProcessing1,0x1e614000) + +/* MOP_sdivrrr */ +DEFINE_MOP(MOP_sdivrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtAdvsimdDivS,"fdiv","0,1,2",1,kFloatDataProcessing2,0x1e201800) +/* MOP_ddivrrr */ +DEFINE_MOP(MOP_ddivrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtAdvsimdDivD,"fdiv","0,1,2",1,kFloatDataProcessing2,0x1e601800) + +/* MOP_smadd */ +DEFINE_MOP(MOP_smadd, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fmadd","0,1,2,3",1, kFloatDataProcessing3, 0x1f000000) +/* MOP_dmadd */ +DEFINE_MOP(MOP_dmadd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fmadd","0,1,2,3",1, kFloatDataProcessing3, 0x1f400000) + +/* MOP_smsub */ +DEFINE_MOP(MOP_smsub, 
{&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fmsub","0,1,2,3",1, kFloatDataProcessing3, 0x1f008000)
+/* MOP_dmsub */
+DEFINE_MOP(MOP_dmsub, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fmsub","0,1,2,3",1, kFloatDataProcessing3, 0x1f408000)
+
+/* MOP_snmul */
+DEFINE_MOP(MOP_snmul, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fnmul","0,1,2",1, kFloatDataProcessing2, 0x1e208800)
+/* MOP_dnmul */
+DEFINE_MOP(MOP_dnmul, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fnmul","0,1,2",1, kFloatDataProcessing2, 0x1e608800)
+
+/* MOP_hcselrrrc --- Floating-point Conditional Select */
+DEFINE_MOP(MOP_hcselrrrc, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS,&OpndDesc::Reg16FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1,kFloatCondSelect,0x1ee00c00)
+/* MOP_scselrrrc */
+DEFINE_MOP(MOP_scselrrrc, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1,kFloatCondSelect,0x1e200c00)
+/* MOP_dcselrrrc */
+DEFINE_MOP(MOP_dcselrrrc, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1,kFloatCondSelect,0x1e600c00)
+
+/* MOP_wldli -- load 32-bit literal */
+DEFINE_MOP(MOP_wldli, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,kLoadLiteralReg,0x18000000)
+/* MOP_xldli -- load 64-bit literal */
+DEFINE_MOP(MOP_xldli, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1,kLoadLiteralReg,0x58000000)
+/* MOP_sldli -- load 32-bit literal */
+DEFINE_MOP(MOP_sldli, {&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,kLoadLiteralReg,0x1c000000)
+/* MOP_dldli -- load 64-bit literal */
+DEFINE_MOP(MOP_dldli, {&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1,kLoadLiteralReg,0x5c000000)
+
+/* AArch64 branches/calls */
+/* MOP_xbl -- branch with link (call); this is a special definition */
+DEFINE_MOP(MOP_xbl, {&OpndDesc::AddressName,&OpndDesc::ListSrc},ISCALL|CANTHROW,kLtBranch,"bl","0",1,kBranchImm,0x94000000)
+/* MOP_xblr -- branch with link (call) to register; this is a special definition */
+DEFINE_MOP(MOP_xblr, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc},ISCALL|CANTHROW,kLtBranch,"blr","0",1,kBranchReg,0xd63f0000)
+
+/* TLS descriptor */
+/*
+ * add x0, #:tprel_hi12:symbol, lsl #12
+ * add x0, #:tprel_lo12_nc:symbol
+ */
+DEFINE_MOP(MOP_tls_desc_rel, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::LiteralSrc},SPINTRINSIC,kLtAlu,"tlsdescrel","0,1",2)
+
+/*
+ * adrp x0, :tlsdesc:symbol
+ * ldr x1, [x0, #tlsdesc_lo12:symbol]
+ * add x0, #tlsdesc_lo12:symbol
+ * .tlsdesccall symbol
+ * blr x1
+ */
+DEFINE_MOP(MOP_tls_desc_call, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::ListSrc},ISCALL|CANTHROW|SPINTRINSIC,kLtBranch,"tlsdesccall","0",2)
+
+/*
+ * release registers occupied by MOP_tls_desc_call
+ */
+DEFINE_MOP(MOP_pseduo_tls_release, {&OpndDesc::Reg64IS}, 0,kLtUndef,"pseudo_tls_release","0",0)
+
+/* System register access */
+/* MOP_mrs */
+DEFINE_MOP(MOP_mrs, {&OpndDesc::Reg64ID,&OpndDesc::String0S},ISMOVE,kLtAlu,"mrs","0,1",1, kSystemInsn, 0xd53bd040)
+
+/* Inline asm */
+/* Number of instructions generated by inline asm is arbitrary. Use a large number here.
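+ * For instance, an illustrative source-level statement such as
+ *   __asm__ volatile("mov %0, %1" : "=r"(dst) : "r"(src));
+ * becomes a single MOP_asm whose final instruction count is only known after
+ * register allocation, hence the conservative count of 100 below.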
 */
+/* asm string, output list, clobber list, input list, output constraint, input constraint, out reg prefix, in reg prefix */
+DEFINE_MOP(MOP_asm, {&OpndDesc::String0S,&OpndDesc::ListDest,&OpndDesc::ListDest,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc},INLINEASM|CANTHROW|HASACQUIRE|HASRELEASE,kLtUndef,"asm","0,1,2,3",100)
+
+/* c sync builtins */
+/*
+ * intrinsic_sync_lock_test_setI w0, w1, x2, w3, label1
+ * label1:
+ * ldxr w0, [x2]
+ * stxr w1, w3, [x2]
+ * cbnz w1, label1
+ * dmb ish
+ */
+DEFINE_MOP(MOP_sync_lock_test_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_sync_lock_test_setI","0,1,2,3,4",5)
+
+/*
+ * intrinsic_sync_lock_test_setL x0, w1, x2, x3, label1
+ * label1:
+ * ldxr x0, [x2]
+ * stxr w1, x3, [x2]
+ * cbnz w1, label1
+ * dmb ish
+ */
+DEFINE_MOP(MOP_sync_lock_test_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_sync_lock_test_setL","0,1,2,3,4",5)
+
+/* AARCH64 LOADS */
+/* MOP_wldrsb --- Load Register Signed Byte */
+DEFINE_MOP(MOP_wldrsb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid,kLoadStoreReg,0x38c00000)
+/* MOP_xldrsb --- Load Register Signed Byte */
+DEFINE_MOP(MOP_xldrsb, {&OpndDesc::Reg64ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid, kLoadStoreReg, 0x38800000)
+/* MOP_wldrb */
+DEFINE_MOP(MOP_wldrb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid,kLoadStoreReg,0x38400000)
+/* MOP_wldrsh --- Load Register Signed Halfword */
+DEFINE_MOP(MOP_wldrsh, {&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid,kLoadStoreReg,0x78c00000)
+/* MOP_xldrsh --- Load Register Signed Halfword */
+DEFINE_MOP(MOP_xldrsh, {&OpndDesc::Reg64ID,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid, kLoadStoreReg,0x78800000)
+/* MOP_xldrsw --- Load Register Signed Word */
+DEFINE_MOP(MOP_xldrsw, {&OpndDesc::Reg64ID,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid)
+/* MOP_wldrh */
+DEFINE_MOP(MOP_wldrh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid,kLoadStoreReg,0x78400000)
+/* MOP_wldr */
+DEFINE_MOP(MOP_wldr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid,kLoadStoreReg,0xb8400000)
+/* MOP_xldr */
+DEFINE_MOP(MOP_xldr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1,StrLdr64ImmValid,kLoadStoreReg,0xf8400000)
+/* MOP_bldr */
+DEFINE_MOP(MOP_bldr, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtFLoad64,"ldr","0,1",1,StrLdr8ImmValid,kLoadStoreFloat,0x3c400000)
+/* MOP_hldr */
+DEFINE_MOP(MOP_hldr, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtFLoad64,"ldr","0,1",1,StrLdr16ImmValid,kLoadStoreFloat,0x7c400000)
+/* MOP_sldr */
+DEFINE_MOP(MOP_sldr, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid,kLoadStoreFloat,0xbc400000)
+/* MOP_dldr */
+DEFINE_MOP(MOP_dldr, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid,kLoadStoreFloat,0xfc400000)
+/* MOP_qldr */
+DEFINE_MOP(MOP_qldr,
{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid, kLoadStoreFloat, 0x3cc00000) + +/* AArch64 LDP/LDPSW */ +/* MOP_wldp */ +DEFINE_MOP(MOP_wldp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid,kLoadPair,0x28000000) +/* MOP_xldp */ +DEFINE_MOP(MOP_xldp, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid,kLoadPair,0xa8000000) +/* MOP_xldpsw */ +DEFINE_MOP(MOP_xldpsw, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid,kLoadPair,0x68400000) +/* MOP_sldp */ +DEFINE_MOP(MOP_sldp, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid,kLoadPairFloat,0x2c000000) +/* MOP_dldp */ +DEFINE_MOP(MOP_dldp, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid,kLoadPairFloat,0x6c000000) +/* MOP_qldp */ +DEFINE_MOP(MOP_qldp, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid, kLoadPairFloat, 0xac000000) + +/* AARCH64 Load with Acquire semantics */ +/* MOP_wldarb */ +DEFINE_MOP(MOP_wldarb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1,StrLdr8ImmValid,kLoadStoreAR,0x8dffc00) +/* MOP_wldarh */ +DEFINE_MOP(MOP_wldarh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1,StrLdr16ImmValid,kLoadStoreAR,0x48dffc00) +/* MOP_wldar */ +DEFINE_MOP(MOP_wldar, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr32ImmValid,kLoadStoreAR,0x88dffc00) +/* MOP_xldar */ +DEFINE_MOP(MOP_xldar, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr64ImmValid,kLoadStoreAR,0xc8dffc00) + +/* MOP_wmovkri16 */ +DEFINE_MOP(MOP_wmovkri16, {&OpndDesc::Reg32IDS,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movk","0,1,2",1,Imm16BitValid,kMoveWide,0x72800000) +/* MOP_xmovkri16 */ +DEFINE_MOP(MOP_xmovkri16, {&OpndDesc::Reg64IDS,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movk","0,1,2",1,Imm16BitValid,kMoveWide,0xf2800000) + +/* MOP_wmovzri16 */ +DEFINE_MOP(MOP_wmovzri16, {&OpndDesc::Reg32ID,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movz","0,1,2",1,Imm16BitValid,kMoveWide,0x52800000) +/* MOP_xmovzri16 */ +DEFINE_MOP(MOP_xmovzri16, {&OpndDesc::Reg64ID,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movz","0,1,2",1,Imm16BitValid,kMoveWide,0xd2800000) + +/* MOP_wmovnri16 */ +DEFINE_MOP(MOP_wmovnri16, {&OpndDesc::Reg32ID,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movn","0,1,2",1,Imm16BitValid,kMoveWide,0x12800000) +/* MOP_xmovnri16 */ +DEFINE_MOP(MOP_xmovnri16, {&OpndDesc::Reg64ID,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movn","0,1,2",1,Imm16BitValid,kMoveWide,0x92800000) + +/* AARCH64 Load exclusive with/without acquire semantics */ +DEFINE_MOP(MOP_wldxrb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrb","0,1",1,StrLdr8ImmValid,kLoadExclusive,0x85f7c00) +DEFINE_MOP(MOP_wldxrh, {&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrh","0,1",1,StrLdr16ImmValid,kLoadExclusive,0x485f7c00) +DEFINE_MOP(MOP_wldxr, 
{&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr32ImmValid,kLoadExclusive,0x885f7c00) +DEFINE_MOP(MOP_xldxr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr64ImmValid,kLoadExclusive,0xc85f7c00) + +DEFINE_MOP(MOP_wldaxrb,{&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrb","0,1",1,StrLdr8ImmValid,kLoadExclusive,0x85ffc00) +DEFINE_MOP(MOP_wldaxrh,{&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrh","0,1",1,StrLdr16ImmValid,kLoadExclusive,0x485ffc00) +DEFINE_MOP(MOP_wldaxr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr32ImmValid,kLoadExclusive,0x885ffc00) +DEFINE_MOP(MOP_xldaxr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr64ImmValid,kLoadExclusive,0xc85ffc00) + +DEFINE_MOP(MOP_wldaxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxp","0,1,2",1,StrLdr32ImmValid,kLoadExclusivePair,0x887f8000) +DEFINE_MOP(MOP_xldaxp, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxp","0,1,2",1,StrLdr64ImmValid,kLoadExclusivePair,0xc87f8000) + +/* MOP_vsqrts */ +DEFINE_MOP(MOP_vsqrts, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},CANTHROW,kLtAdvsimdDivS,"fsqrt","0,1",1,kFloatDataProcessing1,0x1e21c000) +/* MOP_vsqrtd */ +DEFINE_MOP(MOP_vsqrtd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},CANTHROW,kLtAdvsimdDivD,"fsqrt","0,1",1,kFloatDataProcessing1,0x1e61c000) + + +/* # Non Definitions */ +/* # As far as register allocation is concerned, the instructions below are non-definitions. 
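+ * For example, a conditional branch such as "beq .L0" only uses the condition
+ * flags and a label operand; it defines no register, in contrast to "csel" or
+ * "cset" above (an illustrative reading of the operand descriptions below).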
 */
+
+/* MOP_bcs */
+DEFINE_MOP(MOP_bcs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bcs","1",1, kCondBranch, 0x54000002)
+/* MOP_bcc */
+DEFINE_MOP(MOP_bcc, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bcc","1",1, kCondBranch, 0x54000003)
+/* MOP_beq */
+DEFINE_MOP(MOP_beq, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"beq","1",1,kCondBranch,0x54000000)
+/* MOP_bne */
+DEFINE_MOP(MOP_bne, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bne","1",1,kCondBranch,0x54000001)
+/* MOP_blt */
+DEFINE_MOP(MOP_blt, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"blt","1",1,kCondBranch,0x5400000b)
+/* MOP_ble */
+DEFINE_MOP(MOP_ble, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"ble","1",1,kCondBranch,0x5400000d)
+/* MOP_bgt */
+DEFINE_MOP(MOP_bgt, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bgt","1",1,kCondBranch,0x5400000c)
+/* MOP_bge */
+DEFINE_MOP(MOP_bge, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bge","1",1,kCondBranch,0x5400000a)
+/* MOP_blo equal to MOP_blt for unsigned comparison */
+DEFINE_MOP(MOP_blo, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"blo","1",1,kCondBranch,0x54000003)
+/* MOP_bls equal to MOP_ble for unsigned comparison */
+DEFINE_MOP(MOP_bls, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bls","1",1,kCondBranch,0x54000009)
+/* MOP_bhs equal to MOP_bge for unsigned comparison */
+DEFINE_MOP(MOP_bhs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bhs","1",1,kCondBranch,0x54000002)
+/* MOP_bhi equal to MOP_bgt for float comparison */
+DEFINE_MOP(MOP_bhi, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bhi","1",1,kCondBranch,0x54000008)
+/* MOP_bpl equal to MOP_bge for float comparison */
+DEFINE_MOP(MOP_bpl, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bpl","1",1,kCondBranch,0x54000005)
+DEFINE_MOP(MOP_bmi, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bmi","1",1,kCondBranch,0x54000004)
+DEFINE_MOP(MOP_bvc, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bvc","1",1,kCondBranch,0x54000007)
+DEFINE_MOP(MOP_bvs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bvs","1",1,kCondBranch,0x54000006)
+
+/* MOP_xret AARCH64 Specific */
+DEFINE_MOP(MOP_xret, {},CANTHROW,kLtBranch,"ret","",1,kBranchReg,0xd65f03c0)
+/* MOP_clrex AARCH64 Specific */
+DEFINE_MOP(MOP_clrex, {},CANTHROW,kLtBranch,"clrex","",1, kBranchReg, 0xd503303f)
+
+/* AARCH64 Floating-Point COMPARES signaling versions */
+/* MOP_hcmperi -- AArch64 cmp has no dest operand */
+DEFINE_MOP(MOP_hcmperi, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1,kFloatCompare,0x1ee02018)
+/* MOP_hcmperr -- register, shifted register, AArch64 cmp has no dest operand */
+DEFINE_MOP(MOP_hcmperr, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::Reg16FS},0,kLtFpalu,"fcmpe","1,2",1,kFloatCompare,0x1ee02010)
+
+/* MOP_scmperi -- AArch64 cmp has no dest operand */
+DEFINE_MOP(MOP_scmperi, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1,kFloatCompare,0x1e202018)
+/* MOP_scmperr */
+DEFINE_MOP(MOP_scmperr, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fcmpe","1,2",1,kFloatCompare,0x1e202010)
+
+/* MOP_dcmperi -- AArch64 cmp has no dest operand */
+DEFINE_MOP(MOP_dcmperi, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1,kFloatCompare,0x1e602018)
+/*
MOP_dcmperr */ +DEFINE_MOP(MOP_dcmperr, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fcmpe","1,2",1,kFloatCompare,0x1e602010) + +/* AARCH64 Floating-Point COMPARES non-signaling (quiet) versions */ +/* MOP_hcmpqri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_hcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1,kFloatCompare,0x1ee02008) +/* MOP_hcmpqrr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_hcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::Reg16FS},0,kLtFpalu,"fcmp","1,2",1,kFloatCompare,0x1ee02000) + +/* MOP_scmpqri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_scmpqri, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1,kFloatCompare,0x1e202008) +/* MOP_scmpqrr */ +DEFINE_MOP(MOP_scmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fcmp","1,2",1,kFloatCompare,0x1e202000) + +/* MOP_dcmpqri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_dcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1,kFloatCompare,0x1e602008) +/* MOP_dcmpqrr */ +DEFINE_MOP(MOP_dcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fcmp","1,2",1,kFloatCompare,0x1e602000) + +/* AARCH64 Integer COMPARES */ +/* MOP_wcmpri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmpri, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"cmp","1,2",1,Imm12BitValid,kAddSubImm,0x7100001f) +/* MOP_wcmprr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmprr, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"cmp","1,2",1,kAddSubReg,0x6b00001f) +/* MOP_wcmprrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"cmp","1,2,3",1, kAddSubShiftReg, 0x6b00001f) +/* MOP_wwcmprre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmp","1,2,3",1, kAddSubExtendReg, 0x6b20001f) +/* MOP_xcmpri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmpri, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm16},0,kLtAlu,"cmp","1,2",1,Imm16BitValid,kAddSubImm,0xf100001f) +/* MOP_xcmprr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmprr, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"cmp","1,2",1,kAddSubReg,0xeb00001f) +/* MOP_xcmprrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"cmp","1,2,3",1, kAddSubShiftReg, 0xeb00001f) +/* MOP_xwcmprre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmp","1,2,3",1, kAddSubExtendReg, 0xeb20001f) + +/* MOP_wccmpriic -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wccmpriic, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1,kCondCompareImm,0x7a400800) +/* MOP_wccmprric -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wccmprric, {&OpndDesc::CCD, 
&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1,kCondCompareReg,0x7a400000) +/* MOP_xccmpriic -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xccmpriic, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm5,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1,kCondCompareImm,0xfa400800) +/* MOP_xccmprric -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xccmprric, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1,kCondCompareReg,0xfa400000) + +/* MOP_wcmnri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmnri, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"cmn","1,2",1,Imm12BitValid,kAddSubImm,0x3100001f) +/* MOP_wcmnrr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"cmn","1,2",1,kAddSubReg,0x2b00001f) +/* MOP_wcmnrrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"cmn","1,2,3",1, kAddSubShiftReg, 0x2b00001f) +/* MOP_wwcmnrre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmn","1,2,3",1, kAddSubExtendReg, 0x2b20001f) +/* MOP_xcmnri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmnri, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm16},0,kLtAlu,"cmn","1,2",1,Imm16BitValid,kAddSubImm,0xb100001f) +/* MOP_xcmnrr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"cmn","1,2",1,kAddSubReg,0xab00001f) +/* MOP_xcmnrrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"cmn","1,2,3",1, kAddSubShiftReg, 0xab00001f) +/* MOP_xwcmnrre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmn","1,2,3",1, kAddSubExtendReg, 0xab20001f) + +/* AArch64 branches */ +/* MOP_xbr -- branch to register */ +DEFINE_MOP(MOP_xbr, {&OpndDesc::Reg64IS,&OpndDesc::LiteralSrc},ISUNCONDBRANCH,kLtBranch,"br","0",1,kBranchReg,0xd61f0000) +/* MOP_Tbbuncond */ +DEFINE_MOP(MOP_xuncond, {&OpndDesc::AddressName},ISUNCONDBRANCH,kLtBranch,"b","0",1,kBranchImm,0x14000000) + +/* MOP_wcbnz --- Compare and Branch on Nonzero */ +DEFINE_MOP(MOP_wcbnz, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbnz","0,1",1,kCompareBranch,0x35000000) +/* MOP_xcbnz */ +DEFINE_MOP(MOP_xcbnz, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbnz","0,1",1,kCompareBranch,0xb5000000) +/* MOP_wcbz --- Compare and Branch on zero */ +DEFINE_MOP(MOP_wcbz, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbz","0,1",1,kCompareBranch,0x34000000) +/* MOP_xcbz */ +DEFINE_MOP(MOP_xcbz, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbz","0,1",1,kCompareBranch,0xb4000000) + +/* MOP_wtbnz --- Test bit and Branch if Nonzero */ +DEFINE_MOP(MOP_wtbnz, 
{&OpndDesc::Reg32IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbnz","0,1,2",1,kTestBranch,0x37000000) +/* MOP_xtbnz */ +DEFINE_MOP(MOP_xtbnz, {&OpndDesc::Reg64IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbnz","0,1,2",1,kTestBranch,0xb7000000) +/* MOP_wtbz --- Test bit and Branch if Zero */ +DEFINE_MOP(MOP_wtbz, {&OpndDesc::Reg32IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbz","0,1,2",1,kTestBranch,0x36000000) +/* MOP_xtbz */ +DEFINE_MOP(MOP_xtbz, {&OpndDesc::Reg64IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbz","0,1,2",1,kTestBranch,0xb6000000) + +/* AARCH64 STORES */ +/* MOP_wstrb -- Store Register Byte */ +DEFINE_MOP(MOP_wstrb, {&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid,kLoadStoreReg,0x38000000) +/* MOP_wstrh -- Store Register Halfword */ +DEFINE_MOP(MOP_wstrh, {&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid,kLoadStoreReg,0x78000000) +/* MOP_wstr -- Store Register Word */ +DEFINE_MOP(MOP_wstr, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid,kLoadStoreReg,0xb8000000) +/* MOP_xstr -- Store Register Double word */ +DEFINE_MOP(MOP_xstr, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid,kLoadStoreReg,0xf8000000) + +/* MOP_sstr -- Store Register SIMD/FP Float */ +DEFINE_MOP(MOP_sstr, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid,kLoadStoreFloat,0xbc000000) +/* MOP_dstr -- Store Register SIMD/FP Double */ +DEFINE_MOP(MOP_dstr, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid,kLoadStoreFloat,0xfc000000) +/* MOP_qstr -- Store Register SIMD/FP Double */ +DEFINE_MOP(MOP_qstr, {&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid, kLoadStoreFloat, 0x3c800000) + +/* AArch64 STP. */ +/* MOP_wstp */ +DEFINE_MOP(MOP_wstp, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid,kStorePair,0x28000000) +/* MOP_xstp */ +DEFINE_MOP(MOP_xstp, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr64PairImmValid,kStorePair,0xa8000000) +/* AArch64 does not define STPSW. It has no practical value. 
*/ +/* MOP_sstp */ +DEFINE_MOP(MOP_sstp, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid,kStorePairFloat,0x2c000000) +/* MOP_dstp */ +DEFINE_MOP(MOP_dstp, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid,kStorePairFloat,0x6c000000) +/* MOP_qstp */ +DEFINE_MOP(MOP_qstp, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid, kStorePairFloat, 0xac000000) + +/* AARCH64 Store with Release semantics */ +/* MOP_wstlrb -- Store-Release Register Byte */ +DEFINE_MOP(MOP_wstlrb, {&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1,StrLdr8ImmValid,kLoadStoreAR,0x89ffc00) +/* MOP_wstlrh -- Store-Release Register Halfword */ +DEFINE_MOP(MOP_wstlrh, {&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1,StrLdr16ImmValid,kLoadStoreAR,0x489ffc00) +/* MOP_wstlr -- Store-Release Register Word */ +DEFINE_MOP(MOP_wstlr, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr32ImmValid,kLoadStoreAR,0x889ffc00) +/* MOP_xstlr -- Store-Release Register Double word */ +DEFINE_MOP(MOP_xstlr, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr64ImmValid,kLoadStoreAR,0xc89ffc00) + +/* AARCH64 Store exclusive with/without release semantics */ +DEFINE_MOP(MOP_wstxrb, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrb","0,1,2",1,StrLdr8ImmValid,kStoreExclusive,0x8007c00) +DEFINE_MOP(MOP_wstxrh, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrh","0,1,2",1,StrLdr16ImmValid,kStoreExclusive,0x48007c00) +DEFINE_MOP(MOP_wstxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr32ImmValid,kStoreExclusive,0x88007c00) +DEFINE_MOP(MOP_xstxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr64ImmValid,kStoreExclusive,0xc8007c00) + +DEFINE_MOP(MOP_wstlxrb,{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrb","0,1,2",1,StrLdr8ImmValid,kStoreExclusive,0x800fc00) +DEFINE_MOP(MOP_wstlxrh,{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrh","0,1,2",1,StrLdr16ImmValid,kStoreExclusive,0x4800fc00) +DEFINE_MOP(MOP_wstlxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1,StrLdr32ImmValid,kStoreExclusive,0x8800fc00) +DEFINE_MOP(MOP_xstlxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1,StrLdr64ImmValid,kStoreExclusive,0xc800fc00) + +DEFINE_MOP(MOP_wstlxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid,kStoreExclusivePair,0x88208000) +DEFINE_MOP(MOP_xstlxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid,kStoreExclusivePair,0xc8208000) + 
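The store-exclusive forms above write a status register and only succeed while the exclusive monitor armed by a matching load-exclusive is still held, so they are always used inside a retry loop; the intrinsic pseudo-ops defined further down in this table (MOP_get_and_addI, MOP_compare_and_swapI, and friends) expand into exactly such loops. A minimal sketch of the pattern, assuming GCC/Clang extended inline assembly on AArch64 — the function name and signature here are illustrative only, not part of this patch:

```cpp
#include <cstdint>

// Hypothetical stand-alone illustration of the ldaxr/stlxr retry loop; the
// MOP_compare_and_swapI expansion documented later in this file has the same
// shape (load-acquire exclusive, compare, store-release exclusive, retry).
bool CompareAndSwapWord(volatile uint32_t *addr, uint32_t expected, uint32_t desired) {
    uint32_t observed = 0;  // value read by ldaxr
    uint32_t status = 0;    // stlxr result: 0 on success, 1 if the monitor was lost
    asm volatile(
        "1: ldaxr   %w0, [%2]      \n"  // load-acquire exclusive, arms the monitor
        "   cmp     %w0, %w3       \n"
        "   b.ne    2f             \n"  // observed value differs: fail without storing
        "   stlxr   %w1, %w4, [%2] \n"  // store-release exclusive, may fail
        "   cbnz    %w1, 1b        \n"  // monitor was lost: retry from the load
        "2:\n"
        : "=&r"(observed), "=&r"(status)
        : "r"(addr), "r"(expected), "r"(desired)
        : "cc", "memory");
    (void)status;  // only consumed inside the asm loop
    return observed == expected;
}
```

Keeping the whole loop inside a single pseudo-MOP (note the HASLOOP flag on those definitions) presumably ensures that later passes cannot schedule unrelated memory accesses between the ldaxr and the stlxr, which could repeatedly clear the monitor and keep the loop spinning.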
+/* Memory barriers */ +/* MOP_dmb_ishld */ +DEFINE_MOP(MOP_dmb_ishld, {}, HASACQUIRE|ISDMB,kLtBranch, "dmb\tishld", "",1,kSystemInsn,0xd50339bf) +/* MOP_dmb_ishst */ +DEFINE_MOP(MOP_dmb_ishst, {}, HASRELEASE|ISDMB,kLtBranch, "dmb\tishst", "",1,kSystemInsn,0xd5033abf) +/* MOP_dmb_ish */ +DEFINE_MOP(MOP_dmb_ish, {}, HASACQUIRE|HASRELEASE|ISDMB,kLtBranch, "dmb\tish", "",1,kSystemInsn,0xd5033bbf) + +/* Neon simd, r:nonvector reg, u:64b vector reg, v:128b vector reg */ +/* Following ISMOVE vector instructions must be in a group, starting with vmovui and end with vmovvv */ +DEFINE_MOP(MOP_vmovui, {&OpndDesc::Reg64VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"movi","0,1",1) +DEFINE_MOP(MOP_vmovvi, {&OpndDesc::Reg128VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"movi","0,1",1) +DEFINE_MOP(MOP_vmovuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) +DEFINE_MOP(MOP_vmovvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) +DEFINE_MOP(MOP_vwmovru, {&OpndDesc::Reg32ID,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umov","0,1",1) +DEFINE_MOP(MOP_vwmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umov","0,1",1) +DEFINE_MOP(MOP_vxmovrv, {&OpndDesc::Reg64ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umov","0,1",1) +DEFINE_MOP(MOP_vwdupur, {&OpndDesc::Reg64VD,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vwdupvr, {&OpndDesc::Reg128VD,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vxdupur, {&OpndDesc::Reg64VD,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vxdupvr, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vduprv, {&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vextuuui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ext","0,1,2,3",1) +DEFINE_MOP(MOP_vextvvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ext","0,1,2,3",1) +DEFINE_MOP(MOP_vsabdlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"sabdl","0,1,2",1) +DEFINE_MOP(MOP_vuabdlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uabdl","0,1,2",1) +DEFINE_MOP(MOP_vsabdl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sabdl2","0,1,2",1) +DEFINE_MOP(MOP_vuabdl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uabdl2","0,1,2",1) +DEFINE_MOP(MOP_vspadaluu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"sadalp","0,1",1) +DEFINE_MOP(MOP_vspadalvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"sadalp","0,1",1) +DEFINE_MOP(MOP_vupadaluu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uadalp","0,1",1) +DEFINE_MOP(MOP_vupadalvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"uadalp","0,1",1) +DEFINE_MOP(MOP_vspadduu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"saddlp","0,1",1) +DEFINE_MOP(MOP_vspaddvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"saddlp","0,1",1) +DEFINE_MOP(MOP_vupadduu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uaddlp","0,1",1) +DEFINE_MOP(MOP_vupaddvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"uaddlp","0,1",1) +DEFINE_MOP(MOP_vwinsur, {&OpndDesc::Reg64VDS,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vxinsur, 
{&OpndDesc::Reg64VDS,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vwinsvr, {&OpndDesc::Reg128VDS,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vxinsvr, {&OpndDesc::Reg128VDS,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vrev16dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev16","0,1",1) +DEFINE_MOP(MOP_vrev32dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev32","0,1",1) +DEFINE_MOP(MOP_vrev64dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev64","0,1",1) +DEFINE_MOP(MOP_vrev16qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev16","0,1",1) +DEFINE_MOP(MOP_vrev32qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev32","0,1",1) +DEFINE_MOP(MOP_vrev64qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev64","0,1",1) +DEFINE_MOP(MOP_vbaddvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vhaddvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vsaddvru,{&OpndDesc::Reg32FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vbaddvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vhaddvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vsaddvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vdaddvrv,{&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addp","0,1",1) + +DEFINE_MOP(MOP_vzcmequu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) +DEFINE_MOP(MOP_vzcmgtuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vzcmgeuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vzcmltuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmlt","0,1,2",1) +DEFINE_MOP(MOP_vzcmleuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmle","0,1,2",1) +DEFINE_MOP(MOP_vzcmeqvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) +DEFINE_MOP(MOP_vzcmgtvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vzcmgevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vzcmltvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmlt","0,1,2",1) +DEFINE_MOP(MOP_vzcmlevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmle","0,1,2",1) +DEFINE_MOP(MOP_vcmequuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) +DEFINE_MOP(MOP_vcmgeuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vcmgtuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vcmhiuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmhi","0,1,2",1) +DEFINE_MOP(MOP_vcmhsuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmhs","0,1,2",1) +DEFINE_MOP(MOP_vcmeqvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) 
+DEFINE_MOP(MOP_vcmgevvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vcmgtvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vcmhivvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmhi","0,1,2",1) +DEFINE_MOP(MOP_vcmhsvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmhs","0,1,2",1) +DEFINE_MOP(MOP_vbsluuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"bsl","0,1,2",1) +DEFINE_MOP(MOP_vbslvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"bsl","0,1,2",1) + +DEFINE_MOP(MOP_vshluuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) +DEFINE_MOP(MOP_vshlvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) +DEFINE_MOP(MOP_vushluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) +DEFINE_MOP(MOP_vushlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) + +DEFINE_MOP(MOP_vushluui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) +DEFINE_MOP(MOP_vushlvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) +DEFINE_MOP(MOP_vushruui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) +DEFINE_MOP(MOP_vushrvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) + +DEFINE_MOP(MOP_vshllvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shll","0,1,2",1) +DEFINE_MOP(MOP_vushllvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushll","0,1,2",1) +DEFINE_MOP(MOP_vxtnuv, {&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"xtn","0,1",1) +DEFINE_MOP(MOP_vsxtlvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sxtl","0,1",1) +DEFINE_MOP(MOP_vuxtlvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uxtl","0,1",1) +DEFINE_MOP(MOP_vxtn2uv, {&OpndDesc::Reg64VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"xtn2","0,1",1) +DEFINE_MOP(MOP_vsxtl2vv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sxtl2","0,1",1) +DEFINE_MOP(MOP_vuxtl2vv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uxtl2","0,1",1) + +DEFINE_MOP(MOP_vshruui, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshr","0,1,2",1) +DEFINE_MOP(MOP_vshrvvi, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshr","0,1,2",1) +DEFINE_MOP(MOP_vshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shrn","0,1,2",1) + +DEFINE_MOP(MOP_vtbl1vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"tbl","0,1,2",1) +DEFINE_MOP(MOP_vsmaddvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smlal","0,1,2",1) +DEFINE_MOP(MOP_vumaddvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umlal","0,1,2",1) +DEFINE_MOP(MOP_vsmullvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smull","0,1,2",1) +DEFINE_MOP(MOP_vumullvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umull","0,1,2",1) 
+DEFINE_MOP(MOP_vsmull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smull2","0,1,2",1) +DEFINE_MOP(MOP_vumull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umull2","0,1,2",1) +DEFINE_MOP(MOP_vabsuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"abs","0,1",1) +DEFINE_MOP(MOP_vabsvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"abs","0,1",1) +DEFINE_MOP(MOP_vadduuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"add","0,1,2",1) +DEFINE_MOP(MOP_vsaddlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddl","0,1,2",1) +DEFINE_MOP(MOP_vuaddlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddl","0,1,2",1) +DEFINE_MOP(MOP_vsaddl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddl2","0,1,2",1) +DEFINE_MOP(MOP_vuaddl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddl2","0,1,2",1) +DEFINE_MOP(MOP_vsaddwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddw","0,1,2",1) +DEFINE_MOP(MOP_vuaddwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddw","0,1,2",1) +DEFINE_MOP(MOP_vsaddw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddw2","0,1,2",1) +DEFINE_MOP(MOP_vuaddw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddw2","0,1,2",1) +DEFINE_MOP(MOP_vaddvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"add","0,1,2",1) +DEFINE_MOP(MOP_vmuluuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mul","0,1,2",1) +DEFINE_MOP(MOP_vmulvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mul","0,1,2",1) +DEFINE_MOP(MOP_vsubuuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) +DEFINE_MOP(MOP_vsubvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) +DEFINE_MOP(MOP_vanduuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"and","0,1,2",1) +DEFINE_MOP(MOP_vandvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"and","0,1,2",1) +DEFINE_MOP(MOP_voruuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"orr","0,1,2",1) +DEFINE_MOP(MOP_vorvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"orr","0,1,2",1) +DEFINE_MOP(MOP_vxoruuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"eor","0,1,2",1) +DEFINE_MOP(MOP_vxorvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"eor","0,1,2",1) +DEFINE_MOP(MOP_vnotuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"not","0,1",1) +DEFINE_MOP(MOP_vnotvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"not","0,1",1) +DEFINE_MOP(MOP_vneguu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"neg","0,1",1) +DEFINE_MOP(MOP_vnegvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"neg","0,1",1) +DEFINE_MOP(MOP_vssublvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ssubl","0,1,2",1) +DEFINE_MOP(MOP_vusublvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usubl","0,1,2",1) 
+DEFINE_MOP(MOP_vssubl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ssubl2","0,1,2",1)
+DEFINE_MOP(MOP_vusubl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usubl2","0,1,2",1)
+DEFINE_MOP(MOP_vssubwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ssubw","0,1,2",1)
+DEFINE_MOP(MOP_vusubwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usubw","0,1,2",1)
+DEFINE_MOP(MOP_vssubw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ssubw2","0,1,2",1)
+DEFINE_MOP(MOP_vusubw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usubw2","0,1,2",1)
+DEFINE_MOP(MOP_vzip1vvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"zip1","0,1,2",1)
+DEFINE_MOP(MOP_vzip2vvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"zip2","0,1,2",1)
+
+/*
+ * MOP_clinit
+ * will be emitted as four instructions in a row:
+ * adrp xd, :got:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B
+ * ldr xd, [xd,#:got_lo12:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B]
+ * ldr xd, [xd,#112]
+ * ldr wzr, [xd]
+ */
+DEFINE_MOP(MOP_clinit, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW|ISINTRINSIC,kLtClinit,"intrinsic_clinit","0,1",4)
+
+/*
+ * MOP_counter
+ * will be emitted as four instructions in a row:
+ * adrp x1, :got:__profile_table + idx
+ * ldr w17, [x1,#:got_lo12:__profile_table]
+ * add w17, w17, #1
+ * str w17, [x1,#:got_lo12:__profile_table]
+ */
+DEFINE_MOP(MOP_counter, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW|ISINTRINSIC,kLtClinit,"intrinsic_counter","0,1", 4)
+
+/*
+ * will be emitted as two instructions in a row:
+ * ldr wd, [xs] // xd and xs should be different registers
+ * ldr wd, [xd]
+ */
+DEFINE_MOP(MOP_lazy_ldr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|SPINTRINSIC|ISINTRINSIC,kLtClinitTail,"intrinsic_lazyload","0,1",2)
+
+/*
+ * will be emitted as three instructions in a row:
+ * adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset
+ * ldr xd, [xd,#:got_lo12:__staticDecoupleValueOffset$$xx+offset]
+ * ldr xzr, [xd]
+ */
+DEFINE_MOP(MOP_lazy_ldr_static, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW|ISINTRINSIC,kLtAdrpLdr,"intrinsic_lazyloadstatic","0,1",3)
+
+/* A pseudo instruction that follows MOP_lazy_ldr, to make sure xs and xd are allocated to different physical registers. */
+DEFINE_MOP(MOP_lazy_tail, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},0,kLtUndef,"pseudo_lazy_tail","",0)
+
+/* will be emitted as two instructions in a row:
+ * adrp xd, _PTR__cinf_Ljava_2Flang_2FSystem_3B
+ * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B]
+ * MOP_adrp_ldr
+ */
+DEFINE_MOP(MOP_adrp_ldr, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc},ISATOMIC|CANTHROW|ISINTRINSIC,kLtAdrpLdr,"intrinsic_adrpldr","0,1",2)
+
+/* will be emitted as two instructions in a row:
+ * adrp xd, label
+ * add xd, xd, #:lo12:label
+ */
+DEFINE_MOP(MOP_adrp_label, {&OpndDesc::Reg64ID, &OpndDesc::Imm64},ISINTRINSIC,kLtAlu,"intrinsic_adrplabel","0,1", 2)
+
+/*
+ * will be emitted as three instructions in a row:
+ * adrp xd, :got:__arrayClassCacheTable$$xxx+offset
+ * ldr xd, [xd,#:got_lo12:__arrayClassCacheTable$$xx+offset]
+ * ldr xzr, [xd]
+ */
+DEFINE_MOP(MOP_arrayclass_cache_ldr, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW|ISINTRINSIC,kLtAdrpLdr,"intrinsic_loadarrayclass","0,1",3)
+
+/*
+ * ldr x17, [xs,#112]
+ * ldr wzr, [x17]
+ */
+DEFINE_MOP(MOP_clinit_tail, {&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|ISINTRINSIC,kLtClinitTail,"intrinsic_clinit_tail","0",2)
+
+/*
+ * intrinsic Unsafe.getAndAddInt
+ * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr w0, [xt]
+ * add wt, w0, w3
+ * stlxr ws, wt, [xt]
+ * cbnz ws, label
+ */
+DEFINE_MOP(MOP_get_and_addI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_add_int","",5)
+/*
+ * intrinsic Unsafe.getAndAddLong
+ * intrinsic_get_add_long x0, xt, xs, ws, x1, x2, x3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr x0, [xt]
+ * add xs, x0, x3
+ * stlxr ws, xs, [xt]
+ * cbnz ws, label
+ */
+DEFINE_MOP(MOP_get_and_addL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_add_long","",5)
+
+/*
+ * intrinsic Unsafe.getAndSetInt
+ * intrinsic_get_set_int w0, xt, x1, x2, w3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr w0, [xt]
+ * stlxr w2, w3, [xt]
+ * cbnz w2, label
+ */
+DEFINE_MOP(MOP_get_and_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_set_int","0,1,2,3,4",4)
+/*
+ * intrinsic Unsafe.getAndSetLong
+ * intrinsic_get_set_long x0, x1, x2, x3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr x0, [xt]
+ * stlxr w2, x3, [xt]
+ * cbnz w2, label
+ */
+DEFINE_MOP(MOP_get_and_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_set_long","0,1,2,3,4",4)
+
+/*
+ * intrinsic Unsafe.compareAndSwapInt
+ * intrinsic_compare_swap_int x0, xt, ws, x1, x2, w3, w4, label1, label2
+ * add xt, x1, x2
+ * label1:
+ * ldaxr ws, [xt]
+ * cmp ws, w3
+ * b.ne label2
+ * stlxr ws, w4, [xt]
+ * cbnz ws, label1
+ * label2:
+ * cset x0, eq
+ */
+DEFINE_MOP(MOP_compare_and_swapI, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_compare_swap_int","0,1,2,3,4,5,6",7)
+/*
+ * intrinsic Unsafe.compareAndSwapLong
+ * intrinsic_compare_swap_long x0, xt, xs, x1, x2, x3, x4, label1, label2
+ * add xt, x1, x2
+ * label1:
+ * ldaxr xs, [xt]
+ * cmp xs, x3
+ * b.ne label2
+ * stlxr ws, x4, [xt]
+ * cbnz ws, label1
+ * label2:
+ * cset x0, eq
+ */
+DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_compare_swap_long","0,1,2,3,4,5,6",7)
+
+/*
+ * intrinsic String.indexOf(Ljava/lang/String;)I
+ * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET
+ * cmp w4, w2
+ * b.gt .Label.NOMATCH
+ * sub w2, w2, w4
+ * sub w4, w4, #8
+ * mov w10, w2
+ * uxtw x4, w4
+ * uxtw x2, w2
+ * add x3, x3, x4
+ * add x1, x1, x2
+ * neg x4, x4
+ * neg x2, x2
+ * ldr x5, [x3,x4]
+ * .Label.FIRST_LOOP:
+ * ldr x7, [x1,x2]
+ * cmp x5, x7
+ * b.eq .Label.STR1_LOOP
+ * .Label.STR2_NEXT:
+ * adds x2, x2, #1
+ * b.le .Label.FIRST_LOOP
+ * b .Label.NOMATCH
+ * .Label.STR1_LOOP:
+ * adds x8, x4, #8
+ * add x9, x2, #8
+ * b.ge .Label.LAST_WORD
+ * .Label.STR1_NEXT:
+ * ldr x6, [x3,x8]
+ * ldr x7, [x1,x9]
+ * cmp x6, x7
+ * b.ne .Label.STR2_NEXT
+ * adds x8, x8, #8
+ * add x9, x9, #8
+ * b.lt .Label.STR1_NEXT
+ * .Label.LAST_WORD:
+ * ldr x6, [x3]
+ * sub x9, x1, x4
+ * ldr x7, [x9,x2]
+ * cmp x6, x7
+ * b.ne .Label.STR2_NEXT
+ * add w0, w10, w2
+ * b .Label.RET
+ * .Label.NOMATCH:
+ * mov w0, #-1
+ * .Label.RET:
+ */
+DEFINE_MOP(MOP_string_indexof, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IDS,&OpndDesc::Reg32IDS,&OpndDesc::Reg64IDS,&OpndDesc::Reg32IDS,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_string_indexof","0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17",36)
+
+/* MOP_tail_call_opt_xbl -- branch without link (tail call); this is a special definition */
+DEFINE_MOP(MOP_tail_call_opt_xbl, {&OpndDesc::AddressName,&OpndDesc::ListSrc},CANTHROW|ISTAILCALL,kLtBranch,"b","0", 1, kBranchImm,0x14000000)
+/* MOP_tail_call_opt_xblr -- branch without link (tail call) to register; this is a special definition */
+DEFINE_MOP(MOP_tail_call_opt_xblr, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc},CANTHROW|ISTAILCALL,kLtBranch,"br","0", 1, kBranchReg, 0xd61f0000)
+
+/* MOP_pseudo_param_def_x, */
+DEFINE_MOP(MOP_pseudo_param_def_x, {&OpndDesc::Reg64ID},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_def_w, */
+DEFINE_MOP(MOP_pseudo_param_def_w, {&OpndDesc::Reg32ID},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_def_d, */
+DEFINE_MOP(MOP_pseudo_param_def_d, {&OpndDesc::Reg64FD},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_def_s, */
+DEFINE_MOP(MOP_pseudo_param_def_s, {&OpndDesc::Reg32FD},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_store_x, */
+DEFINE_MOP(MOP_pseudo_param_store_x, {&OpndDesc::Mem64D},0,kLtUndef,"//MOP_pseudo_param_store_x","0", 0)
+
+/* MOP_pseudo_param_store_w, */
+DEFINE_MOP(MOP_pseudo_param_store_w, {&OpndDesc::Mem32D},0,kLtUndef,"//MOP_pseudo_param_store_w","0", 0)
+
+/* MOP_pseudo_ref_init_x, */
+DEFINE_MOP(MOP_pseudo_ref_init_x, {&OpndDesc::Mem64D},0,kLtUndef,"//MOP_pseudo_ref_init_x","0", 0)
+
+/* MOP_pseudo_ret_int, */
+DEFINE_MOP(MOP_pseudo_ret_int, {&OpndDesc::Reg64IS},0,kLtUndef,"//MOP_pseudo_ret_int","", 0)
+
+/* MOP_pseudo_ret_float, */
+DEFINE_MOP(MOP_pseudo_ret_float, {&OpndDesc::Reg64FS},0,kLtUndef,"//MOP_pseudo_ret_float","", 0)
+
+/* When an exception occurs, R0 and R1 may be defined by runtime code. */
+/* MOP_pseudo_eh_def_x, */
+DEFINE_MOP(MOP_pseudo_eh_def_x, {&OpndDesc::Reg64ID},0,kLtUndef,"//MOP_pseudo_eh_def_x","0", 0)
+
+/* A pseudo instruction that is used for separating the dependence graph. */
+/* MOP_pseudo_dependence_seperator, */
+DEFINE_MOP(MOP_pseudo_dependence_seperator, {},0,kLtUndef,"//MOP_pseudo_dependence_seperator","0", 0)
+
+/* A pseudo instruction that is used for replacing MOP_clinit_tail after clinit merge in scheduling. */
+/* MOP_pseudo_none, */
+DEFINE_MOP(MOP_pseudo_none, {},0,kLtUndef,"//MOP_pseudo_none","0", 0)
+
+/* MOP_nop */
+DEFINE_MOP(MOP_nop, {},ISNOP,kLtAlu,"nop","", 1, kSystemInsn, 0xd503201f)
+
+/* phi node for SSA form */
+/* MOP_xphirr */
+DEFINE_MOP(MOP_xphirr, {&OpndDesc::Reg64ID,&OpndDesc::ListSrc},ISPHI,kLtAlu,"//phi","0,1",1)
+/* MOP_wphirr */
+DEFINE_MOP(MOP_wphirr, {&OpndDesc::Reg32ID,&OpndDesc::ListSrc},ISPHI,kLtAlu,"//phi","0,1",1)
+/* MOP_xvphis */
+DEFINE_MOP(MOP_xvphis, {&OpndDesc::Reg32FD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"//phi","0,1",1)
+/* MOP_xvphid */
+DEFINE_MOP(MOP_xvphid, {&OpndDesc::Reg64FD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"//phi","0,1",1)
+/* MOP_xvphivd */
+DEFINE_MOP(MOP_xvphivd, {&OpndDesc::Reg128VD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"movi","0,1",1)
+
+/* end of AArch64 instructions */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h
new file mode 100644
index 0000000000000000000000000000000000000000..49a813c998a38f6a83a4c4722e04fee3f36e6766
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H + +#include "memlayout.h" +#include "aarch64_abi.h" + +namespace maplebe { +class AArch64SymbolAlloc : public SymbolAlloc { + public: + AArch64SymbolAlloc() = default; + + ~AArch64SymbolAlloc() = default; + + void SetRegisters(AArch64reg r0, AArch64reg r1, AArch64reg r2, AArch64reg r3) { + reg0 = r0; + reg1 = r1; + reg2 = r2; + reg3 = r3; + } + + inline bool IsRegister() const { + return reg0 != kRinvalid; + } + + private: + AArch64reg reg0 = kRinvalid; + AArch64reg reg1 = kRinvalid; + AArch64reg reg2 = kRinvalid; + AArch64reg reg3 = kRinvalid; +}; + +/* + * On AArch64, stack frames are structured as follows: + * + * The stack grows downward -- full descending (SP points + * to a filled slot). + * + * Any of the parts of a frame is optional, i.e., it is + * possible to write a caller-callee pair in such a way + * that the particular part is absent in the frame. + * + * Before a call is made, the frame looks like: + * | | + * ||----------------------------| + * | args passed on the stack | (we call them up-formals) + * ||----------------------------|<- Stack Pointer + * | | + * + * V1. + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * | callee-saved registers | + * ||----------------------------| + * | empty space. should have | + * | at least 16-byte alignment | + * ||----------------------------| + * | local variables | + * ||----------------------------| + * | variable-sized local vars | + * | (VLAs) | + * ||----------------------------|<- Stack Pointer + * + * callee-saved registers include + * 1. R19-R28 + * 2. R8 if return value needs to be returned + * thru memory and callee wants to use R8 + * 3. we don't need to save R19 if it is used + * as base register for PIE. + * 4. V8-V15 + * + * V2. (this way, we may be able to save + * on SP modifying instruction) + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | | + * | empty space | + * | | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | callee-saved registers | + * | including those used for | + * | parameter passing | + * ||----------------------------| + * | empty space. 
should have |
+ * | at least 16-byte alignment |
+ ||----------------------------|
+ * | local variables |
+ ||----------------------------|
+ * | PREV_FP, PREV_LR |
+ ||----------------------------|<- Frame Pointer
+ * | variable-sized local vars |
+ * | (VLAs) |
+ ||----------------------------|
+ * | args to pass through stack |
+ ||----------------------------|
+ */
+class AArch64MemLayout : public MemLayout {
+ public:
+ AArch64MemLayout(BECommon &b, MIRFunction &f, MapleAllocator &mallocator)
+ : MemLayout(b, f, mallocator, kAarch64StackPtrAlignment) {}
+
+ ~AArch64MemLayout() override = default;
+
+ /*
+ * Returns stack space required for a call
+ * which is used to pass arguments that cannot be
+ * passed through registers
+ */
+ uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) override;
+
+ void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override;
+
+ void AssignSpillLocationsToPseudoRegisters() override;
+
+ SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override;
+
+ uint64 StackFrameSize() const;
+
+ uint32 RealStackFrameSize() const;
+
+ const MemSegment &locals() const {
+ return segLocals;
+ }
+
+ uint32 GetSizeOfSpillReg() const {
+ return segSpillReg.GetSize();
+ }
+
+ uint32 GetSizeOfLocals() const {
+ return segLocals.GetSize();
+ }
+
+ void SetSizeOfGRSaveArea(uint32 sz) {
+ segGrSaveArea.SetSize(sz);
+ }
+
+ uint32 GetSizeOfGRSaveArea() const {
+ return segGrSaveArea.GetSize();
+ }
+
+ inline void SetSizeOfVRSaveArea(uint32 sz) {
+ segVrSaveArea.SetSize(sz);
+ }
+
+ uint32 GetSizeOfVRSaveArea() const {
+ return segVrSaveArea.GetSize();
+ }
+
+ uint32 GetSizeOfRefLocals() const {
+ return segRefLocals.GetSize();
+ }
+
+ int32 GetRefLocBaseLoc() const;
+ int32 GetGRSaveAreaBaseLoc();
+ int32 GetVRSaveAreaBaseLoc();
+
+ private:
+ MemSegment segRefLocals = MemSegment(kMsRefLocals);
+ /* callee saved register R19-R28 (10) */
+ MemSegment segSpillReg = MemSegment(kMsSpillReg);
+ MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */
+ MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea);
+ MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea);
+ int32 fixStackSize = 0;
+ void SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const;
+ void SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const;
+ void LayoutVarargParams();
+ void LayoutFormalParams();
+ void LayoutActualParams();
+ void LayoutLocalVariables(std::vector<MIRSymbol*> &tempVar, std::vector<MIRSymbol*> &returnDelays);
+ void LayoutEAVariales(std::vector<MIRSymbol*> &tempVar);
+ void LayoutReturnRef(std::vector<MIRSymbol*> &returnDelays, int32 &structCopySize, int32 &maxParmStackSize);
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_obj_emitter.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_obj_emitter.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c52a14e42a4c1065c2245f05bd36177aa525b65
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_obj_emitter.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) [2020-2023] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OBJ_EMIT_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OBJ_EMIT_H
+
+#include "obj_emit.h"
+#include "aarch64_insn.h"
+#include "aarch64_cg.h"
+
+namespace maplebe {
+enum AArch64FixupKind {
+ kAArch64PCRelAdrImm21 = kFirstTargetFixupKind,
+ kAArch64PCRelAdrpImm21,
+ kAArch64LoadPCRelImm19,
+ kAArch64CondBranchPCRelImm19,
+ kAArch64UnCondBranchPCRelImm26,
+ kAArch64CompareBranchPCRelImm19,
+ kAArch64TestBranchPCRelImm14,
+ kAArch64CallPCRelImm26,
+ kAArch64AddPCRelLo12,
+ kAArch64LdrPCRelLo12,
+};
+
+class AArch64ObjFuncEmitInfo : public ObjFuncEmitInfo {
+ public:
+ AArch64ObjFuncEmitInfo(CGFunc &func, MemPool &memPool) : ObjFuncEmitInfo(func, memPool) {}
+ ~AArch64ObjFuncEmitInfo() = default;
+ void HandleLocalBranchFixup(const std::vector<uint32> &label2Offset) override;
+};
+
+class AArch64ObjEmitter : public ObjEmitter {
+ public:
+ AArch64ObjEmitter(CG &cg, const std::string &objFileName) : ObjEmitter(cg, objFileName) {}
+ ~AArch64ObjEmitter() = default;
+
+ void EncodeInstruction(const Insn &insn, const std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo) override {
+ uint32 binInsn = GetBinaryCodeForInsn(insn, label2Offset, objFuncEmitInfo);
+ objFuncEmitInfo.AppendTextData(binInsn, k4ByteSize);
+ }
+
+ uint32 GetInsnSize(const Insn &insn) const override {
+ (void)insn;
+ return k4ByteSize;
+ }
+
+ FuncEmitInfo &CreateFuncEmitInfo(CGFunc &cgFunc) {
+ MemPool *memPool = cgFunc.GetCG()->GetMIRModule()->GetMemPool();
+ AArch64ObjFuncEmitInfo *content = memPool->New<AArch64ObjFuncEmitInfo>(cgFunc, *memPool);
+ // contents[cgFunc.GetFunction().GetPuidxOrigin()] = content;
+ contents.insert(contents.begin() + cgFunc.GetFunction().GetPuidxOrigin(), content);
+ return *content;
+ }
+
+ void HandleTextSectionGlobalFixup() override;
+ void HandleTextSectionFixup();
+ void AppendTextSectionData() override;
+ void AppendGlobalLabel() override;
+ void AppendSymsToSymTabSec() override;
+ void InitSections() override;
+ void LayoutSections() override;
+ void UpdateMachineAndFlags(FileHeader &header) override;
+ void EmitDataToDynamic();
+ void EmitDataToHash();
+ void EmitIntrinsicInsn(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo) override;
+ void EmitSpinIntrinsicInsn(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo) override;
+
+ uint32 GetBinaryCodeForInsn(const Insn &insn, const std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetOpndMachineValue(const Operand &opnd) const;
+ uint32 GetAdrLabelOpndValue(const Insn &insn, const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetLoadLiteralOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetCondBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetUnCondBranchOpndValue(const Operand &opnd, const std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetCallFuncOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetTestBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetCompareBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GetLo12LitrealOpndValue(MOperator mOp, const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ void InsertNopInsn(ObjFuncEmitInfo &objFuncEmitInfo) const override;
+
+ private:
+ uint32 GenAddSubExtendRegInsn(const Insn &insn) const;
+ uint32 GenAddSubImmInsn(const Insn &insn) const;
+ uint32 GenAddSubShiftImmInsn(const Insn &insn) const;
+ uint32 GenAddSubRegInsn(const Insn &insn) const;
+ uint32 GenAddSubShiftRegInsn(const Insn &insn) const;
+ uint32 GenBitfieldInsn(const Insn &insn) const;
+ uint32 GenExtractInsn(const Insn &insn) const;
+ uint32 GenBranchImmInsn(const Insn &insn, const std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenBranchRegInsn(const Insn &insn) const;
+ uint32 GenCompareBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenCondCompareImmInsn(const Insn &insn) const;
+ uint32 GenCondCompareRegInsn(const Insn &insn) const;
+ uint32 GenConditionalSelectInsn(const Insn &insn) const;
+ uint32 GenDataProcess1SrcInsn(const Insn &insn) const;
+ uint32 GenDataProcess2SrcInsn(const Insn &insn) const;
+ uint32 GenDataProcess3SrcInsn(const Insn &insn) const;
+ uint32 GenFloatIntConversionsInsn(const Insn &insn) const;
+ uint32 GenFloatCompareInsn(const Insn &insn) const;
+ uint32 GenFloatDataProcessing1Insn(const Insn &insn) const;
+ uint32 GenFloatDataProcessing2Insn(const Insn &insn) const;
+ uint32 GenFloatImmInsn(const Insn &insn) const;
+ uint32 GenFloatCondSelectInsn(const Insn &insn) const;
+ uint32 GenLoadStoreModeLiteral(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenLoadStoreModeBOi(const Insn &insn) const;
+ uint32 GenLoadStoreModeBOrX(const Insn &insn) const;
+ uint32 GenLoadStoreRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenLoadStoreARInsn(const Insn &insn) const;
+ uint32 GenLoadExclusiveInsn(const Insn &insn) const;
+ uint32 GenLoadExclusivePairInsn(const Insn &insn) const;
+ uint32 GenStoreExclusiveInsn(const Insn &insn) const;
+ uint32 GenStoreExclusivePairInsn(const Insn &insn) const;
+ uint32 GenLoadPairInsn(const Insn &insn) const;
+ uint32 GenStorePairInsn(const Insn &insn) const;
+ uint32 GenLoadStoreFloatInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenLoadPairFloatInsn(const Insn &insn) const;
+ uint32 GenStorePairFloatInsn(const Insn &insn) const;
+ uint32 GenLoadLiteralRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenLogicalRegInsn(const Insn &insn) const;
+ uint32 GenLogicalImmInsn(const Insn &insn) const;
+ uint32 GenMoveWideInsn(const Insn &insn) const;
+ uint32 GenPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenAddPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenSystemInsn(const Insn &insn) const;
+ uint32 GenTestBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenCondBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
+ uint32 GenMovReg(const Insn &insn) const;
+ uint32 GenMovImm(const Insn &insn) const;
+ uint32 EncodeLogicaImm(uint64 imm, uint32 size) const;
+ void HandleCallFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
+ // void HandleAdrpFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
+ void HandleAdrFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
+ // void HandlekPCRelLo12Fixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
+ void HandleLSDAFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
+
+ /* emit intrinsic insn */
+ void EmitMCCStackMapCall(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitEnv(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitClinit(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitCounter(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitLazyLoad(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitLazyLoadStatic(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitAdrpLdr(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitArrayClassCacheLoad(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitClinitTail(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitGetAndAddInt(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitGetAndSetInt(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitCompareAndSwapInt(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitStringIndexOf(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitStringIndexOf2(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitStringIndexOf3(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitCheckCastNoArray(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitCheckCastIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitCheckCastNoSubIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitInstanceOfIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitInstanceOfNoSubIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitMovMovkri16(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+ void EmitMovMovk64ri16(const Insn &insn, std::vector<uint32> &label2Offset,
+ ObjFuncEmitInfo &objFuncEmitInfo);
+
+ void EmitInsn(MOperator mOp, Operand &opnd1,
+ std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo) {
+ Insn &insn = objFuncEmitInfo.GetCGFunc().GetInsnBuilder()->BuildInsn(mOp, opnd1);
+ EncodeInstruction(insn, label2Offset, objFuncEmitInfo);
+ }
+
+ void EmitInsn(MOperator mOp, Operand &opnd1, Operand &opnd2,
+ std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo) {
+ Insn &insn = objFuncEmitInfo.GetCGFunc().GetInsnBuilder()->BuildInsn(mOp, opnd1, opnd2);
+ EncodeInstruction(insn, label2Offset, objFuncEmitInfo);
+ }
+
+ void EmitInsn(MOperator mOp, Operand &opnd1, Operand &opnd2, Operand &opnd3,
+ std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo) {
+ Insn &insn = objFuncEmitInfo.GetCGFunc().GetInsnBuilder()->BuildInsn(mOp, opnd1, opnd2, opnd3);
+ EncodeInstruction(insn, label2Offset, objFuncEmitInfo);
+ }
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OBJ_EMIT_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h
new
file mode 100644 index 0000000000000000000000000000000000000000..0ca08427704d809ce39fd9ffb3c2ad82a3203d67 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H + +#include "offset_adjust.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +class AArch64FPLROffsetAdjustment : public FrameFinalize { + public: + explicit AArch64FPLROffsetAdjustment(CGFunc &func) : FrameFinalize(func) {} + + ~AArch64FPLROffsetAdjustment() override = default; + + void Run() override; + + private: + void AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc); + void AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index, AArch64CGFunc &aarchCGFunc) const; + void AdjustmentOffsetForFPLR(); + /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */ + void AdjustmentStackPointer(Insn &insn, AArch64CGFunc &aarchCGFunc); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h new file mode 100644 index 0000000000000000000000000000000000000000..c04b3775bc1ec02cce0d4e434ff7c1ec9132f6aa --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H
+
+#include
+#include
+#include
+#include "aarch64_isa.h"
+#include "operand.h"
+#include "cg.h"
+#include "emit.h"
+#include "common_utils.h"
+
+namespace std {
+template<> /* function-template-specialization */
+class std::hash<maplebe::MemOperand> {
+ public:
+ size_t operator()(const maplebe::MemOperand &x) const {
+ std::size_t seed = 0;
+ hash_combine(seed, x.GetAddrMode());
+ hash_combine(seed, x.GetSize());
+ maplebe::RegOperand *xb = x.GetBaseRegister();
+ maplebe::RegOperand *xi = x.GetIndexRegister();
+ if (xb != nullptr) {
+ hash_combine(seed, xb->GetRegisterNumber());
+ hash_combine(seed, xb->GetSize());
+ }
+ if (xi != nullptr) {
+ hash_combine(seed, xi->GetRegisterNumber());
+ hash_combine(seed, xi->GetSize());
+ }
+ return seed;
+ }
+};
+}
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a3f5b52dfe201f1975232d7b5b6c4c6e9a14b89
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a3f5b52dfe201f1975232d7b5b6c4c6e9a14b89
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H
+
+#include "aarch64_isa.h"
+#include "optimize_common.h"
+
+namespace maplebe {
+using namespace maple;
+
+
+class AArch64InsnVisitor : public InsnVisitor {
+ public:
+  explicit AArch64InsnVisitor(CGFunc &func) : InsnVisitor(func) {}
+
+  ~AArch64InsnVisitor() override = default;
+
+  void ModifyJumpTarget(maple::LabelIdx targetLabel, BB &bb) override;
+  void ModifyJumpTarget(Operand &targetOperand, BB &bb) override;
+  void ModifyJumpTarget(BB &newTarget, BB &bb) override;
+  /* Check whether extra gotos are required when relocating a BB */
+  Insn *CloneInsn(Insn &originalInsn) override;
+  LabelIdx GetJumpLabel(const Insn &insn) const override;
+  bool IsCompareInsn(const Insn &insn) const override;
+  bool IsCompareAndBranchInsn(const Insn &insn) const override;
+  bool IsAddOrSubInsn(const Insn &insn) const override;
+  RegOperand *CreateVregFromReg(const RegOperand &pReg) override;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b645256f75fbcbf5e1cb8da82bbce5724a06313
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h
@@ -0,0 +1,1808 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H
+
+#include <vector>
+#include "peep.h"
+#include "aarch64_cg.h"
+#include "optimize_common.h"
+#include "mir_builder.h"
+
+namespace maplebe {
+class AArch64CGPeepHole : CGPeepHole {
+ public:
+  /* normal constructor */
+  AArch64CGPeepHole(CGFunc &f, MemPool *memPool) : CGPeepHole(f, memPool) {};
+  /* constructor for ssa */
+  AArch64CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) : CGPeepHole(f, memPool, cgssaInfo) {};
+  ~AArch64CGPeepHole() = default;
+
+  void Run() override;
+  bool DoSSAOptimize(BB &bb, Insn &insn) override;
+  void DoNormalOptimize(BB &bb, Insn &insn) override;
+};
+
+/*
+* i.  cmp x0, x1
+*     cset w0, EQ       ===>   cmp x0, x1
+*     cmp w0, #0               cset w0, EQ
+*     cset w0, NE
+*
+* ii. cmp x0, x1
+*     cset w0, EQ       ===>   cmp x0, x1
+*     cmp w0, #0               cset w0, NE
+*     cset w0, EQ
+*/
+class ContinuousCmpCsetPattern : public CGPeepPattern {
+ public:
+  ContinuousCmpCsetPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~ContinuousCmpCsetPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "ContinuousCmpCsetPattern";
+  }
+
+ private:
+  bool CheckCondCode(const CondOperand &condOpnd) const;
+  Insn *prevCmpInsn = nullptr;
+  Insn *prevCsetInsn1 = nullptr;
+  Insn *prevCmpInsn1 = nullptr;
+  bool reverse = false;
+};
+
+/*
+ * Example 1)
+ *  mov w5, #1
+ *  ...
+ *  mov w0, #0
+ *  csel w5, w5, w0, NE     ===>    cset w5, NE
+ *
+ * Example 2)
+ *  mov w5, #0
+ *  ...
+ *  mov w0, #1
+ *  csel w5, w5, w0, NE     ===>    cset w5, EQ
+ *
+ * conditions:
+ * 1.
mov_imm1 value is 0(1) && mov_imm value is 1(0) + */ +class CselToCsetPattern : public CGPeepPattern { + public: + CselToCsetPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : + CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~CselToCsetPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CselToCsetPattern"; + } + + private: + bool IsOpndDefByZero(const Insn &insn) const; + bool IsOpndDefByOne(const Insn &insn) const; + Insn *prevMovInsn1 = nullptr; + Insn *prevMovInsn2 = nullptr; +}; + +/* + * combine cset & cbz/cbnz ---> beq/bne + * Example 1) + * cset w0, EQ or cset w0, NE + * cbnz w0, .label cbnz w0, .label + * ===> beq .label ===> bne .label + * + * Case: same conditon_code + * + * Example 2) + * cset w0, EQ or cset w0, NE + * cbz w0, .label cbz w0, .label + * ===> bne .label ===> beq .label + * + * Case: reversed condition_code + */ +class CsetCbzToBeqPattern : public CGPeepPattern { + public: + CsetCbzToBeqPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : + CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~CsetCbzToBeqPattern() override = default; + std::string GetPatternName() override { + return "CsetCbzToBeqPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + + private: + MOperator SelectNewMop(ConditionCode condCode, bool inverse) const; + Insn *prevInsn = nullptr; +}; + +/* + * combine neg & cmp --> cmn + * Example 1) + * neg x0, x6 + * cmp x2, x0 ---> (currInsn) + * ===> cmn x2, x6 + * + * Example 2) + * neg x0, x6, LSL #5 + * cmp x2, x0 ---> (currInsn) + * ===> cmn x2, x6, LSL #5 + * + * Conditions: + * 1. neg_amount_val is valid in cmn amount range + */ +class NegCmpToCmnPattern : public CGPeepPattern { + public: + NegCmpToCmnPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : + CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~NegCmpToCmnPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "NegCmpToCmnPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* + * combine {sxtw / uxtw} & lsl ---> {sbfiz / ubfiz} + * sxtw x1, w0 + * lsl x2, x1, #3 ===> sbfiz x2, x0, #3, #32 + * + * uxtw x1, w0 + * lsl x2, x1, #3 ===> ubfiz x2, x0, #3, #32 + */ +class ExtLslToBitFieldInsertPattern : public CGPeepPattern { + public: + ExtLslToBitFieldInsertPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~ExtLslToBitFieldInsertPattern() override = default; + std::string GetPatternName() override { + return "ExtLslToBitFieldInsertPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + + private: + Insn *prevInsn = nullptr; +}; + +/* + * Optimize the following patterns: + * Example 1) + * and w0, w6, #1 ====> tbz w6, #0, .label + * cmp w0, #1 + * bne .label + * + * and w0, w6, #16 ====> tbz w6, #4, .label + * cmp w0, #16 + * bne .label + * + * and w0, w6, #32 ====> tbnz w6, #5, .label + * cmp w0, #32 + * beq .label + * + * Conditions: + * 1. cmp_imm value == and_imm value + * 2. 
(and_imm value is (1 << n)) && (cmp_imm value is (1 << n))
+ *
+ * Example 2)
+ *  and x0, x6, #32        ====>  tbz x6, #5, .label
+ *  cmp x0, #0
+ *  beq .label
+ *
+ *  and x0, x6, #32        ====>  tbnz x6, #5, .label
+ *  cmp x0, #0
+ *  bne .label
+ *
+ * Conditions:
+ * 1. (cmp_imm value is 0) || (cmp_imm == and_imm)
+ * 2. and_imm value is (1 << n)
+ */
+class AndCmpBranchesToTbzPattern : public CGPeepPattern {
+ public:
+  AndCmpBranchesToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info)
+      : CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~AndCmpBranchesToTbzPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "AndCmpBranchesToTbzPattern";
+  }
+
+ private:
+  bool CheckAndSelectPattern(const Insn &currInsn);
+  Insn *prevAndInsn = nullptr;
+  Insn *prevCmpInsn = nullptr;
+  MOperator newMop = MOP_undef;
+  int64 tbzImmVal = -1;
+};
+
+/*
+ * optimize the following patterns:
+ * Example 1)
+ *  cmp w1, wzr
+ *  bge .label        ====>  tbz w1, #31, .label
+ *
+ *  cmp wzr, w1
+ *  ble .label        ====>  tbz w1, #31, .label
+ *
+ *  cmp w1, wzr
+ *  blt .label        ====>  tbnz w1, #31, .label
+ *
+ *  cmp wzr, w1
+ *  bgt .label        ====>  tbnz w1, #31, .label
+ *
+ *
+ * Example 2)
+ *  cmp w1, #0
+ *  bge .label        ====>  tbz w1, #31, .label
+ *
+ *  cmp w1, #0
+ *  blt .label        ====>  tbnz w1, #31, .label
+ */
+class ZeroCmpBranchesToTbzPattern : public CGPeepPattern {
+ public:
+  ZeroCmpBranchesToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~ZeroCmpBranchesToTbzPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "ZeroCmpBranchesToTbzPattern";
+  }
+
+ private:
+  bool CheckAndSelectPattern(const Insn &currInsn);
+  Insn *prevInsn = nullptr;
+  MOperator newMop = MOP_undef;
+  RegOperand *regOpnd = nullptr;
+};
+
+/*
+ * mvn w3, w3          ====>  bic w3, w5, w3
+ * and w3, w5, w3
+ *
+ * mvn x3, x3          ====>  bic x3, x5, x3
+ * and x3, x5, x3
+ */
+class MvnAndToBicPattern : public CGPeepPattern {
+ public:
+  MvnAndToBicPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~MvnAndToBicPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "MvnAndToBicPattern";
+  }
+
+ private:
+  Insn *prevInsn1 = nullptr;
+  Insn *prevInsn2 = nullptr;
+  bool op1IsMvnDef = false;
+  bool op2IsMvnDef = false;
+};
+
+/*
+ * and r0, r1, #4   (the imm is a power of 2, i.e. 1 << n)
+ * ...
+ * cbz r0, .Label
+ * ===>  tbz r1, #2, .Label
+ *
+ * and r0, r1, #4   (the imm is a power of 2, i.e. 1 << n)
+ * ...
+ * cbnz r0, .Label
+ * ===>  tbnz r1, #2, .Label
+ */
+class AndCbzToTbzPattern : public CGPeepPattern {
+ public:
+  AndCbzToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  AndCbzToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) :
+      CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~AndCbzToTbzPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "AndCbzToTbzPattern";
+  }
+
+ private:
+  Insn *prevInsn = nullptr;
+};
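Editor's note: the and+cbz/cbnz rewrites above hinge on the mask having exactly one set bit; the tbz/tbnz bit index is then that bit's position. A minimal sketch of the check (illustrative helper, not part of the maple sources):

```cpp
#include <bit>
#include <cstdint>

// Returns the tbz/tbnz bit index for "and rD, rN, #imm; cbz/cbnz rD", or -1
// when the pattern does not apply because imm is not of the form 1 << n.
int TbzBitIndex(uint64_t andImm) {
  if (!std::has_single_bit(andImm)) {
    return -1;
  }
  return std::countr_zero(andImm);  // the #n in "tbz/tbnz rN, #n, .Label"
}
```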
+
+class CombineSameArithmeticPattern : public CGPeepPattern {
+ public:
+  CombineSameArithmeticPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~CombineSameArithmeticPattern() override {
+    prevInsn = nullptr;
+    newImmOpnd = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "CombineSameArithmeticPattern";
+  }
+
+ private:
+  std::vector<MOperator> validMops = {MOP_wlsrrri5, MOP_xlsrrri6, MOP_wasrrri5, MOP_xasrrri6, MOP_wlslrri5,
+                                      MOP_xlslrri6, MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12};
+  Insn *prevInsn = nullptr;
+  ImmOperand *newImmOpnd = nullptr;
+};
+
+/*
+ * Specific Extension Elimination, includes sxt[b|h|w] & uxt[b|h|w]. There are three scenes:
+ * 1. PrevInsn is mov
+ * Example 1)
+ *  mov w0, #imm        or   mov w0, #imm
+ *  sxt{} w0, w0             uxt{} w0, w0
+ *  ===> mov w0, #imm        ===> mov w0, #imm
+ *       mov w0, w0               mov w0, w0
+ *
+ * Example 2)
+ *  mov w0, R0
+ *  uxt{} w0, w0
+ *  ===> mov w0, R0
+ *       mov w0, w0
+ *
+ * Conditions:
+ * 1) #imm is not out of range, depending on the extension's valid bits.
+ * 2) [mov w0, R0] is the return value of a call and the return size is not out of range
+ * 3) mov.destOpnd.size = ext.destOpnd.size
+ *
+ *
+ * 2. PrevInsn is ldr[b|h|sb|sh]
+ * Example 1)
+ *  ldrb x1, []
+ *  and x1, x1, #imm
+ *  ===> ldrb x1, []
+ *       mov x1, x1
+ *
+ * Example 2)
+ *  ldrb x1, []   or  ldrb x1, []   or  ldrsb x1, []  or  ldrsb x1, []
+ *  sxtb x1, x1       uxtb x1, x1       sxtb x1, x1       uxtb x1, x1
+ *  ===> ldrsb x1, [] ===> ldrb x1, []  ===> ldrsb x1, [] ===> ldrb x1, []
+ *       mov x1, x1        mov x1, x1        mov x1, x1        mov x1, x1
+ *
+ *  ldrh x1, []   or  ldrh x1, []   or  ldrsh x1, []  or  ldrsh x1, []
+ *  sxth x1, x1       uxth x1, x1       sxth x1, x1       uxth x1, x1
+ *  ===> ldrsh x1, [] ===> ldrh x1, []  ===> ldrsh x1, [] ===> ldrh x1, []
+ *       mov x1, x1        mov x1, x1        mov x1, x1        mov x1, x1
+ *
+ *  ldrsw x1, []  or  ldrsw x1, []
+ *  sxtw x1, x1       uxtw x1, x1
+ *  ===> ldrsw x1, [] ===> no change
+ *       mov x1, x1
+ *
+ * Example 3)
+ *  ldrb x1, []   or  ldrb x1, []   or  ldrsb x1, []  or  ldrsb x1, []
+ *  sxth x1, x1       uxth x1, x1       sxth x1, x1       uxth x1, x1
+ *  ===> ldrb x1, []  ===> ldrb x1, []  ===> ldrsb x1, [] ===> no change
+ *       mov x1, x1        mov x1, x1        mov x1, x1
+ *
+ *  ldrb x1, []   or  ldrh x1, []   or  ldrsb x1, []  or  ldrsh x1, []
+ *  sxtw x1, x1       sxtw x1, x1       sxtw x1, x1       sxtw x1, x1
+ *  ===> ldrb x1, []  ===> ldrh x1, []  ===> ldrsb x1, [] ===> ldrsh x1, []
+ *       mov x1, x1        mov x1, x1        mov x1, x1        mov x1, x1
+ *
+ *  ldr x1, []
+ *  sxtw x1, x1
+ *  ===> ldrsw x1, []
+ *       mov x1, x1
+ *
+ * Cases:
+ * 1) extension size == load size -> change the load type or eliminate the extension
+ * 2) extension size > load size -> possibly eliminating the extension
+ *
+ *
+ * 3.
PrevInsn is same sxt / uxt + * Example 1) + * sxth x1, x2 + * sxth x3, x1 + * ===> sxth x1, x2 + * mov x3, x1 + * + * Example 2) + * sxtb x1, x2 or uxtb w0, w0 + * sxth x3, x1 uxth w0, w0 + * ===> sxtb x1, x2 ===> uxtb w0, w0 + * mov x3, x1 mov x0, x0 + * + * Conditions: + * 1) ext1.destOpnd.size == ext2.destOpnd.size + * 2) ext1.destOpnd.regNo == ext2.destOpnd.regNo + * === prop ext1.destOpnd to ext2.srcOpnd, transfer ext2 to mov + * + * Cases: + * 1) ext1 type == ext2 type ((sxth32 & sxth32) || (sxth64 & sxth64) || ...) + * 2) ext1 type < ext2 type ((sxtb32 & sxth32) || (sxtb64 & sxth64) || (sxtb64 & sxtw64) || + * (sxth64 & sxtw64) || (uxtb32 & uxth32)) + */ +class ElimSpecificExtensionPattern : public CGPeepPattern { + public: + ElimSpecificExtensionPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~ElimSpecificExtensionPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ElimSpecificExtensionPattern"; + } + + protected: + enum SpecificExtType : uint8 { + EXTUNDEF = 0, + SXTB, + SXTH, + SXTW, + UXTB, + UXTH, + UXTW, + SpecificExtTypeSize + }; + enum OptSceneType : uint8 { + kSceneUndef = 0, + kSceneMov, + kSceneLoad, + kSceneSameExt + }; + static constexpr uint8 kPrevLoadPatternNum = 6; + static constexpr uint8 kPrevLoadMappingNum = 2; + static constexpr uint8 kValueTypeNum = 2; + static constexpr uint64 kInvalidValue = 0; + static constexpr uint8 kSameExtPatternNum = 4; + static constexpr uint8 kSameExtMappingNum = 2; + uint64 extValueRangeTable[SpecificExtTypeSize][kValueTypeNum] = { + /* {minValue, maxValue} */ + {kInvalidValue, kInvalidValue}, /* UNDEF */ + {0xFFFFFFFFFFFFFF80, 0x7F}, /* SXTB */ + {0xFFFFFFFFFFFF8000, 0x7FFF}, /* SXTH */ + {0xFFFFFFFF80000000, kInvalidValue}, /* SXTW */ + {0xFFFFFFFFFFFFFF00, kInvalidValue}, /* UXTB */ + {0xFFFFFFFFFFFF0000, kInvalidValue}, /* UXTH */ + {kInvalidValue, kInvalidValue} /* UXTW */ + }; + MOperator loadMappingTable[SpecificExtTypeSize][kPrevLoadPatternNum][kPrevLoadMappingNum] = { + /* {prevOrigMop, prevNewMop} */ + {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UNDEF */ + {{MOP_wldrb, MOP_wldrsb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldr, MOP_wldrsb}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* SXTB */ + {{MOP_wldrh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldrsh, MOP_wldrsh}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* SXTH */ + {{MOP_wldrh, MOP_wldrh}, {MOP_wldrsh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, + {MOP_wldr, MOP_xldrsw}, {MOP_xldrsw, MOP_xldrsw}}, /* SXTW */ + {{MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UXTB */ + {{MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldr, MOP_wldrh}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UXTH */ + {{MOP_wldr, MOP_wldr}, {MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}} /* UXTW */ + }; + MOperator sameExtMappingTable[SpecificExtTypeSize][kSameExtPatternNum][kSameExtMappingNum] = { + /* {prevMop, currMop} */ + {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, 
MOP_undef}}, /* UNDEF */ + {{MOP_xsxtb32, MOP_xsxtb32}, {MOP_xsxtb64, MOP_xsxtb64}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* SXTB */ + {{MOP_xsxtb32, MOP_xsxth32}, {MOP_xsxtb64, MOP_xsxth64}, {MOP_xsxth32, MOP_xsxth32}, + {MOP_xsxth64, MOP_xsxth64}}, /* SXTH */ + {{MOP_xsxtb64, MOP_xsxtw64}, {MOP_xsxth64, MOP_xsxtw64}, {MOP_xsxtw64, MOP_xsxtw64}, + {MOP_undef, MOP_undef}}, /* SXTW */ + {{MOP_xuxtb32, MOP_xuxtb32}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* UXTB */ + {{MOP_xuxtb32, MOP_xuxth32}, {MOP_xuxth32, MOP_xuxth32}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* UXTH */ + {{MOP_xuxtw64, MOP_xuxtw64}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}} /* UXTW */ + }; + + private: + void SetSpecificExtType(const Insn &currInsn); + void SetOptSceneType(); + bool IsValidLoadExtPattern(Insn &currInsn, MOperator oldMop, MOperator newMop) const; + MOperator SelectNewLoadMopByBitSize(MOperator lowBitMop) const; + void ElimExtensionAfterLoad(Insn &currInsn); + void ElimExtensionAfterMov(Insn &currInsn); + void ElimExtensionAfterSameExt(Insn &currInsn); + void ReplaceExtWithMov(Insn &currInsn); + Insn *prevInsn = nullptr; + SpecificExtType extTypeIdx = EXTUNDEF; + OptSceneType sceneType = kSceneUndef; + bool is64Bits = false; +}; + +/* + * We optimize the following pattern in this function: + * if w0's valid bits is one + * uxtb w0, w0 + * eor w0, w0, #1 + * cbz w0, .label + * => + * tbnz w0, .label + * if there exists uxtb w0, w0 and w0's valid bits is + * less than 8, eliminate it. + */ +class OneHoleBranchPattern : public CGPeepPattern { + public: + explicit OneHoleBranchPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : + CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~OneHoleBranchPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "OneHoleBranchPattern"; + } + + private: + void FindNewMop(const BB &bb, const Insn &insn); + bool CheckPrePrevInsn(); + Insn *prevInsn = nullptr; + Insn *prePrevInsn = nullptr; + MOperator newOp = MOP_undef; +}; + +/* + * Combine logical shift and orr to [extr wd, wn, wm, #lsb / extr xd, xn, xm, #lsb] + * Example 1) + * lsr w5, w6, #16 + * lsl w4, w7, #16 + * orr w5, w5, w4 ---> (currInsn) + * ===> extr w5, w6, w7, #16 + * + * Example 2) + * lsr w5, w6, #16 + * orr w5, w5, w4, LSL #16 ---> (currInsn) + * ===> extr w5, w6, w4, #16 + * + * Example 3) + * lsl w4, w7, #16 + * orr w5, w4, w5, LSR #16 ---> (currInsn) + * ===> extr w5, w5, w7, #16 + * + * Conditions: + * 1. (def[wn] is lsl) & (def[wm] is lsr) + * 2. lsl_imm + lsr_imm == curr type size (32 or 64) + * 3. is64bits ? (extr_imm in range [0, 63]) : (extr_imm in range [0, 31]) + * 4. extr_imm = lsr_imm + */ +class LogicShiftAndOrrToExtrPattern : public CGPeepPattern { + public: + LogicShiftAndOrrToExtrPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : + CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~LogicShiftAndOrrToExtrPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LogicShiftAndOrrToExtrPattern"; + } + + private: + Insn *prevLsrInsn = nullptr; + Insn *prevLslInsn = nullptr; + int64 shiftValue = 0; + bool is64Bits = false; +}; + +/* + * Simplify Mul and Basic Arithmetic. There are three scenes: + * 1. 
currInsn is add: + * Example 1) + * mul x1, x1, x2 or mul x0, x1, x2 + * add x0, x0, x1 add x0, x0, x1 + * ===> madd x0, x1, x2, x0 ===> madd x0, x1, x2, x1 + * + * Example 2) + * fmul d1, d1, d2 or fmul d0, d1, d2 + * fadd d0, d0, d1 fadd d0, d0, d1 + * ===> fmadd d0, d1, d2, d0 ===> fmadd d0, d1, d2, d1 + * + * cases: addInsn second opnd || addInsn third opnd + * + * + * 2. currInsn is sub: + * Example 1) Example 2) + * mul x1, x1, x2 fmul d1, d1, d2 + * sub x0, x0, x1 fsub d0, d0, d1 + * ===> msub x0, x1, x2, x0 ===> fmsub d0, d1, d2, d0 + * + * cases: subInsn third opnd + * + * 3. currInsn is neg: + * Example 1) Example 2) + * mul x1, x1, x2 fmul d1, d1, d2 + * neg x0, x1 fneg d0, d1 + * ===> mneg x0, x1, x2 ===> fnmul d0, d1, d2 + * + * cases: negInsn second opnd + */ +class SimplifyMulArithmeticPattern : public CGPeepPattern { + public: + SimplifyMulArithmeticPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : + CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~SimplifyMulArithmeticPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "SimplifyMulArithmeticPattern"; + } + + protected: + enum ArithmeticType : uint8 { + kUndef = 0, + kAdd, + kFAdd, + kSub, + kFSub, + kNeg, + kFNeg, + kArithmeticTypeSize + }; + static constexpr uint8 newMopNum = 2; + MOperator curMop2NewMopTable[kArithmeticTypeSize][newMopNum] = { + /* {32bit_mop, 64bit_mop} */ + {MOP_undef, MOP_undef}, /* kUndef */ + {MOP_wmaddrrrr, MOP_xmaddrrrr}, /* kAdd */ + {MOP_smadd, MOP_dmadd}, /* kFAdd */ + {MOP_wmsubrrrr, MOP_xmsubrrrr}, /* kSub */ + {MOP_smsub, MOP_dmsub}, /* kFSub */ + {MOP_wmnegrrr, MOP_xmnegrrr}, /* kNeg */ + {MOP_snmul, MOP_dnmul} /* kFNeg */ + }; + + private: + void SetArithType(const Insn &currInsn); + void DoOptimize(BB &currBB, Insn &currInsn); + ArithmeticType arithType = kUndef; + int32 validOpndIdx = -1; + Insn *prevInsn = nullptr; + bool isFloat = false; +}; + +/* + * Example 1) + * lsr w0, w1, #6 + * and w0, w0, #1 ---> (currInsn) + * ===> ubfx w0, w1, #6, #1 + * + * Conditions: + * 1. and_imm value is (1 << n -1) + * 2. is64bits ? (ubfx_imm_lsb in range [0, 63]) : (ubfx_imm_lsb in range [0, 31]) + * 3. is64bits ? 
((ubfx_imm_lsb + ubfx_imm_width) in range [1, 64]) : ((ubfx_imm_lsb + ubfx_imm_width) in range [1, 32])
+ */
+class LsrAndToUbfxPattern : public CGPeepPattern {
+ public:
+  LsrAndToUbfxPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~LsrAndToUbfxPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "LsrAndToUbfxPattern";
+  }
+
+ private:
+  Insn *prevInsn = nullptr;
+};
+
+/*
+ * Optimize the following patterns:
+ * orr w21, w0, #0    ====>  mov w21, w0
+ * orr w21, #0, w0    ====>  mov w21, w0
+ */
+class OrrToMovPattern : public CGPeepPattern {
+ public:
+  explicit OrrToMovPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~OrrToMovPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "OrrToMovPattern";
+  }
+
+ private:
+  MOperator newMop = MOP_undef;
+  RegOperand *reg2 = nullptr;
+};
+
+/*
+ * Optimize the following patterns:
+ * ubfx x201, x202, #0, #32
+ * ====>
+ * uxtw x201, w202
+ */
+class UbfxToUxtwPattern : public CGPeepPattern {
+ public:
+  UbfxToUxtwPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~UbfxToUxtwPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "UbfxToUxtwPattern";
+  }
+};
+
+/*
+ * Optimize the following patterns:
+ * ubfx w0, w0, #2, #1
+ * cbz w0, .L.3434__292     ====>  tbz w0, #2, .L.3434__292
+ * -------------------------------
+ * ubfx w0, w0, #2, #1
+ * cbnz w0, .L.3434__292    ====>  tbnz w0, #2, .L.3434__292
+ * -------------------------------
+ * ubfx x0, x0, #2, #1
+ * cbz x0, .L.3434__292     ====>  tbz x0, #2, .L.3434__292
+ * -------------------------------
+ * ubfx x0, x0, #2, #1
+ * cbnz x0, .L.3434__292    ====>  tbnz x0, #2, .L.3434__292
+ */
+class UbfxAndCbzToTbzPattern : public CGPeepPattern {
+ public:
+  UbfxAndCbzToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) :
+      CGPeepPattern(cgFunc, currBB, currInsn, info) {}
+  ~UbfxAndCbzToTbzPattern() override {
+    useInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "UbfxAndCbzToTbzPattern";
+  }
+ private:
+  Insn *useInsn = nullptr;
+  MOperator newMop = MOP_undef;
+};
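Editor's note: the LsrAndToUbfx conditions above can be checked numerically: the and-mask must be (1 << width) - 1 and the field [lsb, lsb + width) must stay inside the register. An illustrative helper, not taken from the maple sources:

```cpp
#include <bit>
#include <cstdint>

// Validates "lsr rD, rN, #lsb; and rD, rD, #andImm" -> "ubfx rD, rN, #lsb, #width".
bool CanFoldLsrAndToUbfx(uint64_t andImm, uint32_t lsb, bool is64Bits) {
  if (andImm == 0 || (andImm & (andImm + 1)) != 0) {
    return false;  // not a low mask of the form (1 << width) - 1
  }
  uint32_t width = static_cast<uint32_t>(std::popcount(andImm));
  uint32_t regSize = is64Bits ? 64 : 32;
  // lsb in [0, regSize - 1] and lsb + width in [1, regSize]
  return lsb < regSize && lsb + width <= regSize;
}
```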
+
+/*
+ * Look for identical mem insns to eliminate.
+ * If two back-to-back insns are:
+ *   1. str + str
+ *   2. str + ldr
+ * and the [MEM] is a [base + offset] pattern:
+ *   1. if the [MEM] operands are exactly the same, the first
+ *      str can be eliminated.
+ *   2. if the [MEM] operands are exactly the same and the src opnd
+ *      of the str is the same as the dest opnd of the ldr, the
+ *      ldr can be eliminated.
+ */
+class RemoveIdenticalLoadAndStorePattern : public CGPeepPattern {
+ public:
+  RemoveIdenticalLoadAndStorePattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~RemoveIdenticalLoadAndStorePattern() override {
+    nextInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "RemoveIdenticalLoadAndStorePattern";
+  }
+
+ private:
+  bool IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const;
+  Insn *nextInsn = nullptr;
+};
+
+/* ======== CGPeepPattern End ======== */
+/*
+ * Look for identical mem insns to eliminate.
+ * If two back-to-back insns are:
+ *   1. str + str
+ *   2. str + ldr
+ * and the [MEM] is a [base + offset] pattern:
+ *   1. if the [MEM] operands are exactly the same, the first
+ *      str can be eliminated.
+ *   2. if the [MEM] operands are exactly the same and the src opnd
+ *      of the str is the same as the dest opnd of the ldr, the
+ *      ldr can be eliminated.
+ */
+class RemoveIdenticalLoadAndStoreAArch64 : public PeepPattern {
+ public:
+  explicit RemoveIdenticalLoadAndStoreAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~RemoveIdenticalLoadAndStoreAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+
+ private:
+  bool IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const;
+};
+
+/* Remove a redundant mov whose src and dest opnds are exactly the same */
+class RemoveMovingtoSameRegPattern : public CGPeepPattern {
+ public:
+  RemoveMovingtoSameRegPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~RemoveMovingtoSameRegPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "RemoveMovingtoSameRegPattern";
+  }
+};
+
+/* Remove a redundant mov whose src and dest opnds are exactly the same */
+class RemoveMovingtoSameRegAArch64 : public PeepPattern {
+ public:
+  explicit RemoveMovingtoSameRegAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~RemoveMovingtoSameRegAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+};
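Editor's note: both RemoveMovingtoSameReg variants reduce to one operand test, namely that source and destination name the same register at the same width. A self-contained sketch with stand-in types (the real accessors are RegOperand::GetRegisterNumber and RegOperand::GetSize, as used in aarch64_operand.h above):

```cpp
#include <cstdint>

// Stand-in for the two RegOperand fields the check consults.
struct Reg {
  uint32_t regNo;
  uint32_t size;
};

// A mov is redundant exactly when it copies a register onto itself.
bool IsSelfMove(const Reg &dst, const Reg &src) {
  return dst.regNo == src.regNo && dst.size == src.size;
}
```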
+
+/*
+ * Combine 2 STRs into 1 stp or 2 LDRs into 1 ldp, when they are
+ * back to back and the [MEM] they access is contiguous.
+ */
+class CombineContiLoadAndStorePattern : public CGPeepPattern {
+ public:
+  CombineContiLoadAndStorePattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {
+    doAggressiveCombine = cgFunc.GetMirModule().IsCModule();
+  }
+  ~CombineContiLoadAndStorePattern() override {
+    memOpnd = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "CombineContiLoadAndStorePattern";
+  }
+
+ private:
+  std::vector<Insn*> FindPrevStrLdr(Insn &insn, regno_t destRegNO, regno_t memBaseRegNO, int64 baseOfst);
+  /*
+   * avoid the following situation:
+   * str x2, [x19, #8]
+   * mov x0, x19
+   * bl foo (change memory)
+   * str x21, [x19, #16]
+   */
+  bool IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, int64 baseOfst) const;
+  void RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn) const;
+  MOperator GetMopHigherByte(MOperator mop) const;
+  bool SplitOfstWithAddToCombine(const Insn &curInsn, Insn &combineInsn, const MemOperand &memOperand) const;
+  Insn *FindValidSplitAddInsn(Insn &curInsn, RegOperand &baseOpnd) const;
+  bool PlaceSplitAddInsn(const Insn &curInsn, Insn &combineInsn, const MemOperand &memOpnd,
+                         RegOperand &baseOpnd, uint32 bitLen) const;
+  bool doAggressiveCombine = false;
+  MemOperand *memOpnd = nullptr;
+};
+
+/*
+ * add xt, xn, #imm            add xt, xn, xm
+ * ldr xd, [xt]                ldr xd, [xt]
+ *      =====================>
+ * ldr xd, [xn, #imm]          ldr xd, [xn, xm]
+ *
+ * load/store can do extend shift as well
+ */
+class EnhanceStrLdrAArch64 : public PeepPattern {
+ public:
+  explicit EnhanceStrLdrAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~EnhanceStrLdrAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+
+ private:
+  bool IsEnhanceAddImm(MOperator prevMop) const;
+};
+
+/* Eliminate the sxt[b|h|w] w0, w0 when w0 satisfies one of the following:
+ * i)  mov w0, #imm (#imm is not out of range)
+ * ii) ldrs[b|h] w0, [MEM]
+ */
+class EliminateSpecifcSXTAArch64 : public PeepPattern {
+ public:
+  explicit EliminateSpecifcSXTAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~EliminateSpecifcSXTAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+};
+
+/* Eliminate the uxt[b|h|w] w0, w0 when w0 satisfies one of the following:
+ * i)   mov w0, #imm (#imm is not out of range)
+ * ii)  mov w0, R0 (R0 is the return value of a call and the return size is not out of range)
+ * iii) w0 is defined and used by a special load insn and uxt[] pattern
+ */
+class EliminateSpecifcUXTAArch64 : public PeepPattern {
+ public:
+  explicit EliminateSpecifcUXTAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~EliminateSpecifcUXTAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+};
+
+/* fmov ireg1 <- freg1    previous insn
+ * fmov ireg2 <- freg1    current insn
+ * use ireg2              may or may not be present
+ * =>
+ * fmov ireg1 <- freg1    previous insn
+ * mov  ireg2 <- ireg1    current insn
+ * use ireg1              may or may not be present
+ */
+class FmovRegPattern : public CGPeepPattern {
+ public:
+  FmovRegPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~FmovRegPattern() override {
+    prevInsn = nullptr;
+    nextInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "FmovRegPattern";
+  }
+
+ private:
+  Insn *prevInsn = nullptr;
+  Insn *nextInsn = nullptr;
+};
+
+/* sbfx ireg1, ireg2, 0, 32
+ * use ireg1.32
+ * =>
+ * sbfx ireg1,
ireg2, 0, 32 + * use ireg2.32 + */ +class SbfxOptPattern : public CGPeepPattern { +public: + SbfxOptPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~SbfxOptPattern() override { + nextInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "SbfxOptPattern"; + } + +private: + Insn *nextInsn = nullptr; + bool toRemove = false; + std::vector cands; +}; + +/* cbnz x0, labelA + * mov x0, 0 + * b return-bb + * labelA: + * => + * cbz x0, return-bb + * labelA: + */ +class CbnzToCbzPattern : public CGPeepPattern { + public: + CbnzToCbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~CbnzToCbzPattern() override { + nextBB = nullptr; + movInsn = nullptr; + brInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CbnzToCbzPattern"; + } + + private: + BB *nextBB = nullptr; + Insn *movInsn = nullptr; + Insn *brInsn = nullptr; +}; + +/* i. cset w0, EQ + * cbnz w0, .label ===> beq .label + * + * ii. cset w0, EQ + * cbz w0, .label ===> bne .label + * + * iii. cset w0, NE + * cbnz w0, .label ===> bne .label + * + * iiii.cset w0, NE + * cbz w0, .label ===> beq .label + * ... ... + */ +class CsetCbzToBeqOptAArch64 : public PeepPattern { + public: + explicit CsetCbzToBeqOptAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~CsetCbzToBeqOptAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + MOperator SelectMOperator(ConditionCode condCode, bool inverse) const; +}; + +/* When exist load after load or load after store, and [MEM] is + * totally same. Then optimize them. + */ +class ContiLDRorSTRToSameMEMPattern : public CGPeepPattern { + public: + ContiLDRorSTRToSameMEMPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ContiLDRorSTRToSameMEMPattern() override { + prevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ContiLDRorSTRToSameMEMPattern"; + } + + private: + Insn *prevInsn = nullptr; + bool loadAfterStore = false; + bool loadAfterLoad = false; +}; + +/* + * Remove following patterns: + * mov x1, x0 + * bl MCC_IncDecRef_NaiveRCFast + */ +class RemoveIncDecRefPattern : public CGPeepPattern { + public: + RemoveIncDecRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveIncDecRefPattern() override { + prevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveIncDecRefPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* + * When GCONLY is enabled, the read barriers can be inlined. 
+ * we optimize it with the following pattern: + * #if USE_32BIT_REF + * bl MCC_LoadRefField -> ldr w0, [x1] + * bl MCC_LoadVolatileField -> ldar w0, [x1] + * bl MCC_LoadRefStatic -> ldr w0, [x0] + * bl MCC_LoadVolatileStaticField -> ldar w0, [x0] + * bl MCC_Dummy -> omitted + * #else + * bl MCC_LoadRefField -> ldr x0, [x1] + * bl MCC_LoadVolatileField -> ldar x0, [x1] + * bl MCC_LoadRefStatic -> ldr x0, [x0] + * bl MCC_LoadVolatileStaticField -> ldar x0, [x0] + * bl MCC_Dummy -> omitted + * #endif + * + * if we encounter a tail call optimized read barrier call, + * such as: + * b MCC_LoadRefField + * a return instruction will be added just after the load: + * ldr w0, [x1] + * ret + */ +class InlineReadBarriersPattern : public CGPeepPattern { + public: + InlineReadBarriersPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~InlineReadBarriersPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "InlineReadBarriersPattern"; + } +}; + +/* + * mov w1, #34464 + * movk w1, #1, LSL #16 + * sdiv w2, w0, w1 + * ========> + * mov w1, #34464 // may deleted if w1 not live anymore. + * movk w1, #1, LSL #16 // may deleted if w1 not live anymore. + * mov w16, #0x588f + * movk w16, #0x4f8b, LSL #16 + * smull x16, w0, w16 + * asr x16, x16, #32 + * add x16, x16, w0, SXTW + * asr x16, x16, #17 + * add x2, x16, x0, LSR #31 + */ +class ReplaceDivToMultiPattern : public CGPeepPattern { + public: + ReplaceDivToMultiPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ReplaceDivToMultiPattern() override { + prevInsn = nullptr; + prePrevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ReplaceDivToMultiPattern"; + } + + private: + Insn *prevInsn = nullptr; + Insn *prePrevInsn = nullptr; +}; + +/* + * Optimize the following patterns: + * and w0, w0, #imm ====> tst w0, #imm + * cmp w0, #0 beq/bne .label + * beq/bne .label + * + * and x0, x0, #imm ====> tst x0, #imm + * cmp x0, #0 beq/bne .label + * beq/bne .label + */ +class AndCmpBranchesToTstAArch64 : public PeepPattern { + public: + explicit AndCmpBranchesToTstAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~AndCmpBranchesToTstAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Optimize the following patterns: + * and w0, w0, #imm ====> tst w0, #imm + * cbz/cbnz .label beq/bne .label + */ +class AndCbzBranchesToTstAArch64 : public PeepPattern { + public: + explicit AndCbzBranchesToTstAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~AndCbzBranchesToTstAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Optimize the following patterns: + * and w0, w0, #1 ====> and w0, w0, #1 + * cmp w0, #1 + * cset w0, EQ + * + * and w0, w0, #1 ====> and w0, w0, #1 + * cmp w0, #0 + * cset w0, NE + * --------------------------------------------------- + * and w0, w0, #imm ====> ubfx w0, w0, pos, size + * cmp w0, #imm + * cset w0, EQ + * + * and w0, w0, #imm ====> ubfx w0, w0, pos, size + * cmp w0, #0 + * cset w0, NE + * conditions: + * imm is pos power of 2 + * + * --------------------------------------------------- + * and w0, w0, #1 ====> and wn, w0, #1 + * cmp w0, #1 + * cset wn, EQ # wn != w0 && w0 is not live after cset + * + * and w0, w0, #1 ====> and wn, w0, #1 + * cmp w0, #0 + * cset wn, NE # 
wn != w0 && w0 is not live after cset + * --------------------------------------------------- + * and w0, w0, #imm ====> ubfx wn, w0, pos, size + * cmp w0, #imm + * cset wn, EQ # wn != w0 && w0 is not live after cset + * + * and w0, w0, #imm ====> ubfx wn, w0, pos, size + * cmp w0, #0 + * cset wn, NE # wn != w0 && w0 is not live after cset + * conditions: + * imm is pos power of 2 and w0 is not live after cset + */ +class AndCmpBranchesToCsetAArch64 : public PeepPattern { + public: + explicit AndCmpBranchesToCsetAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~AndCmpBranchesToCsetAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + Insn *FindPreviousCmp(Insn &insn) const; +}; +/* + * We optimize the following pattern in this function: + * cmp w[0-9]*, wzr ====> tbz w[0-9]*, #31, .label + * bge .label + * + * cmp wzr, w[0-9]* ====> tbz w[0-9]*, #31, .label + * ble .label + * + * cmp w[0-9]*,wzr ====> tbnz w[0-9]*, #31, .label + * blt .label + * + * cmp wzr, w[0-9]* ====> tbnz w[0-9]*, #31, .label + * bgt .label + * + * cmp w[0-9]*, #0 ====> tbz w[0-9]*, #31, .label + * bge .label + * + * cmp w[0-9]*, #0 ====> tbnz w[0-9]*, #31, .label + * blt .label + */ +class ZeroCmpBranchesAArch64 : public PeepPattern { + public: + explicit ZeroCmpBranchesAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ZeroCmpBranchesAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Look for duplicate or overlapping zero or sign extensions. + * Examples: + * sxth x1, x2 ====> sxth x1, x2 + * sxth x3, x1 mov x3, x1 + * + * sxtb x1, x2 ====> sxtb x1, x2 + * sxth x3, x1 mov x3, x1 + */ +class ElimDuplicateExtensionAArch64 : public PeepPattern { + public: + explicit ElimDuplicateExtensionAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ElimDuplicateExtensionAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * cmp w0, #0 + * cset w1, NE --> mov w1, w0 + * + * cmp w0, #0 + * cset w1, EQ --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, NE --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, EQ --> mov w1, w0 + * + * cmp w0, #0 + * cset w0, NE -->null + * + * cmp w0, #1 + * cset w0, EQ -->null + * + * condition: + * 1. the first operand of cmp instruction must has only one valid bit + * 2. the second operand of cmp instruction must be 0 or 1 + * 3. flag register of cmp isntruction must not be used later + */ +class CmpCsetAArch64 : public PeepPattern { + public: + explicit CmpCsetAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~CmpCsetAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + bool CheckOpndDefPoints(Insn &checkInsn, int opndIdx); + const Insn *DefInsnOfOperandInBB(const Insn &startInsn, const Insn &checkInsn, int opndIdx) const; + bool OpndDefByOneValidBit(const Insn &defInsn) const; + bool FlagUsedLaterInCurBB(const BB &bb, Insn &startInsn) const; +}; + +/* + * add x0, x1, x0 + * ldr x2, [x0] + * ==> + * ldr x2, [x1, x0] + */ +class ComplexMemOperandAddAArch64 : public PeepPattern { + public: + explicit ComplexMemOperandAddAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandAddAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + private: + + bool IsExpandBaseOpnd(const Insn &insn, const Insn &prevInsn) const; +}; + +/* + * cbnz w0, @label + * .... + * mov w0, #0 (elseBB) -->this instruction can be deleted + * + * cbz w0, @label + * .... 
+ * mov w0, #0 (ifBB) -->this instruction can be deleted + * + * condition: + * 1.there is not predefine points of w0 in elseBB(ifBB) + * 2.the first opearnd of cbnz insn is same as the first Operand of mov insn + * 3.w0 is defined by move 0 + * 4.all preds of elseBB(ifBB) end with cbnz or cbz + * + * NOTE: if there are multiple preds and there is not define point of w0 in one pred, + * (mov w0, 0) can't be deleted, avoiding use before def. + */ +class DeleteMovAfterCbzOrCbnzAArch64 : public PeepPattern { + public: + explicit DeleteMovAfterCbzOrCbnzAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) { + cgcfg = cgFunc.GetTheCFG(); + cgcfg->InitInsnVisitor(cgFunc); + } + ~DeleteMovAfterCbzOrCbnzAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + bool PredBBCheck(BB &bb, bool checkCbz, const Operand &opnd) const; + bool OpndDefByMovZero(const Insn &insn) const; + bool NoPreDefine(Insn &testInsn) const; + void ProcessBBHandle(BB *processBB, const BB &bb, const Insn &insn) const; + CGCFG *cgcfg; +}; + +/* + * We optimize the following pattern in this function: + * if w0's valid bits is one + * uxtb w0, w0 + * eor w0, w0, #1 + * cbz w0, .label + * => + * tbnz w0, .label + * && + * if there exists uxtb w0, w0 and w0's valid bits is + * less than 8, eliminate it. + */ +class OneHoleBranchesPreAArch64 : public PeepPattern { + public: + explicit OneHoleBranchesPreAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~OneHoleBranchesPreAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + private: + MOperator FindNewMop(const BB &bb, const Insn &insn) const; +}; + +/* + * We optimize the following pattern in this function: + * movz x0, #11544, LSL #0 + * movk x0, #21572, LSL #16 + * movk x0, #8699, LSL #32 + * movk x0, #16393, LSL #48 + * => + * ldr x0, label_of_constant_1 + */ +class LoadFloatPointPattern : public CGPeepPattern { + public: + LoadFloatPointPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~LoadFloatPointPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LoadFloatPointPattern"; + } + private: + bool FindLoadFloatPoint(Insn &insn); + bool IsPatternMatch(); + std::vector optInsn; +}; + +/* + * Optimize the following patterns: + * orr w21, w0, #0 ====> mov w21, w0 + * orr w21, #0, w0 ====> mov w21, w0 + */ +class ReplaceOrrToMovAArch64 : public PeepPattern { + public: + explicit ReplaceOrrToMovAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ReplaceOrrToMovAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Optimize the following patterns: + * ldr w0, [x21,#68] ldr w0, [x21,#68] + * mov w1, #-1 mov w1, #-1 + * cmp w0, w1 ====> cmn w0, #-1 + */ +class ReplaceCmpToCmnAArch64 : public PeepPattern { + public: + explicit ReplaceCmpToCmnAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ReplaceCmpToCmnAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Remove following patterns: + * mov x0, XX + * mov x1, XX + * bl MCC_IncDecRef_NaiveRCFast + */ +class RemoveIncRefPattern : public CGPeepPattern { + public: + RemoveIncRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveIncRefPattern() override { + insnMov2 = nullptr; + insnMov1 = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() 
override { + return "RemoveIncRefPattern"; + } + + private: + Insn *insnMov2 = nullptr; + Insn *insnMov1 = nullptr; +}; + +/* + * opt long int compare with 0 + * *cmp x0, #0 + * csinv w0, wzr, wzr, GE + * csinc w0, w0, wzr, LE + * cmp w0, #0 + * => + * cmp x0, #0 + */ +class LongIntCompareWithZPattern : public CGPeepPattern { + public: + LongIntCompareWithZPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~LongIntCompareWithZPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LongIntCompareWithZPattern"; + } + + private: + bool FindLondIntCmpWithZ(Insn &insn); + bool IsPatternMatch(); + std::vector optInsn; +}; + +/* + * add x0, x1, #:lo12:Ljava_2Futil_2FLocale_241_3B_7C_24SwitchMap_24java_24util_24Locale_24Category + * ldr x2, [x0] + * ==> + * ldr x2, [x1, #:lo12:Ljava_2Futil_2FLocale_241_3B_7C_24SwitchMap_24java_24util_24Locale_24Category] + */ +class ComplexMemOperandAArch64 : public PeepPattern { + public: + explicit ComplexMemOperandAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * add x0, x1, x0 + * ldr x2, [x0] + * ==> + * ldr x2, [x1, x0] + */ +class ComplexMemOperandPreAddAArch64 : public PeepPattern { + public: + explicit ComplexMemOperandPreAddAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandPreAddAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * add x0, x0, x1, LSL #2 + * ldr x2, [x0] + * ==> + * ldr x2, [x0,x1,LSL #2] + */ +class ComplexMemOperandLSLAArch64 : public PeepPattern { + public: + explicit ComplexMemOperandLSLAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandLSLAArch64() override = default; + bool CheckShiftValid(const Insn &insn, const BitShiftOperand &lsl) const; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * ldr x0, label_of_constant_1 + * fmov d4, x0 + * ==> + * ldr d4, label_of_constant_1 + */ +class ComplexMemOperandLabelAArch64 : public PeepPattern { + public: + explicit ComplexMemOperandLabelAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandLabelAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * mov R0, vreg1 / R0 mov R0, vreg1 + * add vreg2, vreg1, #imm1 add vreg2, vreg1, #imm1 + * mov R1, vreg2 mov R1, vreg2 + * mov R2, vreg3 mov R2, vreg3 + * ... ... 
+ * mov R0, vreg1
+ * add vreg4, vreg1, #imm2    ->  str vreg5, [vreg1, #imm2]
+ * mov R1, vreg4
+ * mov R2, vreg5
+ */
+class WriteFieldCallPattern : public CGPeepPattern {
+ public:
+  struct WriteRefFieldParam {
+    Operand *objOpnd = nullptr;
+    RegOperand *fieldBaseOpnd = nullptr;
+    int64 fieldOffset = 0;
+    Operand *fieldValue = nullptr;
+  };
+  WriteFieldCallPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~WriteFieldCallPattern() override {
+    prevCallInsn = nullptr;
+    nextInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "WriteFieldCallPattern";
+  }
+
+ private:
+  bool hasWriteFieldCall = false;
+  Insn *prevCallInsn = nullptr;
+  Insn *nextInsn = nullptr;
+  WriteRefFieldParam firstCallParam;
+  WriteRefFieldParam currentCallParam;
+  std::vector<Insn*> paramDefInsns;
+  bool WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam &param);
+  bool IsWriteRefFieldCallInsn(const Insn &insn) const;
+};
+
+/*
+ * Remove following patterns:
+ *   mov x0, xzr/#0
+ *   bl MCC_DecRef_NaiveRCFast
+ */
+class RemoveDecRefPattern : public CGPeepPattern {
+ public:
+  RemoveDecRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~RemoveDecRefPattern() override {
+    prevInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "RemoveDecRefPattern";
+  }
+
+ private:
+  Insn *prevInsn = nullptr;
+};
+
+/*
+ * We optimize the following pattern in this function:
+ * and x1, x1, #imm (imm is 1 << n)
+ * cbz/cbnz x1, .label
+ * =>
+ * and x1, x1, #imm (imm is 1 << n)
+ * tbnz/tbz x1, #n, .label
+ */
+class OneHoleBranchesAArch64 : public PeepPattern {
+ public:
+  explicit OneHoleBranchesAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~OneHoleBranchesAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+};
+
+/*
+ * Replace following pattern:
+ * mov x1, xzr
+ * bl MCC_IncDecRef_NaiveRCFast
+ * =>
+ * bl MCC_IncRef_NaiveRCFast
+ */
+class ReplaceIncDecWithIncPattern : public CGPeepPattern {
+ public:
+  ReplaceIncDecWithIncPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~ReplaceIncDecWithIncPattern() override {
+    prevInsn = nullptr;
+    target = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "ReplaceIncDecWithIncPattern";
+  }
+
+ private:
+  Insn *prevInsn = nullptr;
+  FuncNameOperand *target = nullptr;
+};
+
+/*
+ * Optimize the following patterns:
+ *  and w0, w6, #1      ====>  tbz w6, 0, .label
+ *  cmp w0, #1
+ *  bne .label
+ *
+ *  and w0, w6, #16     ====>  tbz w6, 4, .label
+ *  cmp w0, #16
+ *  bne .label
+ *
+ *  and w0, w6, #32     ====>  tbnz w6, 5, .label
+ *  cmp w0, #32
+ *  beq .label
+ *
+ *  and x0, x6, #32     ====>  tbz x6, 5, .label
+ *  cmp x0, #0
+ *  beq .label
+ *
+ *  and x0, x6, #32     ====>  tbnz x6, 5, .label
+ *  cmp x0, #0
+ *  bne .label
+ */
+class AndCmpBranchesToTbzAArch64 : public PeepPattern {
+ public:
+  explicit AndCmpBranchesToTbzAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {}
+  ~AndCmpBranchesToTbzAArch64() override = default;
+  void Run(BB &bb, Insn &insn) override;
+};
+
+/*
+ * Optimize the following patterns:
+ *  sxth r4, r4         ====>  strh r4, [r0, r3]
+ *  strh r4, [r0, r3]
+ *
+ *  sxtb r4, r4         ====>  strb r4, [r0, r3]
+ *  strb r4,
[r0, r3] + */ +class RemoveSxtBeforeStrAArch64 : public PeepPattern { + public: + explicit RemoveSxtBeforeStrAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~RemoveSxtBeforeStrAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Optimize the following patterns: + * mov x1, #1 + * csel x22, xzr, x1, LS ====> cset x22, HI + * + * mov x1, #1 + * csel x22, x1, xzr, LS ====> cset x22, LS + */ +class CselZeroOneToCsetOpt : public PeepPattern { + public: + explicit CselZeroOneToCsetOpt(CGFunc &cgFunc) : PeepPattern(cgFunc), cgFunc(&cgFunc) {} + ~CselZeroOneToCsetOpt() override = default; + void Run(BB &bb, Insn &insn) override; + private: + Insn *trueMovInsn = nullptr; + Insn *falseMovInsn = nullptr; + Insn *FindFixedValue(Operand &opnd, BB &bb, Operand *&tempOp, const Insn &insn) const; + protected: + CGFunc *cgFunc; +}; + +/* + * Replace following pattern: + * sxtw x1, w0 + * lsl x2, x1, #3 ====> sbfiz x2, x0, #3, #32 + * + * uxtw x1, w0 + * lsl x2, x1, #3 ====> ubfiz x2, x0, #3, #32 + */ +class ComplexExtendWordLslAArch64 : public PeepPattern { + public: + explicit ComplexExtendWordLslAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexExtendWordLslAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + bool IsExtendWordLslPattern(const Insn &insn) const; +}; + +class AArch64PeepHole : public PeepPatternMatch { + public: + AArch64PeepHole(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PeepHole() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kRemoveIdenticalLoadAndStoreOpt = 0, + kRemoveMovingtoSameRegOpt, + kCombineContiLoadAndStoreOpt, + kEliminateSpecifcSXTOpt, + kEliminateSpecifcUXTOpt, + kFmovRegOpt, + kCbnzToCbzOpt, + kCsetCbzToBeqOpt, + kContiLDRorSTRToSameMEMOpt, + kRemoveIncDecRefOpt, + kInlineReadBarriersOpt, + kReplaceDivToMultiOpt, + kAndCmpBranchesToCsetOpt, + kAndCmpBranchesToTstOpt, + kAndCbzBranchesToTstOpt, + kZeroCmpBranchesOpt, + kCselZeroOneToCsetOpt, + kPeepholeOptsNum + }; +}; + +class AArch64PeepHole0 : public PeepPatternMatch { + public: + AArch64PeepHole0(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PeepHole0() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kRemoveIdenticalLoadAndStoreOpt = 0, + kCmpCsetOpt, + kComplexMemOperandOptAdd, + kDeleteMovAfterCbzOrCbnzOpt, + kRemoveSxtBeforeStrOpt, + kRemoveMovingtoSameRegOpt, + kPeepholeOptsNum + }; +}; + +class AArch64PrePeepHole : public PeepPatternMatch { + public: + AArch64PrePeepHole(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PrePeepHole() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kOneHoleBranchesPreOpt = 0, + kLoadFloatPointOpt, + kReplaceOrrToMovOpt, + kReplaceCmpToCmnOpt, + kRemoveIncRefOpt, + kLongIntCompareWithZOpt, + kComplexMemOperandOpt, + kComplexMemOperandPreOptAdd, + kComplexMemOperandOptLSL, + kComplexMemOperandOptLabel, + kWriteFieldCallOpt, + kDuplicateExtensionOpt, + kEnhanceStrLdrAArch64Opt, + kUbfxToUxtw, + kPeepholeOptsNum + }; +}; + +class AArch64PrePeepHole1 : public PeepPatternMatch { + public: + AArch64PrePeepHole1(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PrePeepHole1() override = default; 
+ void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kRemoveDecRefOpt = 0, + kComputationTreeOpt, + kOneHoleBranchesOpt, + kReplaceIncDecWithIncOpt, + kAndCmpBranchesToTbzOpt, + kComplexExtendWordLslOpt, + kPeepholeOptsNum + }; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def new file mode 100644 index 0000000000000000000000000000000000000000..ff08e818524a9bd8890a9a5683f2c6420a02fab9 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -0,0 +1,71 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + ADDTARGETPHASE("layoutstackframe", true); + ADDTARGETPHASE("createstartendlabel", true); + ADDTARGETPHASE("buildehfunc", true); + ADDTARGETPHASE("handlefunction", true); + ADDTARGETPHASE("moveargs", true); + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + /* SSA PHASES */ + ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgvalidbitopt", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA()); + } + /* Normal OPT PHASES */ + ADDTARGETPHASE("cgprepeephole", CGOptions::DoPrePeephole()); + ADDTARGETPHASE("ebo", CGOptions::DoEBO()); + ADDTARGETPHASE("prepeephole", CGOptions::DoPrePeephole()) + ADDTARGETPHASE("ico", CGOptions::DoICO()) + ADDTARGETPHASE("cfgo", !GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); + + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()); + ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt()); + } + ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt()); + + ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole()); + ADDTARGETPHASE("ebo1", CGOptions::DoEBO()); + ADDTARGETPHASE("prescheduling", !GetMIRModule()->IsJavaModule() && CGOptions::DoPreSchedule()); + ADDTARGETPHASE("raopt", CGOptions::DoPreLSRAOpt()); + ADDTARGETPHASE("cgsplitcriticaledge", GetMIRModule()->IsCModule()); + ADDTARGETPHASE("regalloc", true); + ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt()); + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt()); + ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA()); + } + ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || 
CGOptions::DoGlobalOpt())); + ADDTARGETPHASE("generateproepilog", true); + ADDTARGETPHASE("framefinalize", true); + ADDTARGETPHASE("dbgfixcallframeoffsets", true); + ADDTARGETPHASE("cfgo", GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); + ADDTARGETPHASE("peephole0", CGOptions::DoPeephole()) + ADDTARGETPHASE("postebo", CGOptions::DoEBO()); + ADDTARGETPHASE("postcfgo", CGOptions::DoCFGO()); + ADDTARGETPHASE("cgpostpeephole", CGOptions::DoPeephole()) + ADDTARGETPHASE("peephole", CGOptions::DoPeephole()) + ADDTARGETPHASE("gencfi", !GetMIRModule()->IsCModule() || GetMIRModule()->IsWithDbgInfo()); + ADDTARGETPHASE("yieldpoint", GetMIRModule()->IsJavaModule() && CGOptions::IsInsertYieldPoint()); + ADDTARGETPHASE("scheduling", CGOptions::DoSchedule()); + ADDTARGETPHASE("alignanalysis", GetMIRModule()->IsCModule() && CGOptions::DoAlignAnalysis()); + ADDTARGETPHASE("fixshortbranch", true); + ADDTARGETPHASE("cgemit", true); diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_phi_elimination.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_phi_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..f36f3ca68a7c9937d5a7b9667df63c8e201d0860 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_phi_elimination.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H +#define MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H +#include "cg_phi_elimination.h" +namespace maplebe { +class AArch64PhiEliminate : public PhiEliminate { + public: + AArch64PhiEliminate(CGFunc &f, CGSSAInfo &ssaAnalysisResult, MemPool &mp) : PhiEliminate(f, ssaAnalysisResult, mp) {} + ~AArch64PhiEliminate() override = default; + RegOperand &GetCGVirtualOpearnd(RegOperand &ssaOpnd, const Insn &curInsn /* for remat */); + + private: + void ReCreateRegOperand(Insn &insn) override; + Insn &CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) override; + void MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) override; + RegOperand &CreateTempRegForCSSA(RegOperand &oriOpnd) override; + void AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const override; +}; + +class A64OperandPhiElmVisitor : public OperandPhiElmVisitor { + public: + A64OperandPhiElmVisitor(AArch64PhiEliminate *a64PhiElm, Insn &cInsn, uint32 idx) + : a64PhiEliminator(a64PhiElm), + insn(&cInsn), + idx(idx) {}; + ~A64OperandPhiElmVisitor() override = default; + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + + private: + AArch64PhiEliminate *a64PhiEliminator; + Insn *insn; + uint32 idx; +}; +} +#endif //MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..f4ed2aa3c567948ee3b8475ea383f0a5f3bdc808 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H + +#include "proepilog.h" +#include "cg.h" +#include "operand.h" +#include "aarch64_cgfunc.h" +#include "aarch64_operand.h" +#include "aarch64_insn.h" + +namespace maplebe { +using namespace maple; + +class AArch64GenProEpilog : public GenProEpilog { + public: + AArch64GenProEpilog(CGFunc &func, MemPool &memPool) : + GenProEpilog(func), + tmpAlloc(&memPool), + exitBB2CallSitesMap(tmpAlloc.Adapter()) { + useFP = func.UseFP(); + if (func.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stackBaseReg = RFP; + } else { + stackBaseReg = useFP ? 
R29 : RSP;
+    }
+    exitBB2CallSitesMap.clear();
+  }
+  ~AArch64GenProEpilog() override = default;
+
+  bool TailCallOpt() override;
+  bool NeedProEpilog() override;
+  static MemOperand *SplitStpLdpOffsetForCalleeSavedWithAddInstruction(
+      CGFunc &cgFunc, const MemOperand &mo, uint32 bitLen, AArch64reg baseReg = AArch64reg::kRinvalid);
+  static void AppendInstructionPushPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int offset);
+  static void AppendInstructionPushSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int offset);
+  static void AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int offset);
+  static void AppendInstructionPopPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int offset);
+  void Run() override;
+ private:
+  void GenStackGuard(BB&);
+  BB &GenStackGuardCheckInsn(BB&);
+  bool HasLoop();
+  bool OptimizeTailBB(BB &bb, MapleSet<Insn*> &callInsns, const BB &exitBB) const;
+  void TailCallBBOpt(BB &bb, MapleSet<Insn*> &callInsns, BB &exitBB);
+  bool InsertOpndRegs(Operand &opnd, std::set<regno_t> &vecRegs) const;
+  bool InsertInsnRegs(Insn &insn, bool insetSource, std::set<regno_t> &vecSourceRegs,
+                      bool insertTarget, std::set<regno_t> &vecTargetRegs);
+  bool FindRegs(Operand &insn, std::set<regno_t> &vecRegs) const;
+  bool BackwardFindDependency(BB &ifbb, std::set<regno_t> &vecReturnSourceReg,
+                              std::list<Insn*> &existingInsns, std::list<Insn*> &moveInsns);
+  BB *IsolateFastPath(BB&);
+  void AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty);
+  void AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty);
+  void GeneratePushRegs();
+  void GeneratePushUnnamedVarargRegs();
+  void AppendInstructionStackCheck(AArch64reg reg, RegType rty, int offset);
+  void GenerateProlog(BB&);
+
+  void GenerateRet(BB &bb);
+  bool TestPredsOfRetBB(const BB &exitBB);
+  void AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty);
+  void AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty);
+  void GeneratePopRegs();
+  void AppendJump(const MIRSymbol &func);
+  void GenerateEpilog(BB&);
+  void GenerateEpilogForCleanup(BB&);
+  void ConvertToTailCalls(MapleSet<Insn*> &callInsnsMap);
+  Insn &CreateAndAppendInstructionForAllocateCallFrame(int64 argsToStkPassSize, AArch64reg reg0, AArch64reg reg1,
+                                                       RegType rty);
+  Insn &AppendInstructionForAllocateOrDeallocateCallFrame(int64 argsToStkPassSize, AArch64reg reg0, AArch64reg reg1,
+                                                          RegType rty, bool isAllocate);
+  MapleMap<BB*, MapleSet<Insn*>> &GetExitBB2CallSitesMap() {
+    return exitBB2CallSitesMap;
+  }
+  void SetCurTailcallExitBB(BB *bb) {
+    curTailcallExitBB = bb;
+  }
+  BB *GetCurTailcallExitBB() {
+    return curTailcallExitBB;
+  }
+  void SetFastPathReturnBB(BB *bb) {
+    fastPathReturnBB = bb;
+  }
+  BB *GetFastPathReturnBB() {
+    return fastPathReturnBB;
+  }
+  MapleAllocator tmpAlloc;
+  static constexpr const int32 kOffset8MemPos = 8;
+  static constexpr const int32 kOffset16MemPos = 16;
+  MapleMap<BB*, MapleSet<Insn*>> exitBB2CallSitesMap;
+  BB *curTailcallExitBB = nullptr;
+  BB *fastPathReturnBB = nullptr;
+  bool useFP = true;
+  /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */
+  AArch64reg stackBaseReg = RFP;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H */
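For readers skimming the headers: the pro/epilog pass above saves callee-saved registers in pairs wherever possible, since one `stp`/`ldp` moves two registers per instruction (see AppendInstructionPushPair/PopPair). A minimal sketch of that pairing idea, with hypothetical names rather than the pass's real code:

```
#include <utility>
#include <vector>

// Group callee-saved registers two at a time; each pair becomes one
// "stp Ra, Rb, [sp, #offset]", and a leftover register falls back to str/ldr.
std::vector<std::pair<int, int>> PairCalleeSaves(const std::vector<int> &regs) {
  std::vector<std::pair<int, int>> pairs;
  size_t i = 0;
  for (; i + 1 < regs.size(); i += 2) {
    pairs.emplace_back(regs[i], regs[i + 1]);
  }
  if (i < regs.size()) {
    pairs.emplace_back(regs[i], -1);  // -1: no partner, single save
  }
  return pairs;
}
```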
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a5335fc139b313a2b50c97f892921357d6263c6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_AARCH64_PROP_H
+#define MAPLEBE_INCLUDE_AARCH64_PROP_H
+
+#include "cg_prop.h"
+#include "aarch64_cgfunc.h"
+#include "aarch64_strldr.h"
+namespace maplebe {
+class AArch64Prop : public CGProp {
+ public:
+  AArch64Prop(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll)
+      : CGProp(mp, f, sInfo, ll) {}
+  ~AArch64Prop() override = default;
+
+  /* do not extend life range */
+  static bool IsInLimitCopyRange(VRegVersion *toBeReplaced);
+ private:
+  void CopyProp() override;
+  /*
+   * for aarch64
+   * 1. extended register prop
+   * 2. shift register prop
+   * 3. add/ext/shf prop -> str/ldr
+   * 4. const prop
+   */
+  void TargetProp(Insn &insn) override;
+  void PropPatternOpt() override;
+};
+
+class A64StrLdrProp {
+ public:
+  A64StrLdrProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, Insn &insn, CGDce &dce)
+      : cgFunc(&f),
+        ssaInfo(&sInfo),
+        curInsn(&insn),
+        a64StrLdrAlloc(&mp),
+        replaceVersions(a64StrLdrAlloc.Adapter()),
+        cgDce(&dce) {}
+  void DoOpt();
+ private:
+  MemOperand *StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod = kUndef);
+  static MemPropMode SelectStrLdrPropMode(const MemOperand &currMemOpnd);
+  bool ReplaceMemOpnd(const MemOperand &currMemOpnd, const Insn *defInsn);
+  MemOperand *SelectReplaceMem(const Insn &defInsn, const MemOperand &currMemOpnd);
+  RegOperand *GetReplaceReg(RegOperand &a64Reg);
+  MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, uint32 memSize) const;
+  MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount,
+                               bool isSigned, uint32 memSize);
+  bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) const;
+  void DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn);
+  uint32 GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) const;
+
+  bool CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) const;
+
+  CGFunc *cgFunc;
+  CGSSAInfo *ssaInfo;
+  Insn *curInsn;
+  MapleAllocator a64StrLdrAlloc;
+  MapleMap<regno_t, VRegVersion*> replaceVersions;
+  MemPropMode memPropMode = kUndef;
+  CGDce *cgDce = nullptr;
+};
+
+enum ArithmeticType {
+  kAArch64Add,
+  kAArch64Sub,
+  kAArch64Orr,
+  kAArch64Eor,
+  kUndefArith
+};
+
+class A64ConstProp {
+ public:
+  A64ConstProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, Insn &insn)
+      : constPropMp(&mp),
+        cgFunc(&f),
+        ssaInfo(&sInfo),
+        curInsn(&insn) {}
+  void DoOpt();
+  /* false : default lsl #0 true: lsl #12 (only support 12 bit left shift in aarch64) */
+  static MOperator GetRegImmMOP(MOperator regregMop, bool withLeftShift);
+  static MOperator GetReversalMOP(MOperator arithMop);
+  static MOperator GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn);
+
+ private:
+  bool ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd);
+  /* use xzr/wzr in aarch64 to shrink register live range
*/ + void ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg); + + /* replace old Insn with new Insn, update ssa info automatically */ + void ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) const; + ImmOperand *CanDoConstFold(const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit); + + /* optimization */ + bool MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd); + bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT); + bool ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT); + bool ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd); + bool BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd); + + MemPool *constPropMp; + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; + Insn *curInsn; +}; + +class CopyRegProp : public PropOptimizePattern { + public: + CopyRegProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) + : PropOptimizePattern(cgFunc, cgssaInfo, ll) {} + ~CopyRegProp() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + destVersion = nullptr; + srcVersion = nullptr; + } + private: + bool IsValidCopyProp(const RegOperand &dstReg, const RegOperand &srcReg) const; + void VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn); + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + +class RedundantPhiProp : public PropOptimizePattern { + public: + RedundantPhiProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~RedundantPhiProp() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + destVersion = nullptr; + srcVersion = nullptr; + } + + private: + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + +class ValidBitNumberProp : public PropOptimizePattern { + public: + ValidBitNumberProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~ValidBitNumberProp() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + destVersion = nullptr; + srcVersion = nullptr; + } + private: + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + +/* + * frame pointer and stack pointer will not be varied in function body + * treat them as const + */ +class FpSpConstProp : public PropOptimizePattern { + public: + FpSpConstProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~FpSpConstProp() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + fpSpBase = nullptr; + shiftOpnd = nullptr; + aT = kUndefArith; + replaced = nullptr; + } + + private: + bool GetValidSSAInfo(Operand &opnd); + void PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn); + void PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, ArithmeticType curAT); + void PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator oriMop); + int64 ArithmeticFold(int64 valInUse, ArithmeticType useAT) const; + + RegOperand *fpSpBase = nullptr; + ImmOperand *shiftOpnd = nullptr; + ArithmeticType aT = kUndefArith; + VRegVersion *replaced = nullptr; +}; + +/* + * This pattern do: + * 1) + 
* uxtw vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * ------> + * mov vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * 2) + * ldrh R201, [...] + * and R202, R201, #65520 + * uxth R203, R202 + * -------> + * ldrh R201, [...] + * and R202, R201, #65520 + * mov R203, R202 + */ +class ExtendMovPattern : public PropOptimizePattern { +public: + ExtendMovPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~ExtendMovPattern() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + +protected: + void Init() final; + +private: + bool BitNotAffected(const Insn &insn, uint32 validNum); /* check whether significant bits are affected */ + bool CheckSrcReg(regno_t srcRegNo, uint32 validNum); + + MOperator replaceMop = MOP_undef; +}; + +class ExtendShiftPattern : public PropOptimizePattern { +public: + ExtendShiftPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~ExtendShiftPattern() override = default; + bool IsSwapInsn(const Insn &insn) const; + void SwapOpnd(Insn &insn); + bool CheckAllOpndCondition(Insn &insn); + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + void DoExtendShiftOpt(Insn &insn); + + enum ExMOpType : uint8 { + kExUndef, + kExAdd, /* MOP_xaddrrr | MOP_xxwaddrrre | MOP_xaddrrrs */ + kEwAdd, /* MOP_waddrrr | MOP_wwwaddrrre | MOP_waddrrrs */ + kExSub, /* MOP_xsubrrr | MOP_xxwsubrrre | MOP_xsubrrrs */ + kEwSub, /* MOP_wsubrrr | MOP_wwwsubrrre | MOP_wsubrrrs */ + kExCmn, /* MOP_xcmnrr | MOP_xwcmnrre | MOP_xcmnrrs */ + kEwCmn, /* MOP_wcmnrr | MOP_wwcmnrre | MOP_wcmnrrs */ + kExCmp, /* MOP_xcmprr | MOP_xwcmprre | MOP_xcmprrs */ + kEwCmp, /* MOP_wcmprr | MOP_wwcmprre | MOP_wcmprrs */ + }; + + enum LsMOpType : uint8 { + kLsUndef, + kLxAdd, /* MOP_xaddrrr | MOP_xaddrrrs */ + kLwAdd, /* MOP_waddrrr | MOP_waddrrrs */ + kLxSub, /* MOP_xsubrrr | MOP_xsubrrrs */ + kLwSub, /* MOP_wsubrrr | MOP_wsubrrrs */ + kLxCmn, /* MOP_xcmnrr | MOP_xcmnrrs */ + kLwCmn, /* MOP_wcmnrr | MOP_wcmnrrs */ + kLxCmp, /* MOP_xcmprr | MOP_xcmprrs */ + kLwCmp, /* MOP_wcmprr | MOP_wcmprrs */ + kLxEor, /* MOP_xeorrrr | MOP_xeorrrrs */ + kLwEor, /* MOP_weorrrr | MOP_weorrrrs */ + kLxNeg, /* MOP_xinegrr | MOP_xinegrrs */ + kLwNeg, /* MOP_winegrr | MOP_winegrrs */ + kLxIor, /* MOP_xiorrrr | MOP_xiorrrrs */ + kLwIor, /* MOP_wiorrrr | MOP_wiorrrrs */ + }; + + enum SuffixType : uint8 { + kNoSuffix, /* no suffix or do not perform the optimization. 
*/ + kLSL, /* logical shift left */ + kLSR, /* logical shift right */ + kASR, /* arithmetic shift right */ + kExten /* ExtendOp */ + }; + +protected: + void Init() final; + +private: + void SelectExtendOrShift(const Insn &def); + SuffixType CheckOpType(const Operand &lastOpnd) const; + void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount); + void SetExMOpType(const Insn &use); + void SetLsMOpType(const Insn &use); + + MOperator replaceOp; + uint32 replaceIdx; + ExtendShiftOperand::ExtendOp extendOp; + BitShiftOperand::ShiftOp shiftOp; + Insn *defInsn = nullptr; + Insn *newInsn = nullptr; + Insn *curInsn = nullptr; + bool optSuccess; + ExMOpType exMOpType; + LsMOpType lsMOpType; +}; + +/* + * optimization for call convention + * example: + * [BB26] [BB43] + * sub R287, R101, R275 sub R279, R101, R275 + * \ / + * \ / + * [BB27] + * <---- insert new phi: R403, (R275 <26>, R275 <43>) + * old phi: R297, (R287 <26>, R279 <43>) + * / \ + * / \ + * [BB28] \ + * sub R310, R101, R309 \ + * | \ + * | \ + * [BB17] [BB29] [BB44] + * sub R314, R101, R275 | / + * \ | / + * \ | / + * \ | / + * \ | / + * [BB18] + * <---- insert new phi: R404, (R275 <17>, R309 <29>, R403 <44>) + * old phi: R318, (R314 <17>, R310 <29>, R297 <44>) + * mov R1, R318 ====> sub R1, R101, R404 + * / \ + * / \ + * / \ + * [BB19] [BB34] + * sub R336, R101, R335 / + * \ / + * \ / + * \ / + * [BB20] + * <---- insert new phi: R405, (R335 <19>, R404<34>) + * old phi: R340, (R336 <19>, R318 <34>) + * mov R1, R340 ====> sub R1, R101, R405 + */ +class A64PregCopyPattern : public PropOptimizePattern { + public: + A64PregCopyPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~A64PregCopyPattern() override { + firstPhiInsn = nullptr; + } + bool CheckCondition(Insn &insn) override; + void Optimize(Insn &insn) override; + void Run() override; + + protected: + void Init() override { + validDefInsns.clear(); + firstPhiInsn = nullptr; + differIdx = -1; + differOrigNO = 0; + isCrossPhi = false; + } + + private: + bool CheckUselessDefInsn(const Insn *defInsn) const; + bool CheckValidDefInsn(const Insn *defInsn); + bool CheckMultiUsePoints(const Insn *defInsn) const; + bool CheckPhiCaseCondition(Insn &curInsn, Insn &defInsn); + bool DFSFindValidDefInsns(Insn *curDefInsn, RegOperand *lastPhiDef, std::unordered_map &visited); + Insn &CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn); + RegOperand &DFSBuildPhiInsn(Insn *curInsn, std::unordered_map &visited); + RegOperand *CheckAndGetExistPhiDef(Insn &phiInsn, std::vector &validDifferRegNOs) const; + std::vector validDefInsns; + Insn *firstPhiInsn = nullptr; + int differIdx = -1; + regno_t differOrigNO = 0; + bool isCrossPhi = false; +}; + +class A64ReplaceRegOpndVisitor : public ReplaceRegOpndVisitor { + public: + A64ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldRegister ,RegOperand &newRegister) + : ReplaceRegOpndVisitor(f, cInsn, cIdx, oldRegister, newRegister) {} + ~A64ReplaceRegOpndVisitor() override = default; + private: + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(PhiOperand *v) final; +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_PROP_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ra_opt.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ra_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..e68b318d0f323132f1f30be16767d8311affdef4 --- /dev/null +++ 
b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ra_opt.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H + +#include "cg.h" +#include "ra_opt.h" +#include "aarch64_cg.h" +#include "aarch64_insn.h" +#include "aarch64_operand.h" + +namespace maplebe { +class X0OptInfo { + public: + X0OptInfo() : movSrc(nullptr), replaceReg(0), renameInsn(nullptr), renameOpnd(nullptr), renameReg(0) {} + ~X0OptInfo() = default; + + inline RegOperand *GetMovSrc() const { + return movSrc; + } + + inline regno_t GetReplaceReg() const { + return replaceReg; + } + + inline Insn *GetRenameInsn() const { + return renameInsn; + } + + inline Operand *GetRenameOpnd() const { + return renameOpnd; + } + + inline regno_t GetRenameReg() const { + return renameReg; + } + + inline void SetMovSrc(RegOperand *srcReg) { + movSrc = srcReg; + } + + inline void SetReplaceReg(regno_t regno) { + replaceReg = regno; + } + + inline void SetRenameInsn(Insn *insn) { + renameInsn = insn; + } + + inline void ResetRenameInsn() { + renameInsn = nullptr; + } + + inline void SetRenameOpnd(Operand *opnd) { + renameOpnd = opnd; + } + + inline void SetRenameReg(regno_t regno) { + renameReg = regno; + } + + private: + RegOperand *movSrc; + regno_t replaceReg; + Insn *renameInsn; + Operand *renameOpnd; + regno_t renameReg; +}; + +class RaX0Opt { + public: + explicit RaX0Opt(CGFunc* func) : cgFunc(func) {} + ~RaX0Opt() = default; + + bool PropagateX0CanReplace(Operand *opnd, regno_t replaceReg) const; + bool PropagateRenameReg(Insn *insn, const X0OptInfo &optVal) const; + bool PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) const; + bool PropagateX0DetectRedefine(const InsnDesc *md, const Insn *ninsn, const X0OptInfo &optVal, uint32 index) const; + bool PropagateX0Optimize(const BB *bb, const Insn *insn, X0OptInfo &optVal); + bool PropagateX0ForCurrBb(BB *bb, const X0OptInfo &optVal); + void PropagateX0ForNextBb(BB *nextBb, const X0OptInfo &optVal); + void PropagateX0(); + + private: + CGFunc *cgFunc; +}; + +class VregRenameInfo { + public: + VregRenameInfo() = default; + + ~VregRenameInfo() = default; + + BB *firstBBLevelSeen = nullptr; + BB *lastBBLevelSeen = nullptr; + uint32 numDefs = 0; + uint32 numUses = 0; + uint32 numInnerDefs = 0; + uint32 numInnerUses = 0; + uint32 largestUnusedDistance = 0; + uint8 innerMostloopLevelSeen = 0; +}; + +class VregRename { + public: + VregRename(CGFunc *func, MemPool *pool) : cgFunc(func), + memPool(pool), + alloc(pool), + renameInfo(alloc.Adapter()) { + renameInfo.resize(cgFunc->GetMaxRegNum()); + ccRegno = static_cast(&cgFunc->GetOrCreateRflag())->GetRegisterNumber(); + }; + ~VregRename() = default; + + void PrintRenameInfo(regno_t regno) const; + void PrintAllRenameInfo() const; + + void RenameFindLoopVregs(const CGFuncLoops *loop); + void RenameFindVregsToRename(const CGFuncLoops *loop); + bool 
IsProfitableToRename(const VregRenameInfo *info) const;
+  void RenameProfitableVreg(RegOperand *ropnd, const CGFuncLoops *loop);
+  void RenameGetFuncVregInfo();
+  void UpdateVregInfo(regno_t reg, BB *bb, bool isInner, bool isDef);
+  void VregLongLiveRename();
+
+  CGFunc *cgFunc;
+  MemPool *memPool;
+  MapleAllocator alloc;
+  Bfs *bfs = nullptr;
+  MapleVector<VregRenameInfo*> renameInfo;
+  uint32 maxRegnoSeen = 0;
+  regno_t ccRegno;
+};
+
+class AArch64RaOpt : public RaOpt {
+ public:
+  AArch64RaOpt(CGFunc &func, MemPool &pool) : RaOpt(func, pool) {}
+  ~AArch64RaOpt() override = default;
+  void Run() override;
+
+ private:
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reaching.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reaching.h
new file mode 100644
index 0000000000000000000000000000000000000000..f927d9cac5452e02892fd53197d7bf793504c66b
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reaching.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H
+
+#include "reaching.h"
+#include "aarch64_operand.h"
+
+namespace maplebe {
+class AArch64ReachingDefinition : public ReachingDefinition {
+ public:
+  AArch64ReachingDefinition(CGFunc &func, MemPool &memPool) : ReachingDefinition(func, memPool) {}
+  ~AArch64ReachingDefinition() override = default;
+  std::vector<Insn*> FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const final;
+  std::vector<Insn*> FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const final;
+  bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet &useInsnSet) const final;
+  bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB* movBB) const final;
+  bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn,
+                             InsnSet &useInsnSet) const final;
+  bool HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) const;
+  bool DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO,
+                              std::vector<bool> &visitedBB) const;
+  InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const final;
+  InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const final;
+  InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const final;
+  bool FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const;
+
+ protected:
+  void InitStartGen() final;
+  void InitEhDefine(BB &bb) final;
+  void InitGenUse(BB &bb, bool firstTime = true) final;
+  void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) final;
+  void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) final;
+  void GenAllCallerSavedRegs(BB &bb, Insn &insn) final;
+  bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const final;
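+  /* Usage sketch (an assumption about typical clients, not a contract of this
+   * class): a pass that needs the unique reaching definition of insn's first
+   * register operand would do roughly
+   *     InsnSet defs = reachingDef.FindDefForRegOpnd(insn, 0);
+   *     if (defs.size() == 1) { Insn *onlyDef = *defs.begin(); ... }
+   * and bail out when the set is empty or holds several competing defs. */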
+  bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const final;
+  void AddRetPseudoInsn(BB &bb) final;
+  void AddRetPseudoInsns() final;
+  bool IsCallerSavedReg(uint32 regNO) const final;
+  void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const final;
+  void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const final;
+  void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector<bool> &visitedBB,
+                            InsnSet &defInsnSet) const final;
+  void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector<bool> &visitedBB,
+                            InsnSet &defInsnSet) const final;
+  int32 GetStackSize() const final;
+
+ private:
+  void InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef);
+  void InitInfoForListOpnd(const BB &bb, Operand &opnd);
+  void InitInfoForConditionCode(const BB &bb);
+  void InitInfoForRegOpnd(const BB &bb, Operand &opnd, bool isDef);
+  void InitMemInfoForClearStackCall(Insn &callInsn);
+  inline bool CallInsnClearDesignateStackRef(const Insn &callInsn, int64 offset) const;
+  int64 GetEachMemSizeOfPair(MOperator opCode) const;
+  bool DFSFindRegInfoBetweenBB(const BB startBB, const BB &endBB, uint32 regNO, std::vector<bool> &visitedBB,
+                               std::list<bool> &pathStatus, DumpType infoType) const;
+  bool DFSFindRegDomianBetweenBB(const BB startBB, uint32 regNO, std::vector<bool> &visitedBB) const;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h
new file mode 100644
index 0000000000000000000000000000000000000000..a11b1085dbb6b5fa2f15191a229a8f0839235af9
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H
+#include "reg_coalesce.h"
+#include "aarch64_isa.h"
+#include "live.h"
+
+namespace maplebe {
+class AArch64LiveIntervalAnalysis : public LiveIntervalAnalysis {
+ public:
+  AArch64LiveIntervalAnalysis(CGFunc &func, MemPool &memPool)
+      : LiveIntervalAnalysis(func, memPool),
+        vregLive(alloc.Adapter()),
+        candidates(alloc.Adapter()) {}
+
+  ~AArch64LiveIntervalAnalysis() override = default;
+
+  void ComputeLiveIntervals() override;
+  bool IsUnconcernedReg(const RegOperand &regOpnd) const;
+  LiveInterval *GetOrCreateLiveInterval(regno_t regNO);
+  void UpdateCallInfo();
+  void SetupLiveIntervalByOp(Operand &op, Insn &insn, bool isDef);
+  void ComputeLiveIntervalsForEachDefOperand(Insn &insn);
+  void ComputeLiveIntervalsForEachUseOperand(Insn &insn);
+  void SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint);
+  void CoalesceRegPair(RegOperand &regDest, RegOperand &regSrc);
+  void CoalesceRegisters() override;
+  void CollectMoveForEachBB(BB &bb, std::vector<Insn*> &movInsns) const;
+  void CoalesceMoves(std::vector<Insn*> &movInsns, bool phiOnly);
+  void CheckInterference(LiveInterval &li1, LiveInterval &li2) const;
+  void CollectCandidate();
+  std::string PhaseName() const {
+    return "regcoalesce";
+  }
+
+ private:
+  static bool IsRegistersCopy(Insn &insn);
+  MapleUnorderedSet<regno_t> vregLive;
+  MapleSet<regno_t> candidates;
+};
+
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reg_info.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reg_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..b47dd45186c8140ac5bf016f2b58494f659e7281
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_reg_info.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H
+#include "reg_info.h"
+#include "aarch64_operand.h"
+#include "aarch64_insn.h"
+#include "aarch64_abi.h"
+
+namespace maplebe {
+
+class AArch64RegInfo : public RegisterInfo {
+ public:
+  explicit AArch64RegInfo(MapleAllocator &mallocator) : RegisterInfo(mallocator) {
+  }
+
+  ~AArch64RegInfo() override = default;
+
+  bool IsGPRegister(regno_t regNO) const override {
+    return AArch64isa::IsGPRegister(static_cast<AArch64reg>(regNO));
+  }
+  /* phys regs that can be pre-assigned */
+  bool IsPreAssignedReg(regno_t regNO) const override {
+    return AArch64Abi::IsParamReg(static_cast<AArch64reg>(regNO));
+  }
+  regno_t GetIntRetReg(uint32 idx) override {
+    CHECK_FATAL(idx <= AArch64Abi::kNumIntParmRegs, "index out of range in IntRetReg");
+    return AArch64Abi::intReturnRegs[idx];
+  }
+  regno_t GetFpRetReg(uint32 idx) override {
+    CHECK_FATAL(idx <= AArch64Abi::kNumFloatParmRegs, "index out of range in IntRetReg");
+    return AArch64Abi::floatReturnRegs[idx];
+  }
+  bool IsAvailableReg(regno_t regNO) const override {
+    /* special handle for R9 due to MRT_CallSlowNativeExt */
+    if (regNO == R9 || regNO == R29) {
+      return false;
+    }
+    return AArch64Abi::IsAvailableReg(static_cast<AArch64reg>(regNO));
+  }
+  /* These registers must not be overwritten. */
+  bool IsUntouchableReg(regno_t regNO) const override {
+    if ((regNO == RSP) || (regNO == RFP) || regNO == RZR) {
+      return true;
+    }
+    /* when yieldpoint is enabled, the RYP(x19) can not be used. */
+    if (GetCurrFunction()->GetCG()->GenYieldPoint() && (regNO == RYP)) {
+      return true;
+    }
+    return false;
+  }
+  uint32 GetIntRegsParmsNum() override {
+    return AArch64Abi::kNumIntParmRegs;
+  }
+  uint32 GetFloatRegsParmsNum() override {
+    return AArch64Abi::kNumFloatParmRegs;
+  }
+  uint32 GetIntRetRegsNum() override {
+    return AArch64Abi::kNumIntParmRegs;
+  }
+  uint32 GetFpRetRegsNum() override {
+    return AArch64Abi::kNumFloatParmRegs;
+  }
+  uint32 GetNormalUseOperandNum() override {
+    return AArch64Abi::kNormalUseOperandNum;
+  }
+  uint32 GetIntParamRegIdx(regno_t regNO) const override {
+    return static_cast<uint32>(regNO - *GetIntRegs().begin());
+  }
+  uint32 GetFpParamRegIdx(regno_t regNO) const override {
+    return static_cast<uint32>(regNO - *GetFpRegs().begin());
+  }
+  regno_t GetLastParamsIntReg() override {
+    return R7;
+  }
+  regno_t GetLastParamsFpReg() override {
+    return V7;
+  }
+  uint32 GetAllRegNum() override {
+    return kAllRegNum;
+  }
+  regno_t GetInvalidReg() override {
+    return kRinvalid;
+  }
+  bool IsVirtualRegister(const RegOperand &regOpnd) override {
+    return regOpnd.GetRegisterNumber() > kAllRegNum;
+  }
+  bool IsVirtualRegister(regno_t regno) override {
+    return regno > kAllRegNum;
+  }
+  uint32 GetReservedSpillReg() override {
+    return R16;
+  }
+  uint32 GetSecondReservedSpillReg() override {
+    return R17;
+  }
+
+  void Init() override;
+  void Fini() override;
+  void SaveCalleeSavedReg(MapleSet<regno_t> savedRegs) override;
+  bool IsSpecialReg(regno_t regno) const override;
+  bool IsCalleeSavedReg(regno_t regno) const override;
+  bool IsYieldPointReg(regno_t regNO) const override;
+  bool IsUnconcernedReg(regno_t regNO) const override;
+  bool IsUnconcernedReg(const RegOperand &regOpnd) const override;
+  bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) override;
+  RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag = 0) override;
+  ListOperand *CreateListOperand() override;
+  Insn *BuildMovInstruction(Operand &opnd0, Operand &opnd1) override;
+  Insn
*BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override; + Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override; + Insn *BuildCommentInsn(const std::string &comment) override; + MemOperand *GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) override; + MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) override; + void FreeSpillRegMem(regno_t vrNum) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h new file mode 100644 index 0000000000000000000000000000000000000000..26a00f61c61fde0d63620f773855bdadd9c5218f --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h @@ -0,0 +1,248 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H + +#include "cg.h" +#include "regsaves.h" +#include "aarch64_cg.h" +#include "aarch64_insn.h" +#include "aarch64_operand.h" + +namespace maplebe { + +/* Saved reg info. 
This class is created to avoid the complexity of
+   nested Maple Containers */
+class SavedRegInfo {
+ public:
+  bool insertAtLastMinusOne = false;
+  explicit SavedRegInfo(MapleAllocator &alloc)
+      : saveSet(alloc.Adapter()),
+        restoreEntrySet(alloc.Adapter()),
+        restoreExitSet(alloc.Adapter()) {}
+
+  bool ContainSaveReg(regno_t r) {
+    if (saveSet.find(r) != saveSet.end()) {
+      return true;
+    }
+    return false;
+  }
+
+  bool ContainEntryReg(regno_t r) {
+    if (restoreEntrySet.find(r) != restoreEntrySet.end()) {
+      return true;
+    }
+    return false;
+  }
+
+  bool ContainExitReg(regno_t r) {
+    if (restoreExitSet.find(r) != restoreExitSet.end()) {
+      return true;
+    }
+    return false;
+  }
+
+  void InsertSaveReg(regno_t r) {
+    (void)saveSet.insert(r);
+  }
+
+  void InsertEntryReg(regno_t r) {
+    (void)restoreEntrySet.insert(r);
+  }
+
+  void InsertExitReg(regno_t r) {
+    (void)restoreExitSet.insert(r);
+  }
+
+  MapleSet<regno_t> &GetSaveSet() {
+    return saveSet;
+  }
+
+  MapleSet<regno_t> &GetEntrySet() {
+    return restoreEntrySet;
+  }
+
+  MapleSet<regno_t> &GetExitSet() {
+    return restoreExitSet;
+  }
+
+  void RemoveSaveReg(regno_t r) {
+    (void)saveSet.erase(r);
+  }
+
+ private:
+  MapleSet<regno_t> saveSet;
+  MapleSet<regno_t> restoreEntrySet;
+  MapleSet<regno_t> restoreExitSet;
+};
+
+class SavedBBInfo {
+ public:
+  explicit SavedBBInfo(MapleAllocator &alloc) : bbList(alloc.Adapter()) {}
+
+  MapleSet<BB*> &GetBBList() {
+    return bbList;
+  }
+
+  void InsertBB(BB *bb) {
+    (void)bbList.insert(bb);
+  }
+
+  void RemoveBB(BB *bb) {
+    (void)bbList.erase(bb);
+  }
+
+ private:
+  MapleSet<BB*> bbList;
+};
+
+class AArch64RegSavesOpt : public RegSavesOpt {
+ public:
+  AArch64RegSavesOpt(CGFunc &func, MemPool &pool, DomAnalysis &dom, PostDomAnalysis &pdom)
+      : RegSavesOpt(func, pool),
+        domInfo(&dom),
+        pDomInfo(&pdom),
+        bbSavedRegs(alloc.Adapter()),
+        regSavedBBs(alloc.Adapter()),
+        regOffset(alloc.Adapter()),
+        id2bb(alloc.Adapter()) {
+    bbSavedRegs.resize(func.NumBBs());
+    regSavedBBs.resize(sizeof(CalleeBitsType) << 3);
+    for (size_t i = 0; i < bbSavedRegs.size(); ++i) {
+      bbSavedRegs[i] = nullptr;
+    }
+    for (size_t i = 0; i < regSavedBBs.size(); ++i) {
+      regSavedBBs[i] = nullptr;
+    }
+  }
+  ~AArch64RegSavesOpt() override = default;
+
+  using CalleeBitsType = uint64;
+
+  void InitData();
+  void CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse);
+  void GenerateReturnBBDefUse(const BB &bb);
+  void ProcessCallInsnParam(BB &bb);
+  void ProcessAsmListOpnd(const BB &bb, Operand &opnd, uint32 idx);
+  void ProcessListOpnd(const BB &bb, Operand &opnd);
+  void ProcessMemOpnd(const BB &bb, Operand &opnd);
+  void ProcessCondOpnd(const BB &bb);
+  void GetLocalDefUse();
+  void PrintBBs() const;
+  int CheckCriteria(BB *bb, regno_t reg) const;
+  bool AlreadySavedInDominatorList(const BB *bb, regno_t reg) const;
+  void DetermineCalleeSaveLocationsDoms();
+  void DetermineCalleeSaveLocationsPre();
+  bool DetermineCalleeRestoreLocations();
+  int32 FindNextOffsetForCalleeSave() const;
+  void InsertCalleeSaveCode();
+  void InsertCalleeRestoreCode();
+  void Verify(regno_t reg, BB* bb, std::set<BB*> *visited, uint32 *s, uint32 *r);
+  void Run() override;
+
+  DomAnalysis *GetDomInfo() const {
+    return domInfo;
+  }
+
+  PostDomAnalysis *GetPostDomInfo() const {
+    return pDomInfo;
+  }
+
+  Bfs *GetBfs() const {
+    return bfs;
+  }
+
+  CalleeBitsType *GetCalleeBitsDef() {
+    return calleeBitsDef;
+  }
+
+  CalleeBitsType *GetCalleeBitsUse() {
+    return calleeBitsUse;
+  }
+
+  CalleeBitsType GetBBCalleeBits(CalleeBitsType *data, uint32 bid) const {
+    return data[bid];
+  }
+
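+  /* Worked example of the callee-save bitmap manipulated below (layout is
+   * documented at RegBitMap): R19..R28 sit in bits 0..9 and V8..V31 in bits
+   * 10..33, so recording R20 and V9 as saved in a block sets
+   *     mask = (1ULL << 1) | (1ULL << 11)
+   * and a single CalleeBitsType word per BB covers all 34 registers. */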
+  void SetCalleeBit(CalleeBitsType *data, uint32 bid, regno_t reg) const {
+    CalleeBitsType mask = 1ULL << RegBitMap(reg);
+    if ((GetBBCalleeBits(data, bid) & mask) == 0) {
+      data[bid] = GetBBCalleeBits(data, bid) | mask;
+    }
+  }
+
+  void ResetCalleeBit(CalleeBitsType *data, uint32 bid, regno_t reg) const {
+    CalleeBitsType mask = 1ULL << RegBitMap(reg);
+    data[bid] = GetBBCalleeBits(data, bid) & ~mask;
+  }
+
+  bool IsCalleeBitSet(CalleeBitsType *data, uint32 bid, regno_t reg) const {
+    CalleeBitsType mask = 1ULL << RegBitMap(reg);
+    return GetBBCalleeBits(data, bid) & mask;
+  }
+
+  /* AArch64 specific callee-save registers bit positions
+       0       9  10               33   -- position
+     R19 .. R28  V8 .. V15 V16 .. V31   -- regs */
+  uint32 RegBitMap(regno_t reg) const {
+    uint32 r;
+    if (reg <= R28) {
+      r = (reg - R19);
+    } else {
+      r = ((R28 - R19) + 1) + (reg - V8);
+    }
+    return r;
+  }
+
+  regno_t ReverseRegBitMap(uint32 reg) const {
+    if (reg < 10) {
+      return static_cast<regno_t>(R19 + reg);
+    } else {
+      return static_cast<regno_t>((V8 + reg) - (R28 - R19 + 1));
+    }
+  }
+
+  SavedRegInfo *GetbbSavedRegsEntry(uint32 bid) {
+    if (bbSavedRegs[bid] == nullptr) {
+      bbSavedRegs[bid] = memPool->New<SavedRegInfo>(alloc);
+    }
+    return bbSavedRegs[bid];
+  }
+
+  void SetId2bb(BB *bb) {
+    id2bb[bb->GetId()] = bb;
+  }
+
+  BB *GetId2bb(uint32 bid) {
+    return id2bb[bid];
+  }
+
+ private:
+  DomAnalysis *domInfo;
+  PostDomAnalysis *pDomInfo;
+  Bfs *bfs = nullptr;
+  CalleeBitsType *calleeBitsDef = nullptr;
+  CalleeBitsType *calleeBitsUse = nullptr;
+  MapleVector<SavedRegInfo*> bbSavedRegs;  /* set of regs to be saved in a BB */
+  MapleVector<SavedBBInfo*> regSavedBBs;   /* set of BBs to be saved for a reg */
+  MapleMap<regno_t, uint32> regOffset;     /* save offset of each register */
+  MapleMap<uint32, BB*> id2bb;             /* bbid to bb* mapping */
+  bool oneAtaTime = false;
+  regno_t oneAtaTimeReg = 0;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h
new file mode 100644
index 0000000000000000000000000000000000000000..aedf1861f795044f2f53d009dfc834c7b998069c
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H
+
+#include "schedule.h"
+#include "aarch64_operand.h"
+
+namespace maplebe {
+enum RegisterType : uint8 {
+  kRegisterUndef,
+  kRegisterInt,
+  kRegisterFloat,
+  kRegisterCc,
+  kRegisterLast,
+};
+
+class ScheduleProcessInfo {
+ public:
+  explicit ScheduleProcessInfo(uint32 size) {
+    availableReadyList.reserve(size);
+    scheduledNodes.reserve(size);
+  }
+
+  virtual ~ScheduleProcessInfo() = default;
+
+  uint32 GetLastUpdateCycle() const {
+    return lastUpdateCycle;
+  }
+
+  void SetLastUpdateCycle(uint32 updateCycle) {
+    lastUpdateCycle = updateCycle;
+  }
+
+  uint32 GetCurrCycle() const {
+    return currCycle;
+  }
+
+  void IncCurrCycle() {
+    ++currCycle;
+  }
+
+  void DecAdvanceCycle() {
+    advanceCycle--;
+  }
+
+  uint32 GetAdvanceCycle() const {
+    return advanceCycle;
+  }
+
+  void SetAdvanceCycle(uint32 cycle) {
+    advanceCycle = cycle;
+  }
+
+  void ClearAvailableReadyList() {
+    availableReadyList.clear();
+  }
+
+  void PushElemIntoAvailableReadyList(DepNode *node) {
+    availableReadyList.emplace_back(node);
+  }
+
+  size_t SizeOfAvailableReadyList() const {
+    return availableReadyList.size();
+  }
+
+  bool AvailableReadyListIsEmpty() const {
+    return availableReadyList.empty();
+  }
+
+  void SetAvailableReadyList(const std::vector<DepNode*> &tempReadyList) {
+    availableReadyList = tempReadyList;
+  }
+
+  const std::vector<DepNode*> &GetAvailableReadyList() const {
+    return availableReadyList;
+  }
+
+  const std::vector<DepNode*> &GetAvailableReadyList() {
+    return availableReadyList;
+  }
+
+  void PushElemIntoScheduledNodes(DepNode *node) {
+    node->SetState(kScheduled);
+    node->SetSchedCycle(currCycle);
+    node->OccupyUnits();
+    scheduledNodes.emplace_back(node);
+  }
+
+  bool IsFirstSeparator() const {
+    return isFirstSeparator;
+  }
+
+  void ResetIsFirstSeparator() {
+    isFirstSeparator = false;
+  }
+
+  size_t SizeOfScheduledNodes() const {
+    return scheduledNodes.size();
+  }
+
+  const std::vector<DepNode*> &GetScheduledNodes() const {
+    return scheduledNodes;
+  }
+
+ private:
+  std::vector<DepNode*> availableReadyList;
+  std::vector<DepNode*> scheduledNodes;
+  uint32 lastUpdateCycle = 0;
+  uint32 currCycle = 0;
+  uint32 advanceCycle = 0;
+  bool isFirstSeparator = true;
+};
+
+
+class AArch64ScheduleProcessInfo : public ScheduleProcessInfo {
+ public:
+  explicit AArch64ScheduleProcessInfo(uint32 size) : ScheduleProcessInfo(size) {}
+  ~AArch64ScheduleProcessInfo() override = default;
+
+  /* recover register type which is not recorded in live analysis */
+  static RegType GetRegisterType(CGFunc &f, regno_t regNO);
+  void VaryLiveRegSet(CGFunc &f, regno_t regNO, bool isInc);
+  void VaryFreeRegSet(CGFunc &f, std::set<regno_t> regNOs, DepNode &node);
+
+  uint32 GetFreeIntRegs(DepNode &node) {
+    return freeIntRegNodeSet.count(&node) ? freeIntRegNodeSet.find(&node)->second : 0;
+  }
+  void IncFreeIntRegNode(DepNode &node) {
+    if (!freeIntRegNodeSet.count(&node)) {
+      freeIntRegNodeSet.emplace(std::pair<DepNode*, uint32>(&node, 1));
+    } else {
+      freeIntRegNodeSet.find(&node)->second++;
+    }
+  }
+  const std::map<DepNode*, uint32> &GetFreeIntRegNodeSet() const {
+    return freeIntRegNodeSet;
+  }
+  void IncFreeFpRegNode(DepNode &node) {
+    if (!freeFpRegNodeSet.count(&node)) {
+      freeFpRegNodeSet.emplace(std::pair<DepNode*, uint32>(&node, 1));
+    } else {
+      freeFpRegNodeSet.find(&node)->second++;
+    }
+  }
+  uint32 GetFreeFpRegs(DepNode &node) {
+    return freeFpRegNodeSet.count(&node) ? freeFpRegNodeSet.find(&node)->second : 0;
+  }
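+  /* Shape of the list-scheduling loop that drives this bookkeeping (a sketch
+   * of the idea only, not the exact control flow of AArch64Schedule::DoSchedule):
+   *     while (nodes remain) {
+   *       if (GetAdvanceCycle() > 0) { DecAdvanceCycle(); IncCurrCycle(); continue; }
+   *       pick the most profitable schedulable node from the ready list;
+   *       PushElemIntoScheduledNodes(best);  // stamps SchedCycle = currCycle
+   *       release best's ready successors into the ready list;
+   *     }
+   */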
+  const std::map<DepNode*, uint32> &GetFreeFpRegNodeSet() const {
+    return freeFpRegNodeSet;
+  }
+
+  void ClearALLFreeRegNodeSet() {
+    freeIntRegNodeSet.clear();
+    freeFpRegNodeSet.clear();
+  }
+
+  size_t FindIntLiveReg(regno_t reg) const {
+    return intLiveRegSet.count(reg);
+  }
+  void IncIntLiveRegSet(regno_t reg) {
+    intLiveRegSet.emplace(reg);
+  }
+  void DecIntLiveRegSet(regno_t reg) {
+    intLiveRegSet.erase(reg);
+  }
+  size_t FindFpLiveReg(regno_t reg) const {
+    return fpLiveRegSet.count(reg);
+  }
+  void IncFpLiveRegSet(regno_t reg) {
+    fpLiveRegSet.emplace(reg);
+  }
+  void DecFpLiveRegSet(regno_t reg) {
+    fpLiveRegSet.erase(reg);
+  }
+
+  size_t SizeOfIntLiveRegSet() const {
+    return intLiveRegSet.size();
+  }
+
+  size_t SizeOfCalleeSaveLiveRegister(bool isInt) {
+    size_t num = 0;
+    if (isInt) {
+      for (auto regNO : intLiveRegSet) {
+        if (regNO > static_cast<regno_t>(R19)) {
+          num++;
+        }
+      }
+    } else {
+      for (auto regNO : fpLiveRegSet) {
+        if (regNO > static_cast<regno_t>(V16)) {
+          num++;
+        }
+      }
+    }
+    return num;
+  }
+
+  size_t SizeOfFpLiveRegSet() const {
+    return fpLiveRegSet.size();
+  }
+ private:
+  std::set<regno_t> intLiveRegSet;
+  std::set<regno_t> fpLiveRegSet;
+  std::map<DepNode*, uint32> freeIntRegNodeSet;
+  std::map<DepNode*, uint32> freeFpRegNodeSet;
+};
+
+class AArch64Schedule : public Schedule {
+ public:
+  AArch64Schedule(CGFunc &func, MemPool &memPool, LiveAnalysis &live, const std::string &phaseName)
+      : Schedule(func, memPool, live, phaseName) {
+    intCalleeSaveThreshold = func.UseFP() ? intCalleeSaveThresholdBase : intCalleeSaveThresholdEnhance;
+  }
+  ~AArch64Schedule() override = default;
+ protected:
+  void DumpDepGraph(const MapleVector<DepNode*> &nodes) const;
+  void DumpScheduleResult(const MapleVector<DepNode*> &nodes, SimulateType type) const;
+  void GenerateDot(const BB &bb, const MapleVector<DepNode*> &nodes) const;
+  void EraseNodeFromNodeList(const DepNode &target, MapleVector<DepNode*> &nodeList) override;
+  void FindAndCombineMemoryAccessPair(const std::vector<DepNode*> &memList) override;
+  void RegPressureScheduling(BB &bb, MapleVector<DepNode*> &nodes) override;
+
+ private:
+  enum CSRResult : uint8 {
+    kNode1,
+    kNode2,
+    kDoCSP  /* can do csp further */
+  };
+  void Init() override;
+  void MemoryAccessPairOpt() override;
+  void ClinitPairOpt() override;
+  uint32 DoSchedule() override;
+  uint32 DoBruteForceSchedule() override;
+  uint32 SimulateOnly() override;
+  void UpdateBruteForceSchedCycle() override;
+  void IterateBruteForce(DepNode &targetNode, MapleVector<DepNode*> &readyList, uint32 currCycle,
+                         MapleVector<DepNode*> &scheduledNodes, uint32 &maxCycleCount,
+                         MapleVector<DepNode*> &optimizedScheduledNodes) override;
+  bool CanCombine(const Insn &insn) const override;
+  void ListScheduling(bool beforeRA) override;
+  void BruteForceScheduling(const BB &bb);
+  void SimulateScheduling(const BB &bb);
+  void FinalizeScheduling(BB &bb, const DepAnalysis &depAnalysis) override;
+  uint32 ComputeEstart(uint32 cycle) override;
+  void ComputeLstart(uint32 maxEstart) override;
+  void UpdateELStartsOnCycle(uint32 cycle) override;
+  void RandomTest() override;
+  void EraseNodeFromReadyList(const DepNode &target) override;
+  uint32 GetNextSepIndex() const override;
+  void CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const override;
+  static bool IfUseUnitKind(const DepNode &depNode, uint32 index);
+  void UpdateReadyList(DepNode &targetNode, MapleVector<DepNode*> &readyList, bool updateEStart) override;
+  void UpdateScheduleProcessInfo(AArch64ScheduleProcessInfo &info);
+  void UpdateAdvanceCycle(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &targetNode);
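+  /* How the callee-save thresholds below are meant to interact with the
+   * live-set sizes above (a reading of the surrounding code, not a spec):
+   * once SizeOfCalleeSaveLiveRegister() exceeds intCalleeSaveThreshold, the
+   * comparison in DoCSR() starts favouring the candidate node that frees
+   * callee-saved registers over the one that merely shortens the critical path. */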
+  bool CheckSchedulable(AArch64ScheduleProcessInfo &info) const;
+  void SelectNode(AArch64ScheduleProcessInfo &scheduleInfo);
+  static void DumpDebugInfo(const ScheduleProcessInfo &scheduleInfo);
+  bool CompareDepNode(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const;
+  void CalculateMaxUnitKindCount(ScheduleProcessInfo &scheduleInfo);
+  void UpdateReleaseRegInfo(AArch64ScheduleProcessInfo &scheduleInfo);
+  std::set<regno_t> CanFreeRegister(const DepNode &node) const;
+  void UpdateLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode& node);
+  void InitLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo);
+  int CalSeriesCycles(const MapleVector<DepNode*> &nodes);
+  CSRResult DoCSR(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const;
+  AArch64Schedule::CSRResult ScheduleCrossCall(const DepNode &node1, const DepNode &node2) const;
+  int intCalleeSaveThreshold = 0;
+
+  static uint32 maxUnitIndex;
+  static int intRegPressureThreshold;
+  static int fpRegPressureThreshold;
+  static int intCalleeSaveThresholdBase;
+  static int intCalleeSaveThresholdEnhance;
+  static int fpCalleeSaveThreshold;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h
new file mode 100644
index 0000000000000000000000000000000000000000..bcf6142889b1cff05e285a4a91ae4b341e76f2e1
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_CG_INCLUDE_AARCH64_SSA_H +#define MAPLEBE_CG_INCLUDE_AARCH64_SSA_H + +#include "cg_ssa.h" +#include "aarch64_insn.h" + +namespace maplebe { +class AArch64CGSSAInfo : public CGSSAInfo { + public: + AArch64CGSSAInfo(CGFunc &f, DomAnalysis &da, MemPool &mp, MemPool &tmp) : CGSSAInfo(f, da, mp, tmp) {} + ~AArch64CGSSAInfo() override = default; + void DumpInsnInSSAForm(const Insn &insn) const override; + RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) override; + MemOperand *CreateMemOperand(MemOperand &memOpnd, bool isOnSSA /* false = on cgfunc */); + void ReplaceInsn(Insn &oriInsn, Insn &newInsn) override; + void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) override; + void CreateNewInsnSSAInfo(Insn &newInsn) override; + + private: + void RenameInsn(Insn &insn) override; + VRegVersion *RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx); + RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) override; + void CheckAsmDUbinding(Insn &insn, const VRegVersion *toBeReplaced, VRegVersion *newVersion); +}; + +class A64SSAOperandRenameVisitor : public SSAOperandVisitor { + public: + A64SSAOperandRenameVisitor(AArch64CGSSAInfo &cssaInfo, Insn &cInsn, const OpndDesc &cProp, uint32 idx) + : SSAOperandVisitor(cInsn, cProp, idx), ssaInfo(&cssaInfo) {} + ~A64SSAOperandRenameVisitor() override = default; + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *a64MemOpnd) final; + + private: + AArch64CGSSAInfo *ssaInfo; +}; + +class A64OpndSSAUpdateVsitor : public SSAOperandVisitor, + public OperandVisitor { + public: + explicit A64OpndSSAUpdateVsitor(AArch64CGSSAInfo &cssaInfo) : ssaInfo(&cssaInfo) {} + ~A64OpndSSAUpdateVsitor() override = default; + void MarkIncrease() { + isDecrease = false; + }; + void MarkDecrease() { + isDecrease = true; + }; + bool HasDeleteDef() const { + return !deletedDef.empty(); + } + void Visit(RegOperand *regOpnd) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *a64MemOpnd) final; + void Visit(PhiOperand *phiOpnd) final; + + bool IsPhi() const { + return isPhi; + } + + void SetPhi(bool flag) { + isPhi = flag; + } + + private: + void UpdateRegUse(uint32 ssaIdx); + void UpdateRegDef(uint32 ssaIdx); + AArch64CGSSAInfo *ssaInfo; + bool isDecrease = false; + std::set deletedDef; + bool isPhi = false; +}; + +class A64SSAOperandDumpVisitor : public SSAOperandDumpVisitor { + public: + explicit A64SSAOperandDumpVisitor(const MapleUnorderedMap &allssa) : + SSAOperandDumpVisitor(allssa) {}; + ~A64SSAOperandDumpVisitor() override = default; + void Visit(RegOperand *a64RegOpnd) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *a64MemOpnd) final; + void Visit(PhiOperand *phi) final; +}; +} + +#endif //MAPLEBE_CG_INCLUDE_AARCH64_SSA_H diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h new file mode 100644 index 0000000000000000000000000000000000000000..aaeb74754e6fa226c24d689f84b4c0c43ef73aa2 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h @@ -0,0 +1,81 @@ + /* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H + +#include "strldr.h" +#include "aarch64_reaching.h" +#include "aarch64_operand.h" + +namespace maplebe { +using namespace maple; +enum MemPropMode : uint8 { + kUndef, + kPropBase, + kPropOffset, + kPropSignedExtend, + kPropUnsignedExtend, + kPropShift +}; + +class AArch64StoreLoadOpt : public StoreLoadOpt { + public: + AArch64StoreLoadOpt(CGFunc &func, MemPool &memPool) + : StoreLoadOpt(func, memPool), localAlloc(&memPool), str2MovMap(localAlloc.Adapter()) {} + ~AArch64StoreLoadOpt() override = default; + void Run() final; + void DoStoreLoadOpt(); + void DoLoadZeroToMoveTransfer(const Insn &strInsn, short strSrcIdx, + const InsnSet &memUseInsnSet) const; + void DoLoadToMoveTransfer(Insn &strInsn, short strSrcIdx, + short memSeq, const InsnSet &memUseInsnSet); + bool CheckStoreOpCode(MOperator opCode) const; + static bool CheckNewAmount(const Insn &insn, uint32 newAmount); + + private: + void StrLdrIndexModeOpt(Insn &currInsn); + bool CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, regno_t replaceRegNo); + bool CheckDefInsn(Insn &defInsn, Insn &currInsn); + bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx); + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal); + MemOperand *SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset); + MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned); + bool CanDoMemProp(const Insn *insn); + bool CanDoIndexOpt(const MemOperand &MemOpnd); + void MemPropInit(); + void SelectPropMode(const MemOperand &currMemOpnd); + int64 GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno_t baseRegNO, uint32 memOpndSize); + MemOperand *SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd); + bool ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset); + void MemProp(Insn &insn); + void ProcessStrPair(Insn &insn); + void ProcessStr(Insn &insn); + void GenerateMoveLiveInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq); + void GenerateMoveDeadInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq); + bool HasMemBarrier(const Insn &ldrInsn, const Insn &strInsn) const; + bool IsAdjacentBB(Insn &defInsn, Insn &curInsn) const; + MapleAllocator localAlloc; + /* the max number of mov insn to optimize. */ + static constexpr uint8 kMaxMovNum = 2; + MapleMap str2MovMap; + MemPropMode propMode = kUndef; + uint32 amount = 0; + bool removeDefInsn = false; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3379b9be1d3ac2a734185c7d632bfaccdf8eb80d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H + +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { + +/** + * Get or create new memory operand for load instruction loadIns for which + * machine opcode will be replaced with newLoadMop. + * + * @param loadIns load instruction + * @param newLoadMop new opcode for load instruction + * @return memory operand for new load machine opcode + * or nullptr if memory operand can't be obtained + */ +MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, MOperator newLoadMop); +} // namespace maplebe + +#endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..4ab103375cc16e4cc13dfa2f5ec4b81cfc68ae84 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H + +#include "cg_validbit_opt.h" +#include "operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +class AArch64ValidBitOpt : public ValidBitOpt { + public: + AArch64ValidBitOpt(CGFunc &f, CGSSAInfo &info) : ValidBitOpt(f, info) {} + ~AArch64ValidBitOpt() override = default; + + void DoOpt(BB &bb, Insn &insn) override; + void SetValidBits(Insn &insn) override; + bool SetPhiValidBits(Insn &insn) override; +}; + +/* + * Example 1) + * def w9 def w9 + * ... ... 
+ * and w4, w9, #255 ===> mov w4, w9
+ *
+ * Example 2)
+ * and w6[16], w0[16], #FF00[16]   mov w6, w0
+ * asr w6, w6[16], #8[4]     ===>  asr w6, w6
+ */
+class AndValidBitPattern : public ValidBitPattern {
+ public:
+  AndValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
+  ~AndValidBitPattern() override {
+    desReg = nullptr;
+    srcReg = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "AndValidBitPattern";
+  }
+
+ private:
+  bool CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const;
+  MOperator newMop = MOP_undef;
+  RegOperand *desReg = nullptr;
+  RegOperand *srcReg = nullptr;
+};
+
+/*
+ * Example 1)
+ * uxth w1[16], w2[16] / uxtb w1[8], w2[8]
+ * ===>
+ * mov w1, w2
+ *
+ * Example 2)
+ * ubfx w1, w2[16], #0, #16 / sbfx w1, w2[16], #0, #16
+ * ===>
+ * mov w1, w2
+ */
+class ExtValidBitPattern : public ValidBitPattern {
+ public:
+  ExtValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
+  ~ExtValidBitPattern() override {
+    newDstOpnd = nullptr;
+    newSrcOpnd = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "ExtValidBitPattern";
+  }
+
+ private:
+  RegOperand *newDstOpnd = nullptr;
+  RegOperand *newSrcOpnd = nullptr;
+  MOperator newMop = MOP_undef;
+};
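Both patterns above reduce to the same question: does the source already fit in the bits the instruction would mask or extend? A toy model of that valid-bit test, with simplified types in place of the real `ValidBitOpt` API:

```
// Toy model of the valid-bit reasoning used by ExtValidBitPattern:
// if the source of uxth/uxtb already has no more valid bits than the
// extension keeps, the extension is a plain register copy.
// Simplified stand-alone code, not the real ValidBitOpt API.
#include <cstdint>
#include <iostream>

struct VReg {
    uint32_t validBit;  // number of significant low bits
};

// Returns true when "uxt<width> dst, src" can become "mov dst, src".
bool ExtIsCopy(const VReg &src, uint32_t extWidth) {
    return src.validBit <= extWidth;
}

int main() {
    VReg w2{16};  // w2 is known to hold a 16-bit value
    std::cout << (ExtIsCopy(w2, 16) ? "mov w1, w2\n"     // uxth w1, w2 folds away
                                    : "uxth w1, w2\n");
    return 0;
}
```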
+
+/*
+ * cmp w0, #0
+ * cset w1, NE --> mov w1, w0
+ *
+ * cmp w0, #0
+ * cset w1, EQ --> eor w1, w0, 1
+ *
+ * cmp w0, #1
+ * cset w1, NE --> eor w1, w0, 1
+ *
+ * cmp w0, #1
+ * cset w1, EQ --> mov w1, w0
+ *
+ * cmp w0, #0
+ * cset w0, NE --> null
+ *
+ * cmp w0, #1
+ * cset w0, EQ --> null
+ *
+ * condition:
+ *   1. the first operand of the cmp instruction must have only one valid bit
+ *   2. the second operand of the cmp instruction must be 0 or 1
+ *   3. the flag register of the cmp instruction must not be used later
+ */
+class CmpCsetVBPattern : public ValidBitPattern {
+ public:
+  CmpCsetVBPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
+  ~CmpCsetVBPattern() override {
+    cmpInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &csetInsn) override;
+  bool CheckCondition(Insn &csetInsn) override;
+  std::string GetPatternName() override {
+    return "CmpCsetPattern";
+  };
+
+ private:
+  bool IsContinuousCmpCset(const Insn &curInsn);
+  bool OpndDefByOneValidBit(const Insn &defInsn);
+  Insn *cmpInsn = nullptr;
+  int64 cmpConstVal = -1;
+};
+
+/*
+ * cmp w0[16], #32768
+ * bge label        ===>  tbnz w0, #15, label
+ *
+ * bge / blt
+ */
+class CmpBranchesPattern : public ValidBitPattern {
+ public:
+  CmpBranchesPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
+  ~CmpBranchesPattern() override {
+    prevCmpInsn = nullptr;
+  }
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "CmpBranchesPattern";
+  };
+
+ private:
+  void SelectNewMop(MOperator mop);
+  Insn *prevCmpInsn = nullptr;
+  int64 newImmVal = -1;
+  MOperator newMop = MOP_undef;
+  bool is64Bit = false;
+};
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H */
+
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_yieldpoint.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_yieldpoint.h
new file mode 100644
index 0000000000000000000000000000000000000000..d459a87094738feef02c58b3134e5edf90245ac8
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/aarch64_yieldpoint.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H
+
+#include "yieldpoint.h"
+
+namespace maplebe {
+using namespace maple;
+
+class AArch64YieldPointInsertion : public YieldPointInsertion {
+ public:
+  explicit AArch64YieldPointInsertion(CGFunc &func) : YieldPointInsertion(func) {}
+
+  ~AArch64YieldPointInsertion() override = default;
+
+  void Run() override;
+
+ private:
+  void InsertYieldPoint();
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H */
\ No newline at end of file
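`AArch64YieldPointInsertion::InsertYieldPoint` plants lightweight polling sites so the runtime can interrupt compiled code, for example for GC. The sketch below only illustrates the polling idea with a hypothetical flag check; it is not the code the backend actually emits:

```
// Rough illustration of what a yield point does at runtime: a cheap,
// frequently executed check that lets the VM regain control.
// Hypothetical demo, not the emitted AArch64 sequence.
#include <atomic>
#include <iostream>

std::atomic<bool> yieldRequested{false};

inline void YieldPoint() {
    if (yieldRequested.load(std::memory_order_relaxed)) {
        // A real runtime would transfer control to the VM here.
        std::cout << "yield to runtime\n";
        yieldRequested.store(false, std::memory_order_relaxed);
    }
}

int main() {
    for (int i = 0; i < 3; ++i) {  // loop back-edges are typical poll sites
        if (i == 1) {
            yieldRequested = true;
        }
        YieldPoint();
    }
    return 0;
}
```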
diff --git a/ecmascript/mapleall/maple_be/include/cg/aarch64/mpl_atomic.h b/ecmascript/mapleall/maple_be/include/cg/aarch64/mpl_atomic.h
new file mode 100644
index 0000000000000000000000000000000000000000..f89f706597056731b81e893d8e4921cda6557d89
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/aarch64/mpl_atomic.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H
+
+#include
+#include
+#include "types_def.h"
+
+namespace maple {
+enum class MemOrd : uint32 {
+  kNotAtomic = 0,
+#define ATTR(STR) STR,
+#include "memory_order_attrs.def"
+#undef ATTR
+};
+
+MemOrd MemOrdFromU32(uint32 val);
+
+bool MemOrdIsAcquire(MemOrd ord);
+
+bool MemOrdIsRelease(MemOrd ord);
+} /* namespace maple */
+
+#endif /* MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/abi.h b/ecmascript/mapleall/maple_be/include/cg/abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..57c4ad728c7d19b09d0c3be87db131df3fb0fc3d
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/abi.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_ABI_H
+#define MAPLEBE_INCLUDE_CG_ABI_H
+
+#include
+#include "types_def.h"
+#include "operand.h"
+
+namespace maplebe {
+enum ArgumentClass : uint8 {
+  kNoClass,
+  kIntegerClass,
+  kFloatClass,
+  kPointerClass,
+  kVectorClass,
+  kMemoryClass,
+  kShortVectorClass,
+  kCompositeTypeHFAClass,  /* Homogeneous Floating-point Aggregates for AArch64 */
+  kCompositeTypeHVAClass,  /* Homogeneous Short-Vector Aggregates for AArch64 */
+};
+
+using regno_t = uint32_t;
+
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_ABI_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/abstract_mmir.def b/ecmascript/mapleall/maple_be/include/cg/abstract_mmir.def
new file mode 100644
index 0000000000000000000000000000000000000000..1ec9f30ff3c692460705fd8e00d5693b67efbd31
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/abstract_mmir.def
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */ + + /* Abstract Maple Machine IR */ + /* {mop, opnds, prop, latency, name, format, length} */ + DEFINE_MOP(MOP_undef, {}, ISABSTRACT,0,"","",0) + + /* conversion between all types and registers */ + DEFINE_MOP(MOP_copy_ri_8, {&OpndDesc::Reg8ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_8","",1) + DEFINE_MOP(MOP_copy_rr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISMOVE,0,"copy_rr_8","",1) + DEFINE_MOP(MOP_copy_ri_16, {&OpndDesc::Reg16ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_16","",1) + DEFINE_MOP(MOP_copy_rr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISMOVE,0,"copy_rr_16","",1) + DEFINE_MOP(MOP_copy_ri_32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_32","",1) + DEFINE_MOP(MOP_copy_rr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISMOVE,0,"copy_rr_32","",1) + DEFINE_MOP(MOP_copy_ri_64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_ri_64","",1) + DEFINE_MOP(MOP_copy_rr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS},ISABSTRACT|ISMOVE,0,"copy_rr_64","",1) + + DEFINE_MOP(MOP_copy_fi_8, {&OpndDesc::Reg8FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_8","",1) + DEFINE_MOP(MOP_copy_ff_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISABSTRACT|ISMOVE,0,"copy_ff_8","",1) + DEFINE_MOP(MOP_copy_fi_16, {&OpndDesc::Reg16FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_16","",1) + DEFINE_MOP(MOP_copy_ff_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISABSTRACT|ISMOVE,0,"copy_ff_16","",1) + DEFINE_MOP(MOP_copy_fi_32, {&OpndDesc::Reg32FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_32","",1) + DEFINE_MOP(MOP_copy_ff_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISABSTRACT|ISMOVE,0,"copy_ff_32","",1) + DEFINE_MOP(MOP_copy_fi_64, {&OpndDesc::Reg64FD,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_fi_64","",1) + DEFINE_MOP(MOP_copy_ff_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS},ISABSTRACT|ISMOVE,0,"copy_ff_64","",1) + + /* register extend */ + DEFINE_MOP(MOP_zext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r8","",1) + DEFINE_MOP(MOP_sext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r8","",1) + DEFINE_MOP(MOP_zext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r8","",1) + DEFINE_MOP(MOP_sext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r8","",1) + DEFINE_MOP(MOP_zext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r16","",1) + DEFINE_MOP(MOP_sext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r16","",1) + + DEFINE_MOP(MOP_zext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r8","",1) + DEFINE_MOP(MOP_sext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r8","",1) + DEFINE_MOP(MOP_zext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r16","",1) + DEFINE_MOP(MOP_sext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r16","",1) + DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) + DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + + /* conversion between different kinds of registers */ + DEFINE_MOP(MOP_cvt_rf_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_32","",1) + + /* Support transformation between 
memory and registers */ + DEFINE_MOP(MOP_str_8, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_8","",1) + DEFINE_MOP(MOP_str_16, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_16","",1) + DEFINE_MOP(MOP_str_32, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_32","",1) + DEFINE_MOP(MOP_str_64, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_64","",1) + DEFINE_MOP(MOP_load_8, {&OpndDesc::Reg8ID,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_8","",1) + DEFINE_MOP(MOP_load_16, {&OpndDesc::Reg16ID,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_16","",1) + DEFINE_MOP(MOP_load_32, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_32","",1) + DEFINE_MOP(MOP_load_64, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_64","",1) + DEFINE_MOP(MOP_str_f_8, {&OpndDesc::Reg8FS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_f_8","",1) + DEFINE_MOP(MOP_str_f_16, {&OpndDesc::Reg16FS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_f_16","",1) + DEFINE_MOP(MOP_str_f_32, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_f_32","",1) + DEFINE_MOP(MOP_str_f_64, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_f_64","",1) + DEFINE_MOP(MOP_load_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_f_8","",1) + DEFINE_MOP(MOP_load_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_f_16","",1) + DEFINE_MOP(MOP_load_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_f_32","",1) + DEFINE_MOP(MOP_load_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_f_64","",1) + + /* Support three address basic operations */ + DEFINE_MOP(MOP_add_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"add_8","",1) + DEFINE_MOP(MOP_add_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"add_16","",1) + DEFINE_MOP(MOP_add_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"add_32","",1) + DEFINE_MOP(MOP_add_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"add_64","",1) + DEFINE_MOP(MOP_sub_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"sub_8","",1) + DEFINE_MOP(MOP_sub_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"sub_16","",1) + DEFINE_MOP(MOP_sub_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"sub_32","",1) + DEFINE_MOP(MOP_sub_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"sub_64","",1) + DEFINE_MOP(MOP_or_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"or_8","",1) + DEFINE_MOP(MOP_or_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"or_16","",1) + DEFINE_MOP(MOP_or_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"or_32","",1) + DEFINE_MOP(MOP_or_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"or_64","",1) + DEFINE_MOP(MOP_xor_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"xor_8","",1) + DEFINE_MOP(MOP_xor_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"xor_16","",1) + DEFINE_MOP(MOP_xor_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"xor_32","",1) + DEFINE_MOP(MOP_xor_64, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"xor_64","",1) + DEFINE_MOP(MOP_and_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"and_8","",1) + DEFINE_MOP(MOP_and_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"and_16","",1) + DEFINE_MOP(MOP_and_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"and_32","",1) + DEFINE_MOP(MOP_and_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"and_64","",1) + + /* shift -- shl/ashr/lshr */ + DEFINE_MOP(MOP_shl_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"shl_8","",1) + DEFINE_MOP(MOP_shl_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"shl_16","",1) + DEFINE_MOP(MOP_shl_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_32","",1) + DEFINE_MOP(MOP_shl_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"shl_64","",1) + DEFINE_MOP(MOP_ashr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"ashr_8","",1) + DEFINE_MOP(MOP_ashr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"ashr_16","",1) + DEFINE_MOP(MOP_ashr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_32","",1) + DEFINE_MOP(MOP_ashr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"ashr_64","",1) + DEFINE_MOP(MOP_lshr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"lshr_8","",1) + DEFINE_MOP(MOP_lshr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"lshr_16","",1) + DEFINE_MOP(MOP_lshr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_32","",1) + DEFINE_MOP(MOP_lshr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"lshr_64","",1) + + /* Support two address basic operations */ + DEFINE_MOP(MOP_neg_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"neg_8","",1) + DEFINE_MOP(MOP_neg_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"neg_16","",1) + DEFINE_MOP(MOP_neg_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"neg_32","",1) + DEFINE_MOP(MOP_neg_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"neg_64","",1) + DEFINE_MOP(MOP_not_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"not_8","",1) + DEFINE_MOP(MOP_not_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"not_16","",1) + DEFINE_MOP(MOP_not_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"not_32","",1) + DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) + + /* MOP_comment */ + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0) \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/alignment.h b/ecmascript/mapleall/maple_be/include/cg/alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..8c588039992dbb776a8444eaf221544b96965d21 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/alignment.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
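Tables like `abstract_mmir.def` are X-macro lists: each consumer defines `DEFINE_MOP` before including the file, so one table can expand into an enum, a name array, an operand-descriptor table, and so on. A self-contained imitation of the technique with an inline list (the entries here are illustrative):

```
// How an X-macro table like abstract_mmir.def is typically consumed:
// the same list is expanded once into an enum and once into a name table.
#include <iostream>

#define MOP_LIST(X) \
    X(MOP_copy_rr_32, "copy_rr_32") \
    X(MOP_add_32, "add_32") \
    X(MOP_load_32, "load_32")

enum MOperator {
#define X(mop, name) mop,
    MOP_LIST(X)
#undef X
    kMopLast
};

static const char *kMopNames[] = {
#define X(mop, name) name,
    MOP_LIST(X)
#undef X
};

int main() {
    std::cout << kMopNames[MOP_add_32] << "\n";  // prints "add_32"
    return 0;
}
```

Keeping opcode, operand descriptors, properties, latency, name, and length in one row, as the `/* {mop, opnds, prop, latency, name, format, length} */` header notes, guarantees the parallel tables can never drift out of sync.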
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_ALIGNMENT_H +#define MAPLEBE_INCLUDE_CG_ALIGNMENT_H + +#include "cg_phase.h" +#include "maple_phase.h" +#include "cgbb.h" +#include "loop.h" + +namespace maplebe { +class AlignAnalysis { + public: + AlignAnalysis(CGFunc &func, MemPool &memP) + : cgFunc(&func), + alignAllocator(&memP), + loopHeaderBBs(alignAllocator.Adapter()), + jumpTargetBBs(alignAllocator.Adapter()), + alignInfos(alignAllocator.Adapter()), + sameTargetBranches(alignAllocator.Adapter()) {} + + virtual ~AlignAnalysis() = default; + + void AnalysisAlignment(); + void Dump(); + virtual void FindLoopHeader() = 0; + virtual void FindJumpTarget() = 0; + virtual void ComputeLoopAlign() = 0; + virtual void ComputeJumpAlign() = 0; + virtual void ComputeCondBranchAlign() = 0; + + /* filter condition */ + virtual bool IsIncludeCall(BB &bb) = 0; + virtual bool IsInSizeRange(BB &bb) = 0; + virtual bool HasFallthruEdge(BB &bb) = 0; + + std::string PhaseName() const { + return "alignanalysis"; + } + const MapleUnorderedSet &GetLoopHeaderBBs() const { + return loopHeaderBBs; + } + const MapleUnorderedSet &GetJumpTargetBBs() const { + return jumpTargetBBs; + } + const MapleUnorderedMap &GetAlignInfos() const { + return alignInfos; + } + uint32 GetAlignPower(BB &bb) { + return alignInfos[&bb]; + } + + void InsertLoopHeaderBBs(BB &bb) { + loopHeaderBBs.insert(&bb); + } + void InsertJumpTargetBBs(BB &bb) { + jumpTargetBBs.insert(&bb); + } + void InsertAlignInfos(BB &bb, uint32 power) { + alignInfos[&bb] = power; + } + + protected: + CGFunc *cgFunc; + MapleAllocator alignAllocator; + MapleUnorderedSet loopHeaderBBs; + MapleUnorderedSet jumpTargetBBs; + MapleUnorderedMap alignInfos; + MapleUnorderedMap sameTargetBranches; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgAlignAnalysis, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ALIGNMENT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/args.h b/ecmascript/mapleall/maple_be/include/cg/args.h new file mode 100644 index 0000000000000000000000000000000000000000..33e444b30e99358f3614fdd072544dd975d9185d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/args.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
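The value recorded by `InsertAlignInfos` in the `AlignAnalysis` class above is an alignment *power*: the exponent of a power-of-two boundary for the BB's start address. A small demo of the padding such a power implies; the address and the chosen power are made-up inputs, not the pass's real heuristics:

```
// "Align power" p means: place the BB at a (1 << p)-byte boundary.
#include <cstdint>
#include <iostream>

uint32_t PaddingFor(uint32_t addr, uint32_t power) {
    uint32_t align = 1u << power;
    return (align - addr % align) % align;  // bytes of padding needed
}

int main() {
    uint32_t bbAddr = 0x1234;  // hypothetical loop-header address
    uint32_t power = 4;        // align to 16 bytes
    std::cout << "pad " << PaddingFor(bbAddr, power) << " bytes\n";  // 12
    return 0;
}
```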
+ */ +#ifndef MAPLEBE_INCLUDE_CG_ARGS_H +#define MAPLEBE_INCLUDE_CG_ARGS_H + +#include "cgfunc.h" +#include "cg_phase.h" + +namespace maplebe { +class MoveRegArgs { + public: + explicit MoveRegArgs(CGFunc &func) : cgFunc(&func) {} + + virtual ~MoveRegArgs() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "moveargs"; + } + + const CGFunc *GetCGFunc() const { + return cgFunc; + } + + protected: + CGFunc *cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgMoveRegArgs, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ARGS_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/asm_emit.h b/ecmascript/mapleall/maple_be/include/cg/asm_emit.h new file mode 100644 index 0000000000000000000000000000000000000000..492a1599f096de87e14cb490383cc29fdc990804 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/asm_emit.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ASM_EMIT_H +#define MAPLEBE_INCLUDE_CG_ASM_EMIT_H + +#include "emit.h" + +namespace maplebe { +class AsmFuncEmitInfo : public FuncEmitInfo { + public: + explicit AsmFuncEmitInfo(CGFunc &func) : FuncEmitInfo(func) {} + virtual ~AsmFuncEmitInfo() = default; +}; + +class AsmEmitter : public Emitter { + protected: + AsmEmitter(CG &cg, const std::string &asmFileName) : Emitter(cg, asmFileName) { + fileStream.open(asmFileName, std::ios::trunc); + } + + virtual ~AsmEmitter() = default; + + virtual void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0; + virtual void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0; + virtual void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0; + virtual void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) = 0; + virtual void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) = 0; + virtual void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) = 0; + virtual void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) = 0; + virtual void Run(FuncEmitInfo &funcEmitInfo) = 0; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ASM_EMIT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/asm_info.h b/ecmascript/mapleall/maple_be/include/cg/asm_info.h new file mode 100644 index 0000000000000000000000000000000000000000..bfce53ec0a8e59ac827c4b59d9d2130c544d67cc --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/asm_info.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ASM_INFO_H +#define MAPLEBE_INCLUDE_CG_ASM_INFO_H + +#include "maple_string.h" + +namespace maplebe { +enum AsmLabel : uint8 { + kAsmGlbl, + kAsmLocal, + kAsmWeak, + kAsmBss, + kAsmComm, + kAsmData, + kAsmAlign, + kAsmSyname, + kAsmZero, + kAsmByte, + kAsmShort, + kAsmValue, + kAsmLong, + kAsmQuad, + kAsmSize, + kAsmType, + kAsmText, + kAsmHidden +}; + +class AsmInfo { + public: + const MapleString &GetCmnt() const { + return asmCmnt; + } + + const MapleString &GetAtobt() const { + return asmAtObt; + } + + const MapleString &GetFile() const { + return asmFile; + } + + const MapleString &GetSection() const { + return asmSection; + } + + const MapleString &GetRodata() const { + return asmRodata; + } + + const MapleString &GetGlobal() const { + return asmGlobal; + } + + const MapleString &GetLocal() const { + return asmLocal; + } + + const MapleString &GetWeak() const { + return asmWeak; + } + + const MapleString &GetBss() const { + return asmBss; + } + + const MapleString &GetComm() const { + return asmComm; + } + + const MapleString &GetData() const { + return asmData; + } + + const MapleString &GetAlign() const { + return asmAlign; + } + + const MapleString &GetZero() const { + return asmZero; + } + + const MapleString &GetByte() const { + return asmByte; + } + + const MapleString &GetShort() const { + return asmShort; + } + + const MapleString &GetValue() const { + return asmValue; + } + + const MapleString &GetLong() const { + return asmLong; + } + + const MapleString &GetQuad() const { + return asmQuad; + } + + const MapleString &GetSize() const { + return asmSize; + } + + const MapleString &GetType() const { + return asmType; + } + + const MapleString &GetHidden() const { + return asmHidden; + } + + const MapleString &GetText() const { + return asmText; + } + + const MapleString &GetSet() const { + return asmSet; + } + + const MapleString &GetWeakref() const { + return asmWeakref; + } + + explicit AsmInfo(MemPool &memPool) +#if TARGX86 || TARGX86_64 + : asmCmnt("\t//\t", &memPool), +#elif TARGARM32 + : asmCmnt("\t@\t", &memPool), +#else + : asmCmnt("\t#\t", &memPool), +#endif + + asmAtObt("\t%object\t", &memPool), + asmFile("\t.file\t", &memPool), + asmSection("\t.section\t", &memPool), + asmRodata(".rodata\t", &memPool), + asmGlobal("\t.global\t", &memPool), + asmLocal("\t.local\t", &memPool), + asmWeak("\t.weak\t", &memPool), + asmBss("\t.bss\t", &memPool), + asmComm("\t.comm\t", &memPool), + asmData("\t.data\t", &memPool), + asmAlign("\t.align\t", &memPool), + asmZero("\t.zero\t", &memPool), + asmByte("\t.byte\t", &memPool), + asmShort("\t.short\t", &memPool), +#ifdef TARGARM32 + asmValue("\t.short\t", &memPool), +#else + asmValue("\t.value\t", &memPool), +#endif +#ifdef TARGARM32 + asmLong("\t.word\t", &memPool), +#else + asmLong("\t.long\t", &memPool), +#endif + asmQuad("\t.quad\t", &memPool), + asmSize("\t.size\t", &memPool), + asmType("\t.type\t", &memPool), + asmHidden("\t.hidden\t", &memPool), + asmText("\t.text\t", &memPool), + asmSet("\t.set\t", &memPool), + asmWeakref("\t.weakref\t", &memPool){} + + ~AsmInfo() = default; + + private: + MapleString asmCmnt; + MapleString asmAtObt; + MapleString 
asmFile; + MapleString asmSection; + MapleString asmRodata; + MapleString asmGlobal; + MapleString asmLocal; + MapleString asmWeak; + MapleString asmBss; + MapleString asmComm; + MapleString asmData; + MapleString asmAlign; + MapleString asmZero; + MapleString asmByte; + MapleString asmShort; + MapleString asmValue; + MapleString asmLong; + MapleString asmQuad; + MapleString asmSize; + MapleString asmType; + MapleString asmHidden; + MapleString asmText; + MapleString asmSet; + MapleString asmWeakref; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ASM_INFO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/call_conv.h b/ecmascript/mapleall/maple_be/include/cg/call_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..fd5c60d7456846ebf971e6d9be9e46be157b7e2f --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/call_conv.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CALL_CONV_H +#define MAPLEBE_INCLUDE_CG_CALL_CONV_H + +#include "types_def.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +/* for specifying how a parameter is passed */ +struct CCLocInfo { + regno_t reg0 = 0; /* 0 means parameter is stored on the stack */ + regno_t reg1 = 0; + regno_t reg2 = 0; /* can have up to 4 single precision fp registers */ + regno_t reg3 = 0; /* for small structure return. 
 */
+  int32 memOffset = 0;
+  int32 memSize = 0;
+  uint32 fpSize = 0;
+  uint32 numFpPureRegs = 0;
+  uint8 regCount = 0;       /* number of registers <= 2 storing the return value */
+  PrimType primTypeOfReg0;  /* the primitive type stored in reg0 */
+  PrimType primTypeOfReg1;  /* the primitive type stored in reg1 */
+  PrimType primTypeOfReg2;
+  PrimType primTypeOfReg3;
+  uint8 GetRegCount() const {
+    return regCount;
+  }
+
+  PrimType GetPrimTypeOfReg0() const {
+    return primTypeOfReg0;
+  }
+
+  PrimType GetPrimTypeOfReg1() const {
+    return primTypeOfReg1;
+  }
+
+  PrimType GetPrimTypeOfReg2() const {
+    return primTypeOfReg2;
+  }
+
+  PrimType GetPrimTypeOfReg3() const {
+    return primTypeOfReg3;
+  }
+
+  regno_t GetReg0() const {
+    return reg0;
+  }
+
+  regno_t GetReg1() const {
+    return reg1;
+  }
+
+  regno_t GetReg2() const {
+    return reg2;
+  }
+
+  regno_t GetReg3() const {
+    return reg3;
+  }
+};
+
+class LmbcFormalParamInfo {
+ public:
+  LmbcFormalParamInfo(PrimType pType, uint32 ofst, uint32 sz) :
+      type(nullptr), primType(pType), offset(ofst), onStackOffset(0), size(sz), regNO(0), vregNO(0), numRegs(0),
+      fpSize(0), isReturn(false), isPureFloat(false), isOnStack(false), hasRegassign(false) {}
+
+  ~LmbcFormalParamInfo() = default;
+
+  MIRStructType *GetType() {
+    return type;
+  }
+  void SetType(MIRStructType *ty) {
+    type = ty;
+  }
+  PrimType GetPrimType() const {
+    return primType;
+  }
+  void SetPrimType(PrimType pType) {
+    primType = pType;
+  }
+  uint32 GetOffset() const {
+    return offset;
+  }
+  void SetOffset(uint32 ofs) {
+    offset = ofs;
+  }
+  uint32 GetOnStackOffset() const {
+    return onStackOffset;
+  }
+  void SetOnStackOffset(uint32 ofs) {
+    onStackOffset = ofs;
+  }
+  uint32 GetSize() const {
+    return size;
+  }
+  void SetSize(uint32 sz) {
+    size = sz;
+  }
+  regno_t GetRegNO() const {
+    return regNO;
+  }
+  void SetRegNO(regno_t reg) {
+    regNO = reg;
+  }
+  regno_t GetVregNO() const {
+    return vregNO;
+  }
+  void SetVregNO(regno_t reg) {
+    vregNO = reg;
+  }
+  uint32 GetNumRegs() const {
+    return numRegs;
+  }
+  void SetNumRegs(uint32 num) {
+    numRegs = num;
+  }
+  uint32 GetFpSize() const {
+    return fpSize;
+  }
+  void SetFpSize(uint32 sz) {
+    fpSize = sz;
+  }
+  bool IsReturn() const {
+    return isReturn;
+  }
+  void SetIsReturn() {
+    isReturn = true;
+  }
+  bool IsPureFloat() const {
+    return isPureFloat;
+  }
+  void SetIsPureFloat() {
+    isPureFloat = true;
+  }
+  bool IsInReg() const {
+    return !isOnStack;
+  }
+  bool IsOnStack() const {
+    return isOnStack;
+  }
+  void SetIsOnStack() {
+    isOnStack = true;
+  }
+  bool HasRegassign() const {
+    return hasRegassign;
+  }
+  void SetHasRegassign() {
+    hasRegassign = true;
+  }
+ private:
+  MIRStructType *type;
+  PrimType primType;
+  uint32 offset;
+  uint32 onStackOffset;  /* stack location if isOnStack */
+  uint32 size;           /* size of primtype or struct */
+  regno_t regNO = 0;     /* param reg num or starting reg num if numRegs > 0 */
+  regno_t vregNO = 0;    /* if no explicit regassign from IR, create move from param reg */
+  uint32 numRegs = 0;    /* number of regs for struct param */
+  uint32 fpSize = 0;     /* size of fp param if isPureFloat */
+  bool isReturn;
+  bool isPureFloat = false;
+  bool isOnStack;        /* large struct is passed by a copy on stack */
+  bool hasRegassign;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_CALL_CONV_H */
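A rough sketch of how a `CCLocInfo`-style record could describe returning a small aggregate in registers, AAPCS64-style. The register numbering and the 16-byte cutoff below are illustrative assumptions, not the backend's actual classification code:

```
// Describing "return a 16-byte struct in two GP registers" with a
// CCLocInfo-like record. Simplified stand-in types; numbering is
// illustrative (0 is reserved to mean "on the stack").
#include <cstdint>
#include <iostream>

struct LocInfo {
    uint32_t reg0 = 0;    // 0 means "returned via memory"
    uint32_t reg1 = 0;
    uint8_t regCount = 0;
    int32_t memSize = 0;
};

LocInfo ClassifyReturn(uint32_t byteSize) {
    LocInfo loc;
    if (byteSize <= 16) {  // small aggregate: up to two GP registers
        loc.reg0 = 1;      // stands for x0
        loc.regCount = 1;
        if (byteSize > 8) {
            loc.reg1 = 2;  // stands for x1
            loc.regCount = 2;
        }
    } else {
        loc.memSize = static_cast<int32_t>(byteSize);  // returned via memory
    }
    return loc;
}

int main() {
    LocInfo loc = ClassifyReturn(16);
    std::cout << "regs used: " << int(loc.regCount) << "\n";  // 2
    return 0;
}
```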
diff --git a/ecmascript/mapleall/maple_be/include/cg/cfgo.h b/ecmascript/mapleall/maple_be/include/cg/cfgo.h
new file mode 100644
index 0000000000000000000000000000000000000000..29d58977b23b8f33d80f9af70defe44403b43dac
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cfgo.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CFGO_H
+#define MAPLEBE_INCLUDE_CG_CFGO_H
+#include "cg_cfg.h"
+#include "optimize_common.h"
+
+namespace maplebe {
+class ChainingPattern : public OptimizationPattern {
+ public:
+  explicit ChainingPattern(CGFunc &func) : OptimizationPattern(func) {
+    patternName = "BB Chaining";
+    dotColor = kCfgoChaining;
+  }
+
+  virtual ~ChainingPattern() = default;
+  bool Optimize(BB &curBB) override;
+
+ protected:
+  bool NoInsnBetween(const BB &from, const BB &to) const;
+  bool DoSameThing(const BB &bb1, const Insn &last1, const BB &bb2, const Insn &last2) const;
+  bool MergeFallthuBB(BB &curBB);
+  bool MergeGotoBB(BB &curBB, BB &sucBB);
+  bool MoveSuccBBAsCurBBNext(BB &curBB, BB &sucBB);
+  bool RemoveGotoInsn(BB &curBB, BB &sucBB);
+  bool ClearCurBBAndResetTargetBB(BB &curBB, BB &sucBB);
+};
+
+class SequentialJumpPattern : public OptimizationPattern {
+ public:
+  explicit SequentialJumpPattern(CGFunc &func) : OptimizationPattern(func) {
+    patternName = "Sequential Jump";
+    dotColor = kCfgoSj;
+  }
+
+  virtual ~SequentialJumpPattern() = default;
+  bool Optimize(BB &curBB) override;
+
+ protected:
+  void SkipSucBB(BB &curBB, BB &sucBB);
+  void UpdateSwitchSucc(BB &curBB, BB &sucBB);
+};
+
+class FlipBRPattern : public OptimizationPattern {
+ public:
+  explicit FlipBRPattern(CGFunc &func) : OptimizationPattern(func) {
+    patternName = "Condition Flip";
+    dotColor = kCfgoFlipCond;
+  }
+
+  virtual ~FlipBRPattern() = default;
+  bool Optimize(BB &curBB) override;
+
+ protected:
+  void RelocateThrowBB(BB &curBB);
+ private:
+  virtual uint32 GetJumpTargetIdx(const Insn &insn) = 0;
+  virtual MOperator FlipConditionOp(MOperator flippedOp) = 0;
+};
+
+/* This class represents the scenario that the BB is unreachable. */
+class UnreachBBPattern : public OptimizationPattern {
+ public:
+  explicit UnreachBBPattern(CGFunc &func) : OptimizationPattern(func) {
+    patternName = "Unreachable BB";
+    dotColor = kCfgoUnreach;
+    func.GetTheCFG()->FindAndMarkUnreachable(*cgFunc);
+  }
+
+  virtual ~UnreachBBPattern() = default;
+  bool Optimize(BB &curBB) override;
+};
+
+/*
+ * This class represents the scenario that a common jump BB can be duplicated
+ * to another of its predecessors.
+ */
+class DuplicateBBPattern : public OptimizationPattern {
+ public:
+  explicit DuplicateBBPattern(CGFunc &func) : OptimizationPattern(func) {
+    patternName = "Duplicate BB";
+    dotColor = kCfgoDup;
+  }
+
+  virtual ~DuplicateBBPattern() = default;
+  bool Optimize(BB &curBB) override;
+
+ private:
+  static constexpr int kThreshold = 10;
+};
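`SequentialJumpPattern::SkipSucBB` captures a simple idea: if a successor consists of nothing but another jump, branch straight to the final destination. A tiny stand-alone model of that retargeting (real code must also guard against goto cycles, which this sketch only notes in a comment):

```
// Idea behind SequentialJumpPattern: when a successor BB does nothing but
// jump again, the first branch can target the final destination directly.
// Tiny stand-alone model, not the real BB/Insn classes.
#include <iostream>
#include <string>

struct BB {
    std::string name;
    BB *jumpTarget = nullptr;  // unconditional goto target, if any
    bool onlyGoto = false;     // BB contains a single goto and nothing else
};

void SkipTrivialGotos(BB &bb) {
    // A production pass would bound this loop to avoid goto cycles.
    while (bb.jumpTarget && bb.jumpTarget->onlyGoto && bb.jumpTarget->jumpTarget) {
        bb.jumpTarget = bb.jumpTarget->jumpTarget;  // bypass the trampoline
    }
}

int main() {
    BB c{"final"};
    BB b{"trampoline", &c, true};
    BB a{"entry", &b, false};
    SkipTrivialGotos(a);
    std::cout << "entry now jumps to " << a.jumpTarget->name << "\n";  // final
    return 0;
}
```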
+
+/*
+ * This class represents the scenario that a BB contains nothing.
+ */
+class EmptyBBPattern : public OptimizationPattern {
+ public:
+  explicit EmptyBBPattern(CGFunc &func) : OptimizationPattern(func) {
+    patternName = "Empty BB";
+    dotColor = kCfgoEmpty;
+  }
+
+  virtual ~EmptyBBPattern() = default;
+  bool Optimize(BB &curBB) override;
+};
+
+class CFGOptimizer : public Optimizer {
+ public:
+  CFGOptimizer(CGFunc &func, MemPool &memPool) : Optimizer(func, memPool) {
+    name = "CFGO";
+  }
+
+  virtual ~CFGOptimizer() = default;
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgCfgo, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostCfgo, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_CFGO_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/cfi.def b/ecmascript/mapleall/maple_be/include/cg/cfi.def
new file mode 100644
index 0000000000000000000000000000000000000000..2c361aa64d2d69f53b53af4067d51cbf8d2da196
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cfi.def
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/* Binutils 2.28 */
+/* https://sourceware.org/binutils/docs-2.28/as/CFI-directives.html#CFI-directives */
+CFI_DEFINE( sections, , 1, List, Undef, Undef )
+CFI_DEFINE( startproc, , 0, Undef, Undef, Undef )
+CFI_DEFINE( startproc, _simple, 1, String, Undef, Undef )  /* "simple" */
+CFI_DEFINE( endproc, , 0, Undef, Undef, Undef )
+CFI_DEFINE( personality, _default, 1, Immediate, Undef, Undef )
+CFI_DEFINE( personality, _symbol, 2, Immediate, String, Undef )
+CFI_DEFINE( personality, _constant, 2, Immediate, Immediate, Undef )
+CFI_DEFINE( personality_id, , 1, StImmediate, Undef, Undef )
+CFI_DEFINE( fde_data, , 1, List, Undef, Undef )
+CFI_DEFINE( lsda, _default, 1, Immediate, Undef, Undef )
+CFI_DEFINE( lsda, _label, 2, Immediate, BBAddress, Undef )
+CFI_DEFINE( lsda, _constant, 2, Immediate, Immediate, Undef )
+CFI_DEFINE( inline_lsda, , 0, Undef, Undef, Undef )
+CFI_DEFINE( inline_lsda, _align, 1, Immediate, Undef, Undef ) /* power of 2 */
+CFI_DEFINE( def_cfa, , 2, Register, Immediate, Undef )
+CFI_DEFINE( def_cfa_register, , 1, Register, Undef, Undef )
+CFI_DEFINE( def_cfa_offset, , 1, Immediate, Undef, Undef )
+CFI_DEFINE( adjust_cfa_offset, , 1, Immediate, Undef, Undef )
+CFI_DEFINE( offset, , 2, Register, Immediate, Undef )
+CFI_DEFINE( val_offset, , 2, Register, Immediate, Undef )
+CFI_DEFINE( rel_offset, , 2, Register, Immediate, Undef )
+CFI_DEFINE( register, , 2, Register, Register, Undef )
+CFI_DEFINE( restore, , 1, Register, Undef, Undef )
+CFI_DEFINE( undefined, , 1, Register, Undef, Undef )
+CFI_DEFINE( same_value, , 1, Register, Undef, Undef )
+CFI_DEFINE( remember_state, , 0, Undef, Undef, Undef )
+CFI_DEFINE( restore_state, , 0, Undef, Undef, Undef )
+CFI_DEFINE( return_column, , 1, Register, Undef, Undef )
+CFI_DEFINE( signal_frame, , 0, Undef, Undef, Undef )
+CFI_DEFINE( window_save, , 0, Undef, Undef, Undef )
+CFI_DEFINE( escape, , 2, StImmediate, List /*expression[, ...]*/, Undef )
+CFI_DEFINE( val_encoded_addr, , 3, Register, Immediate, StImmediate )
+
+ARM_DIRECTIVES_DEFINE( save, , 1, List, Undef, Undef )
+ARM_DIRECTIVES_DEFINE( vsave, , 1, List, Undef, Undef )
+ARM_DIRECTIVES_DEFINE( setfp, , 3, Register, Register, Immediate )
+ARM_DIRECTIVES_DEFINE( pad, , 1, Immediate, Undef, Undef )
diff --git a/ecmascript/mapleall/maple_be/include/cg/cfi.h b/ecmascript/mapleall/maple_be/include/cg/cfi.h
new file mode 100644
index 0000000000000000000000000000000000000000..015c3b83524ddb4a7d910b474e5ba34f604671df
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cfi.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CFI_H
+#define MAPLEBE_INCLUDE_CG_CFI_H
+
+#include "insn.h"
+#include "mempool_allocator.h"
+#include "mir_symbol.h"
+#include "operand.h"
+#include "common_utils.h"
+
+/*
+ * Reference:
+ * GNU Binutils. AS documentation
+ * https://sourceware.org/binutils/docs-2.28/as/index.html
+ *
+ * CFI blog
+ * https://www.imperialviolet.org/2017/01/18/cfi.html
+ *
+ * System V Application Binary Interface
+ * AMD64 Architecture Processor Supplement. Draft Version 0.99.7
+ * https://www.uclibc.org/docs/psABI-x86_64.pdf $ 3.7 Figure 3.36
+ * (RBP->6, RSP->7)
+ *
+ * System V Application Binary Interface
+ * Intel386 Architecture Processor Supplement.
Version 1.0 + * https://www.uclibc.org/docs/psABI-i386.pdf $ 2.5 Table 2.14 + * (EBP->5, ESP->4) + * + * DWARF for ARM Architecture (ARM IHI 0040B) + * infocenter.arm.com/help/topic/com.arm.doc.ihi0040b/IHI0040B_aadwarf.pdf + * $ 3.1 Table 1 + * (0-15 -> R0-R15) + */ +namespace cfi { +using namespace maple; + +enum CfiOpcode : uint8 { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) OP_CFI_##k##sub, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) OP_ARM_DIRECTIVES_##k##sub, +#include "cfi.def" +#undef CFI_DEFINE +#undef ARM_DIRECTIVES_DEFINE + kOpCfiLast +}; + +class CfiInsn : public maplebe::Insn { + public: + CfiInsn(MemPool &memPool, maplebe::MOperator op) : Insn(memPool, op) {} + + CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0) : Insn(memPool, op, opnd0) {} + + CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1) + : Insn(memPool, op, opnd0, opnd1) {} + + CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1, + maplebe::Operand &opnd2) + : Insn(memPool, op, opnd0, opnd1, opnd2) {} + + ~CfiInsn() = default; + + bool IsMachineInstruction() const override { + return false; + } + + void Dump() const override; + +#if DEBUG + void Check() const override; +#endif + + bool IsCfiInsn() const override { + return true; + } + + bool IsTargetInsn() const override { + return false; + } + + bool IsRegDefined(maplebe::regno_t regNO) const override { + CHECK_FATAL(false, "cfi do not def regs"); + return false; + } + + std::set GetDefRegs() const override{ + CHECK_FATAL(false, "cfi do not def regs"); + return std::set(); + } + + uint32 GetBothDefUseOpnd() const override { + return maplebe::kInsnMaxOpnd; + } + + private: + CfiInsn &operator=(const CfiInsn&); +}; + +class RegOperand : public maplebe::OperandVisitable { + public: + RegOperand(uint32 no, uint32 size) : OperandVisitable(kOpdRegister, size), regNO(no) {} + + ~RegOperand() = default; + using OperandVisitable::OperandVisitable; + + uint32 GetRegisterNO() const { + return regNO; + } + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + private: + uint32 regNO; +}; + +class ImmOperand : public maplebe::OperandVisitable { + public: + ImmOperand(int64 val, uint32 size) : OperandVisitable(kOpdImmediate, size), val(val) {} + + ~ImmOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + int64 GetValue() const { + return val; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + private: + int64 val; +}; + +class SymbolOperand : public maplebe::OperandVisitable { + public: + SymbolOperand(maple::MIRSymbol &mirSymbol, uint8 size) : + OperandVisitable(kOpdStImmediate, size), + symbol(&mirSymbol) {} + ~SymbolOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + void Dump() const override { + LogInfo::MapleLogger() << "symbol is : " << symbol->GetName(); + } + + private: + maple::MIRSymbol *symbol; +}; + +class StrOperand : public maplebe::OperandVisitable { 
+ public: + StrOperand(const std::string &str, MemPool &memPool) : OperandVisitable(kOpdString, 0), str(str, &memPool) {} + + ~StrOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + const MapleString &GetStr() const { + return str; + } + + void Dump() const override; + + private: + const MapleString str; +}; + +class LabelOperand : public maplebe::OperandVisitable { + public: + LabelOperand(const std::string &parent, LabelIdx labIdx, MemPool &memPool) + : OperandVisitable(kOpdBBAddress, 0), parentFunc(parent, &memPool), labelIndex(labIdx) {} + + ~LabelOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + void Dump() const override; + + const MapleString &GetParentFunc() const { + return parentFunc; + } + LabelIdx GetIabelIdx() const { + return labelIndex; + }; + + private: + const MapleString parentFunc; + LabelIdx labelIndex; +}; + +class CFIOpndEmitVisitor : public maplebe::OperandVisitorBase, + public maplebe::OperandVisitors { + public: + explicit CFIOpndEmitVisitor(maplebe::Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~CFIOpndEmitVisitor() = default; + protected: + maplebe::Emitter &emitter; + private: + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(SymbolOperand *v) final; + void Visit(StrOperand *v) final; + void Visit(LabelOperand *v) final; +}; +} /* namespace cfi */ + +#endif /* MAPLEBE_INCLUDE_CG_CFI_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg.h b/ecmascript/mapleall/maple_be/include/cg/cg.h new file mode 100644 index 0000000000000000000000000000000000000000..7fbb5b79baa52823251018d3269103baefd8d638 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg.h @@ -0,0 +1,405 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CG_H +#define MAPLEBE_INCLUDE_CG_CG_H + +/* C++ headers. */ +#include +#include +/* MapleIR headers. 
*/ +#include "operand.h" +#include "insn.h" +#include "cgfunc.h" +#include "live.h" +#include "cg_option.h" +#include "opcode_info.h" +#include "global_tables.h" +#include "mir_function.h" +#include "mad.h" + +namespace maplebe { +#define ADDTARGETPHASE(PhaseName, condition) \ + if (!CGOptions::IsSkipPhase(PhaseName)) { \ + pm->AddPhase(PhaseName, condition); \ + } +/* subtarget opt phase -- cyclic Dependency, use Forward declaring */ +class CGSSAInfo; +class PhiEliminate; +class DomAnalysis; +class CGProp; +class CGDce; +class AlignAnalysis; +class MoveRegArgs; +class MPISel; +class Standardize; +class LiveIntervalAnalysis; +class ValidBitOpt; +class CG; +class LocalOpt; +class CFGOptimizer; + +class Globals { + public: + static Globals *GetInstance() { + static Globals instance; + return &instance; + } + + ~Globals() = default; + + void SetBECommon(BECommon &bc) { + beCommon = &bc; + } + + BECommon *GetBECommon() { + return beCommon; + } + + const BECommon *GetBECommon() const { + return beCommon; + } + + void SetMAD(MAD &m) { + mad = &m; + } + + MAD *GetMAD() { + return mad; + } + + const MAD *GetMAD() const { + return mad; + } + + void SetOptimLevel(int32 opLevel) { + optimLevel = opLevel; + } + + int32 GetOptimLevel() const { + return optimLevel; + } + + void SetTarget(CG &target); + const CG *GetTarget() const ; + + private: + BECommon *beCommon = nullptr; + MAD *mad = nullptr; + int32 optimLevel = 0; + CG *cg = nullptr; + Globals() = default; +}; + +class CG { + public: + using GenerateFlag = uint64; + + public: + CG(MIRModule &mod, const CGOptions &cgOptions) + : memPool(memPoolCtrler.NewMemPool("maplecg mempool", false /* isLocalPool */)), + allocator(memPool), + mirModule(&mod), + emitter(nullptr), + labelOrderCnt(0), + cgOption(cgOptions), + instrumentationFunction(nullptr), + fileGP(nullptr) { + const std::string &internalNameLiteral = namemangler::GetInternalNameLiteral(namemangler::kJavaLangObjectStr); + GStrIdx strIdxFromName = GlobalTables::GetStrTable().GetStrIdxFromName(internalNameLiteral); + isLibcore = (GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdxFromName) != nullptr); + DefineDebugTraceFunctions(); + isLmbc = (mirModule->GetFlavor() == MIRFlavor::kFlavorLmbc); + } + + virtual ~CG(); + + /* enroll all code generator phases for target machine */ + virtual void EnrollTargetPhases(MaplePhaseManager *pm) const = 0; + + void GenExtraTypeMetadata(const std::string &classListFileName, const std::string &outputBaseName); + void GenPrimordialObjectList(const std::string &outputBaseName); + const std::string ExtractFuncName(const std::string &str); + + virtual Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) = 0; + virtual PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) = 0; + + virtual CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction&, BECommon&, MemPool&, StackMemPool&, + MapleAllocator&, uint32) = 0; + + bool IsExclusiveEH() const { + return CGOptions::IsExclusiveEH(); + } + + virtual bool IsExclusiveFunc(MIRFunction &mirFunc) = 0; + + /* NOTE: Consider making be_common a field of CG. 
*/ + virtual void GenerateObjectMaps(BECommon &beCommon) = 0; + + /* Used for GCTIB pattern merging */ + virtual std::string FindGCTIBPatternName(const std::string &name) const = 0; + + bool GenerateVerboseAsm() const { + return cgOption.GenerateVerboseAsm(); + } + + bool GenerateVerboseCG() const { + return cgOption.GenerateVerboseCG(); + } + + bool DoPrologueEpilogue() const { + return cgOption.DoPrologueEpilogue(); + } + + bool DoTailCall() const { + return cgOption.DoTailCall(); + } + + bool DoCheckSOE() const { + return cgOption.DoCheckSOE(); + } + + bool GenerateDebugFriendlyCode() const { + return cgOption.GenerateDebugFriendlyCode(); + } + + int32 GetOptimizeLevel() const { + return cgOption.GetOptimizeLevel(); + } + + bool UseFastUnwind() const { + return true; + } + + bool IsStackProtectorStrong() const { + return cgOption.IsStackProtectorStrong(); + } + + bool IsStackProtectorAll() const { + return cgOption.IsStackProtectorAll(); + } + + bool NeedInsertInstrumentationFunction() const { + return cgOption.NeedInsertInstrumentationFunction(); + } + + void SetInstrumentationFunction(const std::string &name); + const MIRSymbol *GetInstrumentationFunction() const { + return instrumentationFunction; + } + + bool InstrumentWithDebugTraceCall() const { + return cgOption.InstrumentWithDebugTraceCall(); + } + + bool InstrumentWithProfile() const { + return cgOption.InstrumentWithProfile(); + } + + bool DoPatchLongBranch() const { + return cgOption.DoPatchLongBranch(); + } + + uint8 GetRematLevel() const { + return CGOptions::GetRematLevel(); + } + + bool GenYieldPoint() const { + return cgOption.GenYieldPoint(); + } + + bool GenLocalRC() const { + return cgOption.GenLocalRC(); + } + + bool GenerateExceptionHandlingCode() const { + return cgOption.GenerateExceptionHandlingCode(); + } + + bool DoConstFold() const { + return cgOption.DoConstFold(); + } + + void AddStackGuardvar(); + void DefineDebugTraceFunctions(); + MIRModule *GetMIRModule() { + return mirModule; + } + + void SetEmitter(Emitter &emitter) { + this->emitter = &emitter; + } + + Emitter *GetEmitter() const { + return emitter; + } + + MIRModule *GetMIRModule() const { + return mirModule; + } + + void IncreaseLabelOrderCnt() { + labelOrderCnt++; + } + + LabelIDOrder GetLabelOrderCnt() const { + return labelOrderCnt; + } + + const CGOptions &GetCGOptions() const { + return cgOption; + } + + void UpdateCGOptions(const CGOptions &newOption) { + cgOption.SetOptionFlag(newOption.GetOptionFlag()); + } + + bool IsLibcore() const { + return isLibcore; + } + + bool IsLmbc() const { + return isLmbc; + } + + MIRSymbol *GetDebugTraceEnterFunction() { + return dbgTraceEnter; + } + + const MIRSymbol *GetDebugTraceEnterFunction() const { + return dbgTraceEnter; + } + + MIRSymbol *GetProfileFunction() { + return dbgFuncProfile; + } + + const MIRSymbol *GetProfileFunction() const { + return dbgFuncProfile; + } + + const MIRSymbol *GetDebugTraceExitFunction() const { + return dbgTraceExit; + } + + /* Init SubTarget phase */ + virtual LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const { + return nullptr; + }; + virtual ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const { + return nullptr; + }; + virtual MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const { + return nullptr; + }; + virtual AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const { + return nullptr; + }; + virtual MPISel *CreateMPIsel(MemPool &mp, MapleAllocator &allocator, CGFunc &f) const { + return nullptr; + } + virtual 
Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const {
+    return nullptr;
+  }
+  virtual ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
+    return nullptr;
+  }
+
+  /* Init SubTarget optimization */
+  virtual CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const {
+    return nullptr;
+  };
+  virtual LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const {
+    return nullptr;
+  };
+  virtual PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
+    return nullptr;
+  };
+  virtual CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const {
+    return nullptr;
+  };
+  virtual CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const {
+    return nullptr;
+  };
+  virtual LocalOpt *CreateLocalOpt(MemPool &mp, CGFunc &f, ReachingDefinition&) const {
+    return nullptr;
+  };
+  virtual CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const {
+    return nullptr;
+  }
+
+  /* Object map generation helper */
+  std::vector<int64> GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType);
+
+  void SetGP(MIRSymbol *sym) {
+    fileGP = sym;
+  }
+  MIRSymbol *GetGP() const {
+    return fileGP;
+  }
+
+  static bool IsInFuncWrapLabels(MIRFunction *func) {
+    return funcWrapLabels.find(func) != funcWrapLabels.end();
+  }
+
+  static void SetFuncWrapLabels(MIRFunction *func, const std::pair<LabelIdx, LabelIdx> labels) {
+    if (!IsInFuncWrapLabels(func)) {
+      funcWrapLabels[func] = labels;
+    }
+  }
+
+  static std::map<MIRFunction*, std::pair<LabelIdx, LabelIdx>> &GetFuncWrapLabels() {
+    return funcWrapLabels;
+  }
+  static void SetCurCGFunc(CGFunc &cgFunc) {
+    currentCGFunction = &cgFunc;
+  }
+
+  static const CGFunc *GetCurCGFunc() {
+    return currentCGFunction;
+  }
+
+  static CGFunc *GetCurCGFuncNoConst() {
+    return currentCGFunction;
+  }
+
+  virtual const InsnDesc &GetTargetMd(MOperator mOp) const = 0;
+  virtual bool IsEffectiveCopy(Insn &insn) const = 0;
+  virtual bool IsTargetInsn(MOperator mOp) const = 0;
+  virtual bool IsClinitInsn(MOperator mOp) const = 0;
+  virtual bool IsPseudoInsn(MOperator mOp) const = 0;
+  virtual void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const = 0;
+
+ protected:
+  MemPool *memPool;
+  MapleAllocator allocator;
+
+ private:
+  MIRModule *mirModule;
+  Emitter *emitter;
+  LabelIDOrder labelOrderCnt;
+  static CGFunc *currentCGFunction;  /* current cg function being compiled */
+  CGOptions cgOption;
+  MIRSymbol *instrumentationFunction;
+  MIRSymbol *dbgTraceEnter = nullptr;
+  MIRSymbol *dbgTraceExit = nullptr;
+  MIRSymbol *dbgFuncProfile = nullptr;
+  MIRSymbol *fileGP;  /* for lmbc, one local %GP per file */
+  static std::map<MIRFunction*, std::pair<LabelIdx, LabelIdx>> funcWrapLabels;
+  bool isLibcore;
+  bool isLmbc;
+};  /* class CG */
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_CG_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_cfg.h b/ecmascript/mapleall/maple_be/include/cg/cg_cfg.h
new file mode 100644
index 0000000000000000000000000000000000000000..208722e17c55baf321282a81b8e8109e85b53765
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_cfg.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CG_CFG_H +#define MAPLEBE_INCLUDE_CG_CG_CFG_H +#include "eh_func.h" +#include "cgbb.h" + +namespace maplebe { +class InsnVisitor { + public: + explicit InsnVisitor(CGFunc &func) : cgFunc(&func) {} + + virtual ~InsnVisitor() = default; + CGFunc *GetCGFunc() const { + return cgFunc; + } + + /* + * Precondition: + * The last instruction in bb is either conditional or unconditional jump. + * + * The jump target of bb is modified to the location specified by targetLabel. + */ + virtual void ModifyJumpTarget(LabelIdx targetLabel, BB &bb) = 0; + + /* + * Precondition: + * The last instruction in bb is either conditional or unconditional jump. + * + * The jump target of bb is modified to the location specified by targetOperand. + */ + virtual void ModifyJumpTarget(Operand &targetOperand, BB &bb) = 0; + + /* + * Precondition: + * The last instruction in bb is either a conditional or an unconditional jump. + * The last instruction in newTarget is an unconditional jump. + * + * The jump target of bb is modified to newTarget's jump target. + */ + virtual void ModifyJumpTarget(BB &newTarget, BB &bb) = 0; + /* Check if it requires to add extra gotos when relocate bb */ + virtual Insn *CloneInsn(Insn &originalInsn) = 0; + /* Create a new virtual register operand which has the same type and size as the given one. */ + virtual RegOperand *CreateVregFromReg(const RegOperand ®) = 0; + virtual LabelIdx GetJumpLabel(const Insn &insn) const = 0; + virtual bool IsCompareInsn(const Insn &insn) const = 0; + virtual bool IsCompareAndBranchInsn(const Insn &insn) const = 0; + virtual bool IsAddOrSubInsn(const Insn &insn) const = 0; + + private: + CGFunc *cgFunc; +}; /* class InsnVisitor; */ + +class CGCFG { + public: + explicit CGCFG(CGFunc &cgFunc) : cgFunc(&cgFunc) {} + + ~CGCFG() = default; + + void BuildCFG(); + void CheckCFG(); + void CheckCFGFreq(); + + void InitInsnVisitor(CGFunc &func); + InsnVisitor *GetInsnModifier() const { + return insnVisitor; + } + + static bool AreCommentAllPreds(const BB &bb); + bool CanMerge(const BB &merger, const BB &mergee) const; + bool BBJudge(const BB &first, const BB &second) const; + /* + * Merge all instructions in mergee into merger, each BB's successors and + * predecessors should be modified accordingly. + */ + static void MergeBB(BB &merger, BB &mergee, CGFunc &func); + + /* + * Remove a BB from its position in the CFG. + * Prev, next, preds and sucs are all modified accordingly. 
+ */ + void RemoveBB(BB &curBB, bool isGotoIf = false); + /* Skip the successor of bb, directly jump to bb's successor'ssuccessor */ + void RetargetJump(BB &srcBB, BB &targetBB); + + /* Loop up if the given label is in the exception tables in LSDA */ + static bool InLSDA(LabelIdx label, const EHFunc &ehFunc); + static bool InSwitchTable(LabelIdx label, const CGFunc &func); + + RegOperand *CreateVregFromReg(const RegOperand &pReg); + Insn *CloneInsn(Insn &originalInsn); + static BB *GetTargetSuc(BB &curBB, bool branchOnly = false, bool isGotoIf = false); + bool IsCompareAndBranchInsn(const Insn &insn) const; + bool IsAddOrSubInsn(const Insn &insn) const; + + Insn *FindLastCondBrInsn(BB &bb) const; + static void FindAndMarkUnreachable(CGFunc &func); + void FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) const; + void MarkLabelTakenBB(); + void UnreachCodeAnalysis(); + void FindWillExitBBs(BB *bb, std::set *visitedBBs); + void WontExitAnalysis(); + BB *FindLastRetBB(); + + void UpdatePredsSuccsAfterSplit(BB &pred, BB &succ, BB &newBB); + void BreakCriticalEdge(BB &pred, BB &succ); + /* cgcfgvisitor */ + private: + CGFunc *cgFunc = nullptr; + static InsnVisitor *insnVisitor; + static void MergeBB(BB &merger, BB &mergee); +}; /* class CGCFG */ +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleCFG, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CG_CFG_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_critical_edge.h b/ecmascript/mapleall/maple_be/include/cg/cg_critical_edge.h new file mode 100644 index 0000000000000000000000000000000000000000..a613f3bf9d7684101c1b2cdb6ed6d6a063ab309c --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_critical_edge.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H +#define MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H + +#include "cgbb.h" +#include "insn.h" + +namespace maplebe { +class CriticalEdge{ + public: + CriticalEdge(CGFunc &func, MemPool &mem) + : cgFunc(&func), + alloc(&mem), + criticalEdges(alloc.Adapter()) + {} + + ~CriticalEdge() = default; + + void CollectCriticalEdges(); + void SplitCriticalEdges(); + + private: + CGFunc *cgFunc; + MapleAllocator alloc; + MapleVector> criticalEdges; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgCriticalEdge, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_dce.h b/ecmascript/mapleall/maple_be/include/cg/cg_dce.h new file mode 100644 index 0000000000000000000000000000000000000000..7fd57124ba1fc72fbfcc412be3cde84ab61623db --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_dce.h @@ -0,0 +1,59 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_dce.h b/ecmascript/mapleall/maple_be/include/cg/cg_dce.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fd57124ba1fc72fbfcc412be3cde84ab61623db
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_dce.h
@@ -0,0 +1,59 @@
+/*
+* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+*
+* OpenArkCompiler is licensed under Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+*
+* http://license.coscl.org.cn/MulanPSL2
+*
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+* FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*/
+
+#ifndef MAPLEBE_INCLUDE_CG_DCE_H
+#define MAPLEBE_INCLUDE_CG_DCE_H
+#include "cgfunc.h"
+#include "cg_ssa.h"
+
+namespace maplebe {
+/* dead code elimination */
+class CGDce {
+ public:
+  CGDce(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) : memPool(&mp), cgFunc(&f), ssaInfo(&sInfo) {}
+  virtual ~CGDce() = default;
+
+  void DoDce();
+  /* public so that SSA optimizations can reuse it */
+  virtual bool RemoveUnuseDef(VRegVersion &defVersion) = 0;
+  CGSSAInfo *GetSSAInfo() {
+    return ssaInfo;
+  }
+
+ protected:
+  MemPool *memPool;
+  CGFunc *cgFunc;
+  CGSSAInfo *ssaInfo;
+};
+
+class DeleteRegUseVisitor : public OperandVisitorBase,
+                            public OperandVisitors<RegOperand, ListOperand, MemOperand>,
+                            public OperandVisitor<PhiOperand> {
+ public:
+  DeleteRegUseVisitor(CGSSAInfo &cgSSAInfo, uint32 dInsnID) : deleteInsnId(dInsnID), ssaInfo(&cgSSAInfo) {}
+  virtual ~DeleteRegUseVisitor() = default;
+
+ protected:
+  CGSSAInfo *GetSSAInfo() {
+    return ssaInfo;
+  }
+  uint32 deleteInsnId;
+
+ private:
+  CGSSAInfo *ssaInfo;
+};
+
+MAPLE_FUNC_PHASE_DECLARE(CgDce, maplebe::CGFunc)
+}  /* namespace maplebe */
+#endif  /* MAPLEBE_INCLUDE_CG_DCE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_dominance.h b/ecmascript/mapleall/maple_be/include/cg/cg_dominance.h
new file mode 100644
index 0000000000000000000000000000000000000000..511187b97e4e450d93f8593ea3767afad2a13cd3
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_dominance.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_DOM_H
+#define MAPLEBE_INCLUDE_CG_DOM_H
+
+#include "cg_phase.h"
+#include "insn.h"
+#include "cgbb.h"
+#include "datainfo.h"
+#include "maple_phase.h"
+
+namespace maplebe {
+class DominanceBase : public AnalysisResult {
+ public:
+  DominanceBase(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector<BB*> &bbVec, BB &commonEntryBB,
+                BB &commonExitBB)
+      : AnalysisResult(&memPool),
+        domAllocator(&memPool),
+        tmpAllocator(&tmpPool),
+        bbVec(bbVec),
+        cgFunc(func),
+        commonEntryBB(commonEntryBB),
+        commonExitBB(commonExitBB) {}
+
+  ~DominanceBase() override = default;
+
+  BB &GetCommonEntryBB() const {
+    return commonEntryBB;
+  }
+
+  BB &GetCommonExitBB() const {
+    return commonExitBB;
+  }
+
+ protected:
+  bool CommonEntryBBIsPred(const BB &bb) const;
+  MapleAllocator domAllocator;  // stores the analysis results
+  MapleAllocator tmpAllocator;  // can be freed after dominator computation
+  MapleVector<BB*> &bbVec;
+  CGFunc &cgFunc;
+  BB &commonEntryBB;
+  BB &commonExitBB;
+};
+
+class DomAnalysis : public DominanceBase {
+ public:
+  DomAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector<BB*> &bbVec, BB &commonEntryBB,
+              BB &commonExitBB)
+      : DominanceBase(func, memPool, tmpPool, bbVec, commonEntryBB, commonExitBB),
+        postOrderIDVec(bbVec.size() + 1, -1, tmpAllocator.Adapter()),
+        reversePostOrder(tmpAllocator.Adapter()),
+        doms(bbVec.size() + 1, nullptr, domAllocator.Adapter()),
+        domFrontier(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
+        domChildren(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
+        iterDomFrontier(bbVec.size() + 1, MapleSet<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
+        dtPreOrder(bbVec.size() + 1, 0, domAllocator.Adapter()),
+        dtDfn(bbVec.size() + 1, -1, domAllocator.Adapter()),
+        dtDfnOut(bbVec.size() + 1, -1, domAllocator.Adapter()) {}
+  ~DomAnalysis() override = default;
+
+  void Compute();
+  void Dump();
+
+  void GenPostOrderID();
+  void ComputeDominance();
+  void ComputeDomFrontiers();
+  void ComputeDomChildren();
+  void GetIterDomFrontier(const BB *bb, MapleSet<uint32> *dfset, uint32 bbidMarker, std::vector<bool> &visitedMap);
+  void ComputeIterDomFrontiers();
+  uint32 ComputeDtPreorder(const BB &bb, uint32 &num);
+  bool Dominate(const BB &bb1, const BB &bb2);  // true if bb1 dominates bb2
+
+  MapleVector<BB*> &GetReversePostOrder() {
+    return reversePostOrder;
+  }
+
+  MapleVector<uint32> &GetDtPreOrder() {
+    return dtPreOrder;
+  }
+
+  uint32 GetDtPreOrderItem(size_t idx) const {
+    return dtPreOrder[idx];
+  }
+
+  size_t GetDtPreOrderSize() const {
+    return dtPreOrder.size();
+  }
+
+  uint32 GetDtDfnItem(size_t idx) const {
+    return dtDfn[idx];
+  }
+
+  size_t GetDtDfnSize() const {
+    return dtDfn.size();
+  }
+
+  BB *GetDom(uint32 id) {
+    DEBUG_ASSERT(id < doms.size(), "bbid out of range");
+    return doms[id];
+  }
+  void SetDom(uint32 id, BB *bb) {
+    DEBUG_ASSERT(id < doms.size(), "bbid out of range");
+    doms[id] = bb;
+  }
+  size_t GetDomsSize() const {
+    return doms.size();
+  }
+
+  auto &GetDomFrontier(size_t idx) {
+    return domFrontier[idx];
+  }
+  bool HasDomFrontier(uint32 id, uint32 frontier) const {
+    return std::find(domFrontier[id].begin(), domFrontier[id].end(), frontier) != domFrontier[id].end();
+  }
+
+  size_t GetDomFrontierSize() const {
+    return domFrontier.size();
+  }
+
+  auto &GetDomChildren() {
+    return domChildren;
+  }
+
+  auto &GetDomChildren(size_t idx) {
+    return domChildren[idx];
+  }
+
+  auto &GetIdomFrontier(uint32 idx) {
+    return iterDomFrontier[idx];
+  }
+
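+  /*
+   * dtDfn/dtDfnOut record each BB's entry position and subtree-max position in
+   * a preorder walk of the dominator tree, so once they are built a dominance
+   * query reduces to an interval check. Illustrative sketch only -- not
+   * necessarily how Dominate() is implemented:
+   *   bool DominatesById(uint32 a, uint32 b) const {
+   *     return dtDfn[a] <= dtDfn[b] && dtDfnOut[b] <= dtDfnOut[a];
+   *   }
+   */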
+  size_t GetDomChildrenSize() const {
+    return domChildren.size();
+  }
+
+ private:
+  void PostOrderWalk(const BB &bb, int32 &pid, MapleVector<bool> &visitedMap);
+  BB *Intersect(BB &bb1, const BB &bb2);
+
+  MapleVector<int32> postOrderIDVec;             // index is bb id
+  MapleVector<BB*> reversePostOrder;             // an ordering of the BBs in reverse postorder
+  MapleVector<BB*> doms;                         // index is bb id; immediate dominator for each BB
+  MapleVector<MapleVector<uint32>> domFrontier;  // index is bb id
+  MapleVector<MapleVector<uint32>> domChildren;  // index is bb id; for dom tree
+  MapleVector<MapleSet<uint32>> iterDomFrontier;
+  MapleVector<uint32> dtPreOrder;  // ordering of the BBs in a preorder traversal of the dominator tree
+  MapleVector<uint32> dtDfn;       // gives position of each BB in dt_preorder
+  MapleVector<uint32> dtDfnOut;    // max position of all nodes in the sub tree of each BB in dt_preorder
+};
+
+class PostDomAnalysis : public DominanceBase {
+ public:
+  PostDomAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector<BB*> &bbVec, BB &commonEntryBB,
+                  BB &commonExitBB)
+      : DominanceBase(func, memPool, tmpPool, bbVec, commonEntryBB, commonExitBB),
+        pdomPostOrderIDVec(bbVec.size() + 1, -1, tmpAllocator.Adapter()),
+        pdomReversePostOrder(tmpAllocator.Adapter()),
+        pdoms(bbVec.size() + 1, nullptr, domAllocator.Adapter()),
+        pdomFrontier(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
+        pdomChildren(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
+        iterPdomFrontier(bbVec.size() + 1, MapleSet<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
+        pdtPreOrder(bbVec.size() + 1, 0, domAllocator.Adapter()),
+        pdtDfn(bbVec.size() + 1, -1, domAllocator.Adapter()),
+        pdtDfnOut(bbVec.size() + 1, -1, domAllocator.Adapter()) {}
+
+  ~PostDomAnalysis() override = default;
+  void Compute();
+  void PdomGenPostOrderID();
+  void ComputePostDominance();
+  void ComputePdomFrontiers();
+  void ComputePdomChildren();
+  void GetIterPdomFrontier(const BB *bb, MapleSet<uint32> *dfset, uint32 bbidMarker, std::vector<bool> &visitedMap);
+  void ComputeIterPdomFrontiers();
+  uint32 ComputePdtPreorder(const BB &bb, uint32 &num);
+  bool PostDominate(const BB &bb1, const BB &bb2);  // true if bb1 postdominates bb2
+  void Dump();
+
+  auto &GetPdomFrontierItem(size_t idx) {
+    return pdomFrontier[idx];
+  }
+
+  size_t GetPdomFrontierSize() const {
+    return pdomFrontier.size();
+  }
+
+  auto &GetIpdomFrontier(uint32 idx) {
+    return iterPdomFrontier[idx];
+  }
+
+  auto &GetPdomChildrenItem(size_t idx) {
+    return pdomChildren[idx];
+  }
+
+  void ResizePdtPreOrder(size_t n) {
+    pdtPreOrder.resize(n);
+  }
+
+  uint32 GetPdtPreOrderItem(size_t idx) const {
+    return pdtPreOrder[idx];
+  }
+
+  size_t GetPdtPreOrderSize() const {
+    return pdtPreOrder.size();
+  }
+
+  uint32 GetPdtDfnItem(size_t idx) const {
+    return pdtDfn[idx];
+  }
+
+  int32 GetPdomPostOrderIDVec(size_t idx) const {
+    return pdomPostOrderIDVec[idx];
+  }
+
+  BB *GetPdomReversePostOrder(size_t idx) {
+    return pdomReversePostOrder[idx];
+  }
+
+  MapleVector<BB*> &GetPdomReversePostOrder() {
+    return pdomReversePostOrder;
+  }
+
+  size_t GetPdomReversePostOrderSize() const {
+    return pdomReversePostOrder.size();
+  }
+
+  bool HasPdomFrontier(uint32 id, uint32 frontier) const {
+    return std::find(pdomFrontier[id].begin(), pdomFrontier[id].end(), frontier) != pdomFrontier[id].end();
+  }
+
+  BB *GetPdom(uint32 id) {
+    DEBUG_ASSERT(id < pdoms.size(), "bbid out of range");
+    return pdoms[id];
+  }
+  void SetPdom(uint32 id, BB *bb) {
+    DEBUG_ASSERT(id < pdoms.size(), "bbid out of range");
+    pdoms[id] = bb;
+  }
+
+ private:
+  void PdomPostOrderWalk(const BB &bb, int32 &pid, MapleVector<bool> &visitedMap);
+  BB *PdomIntersect(BB &bb1, const BB &bb2);
+
+  MapleVector<int32> pdomPostOrderIDVec;          // index is bb id
+  MapleVector<BB*> pdomReversePostOrder;          // an ordering of the BBs in reverse postorder
+  MapleVector<BB*> pdoms;                         // index is bb id; immediate postdominator for each BB
+  MapleVector<MapleVector<uint32>> pdomFrontier;  // index is bb id
+  MapleVector<MapleVector<uint32>> pdomChildren;  // index is bb id; for pdom tree
+  MapleVector<MapleSet<uint32>> iterPdomFrontier;
+  MapleVector<uint32> pdtPreOrder;  // ordering of the BBs in a preorder traversal of the post-dominator tree
+  MapleVector<uint32> pdtDfn;       // gives position of each BB in pdt_preorder
+  MapleVector<uint32> pdtDfnOut;    // max position of all nodes in the sub tree of each BB in pdt_preorder
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgDomAnalysis, maplebe::CGFunc);
+  DomAnalysis *GetResult() {
+    return domAnalysis;
+  }
+  DomAnalysis *domAnalysis = nullptr;
+MAPLE_FUNC_PHASE_DECLARE_END
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostDomAnalysis, maplebe::CGFunc);
+  PostDomAnalysis *GetResult() {
+    return pdomAnalysis;
+  }
+  PostDomAnalysis *pdomAnalysis = nullptr;
+MAPLE_FUNC_PHASE_DECLARE_END
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_DOM_H */
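The members declared above -- post-order IDs, a reverse post-order, a `doms` vector indexed by BB id, and an `Intersect` helper -- match the shape of the classic Cooper-Harvey-Kennedy iterative dominator algorithm. A minimal sketch under that assumption, using plain `std::` containers in place of the Maple ones (this is not the engine's actual code):

```
#include <vector>

// Iterative idom computation over reverse post-order (Cooper-Harvey-Kennedy).
// preds[b] lists predecessor ids of block b, po[b] is b's post-order number,
// and rpo lists block ids in reverse post-order starting at entry.
std::vector<int> ComputeIdom(const std::vector<std::vector<int>> &preds,
                             const std::vector<int> &rpo,
                             const std::vector<int> &po, int entry) {
  std::vector<int> idom(preds.size(), -1);
  idom[entry] = entry;
  auto intersect = [&](int b1, int b2) {  // walk both up to the common dominator
    while (b1 != b2) {
      while (po[b1] < po[b2]) b1 = idom[b1];
      while (po[b2] < po[b1]) b2 = idom[b2];
    }
    return b1;
  };
  for (bool changed = true; changed;) {  // iterate to a fixed point
    changed = false;
    for (int b : rpo) {
      if (b == entry) continue;
      int newIdom = -1;
      for (int p : preds[b]) {
        if (idom[p] == -1) continue;  // skip unprocessed predecessors
        newIdom = (newIdom == -1) ? p : intersect(p, newIdom);
      }
      if (newIdom != -1 && newIdom != idom[b]) {
        idom[b] = newIdom;
        changed = true;
      }
    }
  }
  return idom;
}
```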
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_irbuilder.h b/ecmascript/mapleall/maple_be/include/cg/cg_irbuilder.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe0d23af0309f70f15c8d692f65208c9ee6d74ff
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_irbuilder.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H
+#define MAPLEBE_INCLUDE_CG_IRBUILDER_H
+
+#include "insn.h"
+#include "operand.h"
+
+namespace maplebe {
+class InsnBuilder {
+ public:
+  explicit InsnBuilder(MemPool &memPool) : mp(&memPool) {}
+  virtual ~InsnBuilder() {
+    mp = nullptr;
+  }
+
+  template <class Target>
+  Insn &BuildInsn(MOperator opCode) {
+    return BuildInsn(opCode, Target::kMd[opCode]);
+  }
+  Insn &BuildInsn(MOperator opCode, const InsnDesc &idesc);
+  Insn &BuildInsn(MOperator opCode, Operand &o0);
+  Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1);
+  Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2);
+  Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3);
+  Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4);
+  Insn &BuildInsn(MOperator opCode, std::vector<Operand*> &opnds);
+
+  Insn &BuildCfiInsn(MOperator opCode);
+  Insn &BuildDbgInsn(MOperator opCode);
+  VectorInsn &BuildVectorInsn(MOperator opCode, const InsnDesc &idesc);
+
+  uint32 GetCreatedInsnNum() const {
+    return createdInsnNum;
+  }
+
+ protected:
+  MemPool *mp;
+
+ private:
+  void IncreaseInsnNum() {
+    createdInsnNum++;
+  }
+  uint32 createdInsnNum = 0;
+};
+
+constexpr uint32 baseVirtualRegNO = 200;  /* avoid conflicts between virtual and physical */
+class OperandBuilder {
+ public:
+  explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0)
+      : alloc(&mp), virtualRegNum(mirPregNum) {}
+
+  /* create an operand in cgfunc when no mempool is supplied */
+  ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr);
+  ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr);
+  MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr);
+  MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size);
+  RegOperand &CreateVReg(uint32 size, RegType type, MemPool *mp = nullptr);
+  RegOperand &CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp = nullptr);
+  RegOperand &CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp = nullptr);
+  ListOperand &CreateList(MemPool *mp = nullptr);
+  FuncNameOperand &CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp = nullptr);
+  LabelOperand &CreateLabel(const char *parent, LabelIdx idx, MemPool *mp = nullptr);
+  CommentOperand &CreateComment(const std::string &s, MemPool *mp = nullptr);
+  CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr);
+
+  uint32 GetCurrentVRegNum() const {
+    return virtualRegNum;
+  }
+
+ protected:
+  MapleAllocator alloc;
+
+ private:
+  uint32 virtualRegNum = 0;
+  /* reg bank for multiple use */
+};
+}  /* namespace maplebe */
+#endif  /* MAPLEBE_INCLUDE_CG_IRBUILDER_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_occur.h b/ecmascript/mapleall/maple_be/include/cg/cg_occur.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae29f3570b333394ad6baa2c0886ff9eedd0634b
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_occur.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CGOCCUR_H +#define MAPLEBE_CG_INCLUDE_CGOCCUR_H +#include "cg_dominance.h" + +// the data structures that represent occurrences and work candidates for PRE +namespace maplebe { +enum OccType { + kOccUndef, + kOccReal, + kOccDef, + kOccStore, + kOccPhiocc, + kOccPhiopnd, + kOccExit, + kOccUse, // for use appearances when candidate is dassign + kOccMembar, // for representing occurrence of memory barriers (use CgRealOcc) +}; + +class CgOccur { + public: + CgOccur(OccType ty, BB *bb, Insn *insn, Operand *opnd) + : occTy(ty), + cgBB(bb), + insn(insn), + opnd(opnd) {} + + CgOccur(OccType ty, int cId, BB &bb, CgOccur *df) : occTy(ty), classID(cId), cgBB(&bb), def(df) {} + virtual ~CgOccur() = default; + + bool IsDominate(DomAnalysis &dom, CgOccur &occ); + const BB *GetBB() const { + return cgBB; + } + + BB *GetBB() { + return cgBB; + } + + void SetBB(BB &bb) { + cgBB = &bb; + } + + OccType GetOccType() const { + return occTy; + } + + int GetClassID() const { + return classID; + } + + void SetClassID(int id) { + classID = id; + } + + const CgOccur *GetDef() const { + return def; + } + + CgOccur *GetDef() { + return def; + } + + void SetDef(CgOccur *define) { + def = define; + } + + const Insn *GetInsn() const { + return insn; + } + + Insn *GetInsn() { + return insn; + } + + const Operand *GetOperand() const { + return opnd; + } + + Operand *GetOperand() { + return opnd; + } + + bool Processed() const { + return processed; + } + + void SetProcessed(bool val) { + processed = val; + } + + virtual CgOccur *GetPrevVersionOccur() { + CHECK_FATAL(false, "has no prev version occur"); + } + + virtual void SetPrevVersionOccur(CgOccur*) { + CHECK_FATAL(false, "has no prev version occur"); + } + + virtual void Dump() const { + if (occTy == kOccExit) { + LogInfo::MapleLogger() << "ExitOcc at bb " << GetBB()->GetId() << std::endl; + } + }; + + private: + OccType occTy = kOccUndef; // kinds of occ + int classID = 0; // class id + BB *cgBB = nullptr; // the BB it occurs in + Insn *insn = nullptr; + Operand *opnd = nullptr; + CgOccur *def = nullptr; + bool processed = false; +}; + +class CgUseOcc : public CgOccur { + public: + CgUseOcc(BB *bb, Insn *insn, Operand *opnd) + : CgOccur(kOccUse, bb, insn, opnd), + needReload(false) {} + + ~CgUseOcc() = default; + + bool Reload() const { + return needReload; + } + + void SetReload(bool val) { + needReload = val; + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "UseOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (needReload ? 
"need-reload, " : "not need-reload, ") + << "\n"; + } + private: + bool needReload = false; + CgOccur *prevVersion = nullptr; +}; + +class CgStoreOcc : public CgOccur { + public: + CgStoreOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccStore, bb, insn, opnd) {} + ~CgStoreOcc() = default; + + bool Reload() const { + return needReload; + } + + void SetReload(bool val) { + needReload = val; + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "StoreOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (needReload ? "reload, " : "not reload, ") + << "\n"; + } + private: + bool needReload = false; + CgOccur *prevVersion = nullptr; +}; + +class CgDefOcc : public CgOccur { + public: + CgDefOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccDef, bb, insn, opnd) {} + ~CgDefOcc() = default; + + bool Loaded() const { + return needStore; + } + + void SetLoaded(bool val) { + needStore = val; + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "DefOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (needStore ? "store" : "not store") + << "\n"; + } + private: + bool needStore = false; + CgOccur *prevVersion = nullptr; +}; + + +class CgPhiOpndOcc; +enum AvailState { + kFullyAvailable, + kPartialAvailable, + kNotAvailable +}; +class CgPhiOcc : public CgOccur { + public: + CgPhiOcc(BB &bb, Operand *opnd, MapleAllocator &alloc) + : CgOccur(kOccPhiocc, 0, bb, nullptr), + regOpnd(opnd), + isDownSafe(!bb.IsCatch()), + phiOpnds(alloc.Adapter()) {} + + virtual ~CgPhiOcc() = default; + + bool IsDownSafe() const { + return isDownSafe; + } + + void SetIsDownSafe(bool downSafe) { + isDownSafe = downSafe; + } + + const MapleVector &GetPhiOpnds() const { + return phiOpnds; + } + + MapleVector &GetPhiOpnds() { + return phiOpnds; + } + + Operand *GetOpnd() { + return regOpnd; + } + + CgPhiOpndOcc *GetPhiOpnd(size_t idx) { + DEBUG_ASSERT(idx < phiOpnds.size(), "out of range in CgPhiOcc::GetPhiOpnd"); + return phiOpnds.at(idx); + } + + const CgPhiOpndOcc *GetPhiOpnd(size_t idx) const { + DEBUG_ASSERT(idx < phiOpnds.size(), "out of range in CgPhiOcc::GetPhiOpnd"); + return phiOpnds.at(idx); + } + + void AddPhiOpnd(CgPhiOpndOcc &opnd) { + phiOpnds.push_back(&opnd); + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + bool IsFullyAvailable() const { + return availState == kFullyAvailable; + } + + bool IsPartialAvailable() const { + return availState == kPartialAvailable; + } + + bool IsNotAvailable() const { + return availState == kNotAvailable; + } + + void SetAvailability(AvailState val) { + availState = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "PhiOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (isDownSafe ? "downsafe, " : "not downsafe, ") + << (availState == kNotAvailable ? "not avail" + : (availState == kPartialAvailable ? 
"part avail" : "fully avail")) + << "\n"; + } + + private: + Operand *regOpnd; + bool isDownSafe = true; // default is true + AvailState availState = kFullyAvailable; + MapleVector phiOpnds; + CgOccur *prevVersion = nullptr; +}; + +class CgPhiOpndOcc : public CgOccur { + public: + CgPhiOpndOcc(BB *bb, Operand *opnd, CgPhiOcc *defPhi) + : CgOccur(kOccPhiopnd, bb, nullptr, opnd), + hasRealUse(false), + phiOcc(defPhi) {} + + ~CgPhiOpndOcc() = default; + + bool HasRealUse() const { + return hasRealUse; + } + + void SetHasRealUse(bool realUse) { + hasRealUse = realUse; + } + + const CgPhiOcc *GetPhiOcc() const { + return phiOcc; + } + + CgPhiOcc *GetPhiOcc() { + return phiOcc; + } + + void SetPhiOcc(CgPhiOcc &occ) { + phiOcc = &occ; + } + + bool Reload() const { + return reload; + } + void SetReload(bool val) { + reload = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "PhiOpndOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (hasRealUse ? "hasRealUse, " : "not hasRealUse, ") + << (reload ? "reload" : "not reload") << std::endl; + } + + private: + bool hasRealUse; + bool reload = false; + CgPhiOcc *phiOcc = nullptr; // its lhs +}; + +// each singly linked list represents each bucket in workCandHashTable +class PreWorkCand { + public: + PreWorkCand(MapleAllocator &alloc, Operand *curOpnd, PUIdx pIdx) + : next(nullptr), + allOccs(alloc.Adapter()), + realOccs(alloc.Adapter()), + phiOccs(alloc.Adapter()), + theOperand(curOpnd), + puIdx(pIdx), + redo2HandleCritEdges(false) { + DEBUG_ASSERT(pIdx != 0, "PreWorkCand: initial puIdx cannot be 0"); + } + + virtual ~PreWorkCand() = default; + + void AddRealOccAsLast(CgOccur &occ, PUIdx pIdx) { + realOccs.push_back(&occ); // add as last + DEBUG_ASSERT(pIdx != 0, "puIdx of realocc cannot be 0"); + if (pIdx != puIdx) { + puIdx = 0; + } + } + + const PreWorkCand *GetNext() const { + return next; + } + + PreWorkCand *GetNext() { + return next; + } + + void SetNext(PreWorkCand &workCand) { + next = &workCand; + } + + int32 GetIndex() const { + return index; + } + + void SetIndex(int idx) { + index = idx; + } + + const MapleVector &GetRealOccs() const { + return realOccs; + } + + MapleVector &GetRealOccs() { + return realOccs; + } + + const CgOccur *GetRealOcc(size_t idx) const { + DEBUG_ASSERT(idx < realOccs.size(), "out of range in PreWorkCand::GetRealOccAt"); + return realOccs.at(idx); + } + + CgOccur *GetRealOcc(size_t idx) { + DEBUG_ASSERT(idx < realOccs.size(), "out of range in PreWorkCand::GetRealOccAt"); + return realOccs.at(idx); + } + + const MapleVector &PhiOccs() const { + return phiOccs; + } + + MapleVector &PhiOccs() { + return phiOccs; + } + + const Operand *GetTheOperand() const { + return theOperand; + } + + Operand *GetTheOperand() { + return theOperand; + } + + void SetTheOperand(Operand &expr) { + theOperand = &expr; + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + void SetPUIdx(PUIdx idx) { + puIdx = idx; + } + + bool Redo2HandleCritEdges() const { + return redo2HandleCritEdges; + } + + void SetRedo2HandleCritEdges(bool redo) { + redo2HandleCritEdges = redo; + } + + private: + PreWorkCand *next; + int32 index = 0; + MapleVector allOccs; + MapleVector realOccs; // maintained in order of dt_preorder + MapleVector phiOccs; + Operand *theOperand; // the expression of this workcand + PUIdx puIdx; // if 0, its occ span multiple PUs; initial value must + // puIdx cannot be 0 if hasLocalOpnd is true + bool redo2HandleCritEdges : 1; // redo to make critical edges affect canbevail +}; + +class 
+
+class PreWorkCandHashTable {
+ public:
+  static const uint32 workCandHashLength = 229;
+  static uint32 ComputeWorkCandHashIndex(const Operand &opnd);
+  static uint32 ComputeStmtWorkCandHashIndex(const Insn &insn);
+
+  PreWorkCandHashTable() = default;
+  ~PreWorkCandHashTable() = default;
+
+  std::array<PreWorkCand*, workCandHashLength> &GetWorkcandHashTable() {
+    return workCandHashTable;
+  }
+
+  PreWorkCand *GetWorkcandFromIndex(size_t idx) {
+    return workCandHashTable[idx];
+  }
+
+  void SetWorkCandAt(size_t idx, PreWorkCand &workCand) {
+    workCandHashTable[idx] = &workCand;
+  }
+
+ private:
+  std::array<PreWorkCand*, workCandHashLength> workCandHashTable;
+};
+}  // namespace maplebe
+#endif  // MAPLEBE_CG_INCLUDE_CGOCCUR_H
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_option.h b/ecmascript/mapleall/maple_be/include/cg/cg_option.h
new file mode 100644
index 0000000000000000000000000000000000000000..34b5d2550f796815f90e8370ae523d36e1b6c126
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_option.h
@@ -0,0 +1,1431 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CG_OPTION_H
+#define MAPLEBE_INCLUDE_CG_CG_OPTION_H
+#include <string>
+#include <vector>
+#include <unordered_set>
+#include <unordered_map>
+#include <sys/stat.h>
+#include "mempool.h"
+#include "mempool_allocator.h"
+#include "mir_module.h"
+#include "types_def.h"
+
+namespace maplebe {
+using namespace maple;
+struct Range {
+  bool enable;
+  uint64 begin;
+  uint64 end;
+};
+
+typedef uint8 *(*MemoryManagerAllocateDataSectionCallback)(
+    void *codeSpace, uint32 size, uint32 alignment, const std::string &sectionName);
+
+typedef void (*MemoryManagerSaveFunc2AddressInfoCallback)(
+    void *codeSpace, std::string funcName, uint32_t address);
+
+class CGOptions {
+ public:
+  enum OptionEnum : uint64 {
+    kUndefined = 0ULL,
+    kDoCg = 1ULL << 0,
+    kDoLinearScanRegAlloc = 1ULL << 1,
+    kDoColorRegAlloc = 1ULL << 2,
+    kConstFold = 1ULL << 3,
+    kGenPic = 1ULL << 4,
+    kGenPie = 1ULL << 5,
+    kVerboseAsm = 1ULL << 6,
+    kGenInsertCall = 1ULL << 7,
+    kAddDebugTrace = 1ULL << 8,
+    kGenYieldPoint = 1ULL << 9,
+    kGenLocalRc = 1ULL << 10,
+    kProEpilogueOpt = 1ULL << 11,
+    kVerboseCG = 1ULL << 12,
+    kDebugFriendly = 1ULL << 20,
+    kWithLoc = 1ULL << 21,
+    kWithDwarf = 1ULL << 22,
+    kWithMpl = 1ULL << 23,
+    kWithSrc = 1ULL << 24,
+    kWithAsm = 1ULL << 25,
+    kWithProfileCode = 1ULL << 30,
+    kUseStackProtectorStrong = 1ULL << 31,
+    kUseStackProtectorAll = 1ULL << 32,
+    kSoeCheckInsert = 1ULL << 33,
+    kAddFuncProfile = 1ULL << 34,
+    kPatchLongBranch = 1ULL << 35,
+    kTailCallOpt = 1ULL << 36,
+    /* undocumented */
+    kDumpCFG = 1ULL << 61,
+    kDumpCgir = 1ULL << 62,
+    kSuppressFileInfo = 1ULL << 63,
+  };
+
+  using OptionFlag = uint64;
+
+  enum GenerateEnum : uint64 {
+    kCMacroDef = 1ULL << 0,
+    kGctib = 1ULL << 1,
+    kGrootList = 1ULL << 2,
+    kPrimorList = 1ULL << 3,
+  };
+
+  using GenerateFlag = uint64;
+
+  enum OptimizeLevel : uint8 {
+    kLevel0 = 0,
+    kLevelLiteCG = 1,
+    kLevel1 = 2,
+    kLevel2 = 3,
+  };
+
+  enum ABIType : uint8 {
+    kABIHard,
+    kABISoft,
+    kABISoftFP
+  };
+
+  enum EmitFileType : uint8 {
+    kAsm,
+    kObj,
+    kEmitNone,
+  };
+
+  struct EmitMemoryManager {
+    void *codeSpace;
+    MemoryManagerAllocateDataSectionCallback allocateDataSection;
+    MemoryManagerSaveFunc2AddressInfoCallback funcAddressSaver;
+  };
+  /*
+   * The default CG option values are:
+   * Don't be quiet; be verbose,
+   * DO CG and generate .s file as output,
+   * Generate EH,
+   * Use frame pointer,
+   * Generate CFI directives,
+   * DO peephole optimization,
+   * Generate position-independent executable,
+   * Don't insert debug comments in .s file,
+   * Don't insert a call to the named (instrumentation)
+   * function at each function entry.
+   */
+  static const OptionFlag kDefaultOptions = OptionFlag(
+#if TARGAARCH64 || TARGARM32 || TARGRISCV64
+      kDoCg | kGenPie | kDoColorRegAlloc
+#else
+      kDoCg
+#endif
+  );
+
+  /*
+   * The default metadata generation flag values are:
+   * Generate .macros.def for C preprocessors.
+   * Generate .groots.txt for GC.
+   * Generate .primordials.txt for GC.
+   * Generate yieldpoints for GC.
+   * Do not generate a separate GCTIB file.
+   */
+  static const GenerateFlag kDefaultGflags = GenerateFlag(0);
+
+ public:
+  static CGOptions &GetInstance();
+  virtual ~CGOptions() = default;
+  bool SolveOptions(bool isDebug);
+  void DecideMplcgRealLevel(bool isDebug);
+
+  void DumpOptions();
+  std::vector<std::string> &GetSequence() {
+    return phaseSequence;
+  }
+
+  const EmitMemoryManager &GetEmitMemoryManager() const {
+    return emitMemoryManager;
+  }
+
+  void SetupEmitMemoryManager(void *codeSpace,
+                              MemoryManagerAllocateDataSectionCallback allocateDataSection,
+                              MemoryManagerSaveFunc2AddressInfoCallback funcAddressSaver) {
+    emitMemoryManager.codeSpace = codeSpace;
+    emitMemoryManager.allocateDataSection = allocateDataSection;
+    emitMemoryManager.funcAddressSaver = funcAddressSaver;
+  }
+
+  template <typename T>
+  void SetOrClear(T &dest, uint64 flag, bool truth) const {
+    if (truth) {
+      dest |= flag;
+    } else {
+      dest &= ~flag;
+    }
+  }
+
+  void ParseExclusiveFunc(const std::string &fileName);
+  void ParseCyclePattern(const std::string &fileName);
+
+  void EnableO0();
+  void EnableO1();
+  void EnableO2();
+  void EnableLiteCG();
+
+  bool GenDef() const {
+    return generateFlag & kCMacroDef;
+  }
+
+  bool GenGctib() const {
+    return generateFlag & kGctib;
+  }
+
+  bool GenGrootList() const {
+    return generateFlag & kGrootList;
+  }
+
+  bool GenPrimorList() const {
+    return generateFlag & kPrimorList;
+  }
+
+  bool GenYieldPoint() const {
+    return generateFlag & kGenYieldPoint;
+  }
+
+  bool GenLocalRC() const {
+    return (generateFlag & kGenLocalRc) && !gcOnly;
+  }
+
+  bool DoConstFold() const {
+    return options & kConstFold;
+  }
+
+  bool DoEmitCode() const {
+    return (options & kDoCg) != 0;
+  }
+
+  bool GenerateExceptionHandlingCode() const {
+    return true;
+  }
+
+  bool DoLinearScanRegisterAllocation() const {
+    return (options & kDoLinearScanRegAlloc) != 0;
+  }
+  bool DoColoringBasedRegisterAllocation() const {
+    return (options & kDoColorRegAlloc) != 0;
+  }
+
+  bool GeneratePositionIndependentExecutable() const {
+    return (options & kGenPie) != 0;
+  }
+
+  bool GenerateVerboseAsm() const {
+    return (options & kVerboseAsm) != 0;
+  }
+
+  bool GenerateVerboseCG() const {
+    return (options & kVerboseCG) != 0;
+  }
+
+  bool GenerateDebugFriendlyCode() const {
+    return true;
+  }
+
+  bool DoPrologueEpilogue() const {
+    return (options & kProEpilogueOpt) != 0;
+  }
+
+  bool IsStackProtectorStrong() const {
+    return (options & kUseStackProtectorStrong) != 0;
+  }
+
+  bool IsStackProtectorAll() const {
+    return (options & kUseStackProtectorAll) != 0;
+  }
+
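+  /*
+   * Usage sketch (illustrative only; the variable names below are
+   * hypothetical): a driver translating a parsed command-line switch
+   * typically writes
+   *   SetOrClear(options, kVerboseAsm, enableVerboseAsm);
+   * which sets the kVerboseAsm bit when enableVerboseAsm is true and clears
+   * it otherwise, so every boolean switch maps onto one OptionFlag bit.
+   */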
+  bool WithLoc() const {
+    return (options & kWithLoc) != 0;
+  }
+
+  bool WithDwarf() const {
+    return (options & kWithDwarf) != 0;
+  }
+
+  bool WithSrc() const {
+    return (options & kWithSrc) != 0;
+  }
+
+  bool WithMpl() const {
+    return (options & kWithMpl) != 0;
+  }
+
+  bool WithAsm() const {
+    return (options & kWithAsm) != 0;
+  }
+
+  bool NeedInsertInstrumentationFunction() const {
+    return (options & kGenInsertCall) != 0;
+  }
+
+  bool InstrumentWithDebugTraceCall() const {
+    return (options & kAddDebugTrace) != 0;
+  }
+
+  bool InstrumentWithProfile() const {
+    return (options & kAddFuncProfile) != 0;
+  }
+
+  bool DoPatchLongBranch() const {
+    return (options & kPatchLongBranch) != 0;
+  }
+
+  bool DoTailCall() const {
+    return (options & kTailCallOpt) != 0;
+  }
+
+  bool DoCheckSOE() const {
+    return (options & kSoeCheckInsert) != 0;
+  }
+
+  bool SuppressFileInfo() const {
+    return (options & kSuppressFileInfo) != 0;
+  }
+
+  bool DoDumpCFG() const {
+    return (options & kDumpCFG) != 0;
+  }
+
+  void SetDefaultOptions(const MIRModule &mod);
+  static bool DumpPhase(const std::string &phase);
+  static bool FuncFilter(const std::string &name);
+  void SplitPhases(const std::string &str, std::unordered_set<std::string> &set);
+  void SetRange(const std::string &str, const std::string &cmd, Range &subRange);
+  void SetTargetMachine(const std::string &str);
+
+  int32 GetOptimizeLevel() const {
+    return optimizeLevel;
+  }
+
+  bool IsRunCG() const {
+    return runCGFlag;
+  }
+
+  void SetRunCGFlag(bool cgFlag) {
+    runCGFlag = cgFlag;
+  }
+
+  bool IsInsertCall() const {
+    return insertCall;
+  }
+
+  void SetInsertCall(bool insertFlag) {
+    insertCall = insertFlag;
+  }
+
+  bool IsGenerateObjectMap() const {
+    return generateObjectMap;
+  }
+
+  void SetGenerateObjectMap(bool flag) {
+    generateObjectMap = flag;
+  }
+
+  void SetParserOption(uint32 option) {
+    parserOption |= option;
+  }
+
+  uint32 GetParserOption() const {
+    return parserOption;
+  }
+
+  GenerateFlag &GetGenerateFlags() {
+    return generateFlag;
+  }
+
+  const GenerateFlag &GetGenerateFlags() const {
+    return generateFlag;
+  }
+
+  void SetGenerateFlags(GenerateFlag flag) {
+    generateFlag |= flag;
+  }
+
+  void SetOption(OptionFlag opFlag) {
+    options |= opFlag;
+  }
+
+  void ClearOption(OptionFlag opFlag) {
+    options &= ~opFlag;
+  }
+
+  const std::string &GetInstrumentationFunction() const {
+    return instrumentationFunction;
+  }
+
+  void SetInstrumentationFunction(const std::string &function) {
+    instrumentationFunction = function;
+  }
+
+  const std::string &GetClassListFile() const {
+    return classListFile;
+  }
+
+  void SetClassListFile(const std::string &classList) {
+    classListFile = classList;
+  }
+
+  void SetEHExclusiveFile(const std::string &ehExclusive) {
+    ehExclusiveFile = ehExclusive;
+  }
+
+  void SetCyclePatternFile(const std::string &cyclePattern) {
+    cyclePatternFile = cyclePattern;
+  }
+
+  static bool IsQuiet() {
+    return quiet;
+  }
+
+  static void SetQuiet(bool flag) {
+    quiet = flag;
+  }
+
+  static std::unordered_set<std::string> &GetDumpPhases() {
+    return dumpPhases;
+  }
+
+  static std::unordered_set<std::string> &GetSkipPhases() {
+    return skipPhases;
+  }
+
+  static bool IsSkipPhase(const std::string &phaseName) {
+    return !(skipPhases.find(phaseName) == skipPhases.end());
+  }
+
+  const std::vector<std::string> &GetEHExclusiveFunctionNameVec() const {
+    return ehExclusiveFunctionName;
+  }
+
+  static const std::unordered_map<std::string, std::vector<std::string>> &GetCyclePatternMap() {
+    return cyclePatternMap;
+  }
+
+  static bool IsSkipFromPhase(const std::string &phaseName) {
+    return skipFrom.compare(phaseName) == 0;
+  }
+
+  static const std::string
GetSkipFromPhase() { + return skipFrom; + } + + static void SetSkipFrom(const std::string &phaseName) { + skipFrom = phaseName; + } + + static bool IsSkipAfterPhase(const std::string &phaseName) { + return skipAfter.compare(phaseName) == 0; + } + + static const std::string GetSkipAfterPhase() { + return skipAfter; + } + + static void SetSkipAfter(const std::string &phaseName) { + skipAfter = phaseName; + } + + static const std::string &GetDumpFunc() { + return dumpFunc; + } + + static bool IsDumpFunc(const std::string &func) { + return ((dumpFunc.compare("*") == 0) || (func.find(CGOptions::dumpFunc.c_str()) != std::string::npos)); + } + + static void SetDumpFunc(const std::string &func) { + dumpFunc = func; + } + static size_t FindIndexInProfileData(char data) { + return profileData.find(data); + } + + static void SetProfileData(const std::string &path) { + profileData = path; + } + + static std::string &GetProfileData() { + return profileData; + } + + static const std::string GetProfileDataSubStr(size_t begin, size_t end) { + return profileData.substr(begin, end); + } + + static const std::string GetProfileDataSubStr(size_t position) { + return profileData.substr(position); + } + + static bool IsProfileDataEmpty() { + return profileData.empty(); + } + + static const std::string &GetProfileFuncData() { + return profileFuncData; + } + + static bool IsProfileFuncDataEmpty() { + return profileFuncData.empty(); + } + + static void SetProfileFuncData(const std::string &data) { + profileFuncData = data; + } + + static const std::string &GetProfileClassData() { + return profileClassData; + } + + static void SetProfileClassData(const std::string &data) { + profileClassData = data; + } + + static const std::string &GetDuplicateAsmFile() { + return duplicateAsmFile; + } + + static bool IsDuplicateAsmFileEmpty() { + if (duplicateAsmFile.empty()) { + return true; + } + struct stat buffer; + if (stat(duplicateAsmFile.c_str(), &buffer) != 0) { + return true; + } + return false; + } + + static void SetDuplicateAsmFile(const std::string &fileName) { + duplicateAsmFile = fileName; + } + + static bool UseRange() { + return range.enable; + } + static const std::string &GetFastFuncsAsmFile() { + return fastFuncsAsmFile; + } + + static bool IsFastFuncsAsmFileEmpty() { + return fastFuncsAsmFile.empty(); + } + + static void SetFastFuncsAsmFile(const std::string &fileName) { + fastFuncsAsmFile = fileName; + } + + static Range &GetRange() { + return range; + } + + static uint64 GetRangeBegin() { + return range.begin; + } + + static uint64 GetRangeEnd() { + return range.end; + } + + static Range &GetSpillRanges() { + return spillRanges; + } + + static uint64 GetSpillRangesBegin() { + return spillRanges.begin; + } + + static uint64 GetSpillRangesEnd() { + return spillRanges.end; + } + + static uint64 GetLSRABBOptSize() { + return lsraBBOptSize; + } + + static void SetLSRABBOptSize(uint64 size) { + lsraBBOptSize = size; + } + + static void SetLSRAInsnOptSize(uint64 size) { + lsraInsnOptSize = size; + } + + static uint64 GetOverlapNum() { + return overlapNum; + } + + static void SetOverlapNum(uint64 num) { + overlapNum = num; + } + + static uint8 GetRematLevel() { + return rematLevel; + } + + static bool OptimizeForSize() { + return optForSize; + } + + static void SetRematLevel(uint8 level) { + rematLevel = level; + } + + static uint8 GetFastAllocMode() { + return fastAllocMode; + } + + static void SetFastAllocMode(uint8 mode) { + fastAllocMode = mode; + } + + static void EnableBarriersForVolatile() { + 
useBarriersForVolatile = true; + } + + static void DisableBarriersForVolatile() { + useBarriersForVolatile = false; + } + + static bool UseBarriersForVolatile() { + return useBarriersForVolatile; + } + static void EnableFastAlloc() { + fastAlloc = true; + } + + static bool IsFastAlloc() { + return fastAlloc; + } + + static bool IsEnableTimePhases() { + return timePhases; + } + + static void EnableTimePhases() { + timePhases = true; + } + + static void DisableTimePhases() { + timePhases = false; + } + + static void EnableInRange() { + inRange = true; + } + + static void DisableInRange() { + inRange = false; + } + + static bool IsInRange() { + return inRange; + } + + static void EnableEBO() { + doEBO = true; + } + + static void DisableEBO() { + doEBO = false; + } + + static bool DoEBO() { + return doEBO; + } + + static void DisableCGSSA() { + doCGSSA = false; + } + + static void EnableCGSSA() { + doCGSSA = true; + } + + static bool DoCGSSA() { + return doCGSSA; + } + + static void DisableIPARA() { + doIPARA = false; + } + + static bool DoIPARA() { + return doIPARA; + } + + static void EnableCFGO() { + doCFGO = true; + } + + static void DisableCFGO() { + doCFGO = false; + } + + static bool DoCFGO() { + return doCFGO; + } + + static void EnableRegSavesOpt() { + doRegSavesOpt = true; + } + + static void DisableRegSavesOpt() { + doRegSavesOpt = false; + } + + static bool DoRegSavesOpt() { + return doRegSavesOpt; + } + + static void EnableSsaPreSave() { + useSsaPreSave = true; + } + + static void DisableSsaPreSave() { + useSsaPreSave = false; + } + + static bool UseSsaPreSave() { + return useSsaPreSave; + } + static void EnableSsuPreRestore() { + useSsuPreRestore = true; + } + + static void DisableSsuPreRestore() { + useSsuPreRestore = false; + } + + static bool UseSsuPreRestore() { + return useSsuPreRestore; + } + + static void EnableICO() { + doICO = true; + } + + static void DisableICO() { + doICO = false; + } + + static bool DoICO() { + return doICO; + } + + static void EnableStoreLoadOpt() { + doStoreLoadOpt = true; + } + + static void DisableStoreLoadOpt() { + doStoreLoadOpt = false; + } + + static bool DoStoreLoadOpt() { + return doStoreLoadOpt; + } + + static void EnableGlobalOpt() { + doGlobalOpt = true; + } + + static void DisableGlobalOpt() { + doGlobalOpt = false; + } + + static void EnableHotColdSplit() { + enableHotColdSplit = true; + } + + static void DisableHotColdSplit() { + enableHotColdSplit = false; + } + + static bool DoEnableHotColdSplit() { + return enableHotColdSplit; + } + + static bool DoGlobalOpt() { + return doGlobalOpt; + } + + static void EnableAlignAnalysis() { + doAlignAnalysis = true; + } + + static void DisableAlignAnalysis() { + doAlignAnalysis = false; + } + + static bool DoAlignAnalysis() { + return doAlignAnalysis; + } + + static void EnableCondBrAlign() { + doCondBrAlign = true; + } + + static void DisableCondBrAlign() { + doCondBrAlign = false; + } + + static bool DoCondBrAlign() { + return doCondBrAlign; + } + + static void EnableBigEndianInCG() { + cgBigEndian = true; + } + + static void DisableBigEndianInCG() { + cgBigEndian = false; + } + + static bool IsBigEndian() { + return cgBigEndian; + } + + static void EnableArm64ilp32() { + arm64ilp32 = true; + } + + static void DisableArm64ilp32() { + arm64ilp32 = false; + } + + static bool IsArm64ilp32() { + return arm64ilp32; + } + + static bool IsTargetX86_64() { + return targetArch == "x86_64"; + }; + + static void EnableVregRename() { + doVregRename = true; + } + + static void DisableVregRename() { + 
doVregRename = false; + } + + static bool DoVregRename() { + return doVregRename; + } + + static void EnableMultiPassColorRA() { + doMultiPassColorRA = true; + } + + static void DisableMultiPassColorRA() { + doMultiPassColorRA = false; + } + + static bool DoMultiPassColorRA() { + return doMultiPassColorRA; + } + + static void EnablePreLSRAOpt() { + doPreLSRAOpt = true; + } + + static void DisablePreLSRAOpt() { + doPreLSRAOpt = false; + } + + static bool DoPreLSRAOpt() { + return doPreLSRAOpt; + } + + static void EnableLocalRefSpill() { + doLocalRefSpill = true; + } + + static void DisableLocalRefSpill() { + doLocalRefSpill = false; + } + + static bool DoLocalRefSpill() { + return doLocalRefSpill; + } + + static void EnableCalleeToSpill() { + doCalleeToSpill = true; + } + + static void DisableCalleeToSpill() { + doCalleeToSpill = false; + } + + static bool DoCalleeToSpill() { + return doCalleeToSpill; + } + + static void EnablePrePeephole() { + doPrePeephole = true; + } + + static void DisablePrePeephole() { + doPrePeephole = false; + } + + static bool DoPrePeephole() { + return doPrePeephole; + } + + static void EnablePeephole() { + doPeephole = true; + } + + static void DisablePeephole() { + doPeephole = false; + } + + static bool DoPeephole() { + return doPeephole; + } + + static void EnableRetMerge() { + doRetMerge = true; + } + + static void DisableRetMerge() { + doRetMerge = false; + } + + static bool DoRetMerge() { + return doRetMerge; + } + + static void EnablePreSchedule() { + doPreSchedule = true; + } + + static void DisablePreSchedule() { + doPreSchedule = false; + } + + static bool DoPreSchedule() { + return doPreSchedule; + } + + static void EnableSchedule() { + doSchedule = true; + } + + static void DisableSchedule() { + doSchedule = false; + } + + static bool DoSchedule() { + return doSchedule; + } + static void EnableWriteRefFieldOpt() { + doWriteRefFieldOpt = true; + } + + static void DisableWriteRefFieldOpt() { + doWriteRefFieldOpt = false; + } + static bool DoWriteRefFieldOpt() { + return doWriteRefFieldOpt; + } + + static void EnableDumpOptimizeCommonLog() { + dumpOptimizeCommonLog = true; + } + + static void DisableDumpOptimizeCommonLog() { + dumpOptimizeCommonLog = false; + } + + static bool IsDumpOptimizeCommonLog() { + return dumpOptimizeCommonLog; + } + + static void EnableCheckArrayStore() { + checkArrayStore = true; + } + + static void DisableCheckArrayStore() { + checkArrayStore = false; + } + + static bool IsCheckArrayStore() { + return checkArrayStore; + } + + static void EnableExclusiveEH() { + exclusiveEH = true; + } + + static bool IsExclusiveEH() { + return exclusiveEH; + } + + static void EnablePIC() { + doPIC = true; + } + + static void DisablePIC() { + doPIC = false; + } + + static bool IsPIC() { + return doPIC; + } + + static void EnableNoDupBB() { + noDupBB = true; + } + + static void DisableNoDupBB() { + noDupBB = false; + } + + static bool IsNoDupBB() { + return noDupBB; + } + + static void EnableNoCalleeCFI() { + noCalleeCFI = true; + } + + static void DisableNoCalleeCFI() { + noCalleeCFI = false; + } + + static bool IsNoCalleeCFI() { + return noCalleeCFI; + } + + static void EnableEmitCyclePattern() { + emitCyclePattern = true; + } + + static bool IsInsertYieldPoint() { + return insertYieldPoint; + } + + static void EnableMapleLinker() { + mapleLinker = true; + } + + static void DisableMapleLinker() { + mapleLinker = false; + } + + static bool IsMapleLinker() { + return mapleLinker; + } + static void EnableReplaceASM() { + replaceASM = true; + } + 
+ static void DisableReplaceASM() { + replaceASM = false; + } + + static bool IsReplaceASM() { + return replaceASM; + } + + static void EnableGeneralRegOnly() { + generalRegOnly = true; + } + + static void DisableGeneralRegOnly() { + generalRegOnly = false; + } + + static bool UseGeneralRegOnly() { + return generalRegOnly; + } + + static void EnablePrintFunction() { + printFunction = true; + } + + static void DisablePrintFunction() { + printFunction = false; + } + + static bool IsPrintFunction() { + return printFunction; + } + + static std::string &GetGlobalVarProFile() { + return globalVarProfile; + } + + static bool IsGlobalVarProFileEmpty() { + return globalVarProfile.empty(); + } + + static bool IsEmitBlockMarker() { + return emitBlockMarker; + } + + static void EnableNativeOpt() { + nativeOpt = true; + } + + static void DisableNativeOpt() { + nativeOpt = false; + } + + static bool IsNativeOpt() { + return nativeOpt; + } + + static void EnableLazyBinding() { + lazyBinding = true; + } + + static void DisableLazyBinding() { + lazyBinding = false; + } + + static bool IsLazyBinding() { + return lazyBinding; + } + + static void EnableHotFix() { + hotFix = true; + } + + static void DisableHotFix() { + hotFix = false; + } + + static bool IsHotFix() { + return hotFix; + } + + static void EnableDebugSched() { + debugSched = true; + } + + static void DisableDebugSched() { + debugSched = false; + } + + static bool IsDebugSched() { + return debugSched; + } + + static void EnableDruteForceSched() { + bruteForceSched = true; + } + + static void DisableDruteForceSched() { + bruteForceSched = false; + } + + static bool IsDruteForceSched() { + return bruteForceSched; + } + + static void EnableSimulateSched() { + simulateSched = true; + } + + static void DisableSimulateSched() { + simulateSched = false; + } + + static bool IsSimulateSched() { + return simulateSched; + } + + static void SetABIType(const std::string &type) { + if (type == "hard") { + abiType = kABIHard; + } else if (type == "soft") { + CHECK_FATAL(false, "float-abi=soft is not supported Currently."); + } else if (type == "softfp") { + abiType = kABISoftFP; + } else { + CHECK_FATAL(false, "unexpected abi-type, only hard, soft and softfp are supported"); + } + } + + static ABIType GetABIType() { + return abiType; + } + + static void SetEmitFileType(const std::string &type) { + if (type == "asm") { + emitFileType = kAsm; + } else if (type == "obj") { + emitFileType = kObj; + } else if (type == "null") { + emitFileType = kEmitNone; + CHECK_FATAL(false, "null is not supported Currently."); + } else { + CHECK_FATAL(false, "unexpected file-type, only asm, obj, and null are supported"); + } + } + + static EmitFileType GetEmitFileType() { + return emitFileType; + } + + static void EnableLongCalls() { + genLongCalls = true; + } + + static void DisableLongCalls() { + genLongCalls = false; + } + + static bool IsLongCalls() { + return genLongCalls; + } + + static void EnableFunctionSections() { + functionSections = true; + } + + static void DisableFunctionSections() { + functionSections = false; + } + + static bool IsFunctionSections() { + return functionSections; + } + + static void EnableFramePointer() { + useFramePointer = true; + } + + static void DisableFramePointer() { + useFramePointer = false; + } + + static bool UseFramePointer() { + return useFramePointer; + } + + static void EnableGCOnly() { + gcOnly = true; + } + + static void DisableGCOnly() { + gcOnly = false; + } + + static bool IsGCOnly() { + return gcOnly; + } + + const OptionFlag 
&GetOptionFlag() const { + return options; + } + + void SetOptionFlag(const OptionFlag &flag) { + options = flag; + } + + static void EnableFastMath() { + fastMath = true; + } + + static void DisableFastMath() { + fastMath = false; + } + + static bool IsFastMath() { + return fastMath; + } + + static void EnableCommon() { + noCommon = false; + } + + static void DisableCommon() { + noCommon = true; + } + + static bool IsNoCommon() { + return noCommon; + } + + static void SetAlignMinBBSize(uint32 minBBSize) { + alignMinBBSize = minBBSize; + } + + static uint32 GetAlignMinBBSize() { + return alignMinBBSize; + } + + static void SetAlignMaxBBSize(uint32 maxBBSize) { + alignMaxBBSize = maxBBSize; + } + + static uint32 GetAlignMaxBBSize() { + return alignMaxBBSize; + } + + static void SetLoopAlignPow(uint32 loopPow) { + loopAlignPow = loopPow; + } + + static uint32 GetLoopAlignPow() { + return loopAlignPow; + } + + static void SetJumpAlignPow(uint32 jumpPow) { + jumpAlignPow = jumpPow; + } + + static uint32 GetJumpAlignPow() { + return jumpAlignPow; + } + + static void SetFuncAlignPow(uint32 funcPow) { + funcAlignPow = funcPow; + } + + static uint32 GetFuncAlignPow() { + return funcAlignPow; + } + + private: + std::vector phaseSequence; + EmitMemoryManager emitMemoryManager; + + bool insertCall = false; + bool runCGFlag = true; + bool generateObjectMap = true; + uint32 parserOption = 0; + int32 optimizeLevel = 0; + + GenerateFlag generateFlag = 0; + OptionFlag options = kUndefined; + std::string instrumentationFunction; + + std::string classListFile; + std::string ehExclusiveFile; + std::string cyclePatternFile; + /* we don't do exception handling in this list */ + std::vector ehExclusiveFunctionName; + + static bool quiet; + static std::string targetArch; + static std::unordered_set dumpPhases; + static std::unordered_set skipPhases; + static std::unordered_map> cyclePatternMap; + static std::string skipFrom; + static std::string skipAfter; + static std::string dumpFunc; + static std::string duplicateAsmFile; + static bool optForSize; + static bool enableHotColdSplit; + static bool useBarriersForVolatile; + static bool timePhases; + static bool cgBigEndian; + static bool doEBO; + static bool doCGSSA; + static bool doIPARA; + static bool doCFGO; + static bool doICO; + static bool doStoreLoadOpt; + static bool doGlobalOpt; + static bool doVregRename; + static bool doMultiPassColorRA; + static bool doPrePeephole; + static bool doPeephole; + static bool doRetMerge; + static bool doSchedule; + static bool doAlignAnalysis; + static bool doCondBrAlign; + static bool doWriteRefFieldOpt; + static bool doRegSavesOpt; + static bool useSsaPreSave; + static bool useSsuPreRestore; + static bool dumpOptimizeCommonLog; + static bool checkArrayStore; + static bool exclusiveEH; + static bool doPIC; + static bool noDupBB; + static bool noCalleeCFI; + static bool emitCyclePattern; + static bool insertYieldPoint; + static bool mapleLinker; + static bool printFunction; + static std::string globalVarProfile; + static bool nativeOpt; + static bool lazyBinding; + static bool arm64ilp32; + static bool hotFix; + /* if true dump scheduling information */ + static bool debugSched; + /* if true do BruteForceSchedule */ + static bool bruteForceSched; + /* if true do SimulateSched */ + static bool simulateSched; + static ABIType abiType; + static EmitFileType emitFileType; + /* if true generate adrp/ldr/blr */ + static bool genLongCalls; + static bool functionSections; + static bool useFramePointer; + static bool gcOnly; + 
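+  // A minimal configuration sketch, assuming only the alignment setters
+  // declared above. The *Pow names suggest exponents of two (cf. the
+  // assembler's .p2align), so a value of 2 would mean 2^2 = 4-byte alignment:
+  //   maplebe::CGOptions::SetAlignMinBBSize(16);  // skip very small BBs
+  //   maplebe::CGOptions::SetAlignMaxBBSize(96);  // skip very large BBs
+  //   maplebe::CGOptions::SetLoopAlignPow(2);     // align loop headers to 4 bytes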
static bool doPreSchedule; + static bool emitBlockMarker; + static Range range; + static bool inRange; + static bool doPatchLongBranch; + static std::string profileData; + static std::string profileFuncData; + static std::string profileClassData; + static std::string fastFuncsAsmFile; + static Range spillRanges; + static uint64 lsraBBOptSize; + static uint64 lsraInsnOptSize; + static uint64 overlapNum; + static uint8 rematLevel; + static uint8 fastAllocMode; + static bool fastAlloc; + static bool doPreLSRAOpt; + static bool doLocalRefSpill; + static bool doCalleeToSpill; + static bool replaceASM; + static bool generalRegOnly; + static std::string literalProfile; + static bool fastMath; + static bool noCommon; + static uint32 alignMinBBSize; + static uint32 alignMaxBBSize; + static uint32 loopAlignPow; + static uint32 jumpAlignPow; + static uint32 funcAlignPow; +}; +} /* namespace maplebe */ + +#define SET_FIND(SET, NAME) ((SET).find(NAME)) +#define SET_END(SET) ((SET).end()) +#define IS_STR_IN_SET(SET, NAME) (SET_FIND(SET, NAME) != SET_END(SET)) + +#define CG_DEBUG_FUNC(f) \ + (!maplebe::CGOptions::GetDumpPhases().empty() && maplebe::CGOptions::IsDumpFunc((f).GetName()) && \ + maplebe::CGOptions::GetDumpPhases().find(PhaseName()) != maplebe::CGOptions::GetDumpPhases().end()) +#ifndef TRACE_PHASE +#define TRACE_PHASE (IS_STR_IN_SET(maplebe::CGOptions::GetDumpPhases(), PhaseName())) +#endif + +#endif /* MAPLEBE_INCLUDE_CG_CG_OPTION_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_options.h b/ecmascript/mapleall/maple_be/include/cg/cg_options.h new file mode 100644 index 0000000000000000000000000000000000000000..2f4b5dfcb32804099b849fcdadd5a6cc13af4988 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_options.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLE_BE_INCLUDE_CG_OPTIONS_H +#define MAPLE_BE_INCLUDE_CG_OPTIONS_H + +#include "cl_option.h" +#include "cl_parser.h" + +#include +#include +#include + +namespace opts::cg { + +extern maplecl::Option pie; +extern maplecl::Option fpic; +extern maplecl::Option verboseAsm; +extern maplecl::Option verboseCg; +extern maplecl::Option maplelinker; +extern maplecl::Option quiet; +extern maplecl::Option cg; +extern maplecl::Option replaceAsm; +extern maplecl::Option generalRegOnly; +extern maplecl::Option lazyBinding; +extern maplecl::Option hotFix; +extern maplecl::Option ebo; +extern maplecl::Option cfgo; +extern maplecl::Option ico; +extern maplecl::Option storeloadopt; +extern maplecl::Option globalopt; +extern maplecl::Option hotcoldsplit; +extern maplecl::Option prelsra; +extern maplecl::Option lsraLvarspill; +extern maplecl::Option lsraOptcallee; +extern maplecl::Option calleeregsPlacement; +extern maplecl::Option ssapreSave; +extern maplecl::Option ssupreRestore; +extern maplecl::Option prepeep; +extern maplecl::Option peep; +extern maplecl::Option preschedule; +extern maplecl::Option schedule; +extern maplecl::Option retMerge; +extern maplecl::Option vregRename; +extern maplecl::Option fullcolor; +extern maplecl::Option writefieldopt; +extern maplecl::Option dumpOlog; +extern maplecl::Option nativeopt; +extern maplecl::Option objmap; +extern maplecl::Option yieldpoint; +extern maplecl::Option proepilogue; +extern maplecl::Option localRc; +extern maplecl::Option insertCall; +extern maplecl::Option addDebugTrace; +extern maplecl::Option addFuncProfile; +extern maplecl::Option classListFile; +extern maplecl::Option genCMacroDef; +extern maplecl::Option genGctibFile; +extern maplecl::Option stackProtectorStrong; +extern maplecl::Option stackProtectorAll; +extern maplecl::Option debug; +extern maplecl::Option gdwarf; +extern maplecl::Option gsrc; +extern maplecl::Option gmixedsrc; +extern maplecl::Option gmixedasm; +extern maplecl::Option profile; +extern maplecl::Option withRaLinearScan; +extern maplecl::Option withRaGraphColor; +extern maplecl::Option patchLongBranch; +extern maplecl::Option constFold; +extern maplecl::Option ehExclusiveList; +extern maplecl::Option o0; +extern maplecl::Option o1; +extern maplecl::Option o2; +extern maplecl::Option os; +extern maplecl::Option olitecg; +extern maplecl::Option lsraBb; +extern maplecl::Option lsraInsn; +extern maplecl::Option lsraOverlap; +extern maplecl::Option remat; +extern maplecl::Option suppressFileinfo; +extern maplecl::Option dumpCfg; +extern maplecl::Option target; +extern maplecl::Option dumpPhases; +extern maplecl::Option skipPhases; +extern maplecl::Option skipFrom; +extern maplecl::Option skipAfter; +extern maplecl::Option dumpFunc; +extern maplecl::Option timePhases; +extern maplecl::Option useBarriersForVolatile; +extern maplecl::Option range; +extern maplecl::Option fastAlloc; +extern maplecl::Option spillRange; +extern maplecl::Option dupBb; +extern maplecl::Option calleeCfi; +extern maplecl::Option printFunc; +extern maplecl::Option cyclePatternList; +extern maplecl::Option duplicateAsmList; +extern maplecl::Option duplicateAsmList2; +extern maplecl::Option blockMarker; +extern maplecl::Option soeCheck; +extern maplecl::Option checkArraystore; +extern maplecl::Option debugSchedule; +extern maplecl::Option bruteforceSchedule; +extern maplecl::Option simulateSchedule; +extern maplecl::Option crossLoc; +extern maplecl::Option floatAbi; +extern maplecl::Option filetype; +extern maplecl::Option longCalls; +extern 
maplecl::Option functionSections; +extern maplecl::Option omitFramePointer; +extern maplecl::Option fastMath; +extern maplecl::Option tailcall; +extern maplecl::Option alignAnalysis; +extern maplecl::Option cgSsa; +extern maplecl::Option common; +extern maplecl::Option condbrAlign; +extern maplecl::Option alignMinBbSize; +extern maplecl::Option alignMaxBbSize; +extern maplecl::Option loopAlignPow; +extern maplecl::Option jumpAlignPow; +extern maplecl::Option funcAlignPow; + +} + +#endif /* MAPLE_BE_INCLUDE_CG_OPTIONS_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_phase.h b/ecmascript/mapleall/maple_be/include/cg/cg_phase.h new file mode 100644 index 0000000000000000000000000000000000000000..5c0fe56006505489b092d98a82c7604d61b62023 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_phase.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CG_PHASE_H +#define MAPLEBE_INCLUDE_CG_CG_PHASE_H + +namespace maple {} +namespace maplebe { +using namespace maple; +class CGFunc; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CG_PHASE_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_phasemanager.h b/ecmascript/mapleall/maple_be/include/cg/cg_phasemanager.h new file mode 100644 index 0000000000000000000000000000000000000000..a5c40fcb50aeea6f2a71ed0341612e305014cded --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_phasemanager.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H +#define MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H +#include +#include +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "mir_module.h" +#include "mir_lower.h" +#include "lower.h" +#include "constantfold.h" +#include "cgfunc.h" +#include "cg_phase.h" +#include "cg_option.h" +namespace maplebe { +using cgFuncOptTy = MapleFunctionPhase; + +/* =================== new phase manager =================== */ +class CgFuncPM : public FunctionPM { + public: + explicit CgFuncPM(MemPool *mp) : FunctionPM(mp, &id) {} + PHASECONSTRUCTOR(CgFuncPM); + std::string PhaseName() const override; + ~CgFuncPM() override { + cgOptions = nullptr; + cg = nullptr; + beCommon = nullptr; + if (CGOptions::IsEnableTimePhases()) { + DumpPhaseTime(); + } + } + bool PhaseRun(MIRModule &m) override; + + void SetCGOptions(CGOptions *curCGOptions) { + cgOptions = curCGOptions; + } + + CG *GetCG() { + return cg; + } + BECommon *GetBECommon() { + return beCommon; + } + private: + bool FuncLevelRun(CGFunc &cgFunc, AnalysisDataManager &serialADM); + void GenerateOutPutFile(MIRModule &m); + void CreateCGAndBeCommon(MIRModule &m); + void PrepareLower(MIRModule &m); + void PostOutPut(MIRModule &m); + void DoFuncCGLower(const MIRModule &m, MIRFunction &mirFunc); + /* Tool functions */ + void DumpFuncCGIR(const CGFunc &f, const std::string &phaseName) const; + /* For Emit */ + void InitProfile(MIRModule &m) const; + void EmitGlobalInfo(MIRModule &m) const; + void EmitDuplicatedAsmFunc(MIRModule &m) const; + void EmitDebugInfo(const MIRModule &m) const; + void EmitFastFuncs(const MIRModule &m) const; + bool IsFramework(MIRModule &m) const; + void SweepUnusedStaticSymbol(MIRModule &m); + + CG *cg = nullptr; + BECommon *beCommon = nullptr; + MIRLower *mirLower = nullptr; + CGLowerer *cgLower = nullptr; + /* module options */ + CGOptions *cgOptions = nullptr; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_phi_elimination.h b/ecmascript/mapleall/maple_be/include/cg/cg_phi_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..2a0369afdbd1ec6cf0799ae538d46dc163db0ab3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_phi_elimination.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H +#define MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H + +#include "cgfunc.h" +#include "cg_ssa.h" + +namespace maplebe { +class PhiEliminate { + public: + PhiEliminate(CGFunc &f, CGSSAInfo &ssaAnalysisResult, MemPool &mp) : + cgFunc(&f), + ssaInfo(&ssaAnalysisResult), + phiEliAlloc(&mp), + eliminatedBB(phiEliAlloc.Adapter()), + replaceVreg(phiEliAlloc.Adapter()), + remateInfoAfterSSA(phiEliAlloc.Adapter()) { + tempRegNO = static_cast(GetSSAInfo()->GetAllSSAOperands().size()) + CGSSAInfo::SSARegNObase; + } + virtual ~PhiEliminate() = default; + CGSSAInfo *GetSSAInfo() { + return ssaInfo; + } + void TranslateTSSAToCSSA(); + /* move ssaRegOperand from ssaInfo to cgfunc */ + virtual void ReCreateRegOperand(Insn &insn) = 0; + + protected: + virtual Insn &CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) = 0; + virtual void MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) = 0; + virtual void AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const = 0; + void UpdateRematInfo(); + regno_t GetAndIncreaseTempRegNO(); + RegOperand *MakeRoomForNoDefVreg(RegOperand &conflictReg); + void RecordRematInfo(regno_t vRegNO, PregIdx pIdx); + PregIdx FindRematInfo(regno_t vRegNO) { + return remateInfoAfterSSA.count(vRegNO) ? remateInfoAfterSSA[vRegNO] : -1; + } + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; + MapleAllocator phiEliAlloc; + + private: + void PlaceMovInPredBB(uint32 predBBId, Insn &movInsn); + virtual RegOperand &CreateTempRegForCSSA(RegOperand &oriOpnd) = 0; + MapleSet eliminatedBB; + /* + * noDef Vregs occupy the vregno_t which is used for ssa re_creating + * first : conflicting VReg with noDef VReg second : new_Vreg opnd to replace occupied Vreg + */ + MapleUnorderedMap replaceVreg; + regno_t tempRegNO = 0; /* use for create mov insn for phi */ + MapleMap remateInfoAfterSSA; +}; + +class OperandPhiElmVisitor : public OperandVisitorBase, + public OperandVisitors { +}; + +MAPLE_FUNC_PHASE_DECLARE(CgPhiElimination, maplebe::CGFunc) +} + +#endif //MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_pre.h b/ecmascript/mapleall/maple_be/include/cg/cg_pre.h new file mode 100644 index 0000000000000000000000000000000000000000..40e80d65fa1687860f0c12b7b427fcc236ba19e2 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_pre.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_CGPRE_H +#define MAPLEBE_CG_INCLUDE_CGPRE_H +#include "cg_occur.h" +#include "cg_dominance.h" +#include "cgfunc.h" + +namespace maplebe { +enum PreKind { + kExprPre, + kStmtPre, + kLoadPre, + kAddrPre +}; + +class CGPre { + public: + CGPre(DomAnalysis &currDom, MemPool &memPool, MemPool &mp2, PreKind kind, uint32 limit) + : dom(&currDom), + ssaPreMemPool(&memPool), + ssaPreAllocator(&memPool), + perCandMemPool(&mp2), + perCandAllocator(&mp2), + workList(ssaPreAllocator.Adapter()), + preKind(kind), + allOccs(ssaPreAllocator.Adapter()), + phiOccs(ssaPreAllocator.Adapter()), + exitOccs(ssaPreAllocator.Adapter()), + preLimit(limit), + dfPhiDfns(std::less(), ssaPreAllocator.Adapter()), + varPhiDfns(std::less(), ssaPreAllocator.Adapter()), + temp2LocalRefVarMap(ssaPreAllocator.Adapter()) { + preWorkCandHashTable.GetWorkcandHashTable().fill(nullptr); + } + + virtual ~CGPre() = default; + + const MapleVector &GetRealOccList() const { + return workCand->GetRealOccs(); + } + + virtual BB *GetBB(uint32 id) const = 0; + virtual PUIdx GetPUIdx() const = 0; + virtual void SetCurFunction(PUIdx) const {} + + void GetIterDomFrontier(const BB *bb, MapleSet *dfset) const { + for (uint32 bbid : dom->GetIdomFrontier(bb->GetId())) { + (void)dfset->insert(dom->GetDtDfnItem(bbid)); + } + } + + PreWorkCand* GetWorkCand() const { + return workCand; + } + // compute downsafety for each PHI + static void ResetDS(CgPhiOcc *phiOcc); + void ComputeDS(); + + protected: + virtual void ComputeVarAndDfPhis() = 0; + virtual void CreateSortedOccs(); + CgOccur *CreateRealOcc(Insn &insn, Operand &opnd, OccType occType); + virtual void BuildWorkList() = 0; + /* for stmt pre only */ + void CreateExitOcc(BB &bb) { + CgOccur *exitOcc = ssaPreMemPool->New(kOccExit, 0, bb, nullptr); + exitOccs.push_back(exitOcc); + } + + DomAnalysis *dom; + MemPool *ssaPreMemPool; + MapleAllocator ssaPreAllocator; + MemPool *perCandMemPool; + MapleAllocator perCandAllocator; + MapleList workList; + PreWorkCand *workCand = nullptr; // the current PreWorkCand + PreKind preKind; + + // PRE work candidates; incremented by 2 for each tree; + // purpose is to avoid processing a node the third time + // inside a tree (which is a DAG) + // the following 3 lists are all maintained in order of dt_preorder + MapleVector allOccs; // cleared at start of each workcand + MapleVector phiOccs; // cleared at start of each workcand + MapleVector exitOccs; // this is shared by all workcands + uint32 preLimit; // set by command-line option to limit the number of candidates optimized (for debugging purpose) + // step 1 phi insertion data structures + // following are set of BBs in terms of their dfn's; index into + // dominance->pdt_preorder to get their bbid's + MapleSet dfPhiDfns; // phis inserted due to dominance frontiers + MapleSet varPhiDfns; // phis inserted due to the var operands + // step 2 renaming data structures + uint32 classCount = 0; // count class created during renaming + // is index into workCand->realOccs + // step 6 codemotion data structures + MapleMap temp2LocalRefVarMap; + int32 reBuiltOccIndex = -1; // stores the size of worklist every time when try to add new worklist, update before + // each code motion + uint32 strIdxCount = 0; // ssapre will create a lot of temp variables if using var to store redundances, start from 0 + PreWorkCandHashTable preWorkCandHashTable; +}; +} // namespace maple +#endif // MAPLEBE_CG_INCLUDE_CGPRE_H diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_prop.h 
b/ecmascript/mapleall/maple_be/include/cg/cg_prop.h new file mode 100644 index 0000000000000000000000000000000000000000..319ecee882f69fa4d81da5eb8b4a36ea1d8879d4 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_prop.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_PROP_H +#define MAPLEBE_INCLUDE_CG_PROP_H + +#include "cgfunc.h" +#include "cg_ssa.h" +#include "cg_dce.h" +#include "cg.h" +#include "reg_coalesce.h" + +namespace maplebe { +class CGProp { + public: + CGProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll) + : memPool(&mp), + cgFunc(&f), + propAlloc(&mp), + ssaInfo(&sInfo), + regll(&ll) { + cgDce = f.GetCG()->CreateCGDce(mp, f, sInfo); + } + virtual ~CGProp() = default; + + void DoCopyProp(); + void DoTargetProp(); + + protected: + MemPool *memPool; + CGFunc *cgFunc; + MapleAllocator propAlloc; + CGSSAInfo *GetSSAInfo() { + return ssaInfo; + } + CGDce *GetDce() { + return cgDce; + } + LiveIntervalAnalysis *GetRegll() { + return regll; + } + + private: + virtual void CopyProp() = 0; + virtual void TargetProp(Insn &insn) = 0; + virtual void PropPatternOpt() = 0; + CGSSAInfo *ssaInfo; + CGDce *cgDce = nullptr; + LiveIntervalAnalysis *regll; +}; + +class PropOptimizeManager { + public: + ~PropOptimizeManager() = default; + template + void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) const { + PropOptimizePattern optPattern(cgFunc, cgssaInfo, ll); + optPattern.Run(); + } + template + void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) const { + PropOptimizePattern optPattern(cgFunc, cgssaInfo); + optPattern.Run(); + } +}; + +class PropOptimizePattern { + public: + PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) + : cgFunc(cgFunc), + optSsaInfo(cgssaInfo), + regll(ll) {} + + PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) + : cgFunc(cgFunc), + optSsaInfo(cgssaInfo) {} + virtual ~PropOptimizePattern() = default; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Optimize(Insn &insn) = 0; + virtual void Run() = 0; + + protected: + std::string PhaseName() const { + return "propopt"; + } + virtual void Init() = 0; + Insn *FindDefInsn(const VRegVersion *useVersion); + + CGFunc &cgFunc; + CGSSAInfo *optSsaInfo = nullptr; + LiveIntervalAnalysis *regll = nullptr; +}; + +class ReplaceRegOpndVisitor : public OperandVisitorBase, + public OperandVisitors, + public OperandVisitor { + public: + ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldR ,RegOperand &newR) + : cgFunc(&f), + insn(&cInsn), + idx(cIdx), + oldReg(&oldR), + newReg(&newR) {} + virtual ~ReplaceRegOpndVisitor() = default; + + protected: + CGFunc *cgFunc; + Insn *insn; + uint32 idx; + RegOperand *oldReg; + RegOperand *newReg; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgCopyProp, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgTargetProp, maplebe::CGFunc) +} +#endif /* MAPLEBE_INCLUDE_CG_PROP_H */ diff --git 
a/ecmascript/mapleall/maple_be/include/cg/cg_ssa.h b/ecmascript/mapleall/maple_be/include/cg/cg_ssa.h new file mode 100644 index 0000000000000000000000000000000000000000..98578de0d85184c19b15e248f3ccfb7d9d473f6c --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_ssa.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CG_SSA_H +#define MAPLEBE_CG_INCLUDE_CG_SSA_H + +#include "cgfunc.h" +#include "cg_dominance.h" +#include "live.h" +#include "operand.h" +#include "visitor_common.h" + +namespace maplebe { +class CGSSAInfo; +enum SSAOpndDefBy { + kDefByNo, + kDefByInsn, + kDefByPhi +}; + +/* precise def/use info in machine instrcution */ +class DUInsnInfo { + public: + DUInsnInfo(Insn *cInsn, uint32 cIdx, MapleAllocator &alloc) : insn(cInsn), DUInfo(alloc.Adapter()) { + IncreaseDU(cIdx); + } + void IncreaseDU(uint32 idx) { + if (!DUInfo.count(idx)) { + DUInfo[idx] = 0; + } + DUInfo[idx]++; + } + void DecreaseDU(uint32 idx) { + DEBUG_ASSERT(DUInfo[idx] > 0, "no def/use any more"); + DUInfo[idx]--; + } + void ClearDU(uint32 idx) { + DEBUG_ASSERT(DUInfo.count(idx), "no def/use find"); + DUInfo[idx] = 0; + } + bool HasNoDU() { + for(auto it : DUInfo) { + if (it.second != 0) { + return false; + } + } + return true; + } + Insn *GetInsn() { + return insn; + } + MapleMap& GetOperands() { + return DUInfo; + } + private: + Insn *insn; + /* operand idx --- count */ + MapleMap DUInfo; +}; + +class VRegVersion { + public: + VRegVersion(const MapleAllocator &alloc, RegOperand &vReg, uint32 vIdx, regno_t vregNO) : + versionAlloc(alloc), + ssaRegOpnd(&vReg), + versionIdx(vIdx), + originalRegNO(vregNO), + useInsnInfos(versionAlloc.Adapter()) {} + void SetDefInsn(DUInsnInfo *duInfo, SSAOpndDefBy defTy) { + defInsnInfo = duInfo; + defType = defTy; + } + DUInsnInfo *GetDefInsnInfo() const { + return defInsnInfo; + } + SSAOpndDefBy GetDefType() const { + return defType; + } + RegOperand *GetSSAvRegOpnd(bool isDef = true) { + if (!isDef) { + return implicitCvtedRegOpnd; + } + return ssaRegOpnd; + } + uint32 GetVersionIdx() const { + return versionIdx; + } + regno_t GetOriginalRegNO() const { + return originalRegNO; + } + void AddUseInsn(CGSSAInfo &ssaInfo, Insn &useInsn, uint32 idx); + /* elimate dead use */ + void CheckDeadUse(const Insn &useInsn); + void RemoveUseInsn(const Insn &useInsn, uint32 idx); + MapleUnorderedMap &GetAllUseInsns() { + return useInsnInfos; + } + void MarkDeleted() { + deleted = true; + } + void MarkRecovery() { + deleted = false; + } + bool IsDeleted() const { + return deleted; + } + void SetImplicitCvt() { + hasImplicitCvt = true; + } + bool HasImplicitCvt() const { + return hasImplicitCvt; + } + + private: + MapleAllocator versionAlloc; + /* if this version has implicit conversion, it refers to def reg */ + RegOperand *ssaRegOpnd; + RegOperand *implicitCvtedRegOpnd = nullptr; + uint32 versionIdx; + regno_t originalRegNO; + DUInsnInfo *defInsnInfo = nullptr; + SSAOpndDefBy defType = kDefByNo; + /* insn ID -> 
insn* & operand Idx */ + // --> vector? + MapleUnorderedMap useInsnInfos; + bool deleted = false; + /* + * def reg (size:64) or def reg (size:32) --> + * all use reg (size:32) all use reg (size:64) + * do not support single use which has implicit conversion yet + * support single use in DUInfo in future + */ + bool hasImplicitCvt = false; +}; + +class CGSSAInfo { + public: + CGSSAInfo(CGFunc &f, DomAnalysis &da, MemPool &mp, MemPool &tmp) : + cgFunc(&f), + memPool(&mp), + tempMp(&tmp), + ssaAlloc(&mp), + domInfo(&da), + renamedBBs(ssaAlloc.Adapter()), + vRegDefCount(ssaAlloc.Adapter()), + vRegStk(ssaAlloc.Adapter()), + allSSAOperands(ssaAlloc.Adapter()), + noDefVRegs(ssaAlloc.Adapter()), + reversePostOrder(ssaAlloc.Adapter()), + safePropInsns(ssaAlloc.Adapter()) {} + virtual ~CGSSAInfo() = default; + void ConstructSSA(); + VRegVersion *FindSSAVersion(regno_t ssaRegNO); /* Get specific ssa info */ + /* replace insn & update ssaInfo */ + virtual void ReplaceInsn(Insn &oriInsn, Insn &newInsn) = 0; + virtual void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) = 0; + virtual void CreateNewInsnSSAInfo(Insn &newInsn) = 0; + PhiOperand &CreatePhiOperand(); + + DUInsnInfo *CreateDUInsnInfo(Insn *cInsn, uint32 idx) { + return memPool->New(cInsn, idx, ssaAlloc); + } + const MapleUnorderedMap &GetAllSSAOperands() const { + return allSSAOperands; + } + bool IsNoDefVReg(regno_t vRegNO) const { + return noDefVRegs.find(vRegNO) != noDefVRegs.end(); + } + uint32 GetVersionNOOfOriginalVreg(regno_t vRegNO) { + if (vRegDefCount.count(vRegNO)) { + return vRegDefCount[vRegNO]; + } + DEBUG_ASSERT(false, " original vreg is not existed"); + return 0; + } + MapleVector &GetReversePostOrder() { + return reversePostOrder; + } + void InsertSafePropInsn(uint32 insnId) { + (void)safePropInsns.emplace_back(insnId); + } + MapleVector &GetSafePropInsns() { + return safePropInsns; + } + void DumpFuncCGIRinSSAForm() const; + virtual void DumpInsnInSSAForm(const Insn &insn) const = 0; + static uint32 SSARegNObase; + + protected: + VRegVersion *CreateNewVersion(RegOperand &virtualOpnd, Insn &defInsn, uint32 idx, bool isDefByPhi = false); + virtual RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) = 0; + bool IncreaseSSAOperand(regno_t vRegNO, VRegVersion *vst); + uint32 IncreaseVregCount(regno_t vRegNO); + VRegVersion *GetVersion(const RegOperand &virtualOpnd); + MapleUnorderedMap &GetPrivateAllSSAOperands() { + return allSSAOperands; + } + void AddNoDefVReg(regno_t noDefVregNO) { + DEBUG_ASSERT(!noDefVRegs.count(noDefVregNO), "duplicate no def Reg, please check"); + noDefVRegs.emplace(noDefVregNO); + } + void MarkInsnsInSSA(Insn &insn); + CGFunc *cgFunc = nullptr; + MemPool *memPool = nullptr; + MemPool *tempMp = nullptr; + MapleAllocator ssaAlloc; + + private: + void InsertPhiInsn(); + void RenameVariablesForBB(uint32 bbID); + void RenameBB(BB &bb); + void RenamePhi(BB &bb); + virtual void RenameInsn(Insn &insn) = 0; + /* build ssa on virtual register only */ + virtual RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) = 0; + void RenameSuccPhiUse(const BB &bb); + void PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd); + + void AddRenamedBB(uint32 bbID) { + DEBUG_ASSERT(!renamedBBs.count(bbID), "cgbb has been renamed already"); + renamedBBs.emplace(bbID); + } + bool IsBBRenamed(uint32 bbID) const { + return renamedBBs.count(bbID); + } + void SetReversePostOrder(); + + DomAnalysis *domInfo = nullptr; + MapleSet renamedBBs; + /* original regNO - number of 
definitions (start from 0) */ + MapleMap vRegDefCount; + /* original regNO - ssa version stk */ + MapleMap> vRegStk; + /* ssa regNO - ssa virtual operand version */ + MapleUnorderedMap allSSAOperands; + /* For virtual registers which do not have definition */ + MapleSet noDefVRegs; + /* only save bb_id to reduce space */ + MapleVector reversePostOrder; + /* destSize < srcSize but can be propagated */ + MapleVector safePropInsns; + int32 insnCount = 0; +}; + +class SSAOperandVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + SSAOperandVisitor(Insn &cInsn, const OpndDesc &cDes, uint32 idx) : insn(&cInsn), opndDes(&cDes), idx(idx) {} + SSAOperandVisitor() = default; + virtual ~SSAOperandVisitor() = default; + void SetInsnOpndInfo(Insn &cInsn, const OpndDesc &cDes, uint32 index) { + insn = &cInsn; + opndDes = &cDes; + this->idx = index; + } + + protected: + Insn *insn = nullptr; + const OpndDesc *opndDes = nullptr; + uint32 idx = 0; +}; + +class SSAOperandDumpVisitor : public OperandVisitorBase, + public OperandVisitors, + public OperandVisitor { + public: + explicit SSAOperandDumpVisitor(const MapleUnorderedMap &allssa) : allSSAOperands(allssa) {} + virtual ~SSAOperandDumpVisitor() = default; + void SetHasDumped() { + hasDumped = true; + } + bool HasDumped() const { + return hasDumped; + } + bool hasDumped = false; + protected: + const MapleUnorderedMap &allSSAOperands; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgSSAConstruct, maplebe::CGFunc); +CGSSAInfo *GetResult() { + return ssaInfo; +} +CGSSAInfo *ssaInfo = nullptr; + private: + void GetAnalysisDependence(maple::AnalysisDep &aDep) const override; +MAPLE_FUNC_PHASE_DECLARE_END +} + +#endif //MAPLEBE_CG_INCLUDE_CG_SSA_H diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_ssa_pre.h b/ecmascript/mapleall/maple_be/include/cg/cg_ssa_pre.h new file mode 100644 index 0000000000000000000000000000000000000000..10256204c14716d888a4de1c567e36118c20910c --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_ssa_pre.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H +#define MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "cg_dominance.h" + +// Use SSAPRE to determine where to insert saves for callee-saved registers. +// The external interface is DoSavePlacementOpt(). Class SsaPreWorkCand is used +// as input/output interface. + +namespace maplebe { + +using BBId = uint32; + +// This must have been constructed by the caller of DoSavePlacementOpt() and +// passed to it as parameter. The caller of DoSavePlacementOpt() describes +// the problem via occBBs. DoSavePlacementOpt()'s outputs are returned to the +// caller by setting saveAtEntryBBs. 
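+// A minimal call-protocol sketch, assuming only the interface documented
+// above: the register allocator fills occBBs, runs the optimization, then
+// reads the placement answer back out of the same work candidate. The names
+// alloc, cgFunc, domInfo and bbUsingCalleeSavedReg are placeholders.
+//   SsaPreWorkCand cand(&alloc);                       // alloc: a MapleAllocator
+//   cand.occBBs.insert(bbUsingCalleeSavedReg->GetId());
+//   DoSavePlacementOpt(cgFunc, domInfo, &cand);
+//   if (cand.saveAtProlog) {
+//     // shrinkwrapping failed; save the register in the function prolog as usual
+//   } else {
+//     // insert a save at the entry of every BB listed in cand.saveAtEntryBBs
+//   }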
+class SsaPreWorkCand {
+ public:
+  explicit SsaPreWorkCand(MapleAllocator *alloc):
+      occBBs(alloc->Adapter()),
+      saveAtEntryBBs(alloc->Adapter()) {}
+  // inputs
+  MapleSet<BBId> occBBs;  // Id's of BBs with appearances of the callee-saved reg
+  // outputs
+  MapleSet<BBId> saveAtEntryBBs;  // Id's of BBs to insert saves of the register at BB entry
+  bool saveAtProlog = false;  // if true, no shrinkwrapping can be done and
+                              // the other outputs can be ignored
+};
+
+extern void DoSavePlacementOpt(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand);
+
+enum AOccType {
+  kAOccUndef,
+  kAOccReal,
+  kAOccPhi,
+  kAOccPhiOpnd,
+  kAOccExit,
+};
+
+class Occ {
+ public:
+  Occ(AOccType ty, BB *bb) : occTy(ty), cgbb(bb) {}
+  virtual ~Occ() = default;
+
+  virtual void Dump() const = 0;
+  bool IsDominate(DomAnalysis *dom, const Occ *occ) const {
+    return dom->Dominate(*cgbb, *occ->cgbb);
+  }
+
+  AOccType occTy;
+  uint32 classId = 0;
+  BB *cgbb;            // the BB it occurs in
+  Occ *def = nullptr;  // points to its single def
+};
+
+class RealOcc : public Occ {
+ public:
+  explicit RealOcc(BB *bb): Occ(kAOccReal, bb) {}
+  virtual ~RealOcc() = default;
+
+  void Dump() const override {
+    LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId();
+    LogInfo::MapleLogger() << " classId" << classId;
+  }
+
+  bool redundant = true;
+};
+
+class PhiOcc;
+
+class PhiOpndOcc : public Occ {
+ public:
+  explicit PhiOpndOcc(BB *bb): Occ(kAOccPhiOpnd, bb) {}
+  virtual ~PhiOpndOcc() = default;
+
+  void Dump() const override {
+    LogInfo::MapleLogger() << "PhiOpndOcc at bb" << cgbb->GetId() << " classId" << classId;
+  }
+
+  PhiOcc *defPhiOcc = nullptr;  // its lhs definition
+  bool hasRealUse = false;
+  bool insertHere = false;
+};
+
+class PhiOcc : public Occ {
+ public:
+  PhiOcc(BB *bb, MapleAllocator &alloc)
+      : Occ(kAOccPhi, bb), phiOpnds(alloc.Adapter()) {}
+  virtual ~PhiOcc() = default;
+
+  bool WillBeAvail() const {
+    return isCanBeAvail && !isLater;
+  }
+
+  void Dump() const override {
+    LogInfo::MapleLogger() << "PhiOcc at bb" << cgbb->GetId() << " classId" << classId << " Phi[";
+    for (size_t i = 0; i < phiOpnds.size(); i++) {
+      phiOpnds[i]->Dump();
+      if (i != phiOpnds.size() - 1) {
+        LogInfo::MapleLogger() << ", ";
+      }
+    }
+    LogInfo::MapleLogger() << "]";
+  }
+
+  bool isDownsafe = true;
+  bool speculativeDownsafe = false;  // true if set to downsafe via speculation
+  bool isCanBeAvail = true;
+  bool isLater = true;
+  MapleVector<PhiOpndOcc*> phiOpnds;
+};
+
+class ExitOcc : public Occ {
+ public:
+  explicit ExitOcc(BB *bb) : Occ(kAOccExit, bb) {}
+  virtual ~ExitOcc() = default;
+
+  void Dump() const override {
+    LogInfo::MapleLogger() << "ExitOcc at bb" << cgbb->GetId();
+  }
+};
+
+class SSAPre {
+ public:
+  SSAPre(CGFunc *cgfunc, DomAnalysis *dm, MemPool *memPool, SsaPreWorkCand *wkcand, bool aeap, bool enDebug)
+      : cgFunc(cgfunc),
+        dom(dm),
+        preMp(memPool),
+        preAllocator(memPool),
+        workCand(wkcand),
+        fullyAntBBs(cgfunc->GetAllBBs().size(), true, preAllocator.Adapter()),
+        phiDfns(std::less<uint32>(), preAllocator.Adapter()),
+        classCount(0),
+        realOccs(preAllocator.Adapter()),
+        allOccs(preAllocator.Adapter()),
+        phiOccs(preAllocator.Adapter()),
+        exitOccs(preAllocator.Adapter()),
+        asEarlyAsPossible(aeap),
+        enabledDebug(enDebug) {}
+  ~SSAPre() = default;
+
+  void ApplySSAPre();
+
+ private:
+  // step 6 methods
+  void CodeMotion();
+  // step 5 methods
+  void Finalize();
+  // step 4 methods
+  void ResetCanBeAvail(PhiOcc *phi) const;
+  void ComputeCanBeAvail() const;
+  void ResetLater(PhiOcc *phi) const;
+  void ComputeLater() const;
+  // step 3 methods
+  void ResetDownsafe(const PhiOpndOcc *phiOpnd) const;
+  void ComputeDownsafe() const;
+  // step 2 methods
+  void Rename();
+  // step 1 methods
+  void GetIterDomFrontier(const BB *bb, MapleSet<uint32> *dfset) const {
+    for (BBId bbid : dom->GetIdomFrontier(bb->GetId())) {
+      (void)dfset->insert(dom->GetDtDfnItem(bbid));
+    }
+  }
+  void FormPhis();
+  void CreateSortedOccs();
+  // step 0 methods
+  void PropagateNotAnt(BB *bb, std::set<BB*> *visitedBBs);
+  void FormRealsNExits();
+
+  CGFunc *cgFunc;
+  DomAnalysis *dom;
+  MemPool *preMp;
+  MapleAllocator preAllocator;
+  SsaPreWorkCand *workCand;
+  // step 0
+  MapleVector<bool> fullyAntBBs;  // index is BBid; true if occ is fully anticipated at BB entry
+  // step 1 phi insertion data structures:
+  MapleSet<uint32> phiDfns;  // set by FormPhis(); set of BBs in terms of their
+                             // dfn's; index into dominance->dt_preorder to get
+                             // their bbid's
+  // step 2 renaming
+  uint32 classCount;  // for assigning new class id
+  // the following 4 lists are all maintained in order of dt_preorder
+  MapleVector<Occ*> realOccs;
+  MapleVector<Occ*> allOccs;
+  MapleVector<PhiOcc*> phiOccs;
+  MapleVector<ExitOcc*> exitOccs;
+  bool asEarlyAsPossible;
+  bool enabledDebug;
+};
+
+}  // namespace maplebe
+#endif  // MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H
diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_ssu_pre.h b/ecmascript/mapleall/maple_be/include/cg/cg_ssu_pre.h
new file mode 100644
index 0000000000000000000000000000000000000000..7df4e08ca6a4eee0002e33fd4bbb1faa044413fb
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cg_ssu_pre.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_CG_INCLUDE_CGSSUPRE_H
+#define MAPLEBE_CG_INCLUDE_CGSSUPRE_H
+#include <set>
+#include "mempool.h"
+#include "mempool_allocator.h"
+#include "cg_dominance.h"
+#include "cg_ssa_pre.h"
+
+// Use SSUPRE to determine where to insert restores for callee-saved registers.
+// The external interface is DoRestorePlacementOpt(). Class SPreWorkCand is used
+// as input/output interface.
+
+namespace maplebe {
+
+// This must have been constructed by the caller of DoRestorePlacementOpt() and
+// passed to it as parameter. The caller of DoRestorePlacementOpt() describes
+// the problem via occBBs and saveBBs. DoRestorePlacementOpt()'s outputs are
+// returned to the caller by setting restoreAtEntryBBs and restoreAtExitBBs.
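+// A companion sketch for the restore side, again assuming only the interface
+// documented above; saveBBs would typically echo the save placement computed
+// by DoSavePlacementOpt() in cg_ssa_pre.h. The names alloc, cgFunc, pdomInfo,
+// useBB and saveBB are placeholders.
+//   SPreWorkCand cand(&alloc);
+//   cand.occBBs.insert(useBB->GetId());    // BBs touching the callee-saved reg
+//   cand.saveBBs.insert(saveBB->GetId());  // BBs where the save was placed
+//   DoRestorePlacementOpt(cgFunc, pdomInfo, &cand);
+//   if (!cand.restoreAtEpilog) {
+//     // restores go at entry of restoreAtEntryBBs and at exit of restoreAtExitBBs
+//   }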
+class SPreWorkCand { + public: + explicit SPreWorkCand(MapleAllocator *alloc): + occBBs(alloc->Adapter()), saveBBs(alloc->Adapter()), + restoreAtEntryBBs(alloc->Adapter()), restoreAtExitBBs(alloc->Adapter()) {} + // inputs + MapleSet occBBs; // Id's of BBs with appearances of the callee-saved reg + MapleSet saveBBs; // Id's of BBs with saves of the callee-saved reg + // outputs + MapleSet restoreAtEntryBBs; // Id's of BBs to insert restores of the register at BB entry + MapleSet restoreAtExitBBs; // Id's of BBs to insert restores of the register at BB exit + bool restoreAtEpilog = false; // if true, no shrinkwrapping can be done and + // the other outputs can be ignored +}; + +extern void DoRestorePlacementOpt(CGFunc *f, PostDomAnalysis *pdom, SPreWorkCand *workCand); + +enum SOccType { + kSOccUndef, + kSOccReal, + kSOccLambda, + kSOccLambdaRes, + kSOccEntry, + kSOccKill, +}; + +class SOcc { + public: + SOcc(SOccType ty, BB *bb) : occTy(ty), cgbb(bb) {} + virtual ~SOcc() = default; + + virtual void Dump() const = 0; + bool IsPostDominate(PostDomAnalysis *pdom, const SOcc *occ) const { + return pdom->PostDominate(*cgbb, *occ->cgbb); + } + + + SOccType occTy; + uint32 classId = 0; + BB *cgbb; // the BB it occurs in + SOcc *use = nullptr; // points to its single use +}; + +class SRealOcc : public SOcc { + public: + explicit SRealOcc(BB *bb): SOcc(kSOccReal, bb) {} + virtual ~SRealOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId(); + LogInfo::MapleLogger() << " classId" << classId; + } + + + bool redundant = true; +}; + +class SLambdaOcc; + +class SLambdaResOcc : public SOcc { + public: + explicit SLambdaResOcc(BB *bb): SOcc(kSOccLambdaRes, bb) {} + virtual ~SLambdaResOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "LambdaResOcc at bb" << cgbb->GetId() << " classId" << classId; + } + + + SLambdaOcc *useLambdaOcc = nullptr; // its rhs use + bool hasRealUse = false; + bool insertHere = false; +}; + +class SLambdaOcc : public SOcc { + public: + SLambdaOcc(BB *bb, MapleAllocator &alloc) + : SOcc(kSOccLambda, bb), lambdaRes(alloc.Adapter()) {} + virtual ~SLambdaOcc() = default; + + bool WillBeAnt() const { + return isCanBeAnt && !isEarlier; + } + + void Dump() const override { + LogInfo::MapleLogger() << "LambdaOcc at bb" << cgbb->GetId() << " classId" << classId << " Lambda["; + for (size_t i = 0; i < lambdaRes.size(); i++) { + lambdaRes[i]->Dump(); + if (i != lambdaRes.size() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "]"; + } + + + bool isUpsafe = true; + bool isCanBeAnt = true; + bool isEarlier = true; + MapleVector lambdaRes; +}; + +class SEntryOcc : public SOcc { + public: + explicit SEntryOcc(BB *bb) : SOcc(kSOccEntry, bb) {} + virtual ~SEntryOcc() = default; + + void Dump() const { + LogInfo::MapleLogger() << "EntryOcc at bb" << cgbb->GetId(); + } +}; + +class SKillOcc : public SOcc { + public: + explicit SKillOcc(BB *bb) : SOcc(kSOccKill, bb) {} + virtual ~SKillOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "KillOcc at bb" << cgbb->GetId(); + } +}; + +class SSUPre { + public: + SSUPre(CGFunc *cgfunc, PostDomAnalysis *pd, MemPool *memPool, SPreWorkCand *wkcand, bool alap, bool enDebug) + : cgFunc(cgfunc), + pdom(pd), + spreMp(memPool), + spreAllocator(memPool), + workCand(wkcand), + fullyAvailBBs(cgfunc->GetAllBBs().size(), true, spreAllocator.Adapter()), + lambdaDfns(std::less(), spreAllocator.Adapter()), + classCount(0), + 
realOccs(spreAllocator.Adapter()), + allOccs(spreAllocator.Adapter()), + lambdaOccs(spreAllocator.Adapter()), + entryOccs(spreAllocator.Adapter()), + asLateAsPossible(alap), + enabledDebug(enDebug) { + CreateEntryOcc(cgfunc->GetFirstBB()); + } + ~SSUPre() = default; + + void ApplySSUPre(); + + private: + // step 6 methods + void CodeMotion(); + // step 5 methods + void Finalize(); + // step 4 methods + void ResetCanBeAnt(SLambdaOcc *lambda) const; + void ComputeCanBeAnt() const; + void ResetEarlier(SLambdaOcc *lambda) const; + void ComputeEarlier() const; + // step 3 methods + void ResetUpsafe(const SLambdaResOcc *lambdaRes) const; + void ComputeUpsafe() const; + // step 2 methods + void Rename(); + // step 1 methods + void GetIterPdomFrontier(const BB *bb, MapleSet *pdfset) const { + for (BBId bbid : pdom->GetIpdomFrontier(bb->GetId())) { + (void)pdfset->insert(pdom->GetPdtDfnItem(bbid)); + } + } + void FormLambdas(); + void CreateSortedOccs(); + // step 0 methods + void CreateEntryOcc(BB *bb) { + SEntryOcc *entryOcc = spreMp->New(bb); + entryOccs.push_back(entryOcc); + } + void PropagateNotAvail(BB *bb, std::set *visitedBBs); + void FormReals(); + + CGFunc *cgFunc; + PostDomAnalysis *pdom; + MemPool *spreMp; + MapleAllocator spreAllocator; + SPreWorkCand *workCand; + // step 0 + MapleVector fullyAvailBBs; // index is BBid; true if occ is fully available at BB exit + // step 1 lambda insertion data structures: + MapleSet lambdaDfns; // set by FormLambdas(); set of BBs in terms of + // their dfn's; index into + // dominance->pdt_preorder to get their bbid's + // step 2 renaming + uint32 classCount; // for assigning new class id + // the following 4 lists are all maintained in order of pdt_preorder + MapleVector realOccs; // both real and kill occurrences + MapleVector allOccs; + MapleVector lambdaOccs; + MapleVector entryOccs; + bool asLateAsPossible; + bool enabledDebug; +}; + +}; // namespace maplabe +#endif // MAPLEBE_CG_INCLUDE_CGSSUPRE_H diff --git a/ecmascript/mapleall/maple_be/include/cg/cg_validbit_opt.h b/ecmascript/mapleall/maple_be/include/cg/cg_validbit_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..c3ab4a650da4e00fb98fd2b0146b45f144461cb3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cg_validbit_opt.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H +#define MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H + +#include "cg.h" +#include "cgfunc.h" +#include "bb.h" +#include "insn.h" +#include "cg_ssa.h" + +namespace maplebe { +#define CG_VALIDBIT_OPT_DUMP CG_DEBUG_FUNC(*cgFunc) +class ValidBitPattern { + public: + ValidBitPattern(CGFunc &f, CGSSAInfo &info) : + cgFunc(&f), ssaInfo(&info) {} + virtual ~ValidBitPattern() { + cgFunc = nullptr; + ssaInfo = nullptr; + } + std::string PhaseName() const { + return "cgvalidbitopt"; + } + + virtual std::string GetPatternName() = 0; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Run(BB &bb, Insn &insn) = 0; + Insn *GetDefInsn(const RegOperand &useReg); + InsnSet GetAllUseInsn(const RegOperand &defReg); + void DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn); + + protected: + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; + +class ValidBitOpt { + public: + ValidBitOpt(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {} + virtual ~ValidBitOpt() { + cgFunc = nullptr; + ssaInfo = nullptr; + } + void Run(); + static uint32 GetImmValidBit(int64 value, uint32 size) { + if (value < 0) { + return size; + } else if (value == 0) { + return k1BitSize; + } + uint32 pos = 0; + constexpr int64 mask = 1; + for (uint32 i = 0; i <= k8BitSize * sizeof(int64); ++i, value >>= 1) { + if ((value & mask) == mask) { + pos = i + 1; + } + } + return pos; + } + + static int64 GetLogValueAtBase2(int64 val) { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; + } + + template + void Optimize(BB &bb, Insn &insn) { + VBOpt opt(*cgFunc, *ssaInfo); + opt.Run(bb, insn); + } + virtual void DoOpt(BB &bb, Insn &insn) = 0; + void RectifyValidBitNum(); + void RecoverValidBitNum(); + virtual void SetValidBits(Insn &insn) = 0; + virtual bool SetPhiValidBits(Insn &insn) = 0; + + protected: + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; +MAPLE_FUNC_PHASE_DECLARE(CgValidBitOpt, maplebe::CGFunc) +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/cgbb.h b/ecmascript/mapleall/maple_be/include/cg/cgbb.h new file mode 100644 index 0000000000000000000000000000000000000000..9d8b49517d400fe8c30d38b69dc2790be89be06b --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/cgbb.h @@ -0,0 +1,874 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CGBB_H +#define MAPLEBE_INCLUDE_CG_CGBB_H + +#include "isa.h" +#include "insn.h" +#include "sparse_datainfo.h" + +/* Maple IR headers */ +#include "mir_nodes.h" +#include "mir_symbol.h" + +/* Maple MP header */ +#include "mempool_allocator.h" +#include "maple_phase_manager.h" + +namespace maplebe { +/* For get bb */ +#define FIRST_BB_OF_FUNC(FUNC) ((FUNC)->GetFirstBB()) +#define LAST_BB_OF_FUNC(FUNC) ((FUNC)->GetLastBB()) + +/* For iterating over basic blocks. 
*/ +#define FOR_BB_BETWEEN(BASE, FROM, TO, DIR) for (BB * (BASE) = (FROM); (BASE) != (TO); (BASE) = (BASE)->DIR()) +#define FOR_BB_BETWEEN_CONST(BASE, FROM, TO, DIR) \ + for (const BB * (BASE) = (FROM); (BASE) != (TO); (BASE) = (BASE)->DIR()) + +#define FOR_ALL_BB_CONST(BASE, FUNC) FOR_BB_BETWEEN_CONST(BASE, FIRST_BB_OF_FUNC(FUNC), nullptr, GetNext) +#define FOR_ALL_BB(BASE, FUNC) FOR_BB_BETWEEN(BASE, FIRST_BB_OF_FUNC(FUNC), nullptr, GetNext) +#define FOR_ALL_BB_REV(BASE, FUNC) FOR_BB_BETWEEN(BASE, LAST_BB_OF_FUNC(FUNC), nullptr, GetPrev) + + +/* For get insn */ +#define FIRST_INSN(BLOCK) (BLOCK)->GetFirstInsn() +#define LAST_INSN(BLOCK) (BLOCK)->GetLastInsn() +#define NEXT_INSN(INSN) (INSN)->GetNext() +#define PREV_INSN(INSN) (INSN)->GetPrev() + +/* For iterating over insns in basic block. */ +#define FOR_INSN_BETWEEN(INSN, FROM, TO, DIR) \ + for (Insn * (INSN) = (FROM); (INSN) != nullptr && (INSN) != (TO); (INSN) = (INSN)->DIR) + +#define FOR_BB_INSNS(INSN, BLOCK) \ + for (Insn * (INSN) = FIRST_INSN(BLOCK); (INSN) != nullptr; (INSN) = (INSN)->GetNext()) +#define FOR_BB_INSNS_CONST(INSN, BLOCK) \ + for (const Insn * (INSN) = FIRST_INSN(BLOCK); (INSN) != nullptr; (INSN) = (INSN)->GetNext()) + +#define FOR_BB_INSNS_REV(INSN, BLOCK) \ + for (Insn * (INSN) = LAST_INSN(BLOCK); (INSN) != nullptr; (INSN) = (INSN)->GetPrev()) + +/* For iterating over insns in basic block when we might remove the current insn. */ +#define FOR_BB_INSNS_SAFE(INSN, BLOCK, NEXT) \ + for (Insn * (INSN) = FIRST_INSN(BLOCK), *(NEXT) = (INSN) ? NEXT_INSN(INSN) : nullptr; (INSN) != nullptr; \ + (INSN) = (NEXT), (NEXT) = (INSN) ? NEXT_INSN(INSN) : nullptr) + +#define FOR_BB_INSNS_REV_SAFE(INSN, BLOCK, NEXT) \ + for (Insn * (INSN) = LAST_INSN(BLOCK), *(NEXT) = (INSN) ? PREV_INSN(INSN) : nullptr; (INSN) != nullptr; \ + (INSN) = (NEXT), (NEXT) = (INSN) ? PREV_INSN(INSN) : nullptr) + +class CGFuncLoops; +class CGFunc; + +class BB { + public: + enum BBKind : uint8 { + kBBFallthru, /* default */ + kBBIf, /* conditional branch */ + kBBGoto, /* unconditional branch */ + kBBIgoto, + kBBReturn, + kBBIntrinsic, /* BB created by inlining intrinsics; shares a lot with BB_if */ + kBBRangeGoto, + kBBThrow, /* For call __java_throw_* and call exit, which will run out of function. 
*/ + kBBLast + }; + + BB(uint32 bbID, MapleAllocator &mallocator) + : id(bbID), + kind(kBBFallthru), /* kBBFallthru default kind */ + labIdx(MIRLabelTable::GetDummyLabel()), + preds(mallocator.Adapter()), + succs(mallocator.Adapter()), + ehPreds(mallocator.Adapter()), + ehSuccs(mallocator.Adapter()), + loopPreds(mallocator.Adapter()), + loopSuccs(mallocator.Adapter()), + liveInRegNO(mallocator.Adapter()), + liveOutRegNO(mallocator.Adapter()), + callInsns(mallocator.Adapter()), + rangeGotoLabelVec(mallocator.Adapter()), + phiInsnList(mallocator.Adapter()) {} + + virtual ~BB() = default; + + virtual BB *Clone(MemPool &memPool) const { + BB *bb = memPool.Clone(*this); + return bb; + } + + void Dump() const; + bool IsCommentBB() const; + bool IsEmptyOrCommentOnly() const; + bool IsSoloGoto() const; + BB* GetValidPrev(); + + bool IsEmpty() const { + if (lastInsn == nullptr) { + CHECK_FATAL(firstInsn == nullptr, "firstInsn must be nullptr"); + return true; + } else { + CHECK_FATAL(firstInsn != nullptr, "firstInsn must not be nullptr"); + return false; + } + } + + const std::string &GetKindName() const { + DEBUG_ASSERT(kind < kBBLast, "out of range in GetKindName"); + return bbNames[kind]; + } + + void SetKind(BBKind bbKind) { + kind = bbKind; + } + + BBKind GetKind() const { + return kind; + } + + void AddLabel(LabelIdx idx) { + labIdx = idx; + } + + void AppendBB(BB &bb) { + bb.prev = this; + bb.next = next; + if (next != nullptr) { + next->prev = &bb; + } + next = &bb; + } + + void PrependBB(BB &bb) { + bb.next = this; + bb.prev = this->prev; + if (this->prev != nullptr) { + this->prev->next = &bb; + } + this->prev = &bb; + } + + Insn *InsertInsnBefore(Insn &existing, Insn &newInsn); + + /* returns newly inserted instruction */ + Insn *InsertInsnAfter(Insn &existing, Insn &newInsn); + + void InsertInsnBegin(Insn &insn) { + if (lastInsn == nullptr) { + firstInsn = lastInsn = &insn; + insn.SetNext(nullptr); + insn.SetPrev(nullptr); + insn.SetBB(this); + } else { + InsertInsnBefore(*firstInsn, insn); + } + } + + void AppendInsn(Insn &insn) { + if (firstInsn != nullptr && lastInsn != nullptr) { + InsertInsnAfter(*lastInsn, insn); + } else { + firstInsn = lastInsn = &insn; + insn.SetNext(nullptr); + insn.SetPrev(nullptr); + insn.SetBB(this); + } + internalFlag1++; + } + + void ReplaceInsn(Insn &insn, Insn &newInsn); + + void RemoveInsn(Insn &insn); + + void RemoveInsnPair(Insn &insn, const Insn &nextInsn); + + void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); + + /* append all insns from bb into this bb */ + void AppendBBInsns(BB &bb); + + /* append all insns from bb into this bb */ + void InsertAtBeginning(BB &bb); + void InsertAtEnd(BB &bb); + void InsertAtEndMinus1(BB &bb); + + /* clear BB but don't remove insns of this */ + void ClearInsns() { + firstInsn = lastInsn = nullptr; + } + + uint32 NumPreds() const { + return static_cast(preds.size()); + } + + bool IsPredecessor(const BB &predBB) { + for (const BB *bb : preds) { + if (bb == &predBB) { + return true; + } + } + return false; + } + + bool IsBackEdgeDest() const { + return !loopPreds.empty(); + } + + void RemoveFromPredecessorList(const BB &bb) { + for (auto i = preds.begin(); i != preds.end(); ++i) { + if (*i == &bb) { + preds.erase(i); + return; + } + } + CHECK_FATAL(false, "request to remove a non-existent element?"); + } + + void RemoveFromSuccessorList(const BB &bb) { + for (auto i = succs.begin(); i != succs.end(); ++i) { + if (*i == &bb) { + succs.erase(i); + return; + } + } + CHECK_FATAL(false, "request to remove a 
non-existent element?"); + } + + uint32 NumSuccs() const { + return static_cast(succs.size()); + } + + bool HasCall() const { + return hasCall; + } + + void SetHasCall() { + hasCall = true; + } + + /* Number of instructions excluding DbgInsn and comments */ + int32 NumInsn() const; + uint32 GetId() const { + return id; + } + uint32 GetLevel() const { + return level; + } + void SetLevel(uint32 arg) { + level = arg; + } + uint32 GetFrequency() const { + return frequency; + } + void SetFrequency(uint32 arg) { + frequency = arg; + } + BB *GetNext() { + return next; + } + const BB *GetNext() const { + return next; + } + BB *GetPrev() { + return prev; + } + const BB *GetPrev() const { + return prev; + } + void SetNext(BB *arg) { + next = arg; + } + void SetPrev(BB *arg) { + prev = arg; + } + LabelIdx GetLabIdx() const { + return labIdx; + } + void SetLabIdx(LabelIdx arg) { + labIdx = arg; + } + StmtNode *GetFirstStmt() { + return firstStmt; + } + const StmtNode *GetFirstStmt() const { + return firstStmt; + } + void SetFirstStmt(StmtNode &arg) { + firstStmt = &arg; + } + StmtNode *GetLastStmt() { + return lastStmt; + } + const StmtNode *GetLastStmt() const { + return lastStmt; + } + void SetLastStmt(StmtNode &arg) { + lastStmt = &arg; + } + Insn *GetFirstInsn() { + return firstInsn; + } + const Insn *GetFirstInsn() const { + return firstInsn; + } + + void SetFirstInsn(Insn *arg) { + firstInsn = arg; + } + Insn *GetFirstMachineInsn() { + FOR_BB_INSNS(insn, this) { + if (insn->IsMachineInstruction()) { + return insn; + } + } + return nullptr; + } + Insn *GetLastMachineInsn() { + FOR_BB_INSNS_REV(insn, this) { + if (insn->IsMachineInstruction()) { + return insn; + } + } + return nullptr; + } + Insn *GetLastInsn() { + return lastInsn; + } + const Insn *GetLastInsn() const { + return lastInsn; + } + void SetLastInsn(Insn *arg) { + lastInsn = arg; + } + bool IsLastInsn(const Insn *insn) const{ + return (lastInsn == insn); + } + void InsertPred(const MapleList::iterator &it, BB &bb) { + preds.insert(it, &bb); + } + void InsertSucc(const MapleList::iterator &it, BB &bb) { + succs.insert(it, &bb); + } + const MapleList &GetPreds() const { + return preds; + } + const MapleList &GetSuccs() const { + return succs; + } + const MapleList &GetEhPreds() const { + return ehPreds; + } + const MapleList &GetEhSuccs() const { + return ehSuccs; + } + const MapleList &GetLoopPreds() const { + return loopPreds; + } + MapleList &GetLoopSuccs() { + return loopSuccs; + } + const MapleList &GetLoopSuccs() const { + return loopSuccs; + } + MapleList::iterator GetPredsBegin() { + return preds.begin(); + } + MapleList::iterator GetSuccsBegin() { + return succs.begin(); + } + MapleList::iterator GetEhPredsBegin() { + return ehPreds.begin(); + } + MapleList::iterator GetLoopSuccsBegin() { + return loopSuccs.begin(); + } + MapleList::iterator GetPredsEnd() { + return preds.end(); + } + MapleList::iterator GetSuccsEnd() { + return succs.end(); + } + MapleList::iterator GetEhPredsEnd() { + return ehPreds.end(); + } + MapleList::iterator GetLoopSuccsEnd() { + return loopSuccs.end(); + } + void PushBackPreds(BB &bb) { + preds.push_back(&bb); + } + void PushBackSuccs(BB &bb) { + succs.push_back(&bb); + } + void PushBackEhPreds(BB &bb) { + ehPreds.push_back(&bb); + } + void PushBackEhSuccs(BB &bb) { + ehSuccs.push_back(&bb); + } + void PushBackLoopPreds(BB &bb) { + loopPreds.push_back(&bb); + } + void PushBackLoopSuccs(BB &bb) { + loopSuccs.push_back(&bb); + } + void PushFrontPreds(BB &bb) { + preds.push_front(&bb); + } + void 
PushFrontSuccs(BB &bb) { + succs.push_front(&bb); + } + void ErasePreds(MapleList::iterator it) { + preds.erase(it); + } + void EraseSuccs(MapleList::iterator it) { + succs.erase(it); + } + void RemovePreds(BB &bb) { + preds.remove(&bb); + } + void RemoveSuccs(BB &bb) { + succs.remove(&bb); + } + void RemoveEhPreds(BB &bb) { + ehPreds.remove(&bb); + } + void RemoveEhSuccs(BB &bb) { + ehSuccs.remove(&bb); + } + void ClearPreds() { + preds.clear(); + } + void ClearSuccs() { + succs.clear(); + } + void ClearEhPreds() { + ehPreds.clear(); + } + void ClearEhSuccs() { + ehSuccs.clear(); + } + void ClearLoopPreds() { + loopPreds.clear(); + } + void ClearLoopSuccs() { + loopSuccs.clear(); + } + const MapleSet &GetLiveInRegNO() const { + return liveInRegNO; + } + MapleSet &GetLiveInRegNO() { + return liveInRegNO; + } + void InsertLiveInRegNO(regno_t arg) { + (void)liveInRegNO.insert(arg); + } + void EraseLiveInRegNO(MapleSet::iterator it) { + liveInRegNO.erase(it); + } + void EraseLiveInRegNO(regno_t arg) { + liveInRegNO.erase(arg); + } + void ClearLiveInRegNO() { + liveInRegNO.clear(); + } + const MapleSet &GetLiveOutRegNO() const { + return liveOutRegNO; + } + MapleSet &GetLiveOutRegNO() { + return liveOutRegNO; + } + void InsertLiveOutRegNO(regno_t arg) { + (void)liveOutRegNO.insert(arg); + } + void EraseLiveOutRegNO(MapleSet::iterator it) { + liveOutRegNO.erase(it); + } + void ClearLiveOutRegNO() { + liveOutRegNO.clear(); + } + CGFuncLoops *GetLoop() const { + return loop; + } + void SetLoop(CGFuncLoops &arg) { + loop = &arg; + } + bool GetLiveInChange() const { + return liveInChange; + } + void SetLiveInChange(bool arg) { + liveInChange = arg; + } + bool GetCritical() const { + return isCritical; + } + void SetCritical(bool arg) { + isCritical = arg; + } + bool HasCriticalEdge(); + bool GetInsertUse() const { + return insertUse; + } + void SetInsertUse(bool arg) { + insertUse = arg; + } + bool IsUnreachable() const { + return unreachable; + } + void SetUnreachable(bool arg) { + unreachable = arg; + } + bool IsWontExit() const { + return wontExit; + } + void SetWontExit(bool arg) { + wontExit = arg; + } + void SetFastPath(bool arg) { + fastPath = arg; + } + bool IsCatch() const { + return isCatch; + } + void SetIsCatch(bool arg) { + isCatch = arg; + } + bool IsCleanup() const { + return isCleanup; + } + void SetIsCleanup(bool arg) { + isCleanup = arg; + } + bool IsProEpilog() const { + return isProEpilog; + } + void SetIsProEpilog(bool arg) { + isProEpilog = arg; + } + bool IsLabelTaken() const { + return labelTaken; + } + void SetLabelTaken() { + labelTaken = true; + } + bool GetHasCfi() const { + return hasCfi; + } + void SetHasCfi() { + hasCfi = true; + } + long GetInternalFlag1() const { + return internalFlag1; + } + void SetInternalFlag1(long arg) { + internalFlag1 = arg; + } + long GetInternalFlag2() const { + return internalFlag2; + } + void SetInternalFlag2(long arg) { + internalFlag2 = arg; + } + long GetInternalFlag3() const { + return internalFlag3; + } + void SetInternalFlag3(long arg) { + internalFlag3 = arg; + } + bool IsAtomicBuiltInBB() const { + return isAtomicBuiltIn; + } + void SetAtomicBuiltIn() { + isAtomicBuiltIn = true; + } + const MapleList &GetCallInsns() const { + return callInsns; + } + void PushBackCallInsns(Insn &insn) { + callInsns.push_back(&insn); + } + void ClearCallInsns() { + callInsns.clear(); + } + const MapleVector &GetRangeGotoLabelVec() const { + return rangeGotoLabelVec; + } + void SetRangeGotoLabel(uint32 index, LabelIdx labelIdx) { + 
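+        /* index must already be within rangeGotoLabelVec's bounds; use PushBackRangeGotoLabel() to append new labels */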
rangeGotoLabelVec[index] = labelIdx; + } + void PushBackRangeGotoLabel(LabelIdx labelIdx) { + rangeGotoLabelVec.emplace_back(labelIdx); + } + void AddPhiInsn(regno_t regNO, Insn &insn) { + DEBUG_ASSERT(!phiInsnList.count(regNO), "repeat phiInsn"); + phiInsnList.emplace(std::pair(regNO, &insn)); + } + void RemovePhiInsn(regno_t regNO) { + DEBUG_ASSERT(phiInsnList.count(regNO), "no such insn"); + phiInsnList.erase(regNO); + } + bool HasPhiInsn(regno_t regNO) { + return phiInsnList.find(regNO) != phiInsnList.end(); + } + MapleMap &GetPhiInsns() { + return phiInsnList; + } + bool IsInPhiList(regno_t regNO); + bool IsInPhiDef(regno_t regNO); + const Insn *GetFirstLoc() const { + return firstLoc; + } + void SetFirstLoc(const Insn &arg) { + firstLoc = &arg; + } + const Insn *GetLastLoc() const { + return lastLoc; + } + void SetLastLoc(const Insn *arg) { + lastLoc = arg; + } + SparseDataInfo *GetLiveIn() { + return liveIn; + } + const SparseDataInfo *GetLiveIn() const { + return liveIn; + } + void SetLiveIn(SparseDataInfo &arg) { + liveIn = &arg; + } + void SetLiveInBit(uint32 arg) const { + liveIn->SetBit(arg); + } + void SetLiveInInfo(const SparseDataInfo &arg) const { + *liveIn = arg; + } + void LiveInOrBits(const SparseDataInfo &arg) const { + liveIn->OrBits(arg); + } + void LiveInEnlargeCapacity(uint32 arg) const { + liveIn->EnlargeCapacityToAdaptSize(arg); + } + void LiveInClearDataInfo() { + liveIn->ClearDataInfo(); + liveIn = nullptr; + } + SparseDataInfo *GetLiveOut() { + return liveOut; + } + const SparseDataInfo *GetLiveOut() const { + return liveOut; + } + void SetLiveOut(SparseDataInfo &arg) { + liveOut = &arg; + } + void SetLiveOutBit(uint32 arg) const { + liveOut->SetBit(arg); + } + void LiveOutOrBits(const SparseDataInfo &arg) const { + liveOut->OrBits(arg); + } + void LiveOutEnlargeCapacity(uint32 arg) const { + liveOut->EnlargeCapacityToAdaptSize(arg); + } + void LiveOutClearDataInfo() { + liveOut->ClearDataInfo(); + liveOut = nullptr; + } + const SparseDataInfo *GetDef() const { + return def; + } + void SetDef(SparseDataInfo &arg) { + def = &arg; + } + void SetDefBit(uint32 arg) const { + def->SetBit(arg); + } + void DefResetAllBit() const { + def->ResetAllBit(); + } + void DefResetBit(uint32 arg) const { + def->ResetBit(arg); + } + void DefClearDataInfo() { + def->ClearDataInfo(); + def = nullptr; + } + const SparseDataInfo *GetUse() const { + return use; + } + void SetUse(SparseDataInfo &arg) { + use = &arg; + } + void SetUseBit(uint32 arg) const { + use->SetBit(arg); + } + void UseResetAllBit() const { + use->ResetAllBit(); + } + void UseResetBit(uint32 arg) const { + use->ResetBit(arg); + } + void UseClearDataInfo() { + use->ClearDataInfo(); + use = nullptr; + } + void SetNeedAlign(bool flag) { + needAlign = flag; + } + bool IsBBNeedAlign() const { + return needAlign; + } + void SetAlignPower(uint32 power) { + alignPower = power; + } + uint32 GetAlignPower() const { + return alignPower; + } + void SetAlignNopNum(uint32 num) { + alignNopNum = num; + } + uint32 GetAlignNopNum() const { + return alignNopNum; + } + + private: + static const std::string bbNames[kBBLast]; + uint32 id; + uint32 level = 0; + uint32 frequency = 0; + BB *prev = nullptr; /* Doubly linked list of BBs; */ + BB *next = nullptr; + /* They represent the order in which blocks are to be emitted. */ + BBKind kind = kBBFallthru; /* The BB's last statement (i.e. lastStmt) determines */ + /* what type this BB has. 
By default, kBBFallthru. */
+    LabelIdx labIdx;
+    StmtNode *firstStmt = nullptr;
+    StmtNode *lastStmt = nullptr;
+    Insn *firstInsn = nullptr;  /* the first instruction */
+    Insn *lastInsn = nullptr;   /* the last instruction */
+    MapleList<BB*> preds;       /* preds, succs represent CFG */
+    MapleList<BB*> succs;
+    MapleList<BB*> ehPreds;
+    MapleList<BB*> ehSuccs;
+    MapleList<BB*> loopPreds;
+    MapleList<BB*> loopSuccs;
+
+    /* this is for live in/out analysis */
+    MapleSet<regno_t> liveInRegNO;
+    MapleSet<regno_t> liveOutRegNO;
+    CGFuncLoops *loop = nullptr;
+    bool liveInChange = false;
+    bool isCritical = false;
+    bool insertUse = false;
+    bool hasCall = false;
+    bool unreachable = false;
+    bool wontExit = false;
+    bool fastPath = false;
+    bool isCatch = false; /* part of the catch bb; if true, the bb might also be unreachable */
+    /*
+     * Since isCatch is set early and unreachable is detected later, there
+     * is some overlap here.
+     */
+    bool isCleanup = false;   /* true if the bb is a cleanup bb, otherwise false. */
+    bool isProEpilog = false; /* Temporary tag for modifying prolog/epilog bb. */
+    bool labelTaken = false;  /* Block label is taken indirectly and can be used to jump to it. */
+    bool hasCfi = false;      /* bb contains a cfi directive. */
+    /*
+     * Different meaning for each data flow analysis.
+     * For HandleFunction(), rough estimate of num of insn created.
+     * For cgbb.cpp, track insn count during code selection.
+     * For cgbb.cpp, bb is traversed during BFS ordering.
+     * For aarchregalloc.cpp, the bb is part of cleanup at end of function.
+     * For aarchcolorra.cpp, the bb is part of cleanup at end of function;
+     * also used for live range splitting.
+     * For live analysis, it indicates if bb is a cleanup bb.
+     */
+    long internalFlag1 = 0;
+
+    /*
+     * Different meaning for each data flow analysis.
+     * For cgbb.cpp, bb is levelized to be 1 more than the largest predecessor.
+     * For aarchcolorra.cpp, used for live range splitting pruning of bb.
+     */
+    long internalFlag2 = 0;
+
+    /*
+     * Different meaning for each data flow analysis.
+     * For cgfunc.cpp, it temporarily marks for catch bb discovery.
+     * For live analysis, it indicates if bb is visited.
+     * For peephole, used for live-out checking of bb.
+     */
+    long internalFlag3 = 0;
+    MapleList<Insn*> callInsns;
+    MapleVector<LabelIdx> rangeGotoLabelVec;
+
+    /* bb support for SSA analysis */
+    MapleMap<regno_t, Insn*> phiInsnList;
+
+    /* includes built-in functions for atomic memory access */
+    bool isAtomicBuiltIn = false;
+
+    const Insn *firstLoc = nullptr;
+    const Insn *lastLoc = nullptr;
+    SparseDataInfo *liveIn = nullptr;
+    SparseDataInfo *liveOut = nullptr;
+    SparseDataInfo *def = nullptr;
+    SparseDataInfo *use = nullptr;
+
+    bool needAlign = false;
+    uint32 alignPower = 0;
+    uint32 alignNopNum = 0;
+};  /* class BB */
+
+struct BBIdCmp {
+    bool operator()(const BB *lhs, const BB *rhs) const {
+        CHECK_FATAL(lhs != nullptr, "null ptr check");
+        CHECK_FATAL(rhs != nullptr, "null ptr check");
+        return (lhs->GetId() < rhs->GetId());
+    }
+};
+
+class Bfs : public AnalysisResult {
+ public:
+    Bfs(CGFunc &cgFunc, MemPool &memPool)
+        : AnalysisResult(&memPool),
+          cgfunc(&cgFunc),
+          alloc(&memPool),
+          visitedBBs(alloc.Adapter()),
+          sortedBBs(alloc.Adapter()) {}
+    ~Bfs() = default;
+
+    bool AllPredBBVisited(const BB &bb, long &level) const;
+    BB *MarkStraightLineBBInBFS(BB *bb);
+    BB *SearchForStraightLineBBs(BB &bb);
+    void BFS(BB &curBB);
+    void ComputeBlockOrder();
+
+    CGFunc *cgfunc;
+    MapleAllocator alloc;
+    MapleVector<bool> visitedBBs;
+    MapleVector<BB*> sortedBBs;
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgBBSort, CGFunc)
+    Bfs *GetResult() {
+        return bfs;
+    }
+    Bfs *bfs = nullptr;
+OVERRIDE_DEPENDENCE
+MAPLE_FUNC_PHASE_DECLARE_END
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_CGBB_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/cgfunc.h b/ecmascript/mapleall/maple_be/include/cg/cgfunc.h
new file mode 100644
index 0000000000000000000000000000000000000000..f89fe649451282b8042a59e0d5bac43ea6ac607d
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/cgfunc.h
@@ -0,0 +1,1387 @@
+/*
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CGFUNC_H
+#define MAPLEBE_INCLUDE_CG_CGFUNC_H
+
+#include "becommon.h"
+#include "operand.h"
+#include "eh_func.h"
+#include "memlayout.h"
+#include "reg_info.h"
+#include "cgbb.h"
+#include "reg_alloc.h"
+#include "cfi.h"
+#include "dbg.h"
+#include "reaching.h"
+#include "cg_cfg.h"
+#include "cg_irbuilder.h"
+#include "call_conv.h"
+/* MapleIR headers.
*/ +#include "mir_parser.h" +#include "mir_function.h" +#include "debug_info.h" + +/* Maple MP header */ +#include "mempool_allocator.h" + +namespace maplebe { +constexpr int32 kBBLimit = 100000; +constexpr int32 kFreqBase = 100000; +struct MemOpndCmp { + bool operator()(const MemOperand *lhs, const MemOperand *rhs) const { + CHECK_FATAL(lhs != nullptr, "null ptr check"); + CHECK_FATAL(rhs != nullptr, "null ptr check"); + if (lhs == rhs) { + return false; + } + return (lhs->Less(*rhs)); + } +}; + +class SpillMemOperandSet { + public: + explicit SpillMemOperandSet(MapleAllocator &mallocator) : reuseSpillLocMem(mallocator.Adapter()) {} + + virtual ~SpillMemOperandSet() = default; + + void Add(MemOperand &op) { + (void)reuseSpillLocMem.insert(&op); + } + + void Remove(MemOperand &op) { + reuseSpillLocMem.erase(&op); + } + + MemOperand *GetOne() { + if (!reuseSpillLocMem.empty()) { + MemOperand *res = *reuseSpillLocMem.begin(); + reuseSpillLocMem.erase(res); + return res; + } + return nullptr; + } + + private: + MapleSet reuseSpillLocMem; +}; + +#if TARGARM32 +class LiveRange; +#endif /* TARGARM32 */ +constexpr uint32 kVRegisterNumber = 80; +class CGFunc { + public: + enum ShiftDirection : uint8 { + kShiftLeft, + kShiftAright, + kShiftLright + }; + + CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId); + virtual ~CGFunc(); + + const std::string &GetName() const { + return func.GetName(); + } + + const MapleMap &GetLabelAndValueMap() const { + return labelMap; + } + + void InsertLabelMap(LabelIdx idx, uint64 value) { + DEBUG_ASSERT(labelMap.find(idx) == labelMap.end(), "idx already exist"); + labelMap[idx] = value; + } + + void LayoutStackFrame() { + CHECK_FATAL(memLayout != nullptr, "memLayout should has been initialized in constructor"); + memLayout->LayoutStackFrame(structCopySize, maxParamStackSize); + } + + bool HasCall() const { + return func.HasCall(); + } + + bool HasVLAOrAlloca() const { + return hasVLAOrAlloca; + } + + void SetHasVLAOrAlloca(bool val) { + hasVLAOrAlloca = val; + } + + bool HasAlloca() const { + return hasAlloca; + } + + void SetHasAlloca(bool val) { + hasAlloca = val; + } + + void SetRD(ReachingDefinition *paramRd) { + reachingDef = paramRd; + } + + InsnBuilder *GetInsnBuilder() { + return insnBuilder; + } + OperandBuilder *GetOpndBuilder() { + return opndBuilder; + } + + bool GetRDStatus() const { + return (reachingDef != nullptr); + } + + ReachingDefinition *GetRD() { + return reachingDef; + } + + EHFunc *BuildEHFunc(); + virtual void GenSaveMethodInfoCode(BB &bb) = 0; + virtual void GenerateCleanupCode(BB &bb) = 0; + virtual bool NeedCleanup() = 0; + virtual void GenerateCleanupCodeForExtEpilog(BB &bb) = 0; + + void CreateLmbcFormalParamInfo(); + virtual uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) = 0; + virtual void AssignLmbcFormalParams() = 0; + LmbcFormalParamInfo *GetLmbcFormalParamInfo(uint32 offset); + virtual void LmbcGenSaveSpForAlloca() = 0; + void GenerateLoc(StmtNode *stmt, unsigned &lastSrcLoc, unsigned &lastMplLoc); + int32 GetFreqFromStmt(uint32 stmtId); + void GenerateInstruction(); + bool MemBarOpt(const StmtNode &membar); + void UpdateCallBBFrequency(); + void HandleFunction(); + void ProcessExitBBVec(); + void AddCommonExitBB(); + virtual void MergeReturn() = 0; + void TraverseAndClearCatchMark(BB &bb); + void MarkCatchBBs(); + void MarkCleanupEntryBB(); + void SetCleanupLabel(BB &cleanupEntry); + bool 
ExitbbNotInCleanupArea(const BB &bb) const; + uint32 GetMaxRegNum() const { + return maxRegCount; + }; + void DumpCFG() const; + void DumpCGIR() const; + void DumpLoop() const; + void ClearLoopInfo(); + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); + virtual void DetermineReturnTypeofCall() = 0; + /* handle rc reset */ + virtual void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) = 0; + virtual void HandleRetCleanup(NaryStmtNode &retNode) = 0; + /* select stmt */ + virtual void SelectDassign(DassignNode &stmt, Operand &opnd0) = 0; + virtual void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) = 0; + virtual void SelectRegassign(RegassignNode &stmt, Operand &opnd0) = 0; + virtual void SelectAbort() = 0; + virtual void SelectAssertNull(UnaryStmtNode &stmt) = 0; + virtual void SelectAsm(AsmNode &node) = 0; + virtual void SelectAggDassign(DassignNode &stmt) = 0; + virtual void SelectIassign(IassignNode &stmt) = 0; + virtual void SelectIassignoff(IassignoffNode &stmt) = 0; + virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; + virtual void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) = 0; + virtual void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) = 0; + virtual void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) = 0; + virtual void SelectReturnSendOfStructInRegs(BaseNode *x) = 0; + virtual void SelectReturn(Operand *opnd) = 0; + virtual void SelectIgoto(Operand *opnd0) = 0; + virtual void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) = 0; + virtual void SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) = 0; + virtual void SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) = 0; + virtual void SelectGoto(GotoNode &stmt) = 0; + virtual void SelectCall(CallNode &callNode) = 0; + virtual void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) = 0; + virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; + virtual Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinsicopNode, std::string name) = 0; + virtual Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinsicopNode, PrimType retType, + const std::string &name) = 0; + virtual Operand *SelectCclz(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCctz(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCpopcount(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCparity(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCclrsb(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCisaligned(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCalignup(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCaligndown(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) = 0; + virtual Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinsicopNode, PrimType pty) = 0; + virtual Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCReturnAddress(IntrinsicopNode &intrinsicopNode) = 0; + virtual void SelectMembar(StmtNode &membar) = 0; + virtual void 
SelectComment(CommentNode &comment) = 0; + virtual void HandleCatch() = 0; + + /* select expr */ + virtual Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) = 0; + virtual RegOperand *SelectRegread(RegreadNode &expr) = 0; + virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) = 0; + virtual Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) = 0; + virtual Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; + virtual Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectIread(const BaseNode &parent, IreadNode &expr, + int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) = 0; + virtual Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) = 0; + virtual Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) = 0; + virtual Operand *SelectIntConst(MIRIntConst &intConst) = 0; + virtual Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) = 0; + virtual Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) = 0; + virtual Operand *SelectStrConst(MIRStrConst &strConst) = 0; + virtual Operand *SelectStr16Const(MIRStr16Const &strConst) = 0; + virtual void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) = 0; + virtual Operand *SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand &SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) = 0; + virtual Operand *SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectLand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, + bool parentIsBr = false) = 0; + virtual void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual 
Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0) = 0; + virtual Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) = 0; + virtual Operand *SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) = 0; + virtual Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) = 0; + virtual Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, + const BaseNode &parent, bool hasCompare = false) = 0; + virtual Operand *SelectMalloc(UnaryNode &call, Operand &opnd0) = 0; + virtual RegOperand &SelectCopy(Operand &src, PrimType srcType, PrimType dstType) = 0; + virtual Operand *SelectAlloca(UnaryNode &call, Operand &opnd0) = 0; + virtual Operand *SelectGCMalloc(GCMallocNode &call) = 0; + virtual Operand *SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) = 0; + virtual void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) = 0; + virtual Operand *SelectLazyLoad(Operand &opnd0, PrimType primType) = 0; + virtual Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) = 0; + virtual Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) = 0; + virtual void GenerateYieldpoint(BB &bb) = 0; + virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0; + + virtual Operand &GetOrCreateRflag() = 0; + virtual const Operand *GetRflag() const = 0; + virtual const Operand *GetFloatRflag() const = 0; + virtual const LabelOperand *GetLabelOperand(LabelIdx labIdx) const = 0; + virtual LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) = 0; + virtual LabelOperand &GetOrCreateLabelOperand(BB &bb) = 0; + virtual RegOperand &CreateVirtualRegisterOperand(regno_t vRegNO) = 0; + virtual RegOperand 
&GetOrCreateVirtualRegisterOperand(regno_t vRegNO) = 0; + virtual RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) = 0; + virtual RegOperand &GetOrCreateFramePointerRegOperand() = 0; + virtual RegOperand &GetOrCreateStackBaseRegOperand() = 0; + virtual int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) = 0; + virtual RegOperand &GetZeroOpnd(uint32 size) = 0; + virtual Operand &CreateCfiRegOperand(uint32 reg, uint32 size) = 0; + virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; + virtual Operand &CreateImmOperand(PrimType primType, int64 val) = 0; + virtual void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) = 0; + virtual void CleanupDeadMov(bool dump = false) = 0; + virtual void GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) = 0; + + /* ra */ + virtual void AddtoCalleeSaved(regno_t reg) = 0; + + virtual bool IsFrameReg(const RegOperand &opnd) const = 0; + virtual bool IsSPOrFP(const RegOperand &opnd) const { + return false; + }; + virtual bool IsReturnReg(const RegOperand &opnd) const { + return false; + }; + virtual bool IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const { + return false; + } + + /* For Neon intrinsics */ + virtual RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) = 0; + virtual RegOperand *SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) = 0; + virtual RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) = 0; + virtual RegOperand *SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Opcode opc) = 0; + virtual RegOperand *SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) = 0;; + virtual RegOperand *SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) = 0; + virtual RegOperand *SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) = 0; + virtual RegOperand *SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) = 0; + virtual RegOperand *SelectVectorDup(PrimType rType, Operand *src, bool getLow) = 0; + virtual RegOperand *SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) = 0; + virtual RegOperand *SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, bool isLow) = 0; + virtual RegOperand *SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3, + PrimType oTyp3) = 0; + virtual RegOperand *SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) = 0; + virtual RegOperand *SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, + Operand *o2, PrimType oTyp2, bool isLow) = 0; + virtual RegOperand *SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) = 0; + virtual RegOperand *SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) = 0; + virtual RegOperand *SelectVectorNeg(PrimType rType, Operand *o1) = 0; + virtual RegOperand *SelectVectorNot(PrimType rType, Operand *o1) = 0; + + virtual RegOperand *SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) = 0; + virtual RegOperand *SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) = 0; + virtual RegOperand *SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) = 0; + virtual RegOperand *SelectVectorSetElement(Operand *eOp, 
PrimType eTyp, Operand *vOpd, PrimType vTyp, int32 lane) = 0; + virtual RegOperand *SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) = 0; + virtual RegOperand *SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) = 0; + virtual RegOperand *SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, + Operand *o2, bool isLow) = 0; + virtual RegOperand *SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, + Operand *o2, PrimType otyp2, bool isLow, bool isWide) = 0; + virtual RegOperand *SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) = 0; + virtual RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) = 0; + virtual RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) = 0; + + /* For ebo issue. */ + virtual Operand *GetTrueOpnd() { + return nullptr; + } + virtual void ClearUnreachableGotInfos(BB &bb) { + (void)bb; + }; + virtual void ClearUnreachableConstInfos(BB &bb) { + (void)bb; + }; + LabelIdx CreateLabel(); + + RegOperand *GetVirtualRegisterOperand(regno_t vRegNO) { + auto it = vRegOperandTable.find(vRegNO); + return it == vRegOperandTable.end() ? nullptr : it->second; + } + + Operand &CreateCfiImmOperand(int64 val, uint32 size) const { + return *memPool->New(val, size); + } + + Operand &CreateCfiStrOperand(const std::string &str) { + return *memPool->New(str, *memPool); + } + + bool IsSpecialPseudoRegister(PregIdx spr) const { + return spr < 0; + } + + regno_t NewVReg(RegType regType, uint32 size) { + if (CGOptions::UseGeneralRegOnly()) { + CHECK_FATAL(regType != kRegTyFloat, "cannot use float | SIMD register with --general-reg-only"); + } + /* when vRegCount reach to maxRegCount, maxRegCount limit adds 80 every time */ + /* and vRegTable increases 80 elements. 
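+        For example, with kRegIncrStepLen == 80: the call that finds vRegCount == maxRegCount
+        bumps maxRegCount by 80 and resizes vRegTable before placement-constructing the new node.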
*/
+        if (vRegCount >= maxRegCount) {
+            DEBUG_ASSERT(vRegCount < maxRegCount + 1, "MAINTAIN FAILED");
+            maxRegCount += kRegIncrStepLen;
+            vRegTable.resize(maxRegCount);
+        }
+#if TARGAARCH64 || TARGX86_64 || TARGRISCV64
+        if (size < k4ByteSize) {
+            size = k4ByteSize;
+        }
+#if TARGAARCH64
+        /* cannot handle a 128-bit register */
+        if (regType == kRegTyInt && size > k8ByteSize) {
+            size = k8ByteSize;
+        }
+#endif
+        DEBUG_ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size");
+#endif
+        new (&vRegTable[vRegCount]) VirtualRegNode(regType, size);
+        return vRegCount++;
+    }
+
+    virtual regno_t NewVRflag() {
+        return 0;
+    }
+
+    RegType GetRegTyFromPrimTy(PrimType primType) const {
+        switch (primType) {
+            case PTY_u1:
+            case PTY_i8:
+            case PTY_u8:
+            case PTY_i16:
+            case PTY_u16:
+            case PTY_i32:
+            case PTY_u32:
+            case PTY_i64:
+            case PTY_u64:
+            case PTY_a32:
+            case PTY_a64:
+            case PTY_ptr:
+            case PTY_i128:
+            case PTY_u128:
+            case PTY_agg:
+                return kRegTyInt;
+            case PTY_f32:
+            case PTY_f64:
+            case PTY_v2i32:
+            case PTY_v2u32:
+            case PTY_v2i64:
+            case PTY_v2u64:
+            case PTY_v2f32:
+            case PTY_v2f64:
+            case PTY_v4i16:
+            case PTY_v4u16:
+            case PTY_v4i32:
+            case PTY_v4u32:
+            case PTY_v4f32:
+            case PTY_v8i8:
+            case PTY_v8u8:
+            case PTY_v8i16:
+            case PTY_v8u16:
+            case PTY_v16i8:
+            case PTY_v16u8:
+                return kRegTyFloat;
+            default:
+                DEBUG_ASSERT(false, "Unexpected pty");
+                return kRegTyUndef;
+        }
+    }
+
+    /* return Register Type */
+    virtual RegType GetRegisterType(regno_t rNum) const {
+        CHECK(rNum < vRegTable.size(), "index out of range in GetRegisterType");
+        return vRegTable[rNum].GetType();
+    }
+
+#if TARGX86_64
+    uint32 GetMaxVReg() const {
+        return vRegCount + opndBuilder->GetCurrentVRegNum();
+    }
+#else
+    uint32 GetMaxVReg() const {
+        return vRegCount;
+    }
+#endif
+
+    uint32 GetSSAvRegCount() const {
+        return ssaVRegCount;
+    }
+
+    void SetSSAvRegCount(uint32 count) {
+        ssaVRegCount = count;
+    }
+
+    uint32 GetVRegSize(regno_t vregNum) {
+        CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize");
+        return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte;
+    }
+
+    MIRSymbol *GetRetRefSymbol(BaseNode &expr);
+    void GenerateCfiPrologEpilog();
+
+    void PatchLongBranch();
+
+    virtual uint32 MaxCondBranchDistance() {
+        return INT_MAX;
+    }
+
+    virtual void InsertJumpPad(Insn *) {
+        return;
+    }
+
+    virtual LabelIdx GetLabelInInsn(Insn &insn) {
+        return 0;
+    }
+
+    Operand *CreateDbgImmOperand(int64 val) const {
+        return memPool->New(val);
+    }
+
+    uint32 NumBBs() const {
+        return bbCnt;
+    }
+
+#if DEBUG
+    StIdx GetLocalVarReplacedByPreg(PregIdx reg) {
+        auto it = pregsToVarsMap->find(reg);
+        return it != pregsToVarsMap->end() ?
it->second : StIdx(); + } +#endif + + void IncTotalNumberOfInstructions() { + totalInsns++; + } + + void DecTotalNumberOfInstructions() { + totalInsns--; + } + + uint32 GetTotalNumberOfInstructions() const { + return totalInsns + insnBuilder->GetCreatedInsnNum(); + } + + int32 GetStructCopySize() const { + return structCopySize; + } + + int32 GetMaxParamStackSize() const { + return maxParamStackSize; + } + + virtual void ProcessLazyBinding() = 0; + + /* Debugging support */ + void SetDebugInfo(DebugInfo *dbgInfo) { + debugInfo = dbgInfo; + } + + void AddDIESymbolLocation(const MIRSymbol *sym, SymbolAlloc *loc); + + virtual void DBGFixCallFrameLocationOffsets() {}; + + /* Get And Set private members */ + CG *GetCG() { + return cg; + } + + const CG *GetCG() const { + return cg; + } + + MIRModule &GetMirModule(){ + return mirModule; + } + + const MIRModule &GetMirModule() const { + return mirModule; + } + + template + MIRConst *NewMirConst(T &mirConst) { + MIRConst *newConst = mirModule.GetMemPool()->New(mirConst.GetValue(), mirConst.GetType()); + return newConst; + } + + uint32 GetMIRSrcFileEndLineNum() const { + auto &srcFileInfo = mirModule.GetSrcFileInfo(); + if (!srcFileInfo.empty()) { + return srcFileInfo.back().second; + } else { + return 0; + } + } + + MIRFunction &GetFunction() { + return func; + } + + const MIRFunction &GetFunction() const { + return func; + } + + EHFunc *GetEHFunc() { + return ehFunc; + } + + const EHFunc *GetEHFunc() const { + return ehFunc; + } + + void SetEHFunc(EHFunc &ehFunction) { + ehFunc = &ehFunction; + } + + uint32 GetLabelIdx() const { + return labelIdx; + } + + void SetLabelIdx(uint32 idx) { + labelIdx = idx; + } + + LabelNode *GetStartLabel() { + return startLabel; + } + + const LabelNode *GetStartLabel() const { + return startLabel; + } + + void SetStartLabel(LabelNode &label) { + startLabel = &label; + } + + LabelNode *GetEndLabel() { + return endLabel; + } + + const LabelNode *GetEndLabel() const { + return endLabel; + } + + void SetEndLabel(LabelNode &label) { + endLabel = &label; + } + + LabelNode *GetCleanupLabel() { + return cleanupLabel; + } + + const LabelNode *GetCleanupLabel() const { + return cleanupLabel; + } + + void SetCleanupLabel(LabelNode &node) { + cleanupLabel = &node; + } + + BB *GetFirstBB() { + return firstBB; + } + + const BB *GetFirstBB() const { + return firstBB; + } + + void SetFirstBB(BB &bb) { + firstBB = &bb; + } + + BB *GetCleanupBB() { + return cleanupBB; + } + + const BB *GetCleanupBB() const { + return cleanupBB; + } + + void SetCleanupBB(BB &bb) { + cleanupBB = &bb; + } + + const BB *GetCleanupEntryBB() const { + return cleanupEntryBB; + } + + void SetCleanupEntryBB(BB &bb) { + cleanupEntryBB = &bb; + } + + BB *GetLastBB() { + return lastBB; + } + + const BB *GetLastBB() const { + return lastBB; + } + + void SetLastBB(BB &bb) { + lastBB = &bb; + } + + BB *GetCurBB() { + return curBB; + } + + const BB *GetCurBB() const { + return curBB; + } + + void SetCurBB(BB &bb) { + curBB = &bb; + } + + BB *GetDummyBB() { + return dummyBB; + } + + const BB *GetDummyBB() const { + return dummyBB; + } + + BB *GetCommonExitBB() { + return commonExitBB; + } + + LabelIdx GetFirstCGGenLabelIdx() const { + return firstCGGenLabelIdx; + } + + MapleVector &GetExitBBsVec() { + return exitBBVec; + } + + const MapleVector GetExitBBsVec() const { + return exitBBVec; + } + + size_t ExitBBsVecSize() const { + return exitBBVec.size(); + } + + bool IsExitBBsVecEmpty() const { + return exitBBVec.empty(); + } + + void 
EraseExitBBsVec(MapleVector::iterator it) { + exitBBVec.erase(it); + } + + void PushBackExitBBsVec(BB &bb) { + exitBBVec.emplace_back(&bb); + } + + void ClearExitBBsVec() { + exitBBVec.clear(); + } + + bool IsExtendReg(regno_t vregNum) { + return extendSet.find(vregNum) != extendSet.end(); + } + + void InsertExtendSet(regno_t vregNum) { + (void)extendSet.insert(vregNum); + } + + void RemoveFromExtendSet(regno_t vregNum) { + (void)extendSet.erase(vregNum); + } + + bool IsExitBB(const BB ¤tBB) { + for (BB *exitBB : exitBBVec) { + if (exitBB == ¤tBB) { + return true; + } + } + return false; + } + + BB *GetExitBB(int32 index) { + return exitBBVec.at(index); + } + + const BB *GetExitBB(int32 index) const { + return exitBBVec.at(index); + } + + void SetLab2BBMap(int32 index, BB &bb) { + lab2BBMap[index] = &bb; + } + + BB *GetBBFromLab2BBMap(uint32 index) { + return lab2BBMap[index]; + } + + MapleUnorderedMap &GetLab2BBMap() { + return lab2BBMap; + } + + void DumpCFGToDot(const std::string &fileNamePrefix); + + BECommon &GetBecommon() { + return beCommon; + } + + const BECommon GetBecommon() const { + return beCommon; + } + + MemLayout *GetMemlayout() { + return memLayout; + } + + const MemLayout *GetMemlayout() const { + return memLayout; + } + + void SetMemlayout(MemLayout &layout) { + memLayout = &layout; + } + + RegisterInfo *GetTargetRegInfo() { + return targetRegInfo; + } + + void SetTargetRegInfo(RegisterInfo ®Info) { + targetRegInfo = ®Info; + } + + MemPool *GetMemoryPool() { + return memPool; + } + + const MemPool *GetMemoryPool() const { + return memPool; + } + + StackMemPool &GetStackMemPool() { + return stackMp; + } + + MapleAllocator *GetFuncScopeAllocator() { + return funcScopeAllocator; + } + + const MapleAllocator *GetFuncScopeAllocator() const { + return funcScopeAllocator; + } + + const MapleMap GetEmitStVec() const { + return emitStVec; + } + + MIRSymbol* GetEmitSt(uint32 id) { + return emitStVec[id]; + } + + void AddEmitSt(uint32 id, MIRSymbol &symbol) { + CHECK_FATAL(symbol.GetKonst()->GetKind() == kConstAggConst, "not a kConstAggConst"); + MIRAggConst *arrayConst = safe_cast(symbol.GetKonst()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + ++switchLabelCnt[lblConst->GetValue()]; + } + emitStVec[id] = &symbol; + } + + void UpdateEmitSt(BB &bb, LabelIdx oldLabelIdx, LabelIdx newLabelIdx) { + MIRSymbol *st = GetEmitSt(bb.GetId()); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + MIRConst *mirConst = GetMemoryPool()->New(newLabelIdx, + GetFunction().GetPuidx(), *etype); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + if (oldLabelIdx == lblConst->GetValue()) { + arrayConst->SetConstVecItem(i, *mirConst); + ++switchLabelCnt[newLabelIdx]; + + CHECK_FATAL(switchLabelCnt[oldLabelIdx] > 0, "error labelIdx"); + --switchLabelCnt[oldLabelIdx]; + if (switchLabelCnt[oldLabelIdx] == 0) { + switchLabelCnt.erase(oldLabelIdx); + } + } + } + } + + void DeleteEmitSt(uint32 id) { + MIRSymbol &symbol = *emitStVec[id]; + CHECK_FATAL(symbol.GetKonst()->GetKind() == kConstAggConst, "not a kConstAggConst"); + MIRAggConst *arrayConst = 
safe_cast(symbol.GetKonst()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + + LabelIdx labelIdx = lblConst->GetValue(); + CHECK_FATAL(switchLabelCnt[labelIdx] > 0, "error labelIdx"); + --switchLabelCnt[labelIdx]; + if (switchLabelCnt[labelIdx] == 0) { + switchLabelCnt.erase(labelIdx); + } + } + (void)emitStVec.erase(id); + } + + bool InSwitchTable(LabelIdx label) const { + if (switchLabelCnt.empty()) { + return false; + } + return (switchLabelCnt.find(label) != switchLabelCnt.end()); + } + + LabelIdx GetLocalSymLabelIndex(const MIRSymbol &symbol) const { + auto itr = funcLocalSym2Label.find(&symbol); + CHECK_FATAL(itr != funcLocalSym2Label.end(), "not assign labelIndex to sym"); + return itr->second; + } + + void SetLocalSymLabelIndex(const MIRSymbol &mirSymbol, LabelIdx labelIndex) { + funcLocalSym2Label[&mirSymbol] = labelIndex; + } + + MapleVector &GetLoops() { + return loops; + } + + const MapleVector GetLoops() const { + return loops; + } + + void PushBackLoops(CGFuncLoops &loop) { + loops.emplace_back(&loop); + } + + MapleVector &GetLmbcParamVec() { + return lmbcParamVec; + } + + void IncLmbcArgsInRegs(RegType ty) { + if (ty == kRegTyInt) { + lmbcIntArgs++; + } else { + lmbcFpArgs++; + } + } + + int16 GetLmbcArgsInRegs(RegType ty) const { + return ty == kRegTyInt ? lmbcIntArgs : lmbcFpArgs; + } + + void ResetLmbcArgsInRegs() { + lmbcIntArgs = 0; + lmbcFpArgs = 0; + } + + void IncLmbcTotalArgs() { + lmbcTotalArgs++; + } + + uint32 GetLmbcTotalArgs() const { + return lmbcTotalArgs; + } + + void ResetLmbcTotalArgs() { + lmbcTotalArgs = 0; + } + + MapleVector &GetAllBBs() { + return bbVec; + } + + BB *GetBBFromID(uint32 id) { + return bbVec[id]; + } + void ClearBBInVec(uint32 id) { + bbVec[id] = nullptr; + } + +#if TARGARM32 + MapleVector &GetSortedBBs() { + return sortedBBs; + } + + const MapleVector &GetSortedBBs() const { + return sortedBBs; + } + + void SetSortedBBs(const MapleVector &bbVec) { + sortedBBs = bbVec; + } + + MapleVector &GetLrVec() { + return lrVec; + } + + const MapleVector &GetLrVec() const { + return lrVec; + } + + void SetLrVec(const MapleVector &newLrVec) { + lrVec = newLrVec; + } +#endif /* TARGARM32 */ + + CGCFG *GetTheCFG() { + return theCFG; + } + + void SetTheCFG(CGCFG *cfg) { + theCFG = cfg; + } + + const CGCFG *GetTheCFG() const { + return theCFG; + } + + regno_t GetVirtualRegNOFromPseudoRegIdx(PregIdx idx) const { + return regno_t(idx + firstMapleIrVRegNO); + } + + bool GetHasProEpilogue() const { + return hasProEpilogue; + } + + void SetHasProEpilogue(bool state) { + hasProEpilogue = state; + } + + int32 GetDbgCallFrameOffset() const { + return dbgCallFrameOffset; + } + + void SetDbgCallFrameOffset(int32 val) { + dbgCallFrameOffset = val; + } + + BB *CreateNewBB() { + BB *bb = memPool->New(bbCnt++, *funcScopeAllocator); + bbVec.emplace_back(bb); + return bb; + } + + BB *CreateNewBB(bool unreachable, BB::BBKind kind, uint32 frequency) { + BB *newBB = CreateNewBB(); + newBB->SetKind(kind); + newBB->SetUnreachable(unreachable); + newBB->SetFrequency(frequency); + return newBB; + } + + BB *CreateNewBB(LabelIdx label, bool unreachable, BB::BBKind kind, uint32 frequency) { + BB *newBB = CreateNewBB(unreachable, kind, frequency); + newBB->AddLabel(label); + SetLab2BBMap(label, *newBB); + return newBB; + } + + void UpdateFrequency(const StmtNode &stmt) { + bool withFreqInfo = 
func.HasFreqMap() && !func.GetLastFreqMap().empty(); + if (!withFreqInfo) { + return; + } + auto it = func.GetLastFreqMap().find(stmt.GetStmtID()); + if (it != func.GetLastFreqMap().end()) { + frequency = it->second; + } + } + + BB *StartNewBBImpl(bool stmtIsCurBBLastStmt, StmtNode &stmt) { + BB *newBB = CreateNewBB(); + DEBUG_ASSERT(newBB != nullptr, "newBB should not be nullptr"); + if (stmtIsCurBBLastStmt) { + DEBUG_ASSERT(curBB != nullptr, "curBB should not be nullptr"); + curBB->SetLastStmt(stmt); + curBB->AppendBB(*newBB); + newBB->SetFirstStmt(*stmt.GetNext()); + } else { + newBB->SetFirstStmt(stmt); + if (curBB != nullptr) { + if (stmt.GetPrev() != nullptr) { + DEBUG_ASSERT(stmt.GetPrev()->GetNext() == &stmt, " the next of stmt's prev should be stmt self"); + } + curBB->SetLastStmt(*stmt.GetPrev()); + curBB->AppendBB(*newBB); + } + } + return newBB; + } + + BB *StartNewBB(StmtNode &stmt) { + BB *bb = curBB; + if (stmt.GetNext() != nullptr && stmt.GetNext()->GetOpCode() != OP_label) { + bb = StartNewBBImpl(true, stmt); + } + return bb; + } + + void SetCurBBKind(BB::BBKind bbKind) const { + curBB->SetKind(bbKind); + } + + void SetVolStore(bool val) { + isVolStore = val; + } + + void SetVolReleaseInsn(Insn *insn) { + volReleaseInsn = insn; + } + + bool IsAfterRegAlloc() const { + return isAfterRegAlloc; + } + + void SetIsAfterRegAlloc() { + isAfterRegAlloc = true; + } + + const MapleString &GetShortFuncName() const { + return shortFuncName; + } + + size_t GetLSymSize() const { + return lSymSize; + } + + bool HasTakenLabel() const{ + return hasTakenLabel; + } + + void SetHasTakenLabel() { + hasTakenLabel = true; + } + + virtual InsnVisitor *NewInsnModifier() = 0; + + bool GenCfi() const { + return (mirModule.GetSrcLang() != kSrcLangC) || mirModule.IsWithDbgInfo(); + } + + MapleVector &GetDbgCallFrameLocations() { + return dbgCallFrameLocations; + } + + bool HasAsm() const { + return hasAsm; + } + + uint32 GetUniqueID() const { + return func.GetPuidx(); + } + void SetUseFP(bool canUseFP) { + useFP = canUseFP; + } + + bool UseFP() const { + return useFP; + } + + void UnsetSeenFP() { + seenFP = false; + } + + bool SeenFP() const { + return seenFP; + } + + void UpdateAllRegisterVregMapping(MapleMap &newMap); + + void RegisterVregMapping(regno_t vRegNum, PregIdx pidx) { + vregsToPregsMap[vRegNum] = pidx; + } + + uint32 GetFirstMapleIrVRegNO() const { + return firstMapleIrVRegNO; + } + + void SetHasAsm() { + hasAsm = true; + } + + void SetStackProtectInfo(StackProtectKind kind) { + stackProtectInfo |= kind; + } + + uint8 GetStackProtectInfo() const { + return stackProtectInfo; + } + + void SetFuncEmitInfo(FuncEmitInfo *fnInfo) { + funcEmitInfo = fnInfo; + } + + FuncEmitInfo *GetFuncEmitInfo() { + return funcEmitInfo; + } + + protected: + uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ + uint32 firstNonPregVRegNO; + uint32 vRegCount; /* for assigning a number for each CG virtual register */ + uint32 ssaVRegCount = 0; /* vreg count in ssa */ + uint32 maxRegCount; /* for the current virtual register number limit */ + size_t lSymSize; /* size of local symbol table imported */ + MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ + MapleVector bbVec; + MapleUnorderedMap vRegOperandTable; + MapleUnorderedMap pRegSpillMemOperands; + MapleUnorderedMap spillRegMemOperands; + MapleUnorderedMap reuseSpillLocMem; + LabelIdx firstCGGenLabelIdx; + MapleMap labelMap; +#if DEBUG + MapleMap *pregsToVarsMap = nullptr; +#endif + MapleMap vregsToPregsMap; + 
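The pseudo-register helpers just below (GetPseudoRegIdxFromVirtualRegNO, IsVRegNOForPseudoRegister, VRegNOToPRegIdx) encode one numbering scheme: MapleIR pseudo registers occupy a window of CG vreg numbers starting right after firstMapleIrVRegNO (200, positioned after the physical regs), and anything outside that window is resolved through vregsToPregsMap. Here is a minimal standalone sketch of that scheme; VRegNumbering, the std types, and the value chosen for firstNonPregVRegNO are illustrative stand-ins, not the real CGFunc API:

```
#include <cstdint>
#include <map>
#include <cassert>

using PregIdx = int32_t;
using regno_t = uint32_t;

struct VRegNumbering {
    regno_t firstMapleIrVRegNO = 200;  // positioned after physical regs
    regno_t firstNonPregVRegNO = 300;  // illustrative value, set per function in practice
    std::map<regno_t, PregIdx> vregsToPregsMap;

    bool IsVRegNOForPseudoRegister(regno_t n) const {
        // 0 is not allowed as a preg index, hence the strict lower bound.
        return firstMapleIrVRegNO < n && n < firstNonPregVRegNO;
    }

    PregIdx GetPseudoRegIdx(regno_t vRegNO) const {
        if (IsVRegNOForPseudoRegister(vRegNO)) {
            return static_cast<PregIdx>(vRegNO - firstMapleIrVRegNO);  // fixed-offset window
        }
        auto it = vregsToPregsMap.find(vRegNO);                        // explicit map fallback
        return it == vregsToPregsMap.end() ? PregIdx(-1) : it->second;
    }
};

int main() {
    VRegNumbering n;
    assert(n.GetPseudoRegIdx(201) == 1);   // inside the pseudo-register window
    assert(n.GetPseudoRegIdx(500) == -1);  // outside the window and unmapped
    return 0;
}
```

The strict lower bound in the window check mirrors the "0 is not allowed for a preg index" comment in the code below.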
uint32 totalInsns = 0;
+    int32 structCopySize;
+    int32 maxParamStackSize;
+    static constexpr int kRegIncrStepLen = 80;  /* reg number increase step length */
+
+    bool hasVLAOrAlloca = false;
+    bool hasAlloca = false;
+    bool hasProEpilogue = false;
+    bool isVolLoad = false;
+    bool isVolStore = false;
+    bool isAfterRegAlloc = false;
+    bool isAggParamInReg = false;
+    bool hasTakenLabel = false;
+    uint32 frequency = 0;
+    DebugInfo *debugInfo = nullptr;  /* debugging info */
+    MapleVector dbgCallFrameLocations;
+    RegOperand *aggParamReg = nullptr;
+    ReachingDefinition *reachingDef = nullptr;
+
+    int32 dbgCallFrameOffset = 0;
+    CG *cg;
+    MIRModule &mirModule;
+    MemPool *memPool;
+    StackMemPool &stackMp;
+
+    PregIdx GetPseudoRegIdxFromVirtualRegNO(const regno_t vRegNO) const {
+        if (IsVRegNOForPseudoRegister(vRegNO)) {
+            return PregIdx(vRegNO - firstMapleIrVRegNO);
+        }
+        return VRegNOToPRegIdx(vRegNO);
+    }
+
+    bool IsVRegNOForPseudoRegister(regno_t vRegNum) const {
+        /* 0 is not allowed for a preg index */
+        uint32 n = static_cast<uint32>(vRegNum);
+        return (firstMapleIrVRegNO < n && n < firstNonPregVRegNO);
+    }
+
+    PregIdx VRegNOToPRegIdx(regno_t vRegNum) const {
+        auto it = vregsToPregsMap.find(vRegNum);
+        if (it == vregsToPregsMap.end()) {
+            return PregIdx(-1);
+        }
+        return it->second;
+    }
+
+    VirtualRegNode &GetVirtualRegNodeFromPseudoRegIdx(PregIdx idx) {
+        return vRegTable.at(GetVirtualRegNOFromPseudoRegIdx(idx));
+    }
+
+    PrimType GetTypeFromPseudoRegIdx(PregIdx idx) {
+        VirtualRegNode &vRegNode = GetVirtualRegNodeFromPseudoRegIdx(idx);
+        RegType regType = vRegNode.GetType();
+        DEBUG_ASSERT(regType == kRegTyInt || regType == kRegTyFloat, "");
+        uint32 size = vRegNode.GetSize();  /* in bytes */
+        DEBUG_ASSERT(size == sizeof(int32) || size == sizeof(int64), "");
+        return (regType == kRegTyInt ? (size == sizeof(int32) ? PTY_i32 : PTY_i64)
+                                     : (size == sizeof(float) ? PTY_f32 : PTY_f64));
+    }
+
+    int64 GetPseudoRegisterSpillLocation(PregIdx idx) {
+        const SymbolAlloc *symLoc = memLayout->GetSpillLocOfPseduoRegister(idx);
+        return static_cast<int64>(GetBaseOffset(*symLoc));
+    }
+
+    virtual MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx idx) = 0;
+
+    uint32 GetSpillLocation(uint32 size) {
+        uint32 offset = RoundUp(nextSpillLocation, static_cast<uint64>(size));
+        nextSpillLocation = offset + size;
+        return offset;
+    }
+
+    /* See if the symbol is a structure parameter that requires a copy. */
+    bool IsParamStructCopy(const MIRSymbol &symbol) {
+        if (symbol.GetStorageClass() == kScFormal &&
+            GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) {
+            return true;
+        }
+        return false;
+    }
+
+ private:
+    CGFunc &operator=(const CGFunc &cgFunc);
+    CGFunc(const CGFunc&);
+    StmtNode *HandleFirstStmt();
+    bool CheckSkipMembarOp(const StmtNode &stmt);
+    MIRFunction &func;
+    EHFunc *ehFunc = nullptr;
+
+    InsnBuilder *insnBuilder = nullptr;
+    OperandBuilder *opndBuilder = nullptr;
+
+    uint32 bbCnt = 0;
+    uint32 labelIdx = 0;               /* local label index number */
+    LabelNode *startLabel = nullptr;   /* start label of the function */
+    LabelNode *endLabel = nullptr;     /* end label of the function */
+    LabelNode *cleanupLabel = nullptr; /* label to indicate the entry of cleanup code. */
+    BB *firstBB = nullptr;
+    BB *cleanupBB = nullptr;
+    BB *cleanupEntryBB = nullptr;
+    BB *lastBB = nullptr;
+    BB *curBB = nullptr;
+    BB *dummyBB;  /* use this bb to add instructions when there is no curBB.
 */
+    BB *commonExitBB = nullptr;      /* this post-dominates all BBs */
+    Insn *volReleaseInsn = nullptr;  /* used to record the release insn for a volatile store */
+    MapleVector<BB*> exitBBVec;
+    MapleSet<regno_t> extendSet;     /* used to mark regs which are spilled as 32 bits but loaded as 64 bits. */
+    MapleUnorderedMap<LabelIdx, BB*> lab2BBMap;
+    BECommon &beCommon;
+    MemLayout *memLayout = nullptr;
+    RegisterInfo *targetRegInfo = nullptr;
+    MapleAllocator *funcScopeAllocator;
+    MapleMap<uint32, MIRSymbol*> emitStVec;  /* symbols that need to be emitted as local symbols, i.e. switch tables */
+    MapleUnorderedMap<LabelIdx, uint32> switchLabelCnt;  /* labels in switch tables */
+    std::map<const MIRSymbol*, LabelIdx> funcLocalSym2Label;
+#if TARGARM32
+    MapleVector<BB*> sortedBBs;
+    MapleVector<LiveRange*> lrVec;
+#endif  /* TARGARM32 */
+    MapleVector<CGFuncLoops*> loops;
+    MapleVector<LmbcFormalParamInfo*> lmbcParamVec;
+    int32 lmbcIntArgs = 0;
+    int32 lmbcFpArgs = 0;
+    uint32 lmbcTotalArgs = 0;
+    CGCFG *theCFG = nullptr;
+    FuncEmitInfo *funcEmitInfo = nullptr;
+    uint32 nextSpillLocation = 0;
+
+    const MapleString shortFuncName;
+    bool hasAsm = false;
+    bool useFP = true;
+    bool seenFP = true;
+
+    /* saves the stack protect kinds which can trigger stack protection */
+    uint8 stackProtectInfo = 0;
+};  /* class CGFunc */
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleFunction, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgEmission, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenProEpiLog, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+}  /* namespace maplebe */
+#endif  /* MAPLEBE_INCLUDE_CG_CGFUNC_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/datainfo.h b/ecmascript/mapleall/maple_be/include/cg/datainfo.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f23e6460780931ab610bbc8ad5fac3f7b95f902
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/datainfo.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_DATAINFO_H +#define MAPLEBE_INCLUDE_CG_DATAINFO_H +#include "maple_string.h" +#include "common_utils.h" +#include "mempool.h" +#include "mempool_allocator.h" + +namespace maplebe { +class DataInfo { + public: + DataInfo(uint32 bitNum, MapleAllocator &alloc) + : info(alloc.Adapter()) { + info.resize(bitNum / kWordSize + 1, 0); + } + DataInfo(const DataInfo &other, MapleAllocator &alloc) : info(other.info, alloc.Adapter()) {} + DataInfo &Clone(MapleAllocator &alloc) { + auto *dataInfo = alloc.New(*this, alloc); + return *dataInfo; + } + + ~DataInfo() = default; + + void SetBit(int64 bitNO) { + DEBUG_ASSERT(bitNO < info.size() * kWordSize, "Out of Range"); + info[static_cast(bitNO / kWordSize)] |= (1ULL << static_cast((bitNO % kWordSize))); + } + + void ResetBit(uint32 bitNO) { + info[bitNO / kWordSize] &= (~(1ULL << (bitNO % kWordSize))); + } + + bool TestBit(uint32 bitNO) const { + return (info[bitNO / kWordSize] & (1ULL << (bitNO % kWordSize))) != 0ULL; + } + + const uint64 &GetElem(uint32 index) const { + DEBUG_ASSERT(index < info.size(), "out of range"); + return info[index]; + } + + void SetElem(uint32 index, uint64 val) { + DEBUG_ASSERT(index < info.size(), "out of range"); + info[index] = val; + } + + bool NoneBit() const { + for (auto &data : info) { + if (data != 0ULL) { + return false; + } + } + return true; + } + + size_t Size() const { + return info.size() * kWordSize; + } + + const MapleVector &GetInfo() const { + return info; + } + + bool IsEqual(const DataInfo &secondInfo) const { + auto infoSize = static_cast(info.size()); + DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; ++i) { + if (info[i] != secondInfo.GetElem(i)) { + return false; + } + } + return true; + } + + bool IsEqual(const MapleVector &LiveInfoBak) const { + size_t infoSize = info.size(); + DEBUG_ASSERT(infoSize == LiveInfoBak.size(), "two dataInfo's size different"); + for (size_t i = 0; i != infoSize; ++i) { + if (info[i] != LiveInfoBak[i]) { + return false; + } + } + return true; + } + + void AndBits(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; ++i) { + info[i] &= secondInfo.GetElem(i); + } + } + + void OrBits(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; i++) { + info[i] |= secondInfo.GetElem(i); + } + } + + void OrDesignateBits(const DataInfo &secondInfo, uint32 infoIndex) { + DEBUG_ASSERT(infoIndex < secondInfo.GetInfo().size(), "out of secondInfo's range"); + DEBUG_ASSERT(infoIndex < info.size(), "out of secondInfo's range"); + info[infoIndex] |= secondInfo.GetElem(infoIndex); + } + + void EorBits(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; i++) { + info[i] ^= secondInfo.GetElem(i); + } + } + + /* if bit in secondElem is 1, bit in current DataInfo is set 0 */ + void Difference(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; i++) { + info[i] &= (~(secondInfo.GetElem(i))); + } + } + + void 
ResetAllBit() { + for (auto &data : info) { + data = 0ULL; + } + } + + void EnlargeCapacityToAdaptSize(uint32 bitNO) { + /* add one more size for each enlarge action */ + info.resize(bitNO / kWordSize + 1, 0); + } + + void GetNonZeroElemsIndex(std::set &index) { + auto infoSize = static_cast(info.size()); + for (int32 i = 0; i < infoSize; i++) { + if (info[i] != 0ULL) { + (void)index.insert(i); + } + } + } + + template + void GetBitsOfInfo(T &wordRes) const { + wordRes.clear(); + for (size_t i = 0; i != info.size(); ++i) { + uint32 result = 0; + uint64 word = info[i]; + uint32 offset = 0; + uint32 baseWord = 0; + bool firstTime = true; + while (word) { + int32 index = __builtin_ffsll(static_cast(word)); + if (index == 0) { + continue; + } + if (index == k64BitSize) { + /* when the highest bit is 1, the shift operation will cause error, need special treatment. */ + result = i * kWordSize + (index - 1); + (void)wordRes.insert(result); + break; + } + if (firstTime) { + offset = static_cast(index - 1); + baseWord = i * kWordSize; + firstTime = false; + } else { + offset = static_cast(index); + baseWord = 0; + } + result += baseWord + offset; + (void)wordRes.insert(result); + word = word >> static_cast(index); + } + } + } + + void ClearDataInfo() { + info.clear(); + } + + private: + /* long type has 8 bytes, 64 bits */ + static constexpr int32 kWordSize = 64; + MapleVector info; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_INSN_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/dbg.def b/ecmascript/mapleall/maple_be/include/cg/dbg.def new file mode 100644 index 0000000000000000000000000000000000000000..9c8a9fa94fa6355a61369f7c96d20631f642a644 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/dbg.def @@ -0,0 +1,16 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* .loc fileNum lineNum */ +DBG_DEFINE(loc, , 2, Immediate, Immediate, Undef) diff --git a/ecmascript/mapleall/maple_be/include/cg/dbg.h b/ecmascript/mapleall/maple_be/include/cg/dbg.h new file mode 100644 index 0000000000000000000000000000000000000000..2ac26b850cd428ea3f5996fdfaec8f5831eea479 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/dbg.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_DBG_H +#define MAPLEBE_INCLUDE_CG_DBG_H + +#include "insn.h" +#include "mempool_allocator.h" +#include "mir_symbol.h" +#include "debug_info.h" + +namespace mpldbg { +using namespace maple; + +/* https://sourceware.org/binutils/docs-2.28/as/Loc.html */ +enum LocOpt { kBB, kProEnd, kEpiBeg, kIsStmt, kIsa, kDisc }; + +enum DbgOpcode : uint8 { +#define DBG_DEFINE(k, sub, n, o0, o1, o2) OP_DBG_##k##sub, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) OP_ARM_DIRECTIVES_##k##sub, +#include "dbg.def" +#undef DBG_DEFINE +#undef ARM_DIRECTIVES_DEFINE + kOpDbgLast +}; + +class DbgInsn : public maplebe::Insn { + public: + DbgInsn(MemPool &memPool, maplebe::MOperator op) : Insn(memPool, op) {} + + DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0) : Insn(memPool, op, opnd0) {} + + DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1) + : Insn(memPool, op, opnd0, opnd1) {} + + DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1, + maplebe::Operand &opnd2) + : Insn(memPool, op, opnd0, opnd1, opnd2) {} + + ~DbgInsn() = default; + + bool IsMachineInstruction() const override { + return false; + } + + void Dump() const override; + +#if DEBUG + void Check() const override; +#endif + + bool IsTargetInsn() const override{ + return false; + } + + bool IsDbgInsn() const override { + return true; + } + + bool IsRegDefined(maplebe::regno_t regNO) const override { + CHECK_FATAL(false, "dbg insn do not def regs"); + return false; + } + + std::set GetDefRegs() const override{ + CHECK_FATAL(false, "dbg insn do not def regs"); + return std::set(); + } + + uint32 GetBothDefUseOpnd() const override { + return maplebe::kInsnMaxOpnd; + } + + uint32 GetLoc() const; + + private: + DbgInsn &operator=(const DbgInsn&); +}; + +class ImmOperand : public maplebe::OperandVisitable{ + public: + explicit ImmOperand(int64 val) : OperandVisitable(kOpdImmediate, 32), val(val) {} + + ~ImmOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + int64 GetVal() const { + return val; + } + + private: + int64 val; +}; + +class DBGOpndEmitVisitor : public maplebe::OperandVisitorBase, + public maplebe::OperandVisitor { + public: + explicit DBGOpndEmitVisitor(maplebe::Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~DBGOpndEmitVisitor() = default; + protected: + maplebe::Emitter &emitter; + private: + void Visit(ImmOperand *v) final; +}; + +} /* namespace mpldbg */ + +#endif /* MAPLEBE_INCLUDE_CG_DBG_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/dependence.h b/ecmascript/mapleall/maple_be/include/cg/dependence.h new file mode 100644 index 0000000000000000000000000000000000000000..a36d9911d34b29cc91ad0b1154220abe4b9e422b --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/dependence.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_DEPENDENCE_H +#define MAPLEBE_INCLUDE_CG_DEPENDENCE_H + +#include "deps.h" +#include "cgbb.h" + +namespace maplebe { +using namespace maple; +namespace { +constexpr maple::uint32 kMaxDependenceNum = 200; +}; + + +class DepAnalysis { + public: + DepAnalysis(CGFunc &func, MemPool &memPool, MAD &mad, bool beforeRA) + : cgFunc(func), memPool(memPool), alloc(&memPool), beforeRA(beforeRA), mad(mad), + lastComments(alloc.Adapter()) {} + + virtual ~DepAnalysis() = default; + + virtual void Run(BB &bb, MapleVector &nodes) = 0; + + const MapleVector &GetLastComments() const { + return lastComments; + } + virtual void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) = 0; + virtual void CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine = false) = 0; + virtual void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) = 0; + + virtual const std::string &GetDepTypeName(DepType depType) const = 0; + virtual void DumpDepNode(DepNode &node) const = 0; + virtual void DumpDepLink(DepLink &link, const DepNode *node) const = 0; + + protected: + CGFunc &cgFunc; + MemPool &memPool; + MapleAllocator alloc; + bool beforeRA; + MAD &mad; + MapleVector lastComments; + + virtual void Init(BB &bb, MapleVector &nodes) = 0; + virtual void ClearAllDepData() = 0; + virtual void AnalysisAmbiInsns(BB &bb) = 0; + virtual void AppendRegUseList(Insn &insn, regno_t regNO) = 0; + virtual void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) = 0; + virtual void RemoveSelfDeps(Insn &insn) = 0; + virtual void BuildDepsUseReg(Insn &insn, regno_t regNO) = 0; + virtual void BuildDepsDefReg(Insn &insn, regno_t regNO) = 0; + virtual void BuildDepsAmbiInsn(Insn &insn) = 0; + virtual void BuildDepsMayThrowInsn(Insn &insn) = 0; + virtual void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) = 0; + virtual void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) = 0; + virtual void BuildDepsMemBar(Insn &insn) = 0; + virtual void BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) = 0; + virtual void BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) = 0; + virtual void BuildDepsAccessStImmMem(Insn &insn, bool isDest) = 0; + virtual void BuildCallerSavedDeps(Insn &insn) = 0; + virtual void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) = 0; + virtual void BuildStackPassArgsDeps(Insn &insn) = 0; + virtual void BuildDepsDirtyStack(Insn &insn) = 0; + virtual void BuildDepsUseStack(Insn &insn) = 0; + virtual void BuildDepsDirtyHeap(Insn &insn) = 0; + virtual DepNode *BuildSeparatorNode() = 0; + virtual bool IfInAmbiRegs(regno_t regNO) const = 0; + virtual bool IsFrameReg(const RegOperand&) const = 0; +}; +} + +#endif /* MAPLEBE_INCLUDE_CG_DEPENDENCE_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/deps.h b/ecmascript/mapleall/maple_be/include/cg/deps.h new file mode 100644 index 0000000000000000000000000000000000000000..9127011f8a491286b9813955d5be2bab644ad165 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/deps.h @@ -0,0 +1,465 @@ +/* + * Copyright (c) [2020] Huawei Technologies 
Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_DEPS_H +#define MAPLEBE_INCLUDE_CG_DEPS_H + +#include "mad.h" +#include "pressure.h" +#include +namespace maplebe { +#define PRINT_STR_VAL(STR, VAL) \ + LogInfo::MapleLogger() << std::left << std::setw(12) << STR << VAL << " | "; +#define PRINT_VAL(VAL) \ + LogInfo::MapleLogger() << std::left << std::setw(12) << VAL << " | "; + +enum DepType : uint8 { + kDependenceTypeTrue, + kDependenceTypeOutput, + kDependenceTypeAnti, + kDependenceTypeControl, + kDependenceTypeMembar, + kDependenceTypeThrow, + kDependenceTypeSeparator, + kDependenceTypeNone +}; + +inline const std::array kDepTypeName = { + "true-dep", + "output-dep", + "anti-dep", + "control-dep", + "membar-dep", + "throw-dep", + "separator-dep", + "none-dep", +}; + +enum NodeType : uint8 { + kNodeTypeNormal, + kNodeTypeSeparator, + kNodeTypeEmpty +}; + +enum ScheduleState : uint8 { + kNormal, + kReady, + kScheduled, +}; + +class DepNode; + +class DepLink { + public: + DepLink(DepNode &fromNode, DepNode &toNode, DepType typ) : from(fromNode), to(toNode), depType(typ), latency(0) {} + virtual ~DepLink() = default; + + DepNode &GetFrom() const { + return from; + } + DepNode &GetTo() const { + return to; + } + void SetDepType(DepType dType) { + depType = dType; + } + DepType GetDepType() const { + return depType; + } + void SetLatency(uint32 lat) { + latency = lat; + } + uint32 GetLatency() const { + return latency; + } + + private: + DepNode &from; + DepNode &to; + DepType depType; + uint32 latency; +}; + +class DepNode { + public: + bool CanBeScheduled() const; + void OccupyUnits(); + uint32 GetUnitKind() const; + + DepNode(Insn &insn, MapleAllocator &alloc) + : insn(&insn), units(nullptr), reservation(nullptr), unitNum(0), + eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), index(0), simulateCycle(0), + schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), + preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), + cfiInsns(alloc.Adapter()), clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), + defRegnos(alloc.Adapter()), regPressure(nullptr) {} + + DepNode(Insn &insn, MapleAllocator &alloc, Unit * const *unit, uint32 num, Reservation &rev) + : insn(&insn), units(unit), + reservation(&rev), unitNum(num), eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), + index(0), simulateCycle(0), schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), + preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), cfiInsns(alloc.Adapter()), + clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), defRegnos(alloc.Adapter()), + regPressure(nullptr) {} + + virtual ~DepNode() = default; + + Insn *GetInsn() const { + return insn; + } + void SetInsn(Insn &rvInsn) { + insn = &rvInsn; + } + void SetUnits(Unit * const *unit) { + units = unit; + } + const Unit *GetUnitByIndex(uint32 idx) const { + DEBUG_ASSERT(index < unitNum, "out of units"); + 
return units[idx]; + } + Reservation *GetReservation() const { + return reservation; + } + void SetReservation(Reservation &rev) { + reservation = &rev; + } + uint32 GetUnitNum() const { + return unitNum; + } + void SetUnitNum(uint32 num) { + unitNum = num; + } + uint32 GetEStart() const { + return eStart; + } + void SetEStart(uint32 start) { + eStart = start; + } + uint32 GetLStart() const { + return lStart; + } + void SetLStart(uint32 start) { + lStart = start; + } + uint32 GetVisit() const { + return visit; + } + void SetVisit(uint32 visitVal) { + visit = visitVal; + } + void IncreaseVisit() { + ++visit; + } + NodeType GetType() const { + return type; + } + void SetType(NodeType nodeType) { + type = nodeType; + } + ScheduleState GetState() const { + return state; + } + void SetState(ScheduleState scheduleState) { + state = scheduleState; + } + uint32 GetIndex() const { + return index; + } + void SetIndex(uint32 idx) { + index = idx; + } + void SetSchedCycle(uint32 cycle) { + schedCycle = cycle; + } + uint32 GetSchedCycle() const { + return schedCycle; + } + void SetSimulateCycle(uint32 cycle) { + simulateCycle = cycle; + } + uint32 GetSimulateCycle() const { + return simulateCycle; + } + void SetBruteForceSchedCycle(uint32 cycle) { + bruteForceSchedCycle = cycle; + } + uint32 GetBruteForceSchedCycle() const { + return bruteForceSchedCycle; + } + void SetValidPredsSize(uint32 validSize) { + validPredsSize = validSize; + } + uint32 GetValidPredsSize() const { + return validPredsSize; + } + void DescreaseValidPredsSize() { + --validPredsSize; + } + void IncreaseValidPredsSize() { + ++validPredsSize; + } + uint32 GetValidSuccsSize() const { + return validSuccsSize; + } + void SetValidSuccsSize(uint32 size) { + validSuccsSize = size; + } + const MapleVector &GetPreds() const { + return preds; + } + void ReservePreds(size_t size) { + preds.reserve(size); + } + void AddPred(DepLink &depLink) { + preds.emplace_back(&depLink); + } + void RemovePred() { + preds.pop_back(); + } + const MapleVector &GetSuccs() const{ + return succs; + } + void ReserveSuccs(size_t size) { + succs.reserve(size); + } + void AddSucc(DepLink &depLink) { + succs.emplace_back(&depLink); + } + void RemoveSucc() { + succs.pop_back(); + } + const MapleVector &GetComments() const { + return comments; + } + void SetComments(MapleVector com) { + comments = com; + } + void AddComments(Insn &insn) { + comments.emplace_back(&insn); + } + void ClearComments() { + comments.clear(); + } + const MapleVector &GetCfiInsns() const { + return cfiInsns; + } + void SetCfiInsns(MapleVector insns) { + cfiInsns = insns; + } + void AddCfiInsn(Insn &insn) { + cfiInsns.emplace_back(&insn); + } + void ClearCfiInsns() { + cfiInsns.clear(); + } + const MapleVector &GetClinitInsns() const { + return clinitInsns; + } + void SetClinitInsns(MapleVector insns) { + clinitInsns = insns; + } + void AddClinitInsn(Insn &insn) { + clinitInsns.emplace_back(&insn); + } + const RegPressure *GetRegPressure() const { + return regPressure; + } + void SetRegPressure(RegPressure &pressure) { + regPressure = &pressure; + } + void DumpRegPressure() const { + if (regPressure) { + regPressure->DumpRegPressure(); + } + } + void InitPressure() const { + regPressure->InitPressure(); + } + const MapleVector &GetPressure() const { + return regPressure->GetPressure(); + } + + void IncPressureByIndex(int32 idx) const { + regPressure->IncPressureByIndex(static_cast(idx)); + } + void DecPressureByIndex(int32 idx) const { + regPressure->DecPressureByIndex(static_cast(idx)); + } + + 
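  /* [Editorial note, not part of the original patch] The accessors above and
     below delegate to the attached RegPressure object and assume that
     SetRegPressure() has already run; only DumpRegPressure() guards against a
     null regPressure. A minimal setup sketch (the RegPressure constructor
     arguments are assumed, not taken from this patch):

       auto *rp = memPool.New<RegPressure>(alloc);
       node.SetRegPressure(*rp);
       node.InitPressure();                 // safe only after SetRegPressure
       node.IncPressureByIndex(kRegTyInt);  // bump the integer register class
  */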
const MapleVector &GetDeadDefNum() const { + return regPressure->GetDeadDefNum(); + } + void IncDeadDefByIndex(int32 idx) const { + regPressure->IncDeadDefByIndex(static_cast(idx)); + } + + void SetRegUses(RegList ®List) const { + regPressure->SetRegUses(®List); + } + void SetRegDefs(size_t idx, RegList *regList) const { + regPressure->SetRegDefs(idx, regList); + } + + int32 GetIncPressure() const { + return regPressure->GetIncPressure(); + } + void SetIncPressure(bool value) const { + regPressure->SetIncPressure(value); + } + int32 GetMaxDepth() const { + return regPressure->GetMaxDepth(); + } + void SetMaxDepth(int32 value) const { + regPressure->SetMaxDepth(value); + } + int32 GetNear() const { + return regPressure->GetNear(); + } + void SetNear(int32 value) const { + regPressure->SetNear(value); + } + int32 GetPriority() const { + return regPressure->GetPriority(); + } + void SetPriority(int32 value) const { + regPressure->SetPriority(value); + } + RegList *GetRegUses(size_t idx) const { + return regPressure->GetRegUses(idx); + } + void InitRegUsesSize(size_t size) const { + regPressure->InitRegUsesSize(size); + } + RegList *GetRegDefs(size_t idx) const { + return regPressure->GetRegDefs(idx); + } + void InitRegDefsSize(size_t size) const { + regPressure->InitRegDefsSize(size); + } + + void SetNumCall(int32 value) const { + regPressure->SetNumCall(value); + } + + int32 GetNumCall() const { + return regPressure->GetNumCall(); + } + + void SetHasNativeCallRegister(bool value) const { + regPressure->SetHasNativeCallRegister(value); + } + + bool GetHasNativeCallRegister() const { + return regPressure->GetHasNativeCallRegister(); + } + + const Insn *GetLocInsn() const { + return locInsn; + } + void SetLocInsn(const Insn &insn) { + locInsn = &insn; + } + + /* printf dep-node's information of scheduling */ + void DumpSchedInfo() const { + PRINT_STR_VAL("estart: ", eStart); + PRINT_STR_VAL("lstart: ", lStart); + PRINT_STR_VAL("visit: ", visit); + PRINT_STR_VAL("state: ", state); + PRINT_STR_VAL("index: ", index); + PRINT_STR_VAL("validPredsSize: ", validPredsSize); + PRINT_STR_VAL("validSuccsSize: ", validSuccsSize); + LogInfo::MapleLogger() << '\n'; + + constexpr int32 width = 12; + LogInfo::MapleLogger() << std::left << std::setw(width) << "usereg: "; + for (const auto &useReg : useRegnos) { + LogInfo::MapleLogger() << "R" << useReg << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << std::left << std::setw(width) << "defreg: "; + for (const auto &defReg : defRegnos) { + LogInfo::MapleLogger() << "R" << defReg << " "; + } + LogInfo::MapleLogger() << "\n"; + } + + void SetHasPreg(bool value) const { + regPressure->SetHasPreg(value); + } + + bool GetHasPreg() const { + return regPressure->GetHasPreg(); + } + + void AddUseReg(regno_t reg) { + useRegnos.emplace_back(reg); + } + + const MapleVector &GetUseRegnos() const { + return useRegnos; + } + + void AddDefReg(regno_t reg) { + defRegnos.emplace_back(reg); + } + + const MapleVector &GetDefRegnos() const { + return defRegnos; + } + + private: + Insn *insn; + Unit * const *units; + Reservation *reservation; + uint32 unitNum; + uint32 eStart; + uint32 lStart; + uint32 visit; + NodeType type; + ScheduleState state; + uint32 index; + uint32 simulateCycle; + uint32 schedCycle; + uint32 bruteForceSchedCycle; + + /* For scheduling, denotes unscheduled preds/succs number. */ + uint32 validPredsSize; + uint32 validSuccsSize; + + /* Dependence links. 
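     [Editorial sketch, not part of the original patch] Each edge is a single
     DepLink shared by both endpoints: the producer stores it in succs and the
     consumer stores the same object in preds, so GetFrom()/GetTo() agree from
     either side. Assuming two nodes already built by the analysis:

       DepLink *link = memPool.New<DepLink>(producer, consumer, kDependenceTypeTrue);
       link->SetLatency(1);   // latency value is illustrative
       producer.AddSucc(*link);
       consumer.AddPred(*link);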
+   */
+  MapleVector<DepLink*> preds;
+  MapleVector<DepLink*> succs;
+
+  /* Non-machine instructions prior to insn, such as comments. */
+  MapleVector<Insn*> comments;
+
+  /* Non-machine instructions that follow insn, such as cfi instructions. */
+  MapleVector<Insn*> cfiInsns;
+
+  /* Special instructions that follow insn, such as clinit instructions. */
+  MapleVector<Insn*> clinitInsns;
+
+  /* loc insn which indicates the insn's location in the source file */
+  const Insn *locInsn;
+
+  MapleVector<regno_t> useRegnos;
+  MapleVector<regno_t> defRegnos;
+
+  /* For register pressure analysis */
+  RegPressure *regPressure;
+};
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_DEPS_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/ebo.h b/ecmascript/mapleall/maple_be/include/cg/ebo.h
new file mode 100644
index 0000000000000000000000000000000000000000..340b3b840541c6f7f0cf5760b9ee78b1c9916993
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/ebo.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_EBO_H
+#define MAPLEBE_INCLUDE_CG_EBO_H
+
+#include "cg_phase.h"
+#include "cgbb.h"
+#include "live.h"
+#include "loop.h"
+
+namespace maplebe {
+namespace {
+constexpr uint32 kEboDefaultMemHash = 0;
+constexpr uint32 kEboNoAliasMemHash = 1;
+constexpr uint32 kEboSpillMemHash = 2;
+constexpr uint32 kEboCopyInsnHash = 3;
+constexpr uint32 kEboReservedInsnHash = 4;
+constexpr uint32 kEboMaxExpInsnHash = 1024;
+constexpr uint32 kEboMaxOpndHash = 521;
+constexpr uint32 kEboMaxInsnHash = kEboReservedInsnHash + kEboMaxExpInsnHash;
+};
+
+#define EBO_EXP_INSN_HASH(val) ((kEboMaxExpInsnHash - 1ULL) & (static_cast<uint64>(val) >> 6))
+
+/* forward decls */
+class InsnInfo;
+
+struct OpndInfo {
+  explicit OpndInfo(Operand &opnd) : opnd(&opnd) {}
+
+  virtual ~OpndInfo() = default;
+
+  int32 hashVal = 0;  /* A mem operand is placed in the hash table; this is its hashVal, otherwise -1. */
+  Operand *opnd;  /* Operand */
+  Operand *replacementOpnd = nullptr;  /* Rename opnd with this new name. */
+  OpndInfo *replacementInfo = nullptr;  /* Rename opnd with this info. */
+  BB *bb = nullptr;  /* The defining bb. */
+  Insn *insn = nullptr;  /* The defining insn. */
+  InsnInfo *insnInfo = nullptr;
+  bool redefinedInBB = false;  /* A following definition exists in bb. */
+  bool redefined = false;  /* A following definition exists. */
+  Insn *redefinedInsn = nullptr;  /* Next defining insn if redefinedInBB is true */
+#if TARGARM32
+  bool mayReDef = false;
+#endif
+  OpndInfo *same = nullptr;  /* Other definitions of the same operand. */
+  OpndInfo *prev = nullptr;
+  OpndInfo *next = nullptr;
+  OpndInfo *hashNext = nullptr;
+  int32 refCount = 0;  /* Number of references to the operand.
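     Maintained through Ebo::IncRef/DecRef; definitions whose refCount stays
     at zero are what RemoveUnusedInsns looks for. [Editorial sketch, not part
     of the original patch] Walking the `same` chain that links other
     definitions of one operand:

       for (OpndInfo *p = GetOpndInfo(opnd, hashVal); p != nullptr; p = p->same) {
         if (p->bb == &bb && !p->redefinedInBB) {
           break;  // a definition of opnd that is still valid in this bb
         }
       }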
*/ +}; + +struct MemOpndInfo : public OpndInfo { + explicit MemOpndInfo(Operand &opnd) : OpndInfo(opnd) {} + + ~MemOpndInfo() override = default; + + OpndInfo *GetBaseInfo() const { + return base; + } + + OpndInfo *GetOffsetInfo() const{ + return offset; + } + + void SetBaseInfo(OpndInfo &baseInfo) { + base = &baseInfo; + } + + void SetOffsetInfo(OpndInfo &offInfo) { + offset = &offInfo; + } + + private: + OpndInfo *base = nullptr; + OpndInfo *offset = nullptr; +}; + +class InsnInfo { + public: + InsnInfo(MemPool &memPool, Insn &insn) + : alloc(&memPool), bb(insn.GetBB()), insn(&insn), result(alloc.Adapter()), + origOpnd(alloc.Adapter()), optimalOpnd(alloc.Adapter()) {} + + virtual ~InsnInfo() = default; + MapleAllocator alloc; + uint32 hashIndex = 0; + bool mustNotBeRemoved = false; /* Some condition requires this insn. */ + BB *bb; /* The defining bb. */ + Insn *insn; /* The defining insn. */ + InsnInfo *same = nullptr; /* Other insns with the same hash value. */ + InsnInfo *prev = nullptr; + InsnInfo *next = nullptr; + MapleVector result; /* Result array. */ + MapleVector origOpnd; + MapleVector optimalOpnd; +}; + +class Ebo { + public: + Ebo(CGFunc &func, MemPool &memPool, LiveAnalysis *live, bool before, const std::string &phase) + : cgFunc(&func), + beforeRegAlloc(before), + phaseName(phase), + live(live), + eboMp(&memPool), + eboAllocator(&memPool), + visitedBBs(eboAllocator.Adapter()), + vRegInfo(eboAllocator.Adapter()), + exprInfoTable(eboAllocator.Adapter()), + insnInfoTable(eboAllocator.Adapter()) {} + + virtual ~Ebo() = default; + + MemOpndInfo *GetMemInfo(InsnInfo &insnInfo); + void SetInsnInfo(uint32 hashVal, InsnInfo &info) { + DEBUG_ASSERT(hashVal < insnInfoTable.size(), "hashVal out of insnInfoTable range"); + insnInfoTable.at(hashVal) = &info; + } + + void IncRef(OpndInfo &info) const { + ++info.refCount; + } + + void DecRef(OpndInfo &info) const { + --info.refCount; + } + + void EnlargeSpaceForLA(Insn &csetInsn); + bool IsSaveReg(const Operand &opnd) const; + bool IsFrameReg(Operand &opnd) const; + bool OperandEqual(const Operand &op1, const Operand &op2) const; + Operand *GetZeroOpnd(uint32 size) const; + bool IsPhysicalReg(const Operand &opnd) const; + bool HasAssignedReg(const Operand &opnd) const; + bool IsOfSameClass(const Operand &op0, const Operand &op1) const; + bool OpndAvailableInBB(const BB &bb, OpndInfo *info); + bool ForwardPropCheck(const Operand *opndReplace, const OpndInfo &opndInfo, const Operand &opnd, Insn &insn); + bool RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd, + const OpndInfo *tmpInfo); + bool IsNotVisited(const BB &bb) { + return !visitedBBs.at(bb.GetId()); + }; + + void SetBBVisited(const BB &bb) { + visitedBBs.at(bb.GetId()) = true; + }; + + void UpdateOpndInfo(const Operand &opnd, OpndInfo &opndInfo, OpndInfo *newInfo, int32 hashVal); + void SetOpndInfo(const Operand &opnd, OpndInfo *opndInfo, int32 hashVal); + bool RegistersIdentical(const Operand &op0, const Operand &op1) const; + OpndInfo *GetOpndInfo(const Operand &opnd, int32 hashVal) const; + OpndInfo *GetNewOpndInfo(BB &bb, Insn *insn, Operand &opnd, int32 hashVal); + OpndInfo *OperandInfoUse(BB ¤tBB, Operand &localOpnd); + InsnInfo *GetNewInsnInfo(Insn &insn); + int32 ComputeOpndHash(const Operand &opnd) const; + uint32 ComputeHashVal( Insn &insn, const MapleVector &opndInfos) const; + void MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const; + void RemoveInsn(InsnInfo &info); + void RemoveUses(uint32 opndNum, const 
MapleVector &origInfo); + void HashInsn(Insn &insn, const MapleVector &origInfo, const MapleVector &opndInfos); + void BuildAllInfo(BB &bb); + InsnInfo *LocateInsnInfo(const OpndInfo &info); + void RemoveUnusedInsns(BB &bb, bool normal); + void UpdateNextInfo(const OpndInfo &opndInfo); + void BackupOpndInfoList(OpndInfo *saveLast); + void BackupInsnInfoList(InsnInfo *saveLast); + void AddBB2EB(BB &bb); + void EboInit(); + void EboProcessSingleBB(); + void EboProcess(); + void Run(); + std::string PhaseName() const { + return phaseName; + } + + protected: + CGFunc *cgFunc; + bool beforeRegAlloc; /* True if perform Ebo before register allocation. */ + virtual OpndInfo *OperandInfoDef(BB ¤tBB, Insn ¤tInsn, Operand &localOpnd) = 0; + virtual const RegOperand &GetRegOperand(const Operand &opnd) const = 0; + virtual bool IsGlobalNeeded(Insn &insn) const = 0; + virtual bool IsDecoupleStaticOp(Insn &insn) const = 0; + virtual bool IsFmov(const Insn &insn) const = 0; + virtual bool SpecialSequence(Insn &insn, const MapleVector &origInfos) = 0; + virtual bool DoConstProp(Insn &insn, uint32 i, Operand &opnd) = 0; + virtual bool Csel2Cset(Insn &insn, const MapleVector &opnds) = 0; + virtual bool SimplifyConstOperand(Insn &insn, const MapleVector &opnds, + const MapleVector &opndInfo) = 0; + virtual int32 GetOffsetVal(const MemOperand &mem) const = 0; + virtual bool OperandEqSpecial(const Operand &op1, const Operand &op2) const = 0; + virtual void BuildCallerSaveRegisters() = 0; + virtual void DefineAsmRegisters(InsnInfo &insnInfo) = 0; + virtual void DefineCallerSaveRegisters(InsnInfo &insnInfo) = 0; + virtual void DefineReturnUseRegister(Insn &insn) = 0; + virtual void DefineCallUseSpecialRegister(Insn &insn) = 0; + virtual void DefineClinitSpecialRegisters(InsnInfo &insnInfo) = 0; + virtual bool IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const = 0; + virtual bool IsPseudoRet(Insn &insn) const = 0; + virtual bool ChangeLdrMop(Insn &insn, const Operand &opnd) const = 0; + virtual bool IsAdd(const Insn &insn) const = 0; + virtual bool IsClinitCheck(const Insn &insn) const = 0; + virtual bool IsLastAndBranch(BB &bb, Insn &insn) const = 0; + virtual bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const = 0; + virtual bool ResIsNotDefAndUse(Insn &insn) const = 0; + virtual bool LiveOutOfBB(const Operand &opnd, const BB &bb) const = 0; + virtual bool IsInvalidReg(const RegOperand &opnd) const = 0; + virtual bool IsZeroRegister(const Operand &opnd) const = 0; + virtual bool IsConstantImmOrReg(const Operand &opnd) const = 0; + OpndInfo *BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex); + OpndInfo *BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, MapleVector &origInfos); + bool ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, OpndInfo *&opndInfo, + MapleVector &origInfos); + void SimplifyInsn(Insn &insn, bool &insnReplaced, bool opndsConstant, const MapleVector &opnds, + const MapleVector &opndInfos, const MapleVector &origInfos); + void FindRedundantInsns(BB &bb, Insn *&insn, const Insn *prev, bool insnReplaced, + MapleVector &opnds, MapleVector &opndInfos, + const MapleVector &origInfos); + void PreProcessSpecialInsn(Insn &insn); + + std::string phaseName; + LiveAnalysis *live; + uint32 bbNum = 0; /* bb numbers for an extend block. 
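     [Editorial note, not part of the original patch] For orientation, the
     EBO_EXP_INSN_HASH macro declared at the top of this header folds an
     operand hash sum into the 0..kEboMaxExpInsnHash-1 bucket range:

       uint32 h = EBO_EXP_INSN_HASH(0x12345);
       // (0x12345 >> 6) & (1024 - 1)  ==  0x48D & 0x3FF  ==  0x08D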
*/ + MemPool *eboMp; + MapleAllocator eboAllocator; + MapleVector visitedBBs; + OpndInfo *firstOpndInfo = nullptr; + OpndInfo *lastOpndInfo = nullptr; + InsnInfo *firstInsnInfo = nullptr; + InsnInfo *lastInsnInfo = nullptr; + MapleUnorderedMap vRegInfo; + MapleVector exprInfoTable; + MapleVector insnInfoTable; + bool optSuccess = false; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgEbo0, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgEbo1, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgPostEbo, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_EBO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/eh_func.h b/ecmascript/mapleall/maple_be/include/cg/eh_func.h new file mode 100644 index 0000000000000000000000000000000000000000..60d2fe975ecf83135037e47da985a49ecaa8aab0 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/eh_func.h @@ -0,0 +1,204 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_EH_EH_FUNC_H +#define MAPLEBE_INCLUDE_EH_EH_FUNC_H +#include "mir_parser.h" +#include "mir_function.h" +#include "lsda.h" +#include "cg_phase.h" +#include "maple_phase.h" + +namespace maplebe { +class EHTry { + public: + EHTry(MapleAllocator &alloc, TryNode &tryNode) + : tryNode(&tryNode), + catchVec(alloc.Adapter()) {} + ~EHTry() = default; + + TryNode *GetTryNode() const { + return tryNode; + } + + void SetEndtryNode(StmtNode &endtryNode) { + this->endTryNode = &endtryNode; + } + + StmtNode *GetEndtryNode() { + return endTryNode; + } + + void SetFallthruGoto(StmtNode *fallthruGoto) { + this->fallThroughGoto = fallthruGoto; + } + + StmtNode *GetFallthruGoto() { + return fallThroughGoto; + } + + size_t GetCatchVecSize() const { + return catchVec.size(); + } + + void PushBackCatchVec(CatchNode &catchNode) { + catchVec.emplace_back(&catchNode); + } + + CatchNode *GetCatchNodeAt(size_t pos) const { + CHECK_FATAL(pos < GetCatchVecSize(), "pos is out of range."); + return catchVec.at(pos); + } + + void SetLSDACallSite(LSDACallSite &lsdaCallSite) { + this->lsdaCallSite = &lsdaCallSite; + } + + void SetCSAction(uint32 action) const { + lsdaCallSite->csAction = action; + } + + void DumpEHTry(const MIRModule &mirModule); + + private: + TryNode *tryNode; + StmtNode *endTryNode = nullptr; + StmtNode *fallThroughGoto = nullptr; /* no throw in the try block, the goto stmt to the fall through */ + MapleVector catchVec; + LSDACallSite *lsdaCallSite = nullptr; /* one try has a callsite */ +}; + +class EHThrow { + public: + explicit EHThrow(UnaryStmtNode &rtNode) + : rethrow(&rtNode) {} + ~EHThrow() = default; + + bool IsUnderTry() const { + return javaTry != nullptr; + } + + bool HasLSDA() const { + return startLabel != nullptr; + } + + const UnaryStmtNode *GetRethrow() const { + return rethrow; + } + + void SetJavaTry(EHTry *javaTry) { + this->javaTry = javaTry; + } + + LabelNode *GetStartLabel() { + return startLabel; + } + + LabelNode *GetEndLabel() { + return endLabel; + } + + void Lower(CGFunc &cgFunc); + void ConvertThrowToRethrow(CGFunc 
&cgFunc); + void ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg); + + private: + UnaryStmtNode *rethrow; /* must be a throw stmt */ + EHTry *javaTry = nullptr; /* the try statement wrapping this throw */ + LabelNode *startLabel = nullptr; /* the label that "MCC_RethrowException" or "MCC_ThrowException" begin */ + LabelNode *endLabel = nullptr; /* the label that "MCC_RethrowException" or "MCC_ThrowException" end */ +}; + +class EHFunc { + public: + static constexpr uint8 kTypeEncoding = 0x9b; /* same thing as LSDAHeader.kTypeEncoding */ + explicit EHFunc(CGFunc &func); + ~EHFunc() = default; + + void CollectEHInformation(std::vector> &catchVec); + void InsertEHSwitchTable(); + void CreateLSDA(); + bool NeedFullLSDA() const; + bool NeedFastLSDA() const; + void InsertCxaAfterEachCatch(const std::vector> &catchVec); + void GenerateCleanupLabel(); + void MergeCatchToTry(const std::vector> &catchVec); + void BuildEHTypeTable(const std::vector> &catchVec); + void LowerThrow(); /* for non-personality function */ + void CreateTypeInfoSt(); + void DumpEHFunc() const; + + bool HasThrow() const { + return !rethrowVec.empty(); + } + + void AddTry(EHTry &ehTry) { + tryVec.emplace_back(&ehTry); + } + + size_t GetEHTyTableSize() const { + return ehTyTable.size(); + } + + TyIdx &GetEHTyTableMember(int32 index) { + CHECK_FATAL(static_cast(index) < ehTyTable.size(), "out of ehTyTable"); + return ehTyTable[index]; + } + + LSDAHeader *GetLSDAHeader() { + return lsdaHeader; + } + + LSDACallSiteTable *GetLSDACallSiteTable() { + return lsdaCallSiteTable; + } + + const LSDACallSiteTable *GetLSDACallSiteTable() const { + return lsdaCallSiteTable; + } + + const LSDAActionTable *GetLSDAActionTable() const { + return lsdaActionTable; + } + + void AddRethrow(EHThrow &rethrow) { + rethrowVec.emplace_back(&rethrow); + } + + private: + void CreateLSDAAction(); + void InsertDefaultLabelAndAbortFunc(BlockNode &blkNode, SwitchNode &switchNode, + const StmtNode &beforeEndLabel); + void FillSwitchTable(SwitchNode &switchNode, const EHTry &ehTry); + void CreateLSDAHeader(); + void FillLSDACallSiteTable(); + LabelIdx CreateLabel(const std::string &cstr); + bool HasTry() const; + + CGFunc *cgFunc; + LabelIdx labelIdx = 0; + MapleVector tryVec; /* try stmt node */ + MapleVector ehTyTable; /* the type that would emit in LSDA */ + MapleMap ty2IndexTable; /* use the TyIdx to get the index of ehTyTable; */ + LSDAHeader *lsdaHeader = nullptr; + LSDACallSiteTable *lsdaCallSiteTable = nullptr; + LSDAActionTable *lsdaActionTable = nullptr; + MapleVector rethrowVec; /* EHRethrow */ +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgBuildEHFunc, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_EH_EH_FUNC_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/emit.h b/ecmascript/mapleall/maple_be/include/cg/emit.h new file mode 100644 index 0000000000000000000000000000000000000000..77dd94b2654dc05232d37036389a57eba4acaf54 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/emit.h @@ -0,0 +1,413 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_EMIT_H +#define MAPLEBE_INCLUDE_CG_EMIT_H + +/* C++ headers */ +#include +#include +#include +#include +#include "isa.h" +#include "lsda.h" +#include "asm_info.h" +#include "cg.h" + +/* Maple IR headers */ +#include "mir_module.h" +#include "mir_const.h" +#include "mempool_allocator.h" +#include "muid_replacement.h" +#include "namemangler.h" +#include "debug_info.h" +#include "alignment.h" + +namespace maple { +const char *GetDwTagName(unsigned n); +const char *GetDwFormName(unsigned n); +const char *GetDwAtName(unsigned n); +} /* namespace maple */ + +#if TARGRISCV64 +#define CMNT "\t# " +#else +#define CMNT "\t// " +#endif +#define TEXT_BEGIN text0 +#define TEXT_END etext0 +#define DEBUG_INFO_0 debug_info0 +#define DEBUG_ABBREV_0 debug_abbrev0 +#define DEBUG_LINE_0 debug_line0 +#define DEBUG_STR_LABEL ASF + +namespace maplebe { +constexpr int32 kSizeOfDecoupleStaticStruct = 4; +constexpr uint32 kHugeSoInsnCountThreshold = 0x1f00000; /* 124M (4bytes per Insn), leave 4M rooms for 128M */ +constexpr char kHugeSoPostFix[] = "$$hugeso_"; +constexpr char kDebugMapleThis[] = "_this"; +constexpr uint32 kDwarfVersion = 4; +constexpr uint32 kSizeOfPTR = 8; +class StructEmitInfo { + public: + /* default ctor */ + StructEmitInfo() = default; + + ~StructEmitInfo() = default; + + uint16 GetNextFieldOffset() const { + return nextFieldOffset; + } + + void SetNextFieldOffset(uint16 offset) { + nextFieldOffset = offset; + } + + void IncreaseNextFieldOffset(uint16 value) { + nextFieldOffset += value; + } + + uint8 GetCombineBitFieldWidth() const { + return combineBitFieldWidth; + } + + void SetCombineBitFieldWidth(uint8 offset) { + combineBitFieldWidth = offset; + } + + void IncreaseCombineBitFieldWidth(uint8 value) { + combineBitFieldWidth += value; + } + + void DecreaseCombineBitFieldWidth(uint8 value) { + combineBitFieldWidth -= value; + } + + uint64 GetCombineBitFieldValue() const { + return combineBitFieldValue; + } + + void SetCombineBitFieldValue(uint64 value) { + combineBitFieldValue = value; + } + + uint64 GetTotalSize() const { + return totalSize; + } + + void SetTotalSize(uint64 value) { + totalSize = value; + } + + void IncreaseTotalSize(uint64 value) { + totalSize += value; + } + + private: + /* Next field offset in struct. */ + uint16 nextFieldOffset = 0; + uint8 combineBitFieldWidth = 0; + uint64 combineBitFieldValue = 0; + /* Total size emitted in current struct. 
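     [Editorial sketch, not part of the original patch] How the emitter is
     expected to drive this accumulator when packing two adjacent bitfields,
     say a:3 followed by b:5 (field widths are illustrative):

       StructEmitInfo info;
       info.IncreaseCombineBitFieldWidth(3);  // a's bits folded into combineBitFieldValue
       info.IncreaseCombineBitFieldWidth(5);  // b's bits; width reaches 8
       // EmitCombineBfldValue then flushes one byte, IncreaseTotalSize(1)
       // advances totalSize, and nextFieldOffset moves past the packed byte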
*/ + uint64 totalSize = 0; +}; + +class FuncEmitInfo { + public: + CGFunc &GetCGFunc() { + return cgFunc; + } + + const CGFunc &GetCGFunc() const { + return cgFunc; + } + + protected: + explicit FuncEmitInfo(CGFunc &func) : cgFunc(func) {} + ~FuncEmitInfo() = default; + + private: + CGFunc &cgFunc; +}; + +class Emitter { + public: + virtual void Finish() {} + + virtual void CloseOutput() { + if (fileStream.is_open()) { + fileStream.close(); + } + rangeIdx2PrefixStr.clear(); + hugeSoTargets.clear(); + labdie2labidxTable.clear(); + fileMap.clear(); + } + + MOperator GetCurrentMOP() const { + return currentMop; + } + + void SetCurrentMOP(const MOperator &mOp) { + currentMop = mOp; + } + + void EmitAsmLabel(AsmLabel label); + void EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label); + void EmitFileInfo(const std::string &fileName); + /* a symbol start/end a block */ + void EmitBlockMarker(const std::string &markerName, const std::string §ionName, + bool withAddr, const std::string &addrName = ""); + void EmitNullConstant(uint64 size); + void EmitCombineBfldValue(StructEmitInfo &structEmitInfo); + void EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset); + void EmitScalarConstant(MIRConst &mirConst, bool newLine = true, bool flag32 = false, bool isIndirect = false); + void EmitStr(const std::string& mplStr, bool emitAscii = false, bool emitNewline = false); + void EmitStrConstant(const MIRStrConst &mirStrConst, bool isIndirect = false); + void EmitStr16Constant(const MIRStr16Const &mirStr16Const); + void EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, uint32 itabConflictIndex, + const std::map &strIdx2Type, size_t idx); + void EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx); + void EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx); + void EmitConstantTable(const MIRSymbol &mirSymbol, MIRConst &mirConst, + const std::map &strIdx2Type); + void EmitClassInfoSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName); + void EmitMethodFieldSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName); + void EmitLiterals(std::vector> &literals, + const std::map &strIdx2Type); + void EmitFuncLayoutInfo(const MIRSymbol &layout); + void EmitGlobalVars(std::vector> &globalVars); + void EmitGlobalVar(const MIRSymbol &globalVar); + void EmitStaticFields(const std::vector &fields); + void EmitLiteral(const MIRSymbol &literal, const std::map &strIdx2Type); + void EmitStringPointers(); + void GetHotAndColdMetaSymbolInfo(const std::vector &mirSymbolVec, + std::vector &hotFieldInfoSymbolVec, + std::vector &coldFieldInfoSymbolVec, const std::string &prefixStr, + bool forceCold = false); + void EmitMetaDataSymbolWithMarkFlag(const std::vector &mirSymbolVec, + const std::map &strIdx2Type, + const std::string &prefixStr, const std::string §ionName, + bool isHotFlag); + void EmitMethodDeclaringClass(const MIRSymbol &mirSymbol, const std::string §ionName); + void MarkVtabOrItabEndFlag(const std::vector &mirSymbolVec); + void EmitArrayConstant(MIRConst &mirConst); + void EmitStructConstant(MIRConst &mirConst); + void EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCounts); + void EmitVectorConstant(MIRConst &mirConst); + void EmitLocalVariable(const CGFunc &cgFunc); + void EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string §ionName); + void 
EmitGlobalVariable(); + void EmitGlobalRootList(const MIRSymbol &mirSymbol); + void EmitMuidTable(const std::vector &vec, const std::map &strIdx2Type, + const std::string §ionName); + MIRAddroffuncConst *GetAddroffuncConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst); + int64 GetFieldOffsetValue(const std::string &className, const MIRIntConst &intConst, + const std::map &strIdx2Type); + + Emitter &Emit(int64 val) { + fileStream << val; + return *this; + } + + Emitter &Emit(const IntVal& val) { + fileStream << val.GetExtValue(); + return *this; + } + + Emitter &Emit(const MapleString &str) { + DEBUG_ASSERT(str.c_str() != nullptr, "nullptr check"); + fileStream << str; + return *this; + } + + Emitter &Emit(const std::string &str) { + fileStream << str; + return *this; + } + + Emitter &Emit(const void *data, size_t size) { + fileStream.write(reinterpret_cast(data), size); + return *this; + } + + void SetFileOffset(uint64 offset) { + fileStream.seekp(offset); + } + + void EmitLabelRef(LabelIdx labIdx); + void EmitStmtLabel(LabelIdx labIdx); + void EmitLabelPair(const LabelPair &pairLabel); + void EmitLabelForFunc(const MIRFunction *func, LabelIdx labIdx); + + /* Emit signed/unsigned integer literals in decimal or hexadecimal */ + void EmitDecSigned(int64 num); + void EmitDecUnsigned(uint64 num); + void EmitHexUnsigned(uint64 num); + + /* Dwarf debug info */ + void FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr); + void SetupDBGInfo(DebugInfo *mirdi); + void ApplyInPrefixOrder(DBGDie *die, const std::function &func); + void AddLabelDieToLabelIdxMapping(DBGDie *lblDie, LabelIdx lblIdx); + LabelIdx GetLabelIdxForLabelDie(DBGDie *lblDie); + void EmitDIHeader(); + void EmitDIFooter(); + void EmitDIHeaderFileInfo(); + void EmitDIDebugInfoSection(DebugInfo *mirdi); + void EmitDIDebugAbbrevSection(DebugInfo *mirdi); + void EmitDIDebugARangesSection(); + void EmitDIDebugRangesSection(); + void EmitDIDebugLineSection(); + void EmitDIDebugStrSection(); + void EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di); + void EmitDIFormSpecification(unsigned int dwform); + void EmitDIFormSpecification(const DBGDieAttr *attr) { + EmitDIFormSpecification(attr->GetDwForm()); + } + +#if 1 /* REQUIRE TO SEPERATE TARGAARCH64 TARGARM32 */ +/* Following code is under TARGAARCH64 condition */ + void EmitHugeSoRoutines(bool lastRoutine = false); + void EmitInlineAsmSection(); + + uint64 GetJavaInsnCount() const { + return javaInsnCount; + } + + uint64 GetFuncInsnCount() const { + return funcInsnCount; + } + + MapleMap &GetFileMap() { + return fileMap; + } + + void SetFileMapValue(uint32_t n, const std::string &file) { + fileMap[n] = file; + } + + CG *GetCG() const { + return cg; + } + + void ClearFuncInsnCount() { + funcInsnCount = 0; + } + + void IncreaseJavaInsnCount(uint64 n = 1, bool alignToQuad = false) { + if (alignToQuad) { + javaInsnCount = (javaInsnCount + 1) & (~0x1UL); + funcInsnCount = (funcInsnCount + 1) & (~0x1UL); + } + javaInsnCount += n; + funcInsnCount += n; +#ifdef EMIT_INSN_COUNT + Emit(" /* InsnCount: "); + Emit(javaInsnCount *); + Emit("*/ "); +#endif + } + + bool NeedToDealWithHugeSo() const { + return javaInsnCount > kHugeSoInsnCountThreshold; + } + + std::string HugeSoPostFix() const { + return std::string(kHugeSoPostFix) + std::to_string(hugeSoSeqence); + } + + void InsertHugeSoTarget(const std::string &target) { + (void)hugeSoTargets.insert(target); + } +#endif + + void InsertLabdie2labidxTable(DBGDie *lbldie, LabelIdx lab) { + if 
(labdie2labidxTable.find(lbldie) == labdie2labidxTable.end()) { + labdie2labidxTable[lbldie] = lab; + } + } + + protected: + Emitter(CG &cg, const std::string &fileName) + : cg(&cg), + rangeIdx2PrefixStr(cg.GetMIRModule()->GetMPAllocator().Adapter()), + arraySize(0), + isFlexibleArray(false), + stringPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()), + localStrPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()), + hugeSoTargets(cg.GetMIRModule()->GetMPAllocator().Adapter()), + labdie2labidxTable(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()), + fileMap(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()) { + MIRModule &mirModule = *cg.GetMIRModule(); + memPool = mirModule.GetMemPool(); + asmInfo = memPool->New(*memPool); + } + + ~Emitter() = default; + protected: + std::ofstream fileStream; + MemPool *memPool; + CG *cg; + + private: + AsmLabel GetTypeAsmInfoName(PrimType primType) const; + void EmitDWRef(const std::string &name); + void InitRangeIdx2PerfixStr(); + void EmitAddressString(const std::string &address); + void EmitAliasAndRef(const MIRSymbol &sym); /* handle function symbol which has alias and weak ref */ + + MOperator currentMop = UINT_MAX; + MapleUnorderedMap rangeIdx2PrefixStr; + const AsmInfo *asmInfo; + uint32 arraySize; + bool isFlexibleArray; + MapleSet stringPtr; + MapleVector localStrPtr; +#if 1/* REQUIRE TO SEPERATE TARGAARCH64 TARGARM32 */ +/* Following code is under TARGAARCH64 condition */ + uint64 javaInsnCount = 0; + uint64 funcInsnCount = 0; + MapleSet hugeSoTargets; + uint32 hugeSoSeqence = 2; +#endif + MapleMap labdie2labidxTable; + MapleMap fileMap; +}; + +class OpndEmitVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + explicit OpndEmitVisitor(Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~OpndEmitVisitor() = default; + protected: + Emitter &emitter; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_EMIT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/framewhitelist.def b/ecmascript/mapleall/maple_be/include/cg/framewhitelist.def new file mode 100644 index 0000000000000000000000000000000000000000..dc8aabeaa5049087cad2de275f849aea825df42d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/framewhitelist.def @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +"Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass1_7C_28_29Ljava_2Flang_2FClass_3B", +"Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass2_7C_28_29Ljava_2Flang_2FClass_3B", +"Ljava_2Flang_2FClass_3B_7CnewInstance_7C_28_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Freflect_2FConstructor_3B_7CnewInstance_7C_28ALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetShort_7C_28Ljava_2Flang_2FObject_3B_29S", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetInt_7C_28Ljava_2Flang_2FObject_3B_29I", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetFloat_7C_28Ljava_2Flang_2FObject_3B_29F", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetDouble_7C_28Ljava_2Flang_2FObject_3B_29D", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetChar_7C_28Ljava_2Flang_2FObject_3B_29C", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetByte_7C_28Ljava_2Flang_2FObject_3B_29B", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetBoolean_7C_28Ljava_2Flang_2FObject_3B_29Z", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetLong_7C_28Ljava_2Flang_2FObject_3B_29J", +"Ljava_2Flang_2Freflect_2FField_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetBoolean_7C_28Ljava_2Flang_2FObject_3BZ_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7Cset_7C_28Ljava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetShort_7C_28Ljava_2Flang_2FObject_3BS_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetLong_7C_28Ljava_2Flang_2FObject_3BJ_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetInt_7C_28Ljava_2Flang_2FObject_3BI_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetFloat_7C_28Ljava_2Flang_2FObject_3BF_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetDouble_7C_28Ljava_2Flang_2FObject_3BD_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetChar_7C_28Ljava_2Flang_2FObject_3BC_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetByte_7C_28Ljava_2Flang_2FObject_3BB_29V", +"LThrowableNativeUncover_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithDefault_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_247_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_249_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_2410_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithRedefinedMethods_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_2413_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_241ImplementationSuperUser_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithStatic_3B_7CstaticMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Ljava_2Flang_2Freflect_2FMethod_3B_7Cinvoke_7C_28Ljava_2Flang_2FObject_3BALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24OtherInterfaceWithDefault_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"LStackoverflow_3B_7CstackOverflow_7C_28_29V", +"Llibcore_2Fsun_2Fmisc_2FUnsafeTest_241_3B_7Crun_7C_28_29V" diff --git a/ecmascript/mapleall/maple_be/include/cg/global.h b/ecmascript/mapleall/maple_be/include/cg/global.h new file mode 100644 index 0000000000000000000000000000000000000000..80ad3f08de55de6be98331b10f65024ac4e5b5f1 --- /dev/null +++ 
b/ecmascript/mapleall/maple_be/include/cg/global.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_GLOBAL_H +#define MAPLEBE_INCLUDE_CG_GLOBAL_H + +#include "cg_phase.h" +#include "maple_phase.h" + +namespace maplebe { +class GlobalOpt { + public: + explicit GlobalOpt(CGFunc &func) : cgFunc(func) {} + virtual ~GlobalOpt() = default; + virtual void Run() {} + std::string PhaseName() const { + return "globalopt"; + } + + protected: + /* if the number of bbs is more than 500 or the number of insns is more than 9000, don't optimize. */ + static constexpr uint32 kMaxBBNum = 500; + static constexpr uint32 kMaxInsnNum = 9000; + CGFunc &cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgGlobalOpt, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_GLOBAL_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/ico.h b/ecmascript/mapleall/maple_be/include/cg/ico.h new file mode 100644 index 0000000000000000000000000000000000000000..9155fd3a9c2580e4e7e726d7752acdfc2b4a1860 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/ico.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ICO_H +#define MAPLEBE_INCLUDE_CG_ICO_H +#include "optimize_common.h" +#include "live.h" + +namespace maplebe { +class IfConversionOptimizer : public Optimizer { + public: + IfConversionOptimizer(CGFunc &func, MemPool &memPool) : Optimizer(func, memPool) { + name = "ICO"; + } + + ~IfConversionOptimizer() override = default; +}; + +/* If-Then-Else pattern */ +class ICOPattern : public OptimizationPattern { + public: + explicit ICOPattern(CGFunc &func) : OptimizationPattern(func) { + dotColor = kIcoIte; + patternName = "If-Then-Else"; + } + ~ICOPattern() override = default; + static constexpr int kThreshold = 2; + + protected: + Insn *FindLastCmpInsn(BB &bb) const; + std::vector GetLabelOpnds(Insn &insn) const; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgIco, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ICO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/ifile.h b/ecmascript/mapleall/maple_be/include/cg/ifile.h new file mode 100644 index 0000000000000000000000000000000000000000..d4ea286185687a759c081e14906e01ff42e7f397 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/ifile.h @@ -0,0 +1,403 @@ +/* + * Copyright (c) [2020-2023] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_IFILE_H +#define MAPLEBE_INCLUDE_CG_IFILE_H + +#include +#include +#include "types_def.h" +#include "common_utils.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "maple_string.h" +#include "mir_symbol.h" +#include "cg.h" + +namespace maplebe { +using SectionHeader = Elf64_Shdr; +using Address = Elf64_Addr; +using Offset = Elf64_Off; +using Word = Elf64_Word; +using Xword = Elf64_Xword; +using FileHeader = Elf64_Ehdr; +using Symbol = Elf64_Sym; +using SegmentHeader = Elf64_Phdr; +using DynSectionEntry = Elf64_Dyn; +using SectionIndex = Elf64_Section; +using Rela = Elf64_Rela; +class ObjEmitter; +class Fixup; + +class Alignment { + public: + template + static T Align(T offset, T align) { + if (align <= 1) { + return offset; + } + return (offset + align - 1) & (~(align - 1)); + } +}; /* class Alignment */ + +struct ObjLabel { + Offset offset; + size_t size; +}; + +enum LabelFixupKind : uint8 { + kLabelFixupDirect, + kLabelFixupInDirect, + kLabelFixupInDirectAdd, + kLabelFixupSize, + kLabelFixupDirect64, + kLabelFixupInDirect64, + kLabelFixupInDirectAdd64, + kLabelFixupSize64, + kLabelFixupGctib, +}; + +enum IFileClassStatus : uint32 { + kIFileClassResolved, + kIFileClassUnresolved, + kIFileClassUnknown +}; + +class LabelFixup { + public: + LabelFixup(const std::string &label, uint32 offset, LabelFixupKind kind) + : label(label), offset(offset), kind(kind) {} + ~LabelFixup() = default; + + const std::string GetLabel() const { + return label; + } + + uint32 GetOffset() const { + return offset; + } + + LabelFixupKind GetFixupKind() const { + return kind; + } + + private: + std::string label; + uint32 offset; + LabelFixupKind kind; +}; + +using Label2OffsetMap = std::unordered_map; +using LabelFixupVec = MapleVector; + +struct SectionDesc { + uint32 sectionOffset = 0; + uint32 sectionSize = 0; +}; + +class Section { + public: + Section(std::string name, Word type, Word flags, uint32 align, ObjEmitter &emitter, MemPool &memPool); + + virtual ~Section() = default; + virtual void GenerateData() = 0; + virtual void WriteSection(std::ofstream &outStream) = 0; + virtual void ClearData() { + return; + } + + virtual void HandleGlobalFixup(const Label2OffsetMap &globalLabel2Offset) { + (void)globalLabel2Offset; + return; + } + + virtual void HandleLocalFixup() {} + + virtual void Layout(); + + void SetIndex(uint32 idx) { + index = idx; + } + + void SetInfo(uint32 value) { + secHeader.sh_info = value; + } + + void SetLink(const Section §ion) { + secHeader.sh_link = section.GetIndex(); + } + + void SetEntSize(uint32 value) { + secHeader.sh_entsize = value; + } + + void SetDataSize(Word size) { + secHeader.sh_size = size; + } + + virtual uint32 GetDataSize() { + return secHeader.sh_size; + } + + void SetAddr(Address addr) { + secHeader.sh_addr = addr; + } + + Address GetAddr() const { + return secHeader.sh_addr; + } + + Word GetFlags() const { + return secHeader.sh_flags; + } + + void SetOffset(Offset value) { + secHeader.sh_offset = value; + } + + Offset 
GetOffset() const { + return secHeader.sh_offset; + } + + SectionIndex GetIndex() const { + return index; + } + + Word GetAlign() const { + return secHeader.sh_addralign; + } + + const MapleString &GetName() const { + return name; + } + + void SetSectionHeaderNameIndex(size_t index) { + secHeader.sh_name = index; + } + + Word GetType() const { + return secHeader.sh_type; + } + + const SectionHeader &GetSectionHeader() const { + return secHeader; + } + + protected: + ObjEmitter &emitter; + + private: + MapleString name; + SectionIndex index {}; + SectionHeader secHeader {}; +}; + +class DataSection : public Section { + public: + DataSection(const std::string &name, Word type, Word flags, Word align, ObjEmitter &emitter, MemPool &inputMemPool) + : Section(name, type, flags, align, emitter, inputMemPool), + memPool(inputMemPool), alloc(&memPool), data(alloc.Adapter()) {} + + ~DataSection() = default; + + virtual void GenerateData() override { + SetDataSize(data.size()); + if (GetName() == ".ifile.hex") { + SetDataSize(k8BitSize); + } + } + + virtual void ClearData() override { + data.clear(); + } + + virtual void HandleGlobalFixup(const Label2OffsetMap &globalLabel2Offset) override { + (void)globalLabel2Offset; + return; + } + + virtual void WriteSection(std::ofstream &outStream) override { + outStream.write(reinterpret_cast(data.data()), data.size()); + } + + void AppendData(const void *value, size_t size) { + auto pdata = reinterpret_cast(value); + data.insert(data.end(), pdata, pdata + size); + } + + void AppendData(MapleVector value) { + data.insert(data.end(), value.begin(), value.end()); + } + + uint32 GetDataSize() override { + return data.size(); + } + + void FillPadding(uint32 paddingNum) { + if (paddingNum == 0) { + return; + } + std::vector paddingData(paddingNum, 0); + auto pdata = reinterpret_cast(paddingData.data()); + data.insert(data.end(), pdata, pdata + paddingNum); + } + + uint32 GetDataElem32(size_t index) { + uint32 value = 0; + errno_t res = memcpy_s(&value, sizeof(uint32), data.data() + index, sizeof(uint32)); + CHECK_FATAL(res == EOK, "call memcpy_s failed"); + return value; + } + + uint64 GetDataElem64(size_t index) { + uint64 value = 0; + errno_t res = memcpy_s(&value, sizeof(uint64), data.data() + index, sizeof(uint64)); + CHECK_FATAL(res == EOK, "call memcpy_s failed"); + return value; + } + + void Swap(const void *value, size_t index, size_t size) { + errno_t res = memcpy_s(data.data() + index, size, value, size); + CHECK_FATAL(res == EOK, "call memcpy_s failed"); + } + + static void AddLabel2Offset(Label2OffsetMap &label2Offsets, const std::string &name, ObjLabel &objLabel) { + label2Offsets.insert(std::make_pair(name, objLabel)); + } + + static void AddLabelFixup(LabelFixupVec &labelFixups, LabelFixup &labelFixup) { + labelFixups.emplace_back(&labelFixup); + } + + protected: + MemPool &memPool; + MapleAllocator alloc; + MapleVector data; +}; + +struct MethodHeader { + uint32 methodMetaOffset = 0; + uint16 localRefOffset = 0; + uint16 localRefNumber = 0; + uint32 codeInfoOffset = 0; + uint32 codeSize = 0; +}; + +class StringSection : public DataSection { + public: + StringSection(const std::string &name, Word type, Word flags, Word align, ObjEmitter &emitter, MemPool &memPool) + : DataSection(name, type, flags, align, emitter, memPool) { + AddString("\0"); + } + + ~StringSection() = default; + + size_t AddString(const std::string &str) { + size_t pos = data.size(); + AppendData(str.c_str(), str.size() + 1); + return pos; + } + + size_t AddString(const MapleString 
&str) { + size_t pos = data.size(); + AppendData(str.c_str(), str.length() + 1); + return pos; + } +}; + +class SymbolSection : public Section { + public: + SymbolSection(const std::string &name, Word type, Word flags, Word align, + ObjEmitter &emitter, MemPool &memPool, const Section &link) + : Section(name, type, flags, align, emitter, memPool), alloc(&memPool), symbols(alloc.Adapter()) { + SetEntSize(sizeof(Symbol)); + SetLink(link); + SetInfo(1); + AppendSymbol({ 0, 0, 0, 0, 0, 0 }); + } + + ~SymbolSection() = default; + + void GenerateData() override { + SetDataSize(symbols.size() * sizeof(Symbol)); + } + + void WriteSection(std::ofstream &outStream) override { + outStream.write(reinterpret_cast(symbols.data()), symbols.size() * sizeof(Symbol)); + } + + void AppendSymbol(Symbol symbol) { + symbols.push_back(symbol); + } + + uint32 GetSymbolsSize() const { + return symbols.size(); + } + + uint64 GetIdxInSymbols(int64 symIdx) const { + return symbolIdxMap.at(symIdx); + } + + void AppendIdxInSymbols(int64 symIdx) { + symbolIdxMap[symIdx] = static_cast(GetSymbolsSize() - 1); + } + + bool ExistSymInSymbols(int64 symIdx) { + return symbolIdxMap.count(symIdx) != 0; + } + + private: + MapleAllocator alloc; + MapleVector symbols; + std::unordered_map symbolIdxMap; +}; + +class RelaSection : public Section { + public: + RelaSection(const std::string &name, Word type, Word flags, Word info, Word align, const Section &link, + ObjEmitter &emitter, MemPool &memPool) + : Section(name, type, flags, align, emitter, memPool), alloc(&memPool), relas(alloc.Adapter()) { + SetEntSize(sizeof(Rela)); + SetInfo(info); + SetLink(link); + } + + ~RelaSection() = default; + + void GenerateData() override { + SetDataSize(relas.size() * sizeof(Rela)); + } + + void WriteSection(std::ofstream &outStream) override { + outStream.write(reinterpret_cast(relas.data()), relas.size() * sizeof(Rela)); + } + + void AppendRela(Rela rela) { + relas.push_back(rela); + } + + private: + MapleAllocator alloc; + MapleVector relas; +}; /* class RelaSection */ + +struct EmitInfo { + MIRConst &elemConst; + Offset &offset; + Label2OffsetMap &label2Offsets; + LabelFixupVec &labelFixups; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_IFILE_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/immvalid.def b/ecmascript/mapleall/maple_be/include/cg/immvalid.def new file mode 100644 index 0000000000000000000000000000000000000000..33bd0714fc182c5a1049c87cfd54e9f14c436dbc --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/immvalid.def @@ -0,0 +1,176 @@ +static std::set ValidBitmaskImmSet = { +#include "valid_bitmask_imm.txt" +}; +constexpr uint32 kMaxBitTableSize = 5; +constexpr std::array bitmaskImmMultTable = { + 0x0000000100000001UL, 0x0001000100010001UL, 0x0101010101010101UL, 0x1111111111111111UL, 0x5555555555555555UL, +}; + +bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { + /* mask1 is a 64-bit number whose bits are all 1, shifted left by bitLen bits */ + const uint64 mask1 = 0xffffffffffffffffUL << bitLen; + /* mask2 is a 64-bit number whose low nLowerZeroBits bits are all 1, higher bits are all 0 */ + uint64 mask2 = (1UL << static_cast(nLowerZeroBits)) - 1UL; + return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 0UL; +}; + +bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { + DEBUG_ASSERT(val != 0, "IsBitmaskImmediate() doesn't accept 0 or -1"); + DEBUG_ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() doesn't accept 0 or -1"); + if ((bitLen == k32BitSize) &&
(static_cast(val) == -1)) { + return false; + } + uint64 val2 = val; + if (bitLen == k32BitSize) { + val2 = (val2 << k32BitSize) | (val2 & ((1ULL << k32BitSize) - 1)); + } + bool expectedOutcome = (ValidBitmaskImmSet.find(val2) != ValidBitmaskImmSet.end()); + + if ((val & 0x1) != 0) { + /* + * we want to work with + * 0000000000000000000000000000000000000000000001100000000000000000 + * instead of + * 1111111111111111111111111111111111111111111110011111111111111111 + */ + val = ~val; + } + + if (bitLen == k32BitSize) { + val = (val << k32BitSize) | (val & ((1ULL << k32BitSize) - 1)); + } + + /* get the least significant bit set and add it to 'val' */ + uint64 tmpVal = val + (val & static_cast(UINT64_MAX - val + 1)); + + /* now check if tmp is a power of 2 or tmpVal==0. */ + tmpVal = tmpVal & (tmpVal - 1); + if (tmpVal == 0) { + if (!expectedOutcome) { + LogInfo::MapleLogger() << "0x" << std::hex << std::setw(static_cast(k16ByteSize)) << + std::setfill('0') << static_cast(val) << "\n"; + return false; + } + DEBUG_ASSERT(expectedOutcome, "incorrect implementation: not valid value but returning true"); + /* power of two or zero ; return true */ + return true; + } + + int32 p0 = __builtin_ctzll(val); + int32 p1 = __builtin_ctzll(tmpVal); + int64 diff = p1 - p0; + + /* check if diff is a power of two; return false if not. */ + if ((static_cast(diff) & (static_cast(diff) - 1)) != 0) { + DEBUG_ASSERT(!expectedOutcome, "incorrect implementation: valid value but returning false"); + return false; + } + + int32 logDiff = __builtin_ctzll(static_cast(diff)); + int64 pattern = static_cast(val & ((1ULL << static_cast(diff)) - 1)); +#if DEBUG + bool ret = (val == pattern * bitmaskImmMultTable[kMaxBitTableSize - logDiff]); + DEBUG_ASSERT(expectedOutcome == ret, "incorrect implementation: return value does not match expected outcome"); + return ret; +#else + return val == pattern * bitmaskImmMultTable[kMaxBitTableSize - logDiff]; +#endif +} + +bool Imm12BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); + // for target linux-aarch64-gnu + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +bool Imm12BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return true; + } + return IsBitmaskImmediate(static_cast(value), k32BitSize); +} + +bool Imm13BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 0); + // for target linux-aarch64-gnu + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, kMaxImmVal13Bits); + return result; +} + +bool Imm13BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return true; + } + return IsBitmaskImmediate(static_cast(value), k64BitSize); +} + +bool Imm16BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); + /* + * for target linux-aarch64-gnu + * aarch64 assembly takes up to 24-bits immediate, generating + * either cmp or cmp with shift 12 encoding + */ + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +/* + * 8bit : 0 + * halfword : 1 + * 32bit - word : 2 + * 64bit - word : 3 + * 128bit- word : 4 + */ +bool StrLdrSignedOfstValid(int64 value, uint wordSize) { + if (value <= k256BitSize && value >= kNegative256BitSize) { + return true; + } else if ((value > k256BitSize) && (value <= kMaxPimm[wordSize])) { + uint64 mask = (1U << 
wordSize) - 1U; + return (static_cast(value) & mask) ? false : true; + } + return false; +} + + +bool StrLdr8ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, 0); +} + +bool StrLdr16ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k1ByteSize); +} + +bool StrLdr32ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k2ByteSize); +} + +bool StrLdr32PairImmValid(int64 value) { + if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { + return (static_cast(value) & 3) ? false : true; + } + return false; +} + +bool StrLdr64ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k3ByteSize); +} + +bool StrLdr64PairImmValid(int64 value) { + if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { + return (static_cast(value) & 7) ? false : true; + } + return false; +} + +bool StrLdr128ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k4ByteSize); +} + +bool StrLdr128PairImmValid(int64 value) { + if (value < k1024BitSize && (value >= kNegative1024BitSize)) { + return (static_cast(value) & 0xf) ? false : true; + } + return false; +} diff --git a/ecmascript/mapleall/maple_be/include/cg/insn.h b/ecmascript/mapleall/maple_be/include/cg/insn.h new file mode 100644 index 0000000000000000000000000000000000000000..00f9086db529fa42f5d6cf1aa0123d682f594c82 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/insn.h @@ -0,0 +1,648 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_INSN_H +#define MAPLEBE_INCLUDE_CG_INSN_H +/* C++ headers */ +#include /* for nullptr */ +#include +#include +#include +#include "operand.h" +#include "mpl_logging.h" + +/* Maple IR header */ +#include "types_def.h" /* for uint32 */ +#include "common_utils.h" + +namespace maplebe { +/* forward declaration */ +class BB; +class CG; +class Emitter; +class DepNode; +struct InsnDesc; +class Insn { + public: + enum RetType : uint8 { + kRegNull, /* no return type */ + kRegFloat, /* return register is V0 */ + kRegInt /* return register is R0 */ + }; + /* MCC_DecRefResetPair clears 2 stack positions, MCC_ClearLocalStackRef clears 1 stack position */ + static constexpr uint8 kMaxStackOffsetSize = 2; + + Insn(MemPool &memPool, MOperator opc) + : mOp(opc), + localAlloc(&memPool), + opnds(localAlloc.Adapter()), + registerBinding(localAlloc.Adapter()), + deoptVreg2Opnd(localAlloc.Adapter()), + comment(&memPool) {} + Insn(MemPool &memPool, MOperator opc, Operand &opnd0) : Insn(memPool, opc) { opnds.emplace_back(&opnd0); } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1) : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2) : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2, Operand &opnd3) + : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + opnds.emplace_back(&opnd3); + } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2, Operand &opnd3, Operand &opnd4) + : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + opnds.emplace_back(&opnd3); + opnds.emplace_back(&opnd4); + } + virtual ~Insn() = default; + + MOperator GetMachineOpcode() const { + return mOp; + } + + void SetMOP(const InsnDesc &idesc); + + void AddOperand(Operand &opnd) { + opnds.emplace_back(&opnd); + } + + Insn &AddOpndChain(Operand &opnd) { + AddOperand(opnd); + return *this; + } + /* use carefully; this might make the insn illegal */ + void CommuteOperands(uint32 dIndex, uint32 sIndex); + void CleanAllOperand() { + opnds.clear(); + } + + void PopBackOperand() { + opnds.pop_back(); + } + + Operand &GetOperand(uint32 index) const { + DEBUG_ASSERT(index < opnds.size(), "index out of range"); + return *opnds[index]; + } + + void ResizeOpnds(uint32 newSize) { + opnds.resize(static_cast(newSize)); + } + + uint32 GetOperandSize() const { + return static_cast(opnds.size()); + } + + void SetOperand(uint32 index, Operand &opnd) { + DEBUG_ASSERT(index <= opnds.size(), "index out of range"); + opnds[index] = &opnd; + } + + void SetRetSize(uint32 size) { + DEBUG_ASSERT(IsCall(), "Insn should be a call."); + retSize = size; + } + + uint32 GetRetSize() const { + DEBUG_ASSERT(IsCall(), "Insn should be a call."); + return retSize; + } + + virtual bool IsMachineInstruction() const; + + virtual bool IsIntrinsic() const { + return false; + } + + bool OpndIsDef(uint32 id) const; + + bool OpndIsUse(uint32 id) const; + + virtual bool IsPCLoad() const { + return false; + } + + Operand *GetMemOpnd() const; + + void SetMemOpnd(MemOperand *memOpnd); + + bool IsCall() const; + bool IsTailCall() const; + bool IsAsmInsn() const; + bool IsClinit() const; + bool CanThrow() const; + bool
MayThrow() const; + bool IsBranch() const; + bool IsCondBranch() const; + bool IsUnCondBranch() const; + bool IsMove() const; + bool IsBasicOp() const; + bool IsUnaryOp() const; + bool IsShift() const; + bool IsPhi() const; + bool IsLoad() const; + bool IsStore() const; + bool IsConversion() const; + bool IsAtomic() const; + + bool IsLoadPair() const; + bool IsStorePair() const; + bool IsLoadStorePair() const; + bool IsLoadLabel() const; + + virtual bool NoAlias() const { + return false; + } + + bool IsVolatile() const; + + bool IsMemAccessBar() const; + + bool IsMemAccess() const; + + virtual bool HasSideEffects() const { + return false; + } + + bool HasLoop() const; + + virtual bool IsSpecialIntrinsic() const; + + bool IsComment() const; + bool IsImmaterialInsn() const; + + bool IsPseudo() const; + + virtual bool IsTargetInsn() const { + return true; + } + + virtual bool IsCfiInsn() const { + return false; + } + + virtual bool IsDbgInsn() const { + return false; + } + + bool IsDMBInsn() const; + + bool IsVectorOp() const; + + virtual Operand *GetCallTargetOperand() const; + + uint32 GetAtomicNum() const; + /* + * returns a ListOperand + * Note that we don't really need this for Emit + * Rather, we need it for register allocation, to + * correctly state the live ranges for operands + * used for passing call arguments + */ + virtual ListOperand *GetCallArgumentOperand(); + bool IsAtomicStore() const { + return IsStore() && IsAtomic(); + } + + void SetCondDef() { + flags |= kOpCondDef; + } + + bool IsCondDef() const { + return flags & kOpCondDef; + } + + bool AccessMem() const { + return IsLoad() || IsStore(); + } + + bool IsFrameDef() const { + return isFrameDef; + } + + void SetFrameDef(bool b) { + isFrameDef = b; + } + + bool IsAsmDefCondCode() const { + return asmDefCondCode; + } + + void SetAsmDefCondCode() { + asmDefCondCode = true; + } + + bool IsAsmModMem() const { + return asmModMem; + } + + void SetAsmModMem() { + asmModMem = true; + } + + virtual uint32 GetUnitType() { + return 0; + } + + virtual void Dump() const; + +#if DEBUG + virtual void Check() const; +#endif + + void SetComment(const std::string &str) { + comment = str; + } + + void SetComment(const MapleString &str) { + comment = str; + } + + const MapleString &GetComment() const { + return comment; + } + + void AppendComment(const std::string &str) { + comment += str; + } + + void MarkAsSaveRetValToLocal() { + flags |= kOpDassignToSaveRetValToLocal; + } + + bool IsSaveRetValToLocal() const { + return ((flags & kOpDassignToSaveRetValToLocal) != 0); + } + + void MarkAsAccessRefField(bool cond) { + if (cond) { + flags |= kOpAccessRefField; + } + } + + bool IsAccessRefField() const { + return ((flags & kOpAccessRefField) != 0); + } + + Insn *GetPreviousMachineInsn() const { + for (Insn *returnInsn = prev; returnInsn != nullptr; returnInsn = returnInsn->prev) { + DEBUG_ASSERT(returnInsn->bb == bb, "insn and its prev insn must have same bb"); + if (returnInsn->IsMachineInstruction()) { + return returnInsn; + } + } + return nullptr; + } + + Insn *GetNextMachineInsn() const { + for (Insn *returnInsn = next; returnInsn != nullptr; returnInsn = returnInsn->next) { + CHECK_FATAL(returnInsn->bb == bb, "insn and its next insn must have same bb"); + if (returnInsn->IsMachineInstruction()) { + return returnInsn; + } + } + return nullptr; + } + + uint32 GetLatencyType() const; + + void SetPrev(Insn *prev) { + this->prev = prev; + } + + Insn *GetPrev() { + return prev; + } + + const Insn *GetPrev() const { + return prev; + } + + void
SetNext(Insn *next) { + this->next = next; + } + + Insn *GetNext() const { + return next; + } + + void SetBB(BB *bb) { + this->bb = bb; + } + + BB *GetBB() { + return bb; + } + + const BB *GetBB() const { + return bb; + } + + void SetId(uint32 id) { + this->id = id; + } + + uint32 GetId() const { + return id; + } + + void SetAddress(uint32 addr) { + address = addr; + } + + uint32 GetAddress() const { + return address; + } + + void SetNopNum(uint32 num) { + nopNum = num; + } + + uint32 GetNopNum() const { + return nopNum; + } + + void SetNeedSplit(bool flag) { + needSplit = flag; + } + + bool IsNeedSplit() const { + return needSplit; + } + + void SetIsThrow(bool isThrow) { + this->isThrow = isThrow; + } + + bool GetIsThrow() const { + return isThrow; + } + + void SetDoNotRemove(bool doNotRemove) { + this->doNotRemove = doNotRemove; + } + + bool GetDoNotRemove() const { + return doNotRemove; + } + + void SetIsSpill() { + this->isSpill = true; + } + + bool GetIsSpill() const { + return isSpill; + } + + void SetIsReload() { + this->isReload = true; + } + + bool GetIsReload() const { + return isReload; + } + + bool IsSpillInsn() const { + return (isSpill || isReload); + } + + void SetIsCallReturnUnsigned(bool unSigned) { + DEBUG_ASSERT(IsCall(), "Insn should be a call."); + this->isCallReturnUnsigned = unSigned; + } + + bool GetIsCallReturnUnsigned() const { + DEBUG_ASSERT(IsCall(), "Insn should be a call."); + return isCallReturnUnsigned; + } + + bool GetIsCallReturnSigned() const { + DEBUG_ASSERT(IsCall(), "Insn should be a call."); + return !isCallReturnUnsigned; + } + + void SetRetType(RetType retType) { + this->retType = retType; + } + + RetType GetRetType() const { + return retType; + } + + void SetClearStackOffset(short index, int64 offset) { + CHECK_FATAL(index < kMaxStackOffsetSize, "out of clearStackOffset's range"); + clearStackOffset[index] = offset; + } + + int64 GetClearStackOffset(short index) const { + CHECK_FATAL(index < kMaxStackOffsetSize, "out of clearStackOffset's range"); + return clearStackOffset[index]; + } + + /* if the function name is MCC_ClearLocalStackRef or MCC_DecRefResetPair, it will clear the designated stack slot */ + bool IsClearDesignateStackCall() const { + return clearStackOffset[0] != -1 || clearStackOffset[1] != -1; + } + + void SetDepNode(DepNode &depNode) { + this->depNode = &depNode; + } + + DepNode *GetDepNode() { + return depNode; + } + + const DepNode *GetDepNode() const { + return depNode; + } + + void SetIsPhiMovInsn(bool val) { + isPhiMovInsn = val; + } + + bool IsPhiMovInsn() const { + return isPhiMovInsn; + } + + Insn *Clone(MemPool &memPool) const; + + void SetInsnDescrption(const InsnDesc &newMD) { + md = &newMD; + } + + const InsnDesc *GetDesc() const { + return md; + } + + void AddRegBinding(uint32 regA, uint32 regB) { + (void)registerBinding.emplace(regA, regB); + } + + const MapleMap& GetRegBinding() const { + return registerBinding; + } + + void AddDeoptBundleInfo(int32 deoptVreg, Operand* opnd) { + deoptVreg2Opnd.insert(std::pair(deoptVreg, opnd)); + } + + const MapleMap &GetDeoptBundleInfo() const { + return deoptVreg2Opnd; + } + + void SetRefSkipIdx(int32 index) { + refSkipIdx = index; + } + + /* Get the size of memory written/read by this insn */ + uint32 GetMemoryByteSize() const; + + /* return true if the register appears */ + virtual bool ScanReg(regno_t regNO) const; + + virtual bool IsRegDefined(regno_t regNO) const; + + virtual std::set GetDefRegs() const; + + virtual uint32 GetBothDefUseOpnd() const; + + protected: + MOperator mOp; + MapleAllocator
localAlloc; + MapleVector opnds; + Insn *prev = nullptr; + Insn *next = nullptr; + BB *bb = nullptr; /* BB to which this insn belongs */ + uint32 flags = 0; + bool isPhiMovInsn = false; + + private: + MapleMap registerBinding; /* used for inline asm only */ + MapleMap deoptVreg2Opnd; + enum OpKind : uint32 { + kOpUnknown = 0, + kOpCondDef = 0x1, + kOpAccessRefField = (1ULL << 30), /* load-from/store-into a ref flag-field */ + kOpDassignToSaveRetValToLocal = (1ULL << 31) /* save return value to local flag */ + }; + + uint32 id = 0; + uint32 address = 0; + uint32 nopNum = 0; + RetType retType = kRegNull; /* if this insn is a call, it represents the return register type R0/V0 */ + uint32 retSize = 0; /* Byte size of the return value if insn is a call. */ + /* record the stack cleared by MCC_ClearLocalStackRef or MCC_DecRefResetPair */ + int64 clearStackOffset[kMaxStackOffsetSize] = { -1, -1 }; + DepNode *depNode = nullptr; /* For dependence analysis, pointing to a dependence node. */ + MapleString comment; + bool isThrow = false; + bool doNotRemove = false; /* caller reg cross call */ + bool isCallReturnUnsigned = false; /* for call insn only. false: signed, true: unsigned */ + bool isSpill = false; /* used as hint for optimization */ + bool isReload = false; /* used as hint for optimization */ + bool isFrameDef = false; + bool asmDefCondCode = false; + bool asmModMem = false; + bool needSplit = false; + + /* for dynamic language to mark reference counting */ + int32 refSkipIdx = -1; + + /* for multiple architectures */ + const InsnDesc *md = nullptr; +}; + +struct VectorRegSpec { + VectorRegSpec() : vecLane(-1), vecLaneMax(0), vecElementSize(0), compositeOpnds(0) {} + + VectorRegSpec(PrimType type, int16 lane = -1, uint16 compositeOpnds = 0) : + vecLane(lane), + vecLaneMax(GetVecLanes(type)), + vecElementSize(GetVecEleSize(type)), + compositeOpnds(compositeOpnds) {} + + VectorRegSpec(uint16 laneNum, uint16 eleSize, int16 lane = -1, uint16 compositeOpnds = 0) : + vecLane(lane), + vecLaneMax(laneNum), + vecElementSize(eleSize), + compositeOpnds(compositeOpnds) {} + + int16 vecLane; /* -1 for whole reg, 0 to 15 to specify individual lane */ + uint16 vecLaneMax; /* Maximum number of lanes for this vregister */ + uint16 vecElementSize; /* element size in each Lane */ + uint16 compositeOpnds; /* Number of enclosed operands within this composite operand */ +}; + +class VectorInsn : public Insn { + public: + VectorInsn(MemPool &memPool, MOperator opc) + : Insn(memPool, opc), + regSpecList(localAlloc.Adapter()) { + regSpecList.clear(); + } + + ~VectorInsn() override = default; + + void ClearRegSpecList() { + regSpecList.clear(); + } + + VectorRegSpec *GetAndRemoveRegSpecFromList(); + + size_t GetNumOfRegSpec() const { + if (IsVectorOp() && !regSpecList.empty()) { + return regSpecList.size(); + } + return 0; + } + + MapleVector &GetRegSpecList() { + return regSpecList; + } + + void SetRegSpecList(const MapleVector &vec) { + regSpecList = vec; + } + + VectorInsn &PushRegSpecEntry(VectorRegSpec *v) { + regSpecList.emplace(regSpecList.begin(), v); + return *this; + } + + private: + MapleVector regSpecList; +}; + +struct InsnIdCmp { + bool operator()(const Insn *lhs, const Insn *rhs) const { + CHECK_FATAL(lhs != nullptr, "lhs is nullptr in InsnIdCmp"); + CHECK_FATAL(rhs != nullptr, "rhs is nullptr in InsnIdCmp"); + return lhs->GetId() < rhs->GetId(); + } +}; +using InsnSet = std::set; +using InsnMapleSet = MapleSet; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_INSN_H */ diff
--git a/ecmascript/mapleall/maple_be/include/cg/instruction_selection.h b/ecmascript/mapleall/maple_be/include/cg/instruction_selection.h new file mode 100644 index 0000000000000000000000000000000000000000..00568de66be166b2a8b088aeb24758cb805abcf3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/instruction_selection.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_INSTRUCTION_SELECTION_H +#define MAPLEBE_INCLUDE_INSTRUCTION_SELECTION_H + +#include "maple_phase_manager.h" + +namespace maplebe { + +class InsnSel { + public: + explicit InsnSel(CGFunc &tempCGFunc) : cgFunc(&tempCGFunc) {} + + virtual ~InsnSel() = default; + + virtual bool InsnSel() = 0; + + protected: + CGFunc *cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgIsel, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END + +} /* namespace maplebe */ + + +#endif /* MAPLEBE_INCLUDE_INSTRUCTION_SELECTION_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/isa.h b/ecmascript/mapleall/maple_be/include/cg/isa.h new file mode 100644 index 0000000000000000000000000000000000000000..9df5fb486bd46ed3e8876e4381b922d7ebc7930c --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/isa.h @@ -0,0 +1,439 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_ISA_H +#define MAPLEBE_INCLUDE_CG_ISA_H + +#include +#include "types_def.h" +#include "operand.h" + +namespace maplebe { +enum MopProperty : maple::uint8 { + kInsnIsAbstract, + kInsnIsMove, + kInsnIsLoad, + kInsnIsLoadPair, + kInsnIsStore, + kInsnIsStorePair, + kInsnIsAtomic, + kInsnIsCall, + kInsnIsTailCall, + kInsnIsConversion, + kInsnIsCondDef, + kInsnHasAcqure, + kInsnHasAcqureRCpc, + kInsnHasLOAcqure, + kInsnHasRelease, + kInsnHasLORelease, + kInsnCanThrow, + kInsnIsDMB, + kInsnIsUnCondBr, + kInsnIsCondBr, + kInsnHasLoop, + kInsnIsVectorOp, + kInsnIsBinaryOp, + kInsnIsPhi, + kInsnIsUnaryOp, + kInsnIsShift, + kInsnInlineAsm, + kInsnSpecialIntrisic, + kInsnIsNop, + kInsnIntrinsic, +}; +using regno_t = uint32_t; +#define ISABSTRACT 1ULL +#define ISMOVE (1ULL << kInsnIsMove) +#define ISLOAD (1ULL << kInsnIsLoad) +#define ISLOADPAIR (1ULL << kInsnIsLoadPair) +#define ISSTORE (1ULL << kInsnIsStore) +#define ISSTOREPAIR (1ULL << kInsnIsStorePair) +#define ISATOMIC (1ULL << kInsnIsAtomic) +#define ISCALL (1ULL << kInsnIsCall) +#define ISTAILCALL (1ULL << kInsnIsTailCall) +#define ISCONVERSION (1ULL << kInsnIsConversion) +#define ISCONDDEF (1ULL << kInsnIsCondDef) +#define HASACQUIRE (1ULL << kInsnHasAcqure) +#define HASACQUIRERCPC (1ULL << kInsnHasAcqureRCpc) +#define HASLOACQUIRE (1ULL << kInsnHasLOAcqure) +#define HASRELEASE (1ULL << kInsnHasRelease) +#define HASLORELEASE (1ULL << kInsnHasLORelease) +#define CANTHROW (1ULL << kInsnCanThrow) +#define ISDMB (1ULL << kInsnIsDMB) +#define ISUNCONDBRANCH (1ULL << kInsnIsUnCondBr) +#define ISCONDBRANCH (1ULL << kInsnIsCondBr) +#define HASLOOP (1ULL << kInsnHasLoop) +#define ISVECTOR (1ULL << kInsnIsVectorOp) +#define ISBASICOP (1ULL << kInsnIsBinaryOp) +#define ISPHI (1ULL << kInsnIsPhi) +#define ISUNARYOP (1ULL << kInsnIsUnaryOp) +#define ISSHIFT (1ULL << kInsnIsShift) +#define INLINEASM (1ULL << kInsnInlineAsm) +#define SPINTRINSIC (1ULL << kInsnSpecialIntrisic) +#define ISNOP (1ULL << kInsnIsNop) +#define ISINTRINSIC (1ULL << kInsnIntrinsic) +constexpr maplebe::regno_t kInvalidRegNO = 0; + +/* + * ARM64 has 32 int registers and 32 FP registers. + * AMD64/X86_64 has 16 int registers, and 16 FP registers. + * In either case, the corresponding calling conventions use + * the smaller number of caller-saved registers. + * 64 bit is not large enough? + */ +using CsrBitset = uint64_t; + +template <typename ParaType> +class ConstraintFunction { + public: + using cfPointer = bool (*) (ParaType); + bool CheckConstraint(cfPointer ccfunc, ParaType a) const { + return (*ccfunc)(a); + } +}; + +/* + * abstract machine instruction + * a lower-level maple IR which aims to represent general machine instructions for a wide range of CPUs + * 1. Support conversion between all types and registers + * 2. Support conversion between memory and registers + * 3. Support three address basic operations + * + */ +namespace abstract { +#define DEFINE_MOP(op, ...)
op, +enum AbstractMOP_t : maple::uint32 { +#include "abstract_mmir.def" + kMopLast +}; +#undef DEFINE_MOP +} + +enum EncodeType : uint8 { + kMovImm, + kMovReg, + kAddSubExtendReg, + kAddSubImm, + kAddSubShiftImm, + kAddSubReg, + kAddSubShiftReg, + kBitfield, + kExtract, + kBranchImm, + kBranchReg, + kCompareBranch, + kCondCompareImm, + kCondCompareReg, + kConditionalSelect, + kDataProcess1Src, + kDataProcess2Src, + kDataProcess3Src, + kFloatIntConversions, + kFloatCompare, + kFloatDataProcessing1, + kFloatDataProcessing2, + kFloatDataProcessing3, + kFloatImm, + kFloatCondSelect, + kLoadStoreReg, + kLoadStoreAR, + kLoadExclusive, + kLoadExclusivePair, + kStoreExclusive, + kStoreExclusivePair, + kLoadPair, + kStorePair, + kLoadStoreFloat, + kLoadPairFloat, + kStorePairFloat, + kLoadLiteralReg, + kLogicalReg, + kLogicalImm, + kMoveWide, + kPCRelAddr, + kAddPCRelAddr, + kSystemInsn, + kTestBranch, + kCondBranch, + kUnknownEncodeType, +}; + +struct InsnDesc { + InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype, + const std::string &inName, const std::string &inFormat, uint32 anum) + : opc(op), + opndMD(opndmd), + properties(props), + latencyType(ltype), + name(inName), + format(inFormat), + atomicNum(anum) { + }; + + // for hard-coded machine description. + InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype, + const std::string &inName, const std::string &inFormat, uint32 anum, std::function vFunc) + : opc(op), + opndMD(opndmd), + properties(props), + latencyType(ltype), + name(inName), + format(inFormat), + atomicNum(anum), + validFunc(vFunc) { + }; + + // for aarch64 assemble + InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype,const std::string &inName, + const std::string &inFormat, uint32 anum, std::function vFunc, EncodeType type, uint32 encode) + : opc(op), + opndMD(opndmd), + properties(props), + latencyType(ltype), + name(inName), + format(inFormat), + atomicNum(anum), + validFunc(vFunc), + encodeType(type), + mopEncode(encode) { + }; + + // for aarch64 assemble + InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype, + const std::string &inName, const std::string &inFormat, uint32 anum, EncodeType type, uint32 encode) + : opc(op), + opndMD(opndmd), + properties(props), + latencyType(ltype), + name(inName), + format(inFormat), + atomicNum(anum), + encodeType(type), + mopEncode(encode) { + }; + + MOperator opc; + std::vector opndMD; + uint64 properties; + uint32 latencyType; + const std::string name; + const std::string format; + uint32 atomicNum; /* indicate how many asm instructions it will emit. */ + std::function validFunc = nullptr; /* If insn has immOperand, this function needs to be implemented. 
*/ + EncodeType encodeType = kUnknownEncodeType; + uint32 mopEncode = 0x00000000; + + bool IsSame(const InsnDesc &left, + std::function cmp) const; + + bool IsCall() const { + return (properties & ISCALL) != 0; + } + bool IsTailCall() const { + return properties & ISTAILCALL; + } + bool IsPhi() const { + return (properties & ISPHI) != 0; + } + bool IsPhysicalInsn() const { + return (properties & ISABSTRACT) == 0; + } + bool IsStore() const { + return (properties & ISSTORE) != 0; + } + bool IsLoad() const { + return (properties & ISLOAD) != 0; + } + bool IsConversion() const { + return (properties & ISCONVERSION) != 0; + } + bool IsLoadPair() const { + return (properties & (ISLOADPAIR)) != 0; + } + bool IsStorePair() const { + return (properties & (ISSTOREPAIR)) != 0; + } + bool IsLoadStorePair() const { + return (properties & (ISLOADPAIR | ISSTOREPAIR)) != 0; + } + bool IsMove() const { + return (properties & ISMOVE) != 0; + } + bool IsDMB() const { + return (properties & (ISDMB)) != 0; + } + bool IsBasicOp() const { + return (properties & ISBASICOP) != 0; + } + bool IsCondBranch() const { + return (properties & (ISCONDBRANCH)) != 0; + } + bool IsUnCondBranch() const { + return (properties & (ISUNCONDBRANCH)) != 0; + } + bool IsAtomic() const { + return (properties & ISATOMIC) != 0; + } + + bool IsCondDef() const { + return (properties & ISCONDDEF) != 0; + } + + bool IsVectorOp() const { + return (properties & ISVECTOR) != 0; + } + + bool IsVolatile() const { + return ((properties & HASRELEASE) != 0) || ((properties & HASACQUIRE) != 0); + } + + bool IsMemAccessBar() const { + return (properties & (HASRELEASE | HASACQUIRE | HASACQUIRERCPC | HASLOACQUIRE | HASLORELEASE)) != 0; + } + + bool IsMemAccess() const { + return (properties & (ISLOAD | ISSTORE | ISLOADPAIR | ISSTOREPAIR)) != 0; + } + + bool IsBranch() const { + return (properties & (ISCONDBRANCH | ISUNCONDBRANCH)) != 0; + } + + bool HasLoop() const { + return (properties & HASLOOP) != 0; + } + + bool CanThrow() const { + return (properties & CANTHROW) != 0; + } + + bool IsInlineAsm() const { + return properties & INLINEASM; + } + + bool IsSpecialIntrinsic() const { + return properties & SPINTRINSIC; + } + + bool IsIntrinsic() const { + return properties & ISINTRINSIC; + } + + MOperator GetOpc() const { + return opc; + } + + const OpndDesc *GetOpndDes(size_t index) const { + return opndMD[index]; + } + + const uint32 GetOpndMDLength() const { + return opndMD.size(); + } + + uint32 GetOperandSize() const { + if (properties & (ISLOAD | ISSTORE)) { + /* use memory operand */ + return GetOpndDes(1)->GetSize(); + } + /* use dest operand */ + return GetOpndDes(0)->GetSize(); + } + + bool Is64Bit() const { + return GetOperandSize() == k64BitSize; + } + + bool IsValidImmOpnd(int64 val) const { + if (!validFunc) { + return true; + } + return validFunc(val); + } + + uint32 GetLatencyType() const { + return latencyType; + } + + bool IsUnaryOp() const { + return (properties & ISUNARYOP) != 0; + } + + bool IsShift() const { + return (properties & ISSHIFT) != 0; + } + + const std::string &GetName() const { + return name; + } + + const std::string &GetFormat() const { + return format; + } + + uint32 GetAtomicNum() const { + return atomicNum; + } + + EncodeType GetEncodeType() const { + return encodeType; + } + + uint32 GetMopEncode() const { + return mopEncode; + } + + static const InsnDesc &GetAbstractId(MOperator opc) { + ASSERT(opc < abstract::kMopLast, "op must be lower than kMopLast"); + return abstractId[opc]; + } + + static const InsnDesc 
abstractId[abstract::kMopLast]; +}; + +enum RegAddress : uint32 { + kRegHigh = 0x4, + kRegLow = 0x8 +}; +constexpr uint32 kMemLow12 = 0x10; +constexpr uint32 kLiteralLow12 = kMemLow12; +constexpr uint32 kPreInc = 0x20; +constexpr uint32 kPostInc = 0x40; +constexpr uint32 kLoadLiteral = 0x80; + +enum BitIndex : maple::uint8 { + k8BitIndex = 0, + k16BitIndex, + k32BitIndex, + k64BitIndex, + kBitIndexEnd, +}; + +static inline BitIndex GetBitIndex(uint32 bitSize) { + switch (bitSize) { + case k8BitSize: + return k8BitIndex; + case k16BitSize: + return k16BitIndex; + case k32BitSize: + return k32BitIndex; + case k64BitSize: + return k64BitIndex; + default: + CHECK_FATAL(false, "NIY, Not support size"); + } +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ISA_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/isel.h b/ecmascript/mapleall/maple_be/include/cg/isel.h new file mode 100644 index 0000000000000000000000000000000000000000..ef01a35252499ca41b95ab8225d13d55a0b5dc8a --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/isel.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_ISEL_H +#define MAPLEBE_INCLUDE_CG_ISEL_H + +#include "cgfunc.h" + +namespace maplebe { +struct MirTypeInfo { + PrimType primType; + int32 offset = 0; + uint32 size = 0; /* for aggType */ +}; +/* macro expansion instruction selection */ +class MPISel { + public: + MPISel(MemPool &mp, MapleAllocator &allocator, CGFunc &f) + : isMp(&mp), cgFunc(&f), preg2Opnd(allocator.Adapter()) {} + + virtual ~MPISel() { + isMp = nullptr; + cgFunc = nullptr; + } + + void doMPIS(); + + CGFunc *GetCurFunc() { + return cgFunc; + } + + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); + + void SelectDassign(const DassignNode &stmt, Operand &opndRhs); + void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0); + void SelectIassign(const IassignNode &stmt, Operand &opndAddr, Operand &opndRhs); + void SelectIassignoff(const IassignoffNode &stmt); + RegOperand *SelectRegread(RegreadNode &expr); + void SelectRegassign(RegassignNode &stmt, Operand &opnd0); + Operand* SelectDread(const BaseNode &parent, const AddrofNode &expr); + Operand* SelectBand(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand* SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand* SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand* SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); + Operand* SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0); + Operand* SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0); + Operand *SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectAbs(UnaryNode &node, Operand &opnd0); + Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); + Operand 
*SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); + ImmOperand *SelectIntConst(const MIRIntConst &intConst, PrimType primType); + void SelectCallCommon(StmtNode &stmt, MPISel &iSel); + void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + Operand *SelectShift(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, + PrimType opnd0Type, PrimType opnd1Type); + void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + virtual void SelectReturn(NaryStmtNode &retNode, Operand &opnd) = 0; + virtual void SelectReturn() = 0; + virtual void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) = 0; + virtual void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) = 0; + virtual void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) = 0; + virtual void SelectGoto(GotoNode &stmt) = 0; + virtual void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) = 0; + virtual void SelectIgoto(Operand &opnd0) = 0; + virtual void SelectCall(CallNode &callNode) = 0; + virtual void SelectIcall(IcallNode &icallNode, Operand &opnd0) = 0; + virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; + virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; + virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0 ; + virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0; + Operand *SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset = 0); + Operand *SelectIreadoff(const BaseNode &parent, const IreadoffNode &ireadoff); + virtual Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) = 0; + virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual void SelectAsm(AsmNode &node) = 0; + virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) = 0; + Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); + Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0); + virtual RegOperand &SelectSpecialRegread(PregIdx pregIdx, PrimType primType) = 0; + protected: + 
MemPool *isMp; + CGFunc *cgFunc; + MapleMap preg2Opnd; + + void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType); + void SelectCopy(Operand &dest, Operand &src, PrimType toType); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType); + void SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); + PrimType GetIntegerPrimTypeFromSize(bool isSigned, uint32 bitSize); + std::pair GetFieldIdAndMirTypeFromMirNode(const BaseNode &node); + MirTypeInfo GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType); + MirTypeInfo GetMirTypeInfoFromMirNode(const BaseNode &node); + MemOperand *GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset); + private: + StmtNode *HandleFuncEntry(); + void HandleFuncExit(); + void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs); + void SelectDassignStruct(MIRSymbol &symbol, MemOperand &symbolMem, Operand &opndRhs); + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) = 0; + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) = 0; + virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; + void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType); + /* + * Support conversion between all types and registers + * only Support conversion between registers and memory + * alltypes -> reg -> mem + */ + void SelectCopyInsn(Operand &dest, Operand &src, PrimType type); + void SelectNeg(Operand &resOpnd, Operand &opnd0, PrimType primType); + void SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType); + void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType); + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + virtual RegOperand &GetTargetBasicPointer(PrimType primType) = 0; + virtual RegOperand &GetTargetStackPointer(PrimType primType) = 0; + void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + virtual void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; +}; +MAPLE_FUNC_PHASE_DECLARE_BEGIN(InstructionSelector, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} +#endif /* MAPLEBE_INCLUDE_CG_ISEL_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/label_creation.h b/ecmascript/mapleall/maple_be/include/cg/label_creation.h new file mode 100644 index 0000000000000000000000000000000000000000..c2d7ca23ff74198df4453c690df9ba4aad597e0c --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/label_creation.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_LABEL_CREATION_H +#define MAPLEBE_INCLUDE_CG_LABEL_CREATION_H + +#include "cgfunc.h" +#include "cg_phase.h" +#include "mir_builder.h" + +namespace maplebe { +class LabelCreation { + public: + explicit LabelCreation(CGFunc &func) : cgFunc(&func) {} + + ~LabelCreation() = default; + + void Run(); + + std::string PhaseName() const { + return "createlabel"; + } + + private: + CGFunc *cgFunc; + void CreateStartEndLabel(); +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgCreateLabel, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_LABEL_CREATION_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/live.h b/ecmascript/mapleall/maple_be/include/cg/live.h new file mode 100644 index 0000000000000000000000000000000000000000..6a9e34736817fe70a16116fa079b7888e95d72e1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/live.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_INCLUDE_CG_LIVE_H
+#define MAPLEBE_INCLUDE_CG_LIVE_H
+
+#include "cg_phase.h"
+#include "insn.h"
+#include "cgbb.h"
+#include "sparse_datainfo.h"
+#include "cgfunc.h"
+
+namespace maplebe {
+class LiveAnalysis : public AnalysisResult {
+ public:
+  LiveAnalysis(CGFunc &func, MemPool &memPool)
+      : AnalysisResult(&memPool), cgFunc(&func), memPool(&memPool), alloc(&memPool), stackMp(func.GetStackMemPool()) {}
+  ~LiveAnalysis() override = default;
+
+  void AnalysisLive();
+  void Dump() const;
+  void DumpInfo(const SparseDataInfo &info) const;
+  void InitBB(BB &bb);
+  void InitAndGetDefUse();
+  bool GenerateLiveOut(BB &bb);
+  bool GenerateLiveIn(BB &bb);
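+  /*
+   * Sketch of the intended data flow (a reading of the declarations above,
+   * not taken from the original header): GenerateLiveOut/GenerateLiveIn are
+   * one step of the classic backward liveness equations
+   *   liveOut(bb) = union of liveIn(succ) over the successors of bb
+   *   liveIn(bb)  = use(bb) U (liveOut(bb) - def(bb))
+   * presumably returning whether the set changed, so that BuildInOutforFunc()
+   * can iterate them to a fixed point.
+   */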
+  void BuildInOutforFunc();
+  void DealWithInOutOfCleanupBB();
+  void InsertInOutOfCleanupBB();
+  void ResetLiveSet();
+  void ClearInOutDataInfo();
+  void EnlargeSpaceForLiveAnalysis(BB &currBB);
+  void GetBBDefUse(BB &bb);
+  void ProcessAsmListOpnd(BB &bb, Operand &opnd, uint32 idx) const;
+  void ProcessListOpnd(BB &bb, Operand &opnd, bool isDef) const;
+  void ProcessMemOpnd(BB &bb, Operand &opnd) const;
+  void ProcessCondOpnd(BB &bb) const;
+  void CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const;
+
+  SparseDataInfo *NewLiveIn(uint32 maxRegCount) {
+    return memPool->New<SparseDataInfo>(maxRegCount, alloc);
+  }
+
+  SparseDataInfo *NewLiveOut(uint32 maxRegCount) {
+    return memPool->New<SparseDataInfo>(maxRegCount, alloc);
+  }
+
+  SparseDataInfo *NewDef(uint32 maxRegCount) {
+    return memPool->New<SparseDataInfo>(maxRegCount, alloc);
+  }
+
+  SparseDataInfo *NewUse(uint32 maxRegCount) {
+    return memPool->New<SparseDataInfo>(maxRegCount, alloc);
+  }
+
+  virtual void GenerateReturnBBDefUse(BB &bb) const = 0;
+  virtual void ProcessCallInsnParam(BB &bb, const Insn &insn) const = 0;
+  virtual bool CleanupBBIgnoreReg(regno_t reg) = 0;
+  virtual void InitEhDefine(BB &bb) = 0;
+
+ protected:
+  int iteration = 0;
+  CGFunc *cgFunc;
+  MemPool *memPool;
+  MapleAllocator alloc;
+  StackMemPool &stackMp;
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLiveAnalysis, maplebe::CGFunc)
+  LiveAnalysis *GetResult() {
+    return live;
+  }
+  LiveAnalysis *live = nullptr;
+OVERRIDE_DEPENDENCE
+MAPLE_FUNC_PHASE_DECLARE_END
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_LIVE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/local_opt.h b/ecmascript/mapleall/maple_be/include/cg/local_opt.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b3d1246481a9aea7d10c2efc0ce50e4b2071b88
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/local_opt.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_LOCALO_H
+#define MAPLEBE_INCLUDE_CG_LOCALO_H
+
+#include "cg_phase.h"
+#include "cgbb.h"
+#include "live.h"
+#include "loop.h"
+#include "cg.h"
+
+namespace maplebe {
+class LocalOpt {
+ public:
+  LocalOpt(MemPool &memPool, CGFunc &func, ReachingDefinition &rd)
+      : localoMp(&memPool),
+        cgFunc(&func),
+        reachindDef(&rd) {}
+
+  virtual ~LocalOpt() = default;
+
+  void DoLocalCopyPropOptmize();
+
+ protected:
+  ReachingDefinition *GetRDInfo() {
+    return reachindDef;
+  }
+  MemPool *localoMp;
+  CGFunc *cgFunc;
+  ReachingDefinition *reachindDef;
+
+ private:
+  virtual void DoLocalCopyProp() = 0;
+};
+
+class LocalOptimizeManager {
+ public:
+  LocalOptimizeManager(CGFunc &cgFunc, ReachingDefinition &rd)
+      : cgFunc(cgFunc),
+        reachingDef(&rd) {}
+  ~LocalOptimizeManager() = default;
+  template <typename LocalPropOptimizePattern>
+  void Optimize() {
+    LocalPropOptimizePattern optPattern(cgFunc, *reachingDef);
+    optPattern.Run();
+  }
+ private:
+  CGFunc &cgFunc;
+  ReachingDefinition *reachingDef;
+};
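+/*
+ * Usage sketch (illustrative only, not part of this patch): a local pass that
+ * already owns a ReachingDefinition result would drive a pattern through the
+ * manager. MyCopyPropPattern is a hypothetical subclass of
+ * LocalPropOptimizePattern that implements CheckCondition() and Optimize():
+ *
+ *   LocalOptimizeManager manager(cgFunc, *reachingDef);
+ *   manager.Optimize<MyCopyPropPattern>();
+ *
+ * Optimize<T>() simply constructs T(cgFunc, *reachingDef) and calls T::Run().
+ */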
+
+class LocalPropOptimizePattern {
+ public:
+  LocalPropOptimizePattern(CGFunc &cgFunc, ReachingDefinition &rd)
+      : cgFunc(cgFunc),
+        reachingDef(&rd) {}
+  virtual ~LocalPropOptimizePattern() = default;
+  virtual bool CheckCondition(Insn &insn) = 0;
+  virtual void Optimize(BB &bb, Insn &insn) = 0;
+  void Run();
+ protected:
+  std::string PhaseName() const {
+    return "localopt";
+  }
+  CGFunc &cgFunc;
+  ReachingDefinition *reachingDef;
+};
+
+class RedundantDefRemove : public LocalPropOptimizePattern {
+ public:
+  RedundantDefRemove(CGFunc &cgFunc, ReachingDefinition &rd) : LocalPropOptimizePattern(cgFunc, rd) {}
+  ~RedundantDefRemove() override = default;
+  bool CheckCondition(Insn &insn) final;
+};
+
+MAPLE_FUNC_PHASE_DECLARE(LocalCopyProp, maplebe::CGFunc)
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_LOCALO_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/loop.h b/ecmascript/mapleall/maple_be/include/cg/loop.h
new file mode 100644
index 0000000000000000000000000000000000000000..658ee83f0433058f14f7fae281f8b11e78a98172
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/loop.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_LOOP_H
+#define MAPLEBE_INCLUDE_CG_LOOP_H
+
+#include "cg_phase.h"
+#include "cgbb.h"
+#include "insn.h"
+#include "maple_phase.h"
+
+namespace maplebe {
+class LoopHierarchy {
+ public:
+  struct HeadIDCmp {
+    bool operator()(const LoopHierarchy *loopHierarchy1, const LoopHierarchy *loopHierarchy2) const {
+      CHECK_NULL_FATAL(loopHierarchy1);
+      CHECK_NULL_FATAL(loopHierarchy2);
+      return (loopHierarchy1->GetHeader()->GetId() < loopHierarchy2->GetHeader()->GetId());
+    }
+  };
+
+  explicit LoopHierarchy(MemPool &memPool)
+      : loopMemPool(&memPool),
+        otherLoopEntries(loopMemPool.Adapter()),
+        loopMembers(loopMemPool.Adapter()),
+        backedge(loopMemPool.Adapter()),
+        exits(loopMemPool.Adapter()),
+        innerLoops(loopMemPool.Adapter()) {}
+
+  virtual ~LoopHierarchy() = default;
+
+  BB *GetHeader() const {
+    return header;
+  }
+  const MapleSet<BB*> &GetLoopMembers() const {
+    return loopMembers;
+  }
+  const MapleSet<BB*> &GetBackedge() const {
+    return backedge;
+  }
+  MapleSet<BB*> &GetBackedgeNonConst() {
+    return backedge;
+  }
+  const MapleSet<BB*> &GetExits() const {
+    return exits;
+  }
+  const MapleSet<LoopHierarchy*, HeadIDCmp> &GetInnerLoops() const {
+    return innerLoops;
+  }
+  const LoopHierarchy *GetOuterLoop() const {
+    return outerLoop;
+  }
+  LoopHierarchy *GetPrev() {
+    return prev;
+  }
+  LoopHierarchy *GetNext() {
+    return next;
+  }
+
+  MapleSet<BB*>::iterator EraseLoopMembers(MapleSet<BB*>::iterator it) {
+    return loopMembers.erase(it);
+  }
+  void InsertLoopMembers(BB &bb) {
+    (void)loopMembers.insert(&bb);
+  }
+  void InsertBackedge(BB &bb) {
+    (void)backedge.insert(&bb);
+  }
+  void InsertExit(BB &bb) {
+    (void)exits.insert(&bb);
+  }
+  void InsertInnerLoops(LoopHierarchy &loop) {
+    (void)innerLoops.insert(&loop);
+  }
+  void SetHeader(BB &bb) {
+    header = &bb;
+  }
+  void SetOuterLoop(LoopHierarchy &loop) {
+    outerLoop = &loop;
+  }
+  void SetPrev(LoopHierarchy *loop) {
+    prev = loop;
+  }
+  void SetNext(LoopHierarchy *loop) {
+    next = loop;
+  }
+  void PrintLoops(const std::string &name) const;
+
+ protected:
+  LoopHierarchy *prev = nullptr;
+  LoopHierarchy *next = nullptr;
+
+ private:
+  MapleAllocator loopMemPool;
+  BB *header = nullptr;
+ public:
+  MapleSet<BB*> otherLoopEntries;
+  MapleSet<BB*> loopMembers;
+  MapleSet<BB*> backedge;
+  MapleSet<BB*> exits;
+  MapleSet<LoopHierarchy*, HeadIDCmp> innerLoops;
+  LoopHierarchy *outerLoop = nullptr;
+};
+
+class LoopFinder : public AnalysisResult {
+ public:
+  LoopFinder(CGFunc &func, MemPool &mem)
+      : AnalysisResult(&mem),
+        cgFunc(&func),
+        memPool(&mem),
+        loopMemPool(memPool),
+        visitedBBs(loopMemPool.Adapter()),
+        sortedBBs(loopMemPool.Adapter()),
+        dfsBBs(loopMemPool.Adapter()),
+        onPathBBs(loopMemPool.Adapter()),
+        recurseVisited(loopMemPool.Adapter())
+  {}
+
+  ~LoopFinder() override = default;
+
+  void formLoop(BB* headBB, BB* backBB);
+  void seekBackEdge(BB* bb, MapleList<BB*> succs);
+  void seekCycles();
+  void markExtraEntryAndEncl();
+  bool HasSameHeader(const LoopHierarchy *lp1, const LoopHierarchy *lp2);
+  void MergeLoops();
+  void SortLoops();
+  void UpdateOuterForInnerLoop(BB *bb, LoopHierarchy *outer);
+  void UpdateOuterLoop(const LoopHierarchy *loop);
+  void CreateInnerLoop(LoopHierarchy &inner, LoopHierarchy &outer);
+  void DetectInnerLoop();
+  void UpdateCGFunc();
+  void FormLoopHierarchy();
+
+ private:
+  CGFunc *cgFunc;
+  MemPool *memPool;
+  MapleAllocator loopMemPool;
+  MapleVector<bool> visitedBBs;
+  MapleVector<BB*> sortedBBs;
+  MapleStack<BB*> dfsBBs;
+  MapleVector<bool> onPathBBs;
+  MapleVector<bool> recurseVisited;
+  LoopHierarchy *loops = nullptr;
+};
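+/*
+ * Reading guide (inferred from the method names above, not normative): a
+ * plausible FormLoopHierarchy() pipeline is
+ *   seekCycles()              DFS over the CFG, recording back edges
+ *   seekBackEdge()/formLoop() build one LoopHierarchy per back edge
+ *   markExtraEntryAndEncl()   note irreducible (multi-entry) loops
+ *   MergeLoops()/SortLoops()  unify loops sharing a header, order by header id
+ *   DetectInnerLoop()         establish the outer/inner nesting
+ *   UpdateCGFunc()            publish the result as CGFuncLoops
+ */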
+
+class CGFuncLoops {
+ public:
+  explicit CGFuncLoops(MemPool &memPool)
+      : loopMemPool(&memPool),
+        multiEntries(loopMemPool.Adapter()),
+        loopMembers(loopMemPool.Adapter()),
+        backedge(loopMemPool.Adapter()),
+        exits(loopMemPool.Adapter()),
+        innerLoops(loopMemPool.Adapter()) {}
+
+  ~CGFuncLoops() = default;
+
+  void CheckOverlappingInnerLoops(const MapleVector<CGFuncLoops*> &iLoops,
+                                  const MapleVector<BB*> &loopMem) const;
+  void CheckLoops() const;
+  void PrintLoops(const CGFuncLoops &funcLoop) const;
+
+  const BB *GetHeader() const {
+    return header;
+  }
+  const MapleVector<BB*> &GetMultiEntries() const {
+    return multiEntries;
+  }
+  const MapleVector<BB*> &GetLoopMembers() const {
+    return loopMembers;
+  }
+  const MapleVector<BB*> &GetBackedge() const {
+    return backedge;
+  }
+  const MapleVector<BB*> &GetExits() const {
+    return exits;
+  }
+  const MapleVector<CGFuncLoops*> &GetInnerLoops() const {
+    return innerLoops;
+  }
+  const CGFuncLoops *GetOuterLoop() const {
+    return outerLoop;
+  }
+  uint32 GetLoopLevel() const {
+    return loopLevel;
+  }
+
+  void AddMultiEntries(BB &bb) {
+    multiEntries.emplace_back(&bb);
+  }
+  void AddLoopMembers(BB &bb) {
+    loopMembers.emplace_back(&bb);
+  }
+  void AddBackedge(BB &bb) {
+    backedge.emplace_back(&bb);
+  }
+  void AddExit(BB &bb) {
+    exits.emplace_back(&bb);
+  }
+  void AddInnerLoops(CGFuncLoops &loop) {
+    innerLoops.emplace_back(&loop);
+  }
+  void SetHeader(BB &bb) {
+    header = &bb;
+  }
+  void SetOuterLoop(CGFuncLoops &loop) {
+    outerLoop = &loop;
+  }
+  void SetLoopLevel(uint32 val) {
+    loopLevel = val;
+  }
+
+ private:
+  MapleAllocator loopMemPool;
+  BB *header = nullptr;
+  MapleVector<BB*> multiEntries;
+  MapleVector<BB*> loopMembers;
+  MapleVector<BB*> backedge;
+  MapleVector<BB*> exits;
+  MapleVector<CGFuncLoops*> innerLoops;
+  CGFuncLoops *outerLoop = nullptr;
+  uint32 loopLevel = 0;
+};
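+/*
+ * Invariant sketch (assumed semantics, not stated by this header): CheckLoops()
+ * reads as a debugging aid; the natural conditions for it to verify are that
+ * every backedge source targets the header, that each inner loop's members are
+ * a subset of this loop's members, and (via CheckOverlappingInnerLoops) that
+ * sibling inner loops share no BB.
+ */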
+
+struct CGFuncLoopCmp {
+  bool operator()(const CGFuncLoops *lhs, const CGFuncLoops *rhs) const {
+    CHECK_NULL_FATAL(lhs);
+    CHECK_NULL_FATAL(rhs);
+    return lhs->GetHeader()->GetId() < rhs->GetHeader()->GetId();
+  }
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLoopAnalysis, maplebe::CGFunc);
+MAPLE_FUNC_PHASE_DECLARE_END
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_LOOP_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/lsda.h b/ecmascript/mapleall/maple_be/include/cg/lsda.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4f4157f1b440e6067137e6b58ef248675253974
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/lsda.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_LSDA_H
+#define MAPLEBE_INCLUDE_CG_LSDA_H
+#include "types_def.h"
+#include "mir_nodes.h"
+#include "cgbb.h"
+
+namespace maplebe {
+using namespace maple;
+
+class LabelPair {
+ public:
+  LabelPair() = default;
+  LabelPair(LabelNode *startOffsetLbl, LabelNode *endOffsetLbl) {
+    startOffset = startOffsetLbl;
+    endOffset = endOffsetLbl;
+  }
+  ~LabelPair() = default;
+
+  const LabelNode *GetStartOffset() const {
+    return startOffset;
+  }
+
+  void SetStartOffset(LabelNode *lableNode) {
+    startOffset = lableNode;
+  }
+
+  const LabelNode *GetEndOffset() const {
+    return endOffset;
+  }
+
+  void SetEndOffsetLabelIdx(LabelIdx index) const {
+    endOffset->SetLabelIdx(index);
+  }
+
+  void SetEndOffset(LabelNode *labelNode) {
+    endOffset = labelNode;
+  }
+
+ private:
+  LabelNode *startOffset;
+  LabelNode *endOffset;
+};
+
+class LSDAHeader {
+ public:
+  const LabelNode *GetLSDALabel() const {
+    return lsdaLabel;
+  }
+
+  void SetLSDALabel(LabelNode &labelNode) {
+    lsdaLabel = &labelNode;
+  }
+
+  uint8 GetLPStartEncoding() const {
+    return lpStartEncoding;
+  }
+
+  void SetLPStartEncoding(uint8 encoding) {
+    lpStartEncoding = encoding;
+  }
+
+  uint8 GetTTypeEncoding() const {
+    return tTypeEncoding;
+  }
+
+  void SetTTypeEncoding(uint8 encoding) {
+    tTypeEncoding = encoding;
+  }
+
+  const LabelPair &GetTTypeOffset() const {
+    return tTypeOffset;
+  }
+
+  void SetTTypeOffset(LabelNode *start, LabelNode *end) {
+    tTypeOffset.SetStartOffset(start);
+    tTypeOffset.SetEndOffset(end);
+  }
+
+  uint8 GetCallSiteEncoding() const {
+    return callSiteEncoding;
+  }
+
+  void SetCallSiteEncoding(uint8 encoding) {
+    callSiteEncoding = encoding;
+  }
+
+ private:
+  LabelNode *lsdaLabel;
+  uint8 lpStartEncoding;
+  uint8 tTypeEncoding;
+  LabelPair tTypeOffset;
+  uint8 callSiteEncoding;
+};
+
+struct LSDACallSite {
+  LabelPair csStart;
+  LabelPair csLength;
+  LabelPair csLandingPad;
+  uint32 csAction;
+
+ public:
+  void Init(const LabelPair &start, const LabelPair &length, const LabelPair &landingPad, uint32 action) {
+    csStart = start;
+    csLength = length;
+    csLandingPad = landingPad;
+    csAction = action;
+  }
+};
+
+class LSDAAction {
+ public:
+  LSDAAction(uint8 idx, uint8 filter) : actionIndex(idx), actionFilter(filter) {}
+  ~LSDAAction() = default;
+
+  uint8 GetActionIndex() const {
+    return actionIndex;
+  }
+
+  uint8 GetActionFilter() const {
+    return actionFilter;
+  }
+
+ private:
+  uint8 actionIndex;
+  uint8 actionFilter;
+};
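+/*
+ * Background note (general Itanium C++ ABI behavior, stated as an assumption
+ * rather than taken from this patch): each LSDACallSite row mirrors one entry
+ * of the LSDA call-site table emitted into .gcc_except_table. The pair
+ * (csStart, csLength) delimits a code range that may throw, csLandingPad is
+ * where the unwinder transfers control for that range, and csAction selects a
+ * chain in the action table (0 conventionally meaning cleanup only). The
+ * LabelPair start/end labels are emitted as label-difference offsets.
+ */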
+
+class LSDACallSiteTable {
+ public:
+  explicit LSDACallSiteTable(MapleAllocator &alloc) : callSiteTable(alloc.Adapter()) {
+    csTable.SetStartOffset(nullptr);
+    csTable.SetEndOffset(nullptr);
+  }
+  ~LSDACallSiteTable() = default;
+
+  const MapleVector<LSDACallSite*> &GetCallSiteTable() const {
+    return callSiteTable;
+  }
+
+  void PushBack(LSDACallSite &lsdaCallSite) {
+    callSiteTable.emplace_back(&lsdaCallSite);
+  }
+
+  const LabelPair &GetCSTable() const {
+    return csTable;
+  }
+
+  void SetCSTable(LabelNode *start, LabelNode *end) {
+    csTable.SetStartOffset(start);
+    csTable.SetEndOffset(end);
+  }
+
+  void UpdateCallSite(const BB &oldBB, const BB &newBB) {
+    for (auto *callSite : callSiteTable) {
+      if (callSite->csStart.GetEndOffset() != nullptr) {
+        if (callSite->csStart.GetEndOffset()->GetLabelIdx() == oldBB.GetLabIdx()) {
+          callSite->csStart.SetEndOffsetLabelIdx(newBB.GetLabIdx());
+        }
+      }
+
+      CHECK_NULL_FATAL(callSite->csLength.GetEndOffset());
+      if (callSite->csLength.GetEndOffset()->GetLabelIdx() == oldBB.GetLabIdx()) {
+        callSite->csLength.SetEndOffsetLabelIdx(newBB.GetLabIdx());
+      }
+
+      if (callSite->csLandingPad.GetEndOffset() != nullptr) {
+        if (callSite->csLandingPad.GetEndOffset()->GetLabelIdx() == oldBB.GetLabIdx()) {
+          callSite->csLandingPad.SetEndOffsetLabelIdx(newBB.GetLabIdx());
+        }
+      }
+    }
+  }
+
+  void RemoveCallSite(const BB &bb) {
+    for (int32 i = static_cast<int32>(callSiteTable.size()) - 1; i > -1; --i) {
+      if (callSiteTable[i]->csStart.GetEndOffset() != nullptr) {
+        if (callSiteTable[i]->csStart.GetEndOffset()->GetLabelIdx() == bb.GetLabIdx()) {
+          callSiteTable.erase(callSiteTable.begin() + i);
+          continue;
+        }
+      }
+      if (callSiteTable[i]->csLandingPad.GetEndOffset() != nullptr) {
+        if (callSiteTable[i]->csLandingPad.GetEndOffset()->GetLabelIdx() == bb.GetLabIdx()) {
+          callSiteTable.erase(callSiteTable.begin() + i);
+          continue;
+        }
+      }
+    }
+  }
+
+  /* return true if label is in callSiteTable */
+  bool InCallSiteTable(LabelIdx label) const {
+    for (auto *callSite : callSiteTable) {
+      if (label == callSite->csStart.GetEndOffset()->GetLabelIdx() ||
+          label == callSite->csStart.GetStartOffset()->GetLabelIdx()) {
+        return true;
+      }
+      if (label == callSite->csLength.GetEndOffset()->GetLabelIdx() ||
+          label == callSite->csLength.GetStartOffset()->GetLabelIdx()) {
+        return true;
+      }
+      if (callSite->csLandingPad.GetStartOffset()) {
+        if (label == callSite->csLandingPad.GetEndOffset()->GetLabelIdx() ||
+            label == callSite->csLandingPad.GetStartOffset()->GetLabelIdx()) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  bool IsTryBlock(const BB &bb) const {
+    for (auto *callSite : callSiteTable) {
+      if (callSite->csLength.GetStartOffset()->GetLabelIdx() == bb.GetLabIdx()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  void SortCallSiteTable(std::function<bool(const LSDACallSite*, const LSDACallSite*)> const &func) {
+    std::sort(callSiteTable.begin(), callSiteTable.end(), func);
+  }
+
+ private:
+  MapleVector<LSDACallSite*> callSiteTable;
+  LabelPair csTable;
+};
+
+class LSDAActionTable {
+ public:
+  explicit LSDAActionTable(MapleAllocator &alloc) : actionTable(alloc.Adapter()) {}
+  virtual ~LSDAActionTable() = default;
+
+  const MapleVector<LSDAAction*> &GetActionTable() const {
+    return actionTable;
+  }
+
+  void PushBack(LSDAAction &lsdaAction) {
+    actionTable.emplace_back(&lsdaAction);
+  }
+
+  size_t Size() const {
+    return actionTable.size();
+  }
+
+ private:
+  MapleVector<LSDAAction*> actionTable;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_LSDA_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/memlayout.h b/ecmascript/mapleall/maple_be/include/cg/memlayout.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac1d5384878fd345a2572476169c2367d0b2785c
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/memlayout.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_MEMLAYOUT_H
+#define MAPLEBE_INCLUDE_CG_MEMLAYOUT_H
+
+/* C++ headers. */
+#include <cstddef>
+#include <utility>
+#include "becommon.h"
+#include "mir_function.h"
+#include "mir_nodes.h" /* StmtNode */
+
+namespace maplebe {
+using regno_t = uint32;
+enum MemSegmentKind : uint8 {
+  kMsUnknown,
+  /*
+   * Function arguments that are not passed through registers
+   * are passed to the callee through stack.
+   */
+  kMsArgsStkPassed,
+  /*
+   * In between kMsArgsStkPassed and kMsArgsRegPassed,
+   * we store callee-saved registers if any.
+   */
+  /*
+   * Args passed via registers according to the architecture-specific ABI
+   * may need to be stored on the stack.
+   * 1) In the unoptimized version, we implement a model (similar to GCC -O0)
+   * where all the values are initially stored in the memory and
+   * loaded into registers when needed, and stored back to the memory when
+   * their uses are done.
+   * 2) In an optimized version, some register-passed values may need to be
+   * spilled into memory. We allocate the space in this Memory segment.
+   * (or we may allocate them in the caller-saved area; maybe this is better...)
+   */
+  kMsArgsRegPassed,
+  /*
+   * GR/VR Save areas for unnamed arguments under vararg functions
+   */
+  kMsGrSaveArea,
+  kMsVrSaveArea,
+  /* local (auto) variables */
+  kMsRefLocals,
+  kMsLocals,
+  kMsSpillReg,
+  /*
+   * In between kMsLocals and kMsArgsToStkPass, we allocate
+   * a register-spill area and space for caller-saved registers
+   */
+  /*
+   * When a function calls another which takes some arguments
+   * that cannot be passed through registers, it is the caller's
+   * responsibility to allocate space for those arguments in memory.
+   */
+  kMsArgsToStkPass,
+  /* The red zone stack area will not be modified by the exception signal. */
+  kMsRedZone,
+};
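+/*
+ * Illustrative frame layout (an assumption for orientation, not dictated by
+ * this header; the target-specific MemLayout subclass decides the real order).
+ * From high to low addresses a frame would typically hold:
+ *   kMsArgsStkPassed               incoming stack-passed args (caller's frame)
+ *   kMsArgsRegPassed               spill slots for register-passed args
+ *   kMsGrSaveArea / kMsVrSaveArea  vararg save areas
+ *   kMsRefLocals / kMsLocals / kMsSpillReg   locals and spills
+ *   kMsArgsToStkPass               outgoing args, at the stack pointer
+ * A symbol's address is then its segment's base plus SymbolAlloc::offset.
+ */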
+
+enum StackProtectKind : uint8 {
+  kNone = 0,
+  kAddrofStack = 0x1,
+  /* set if the callee returns an aggregate larger than 16 bytes through a stack slot */
+  kRetureStackSlot = 0x2,
+};
+
+class CGFunc;
+
+/* keeps track of the allocation of a memory segment */
+class MemSegment {
+ public:
+  explicit MemSegment(MemSegmentKind memSegKind) : kind(memSegKind), size(0) {}
+
+  ~MemSegment() = default;
+
+  uint32 GetSize() const {
+    return size;
+  }
+
+  void SetSize(uint32 memSize) {
+    size = memSize;
+  }
+
+  MemSegmentKind GetMemSegmentKind() const {
+    return kind;
+  }
+
+ private:
+  MemSegmentKind kind;
+  uint32 size; /* size is negative if allocated offsets are negative */
+}; /* class MemSegment */
+
+/* describes where a symbol is allocated */
+class SymbolAlloc {
+ public:
+  SymbolAlloc() = default;
+
+  ~SymbolAlloc() = default;
+
+  const MemSegment *GetMemSegment() const {
+    return memSegment;
+  }
+
+  void SetMemSegment(const MemSegment &memSeg) {
+    memSegment = &memSeg;
+  }
+
+  int64 GetOffset() const {
+    return offset;
+  }
+
+  void SetOffset(int64 off) {
+    offset = off;
+  }
+
+ protected:
+  const MemSegment *memSegment = nullptr;
+  int64 offset = 0;
+}; /* class SymbolAlloc */
+
+class MemLayout {
+ public:
+  MemLayout(BECommon &beCommon, MIRFunction &mirFunc, MapleAllocator &mallocator, uint32 kStackPtrAlignment)
+      : be(beCommon),
+        mirFunction(&mirFunc),
+        segArgsStkPassed(kMsArgsStkPassed),
+        segArgsRegPassed(kMsArgsRegPassed),
+        segArgsToStkPass(kMsArgsToStkPass),
+        symAllocTable(mallocator.Adapter()),
+        spillLocTable(mallocator.Adapter()),
+        spillRegLocMap(mallocator.Adapter()),
+        localRefLocMap(std::less<StIdx>(), mallocator.Adapter()),
+        memAllocator(&mallocator),
+        stackPtrAlignment(kStackPtrAlignment) {
+    symAllocTable.resize(mirFunc.GetSymTab()->GetSymbolTableSize());
+  }
+
+  virtual ~MemLayout() = default;
+
+  void SetCurrFunction(CGFunc &func) {
+    cgFunc = &func;
+  }
+
+  /*
+   * Returns stack space required for a call
+   * which is used to pass arguments that cannot be
+   * passed through registers
+   */
+  virtual uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmtNode, int32 &aggCopySize, bool isIcall) = 0;
+
+  /*
+   * Go over all outgoing calls in the function body and get the maximum space
+   * needed for storing the actuals based on the actual parameters and the ABI.
+   * These are usually those arguments that cannot be passed
+   * through registers because a call passes more than 8 arguments, or
+   * they cannot be fit in a pair of registers.
+   */
+  uint32 FindLargestActualArea(int32 &aggCopySize);
+
+  virtual void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) = 0;
+
+  /*
+   * "Pseudo-registers can be regarded as local variables of a
+   * primitive type whose addresses are never taken"
+   */
+  virtual void AssignSpillLocationsToPseudoRegisters() = 0;
+
+  virtual SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) = 0;
+
+  SymbolAlloc *GetSymAllocInfo(uint32 stIdx) {
+    DEBUG_ASSERT(stIdx < symAllocTable.size(), "out of symAllocTable's range");
+    return symAllocTable[stIdx];
+  }
+
+  void SetSymAllocInfo(uint32 stIdx, SymbolAlloc &symAlloc) {
+    DEBUG_ASSERT(stIdx < symAllocTable.size(), "out of symAllocTable's range");
+    symAllocTable[stIdx] = &symAlloc;
+  }
+
+  const SymbolAlloc *GetSpillLocOfPseduoRegister(PregIdx index) const {
+    return spillLocTable.at(index);
+  }
+
+  SymbolAlloc *GetLocOfSpillRegister(regno_t vrNum) {
+    SymbolAlloc *loc = nullptr;
+    auto pos = spillRegLocMap.find(vrNum);
+    if (pos == spillRegLocMap.end()) {
+      loc = AssignLocationToSpillReg(vrNum);
+    } else {
+      loc = pos->second;
+    }
+    return loc;
+  }
+
+  uint32 SizeOfArgsToStackPass() const {
+    return segArgsToStkPass.GetSize();
+  }
+
+  uint32 SizeOfArgsRegisterPassed() const {
+    return segArgsRegPassed.GetSize();
+  }
+
+  BECommon &GetBECommon() {
+    return be;
+  }
+
+  MIRFunction *GetMIRFunction() {
+    return mirFunction;
+  }
+
+  const MemSegment &GetSegArgsStkPassed() const {
+    return segArgsStkPassed;
+  }
+
+  const MemSegment &GetSegArgsRegPassed() const {
+    return segArgsRegPassed;
+  }
+
+  const MemSegment &GetSegArgsToStkPass() const {
+    return segArgsToStkPass;
+  }
+
+  const MapleVector<SymbolAlloc*> &GetSymAllocTable() const {
+    return symAllocTable;
+  }
+
+  void SetSpillRegLocInfo(regno_t regNum, SymbolAlloc &symAlloc) {
+    spillRegLocMap[regNum] = &symAlloc;
+  }
+
+  const MapleMap<StIdx, SymbolAlloc*> &GetLocalRefLocMap() const {
+    return localRefLocMap;
+  }
+
+  void SetLocalRegLocInfo(StIdx idx, SymbolAlloc &symAlloc) {
+    localRefLocMap[idx] = &symAlloc;
+  }
+
+  bool IsLocalRefLoc(const MIRSymbol &symbol) const {
+    return localRefLocMap.find(symbol.GetStIdx()) != localRefLocMap.end();
+  }
+
+  uint32 GetStackPtrAlignment() const {
+    return stackPtrAlignment;
+  }
+ protected:
+  BECommon &be;
+  MIRFunction *mirFunction;
+  MemSegment segArgsStkPassed;
+  MemSegment segArgsRegPassed;
+  MemSegment segArgsToStkPass;
+  MapleVector<SymbolAlloc*> symAllocTable; /* index is stindex from StIdx */
+  MapleVector<SymbolAlloc*> spillLocTable; /* index is preg idx */
+  MapleUnorderedMap<regno_t, SymbolAlloc*> spillRegLocMap;
+  MapleMap<StIdx, SymbolAlloc*> localRefLocMap; /* localrefvar formals. real address passed in stack.
*/ + MapleAllocator *memAllocator; + CGFunc *cgFunc = nullptr; + const uint32 stackPtrAlignment; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_MEMLAYOUT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_arch_define.def b/ecmascript/mapleall/maple_be/include/cg/mplad_arch_define.def new file mode 100644 index 0000000000000000000000000000000000000000..d0893ea3ad59c966a4842dcb2a94cdc9da5cf51d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/mplad_arch_define.def @@ -0,0 +1,2 @@ +/* cortex_a55 Architecture definition : */ +SetMaxParallelism(2); diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_bypass_define.def b/ecmascript/mapleall/maple_be/include/cg/mplad_bypass_define.def new file mode 100644 index 0000000000000000000000000000000000000000..bc2394e19ee224b9a8f8e8ba1d825e86ed7eb12b --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/mplad_bypass_define.def @@ -0,0 +1,143 @@ +ADDBYPASS(kLtShift, kLtAlu, 0); +ADDBYPASS(kLtShiftReg, kLtAlu, 0); +ADDBYPASS(kLtShift, kLtShift, 1); +ADDBYPASS(kLtShift, kLtShiftReg, 1); +ADDBYPASS(kLtShift, kLtAluShift, 1); +ADDBYPASS(kLtShift, kLtAluShiftReg, 1); +ADDBYPASS(kLtShiftReg, kLtShift, 1); +ADDBYPASS(kLtShiftReg, kLtShiftReg, 1); +ADDBYPASS(kLtShiftReg, kLtAluShift, 1); +ADDBYPASS(kLtShiftReg, kLtAluShiftReg, 1); +ADDBYPASS(kLtAlu, kLtAlu, 1); +ADDBYPASS(kLtAluShift, kLtAlu, 1); +ADDBYPASS(kLtAluShiftReg, kLtAlu, 1); +ADDALUSHIFTBYPASS(kLtAlu, kLtAluShift, 1); +ADDALUSHIFTBYPASS(kLtAluShift, kLtAluShift, 1); +ADDALUSHIFTBYPASS(kLtAluShiftReg, kLtAluShift, 1); +ADDALUSHIFTBYPASS(kLtAluExtr, kLtAluShift, 1); +ADDALUSHIFTBYPASS(kLtAlu, kLtAluShiftReg, 1); +ADDALUSHIFTBYPASS(kLtAluShift, kLtAluShiftReg, 1); +ADDALUSHIFTBYPASS(kLtAluShiftReg, kLtAluShiftReg, 1); +ADDALUSHIFTBYPASS(kLtAluExtr, kLtAluShiftReg, 1); +ADDBYPASS(kLtAlu, kLtAluShift, 1); +ADDBYPASS(kLtAluShift, kLtAluShift, 1); +ADDBYPASS(kLtAluShiftReg, kLtAluShift, 1); +ADDBYPASS(kLtAluExtr, kLtAluShift, 1); +ADDBYPASS(kLtAlu, kLtAluShiftReg, 1); +ADDBYPASS(kLtAluShift, kLtAluShiftReg, 1); +ADDBYPASS(kLtAluShiftReg, kLtAluShiftReg, 1); +ADDBYPASS(kLtAluExtr, kLtAluShiftReg, 1); +ADDBYPASS(kLtAlu, kLtAluExtr, 2); +ADDBYPASS(kLtAluShift, kLtAluExtr, 2); +ADDBYPASS(kLtAluShiftReg, kLtAluExtr, 2); +ADDBYPASS(kLtAluExtr, kLtAluExtr, 2); +ADDBYPASS(kLtAlu, kLtShift, 2); +ADDBYPASS(kLtAluShift, kLtShift, 2); +ADDBYPASS(kLtAluShiftReg, kLtShift, 2); +ADDBYPASS(kLtAluExtr, kLtShift, 2); +ADDBYPASS(kLtAlu, kLtShiftReg, 2); +ADDBYPASS(kLtAluShift, kLtShiftReg, 2); +ADDBYPASS(kLtAluShiftReg, kLtShiftReg, 2); +ADDBYPASS(kLtAluExtr, kLtShiftReg, 2); +ADDACCUMULATORBYPASS(kLtMul, kLtMul, 1); +ADDBYPASS(kLtMul, kLtAlu, 2); +ADDBYPASS(kLtMul, kLtAluShift, 3); +ADDBYPASS(kLtMul, kLtAluShiftReg, 3); +ADDBYPASS(kLtMul, kLtAluExtr, 3); +ADDBYPASS(kLtMul, kLtShift, 3); +ADDBYPASS(kLtMul, kLtShiftReg, 3); +ADDBYPASS(kLtLoad1, kLtAlu, 2); +ADDBYPASS(kLtLoad1, kLtAluShift, 3); +ADDBYPASS(kLtLoad1, kLtAluShiftReg, 3); +ADDBYPASS(kLtLoad1, kLtAluExtr, 3); +ADDBYPASS(kLtLoad1, kLtShift, 3); +ADDBYPASS(kLtLoad1, kLtShiftReg, 3); +ADDBYPASS(kLtLoad2, kLtAlu, 3); +ADDSTOREBYPASS(kLtAlu, kLtStore1, 0); +ADDSTOREBYPASS(kLtAlu, kLtStore2, 0); +ADDSTOREBYPASS(kLtAlu, kLtStore3plus, 0); +ADDSTOREBYPASS(kLtAluShift, kLtStore1, 0); +ADDSTOREBYPASS(kLtAluShift, kLtStore2, 0); +ADDSTOREBYPASS(kLtAluShift, kLtStore3plus, 0); +ADDSTOREBYPASS(kLtAluShiftReg, kLtStore1, 0); +ADDSTOREBYPASS(kLtAluShiftReg, kLtStore2, 0); +ADDSTOREBYPASS(kLtAluShiftReg, kLtStore3plus, 0); 
+ADDSTOREBYPASS(kLtAluExtr, kLtStore1, 0); +ADDSTOREBYPASS(kLtAluExtr, kLtStore2, 0); +ADDSTOREBYPASS(kLtAluExtr, kLtStore3plus, 0); +ADDSTOREBYPASS(kLtShift, kLtStore1, 0); +ADDSTOREBYPASS(kLtShift, kLtStore2, 0); +ADDSTOREBYPASS(kLtShift, kLtStore3plus, 0); +ADDSTOREBYPASS(kLtShiftReg, kLtStore1, 0); +ADDSTOREBYPASS(kLtShiftReg, kLtStore2, 0); +ADDSTOREBYPASS(kLtShiftReg, kLtStore3plus, 0); +ADDSTOREBYPASS(kLtMul, kLtStore1, 1); +ADDSTOREBYPASS(kLtMul, kLtStore2, 1); +ADDSTOREBYPASS(kLtMul, kLtStore3plus, 1); +ADDSTOREBYPASS(kLtLoad1, kLtStore1, 1); +ADDSTOREBYPASS(kLtLoad1, kLtStore2, 1); +ADDSTOREBYPASS(kLtLoad1, kLtStore3plus, 1); +ADDSTOREBYPASS(kLtLoad2, kLtStore1, 1); +ADDSTOREBYPASS(kLtLoad2, kLtStore2, 1); +ADDSTOREBYPASS(kLtLoad2, kLtStore3plus, 1); +ADDSTOREBYPASS(kLtLoad3plus, kLtStore1, 1); +ADDSTOREBYPASS(kLtLoad3plus, kLtStore2, 1); +ADDSTOREBYPASS(kLtLoad3plus, kLtStore3plus, 1); +ADDBYPASS(kLtAlu, kLtR2f, 0); +ADDBYPASS(kLtAluShift, kLtR2f, 0); +ADDBYPASS(kLtAluShiftReg, kLtR2f, 0); +ADDBYPASS(kLtAluExtr, kLtR2f, 0); +ADDBYPASS(kLtShift, kLtR2f, 0); +ADDBYPASS(kLtShiftReg, kLtR2f, 0); +ADDBYPASS(kLtMul, kLtR2f, 1); +ADDBYPASS(kLtLoad1, kLtR2f, 1); +ADDBYPASS(kLtLoad2, kLtR2f, 1); +ADDBYPASS(kLtAlu, kLtR2fCvt, 2); +ADDBYPASS(kLtAluShift, kLtR2fCvt, 2); +ADDBYPASS(kLtAluShiftReg, kLtR2fCvt, 2); +ADDBYPASS(kLtAluExtr, kLtR2fCvt, 2); +ADDBYPASS(kLtMul, kLtR2fCvt, 3); +ADDBYPASS(kLtLoad1, kLtR2fCvt, 3); +ADDBYPASS(kLtLoad2, kLtR2fCvt, 3); +ADDBYPASS(kLtAlu, kLtBranch, 0); +ADDBYPASS(kLtAluShift, kLtBranch, 0); +ADDBYPASS(kLtAluShiftReg, kLtBranch, 0); +ADDBYPASS(kLtAluExtr, kLtBranch, 0); +ADDBYPASS(kLtShift, kLtBranch, 0); +ADDBYPASS(kLtShiftReg, kLtBranch, 0); +ADDACCUMULATORBYPASS(kLtFpalu, kLtFpmac, 1); +ADDACCUMULATORBYPASS(kLtFpmul, kLtFpmac, 1); +ADDACCUMULATORBYPASS(kLtR2f, kLtFpmac, 1); +ADDACCUMULATORBYPASS(kLtR2fCvt, kLtFpmac, 1); +ADDACCUMULATORBYPASS(kLtFconst, kLtFpmac, 1); +ADDBYPASS(kLtFLoad64, kLtFpmac, 1); +ADDBYPASS(kLtFLoadMany, kLtFpmac, 1); +ADDACCUMULATORBYPASS(kLtFpmac, kLtFpmac, 4); +ADDBYPASS(kLtCryptoAese, kLtCryptoAesmc, 0); +ADDBYPASS(kLtShiftReg, kLtClinit, 1); +ADDBYPASS(kLtAlu, kLtClinit, 2); +ADDBYPASS(kLtAluShift, kLtClinit, 2); +ADDBYPASS(kLtAluExtr, kLtClinit, 2); +ADDBYPASS(kLtMul, kLtClinit, 3); +ADDBYPASS(kLtLoad1, kLtClinit, 3); +ADDBYPASS(kLtAlu, kLtClinit, 13); +ADDSTOREBYPASS(kLtClinit, kLtStore1, 11); +ADDSTOREBYPASS(kLtClinit, kLtStore3plus, 11); +ADDBYPASS(kLtClinit, kLtR2f, 11); +ADDBYPASS(kLtClinit, kLtR2fCvt, 13); +ADDBYPASS(kLtShiftReg, kLtAdrpLdr, 1); +ADDBYPASS(kLtAlu, kLtAdrpLdr, 2); +ADDBYPASS(kLtAluShift, kLtAdrpLdr, 2); +ADDBYPASS(kLtAluExtr, kLtAdrpLdr, 2); +ADDBYPASS(kLtMul, kLtAdrpLdr, 3); +ADDBYPASS(kLtLoad1, kLtAdrpLdr, 3); +ADDBYPASS(kLtAdrpLdr, kLtAlu, 5); +ADDSTOREBYPASS(kLtAdrpLdr, kLtStore1, 3); +ADDSTOREBYPASS(kLtAdrpLdr, kLtStore3plus, 3); +ADDBYPASS(kLtAdrpLdr, kLtR2f, 3); +ADDBYPASS(kLtAdrpLdr, kLtR2fCvt, 5); +ADDBYPASS(kLtClinitTail, kLtAlu, 7); +ADDSTOREBYPASS(kLtClinitTail, kLtStore1, 5); +ADDSTOREBYPASS(kLtClinitTail, kLtStore3plus, 5); +ADDBYPASS(kLtClinitTail, kLtR2f, 5); +ADDBYPASS(kLtClinitTail, kLtR2fCvt, 7); diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_latency_type.def b/ecmascript/mapleall/maple_be/include/cg/mplad_latency_type.def new file mode 100644 index 0000000000000000000000000000000000000000..e17ccc374054789a3b165d0ac355d2b2b1c5def8 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/mplad_latency_type.def @@ -0,0 +1,45 @@ +/* cortex_a55 latency type definition 
: */
+  kLtUndef,
+  kLtShift,
+  kLtShiftReg,
+  kLtAlu,
+  kLtAluShift,
+  kLtAluShiftReg,
+  kLtAluExtr,
+  kLtMul,
+  kLtDiv,
+  kLtLoad1,
+  kLtStore1,
+  kLtLoad2,
+  kLtStore2,
+  kLtLoad3plus,
+  kLtStore3plus,
+  kLtBranch,
+  kLtFpalu,
+  kLtFconst,
+  kLtFpmul,
+  kLtFpmac,
+  kLtR2f,
+  kLtF2r,
+  kLtR2fCvt,
+  kLtF2rCvt,
+  kLtFFlags,
+  kLtFLoad64,
+  kLtFLoadMany,
+  kLtFStore64,
+  kLtFStoreMany,
+  kLtAdvsimdAlu,
+  kLtAdvsimdAluQ,
+  kLtAdvsimdMul,
+  kLtAdvsimdMulQ,
+  kLtAdvsimdDivS,
+  kLtAdvsimdDivD,
+  kLtAdvsimdDivSQ,
+  kLtAdvsimdDivdQ,
+  kLtCryptoAese,
+  kLtCryptoAesmc,
+  kLtClinit,
+  kLtAdrpLdr,
+  kLtClinitTail,
+  kLtTlsRel,
+  kLtTlsCall,
diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_reservation_define.def b/ecmascript/mapleall/maple_be/include/cg/mplad_reservation_define.def
new file mode 100644
index 0000000000000000000000000000000000000000..db20985f806caa3b7c648299436f78ee500702c7
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/mplad_reservation_define.def
@@ -0,0 +1,374 @@
+/* cortex_a55 reservations definition : */
+Reservation *resvInstkLtUndef = new Reservation(kLtUndef, 0, 0);
+if(resvInstkLtUndef == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtUndef failed." << std::endl;
+}
+DEBUG_ASSERT(resvInstkLtUndef, "Reservation allocation for kLtUndef failed.");
+
+Reservation *resvInstkLtShift = new Reservation(kLtShift, 2, 1,
+                                                GetUnitByUnitId(kUnitIdSlotS));
+if(resvInstkLtShift == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtShift failed." << std::endl;
+}
+DEBUG_ASSERT(resvInstkLtShift, "Reservation allocation for kLtShift failed.");
+
+Reservation *resvInstkLtShiftReg = new Reservation(kLtShiftReg, 2, 2,
+                                                   GetUnitByUnitId(kUnitIdSlotS),
+                                                   GetUnitByUnitId(kUnitIdHazard));
+if(resvInstkLtShiftReg == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtShiftReg failed." << std::endl;
+}
+DEBUG_ASSERT(resvInstkLtShiftReg, "Reservation allocation for kLtShiftReg failed.");
+
+Reservation *resvInstkLtAlu = new Reservation(kLtAlu, 3, 1,
+                                              GetUnitByUnitId(kUnitIdSlotS));
+if(resvInstkLtAlu == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAlu failed." << std::endl;
+}
+DEBUG_ASSERT(resvInstkLtAlu, "Reservation allocation for kLtAlu failed.");
+
+Reservation *resvInstkLtAluShift = new Reservation(kLtAluShift, 3, 1,
+                                                   GetUnitByUnitId(kUnitIdSlotS));
+if(resvInstkLtAluShift == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAluShift failed." << std::endl;
+}
+DEBUG_ASSERT(resvInstkLtAluShift, "Reservation allocation for kLtAluShift failed.");
+
+Reservation *resvInstkLtAluShiftReg = new Reservation(kLtAluShiftReg, 3, 2,
+                                                      GetUnitByUnitId(kUnitIdSlotS),
+                                                      GetUnitByUnitId(kUnitIdHazard));
+if(resvInstkLtAluShiftReg == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAluShiftReg failed." << std::endl;
+}
+DEBUG_ASSERT(resvInstkLtAluShiftReg, "Reservation allocation for kLtAluShiftReg failed.");
+
+Reservation *resvInstkLtAluExtr = new Reservation(kLtAluExtr, 3, 1,
+                                                  GetUnitByUnitId(kUnitIdSlot1));
+if(resvInstkLtAluExtr == nullptr) {
+  maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAluExtr failed."
<< std::endl; +} +DEBUG_ASSERT(resvInstkLtAluExtr, "Reservation allocation for kLtAluExtr failed."); + +Reservation *resvInstkLtMul = new Reservation(kLtMul, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdMul)); +if(resvInstkLtMul == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtMul failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtMul, "Reservation allocation for kLtMul failed."); + +Reservation *resvInstkLtDiv = new Reservation(kLtDiv, 4, 3, + GetUnitByUnitId(kUnitIdSlot0), + GetUnitByUnitId(kUnitIdDiv), + GetUnitByUnitId(kUnitIdDiv)); +if(resvInstkLtDiv == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtDiv failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtDiv, "Reservation allocation for kLtDiv failed."); + +Reservation *resvInstkLtLoad1 = new Reservation(kLtLoad1, 4, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtLoad1 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtLoad1 failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtLoad1, "Reservation allocation for kLtLoad1 failed."); + +Reservation *resvInstkLtStore1 = new Reservation(kLtStore1, 2, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdStAgu)); +if(resvInstkLtStore1 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtStore1 failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtStore1, "Reservation allocation for kLtStore1 failed."); + +Reservation *resvInstkLtLoad2 = new Reservation(kLtLoad2, 4, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtLoad2 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtLoad2 failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtLoad2, "Reservation allocation for kLtLoad2 failed."); + +Reservation *resvInstkLtStore2 = new Reservation(kLtStore2, 2, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdStAgu)); +if(resvInstkLtStore2 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtStore2 failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtStore2, "Reservation allocation for kLtStore2 failed."); + +Reservation *resvInstkLtLoad3plus = new Reservation(kLtLoad3plus, 6, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtLoad3plus == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtLoad3plus failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtLoad3plus, "Reservation allocation for kLtLoad3plus failed."); + +Reservation *resvInstkLtStore3plus = new Reservation(kLtStore3plus, 2, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdStAgu)); +if(resvInstkLtStore3plus == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtStore3plus failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtStore3plus, "Reservation allocation for kLtStore3plus failed."); + +Reservation *resvInstkLtBranch = new Reservation(kLtBranch, 0, 1, + GetUnitByUnitId(kUnitIdSlotSBranch)); +if(resvInstkLtBranch == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtBranch failed." 
<< std::endl; +} +DEBUG_ASSERT(resvInstkLtBranch, "Reservation allocation for kLtBranch failed."); + +Reservation *resvInstkLtFpalu = new Reservation(kLtFpalu, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtFpalu == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFpalu failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFpalu, "Reservation allocation for kLtFpalu failed."); + +Reservation *resvInstkLtFconst = new Reservation(kLtFconst, 2, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtFconst == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFconst failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFconst, "Reservation allocation for kLtFconst failed."); + +Reservation *resvInstkLtFpmul = new Reservation(kLtFpmul, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpMulS)); +if(resvInstkLtFpmul == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFpmul failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFpmul, "Reservation allocation for kLtFpmul failed."); + +Reservation *resvInstkLtFpmac = new Reservation(kLtFpmac, 8, 6, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpMulS), + nullptr, + nullptr, + nullptr, + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtFpmac == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFpmac failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFpmac, "Reservation allocation for kLtFpmac failed."); + +Reservation *resvInstkLtR2f = new Reservation(kLtR2f, 2, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtR2f == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtR2f failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtR2f, "Reservation allocation for kLtR2f failed."); + +Reservation *resvInstkLtF2r = new Reservation(kLtF2r, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtF2r == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtF2r failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtF2r, "Reservation allocation for kLtF2r failed."); + +Reservation *resvInstkLtR2fCvt = new Reservation(kLtR2fCvt, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtR2fCvt == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtR2fCvt failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtR2fCvt, "Reservation allocation for kLtR2fCvt failed."); + +Reservation *resvInstkLtF2rCvt = new Reservation(kLtF2rCvt, 5, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtF2rCvt == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtF2rCvt failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtF2rCvt, "Reservation allocation for kLtF2rCvt failed."); + +Reservation *resvInstkLtFFlags = new Reservation(kLtFFlags, 5, 1, + GetUnitByUnitId(kUnitIdSlotS)); +if(resvInstkLtFFlags == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFFlags failed." 
<< std::endl; +} +DEBUG_ASSERT(resvInstkLtFFlags, "Reservation allocation for kLtFFlags failed."); + +Reservation *resvInstkLtFLoad64 = new Reservation(kLtFLoad64, 3, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtFLoad64 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFLoad64 failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFLoad64, "Reservation allocation for kLtFLoad64 failed."); + +Reservation *resvInstkLtFLoadMany = new Reservation(kLtFLoadMany, 4, 3, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtFLoadMany == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFLoadMany failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFLoadMany, "Reservation allocation for kLtFLoadMany failed."); + +Reservation *resvInstkLtFStore64 = new Reservation(kLtFStore64, 0, 2, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdStAgu)); +if(resvInstkLtFStore64 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFStore64 failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFStore64, "Reservation allocation for kLtFStore64 failed."); + +Reservation *resvInstkLtFStoreMany = new Reservation(kLtFStoreMany, 0, 3, + GetUnitByUnitId(kUnitIdSlotSAgen), + GetUnitByUnitId(kUnitIdSlot0StAgu), + GetUnitByUnitId(kUnitIdStAgu)); +if(resvInstkLtFStoreMany == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtFStoreMany failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtFStoreMany, "Reservation allocation for kLtFStoreMany failed."); + +Reservation *resvInstkLtAdvsimdAlu = new Reservation(kLtAdvsimdAlu, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpAluS)); +if(resvInstkLtAdvsimdAlu == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdAlu failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdAlu, "Reservation allocation for kLtAdvsimdAlu failed."); + +Reservation *resvInstkLtAdvsimdAluQ = new Reservation(kLtAdvsimdAluQ, 5, 2, + GetUnitByUnitId(kUnitIdSlot0), + GetUnitByUnitId(kUnitIdFpAluD)); +if(resvInstkLtAdvsimdAluQ == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdAluQ failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdAluQ, "Reservation allocation for kLtAdvsimdAluQ failed."); + +Reservation *resvInstkLtAdvsimdMul = new Reservation(kLtAdvsimdMul, 4, 2, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdFpMulS)); +if(resvInstkLtAdvsimdMul == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdMul failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdMul, "Reservation allocation for kLtAdvsimdMul failed."); + +Reservation *resvInstkLtAdvsimdMulQ = new Reservation(kLtAdvsimdMulQ, 4, 2, + GetUnitByUnitId(kUnitIdSlot0), + GetUnitByUnitId(kUnitIdFpMulD)); +if(resvInstkLtAdvsimdMulQ == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdMulQ failed." 
<< std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdMulQ, "Reservation allocation for kLtAdvsimdMulQ failed."); + +Reservation *resvInstkLtAdvsimdDivS = new Reservation(kLtAdvsimdDivS, 14, 3, + GetUnitByUnitId(kUnitIdSlot0), + GetUnitByUnitId(kUnitIdFpMulS), + GetUnitByUnitId(kUnitIdFpDivS)); +if(resvInstkLtAdvsimdDivS == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdDivS failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdDivS, "Reservation allocation for kLtAdvsimdDivS failed."); + +Reservation *resvInstkLtAdvsimdDivD = new Reservation(kLtAdvsimdDivD, 29, 3, + GetUnitByUnitId(kUnitIdSlot0), + GetUnitByUnitId(kUnitIdFpMulS), + GetUnitByUnitId(kUnitIdFpDivS)); +if(resvInstkLtAdvsimdDivD == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdDivD failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdDivD, "Reservation allocation for kLtAdvsimdDivD failed."); + +Reservation *resvInstkLtAdvsimdDivSQ = new Reservation(kLtAdvsimdDivSQ, 14, 3, + GetUnitByUnitId(kUnitIdSlotD), + GetUnitByUnitId(kUnitIdFpMulD), + GetUnitByUnitId(kUnitIdFpDivD)); +if(resvInstkLtAdvsimdDivSQ == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdDivSQ failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdDivSQ, "Reservation allocation for kLtAdvsimdDivSQ failed."); + +Reservation *resvInstkLtAdvsimdDivdQ = new Reservation(kLtAdvsimdDivdQ, 29, 3, + GetUnitByUnitId(kUnitIdSlotD), + GetUnitByUnitId(kUnitIdFpMulD), + GetUnitByUnitId(kUnitIdFpDivD)); +if(resvInstkLtAdvsimdDivdQ == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdvsimdDivdQ failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdvsimdDivdQ, "Reservation allocation for kLtAdvsimdDivdQ failed."); + +Reservation *resvInstkLtCryptoAese = new Reservation(kLtCryptoAese, 3, 1, + GetUnitByUnitId(kUnitIdSlot0)); +if(resvInstkLtCryptoAese == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtCryptoAese failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtCryptoAese, "Reservation allocation for kLtCryptoAese failed."); + +Reservation *resvInstkLtCryptoAesmc = new Reservation(kLtCryptoAesmc, 3, 1, + GetUnitByUnitId(kUnitIdSlotS)); +if(resvInstkLtCryptoAesmc == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtCryptoAesmc failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtCryptoAesmc, "Reservation allocation for kLtCryptoAesmc failed."); + +Reservation *resvInstkLtClinit = new Reservation(kLtClinit, 14, 13, + GetUnitByUnitId(kUnitIdSlotS), + nullptr, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu), + nullptr, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu), + nullptr, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtClinit == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtClinit failed." 
<< std::endl; +} +DEBUG_ASSERT(resvInstkLtClinit, "Reservation allocation for kLtClinit failed."); + +Reservation *resvInstkLtAdrpLdr = new Reservation(kLtAdrpLdr, 6, 5, + GetUnitByUnitId(kUnitIdSlotS), + nullptr, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtAdrpLdr == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtAdrpLdr failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtAdrpLdr, "Reservation allocation for kLtAdrpLdr failed."); + +Reservation *resvInstkLtClinitTail = new Reservation(kLtClinitTail, 8, 7, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu), + nullptr, + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtClinitTail == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtClinitTail failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtClinitTail, "Reservation allocation for kLtClinitTail failed."); + +Reservation *resvInstkLtTlsRel = new Reservation(kLtTlsRel, 6, 1, + GetUnitByUnitId(kUnitIdSlotS)); +if(resvInstkLtTlsRel == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtTlsRel failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtTlsRel, "Reservation allocation for kLtTlsRel failed."); + +Reservation *resvInstkLtTlsCall = new Reservation(kLtTlsCall, 10, 4, + GetUnitByUnitId(kUnitIdSlotS), + GetUnitByUnitId(kUnitIdSlotDAgen), + GetUnitByUnitId(kUnitIdSlot0LdAgu), + GetUnitByUnitId(kUnitIdLdAgu)); +if(resvInstkLtTlsCall == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Reservation allocation for kLtTlsCall failed." << std::endl; +} +DEBUG_ASSERT(resvInstkLtTlsCall, "Reservation allocation for kLtTlsCall failed."); + diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_unit_define.def b/ecmascript/mapleall/maple_be/include/cg/mplad_unit_define.def new file mode 100644 index 0000000000000000000000000000000000000000..24aaa00b8b8adcc0b63dfefc1e5595ee9d10862d --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/mplad_unit_define.def @@ -0,0 +1,204 @@ +/* cortex_a55 function units definition : */ + +const unsigned int kunitNum = 2; +Unit *instancekUnitIdSlot0 = new Unit(kUnitIdSlot0); +if(instancekUnitIdSlot0 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlot0 failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlot0, "Unit allocation for kUnitIdSlot0 failed."); + +Unit *instancekUnitIdSlot1 = new Unit(kUnitIdSlot1); +if(instancekUnitIdSlot1 == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlot1 failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlot1, "Unit allocation for kUnitIdSlot1 failed."); + +Unit *instancekUnitIdAgen = new Unit(kUnitIdAgen); +if(instancekUnitIdAgen == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdAgen failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdAgen, "Unit allocation for kUnitIdAgen failed."); + +Unit *instancekUnitIdHazard = new Unit(kUnitIdHazard); +if(instancekUnitIdHazard == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdHazard failed." 
<< std::endl; +} +DEBUG_ASSERT(instancekUnitIdHazard, "Unit allocation for kUnitIdHazard failed."); + +Unit *instancekUnitIdCrypto = new Unit(kUnitIdCrypto); +if(instancekUnitIdCrypto == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdCrypto failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdCrypto, "Unit allocation for kUnitIdCrypto failed."); + +Unit *instancekUnitIdMul = new Unit(kUnitIdMul); +if(instancekUnitIdMul == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdMul failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdMul, "Unit allocation for kUnitIdMul failed."); + +Unit *instancekUnitIdDiv = new Unit(kUnitIdDiv); +if(instancekUnitIdDiv == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdDiv failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdDiv, "Unit allocation for kUnitIdDiv failed."); + +Unit *instancekUnitIdBranch = new Unit(kUnitIdBranch); +if(instancekUnitIdBranch == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdBranch failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdBranch, "Unit allocation for kUnitIdBranch failed."); + +Unit *instancekUnitIdStAgu = new Unit(kUnitIdStAgu); +if(instancekUnitIdStAgu == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdStAgu failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdStAgu, "Unit allocation for kUnitIdStAgu failed."); + +Unit *instancekUnitIdLdAgu = new Unit(kUnitIdLdAgu); +if(instancekUnitIdLdAgu == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdLdAgu failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdLdAgu, "Unit allocation for kUnitIdLdAgu failed."); + +Unit *instancekUnitIdFpAluLo = new Unit(kUnitIdFpAluLo); +if(instancekUnitIdFpAluLo == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpAluLo failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpAluLo, "Unit allocation for kUnitIdFpAluLo failed."); + +Unit *instancekUnitIdFpAluHi = new Unit(kUnitIdFpAluHi); +if(instancekUnitIdFpAluHi == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpAluHi failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpAluHi, "Unit allocation for kUnitIdFpAluHi failed."); + +Unit *instancekUnitIdFpMulLo = new Unit(kUnitIdFpMulLo); +if(instancekUnitIdFpMulLo == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpMulLo failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpMulLo, "Unit allocation for kUnitIdFpMulLo failed."); + +Unit *instancekUnitIdFpMulHi = new Unit(kUnitIdFpMulHi); +if(instancekUnitIdFpMulHi == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpMulHi failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpMulHi, "Unit allocation for kUnitIdFpMulHi failed."); + +Unit *instancekUnitIdFpDivLo = new Unit(kUnitIdFpDivLo); +if(instancekUnitIdFpDivLo == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpDivLo failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpDivLo, "Unit allocation for kUnitIdFpDivLo failed."); + +Unit *instancekUnitIdFpDivHi = new Unit(kUnitIdFpDivHi); +if(instancekUnitIdFpDivHi == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpDivHi failed." 
<< std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpDivHi, "Unit allocation for kUnitIdFpDivHi failed."); + +Unit *instancekUnitIdSlotS = new Unit(kUnitTypeOr, kUnitIdSlotS, kunitNum, + instancekUnitIdSlot0, instancekUnitIdSlot1); +if(instancekUnitIdSlotS == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotS failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotS, "Unit allocation for kUnitIdSlotS failed."); + +Unit *instancekUnitIdFpAluS = new Unit(kUnitTypeOr, kUnitIdFpAluS, kunitNum, + instancekUnitIdFpAluLo, instancekUnitIdFpAluHi); +if(instancekUnitIdFpAluS == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpAluS failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpAluS, "Unit allocation for kUnitIdFpAluS failed."); + +Unit *instancekUnitIdFpMulS = new Unit(kUnitTypeOr, kUnitIdFpMulS, kunitNum, + instancekUnitIdFpMulLo, instancekUnitIdFpMulHi); +if(instancekUnitIdFpMulS == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpMulS failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpMulS, "Unit allocation for kUnitIdFpMulS failed."); + +Unit *instancekUnitIdFpDivS = new Unit(kUnitTypeOr, kUnitIdFpDivS, kunitNum, + instancekUnitIdFpDivLo, instancekUnitIdFpDivHi); +if(instancekUnitIdFpDivS == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpDivS failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpDivS, "Unit allocation for kUnitIdFpDivS failed."); + +Unit *instancekUnitIdSlotD = new Unit(kUnitTypeAnd, kUnitIdSlotD, kunitNum, + instancekUnitIdSlot0, instancekUnitIdSlot1); +if(instancekUnitIdSlotD == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotD failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotD, "Unit allocation for kUnitIdSlotD failed."); + +Unit *instancekUnitIdFpAluD = new Unit(kUnitTypeAnd, kUnitIdFpAluD, kunitNum, + instancekUnitIdFpAluLo, instancekUnitIdFpAluHi); +if(instancekUnitIdFpAluD == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpAluD failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpAluD, "Unit allocation for kUnitIdFpAluD failed."); + +Unit *instancekUnitIdFpMulD = new Unit(kUnitTypeAnd, kUnitIdFpMulD, kunitNum, + instancekUnitIdFpMulLo, instancekUnitIdFpMulHi); +if(instancekUnitIdFpMulD == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpMulD failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpMulD, "Unit allocation for kUnitIdFpMulD failed."); + +Unit *instancekUnitIdFpDivD = new Unit(kUnitTypeAnd, kUnitIdFpDivD, kunitNum, + instancekUnitIdFpDivLo, instancekUnitIdFpDivHi); +if(instancekUnitIdFpDivD == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdFpDivD failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdFpDivD, "Unit allocation for kUnitIdFpDivD failed."); + +Unit *instancekUnitIdSlotSHazard = new Unit(kUnitTypeAnd, kUnitIdSlotSHazard, kunitNum, + instancekUnitIdSlotS, instancekUnitIdHazard); +if(instancekUnitIdSlotSHazard == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotSHazard failed." 
<< std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotSHazard, "Unit allocation for kUnitIdSlotSHazard failed."); + +Unit *instancekUnitIdSlotSMul = new Unit(kUnitTypeAnd, kUnitIdSlotSMul, kunitNum, + instancekUnitIdSlotS, instancekUnitIdMul); +if(instancekUnitIdSlotSMul == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotSMul failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotSMul, "Unit allocation for kUnitIdSlotSMul failed."); + +Unit *instancekUnitIdSlotSBranch = new Unit(kUnitTypeAnd, kUnitIdSlotSBranch, kunitNum, + instancekUnitIdSlotS, instancekUnitIdBranch); +if(instancekUnitIdSlotSBranch == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotSBranch failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotSBranch, "Unit allocation for kUnitIdSlotSBranch failed."); + +Unit *instancekUnitIdSlotSAgen = new Unit(kUnitTypeAnd, kUnitIdSlotSAgen, kunitNum, + instancekUnitIdSlotS, instancekUnitIdAgen); +if(instancekUnitIdSlotSAgen == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotSAgen failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotSAgen, "Unit allocation for kUnitIdSlotSAgen failed."); + +Unit *instancekUnitIdSlotDAgen = new Unit(kUnitTypeAnd, kUnitIdSlotDAgen, kunitNum, + instancekUnitIdSlot0, instancekUnitIdSlot1, instancekUnitIdAgen); +if(instancekUnitIdSlotDAgen == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlotDAgen failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlotDAgen, "Unit allocation for kUnitIdSlotDAgen failed."); + +Unit *instancekUnitIdSlot0LdAgu = new Unit(kUnitTypeAnd, kUnitIdSlot0LdAgu, kunitNum, + instancekUnitIdSlot0, instancekUnitIdLdAgu); +if(instancekUnitIdSlot0LdAgu == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlot0LdAgu failed." << std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlot0LdAgu, "Unit allocation for kUnitIdSlot0LdAgu failed."); + +Unit *instancekUnitIdSlot0StAgu = new Unit(kUnitTypeAnd, kUnitIdSlot0StAgu, kunitNum, + instancekUnitIdSlot0, instancekUnitIdStAgu); +if(instancekUnitIdSlot0StAgu == nullptr) { + maple::LogInfo::MapleLogger(maple::kLlErr) << "Unit allocation for kUnitIdSlot0StAgu failed." 
<< std::endl; +} +DEBUG_ASSERT(instancekUnitIdSlot0StAgu, "Unit allocation for kUnitIdSlot0StAgu failed."); + diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_unit_id.def b/ecmascript/mapleall/maple_be/include/cg/mplad_unit_id.def new file mode 100644 index 0000000000000000000000000000000000000000..66396da32c9e833def7346109afb4329b51e1bc6 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/mplad_unit_id.def @@ -0,0 +1,33 @@ +/* cortex_a55 function unit ID definition : */ + kUnitIdSlot0, + kUnitIdSlot1, + kUnitIdAgen, + kUnitIdHazard, + kUnitIdCrypto, + kUnitIdMul, + kUnitIdDiv, + kUnitIdBranch, + kUnitIdStAgu, + kUnitIdLdAgu, + kUnitIdFpAluLo, + kUnitIdFpAluHi, + kUnitIdFpMulLo, + kUnitIdFpMulHi, + kUnitIdFpDivLo, + kUnitIdFpDivHi, + kUnitIdSlotS, + kUnitIdFpAluS, + kUnitIdFpMulS, + kUnitIdFpDivS, + kUnitIdSlotD, + kUnitIdFpAluD, + kUnitIdFpMulD, + kUnitIdFpDivD, + kUnitIdSlotSHazard, + kUnitIdSlotSMul, + kUnitIdSlotSBranch, + kUnitIdSlotSAgen, + kUnitIdSlotDAgen, + kUnitIdSlot0LdAgu, + kUnitIdSlot0StAgu, + nothing, diff --git a/ecmascript/mapleall/maple_be/include/cg/mplad_unit_name.def b/ecmascript/mapleall/maple_be/include/cg/mplad_unit_name.def new file mode 100644 index 0000000000000000000000000000000000000000..2586d10668c3462550cc3daf5277752ba454ac38 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/mplad_unit_name.def @@ -0,0 +1,32 @@ +/* cortex_a55 function unit name definition : */ +"Slot0", +"Slot1", +"Agen", +"Hazard", +"Crypto", +"Mul", +"Div", +"Branch", +"StAgu", +"LdAgu", +"FpAluLo", +"FpAluHi", +"FpMulLo", +"FpMulHi", +"FpDivLo", +"FpDivHi", +"SlotS", +"FpAluS", +"FpMulS", +"FpDivS", +"SlotD", +"FpAluD", +"FpMulD", +"FpDivD", +"SlotSHazard", +"SlotSMul", +"SlotSBranch", +"SlotSAgen", +"SlotDAgen", +"Slot0LdAgu", +"Slot0StAgu", diff --git a/ecmascript/mapleall/maple_be/include/cg/obj_emit.h b/ecmascript/mapleall/maple_be/include/cg/obj_emit.h new file mode 100644 index 0000000000000000000000000000000000000000..c328db9d244a7948e4b5d1617433826264c6944a --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/obj_emit.h @@ -0,0 +1,446 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
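Editor's aside: the allocation run above instantiates the Cortex-A55 pipeline model. Primitive units (Slot0, Slot1, Agen, Mul, ...) are created first; composites then combine them, where kUnitTypeOr means the requirement is satisfied by any one child (SlotS: either issue slot) and kUnitTypeAnd demands all children at once (SlotD, SlotSMul, SlotDAgen, ...). A minimal sketch of those semantics, using hypothetical simplified types rather than the real maple_be Unit API:

```
#include <cstdio>
#include <vector>

// "Or" units are free when ANY child is free; "And" units need ALL children.
struct Unit {
    enum Kind { kPrimary, kOr, kAnd } kind;
    const char *name;
    bool busy = false;            // occupancy for the current cycle
    std::vector<Unit*> children;  // empty for primary units

    bool IsFree() const {
        switch (kind) {
            case kPrimary: return !busy;
            case kOr:
                for (Unit *u : children) { if (u->IsFree()) return true; }
                return false;
            case kAnd:
                for (Unit *u : children) { if (!u->IsFree()) return false; }
                return true;
        }
        return false;
    }
};

int main() {
    Unit slot0{Unit::kPrimary, "Slot0"}, slot1{Unit::kPrimary, "Slot1"};
    Unit slotS{Unit::kOr, "SlotS", false, {&slot0, &slot1}};   // either issue slot
    Unit slotD{Unit::kAnd, "SlotD", false, {&slot0, &slot1}};  // both slots at once
    slot0.busy = true;  // one instruction already issued this cycle
    std::printf("SlotS free: %d, SlotD free: %d\n", slotS.IsFree(), slotD.IsFree());
    return 0;  // prints: SlotS free: 1, SlotD free: 0
}
```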
+ */ +#ifndef MAPLEBE_INCLUDE_CG_OBJ_EMIT_H +#define MAPLEBE_INCLUDE_CG_OBJ_EMIT_H + +#include "emit.h" +#include "ifile.h" +#include "string_utils.h" + +namespace maplebe { +enum FixupKind : uint32 { + kFKNone, + kExceptFixup, + kEhTypeDefFixup, + kEhTypeUndefFixup, + kLSDAFixup, + kFirstTargetFixupKind = 64, /* the kind in subclass start from 64 */ +}; + +class Fixup { + public: + Fixup(const std::string &label, uint32 relOffsetVal, uint32 offsetVal, FixupKind fixupKind) + : labelName(label), relOffset(relOffsetVal), offset(offsetVal), kind(fixupKind) {} + + ~Fixup() = default; + + const std::string &GetLabel() const { + return labelName; + } + + uint32 GetRelOffset() const { + return relOffset; + } + + void SetOffset(uint32 value) { + offset = value; + } + + uint32 GetOffset() const { + return offset; + } + + FixupKind GetFixupKind() const { + return kind; + } + + private: + std::string labelName; /* target label name */ + uint32 relOffset; /* offset to target label */ + uint32 offset; /* record where to fix up */ + FixupKind kind; /* record how to fix up */ +}; + +class LocalFixup { + public: + LocalFixup(uint32 label, uint32 offsetVal, FixupKind fixupkind) + : labelIndex(label), offset(offsetVal), kind(fixupkind) {} + + ~LocalFixup() = default; + + uint32 GetLabelIndex() const { + return labelIndex; + } + + uint32 GetOffset() const { + return offset; + } + + FixupKind GetFixupKind() const { + return kind; + } + + private: + uint32 labelIndex; /* target label index */ + uint32 offset; /* record where to fix up */ + FixupKind kind; /* record how to fix up */ +}; + +enum SymbolKind : uint32 { + kStFunc, + kStNone, +}; + +class ObjSymbol { + public: + ObjSymbol(const std::string &name, SymbolKind kind, uint32 pos) : symbolName(name), symbolKind(kind), offset(pos) {} + ~ObjSymbol() = default; + + std::string GetSymbolName() const { + return symbolName; + } + + SymbolKind GetSymbolKind() const { + return symbolKind; + } + + uint32 GetOffset() const { + return offset; + } + + private: + std::string symbolName; + SymbolKind symbolKind; + uint32 offset; +}; + +class ObjFuncEmitInfo : public FuncEmitInfo { + public: + ObjFuncEmitInfo(CGFunc &func, MemPool &inputMemPool) + : FuncEmitInfo(func), + memPool(inputMemPool), + alloc(&memPool), + localFixups(alloc.Adapter()), + globalFixups(alloc.Adapter()), + relocations(alloc.Adapter()), + textData(alloc.Adapter()), + label2Order(alloc.Adapter()), + switchTableOffset(alloc.Adapter()), + funcName(&memPool) {} + + virtual ~ObjFuncEmitInfo() = default; + + uint32 GetEndOffset() const { + return endOffset; + } + + void SetEndOffset(uint32 offset) { + endOffset = offset; + } + + uint32 GetStartOffset() const { + return startOffset; + } + + void SetStartOffset(uint32 offset) { + startOffset = offset; + } + + uint32 GetExceptStartOffset() const { + return exceptStartOffset; + } + + void SetExceptStartOffset(uint32 offset) { + exceptStartOffset = offset; + } + + void AppendLocalFixups(LocalFixup &fixup) { + localFixups.push_back(&fixup); + } + + void AppendGlobalFixups(Fixup &fixup) { + globalFixups.push_back(&fixup); + } + + void AppendRelocations(Fixup &fixup) { + relocations.push_back(&fixup); + } + + const MapleVector &GetGlobalFixups() const { + return globalFixups; + } + + virtual void AppendContents(uint64 binInsn, uint32 byteSize) { + (void)binInsn; + (void)byteSize; + CHECK_FATAL(false, "this function should be implemented in subclass"); + } + + virtual void HandleLocalBranchFixup(const std::vector &label2Offset) { + (void)label2Offset; + 
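The Fixup/LocalFixup records above support a classic two-pass emitter: while encoding, a branch whose target label is not yet placed emits a placeholder and records where to patch and which label; once every label offset is known, each record rewrites its bytes in place. A compilable sketch of that resolve step (hypothetical helper types, not the emitter's actual code):

```
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <map>
#include <string>
#include <vector>

struct PendingFixup {
    std::string label;  // target label name, as in Fixup::labelName
    uint32_t offset;    // byte position of the 32-bit field to patch
};

// Resolve every recorded fixup once all label offsets are known.
void ApplyFixups(std::vector<uint8_t> &text, const std::vector<PendingFixup> &fixups,
                 const std::map<std::string, uint32_t> &label2Offset) {
    for (const PendingFixup &f : fixups) {
        int32_t rel = static_cast<int32_t>(label2Offset.at(f.label)) -
                      static_cast<int32_t>(f.offset);
        for (int i = 0; i < 4; ++i) {  // patch 4 bytes, little-endian
            text[f.offset + i] = static_cast<uint8_t>(static_cast<uint32_t>(rel) >> (i * 8));
        }
    }
}

int main() {
    std::vector<uint8_t> text(12, 0);                // pretend emitted machine code
    std::vector<PendingFixup> fixups = {{"L1", 4}};  // a branch at offset 4 targets L1
    ApplyFixups(text, fixups, {{"L1", 12}});         // L1 ended up at offset 12
    int32_t rel = 0;
    std::memcpy(&rel, text.data() + 4, sizeof rel);
    std::printf("patched rel = %d\n", rel);          // prints: patched rel = 8
    return 0;
}
```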
CHECK_FATAL(false, "this function should be implemented in subclass");
+  }
+
+  const MapleString &GetFuncName() const {
+    return funcName;
+  }
+
+  void SetFuncName(const std::string &name) {
+    funcName = name;
+  }
+
+  const MapleVector<uint8> GetTextData() const {
+    return textData;
+  }
+
+  size_t GetTextDataSize() const {
+    return textData.size();
+  }
+
+  void AppendTextData(const void *data, uint32 byteSize) {
+    auto pdata = reinterpret_cast<const uint8*>(data);  // data:0xa9be7c1d pdata:1d 7c be a9 (little-endian)
+    (void)textData.insert(textData.end(), pdata, pdata + byteSize);
+  }
+
+  void AppendTextData(uint64 data, uint32 byteSize) {
+    for (size_t i = 0; i < byteSize; i++) {
+      textData.push_back(static_cast<uint8>(data >> (i * 8)));
+    }
+  }
+
+  uint32 GetTextDataElem32(size_t index) {
+    uint32 value = 0;
+    errno_t res = memcpy_s(&value, sizeof(uint32), textData.data() + index, sizeof(uint32));
+    CHECK_FATAL(res == EOK, "call memcpy_s failed");
+    return value;
+  }
+
+  uint64 GetTextDataElem64(size_t index) {
+    uint64 value = 0;
+    errno_t res = memcpy_s(&value, sizeof(uint64), textData.data() + index, sizeof(uint64));
+    CHECK_FATAL(res == EOK, "call memcpy_s failed");
+    return value;
+  }
+
+  void SwapTextData(const void *value, size_t index, size_t byteSize) {
+    errno_t res = memcpy_s(textData.data() + index, byteSize, value, byteSize);
+    CHECK_FATAL(res == EOK, "call memcpy_s failed");
+  }
+
+  void FillTextDataPadding(uint32 padding) {
+    for (uint32 i = 0; i < padding; ++i) {
+      textData.push_back(0);
+    }
+  }
+
+  void FillTextDataNop(uint32 padding) {
+    ASSERT(padding % k4ByteSize == 0, "padding is not a multiple of 4!\n");
+    uint32 nopNum = padding >> k2BitSize;
+    for (uint32 i = 0; i < nopNum; i++) {
+      AppendTextData(0xd503201f, k4ByteSize);
+    }
+  }
+
+  void SetSwitchTableOffset(const std::string &name, uint32 offset) {
+    MapleString switchTableName(name, &memPool);
+    switchTableOffset[switchTableName] = offset;
+  }
+
+  const MapleMap<MapleString, uint32> &GetSwitchTableOffset() const {
+    return switchTableOffset;
+  }
+
+  const MethodHeader &GetMethodHeader() const {
+    return methodHeader;
+  }
+
+  void UpdateMethodCodeSize() {
+    methodHeader.codeSize = static_cast<uint32>(GetTextDataSize());
+  }
+
+  void AppendLabel2Order(uint32 label) {
+    (void)label2Order.insert(std::make_pair(label, order));
+    order++;
+  }
+
+  uint32 GetLabelOrder(uint32 label) const {
+    auto itr = label2Order.find(label);
+    CHECK_FATAL(itr != label2Order.end(), "label not found");
+    return itr->second;
+  }
+
+ protected:
+  MemPool &memPool;
+  MapleAllocator alloc;
+  MapleVector<LocalFixup*> localFixups;
+  MapleVector<Fixup*> globalFixups;
+  MapleVector<Fixup*> relocations;
+  MapleVector<uint8> textData;
+  MapleMap<uint32, uint32> label2Order;  /* used to sort callsites */
+  MapleMap<MapleString, uint32> switchTableOffset;
+  uint32 endOffset = 0;
+  uint32 startOffset = 0;
+  uint32 exceptStartOffset = 0;
+  MapleString funcName;
+  MethodHeader methodHeader;
+  uint32 order = 0;
+};
+
+class ObjEmitter : public Emitter {
+ public:
+  ObjEmitter(CG &cg, const std::string &objFileName)
+      : Emitter(cg, objFileName), alloc(memPool), sections(alloc.Adapter()), contents(alloc.Adapter()) {
+    fileStream.open(objFileName, std::ios::trunc | std::ios::binary);
+
+    uint32 funcNum = 0;
+    for (auto func : cg.GetMIRModule()->GetFunctionList()) {
+      if (func->GetBody() != nullptr) {
+        funcNum++;
+      }
+    }
+    contents.resize(funcNum);
+  }
+
+  virtual ~ObjEmitter() = default;
+
+  void EmitFuncBinaryCode(ObjFuncEmitInfo &objFuncEmitInfo);
+  void EmitInstructions(ObjFuncEmitInfo &objFuncEmitInfo, std::vector<uint32> &label2Offset);
+  void EmitLocalFloatValue(ObjFuncEmitInfo
&objFuncEmitInfo); + void EmitFullLSDA(ObjFuncEmitInfo &objFuncEmitInfo, const std::vector &label2Offset); + void EmitFastLSDA(ObjFuncEmitInfo &objFuncEmitInfo, const std::vector &label2Offset); + void EmitSwitchTable(ObjFuncEmitInfo &objFuncEmitInfo, const std::vector &label2Offset); + void WriteObjFile(); + + void HandleGlobalFixup() { + for (auto *section : sections) { + section->HandleGlobalFixup(globalLabel2Offset); + } + } + + void Run(FuncEmitInfo &funcEmitInfo); + void EmitFuncBuffer(CGFunc &cgFunc); + + FuncEmitInfo &CreateFuncEmitInfo(CGFunc &cgFunc) { + CHECK_FATAL(false, "this function should be implemented in subclass"); + MemPool *memPool = cgFunc.GetCG()->GetMIRModule()->GetMemPool(); + return *memPool->New(cgFunc, *memPool); + } + + void InitELFHeader(); + void AddSymbol(const std::string &name, Word size, const Section §ion, Address value); + void AddFuncSymbol(const MapleString &name, Word size, Address value); + void ClearData(); + void HandleExceptFixup(); + + void UpdateSectionOffsetAddr(Section *section) { + if (section->GetType() != SHT_NOBITS) { + section->SetOffset(globalOffset); + } else { + section->SetOffset(0); + } + } + + void UpdateGlobalOffsetAddr(Section *section) { + if ((section->GetFlags() & SHF_ALLOC) != 0) { + globalAddr += section->GetDataSize(); + } + if (section->GetType() != SHT_NOBITS) { + globalOffset += section->GetDataSize(); + } + } + + void RegisterSection(Section *section) { + sections.push_back(section); + section->SetIndex(sections.size() - 1); + } + + void RegisterGlobalLabel(const std::string labelName, ObjLabel label) { + (void)globalLabel2Offset.insert(std::make_pair(labelName, label)); + } + + size_t AddSectionName(const std::string &name) { + return name.empty() ? 0 : shStrSection->AddString(name); + } + + void Finish() override { + InitSections(); + AppendGlobalLabel(); + AppendSymsToSymTabSec(); + HandleTextSectionGlobalFixup(); + AppendTextSectionData(); + LayoutSections(); + // HandleGlobalFixup(); + WriteObjFile(); + ClearData(); + } + + void CloseOutput() override { + if (fileStream.is_open()) { + fileStream.close(); + } + } + + virtual void EncodeInstruction(const Insn &insn, const std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) = 0; + virtual uint32 GetInsnSize(const Insn &insn) const = 0; + virtual void HandleTextSectionGlobalFixup() = 0; + virtual void AppendTextSectionData() = 0; + virtual void AppendGlobalLabel() = 0; + virtual void AppendSymsToSymTabSec() = 0; + virtual void InitSections() = 0; + virtual void LayoutSections() = 0; + virtual void UpdateMachineAndFlags(FileHeader &header) = 0; + + MapleVector &GetContents() { + return contents; + } + + size_t GetBeforeTextDataSize(ObjFuncEmitInfo &objFuncEmitInfo) const { + size_t textDataSize = 0; + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + textDataSize += content->GetTextDataSize(); + if (content->GetFuncName() == objFuncEmitInfo.GetFuncName()) { + break; + } + } + return textDataSize; + } + + void EmitMIRIntConst(EmitInfo &emitInfo); + void EmitMIRAddrofConst(EmitInfo &emitInfo); + void EmitMIRAddrofConstOffset(EmitInfo &emitInfo); + void EmitMIRAddrofConstCommon(EmitInfo &emitInfo, uint64 specialOffset); + void EmitFunctionSymbolTable(ObjFuncEmitInfo &objFuncEmitInfo, std::vector &label2Offset); + void EmitStr16Const(ObjFuncEmitInfo &objFuncEmitInfo, const MIRSymbol &str16Symbol); + void EmitStrConst(ObjFuncEmitInfo &objFuncEmitInfo, const MIRSymbol &strSymbol); + protected: + virtual void 
InsertNopInsn(ObjFuncEmitInfo &objFuncEmitInfo) const = 0;
+  virtual void EmitIntrinsicInsn(const Insn &insn, std::vector<uint32> &label2Offset,
+                                 ObjFuncEmitInfo &objFuncEmitInfo) = 0;
+  virtual void EmitSpinIntrinsicInsn(const Insn &insn, std::vector<uint32> &label2Offset,
+                                     ObjFuncEmitInfo &objFuncEmitInfo) = 0;
+
+  MapleString fileName;
+  MapleAllocator alloc;
+  MapleVector<Section*> sections;
+  Offset globalOffset = 0;  /* global offset of the ifile */
+  Address globalAddr = 0;  /* global address of the ifile */
+  FileHeader header {};
+  StringSection *shStrSection = nullptr;
+  StringSection *strTabSection = nullptr;
+  SymbolSection *symbolTabSection = nullptr;
+  DataSection *textSection = nullptr;
+  DataSection *dataSection = nullptr;
+  DataSection *rodataSection = nullptr;
+  RelaSection *relaSection = nullptr;
+  MapleVector<ObjFuncEmitInfo*> contents;  /* each item is the code info of a cgfunc */
+  Label2OffsetMap globalLabel2Offset;  /* record global info */
+};
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_OBJ_EMIT_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/offset_adjust.h b/ecmascript/mapleall/maple_be/include/cg/offset_adjust.h
new file mode 100644
index 0000000000000000000000000000000000000000..774e4fc6d8bec1950c2b8e1ee52f27837eed2d8c
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/offset_adjust.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_OFFSET_ADJUST_H
+#define MAPLEBE_INCLUDE_CG_OFFSET_ADJUST_H
+
+#include "cgfunc.h"
+#include "cg_phase.h"
+
+namespace maplebe {
+class FrameFinalize {
+ public:
+  explicit FrameFinalize(CGFunc &func) : cgFunc(&func) {}
+
+  virtual ~FrameFinalize() {
+    cgFunc = nullptr;
+  }
+
+  virtual void Run() {}
+
+  std::string PhaseName() const {
+    return "framefinalize";
+  }
+
+ protected:
+  CGFunc *cgFunc;
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFrameFinalize, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_OFFSET_ADJUST_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/operand.def b/ecmascript/mapleall/maple_be/include/cg/operand.def
new file mode 100644
index 0000000000000000000000000000000000000000..a6d1bba6e6adc67aba4b7ef7ec71f57d3e7c076d
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/operand.def
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
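ObjEmitter's UpdateSectionOffsetAddr/UpdateGlobalOffsetAddr above encode the usual ELF layout rule: SHT_NOBITS sections (.bss-like) occupy no file bytes, so they receive file offset 0 and do not advance the running file offset, while SHF_ALLOC sections advance the running address. A standalone sketch of that walk, with simplified stand-ins for the section flags rather than the real ELF structs:

```
#include <cstdint>
#include <cstdio>
#include <vector>

struct Section {
    const char *name;
    uint64_t size;
    bool nobits;  // stands in for type == SHT_NOBITS
    bool alloc;   // stands in for flags & SHF_ALLOC
    uint64_t fileOffset = 0;
    uint64_t addr = 0;
};

int main() {
    std::vector<Section> sections = {
        {".text", 0x100, false, true}, {".data", 0x40, false, true}, {".bss", 0x80, true, true}};
    uint64_t fileOffset = 0x40;  // e.g. right after the file header
    uint64_t addr = 0x10000;
    for (Section &s : sections) {
        s.fileOffset = s.nobits ? 0 : fileOffset;  // NOBITS takes no file bytes
        s.addr = addr;
        if (!s.nobits) fileOffset += s.size;       // only real data advances the file
        if (s.alloc) addr += s.size;               // allocatable sections advance the address
        std::printf("%-6s off=0x%llx addr=0x%llx\n", s.name,
                    (unsigned long long)s.fileOffset, (unsigned long long)s.addr);
    }
    return 0;
}
```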
+ */ + +DEFINE_MOP(Mem8S, {Operand::kOpdMem, operand::kIsUse, 8}) +DEFINE_MOP(Mem8D, {Operand::kOpdMem, operand::kIsDef, 8}) +DEFINE_MOP(Mem16S, {Operand::kOpdMem, operand::kIsUse, 16}) +DEFINE_MOP(Mem16D, {Operand::kOpdMem, operand::kIsDef, 16}) +DEFINE_MOP(Mem32D, {Operand::kOpdMem, operand::kIsDef, 32}) +DEFINE_MOP(Mem32S, {Operand::kOpdMem, operand::kIsUse, 32}) +DEFINE_MOP(Mem64D, {Operand::kOpdMem, operand::kIsDef, 64}) +DEFINE_MOP(Mem64S, {Operand::kOpdMem, operand::kIsUse, 64}) +DEFINE_MOP(Mem128D, {Operand::kOpdMem, operand::kIsDef, 128}) +DEFINE_MOP(Mem128S, {Operand::kOpdMem, operand::kIsUse, 128}) + +DEFINE_MOP(Reg8IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 8}) +DEFINE_MOP(Reg8ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 8}) +DEFINE_MOP(Reg8IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 8}) +DEFINE_MOP(Reg16ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 16}) +DEFINE_MOP(Reg16IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 16}) +DEFINE_MOP(Reg16IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 16}) +DEFINE_MOP(Reg32ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 32}) +DEFINE_MOP(Reg32IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 32}) +DEFINE_MOP(Reg32IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 32}) +DEFINE_MOP(Reg64ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 64}) +DEFINE_MOP(Reg64IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 64}) +DEFINE_MOP(Reg64IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 64}) + +DEFINE_MOP(Reg8FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 8}) +DEFINE_MOP(Reg8FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 8}) +DEFINE_MOP(Reg16FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 16}) +DEFINE_MOP(Reg16FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 16}) +DEFINE_MOP(Reg16FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 16}) +DEFINE_MOP(Reg32FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 32}) +DEFINE_MOP(Reg32FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 32}) +DEFINE_MOP(Reg32FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 32}) +DEFINE_MOP(Reg64FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 64}) +DEFINE_MOP(Reg64FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 64}) +DEFINE_MOP(Reg64FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 64}) +DEFINE_MOP(Reg128ID, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 128}) +DEFINE_MOP(Reg128IS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 128}) +DEFINE_MOP(Reg128IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 128}) + +DEFINE_MOP(Reg64VD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat | operand::kIsVector, 64}) +DEFINE_MOP(Reg64VS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat | operand::kIsVector, 64}) +DEFINE_MOP(Reg64VDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat | operand::kIsVector, 64}) +DEFINE_MOP(Reg128VD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat | operand::kIsVector, 128}) +DEFINE_MOP(Reg128VS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat | operand::kIsVector, 128}) +DEFINE_MOP(Reg128VDS, 
{Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat | operand::kIsVector, 128}) + +DEFINE_MOP(CCD, {Operand::kOpdRegister, operand::kRegTyCc | operand::kIsDef, 1}) +DEFINE_MOP(CCS, {Operand::kOpdRegister, operand::kRegTyCc | operand::kIsUse, 1}) +DEFINE_MOP(Cond, {Operand::kOpdCond, operand::kRegTyCc | operand::kIsUse, 4}) + +DEFINE_MOP(Imm4, {Operand::kOpdImmediate, operand::kIsUse, 4}) +DEFINE_MOP(Imm5, {Operand::kOpdImmediate, operand::kIsUse, 5}) +DEFINE_MOP(Imm6, {Operand::kOpdImmediate, operand::kIsUse, 6}) +DEFINE_MOP(Imm8, {Operand::kOpdImmediate, operand::kIsUse, 8}) +DEFINE_MOP(Imm12, {Operand::kOpdImmediate, operand::kIsUse, 12}) +DEFINE_MOP(Imm13, {Operand::kOpdImmediate, operand::kIsUse, 13}) +DEFINE_MOP(Imm16, {Operand::kOpdImmediate, operand::kIsUse, 16}) +DEFINE_MOP(Imm32, {Operand::kOpdImmediate, operand::kIsUse, 32}) +DEFINE_MOP(Imm64, {Operand::kOpdImmediate, operand::kIsUse, 64}) +DEFINE_MOP(StImm32, {Operand::kOpdStImmediate, operand::kIsUse, 32}) +DEFINE_MOP(StImm64, {Operand::kOpdStImmediate, operand::kIsUse, 64}) +DEFINE_MOP(FpImm8, {Operand::kOpdFPImmediate, operand::kIsUse, 8}) +DEFINE_MOP(LiteralSrc, {Operand::kOpdStImmediate, operand::kIsUse, 64}) +DEFINE_MOP(Literal12Src, {Operand::kOpdStImmediate, operand::kLiteralLow12, 12}) + +/* for movk */ +DEFINE_MOP(Lsl4, {Operand::kOpdShift, operand::kIsUse, 4}) +DEFINE_MOP(Lsl6, {Operand::kOpdShift, operand::kIsUse, 6}) +DEFINE_MOP(Lsl12, {Operand::kOpdShift, operand::kIsUse, 12}) +/* for shift */ +DEFINE_MOP(Bitshift32, {Operand::kOpdShift, operand::kIsUse, 5}) +DEFINE_MOP(Bitshift64, {Operand::kOpdShift, operand::kIsUse, 6}) +DEFINE_MOP(Extendshift64, {Operand::kOpdExtend, operand::kIsUse, 3}) + +DEFINE_MOP(ListSrc, {Operand::kOpdList, operand::kIsUse, 1}) +DEFINE_MOP(ListDest, {Operand::kOpdList, operand::kIsDef, 1}) +DEFINE_MOP(String0S, {Operand::kOpdString, operand::kIsUse, 0}) +DEFINE_MOP(AddressName, {Operand::kOpdBBAddress, operand::kIsUse, 64}) + +DEFINE_MOP(Lbl64, {Operand::kOpdBBAddress, operand::kIsUse, 64}) \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/operand.h b/ecmascript/mapleall/maple_be/include/cg/operand.h new file mode 100644 index 0000000000000000000000000000000000000000..4c6666457c5e5de788f384121f0264a3b50418a3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/operand.h @@ -0,0 +1,1844 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
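operand.def, like the mplad_unit_id.def/mplad_unit_name.def pair earlier (two parallel lists that must stay in lock-step order), is written to be textually included under different definitions of its macro: the X-macro pattern. A self-contained illustration with the list inlined and DEFINE_MOP simplified to one argument (the real macro also takes the property initializer):

```
#include <cstdio>

// The list is written once; each consumer redefines DEFINE_MOP before
// expanding it, yielding an enum and a parallel name table that can
// never fall out of sync.
#define OPERAND_LIST(X) \
    X(Mem8S) \
    X(Mem8D) \
    X(Reg32IS)

enum OperandIndex {
#define DEFINE_MOP(name) kIdx##name,
    OPERAND_LIST(DEFINE_MOP)
#undef DEFINE_MOP
    kIdxCount
};

static const char *kOperandNames[] = {
#define DEFINE_MOP(name) #name,
    OPERAND_LIST(DEFINE_MOP)
#undef DEFINE_MOP
};

int main() {
    for (int i = 0; i < kIdxCount; ++i) {
        std::printf("%d -> %s\n", i, kOperandNames[i]);
    }
    return 0;
}
```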
+ */ +#ifndef MAPLEBE_INCLUDE_CG_OPERAND_H +#define MAPLEBE_INCLUDE_CG_OPERAND_H + +#include "becommon.h" +#include "cg_option.h" +#include "visitor_common.h" + +/* maple_ir */ +#include "types_def.h" /* need uint8 etc */ +#include "prim_types.h" /* for PrimType */ +#include "mir_symbol.h" + +/* Mempool */ +#include "mempool_allocator.h" /* MapleList */ +#include "memlayout.h" + +namespace maplebe { +class OpndDesc; +class Emitter; +class FuncEmitInfo; + +bool IsBitSizeImmediate(maple::uint64 val, maple::uint32 bitLen, maple::uint32 nLowerZeroBits); +bool IsBitmaskImmediate(maple::uint64 val, maple::uint32 bitLen); +bool IsMoveWidableImmediate(uint64 val, uint32 bitLen); +bool BetterUseMOVZ(uint64 val); + + +using MOperator = uint32; +enum RegType : maple::uint8 { + kRegTyUndef, + kRegTyInt, + kRegTyFloat, + kRegTyCc, + kRegTyX87, + kRegTyVary, + kRegTyFpsc, + kRegTyIndex, + kRegTyLast, +}; + +class Operand { + public: + enum OperandType : uint8 { + kOpdRegister, + kOpdImmediate, + kOpdMem, + kOpdCond, /* for condition code */ + kOpdPhi, /* for phi operand */ + kOpdFPImmediate, + kOpdStImmediate, /* use the symbol name as the offset */ + kOpdOffset, /* for the offset operand in MemOperand */ + kOpdBBAddress, + kOpdList, /* for list operand */ + kOpdShift, /* for imm shift operand */ + kOpdRegShift, /* for reg shift operand */ + kOpdExtend, /* for extend operand */ + kOpdString, /* for comments */ + kOpdUndef + }; + + Operand(OperandType type, uint32 size) : opndKind(type), size(size) {} + virtual ~Operand() = default; + + uint32 GetSize() const { + return size; + } + + void SetSize(uint32 sz) { + size = sz; + } + + OperandType GetKind() const { + return opndKind; + } + + bool IsIntImmediate() const { + return opndKind == kOpdImmediate || opndKind == kOpdOffset; + } + + bool IsConstImmediate() const { + return opndKind == kOpdImmediate || opndKind == kOpdOffset || opndKind == kOpdFPImmediate; + } + + bool IsOfstImmediate() const { + return opndKind == kOpdOffset; + } + + bool IsStImmediate() const { + return opndKind == kOpdStImmediate; + } + + bool IsImmediate() const { + return (kOpdFPImmediate <= opndKind && opndKind <= kOpdOffset) || opndKind == kOpdImmediate; + } + + bool IsRegister() const { + return opndKind == kOpdRegister; + } + + bool IsList() const { + return opndKind == kOpdList; + } + + bool IsPhi() const { + return opndKind == kOpdPhi; + } + + bool IsMemoryAccessOperand() const { + return opndKind == kOpdMem; + } + + bool IsLabel() const { + return opndKind == kOpdBBAddress; + } + + virtual bool IsZeroRegister() const { + return false; + }; + + bool IsConditionCode() const { + return opndKind == kOpdCond; + } + + bool IsOpdShift() const { + return opndKind == kOpdShift; + } + + bool IsRegShift() const { + return opndKind == kOpdRegShift; + } + + bool IsOpdExtend() const { + return opndKind == kOpdExtend; + } + + virtual bool IsLabelOpnd() const { + return false; + } + + virtual bool IsFuncNameOpnd() const { + return false; + } + + virtual bool IsCommentOpnd() const { + return false; + } + + virtual Operand *Clone(MemPool &memPool) const = 0; + + /* + * A simple implementation here. + * Each subclass can elaborate on demand. 
+ */ + virtual bool Equals(Operand &op) const { + return BasicEquals(op) && (&op == this); + } + + bool BasicEquals(const Operand &op) const { + return opndKind == op.GetKind() && size == op.GetSize(); + } + + virtual void Dump() const = 0; + + virtual bool Less(const Operand &right) const = 0; + + virtual void Accept(OperandVisitorBase &v) = 0; + + protected: + OperandType opndKind; /* operand type */ + uint32 size; /* size in bits */ + uint64 flag = 0; /* operand property*/ +}; + +/* RegOperand */ +enum RegOperandState : uint32 { + kRegOpndNone = 0, + kRegOpndSetLow32 = 0x1, + kRegOpndSetHigh32 = 0x2 +}; + +template +class OperandVisitable : public Operand { + public: + using Operand::Operand; + void Accept(OperandVisitorBase &v) override { + if (OperandVisitor* typeV = dynamic_cast*>(&v)) { + typeV->Visit(static_cast(this)); + } else { + /* the type which has no implements */ + } + } +}; + +class RegOperand : public OperandVisitable { + public: + RegOperand(regno_t regNum, uint32 size, RegType type, uint32 flg = 0) + : OperandVisitable(kOpdRegister, size), + regNO(regNum), + regType(type), + validBitsNum(size), + regFlag(flg) {} + + ~RegOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + void SetValidBitsNum(uint32 validNum) { + validBitsNum = validNum; + } + + uint32 GetValidBitsNum() const { + return validBitsNum; + } + + bool IsOfIntClass() const { + return regType == kRegTyInt; + } + + bool IsOfFloatOrSIMDClass() const { + return regType == kRegTyFloat; + } + + bool IsOfCC() const { + return regType == kRegTyCc; + } + + bool IsOfVary() const { + return regType == kRegTyVary; + } + + RegType GetRegisterType() const { + return regType; + } + + void SetRegisterType(RegType newTy) { + regType = newTy; + } + + virtual bool IsBBLocalReg() const { + return isBBLocal; + } + + void SetRegNotBBLocal() { + isBBLocal = false; + } + + regno_t GetRegisterNumber() const { + return regNO; + } + + void SetRegisterNumber(regno_t regNum) { + regNO = regNum; + } + + void Dump() const override { + LogInfo::MapleLogger() << "reg "; + LogInfo::MapleLogger() << "size : " << GetSize(); + LogInfo::MapleLogger() << " NO_" << GetRegisterNumber(); + }; + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + + /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + auto *rightOpnd = static_cast(&right); + + /* The same type. 
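OperandVisitable above couples CRTP with a dynamic_cast probe: each concrete operand derives from OperandVisitable<Self>, and Accept() forwards to the visitor only if that visitor actually implements Visit(Self*). The template arguments were stripped from the patch text; this reconstruction assumes the conventional OperandVisitable<T>/OperandVisitor<T> shapes:

```
#include <cstdio>

struct OperandVisitorBase { virtual ~OperandVisitorBase() = default; };

template <typename T>
struct OperandVisitor {
    virtual ~OperandVisitor() = default;
    virtual void Visit(T *op) = 0;
};

struct Operand {
    virtual ~Operand() = default;
    virtual void Accept(OperandVisitorBase &v) = 0;
};

template <typename T>
struct OperandVisitable : Operand {
    void Accept(OperandVisitorBase &v) override {
        // Cross-cast: succeeds only if the concrete visitor also derives
        // from OperandVisitor<T>; other operand kinds are silently skipped.
        if (auto *typed = dynamic_cast<OperandVisitor<T>*>(&v)) {
            typed->Visit(static_cast<T*>(this));
        }
    }
};

struct RegOperand : OperandVisitable<RegOperand> { int regNO = 7; };
struct ImmOperand : OperandVisitable<ImmOperand> { long value = 42; };

struct RegPrinter : OperandVisitorBase, OperandVisitor<RegOperand> {
    void Visit(RegOperand *op) override { std::printf("reg R%d\n", op->regNO); }
};

int main() {
    RegOperand reg;
    ImmOperand imm;
    RegPrinter printer;
    reg.Accept(printer);  // prints "reg R7"
    imm.Accept(printer);  // no-op: RegPrinter has no Visit(ImmOperand*)
    return 0;
}
```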
*/ + return regNO < rightOpnd->regNO; + } + + bool Less(const RegOperand &right) const { + return regNO < right.regNO; + } + + bool RegNumEqual(const RegOperand &right) const { + return regNO == right.GetRegisterNumber(); + } + + int32 RegCompare(const RegOperand &right) const { + return (regNO - right.GetRegisterNumber()); + } + + bool Equals(Operand &operand) const override { + if (!operand.IsRegister()) { + return false; + } + auto &op = static_cast(operand); + if (&op == this) { + return true; + } + return (BasicEquals(op) && regNO == op.GetRegisterNumber() && regType == op.GetRegisterType() && + IsBBLocalReg() == op.IsBBLocalReg()); + } + + static bool IsSameRegNO(const Operand &firstOpnd, const Operand &secondOpnd) { + if (!firstOpnd.IsRegister() || !secondOpnd.IsRegister()) { + return false; + } + auto &firstReg = static_cast(firstOpnd); + auto &secondReg = static_cast(secondOpnd); + return firstReg.RegNumEqual(secondReg); + } + + static bool IsSameReg(const Operand &firstOpnd, const Operand &secondOpnd) { + if (firstOpnd.GetSize() != secondOpnd.GetSize()) { + return false; + } + return IsSameRegNO(firstOpnd, secondOpnd); + } + + void SetOpndSSAForm() { + isSSAForm = true; + } + + void SetOpndOutOfSSAForm() { + isSSAForm = false; + } + + bool IsSSAForm() const { + return isSSAForm; + } + + void SetRefField(bool newIsRefField) { + isRefField = newIsRefField; + } + + bool IsPhysicalRegister() const { + return GetRegisterNumber() > 0 && GetRegisterNumber() < 100 && !IsOfCC(); + } + + bool IsVirtualRegister() const { + return !IsPhysicalRegister(); + } + + bool IsBBLocalVReg() const { + return IsVirtualRegister() && IsBBLocalReg(); + } + + void SetIF64Vec() { + if64Vec = true; + } + + bool GetIF64Vec() const { + return if64Vec; + } + + void SetVecLanePosition(int32 pos) { + vecLane = static_cast(pos); + } + + int32 GetVecLanePosition() const { + return vecLane; + } + + void SetVecLaneSize(uint32 size) { + vecLaneSize = static_cast(size); + } + + uint32 GetVecLaneSize() const { + return vecLaneSize; + } + + void SetVecElementSize(uint32 size) { + vecElementSize = size; + } + + uint64 GetVecElementSize() const { + return vecElementSize; + } + + void SetHigh8Bit() { + isHigh8Bit = true; + } + + bool IsHigh8Bit() { + return isHigh8Bit; + } + + bool operator==(const RegOperand &o) const; + + bool operator<(const RegOperand &o) const; + + protected: + regno_t regNO; + RegType regType; + + /* + * used for EBO(-O1), it can recognize the registers whose use and def are in different BB. It is + * true by default. Sometime it should be false such as when handle intrinsiccall for target + * aarch64(AArch64CGFunc::SelectIntrinCall). 
+ */ + bool isBBLocal = true; + uint32 validBitsNum; + /* use for SSA analysis */ + bool isSSAForm = false; + bool isRefField = false; + uint32 regFlag = 0; + int16 vecLane = -1; /* -1 for whole reg, 0 to 15 to specify each lane one at a time */ + uint16 vecLaneSize = 0; /* Number of lanes */ + uint64 vecElementSize = 0; /* size of vector element in each lane */ + bool if64Vec = false; /* operand returning 64x1's int value in FP/Simd register */ + bool isHigh8Bit = false; +}; /* class RegOperand */ + +enum VaryType : uint8 { + kNotVary = 0, + kUnAdjustVary, + kAdjustVary, +}; + +class ImmOperand : public OperandVisitable { + public: + ImmOperand(int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary, bool isFloat = false) + : OperandVisitable(kOpdImmediate, size), value(val), isSigned(isSigned), isVary(isVar), isFmov(isFloat) {} + ImmOperand(OperandType type, int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary, bool isFloat = false) + : OperandVisitable(type, size), value(val), isSigned(isSigned), isVary(isVar), isFmov(isFloat) {} + ImmOperand(const MIRSymbol &symbol, int64 val, int32 relocs, bool isSigned, VaryType isVar = kNotVary, + bool isFloat = false) : OperandVisitable(kOpdStImmediate, 0), value(val), isSigned(isSigned), isVary(isVar), + isFmov(isFloat), symbol(&symbol), relocs(relocs) {} + ~ImmOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + const MIRSymbol *GetSymbol() const { + return symbol; + } + + const std::string &GetName() const { + return symbol->GetName(); + } + + int32 GetRelocs() const { + return relocs; + } + + bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const { + return maplebe::IsBitSizeImmediate(static_cast(value), size, nLowerZeroBits); + } + + bool IsBitmaskImmediate() const { + DEBUG_ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); + DEBUG_ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); + return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(size)); + } + + bool IsBitmaskImmediate(uint32 destSize) const { + DEBUG_ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); + DEBUG_ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); + return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(destSize)); + } + + bool IsSingleInstructionMovable() const { + return (IsMoveWidableImmediate(static_cast(value), static_cast(size)) || + IsMoveWidableImmediate(~static_cast(value), static_cast(size)) || + IsBitmaskImmediate()); + } + + bool IsSingleInstructionMovable(uint32 destSize) const { + return (IsMoveWidableImmediate(static_cast(value), static_cast(destSize)) || + IsMoveWidableImmediate(~static_cast(value), static_cast(destSize)) || + IsBitmaskImmediate(destSize)); + } + + int64 GetValue() const { + return value; + } + + void SetValue(int64 val) { + value = val; + } + + void SetVary(VaryType flag) { + isVary = flag; + } + + bool IsZero() const { + return value == 0; + } + + VaryType GetVary() const { + return isVary; + } + + bool IsOne() const { + return value == 1; + } + + bool IsSignedValue() const { + return isSigned; + } + + void SetSigned() { + isSigned = true; + } + + void SetSigned(bool flag) { + isSigned = flag; + } + + bool IsInBitSizeRot(uint8 size) const { + return IsInBitSizeRot(size, value); + } + + static bool IsInBitSizeRot(uint8 size, int64 val) { + /* to tell if the val is in a rotate window of size */ +#if __GNU_C__ || __clang__ + if (val == 0) { + 
return true; + } + int32 start = __builtin_ctzll(static_cast(val)); + int32 end = static_cast(sizeof(val) * kBitsPerByte - __builtin_clzll(static_cast(val)) - 1); + return (size >= end - start + 1); +#else + uint8 start = 0; + uint8 end = 0; + bool isFound = false; + CHECK_FATAL(val > 0, "do not perform bit operator operations on signed integers"); + for (uint32 i = 0; i < k64BitSize; ++i) { + /* check whether the ith bit of val is 1 or not */ + if (((static_cast(val) >> i) & 0x1) == 0x1) { + if (!isFound) { + start = i; + end = i; + isFound = true; + } else { + end = i; + } + } + } + return !isFound || (size >= (end - start) + 1); +#endif + } + + static bool IsInValueRange(int32 lowVal, int32 highVal, int32 val) { + return val >= lowVal && val <= highVal; + } + + bool IsNegative() const { + return isSigned && value < 0; + } + + void Add(int64 delta) { + value += delta; + } + + void Negate() { + value = -value; + } + + void BitwiseNegate() { + value = ~(static_cast(value)) & ((1ULL << size) - 1UL); + } + + void DivideByPow2(int32 shift) { + value = (static_cast(value)) >> shift; + } + + void ModuloByPow2(int32 shift) { + value = (static_cast(value)) & ((1ULL << shift) - 1UL); + } + + bool IsAllOnes() const { + return value == -1; + } + + bool IsAllOnes32bit() const { + return value == 0x0ffffffffLL; + } + + bool operator<(const ImmOperand &iOpnd) const { + return value < iOpnd.value || (value == iOpnd.value && isSigned < iOpnd.isSigned) || + (value == iOpnd.value && isSigned == iOpnd.isSigned && size < iOpnd.GetSize()); + } + + bool operator==(const ImmOperand &iOpnd) const { + return (value == iOpnd.value && isSigned == iOpnd.isSigned && size == iOpnd.GetSize()); + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + + /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + auto *rightOpnd = static_cast(&right); + + /* The same type. 
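IsInBitSizeRot asks whether every set bit of val falls within some window of `size` consecutive bit positions; the GCC/Clang branch derives the span from __builtin_ctzll (lowest set bit) and __builtin_clzll (highest set bit). The same test as a standalone function with worked cases (GCC/Clang builtins, mirroring the #if branch above):

```
#include <cstdint>
#include <cstdio>

// True when all set bits of val span at most `size` consecutive positions;
// the window may sit anywhere in the 64-bit word.
bool FitsInBitWindow(uint64_t val, unsigned size) {
    if (val == 0) {
        return true;
    }
    unsigned low = static_cast<unsigned>(__builtin_ctzll(val));         // lowest set bit
    unsigned high = 63u - static_cast<unsigned>(__builtin_clzll(val));  // highest set bit
    return high - low + 1 <= size;
}

int main() {
    std::printf("%d\n", FitsInBitWindow(0xFF00, 8));  // 1: bits 8..15, span 8
    std::printf("%d\n", FitsInBitWindow(0x101, 8));   // 0: bits 0 and 8, span 9
    std::printf("%d\n", FitsInBitWindow(0x0, 12));    // 1: zero always fits
    return 0;
}
```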
*/ + if (isSigned != rightOpnd->isSigned) { + return isSigned; + } + + if (isVary != rightOpnd->isVary) { + return isVary; + } + + return value < rightOpnd->value; + } + + bool Equals(Operand &operand) const override { + if (!operand.IsImmediate()) { + return false; + } + auto &op = static_cast(operand); + if (&op == this) { + return true; + } + return (BasicEquals(op) && value == op.GetValue() && isSigned == op.IsSignedValue()); + } + + bool ValueEquals(const ImmOperand &op) const { + if (&op == this) { + return true; + } + return (value == op.GetValue() && isSigned == op.IsSignedValue()); + } + bool IsFmov() const { + return isFmov; + } + + protected: + int64 value; + bool isSigned; + VaryType isVary; + bool isFmov = false; + const MIRSymbol *symbol; /* for Immediate in symbol form */ + int32 relocs; +}; + +class OfstOperand : public ImmOperand { + public: + enum OfstType : uint8 { + kSymbolOffset, + kImmediateOffset, + kSymbolImmediateOffset, + }; + + /* only for symbol offset */ + OfstOperand(const MIRSymbol &mirSymbol, uint32 size, int32 relocs) + : ImmOperand(kOpdOffset, 0, size, true, kNotVary, false), + offsetType(kSymbolOffset), symbol(&mirSymbol), relocs(relocs) {} + /* only for Immediate offset */ + OfstOperand(int64 val, uint32 size, VaryType isVar = kNotVary) + : ImmOperand(kOpdOffset, static_cast(val), size, true, isVar, false), + offsetType(kImmediateOffset), symbol(nullptr), relocs(0) {} + /* for symbol and Immediate offset */ + OfstOperand(const MIRSymbol &mirSymbol, int64 val, uint32 size, int32 relocs, VaryType isVar = kNotVary) + : ImmOperand(kOpdOffset, val, size, true, isVar, false), + offsetType(kSymbolImmediateOffset), + symbol(&mirSymbol), + relocs(relocs) {} + + ~OfstOperand() override { + symbol = nullptr; + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + bool IsSymOffset() const { + return offsetType == kSymbolOffset; + } + bool IsImmOffset() const { + return offsetType == kImmediateOffset; + } + bool IsSymAndImmOffset() const { + return offsetType == kSymbolImmediateOffset; + } + + const MIRSymbol *GetSymbol() const { + return symbol; + } + + const std::string &GetSymbolName() const { + return symbol->GetName(); + } + + int64 GetOffsetValue() const { + return GetValue(); + } + + void SetOffsetValue(int32 offVal) { + SetValue(static_cast(offVal)); + } + + void AdjustOffset(int32 delta) { + Add(static_cast(delta)); + } + + bool operator==(const OfstOperand &opnd) const { + return (offsetType == opnd.offsetType && symbol == opnd.symbol && + ImmOperand::operator==(opnd) && relocs == opnd.relocs); + } + + bool operator<(const OfstOperand &opnd) const { + return (offsetType < opnd.offsetType || + (offsetType == opnd.offsetType && symbol < opnd.symbol) || + (offsetType == opnd.offsetType && symbol == opnd.symbol && GetValue() < opnd.GetValue())); + } + + void Dump() const override { + if (IsImmOffset()) { + LogInfo::MapleLogger() << "ofst:" << GetValue(); + } else { + LogInfo::MapleLogger() << GetSymbolName(); + LogInfo::MapleLogger() << "+offset:" << GetValue(); + } + } + + private: + OfstType offsetType; + const MIRSymbol *symbol; + int32 relocs; +}; + +/* + * Table C1-6 A64 Load/Store addressing modes + * | Offset + * Addressing Mode | Immediate | Register | Extended Register + * + * Base register only | [base{,#0}] | - | - + * (no offset) | B_OI_NONE | | + * imm=0 + * + * Base plus offset | [base{,#imm}] | [base,Xm{,LSL #imm}] | [base,Wm,(S|U)XTW {#imm}] + * B_OI_NONE | B_OR_X | B_OR_X + * imm=0,1 (0,3) | 
imm=00,01,10,11 (0/2,s/u)
+ *
+ * Pre-indexed     | [base, #imm]!  | -                    | -
+ *
+ * Post-indexed    | [base], #imm   | [base], Xm(a)        | -
+ *
+ * Literal         | label          | -                    | -
+ * (PC-relative)
+ *
+ * a) The post-indexed by register offset mode can be used with the SIMD Load/Store
+ * structure instructions described in Load/Store Vector on page C3-154. Otherwise
+ * the post-indexed by register offset mode is not available.
+ */
+class MemOperand : public OperandVisitable<MemOperand> {
+ public:
+  enum AArch64AddressingMode : uint8 {
+    kAddrModeUndef,
+    /* AddrMode_BO, base, offset. EA = [base] + offset; */
+    kAddrModeBOi,  /* INTACT: EA = [base]+immediate */
+    /*
+     * PRE: base += immediate, EA = [base]
+     * POST: EA = [base], base += immediate
+     */
+    kAddrModeBOrX,  /* EA = [base]+Extend([offreg/idxreg]), OR=Wn/Xn */
+    kAddrModeLiteral,  /* AArch64 instruction LDR takes literal and */
+    /*
+     * "calculates an address from the PC value and an immediate offset,
+     * loads a word from memory, and writes it to a register."
+     */
+    kAddrModeLo12Li  // EA = [base] + #:lo12:Label+immediate. (Example: [x0, #:lo12:__Label300+456])
+  };
+  /*
+   * ARMv8-A A64 ISA Overview by Matteo Franchin @ ARM
+   * (presented at 64-bit Android on ARM. Sep. 2015) p.14
+   * o Address to load from/store to is a 64-bit base register + an optional offset
+   *   LDR X0, [X1]  ; Load from address held in X1
+   *   STR X0, [X1]  ; Store to address held in X1
+   *
+   * o Offset can be an immediate or a register
+   *   LDR X0, [X1, #8]   ; Load from address [X1 + 8 bytes]
+   *   LDR X0, [X1, #-8]  ; Load with negative offset
+   *   LDR X0, [X1, X2]   ; Load from address [X1 + X2]
+   *
+   * o A Wn register offset needs to be extended to 64 bits
+   *   LDR X0, [X1, W2, SXTW]  ; Sign-extend offset in W2
+   *   LDR X0, [X1, W2, UXTW]  ; Zero-extend offset in W2
+   *
+   * o Both Xn and Wn register offsets can include an optional left-shift
+   *   LDR X0, [X1, W2, UXTW #2]  ; Zero-extend offset in W2 & left-shift by 2
+   *   LDR X0, [X1, X2, LSL #2]   ; Left-shift offset in X2 by 2
+   *
+   * p.15
+   * Addressing Modes       Analogous C Code
+   *                        int *intptr = ...  // X1
+   *                        int out;           // W0
+   * o Simple: X1 is not changed
+   *   LDR W0, [X1]         out = *intptr;
+   * o Offset: X1 is not changed
+   *   LDR W0, [X1, #4]     out = intptr[1];
+   * o Pre-indexed: X1 changed before load
+   *   LDR W0, [X1, #4]!
=|ADD X1,X1,#4 out = *(++intptr); + * |LDR W0,[X1] + * o Post-indexed: X1 changed after load + * LDR W0, [X1], #4 =|LDR W0,[X1] out = *(intptr++); + * |ADD X1,X1,#4 + */ + enum ExtendInfo : uint8 { + kShiftZero = 0x1, + kShiftOne = 0x2, + kShiftTwo = 0x4, + kShiftThree = 0x8, + kUnsignedExtend = 0x10, + kSignExtend = 0x20 + }; + + enum IndexingOption : uint8 { + kIntact, /* base register stays the same */ + kPreIndex, /* base register gets changed before load */ + kPostIndex, /* base register gets changed after load */ + }; + + MemOperand(uint32 size) : + OperandVisitable(Operand::kOpdMem, size) {} + MemOperand(uint32 size, const MIRSymbol &mirSymbol) : + OperandVisitable(Operand::kOpdMem, size), symbol(&mirSymbol) {} + + MemOperand(uint32 size, RegOperand *baseOp, RegOperand *indexOp, ImmOperand *ofstOp, const MIRSymbol *mirSymbol, + ImmOperand *scaleOp = nullptr) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(baseOp), + indexOpnd(indexOp), + offsetOpnd(ofstOp), + scaleOpnd(scaleOp), + symbol(mirSymbol) {} + + MemOperand(RegOperand *base, OfstOperand *offset, uint32 size, IndexingOption idxOpt = kIntact) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(base), + indexOpnd(nullptr), + offsetOpnd(offset), + symbol(nullptr), + addrMode(kAddrModeBOi), + extend(0), + idxOpt(idxOpt), + noExtend(false), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand *index, + ImmOperand *offset, const MIRSymbol *sym) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(&base), + indexOpnd(index), + offsetOpnd(offset), + symbol(sym), + addrMode(mode), + extend(0), + idxOpt(kIntact), + noExtend(false), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand &index, + ImmOperand *offset, const MIRSymbol &sym, bool noExtend) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(&base), + indexOpnd(&index), + offsetOpnd(offset), + symbol(&sym), + addrMode(mode), + extend(0), + idxOpt(kIntact), + noExtend(noExtend), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 dSize, RegOperand &baseOpnd, RegOperand &indexOpnd, + uint32 shift, bool isSigned = false) + : OperandVisitable(Operand::kOpdMem, dSize), + baseOpnd(&baseOpnd), + indexOpnd(&indexOpnd), + offsetOpnd(nullptr), + symbol(nullptr), + addrMode(mode), + extend((isSigned ? 
kSignExtend : kUnsignedExtend) | (1U << shift)), + idxOpt(kIntact), + noExtend(false), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym) + : OperandVisitable(Operand::kOpdMem, dSize), + baseOpnd(nullptr), + indexOpnd(nullptr), + offsetOpnd(nullptr), + symbol(&sym), + addrMode(mode), + extend(0), + idxOpt(kIntact), + noExtend(false), + isStackMem(false) { + DEBUG_ASSERT(mode == kAddrModeLiteral, "This constructor version is supposed to be used with AddrMode_Literal only"); + } + + /* Copy constructor */ + explicit MemOperand(const MemOperand &memOpnd) + : OperandVisitable(Operand::kOpdMem, memOpnd.GetSize()), + baseOpnd(memOpnd.baseOpnd), + indexOpnd(memOpnd.indexOpnd), + offsetOpnd(memOpnd.offsetOpnd), + scaleOpnd(memOpnd.scaleOpnd), + symbol(memOpnd.symbol), + memoryOrder(memOpnd.memoryOrder), + addrMode(memOpnd.addrMode), + extend(memOpnd.extend), + idxOpt(memOpnd.idxOpt), + noExtend(memOpnd.noExtend), + isStackMem(memOpnd.isStackMem), + isStackArgMem(memOpnd.isStackArgMem){} + + MemOperand &operator=(const MemOperand &memOpnd) = default; + + ~MemOperand() override = default; + using OperandVisitable::OperandVisitable; + + MemOperand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + void Dump() const override {}; + + RegOperand *GetBaseRegister() const { + return baseOpnd; + } + + void SetBaseRegister(RegOperand ®Opnd) { + baseOpnd = ®Opnd; + } + + RegOperand *GetIndexRegister() const { + return indexOpnd; + } + + void SetIndexRegister(RegOperand ®Opnd) { + indexOpnd = ®Opnd; + } + + ImmOperand *GetOffsetOperand() const { + return offsetOpnd; + } + + void SetOffsetOperand(ImmOperand &oftOpnd) { + offsetOpnd = &oftOpnd; + } + + const ImmOperand *GetScaleOperand() const { + return scaleOpnd; + } + + void SetScaleOperand(ImmOperand &scaOpnd) { + scaleOpnd = &scaOpnd; + } + + const MIRSymbol *GetSymbol() const { + return symbol; + } + + void SetMemoryOrdering(uint32 memOrder) { + memoryOrder |= memOrder; + } + + bool HasMemoryOrdering(uint32 memOrder) const { + return (memoryOrder & memOrder) != 0; + } + + void SetAccessSize(uint8 size) { + accessSize = size; + } + + uint8 GetAccessSize() const { + return accessSize; + } + + AArch64AddressingMode GetAddrMode() const { + return addrMode; + } + + const std::string &GetSymbolName() const { + return GetSymbol()->GetName(); + } + + bool IsStackMem() const { + return isStackMem; + } + + void SetStackMem(bool isStack) { + isStackMem = isStack; + } + + bool IsStackArgMem() const { + return isStackArgMem; + } + + void SetStackArgMem(bool isStackArg) { + isStackArgMem = isStackArg; + } + + Operand *GetOffset() const; + + OfstOperand *GetOffsetImmediate() const { + return static_cast(GetOffsetOperand()); + } + + /* Returns N where alignment == 2^N */ + static int32 GetImmediateOffsetAlignment(uint32 dSize) { + DEBUG_ASSERT(dSize >= k8BitSize, "error val:dSize"); + DEBUG_ASSERT(dSize <= k128BitSize, "error val:dSize"); + DEBUG_ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + /* dSize==8: 0, dSize==16 : 1, dSize==32: 2, dSize==64: 3 */ + return __builtin_ctz(dSize) - static_cast(kBaseOffsetAlignment); + } + + static int32 GetMaxPIMM(uint32 dSize) { + dSize = dSize > k64BitSize ? 
k64BitSize : dSize; + DEBUG_ASSERT(dSize >= k8BitSize, "error val:dSize"); + DEBUG_ASSERT(dSize <= k128BitSize, "error val:dSize"); + DEBUG_ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + int32 alignment = GetImmediateOffsetAlignment(dSize); + /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */ + DEBUG_ASSERT(alignment >= kOffsetAlignmentOf8Bit, "error val:alignment"); + DEBUG_ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment"); + return (kMaxPimm[alignment]); + } + + static int32 GetMaxPairPIMM(uint32 dSize) { + DEBUG_ASSERT(dSize >= k32BitSize, "error val:dSize"); + DEBUG_ASSERT(dSize <= k128BitSize, "error val:dSize"); + DEBUG_ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + int32 alignment = GetImmediateOffsetAlignment(dSize); + /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */ + DEBUG_ASSERT(alignment >= kOffsetAlignmentOf32Bit, "error val:alignment"); + DEBUG_ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment"); + return (kMaxPairPimm[static_cast(alignment) - k2BitSize]); + } + + bool IsOffsetMisaligned(uint32 dSize) const { + DEBUG_ASSERT(dSize >= k8BitSize, "error val:dSize"); + DEBUG_ASSERT(dSize <= k128BitSize, "error val:dSize"); + DEBUG_ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + if (dSize == k8BitSize || addrMode != kAddrModeBOi) { + return false; + } + OfstOperand *ofstOpnd = GetOffsetImmediate(); + if (ofstOpnd->GetOffsetValue() >= -256 && ofstOpnd->GetOffsetValue() <= 255) { + return false; + } + return ((static_cast(ofstOpnd->GetOffsetValue()) & + static_cast((1U << static_cast(GetImmediateOffsetAlignment(dSize))) - 1)) != 0); + } + + static bool IsSIMMOffsetOutOfRange(int64 offset, bool is64bit, bool isLDSTPair) { + if (!isLDSTPair) { + return (offset < kMinSimm32 || offset > kMaxSimm32); + } + if (is64bit) { + return (offset < kMinSimm64 || offset > kMaxSimm64Pair) || (static_cast(offset) & k7BitSize) ; + } + return (offset < kMinSimm32 || offset > kMaxSimm32Pair) || (static_cast(offset) & k3BitSize); + } + + static bool IsPIMMOffsetOutOfRange(int32 offset, uint32 dSize) { + DEBUG_ASSERT(dSize >= k8BitSize, "error val:dSize"); + DEBUG_ASSERT(dSize <= k128BitSize, "error val:dSize"); + DEBUG_ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + return (offset < 0 || offset > GetMaxPIMM(dSize)); + } + + bool operator<(const MemOperand &opnd) const { + return addrMode < opnd.addrMode || + (addrMode == opnd.addrMode && GetBaseRegister() < opnd.GetBaseRegister()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() < opnd.GetIndexRegister()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() < opnd.GetOffsetOperand()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && + GetSymbol() < opnd.GetSymbol()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && + GetSymbol() == opnd.GetSymbol() && GetSize() < opnd.GetSize()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && + GetSymbol() == opnd.GetSymbol() && GetSize() == opnd.GetSize() && extend < 
opnd.extend); + } + + bool operator==(const MemOperand &opnd) const { + return (GetSize() == opnd.GetSize()) && (addrMode == opnd.addrMode) && (extend == opnd.extend) && + (GetBaseRegister() == opnd.GetBaseRegister()) && + (GetIndexRegister() == opnd.GetIndexRegister()) && + (GetSymbol() == opnd.GetSymbol()) && + (GetOffsetOperand() == opnd.GetOffsetOperand()) ; + } + + VaryType GetMemVaryType() const { + Operand *ofstOpnd = GetOffsetOperand(); + if (ofstOpnd != nullptr) { + auto *opnd = static_cast(ofstOpnd); + return opnd->GetVary(); + } + return kNotVary; + } + + void SetAddrMode(AArch64AddressingMode val) { + addrMode = val; + } + + bool IsExtendedRegisterMode() const { + return addrMode == kAddrModeBOrX; + } + + void UpdateExtend(ExtendInfo flag) { + extend = flag | (1U << ShiftAmount()); + } + + bool SignedExtend() const { + return IsExtendedRegisterMode() && ((extend & kSignExtend) != 0); + } + + bool UnsignedExtend() const { + return IsExtendedRegisterMode() && !SignedExtend(); + } + + uint32 ShiftAmount() const { + uint32 scale = extend & 0xF; + /* 8 is 1 << 3, 4 is 1 << 2, 2 is 1 << 1, 1 is 1 << 0; */ + return (scale == 8) ? 3 : ((scale == 4) ? 2 : ((scale == 2) ? 1 : 0)); + } + + bool ShouldEmitExtend() const { + return !noExtend && ((extend & 0x3F) != 0); + } + + IndexingOption GetIndexOpt() const { + return idxOpt; + } + + void SetIndexOpt(IndexingOption newidxOpt) { + idxOpt = newidxOpt; + } + + bool GetNoExtend() const { + return noExtend; + } + + void SetNoExtend(bool val) { + noExtend = val; + } + + uint32 GetExtend() const { + return extend; + } + + void SetExtend(uint32 val) { + extend = val; + } + + bool IsIntactIndexed() const { + return idxOpt == kIntact; + } + + bool IsPostIndexed() const { + return idxOpt == kPostIndex; + } + + bool IsPreIndexed() const { + return idxOpt == kPreIndex; + } + + std::string GetExtendAsString() const { + if (GetIndexRegister()->GetSize() == k64BitSize) { + return std::string("LSL"); + } + return ((extend & kSignExtend) != 0) ? std::string("SXTW") : std::string("UXTW"); + } + + /* Return true if given operand has the same base reg and offset with this. */ + bool Equals(Operand &op) const override; + bool Equals(const MemOperand &op) const; + bool Less(const Operand &right) const override; + + private: + RegOperand *baseOpnd = nullptr; /* base register */ + RegOperand *indexOpnd = nullptr; /* index register */ + ImmOperand *offsetOpnd = nullptr; /* offset immediate */ + ImmOperand *scaleOpnd = nullptr; + const MIRSymbol *symbol; /* AddrMode_Literal */ + uint32 memoryOrder = 0; + uint8 accessSize = 0; /* temp, must be set right before use everytime. 
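The extend field manipulated above is a small bitfield: the low nibble holds 1 << shift (kShiftZero=0x1 through kShiftThree=0x8) and bits 4-5 carry kUnsignedExtend/kSignExtend, which is exactly what ShiftAmount() decodes back. A round-trip sketch of just that encoding, outside the class:

```
#include <cstdint>
#include <cstdio>

enum : uint32_t { kUnsignedExtend = 0x10, kSignExtend = 0x20 };

// Mirror of UpdateExtend()'s encoding: sign bit plus a one-hot shift nibble.
uint32_t EncodeExtend(bool isSigned, uint32_t shift) {
    return (isSigned ? kSignExtend : kUnsignedExtend) | (1u << shift);
}

// Mirror of ShiftAmount()'s decode: map the one-hot nibble back to 0..3.
uint32_t ShiftAmount(uint32_t extend) {
    uint32_t scale = extend & 0xF;
    return (scale == 8) ? 3 : (scale == 4) ? 2 : (scale == 2) ? 1 : 0;
}

int main() {
    uint32_t e = EncodeExtend(/*isSigned=*/true, /*shift=*/2);  // e.g. [X1, W2, SXTW #2]
    std::printf("extend=0x%x shift=%u signed=%d\n", e, ShiftAmount(e), (e & kSignExtend) != 0);
    return 0;  // prints: extend=0x24 shift=2 signed=1
}
```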
+class LabelOperand : public OperandVisitable<LabelOperand> { + public: + LabelOperand(const char *parent, LabelIdx labIdx) + : OperandVisitable(kOpdBBAddress, 0), labelIndex(labIdx), parentFunc(parent), orderID(-1u) {} +
+ ~LabelOperand() override = default; + using OperandVisitable<LabelOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ bool IsLabelOpnd() const override { + return true; + } +
+ LabelIdx GetLabelIndex() const { + return labelIndex; + } +
+ const std::string &GetParentFunc() const { + return parentFunc; + } +
+ LabelIDOrder GetLabelOrder() const { + return orderID; + } +
+ void SetLabelOrder(LabelIDOrder idx) { + orderID = idx; + } +
+ void Dump() const override; +
+ bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } +
+ /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } +
+ auto *rightOpnd = static_cast<const LabelOperand*>(&right); +
+ int32 nRes = strcmp(parentFunc.c_str(), rightOpnd->parentFunc.c_str()); + if (nRes == 0) { + return labelIndex < rightOpnd->labelIndex; + } else { + return nRes < 0; + } + } +
+ bool Equals(Operand &operand) const override { + if (!operand.IsLabel()) { + return false; + } + auto &op = static_cast<LabelOperand&>(operand); + return ((&op == this) || (op.GetLabelIndex() == labelIndex)); + } +
+ protected: + LabelIdx labelIndex; + const std::string parentFunc; +
+ private: + /* this index records the order in which this label is defined during code emit. */ + LabelIDOrder orderID = -1u; +}; +
+class ListOperand : public OperandVisitable<ListOperand> { + public: + explicit ListOperand(MapleAllocator &allocator) : + OperandVisitable(Operand::kOpdList, 0), + opndList(allocator.Adapter()) {} +
+ ~ListOperand() override = default; +
+ using OperandVisitable<ListOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ void PushOpnd(RegOperand &opnd) { + opndList.push_back(&opnd); + } +
+ MapleList<RegOperand*> &GetOperands() { + return opndList; + } +
+ void Dump() const override { + for (auto it = opndList.begin(); it != opndList.end();) { + (*it)->Dump(); + LogInfo::MapleLogger() << (++it == opndList.end() ? "" : ", "); + } + } +
+ bool Less(const Operand &right) const override { + /* For different type.
*/ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } +
+ DEBUG_ASSERT(false, "We don't need to compare list operand."); + return false; + } +
+ bool Equals(Operand &operand) const override { + if (!operand.IsList()) { + return false; + } + auto &op = static_cast<ListOperand&>(operand); + return (&op == this); + } +
+ protected: + MapleList<RegOperand*> opndList; +}; +
+/* represents the address of a global variable */ +class StImmOperand : public OperandVisitable<StImmOperand> { + public: + StImmOperand(const MIRSymbol &symbol, int64 offset, int32 relocs) + : OperandVisitable(kOpdStImmediate, 0), symbol(&symbol), offset(offset), relocs(relocs) {} +
+ ~StImmOperand() override = default; + using OperandVisitable<StImmOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ const MIRSymbol *GetSymbol() const { + return symbol; + } +
+ const std::string &GetName() const { + return symbol->GetName(); + } +
+ int64 GetOffset() const { + return offset; + } +
+ void SetOffset(int64 newOffset) { + offset = newOffset; + } +
+ int32 GetRelocs() const { + return relocs; + } +
+ bool operator==(const StImmOperand &opnd) const { + return (symbol == opnd.symbol && offset == opnd.offset && relocs == opnd.relocs); + } +
+ bool operator<(const StImmOperand &opnd) const { + return (symbol < opnd.symbol || (symbol == opnd.symbol && offset < opnd.offset) || + (symbol == opnd.symbol && offset == opnd.offset && relocs < opnd.relocs)); + } +
+ bool Less(const Operand &right) const override; +
+ void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } +
+ private: + const MIRSymbol *symbol; + int64 offset; + int32 relocs; +}; +
+class ExtendShiftOperand : public OperandVisitable<ExtendShiftOperand> { + public: + /* if and only if at least one register is WSP, ARM recommends use of the LSL operator name rather than UXTW */ + enum ExtendOp : uint8 { + kUndef, + kUXTB, + kUXTH, + kUXTW, /* equal to lsl in 32bits */ + kUXTX, /* equal to lsl in 64bits */ + kSXTB, + kSXTH, + kSXTW, + kSXTX, + }; +
+ ExtendShiftOperand(ExtendOp op, uint32 amt, int32 bitLen) + : OperandVisitable(Operand::kOpdExtend, bitLen), extendOp(op), shiftAmount(amt) {} +
+ ~ExtendShiftOperand() override = default; + using OperandVisitable<ExtendShiftOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ uint32 GetShiftAmount() const { + return shiftAmount; + } +
+ ExtendOp GetExtendOp() const { + return extendOp; + } +
+ bool Less(const Operand &right) const override; +
+ void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } +
+ private: + ExtendOp extendOp; + uint32 shiftAmount; +}; +
+class BitShiftOperand : public OperandVisitable<BitShiftOperand> { + public: + enum ShiftOp : uint8 { + kUndef, + kLSL, /* logical shift left */ + kLSR, /* logical shift right */ + kASR, /* arithmetic shift right */ + }; +
+ /* bitlength is equal to 5 or 6 */ + BitShiftOperand(ShiftOp op, uint32 amt, int32 bitLen) + : OperandVisitable(Operand::kOpdShift, bitLen), shiftOp(op), shiftAmount(amt) {} +
+ ~BitShiftOperand() override = default; + using OperandVisitable<BitShiftOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } +
+ /* For different type. */ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } +
+ const BitShiftOperand *rightOpnd = static_cast<const BitShiftOperand*>(&right); +
+ /* The same type.
*/ + if (shiftOp != rightOpnd->shiftOp) { + return shiftOp < rightOpnd->shiftOp; + } + return shiftAmount < rightOpnd->shiftAmount; + } +
+ uint32 GetShiftAmount() const { + return shiftAmount; + } +
+ ShiftOp GetShiftOp() const { + return shiftOp; + } +
+ void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } +
+ private: + ShiftOp shiftOp; + uint32 shiftAmount; +}; +
+class CommentOperand : public OperandVisitable<CommentOperand> { + public: + CommentOperand(const char *str, MemPool &memPool) + : OperandVisitable(Operand::kOpdString, 0), comment(str, &memPool) {} +
+ CommentOperand(const std::string &str, MemPool &memPool) + : OperandVisitable(Operand::kOpdString, 0), comment(str, &memPool) {} +
+ ~CommentOperand() override = default; + using OperandVisitable<CommentOperand>::OperandVisitable; +
+ const MapleString &GetComment() const { + return comment; + } +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ bool IsCommentOpnd() const override { + return true; + } +
+ bool Less(const Operand &right) const override { + /* For different type. */ + return GetKind() < right.GetKind(); + } +
+ void Dump() const override { + LogInfo::MapleLogger() << "# "; + if (!comment.empty()) { + LogInfo::MapleLogger() << comment; + } + } +
+ private: + const MapleString comment; +}; +
+using StringOperand = CommentOperand; +
+class ListConstraintOperand : public OperandVisitable<ListConstraintOperand> { + public: + explicit ListConstraintOperand(MapleAllocator &allocator) + : OperandVisitable(Operand::kOpdString, 0), + stringList(allocator.Adapter()) {}; +
+ ~ListConstraintOperand() override = default; + using OperandVisitable<ListConstraintOperand>::OperandVisitable; +
+ void Dump() const override { + for (auto *str : stringList) { + LogInfo::MapleLogger() << "(" << str->GetComment().c_str() << ")"; + } + } +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ bool Less(const Operand &right) const override { + /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } +
+ DEBUG_ASSERT(false, "We don't need to compare list operand."); + return false; + } +
+ MapleVector<CommentOperand*> stringList; +}; +
+/* for cg ssa analysis */ +class PhiOperand : public OperandVisitable<PhiOperand> { + public: + explicit PhiOperand(MapleAllocator &allocator) + : OperandVisitable(Operand::kOpdPhi, 0), + phiList(allocator.Adapter()) {} +
+ ~PhiOperand() override = default; + using OperandVisitable<PhiOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } +
+ void Dump() const override { + CHECK_FATAL(false, "NIY"); + } +
+ void InsertOpnd(uint32 bbId, RegOperand &phiParam) { + DEBUG_ASSERT(!phiList.count(bbId), "cannot insert duplicate operand"); + (void)phiList.emplace(std::pair(bbId, &phiParam)); + } +
+ void UpdateOpnd(uint32 bbId, uint32 newId, RegOperand &phiParam) { + (void)phiList.emplace(std::pair(newId, &phiParam)); + phiList.erase(bbId); + } +
+ MapleMap<uint32, RegOperand*> &GetOperands() { + return phiList; + } +
+ uint32 GetLeastCommonValidBit() const; +
+ bool IsRedundancy() const; +
+ bool Less(const Operand &right) const override { + /* For different type.
*/ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + DEBUG_ASSERT(false, "We don't need to compare phi operand."); + return false; + } +
+ bool Equals(Operand &operand) const override { + if (!operand.IsPhi()) { + return false; + } + auto &op = static_cast<PhiOperand&>(operand); + return (&op == this); + } +
+ protected: + MapleMap<uint32, RegOperand*> phiList; /* ssa-operand && BBId */ +}; +
+/* Use StImmOperand instead? */ +class FuncNameOperand : public OperandVisitable<FuncNameOperand> { + public: + explicit FuncNameOperand(const MIRSymbol &fsym) : OperandVisitable(kOpdBBAddress, 0), + symbol(&fsym) {} +
+ ~FuncNameOperand() override { + symbol = nullptr; + } + using OperandVisitable<FuncNameOperand>::OperandVisitable; +
+ const std::string &GetName() const { + return symbol->GetName(); + } +
+ bool IsFuncNameOpnd() const override { + return true; + } +
+ const MIRSymbol *GetFunctionSymbol() const { + return symbol; + } +
+ void SetFunctionSymbol(const MIRSymbol &fsym) { + symbol = &fsym; + } +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.New<FuncNameOperand>(*this); + } +
+ bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + /* For different type. */ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } +
+ auto *rightOpnd = static_cast<const FuncNameOperand*>(&right); +
+ return static_cast<const void*>(symbol) < static_cast<const void*>(rightOpnd->symbol); + } +
+ void Dump() const override { + LogInfo::MapleLogger() << GetName(); + } +
+ private: + const MIRSymbol *symbol; +}; +
+namespace operand { +/* bit 0-7 for common */ +enum CommOpndDescProp : maple::uint64 { + kIsDef = 1ULL, + kIsUse = (1ULL << 1), + kIsVector = (1ULL << 2) +
+}; +
+/* bit 8-15 for reg */ +enum RegOpndDescProp : maple::uint64 { + kInt = (1ULL << 8), + kFloat = (1ULL << 9), + kRegTyCc = (1ULL << 10), + kRegTyVary = (1ULL << 11), +}; +
+/* bit 16-23 for imm */ +enum ImmOpndDescProp : maple::uint64 { +
+}; +
+/* bit 24-31 for mem */ +enum MemOpndDescProp : maple::uint64 { + kMemLow12 = (1ULL << 24), + kLiteralLow12 = kMemLow12, + kIsLoadLiteral = (1ULL << 25) +
+}; +}
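
These enums partition a single 64-bit property word: bits 0-7 describe every operand, and higher byte ranges are reserved per operand kind. `OpndDesc` below answers def/use queries by masking that word. A standalone sketch of the composition (the `MiniOpndDesc` type and its flag set are illustrative copies, not the descriptors generated from operand.def):

```
#include <cassert>
#include <cstdint>

constexpr uint64_t kIsDef = 1ULL;       /* bit 0: operand is written */
constexpr uint64_t kIsUse = 1ULL << 1;  /* bit 1: operand is read */
constexpr uint64_t kInt = 1ULL << 8;    /* bit 8: integer register class */

struct MiniOpndDesc {
    uint64_t property;
    bool IsDef() const { return (property & kIsDef) != 0; }
    bool IsUse() const { return (property & kIsUse) != 0; }
};

int main() {
    /* an integer register that is both read and written, as in "add x0, x0, #1" */
    MiniOpndDesc desc{kIsDef | kIsUse | kInt};
    assert(desc.IsDef() && desc.IsUse());
    return 0;
}
```
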
+class OpndDesc { + public: + OpndDesc(Operand::OperandType t, maple::uint64 p, maple::uint32 s) : + opndType(t), property(p), size(s) {} + virtual ~OpndDesc() = default; +
+ Operand::OperandType GetOperandType() const { + return opndType; + } +
+ maple::uint32 GetSize() const { + return size; + } +
+ bool IsImm() const { + return opndType == Operand::kOpdImmediate; + } +
+ bool IsRegister() const { + return opndType == Operand::kOpdRegister; + } +
+ bool IsMem() const { + return opndType == Operand::kOpdMem; + } +
+ bool IsRegDef() const { + return opndType == Operand::kOpdRegister && (property & operand::kIsDef); + } +
+ bool IsRegUse() const { + return opndType == Operand::kOpdRegister && (property & operand::kIsUse); + } +
+ bool IsDef() const { + return (property & operand::kIsDef) != 0; + } +
+ bool IsUse() const { + return (property & operand::kIsUse) != 0; + } +
+ bool IsMemLow12() const { + return IsMem() && (property & operand::kMemLow12); + } +
+ bool IsLiteralLow12() const { + return opndType == Operand::kOpdStImmediate && (property & operand::kLiteralLow12); + } +
+ bool IsLoadLiteral() const { + return (property & operand::kIsLoadLiteral) != 0; + } +
+ bool IsVectorOperand() const { + return (property & operand::kIsVector); + } +
+#define DEFINE_MOP(op, ...) static const OpndDesc op; +#include "operand.def" +#undef DEFINE_MOP +
+ private: + Operand::OperandType opndType; + maple::uint64 property; + maple::uint32 size; +}; +
+class CondOperand : public OperandVisitable<CondOperand> { + public: + explicit CondOperand(maplebe::ConditionCode cc) : OperandVisitable(Operand::kOpdCond, k4ByteSize), cc(cc) {} +
+ ~CondOperand() override = default; + using OperandVisitable<CondOperand>::OperandVisitable; +
+ Operand *Clone(MemPool &memPool) const override { + return memPool.New<CondOperand>(cc); + } +
+ ConditionCode GetCode() const { + return cc; + } +
+ bool Less(const Operand &right) const override; +
+ void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } +
+ static const char *ccStrs[kCcLast]; +
+ private: + ConditionCode cc; +}; +
+class OpndDumpVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + explicit OpndDumpVisitor(const OpndDesc &operandDesc) : opndDesc(&operandDesc) {} + virtual ~OpndDumpVisitor() { + opndDesc = nullptr; + } +
+ protected: + virtual void DumpOpndPrefix() { + LogInfo::MapleLogger() << " (opnd:"; + } + virtual void DumpOpndSuffix() { + LogInfo::MapleLogger() << " )"; + } + void DumpSize(const Operand &opnd) const { + LogInfo::MapleLogger() << " [size:" << opnd.GetSize() << "]"; + } + const OpndDesc *GetOpndDesc() const { + return opndDesc; + } +
+ private: + const OpndDesc *opndDesc; +}; +} /* namespace maplebe */ +
+#endif /* MAPLEBE_INCLUDE_CG_OPERAND_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/optimize_common.h b/ecmascript/mapleall/maple_be/include/cg/optimize_common.h new file mode 100644 index 0000000000000000000000000000000000000000..c4575c2d9474d62de6dfb76723d2471c119cf9b9 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/optimize_common.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_OPTIMIZE_COMMON_H +#define MAPLEBE_INCLUDE_CG_OPTIMIZE_COMMON_H +#include "cgfunc.h" +
+namespace maplebe { +inline const std::string kCfgoChaining = "red"; +inline const std::string kCfgoSj = "burlywood1"; +inline const std::string kCfgoFlipCond = "cadetblue1"; +inline const std::string kCfgoAlways = "green"; +inline const std::string kCfgoUnreach = "yellow"; +inline const std::string kCfgoDup = "orange"; +inline const std::string kCfgoEmpty = "purple"; +inline const std::string kIcoIte = "blue"; /* if conversion optimization, if-then-else */ +inline const std::string kIcoIt = "grey"; /* if conversion optimization, if-then */ +
+class OptimizationPattern { + public: + explicit OptimizationPattern(CGFunc &func) + : patternName(func.GetMemoryPool()), + cgFunc(&func), + dotColor(func.GetMemoryPool()) {} + virtual ~OptimizationPattern() = default; +
+ bool IsKeepPosition() const { + return keepPosition; + } +
+ void SetKeepPosition(bool flag) { + keepPosition = flag; + } +
+ bool IsLabelInLSDAOrSwitchTable(LabelIdx label) const { + EHFunc *ehFunc = cgFunc->GetEHFunc(); + return (ehFunc != nullptr && cgFunc->GetTheCFG()->InLSDA(label, *ehFunc)) || + cgFunc->GetTheCFG()->InSwitchTable(label, *cgFunc); + } +
+ void Search2Op(bool noOptimize); + virtual bool Optimize(BB &curBB) = 0; +
+ protected: + void Log(uint32 bbID); +
+ bool keepPosition = false; + MapleString patternName; + CGFunc *cgFunc; + MapleString dotColor; + bool checkOnly = false; +}; +
+class Optimizer { + public: + Optimizer(CGFunc &func, MemPool &memPool) + : cgFunc(&func), + name(nullptr), + memPool(&memPool), + alloc(&memPool), + diffPassPatterns(alloc.Adapter()), + singlePassPatterns(alloc.Adapter()) { + func.GetTheCFG()->InitInsnVisitor(func); + } +
+ virtual ~Optimizer() = default; + void Run(const std::string &funcName, bool checkOnly = false); + virtual void InitOptimizePatterns() = 0; +
+ protected: + CGFunc *cgFunc; + const char *name; + MemPool *memPool; + MapleAllocator alloc; + /* patterns need to run in different passes of cgFunc */ + MapleVector<OptimizationPattern*> diffPassPatterns; + /* patterns can run in a single pass of cgFunc */ + MapleVector<OptimizationPattern*> singlePassPatterns; +}; +
+class OptimizeLogger { + public: + static OptimizeLogger &GetLogger() { + static OptimizeLogger instance; + return instance; + } +
+ void Log(const std::string &patternName); + void ClearLocal(); + void Print(const std::string &funcName); +
+ private: + OptimizeLogger() = default; +
+ ~OptimizeLogger() = default; +
+ OptimizeLogger(const OptimizeLogger&); + OptimizeLogger &operator=(const OptimizeLogger&); +
+ std::map globalStat; + std::map localStat; +}; +
+class DotGenerator { + public: + static void SetColor(uint32 bbID, const std::string &color); + static void GenerateDot(const std::string &preFix, const CGFunc &cgFunc, const MIRModule &mod, + bool includeEH = false, const std::string fname = "", regno_t vReg = 0); + private: + static std::map<uint32, std::string> coloringMap; + static std::string GetFileName(const MIRModule &mirModule, const std::string &filePreFix); + static bool IsBackEdge(const CGFunc &cgFunction, const BB &from, const BB &to); + static void DumpEdge(const CGFunc &cgFunction, std::ofstream &cfgFileOfStream, bool isIncludeEH); + static void DumpBBInstructions(const CGFunc &cgFunction, regno_t vReg, std::ofstream &cfgFile); + static bool FoundListOpndRegNum(ListOperand &listOpnd, const Insn &insnObj, regno_t vReg); + static bool FoundMemAccessOpndRegNum(const MemOperand &memOperand, const Insn &insnObj, regno_t vReg); +
static bool FoundNormalOpndRegNum(const RegOperand &regOpnd, const Insn &insnObj, regno_t vReg); +}; +} /* namespace maplebe */ +
+#endif /* MAPLEBE_INCLUDE_CG_OPTIMIZE_COMMON_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/peep.h b/ecmascript/mapleall/maple_be/include/cg/peep.h new file mode 100644 index 0000000000000000000000000000000000000000..64f2c31fdd6b506a7acce3c64fc9dcc8900462f3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/peep.h @@ -0,0 +1,234 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_PEEP_H +#define MAPLEBE_INCLUDE_CG_PEEP_H +
+#include "cg.h" +#include "optimize_common.h" +
+namespace maplebe { +enum ReturnType : uint8 { + kResUseFirst, + kResDefFirst, + kResNotFind +}; +
+class PeepOptimizeManager { + public: + /* normal constructor */ + PeepOptimizeManager(CGFunc &f, BB &bb, Insn &insn) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(nullptr) {} + /* constructor for ssa */ + PeepOptimizeManager(CGFunc &f, BB &bb, Insn &insn, CGSSAInfo &info) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(&info) {} + ~PeepOptimizeManager() = default; + template <typename OptimizePattern> + void Optimize(bool patternEnable = false) { + if (!patternEnable) { + return; + } + OptimizePattern optPattern(*cgFunc, *currBB, *currInsn, *ssaInfo); + optPattern.Run(*currBB, *currInsn); + optSuccess = optPattern.GetPatternRes(); + if (optSuccess && optPattern.GetCurrInsn() != nullptr) { + currInsn = optPattern.GetCurrInsn(); + } + } + template <typename OptimizePattern> + void NormalPatternOpt(bool patternEnable = false) { + if (!patternEnable) { + return; + } + OptimizePattern optPattern(*cgFunc, *currBB, *currInsn); + optPattern.Run(*currBB, *currInsn); + } + bool OptSuccess() const { + return optSuccess; + } + private: + CGFunc *cgFunc; + BB *currBB; + Insn *currInsn; + CGSSAInfo *ssaInfo; + bool optSuccess = false; +}; +
+class CGPeepHole { + public: + /* normal constructor */ + CGPeepHole(CGFunc &f, MemPool *memPool) + : cgFunc(&f), + peepMemPool(memPool), + ssaInfo(nullptr) {} + /* constructor for ssa */ + CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) + : cgFunc(&f), + peepMemPool(memPool), + ssaInfo(cgssaInfo) {} + virtual ~CGPeepHole() = default; +
+ virtual void Run() = 0; + virtual bool DoSSAOptimize(BB &bb, Insn &insn) = 0; + virtual void DoNormalOptimize(BB &bb, Insn &insn) = 0; +
+ protected: + CGFunc *cgFunc; + MemPool *peepMemPool; + CGSSAInfo *ssaInfo; + PeepOptimizeManager *manager = nullptr; +}; +
+class PeepPattern { + public: + explicit PeepPattern(CGFunc &oneCGFunc) : cgFunc(oneCGFunc) {} + virtual ~PeepPattern() = default; + virtual void Run(BB &bb, Insn &insn) = 0; + /* optimization support function */ + bool IfOperandIsLiveAfterInsn(const RegOperand &regOpnd, Insn &insn); + bool FindRegLiveOut(const RegOperand &regOpnd, const BB &bb); + bool CheckOpndLiveinSuccs(const RegOperand &regOpnd, const BB &bb) const; + bool CheckRegLiveinReturnBB(const RegOperand &regOpnd, const BB &bb) const; + ReturnType IsOpndLiveinBB(const RegOperand
&regOpnd, const BB &bb) const; + int logValueAtBase2(int64 val) const; + bool IsMemOperandOptPattern(const Insn &insn, Insn &nextInsn); +
+ protected: + CGFunc &cgFunc; +}; +
+class CGPeepPattern { + public: + /* normal constructor */ + CGPeepPattern(CGFunc &f, BB &bb, Insn &insn) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(nullptr) {} + /* constructor for ssa */ + CGPeepPattern(CGFunc &f, BB &bb, Insn &insn, CGSSAInfo &info) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(&info) {} + virtual ~CGPeepPattern() = default; +
+ std::string PhaseName() const { + return "cgpeephole"; + } +
+ virtual std::string GetPatternName() = 0; + Insn *GetDefInsn(const RegOperand &useReg); + void DumpAfterPattern(std::vector<Insn*> &prevInsns, const Insn *replacedInsn, const Insn *newInsn); + InsnSet GetAllUseInsn(const RegOperand &defReg); + int64 GetLogValueAtBase2(int64 val) const; + /* The CC reg is unique and must not be propagated across SSA versions. */ + bool IsCCRegCrossVersion(Insn &startInsn, Insn &endInsn, const RegOperand &ccReg); + /* optimization support function */ + bool IfOperandIsLiveAfterInsn(const RegOperand &regOpnd, Insn &insn); + bool FindRegLiveOut(const RegOperand &regOpnd, const BB &bb); + bool CheckOpndLiveinSuccs(const RegOperand &regOpnd, const BB &bb) const; + bool CheckRegLiveinReturnBB(const RegOperand &regOpnd, const BB &bb) const; + ReturnType IsOpndLiveinBB(const RegOperand &regOpnd, const BB &bb) const; + bool GetPatternRes() const { + return optSuccess; + } + Insn *GetCurrInsn() { + return currInsn; + } + void SetCurrInsn(Insn *updateInsn) { + currInsn = updateInsn; + } + virtual void Run(BB &bb, Insn &insn) = 0; + virtual bool CheckCondition(Insn &insn) = 0; +
+ protected: + CGFunc *cgFunc; + BB *currBB; + Insn *currInsn; + CGSSAInfo *ssaInfo; + bool optSuccess = false; +}; +
+class PeepHoleOptimizer { + public: + explicit PeepHoleOptimizer(CGFunc *cf) : cgFunc(cf) { + cg = cgFunc->GetCG(); + } + ~PeepHoleOptimizer() = default; + void Peephole0(); + void PeepholeOpt(); + void PrePeepholeOpt(); + void PrePeepholeOpt1(); +
+ private: + CGFunc *cgFunc; + CG *cg; +}; /* class PeepHoleOptimizer */ +
+class PeepPatternMatch { + public: + PeepPatternMatch(CGFunc &oneCGFunc, MemPool *memPool) + : optOwnMemPool(memPool), + peepAllocator(memPool), + optimizations(peepAllocator.Adapter()), + cgFunc(oneCGFunc) {} + virtual ~PeepPatternMatch() = default; + virtual void Run(BB &bb, Insn &insn) = 0; + virtual void InitOpts() = 0; + protected: + MemPool *optOwnMemPool; + MapleAllocator peepAllocator; + MapleVector<PeepPattern*> optimizations; + CGFunc &cgFunc; +}; +
+class PeepOptimizer { + public: + PeepOptimizer(CGFunc &oneCGFunc, MemPool *memPool) + : cgFunc(oneCGFunc), + peepOptMemPool(memPool) { + index = 0; + } + ~PeepOptimizer() = default; + template <typename T> + void Run(); + static int32 index; +
+ private: + CGFunc &cgFunc; + MemPool *peepOptMemPool; +}; +
+MAPLE_FUNC_PHASE_DECLARE(CgPeepHole, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPrePeepHole, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostPeepHole, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPrePeepHole0, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPrePeepHole1, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPeepHole0, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPeepHole1, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_PEEP_H */
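
A concrete pattern plugs into this machinery by subclassing `CGPeepPattern`: `CheckCondition` matches the shape, `Run` rewrites it and records success in `optSuccess`, and a target's `DoSSAOptimize` dispatches it through `PeepOptimizeManager::Optimize`. A hedged sketch of a hypothetical pattern follows; the class name, the `IsRedundantMov` predicate, and the dispatch line are illustrative assumptions, not code from this patch:

```
/* Sketch only: a hypothetical no-op-mov eliminator built on CGPeepPattern. */
class RemoveRedundantMovPattern : public CGPeepPattern {
 public:
  RemoveRedundantMovPattern(CGFunc &f, BB &bb, Insn &insn, CGSSAInfo &info)
      : CGPeepPattern(f, bb, insn, info) {}
  ~RemoveRedundantMovPattern() override = default;

  std::string GetPatternName() override {
    return "RemoveRedundantMovPattern";
  }

  bool CheckCondition(Insn &insn) override {
    return IsRedundantMov(insn);  /* hypothetical predicate: matches "mov rX, rX" */
  }

  void Run(BB &bb, Insn &insn) override {
    if (!CheckCondition(insn)) {
      return;
    }
    bb.RemoveInsn(insn);  /* assumes the BB insn-removal API used elsewhere in maplebe */
    optSuccess = true;
  }

 private:
  static bool IsRedundantMov(const Insn&) { return false; }  /* placeholder */
};

/* Dispatched from a target's DoSSAOptimize(bb, insn):
 *   manager->Optimize<RemoveRedundantMovPattern>(true); */
```
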
diff --git a/ecmascript/mapleall/maple_be/include/cg/pressure.h b/ecmascript/mapleall/maple_be/include/cg/pressure.h new file mode 100644 index 0000000000000000000000000000000000000000..c2c79831c2eba5db9e23c64cbe81d797d1724a7c --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/pressure.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_PRESSURE_H +#define MAPLEBE_INCLUDE_CG_PRESSURE_H +
+#include "cgbb.h" +#include "cgfunc.h" +
+namespace maplebe { +struct RegList { + Insn *insn; + RegList *next; +}; +
+#define FOR_ALL_REGCLASS(i) \ + for (uint32 i = 0; i < static_cast<uint32>(RegPressure::GetMaxRegClassNum()); ++i) +
+class RegPressure { + public: + explicit RegPressure(MapleAllocator &alloc) + : regUses(alloc.Adapter()), regDefs(alloc.Adapter()), + pressure(alloc.Adapter()), deadDefNum(alloc.Adapter()) {} +
+ virtual ~RegPressure() = default; +
+ void DumpRegPressure() const; +
+ void SetRegUses(RegList *regList) { + regUses.emplace_back(regList); + } +
+ void SetRegDefs(size_t idx, RegList *regList) { + if (idx < regDefs.size()) { + regDefs[idx] = regList; + } else { + regDefs.emplace_back(regList); + } + } +
+ static void SetMaxRegClassNum(int32 maxClassNum) { + maxRegClassNum = maxClassNum; + } +
+ static int32 GetMaxRegClassNum() { + return maxRegClassNum; + } +
+ int32 GetPriority() const { + return priority; + } +
+ void SetPriority(int32 value) { + priority = value; + } +
+ int32 GetMaxDepth() const { + return maxDepth; + } +
+ void SetMaxDepth(int32 value) { + maxDepth = value; + } +
+ int32 GetNear() const { + return near; + } +
+ void SetNear(int32 value) { + near = value; + } +
+ bool GetIncPressure() const { + return incPressure; + } +
+ void SetIncPressure(bool value) { + incPressure = value; + } + const MapleVector<int32> &GetPressure() const { + return pressure; + } +
+ void IncPressureByIndex(uint32 index) { + DEBUG_ASSERT(index < pressure.size(), "index out of range"); + ++pressure[index]; + } +
+ void DecPressureByIndex(uint32 index) { + DEBUG_ASSERT(index < pressure.size(), "index out of range"); + --pressure[index]; + } +
+ void InitPressure() { + pressure.resize(static_cast<size_t>(maxRegClassNum), 0); + deadDefNum.resize(static_cast<size_t>(maxRegClassNum), 0); + incPressure = false; + } +
+ const MapleVector<int32> &GetDeadDefNum() const { + return deadDefNum; + } +
+ void IncDeadDefByIndex(uint32 index) { + DEBUG_ASSERT(index < deadDefNum.size(), "index out of range"); + ++deadDefNum[index]; + } +
+ RegList *GetRegUses(size_t idx) const { + return idx < regUses.size() ? regUses[idx] : nullptr; + } +
+ void InitRegUsesSize(size_t size) { + regUses.reserve(size); + } +
+ RegList *GetRegDefs(size_t idx) const { + return idx < regDefs.size() ?
regDefs[idx] : nullptr; + } +
+ void InitRegDefsSize(size_t size) { + regDefs.reserve(size); + } +
+ void SetHasPreg(bool value) { + hasPreg = value; + } +
+ bool GetHasPreg() const { + return hasPreg; + } +
+ void SetNumCall(int32 value) { + callNum = value; + } +
+ int32 GetNumCall() const { + return callNum; + } +
+ void SetHasNativeCallRegister(bool value) { + hasNativeCallRegister = value; + } +
+ bool GetHasNativeCallRegister() const { + return hasNativeCallRegister; + } +
+ private: + /* reg list for each use register */ + MapleVector<RegList*> regUses; + /* reg list for each def register */ + MapleVector<RegList*> regDefs; +
+ /* the number of registers the node needs, per register class */ + MapleVector<int32> pressure; + /* the number of dead def registers, per register class */ + MapleVector<int32> deadDefNum; + /* max number of register classes */ + static int32 maxRegClassNum; + int32 priority = 0; + int32 maxDepth = 0; + int32 near = 0; + /* the number of successor calls */ + int32 callNum = 0; + /* set to true when pressure for some register class increases */ + bool incPressure = false; + /* set to true when a physical register is defined */ + bool hasPreg = false; + /* whether the special register for native calls is used */ + bool hasNativeCallRegister = false; +}; +} /* namespace maplebe */ +
+#endif /* MAPLEBE_INCLUDE_CG_PRESSURE_H */
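
In use, a scheduler raises a class's pressure at every def and lowers it at a value's last use, comparing the per-class peak against the class's register limit. A standalone sketch of that bookkeeping (the two-class split and the event sequence are illustrative, not RegPressure's real driver):

```
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

enum RegClass : uint32_t { kIntClass = 0, kFpClass = 1, kClassNum = 2 };

int main() {
    std::vector<int32_t> pressure(kClassNum, 0);
    std::vector<int32_t> peak(kClassNum, 0);

    auto def = [&](RegClass c) {      /* IncPressureByIndex analogue */
        peak[c] = std::max(peak[c], ++pressure[c]);
    };
    auto lastUse = [&](RegClass c) {  /* DecPressureByIndex analogue */
        --pressure[c];
    };

    def(kIntClass);      /* v0 defined */
    def(kIntClass);      /* v1 defined: int pressure peaks at 2 */
    lastUse(kIntClass);  /* v0 dies */
    def(kFpClass);       /* f0 defined */

    assert(peak[kIntClass] == 2 && peak[kFpClass] == 1);
    return 0;
}
```
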
diff --git a/ecmascript/mapleall/maple_be/include/cg/proepilog.h b/ecmascript/mapleall/maple_be/include/cg/proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..6584a7338e88ccb8658b872bb04ba2a88fecd54e --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/proepilog.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_PROEPILOG_H +#include "cg_phase.h" +#include "cgfunc.h" +#include "insn.h" +
+namespace maplebe { +class GenProEpilog { + public: + explicit GenProEpilog(CGFunc &func) : cgFunc(func) {} +
+ virtual ~GenProEpilog() = default; +
+ virtual void Run() {} +
+ std::string PhaseName() const { + return "generateproepilog"; + } +
+ virtual bool TailCallOpt() { + return false; + } +
+ virtual bool NeedProEpilog() { + return true; + } +
+ /* CFI related routines */ + int64 GetOffsetFromCFA() const { + return offsetFromCfa; + } +
+ /* add increment (can be negative) and return the new value */ + int64 AddtoOffsetFromCFA(int64 delta) { + offsetFromCfa += delta; + return offsetFromCfa; + } +
+ Insn *InsertCFIDefCfaOffset(int32 &cfiOffset, Insn &insertAfter); /* cfiOffset in-out */ +
+ protected: + /* check whether the current function needs stack-protector code */ + void NeedStackProtect(); + /* check whether a type includes an array */ + bool IncludeArray(const MIRType &type) const; +
+ CGFunc &cgFunc; + int64 offsetFromCfa = 0; /* SP offset from Call Frame Address */ + bool stackProtect = false; +}; +} /* namespace maplebe */ +
+#endif /* MAPLEBE_INCLUDE_CG_PROEPILOG_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/ra_opt.h b/ecmascript/mapleall/maple_be/include/cg/ra_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..4129db007591c18b0230eca8ee13a32dc5d4914f --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/ra_opt.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_RAOPT_H +#define MAPLEBE_INCLUDE_CG_RAOPT_H +
+#include "cgfunc.h" +#include "cg_phase.h" +
+namespace maplebe { +class RaOpt { + public: + RaOpt(CGFunc &func, MemPool &pool) : cgFunc(&func), memPool(&pool) {} +
+ virtual ~RaOpt() = default; +
+ virtual void Run() {} +
+ std::string PhaseName() const { + return "raopt"; + } +
+ const CGFunc *GetCGFunc() const { + return cgFunc; + } + const MemPool *GetMemPool() const { + return memPool; + } +
+ protected: + CGFunc *cgFunc; + MemPool *memPool; +}; +
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgRaOpt, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ +
+#endif /* MAPLEBE_INCLUDE_CG_RAOPT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/reaching.h b/ecmascript/mapleall/maple_be/include/cg/reaching.h new file mode 100644 index 0000000000000000000000000000000000000000..e08958658ebeea255fc336cc5981596abd4dbfde --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/reaching.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REACHING_H +#define MAPLEBE_INCLUDE_CG_REACHING_H +
+#include "cg_phase.h" +#include "cgbb.h" +#include "datainfo.h" +#include "maple_phase.h" +
+namespace maplebe { +enum VisitStatus : uint8 { + kNotVisited, + kNormalVisited, + kEHVisited +}; +
+enum AnalysisType : uint8 { + kRDRegAnalysis = 1, + kRDMemAnalysis = 2, + kRDAllAnalysis = 3 +}; +
+enum DumpType : uint32 { + kDumpAll = 0xFFF, + kDumpRegGen = 0x001, + kDumpRegUse = 0x002, + kDumpRegIn = 0x004, + kDumpRegOut = 0x008, + kDumpMemGen = 0x010, + kDumpMemIn = 0x020, + kDumpMemOut = 0x040, + kDumpMemUse = 0x080, + kDumpBBCGIR = 0x100 +}; +
+class ReachingDefinition : public AnalysisResult { + public: + ReachingDefinition(CGFunc &func, MemPool &memPool); + ~ReachingDefinition() override = default; + void AnalysisStart(); + void Dump(uint32 flag) const; + void DumpInfo(const BB &bb, DumpType flag) const; + void DumpBBCGIR(const BB &bb) const; + void ClearDefUseInfo(); + void UpdateInOut(BB &changedBB); + void UpdateInOut(BB &changedBB, bool isReg); + void SetAnalysisMode(AnalysisType analysisMode) { + mode = analysisMode; + } +
+ bool OnlyAnalysisReg() const { + return mode == kRDRegAnalysis; + } +
+ uint32 GetMaxInsnNO() const { + return maxInsnNO; + } +
+ size_t GetRegSize(const BB &bb) const { + return regUse[bb.GetId()]->Size(); + } +
+ bool CheckRegGen(const BB &bb, uint32 regNO) const { + return regGen[bb.GetId()]->TestBit(regNO); + } +
+ void EnlargeRegCapacity(uint32 size); + bool IsFrameReg(const Operand &opnd) const; + InsnSet FindUseForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const; + bool RegIsUsedIncaller(uint32 regNO, Insn &startInsn, Insn &endInsn) const; + bool CheckRegLiveinReturnBB(uint32 regNO, const BB &bb) const; + bool RegIsLiveBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn, bool isBack = false, + bool isFirstNo = false) const; + bool RegIsUsedOrDefBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn) const; + bool IsLiveInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, std::vector<bool> &visitedBB, + bool isFirstNo = false) const; + bool IsUseOrDefInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, std::vector<bool> &visitedBB) const; + bool IsUseOrDefBetweenInsn(uint32 regNO, const BB &curBB, const Insn &startInsn, Insn &endInsn) const; + bool HasCallBetweenDefUse(const Insn &defInsn, const Insn &useInsn) const; + std::vector<Insn*> FindRegDefBetweenInsn( + uint32 regNO, Insn *startInsn, Insn *endInsn, bool findAll = false, bool analysisDone = true) const; + virtual void InitGenUse(BB &bb, bool firstTime = true) = 0; + virtual InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const = 0; + virtual InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const = 0; + virtual std::vector<Insn*> FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const = 0; + virtual std::vector<Insn*> FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const = 0; + virtual bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet &useInsnSet) const = 0; + virtual bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn
*startInsn, Insn *endInsn, BB* movBB) const = 0; + virtual bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &useInsnSet) const = 0; + virtual InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const = 0; +
+ static constexpr int32 kWordByteNum = 4; + static constexpr int32 kDoubleWordByteNum = 8; + /* to save storage space, the offset of stack memory is divided by 4 and then saved in DataInfo */ + static constexpr int32 kMemZoomSize = 4; + /* number insns at intervals of 3, so that newly inserted insns can be numbered without collisions */ + static constexpr uint32 kInsnNoInterval = 3; + bool HasCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn) const; + virtual bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const = 0; + protected: + virtual void InitStartGen() = 0; + virtual void InitEhDefine(BB &bb) = 0; + virtual void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) = 0; + virtual void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) = 0; + virtual void GenAllCallerSavedRegs(BB &bb, Insn &insn) = 0; + virtual void AddRetPseudoInsn(BB &bb) = 0; + virtual void AddRetPseudoInsns() = 0; + virtual int32 GetStackSize() const = 0; + virtual bool IsCallerSavedReg(uint32 regNO) const = 0; + virtual void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const = 0; + virtual void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const = 0; + virtual bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const = 0; + virtual void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector<VisitStatus> &visitedBB, + InsnSet &defInsnSet) const = 0; + virtual void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector<VisitStatus> &visitedBB, + InsnSet &defInsnSet) const = 0; + void DFSFindUseForMemOpnd(const BB &startBB, uint32 offset, std::vector<bool> &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc) const; +
+ CGFunc *cgFunc; + MapleAllocator rdAlloc; + StackMemPool &stackMp; + MapleVector<Insn*> pseudoInsns; + AnalysisType mode = kRDRegAnalysis; + BB *firstCleanUpBB = nullptr; + std::vector<DataInfo*> regGen; + std::vector<DataInfo*> regUse; + std::vector<DataInfo*> regIn; + std::vector<DataInfo*> regOut; + std::vector<DataInfo*> memGen; + std::vector<DataInfo*> memUse; + std::vector<DataInfo*> memIn; + std::vector<DataInfo*> memOut; + const uint32 kMaxBBNum; + private: + void Initialize(); + void InitDataSize(); + void BuildInOutForFuncBody(); + void BuildInOutForCleanUpBB(); + void BuildInOutForCleanUpBB(bool isReg, const std::set<uint32> &index); + void InitRegAndMemInfo(const BB &bb); + void InitOut(const BB &bb); + bool GenerateIn(const BB &bb); + bool GenerateIn(const BB &bb, const std::set<uint32> &infoIndex, const bool isReg); + bool GenerateOut(const BB &bb); + bool GenerateOut(const BB &bb, const std::set<uint32> &infoIndex, const bool isReg); + bool GenerateInForFirstCleanUpBB(); + bool GenerateInForFirstCleanUpBB(bool isReg, const std::set<uint32> &infoIndex); + void DFSFindUseForRegOpnd(const BB &startBB, uint32 regNO, std::vector<bool> &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc) const; + bool RegIsUsedInOtherBB(const BB &startBB, uint32 regNO, std::vector<bool> &visitedBB) const; + bool RegHasUsePoint(uint32 regNO, Insn &regDefInsn) const; + bool CanReachEndBBFromCurrentBB(const BB &currentBB, const BB &endBB, std::vector<bool> &traversedBBSet) const; +
+ bool HasCallInPath(const BB &startBB, const BB &endBB, std::vector<bool> &visitedBB) const; + bool RegIsUsedInCleanUpBB(uint32 regNO) const; +
+ MapleSet<uint32> normalBBSet; + MapleSet<uint32> cleanUpBBSet; + uint32 maxInsnNO = 0; +}; +
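
The per-BB gen/use/in/out vectors converge by the standard reaching-definitions fixpoint, which is what the private `GenerateIn`/`GenerateOut` helpers iterate until nothing changes. A standalone sketch of that iteration over plain bitsets (the `Block` type and the fixed vector width are illustrative, not the DataInfo representation):

```
#include <bitset>
#include <cstddef>
#include <vector>

constexpr std::size_t kBits = 128;  /* illustrative def-vector width */

struct Block {
    std::vector<int> preds;  /* predecessor indices */
    std::bitset<kBits> gen, kill, in, out;
};

/* in[B] = union of out[P] over preds P; out[B] = gen[B] | (in[B] & ~kill[B]) */
void SolveReachingDefs(std::vector<Block> &blocks) {
    bool changed = true;
    while (changed) {  /* iterate to a fixpoint */
        changed = false;
        for (auto &b : blocks) {
            std::bitset<kBits> newIn;
            for (int p : b.preds) {
                newIn |= blocks[static_cast<std::size_t>(p)].out;  /* meet: union */
            }
            std::bitset<kBits> newOut = b.gen | (newIn & ~b.kill);
            if (newIn != b.in || newOut != b.out) {
                b.in = newIn;
                b.out = newOut;
                changed = true;
            }
        }
    }
}
```
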
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgReachingDefinition, maplebe::CGFunc) + ReachingDefinition *GetResult() { + return reachingDef; + } + ReachingDefinition *reachingDef = nullptr; +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgClearRDInfo, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REACHING_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/reg_alloc.h b/ecmascript/mapleall/maple_be/include/cg/reg_alloc.h new file mode 100644 index 0000000000000000000000000000000000000000..2b2a8c349848ac0d1a77477cb54c6477d2bc32cf --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/reg_alloc.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_H + +#include +#include +#include "isa.h" +#include "cg_phase.h" +#include "maple_phase_manager.h" + +namespace maplebe { +class VirtualRegNode { + public: + VirtualRegNode() = default; + + VirtualRegNode(RegType type, uint32 size) : regType(type), size(size), regNO(kInvalidRegNO) {} + + virtual ~VirtualRegNode() = default; + + void AssignPhysicalRegister(regno_t phyRegNO) { + regNO = phyRegNO; + } + + RegType GetType() const { + return regType; + } + + uint32 GetSize() const { + return size; + } + + private: + RegType regType = kRegTyUndef; + uint32 size = 0; /* size in bytes */ + regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ +}; + +class RegAllocator { + public: + RegAllocator(CGFunc &tempCGFunc, MemPool &memPool) : + cgFunc(&tempCGFunc), + memPool(&memPool), + alloc(&memPool) {} + + virtual ~RegAllocator() = default; + + virtual bool AllocateRegisters() = 0; + + bool IsYieldPointReg(regno_t regNO) const; + bool IsUntouchableReg(regno_t regNO) const; + + virtual std::string PhaseName() const { + return "regalloc"; + } + + protected: + CGFunc *cgFunc; + MemPool *memPool; + MapleAllocator alloc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgRegAlloc, CGFunc) +OVERRIDE_DEPENDENCE +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/reg_alloc_basic.h b/ecmascript/mapleall/maple_be/include/cg/reg_alloc_basic.h new file mode 100644 index 0000000000000000000000000000000000000000..d4893e4b2958d0a70d7824131b6312a8cc9ae0c1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/reg_alloc_basic.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H +
+#include "reg_alloc.h" +#include "operand.h" +#include "cgfunc.h" +
+namespace maplebe { +class DefaultO0RegAllocator : public RegAllocator { + public: + DefaultO0RegAllocator(CGFunc &cgFunc, MemPool &memPool) + : RegAllocator(cgFunc, memPool), + calleeSaveUsed(alloc.Adapter()), + availRegSet(alloc.Adapter()), + regMap(std::less<regno_t>(), alloc.Adapter()), + liveReg(std::less<regno_t>(), alloc.Adapter()), + allocatedSet(std::less<Operand*>(), alloc.Adapter()), + regLiveness(alloc.Adapter()), + rememberRegs(alloc.Adapter()) { + regInfo = cgFunc.GetTargetRegInfo(); + availRegSet.resize(regInfo->GetAllRegNum()); + } +
+ ~DefaultO0RegAllocator() override { + regInfo = nullptr; + } +
+ bool AllocateRegisters() override; +
+ void InitAvailReg(); +
+ bool AllocatePhysicalRegister(const RegOperand &opnd); + void ReleaseReg(regno_t reg); + void ReleaseReg(const RegOperand &regOpnd); + void GetPhysicalRegisterBank(RegType regType, regno_t &start, regno_t &end) const; + void AllocHandleDestList(Insn &insn, Operand &opnd, uint32 idx); + void AllocHandleDest(Insn &insn, Operand &opnd, uint32 idx); + void AllocHandleSrcList(Insn &insn, Operand &opnd, uint32 idx); + void AllocHandleSrc(Insn &insn, Operand &opnd, uint32 idx); + bool IsSpecialReg(regno_t reg) const; + void SaveCalleeSavedReg(const RegOperand &opnd); +
+ protected: + Operand *HandleRegOpnd(Operand &opnd); + Operand *HandleMemOpnd(Operand &opnd); + Operand *AllocSrcOpnd(Operand &opnd); + Operand *AllocDestOpnd(Operand &opnd, const Insn &insn); + uint32 GetRegLivenessId(regno_t regNo); + bool CheckRangesOverlap(const std::pair<uint32, uint32> &range1, + const MapleVector<std::pair<uint32, uint32>> &ranges2) const; + void SetupRegLiveness(BB *bb); + void SetupRegLiveness(MemOperand &opnd, uint32 insnId); + void SetupRegLiveness(ListOperand &opnd, uint32 insnId, bool isDef); + void SetupRegLiveness(RegOperand &opnd, uint32 insnId, bool isDef); +
+ RegisterInfo *regInfo = nullptr; + MapleSet<regno_t> calleeSaveUsed; + MapleVector<bool> availRegSet; + MapleMap<regno_t, regno_t> regMap; /* virtual-register-to-physical-register map */ + MapleSet<regno_t> liveReg; /* a set of currently live physical registers */ + MapleSet<Operand*> allocatedSet; /* already allocated */ + MapleMap<regno_t, MapleVector<std::pair<uint32, uint32>>> regLiveness; + MapleVector<regno_t> rememberRegs; +}; +} /* namespace maplebe */ +
+#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/reg_alloc_lsra.h b/ecmascript/mapleall/maple_be/include/cg/reg_alloc_lsra.h new file mode 100644 index 0000000000000000000000000000000000000000..b80c0f6eb14bd20fdf26a17af9e93fbbc83a2057 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/reg_alloc_lsra.h @@ -0,0 +1,739 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_LSRA_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_LSRA_H +#include "reg_alloc.h" +#include "cgfunc.h" +#include "optimize_common.h" +
+namespace maplebe { +class LSRALinearScanRegAllocator : public RegAllocator { + enum RegInCatch : uint8 { + /* + * RA does not want to allocate certain registers if a live interval is + * only inside of catch blocks. + */ + kRegCatchNotInit = 0, /* uninitialized state */ + kRegNOtInCatch = 1, /* interval is partly or fully outside of catch */ + kRegAllInCatch = 2, /* interval is completely inside catch */ + }; +
+ enum RegInCleanup : uint8 { + /* Similar to RegInCatch. */ + kRegCleanupNotInit = 0, /* uninitialized state */ + kRegAllInFirstbb = 1, /* interval is all in the first bb */ + kRegAllOutCleanup = 2, /* interval is all outside of cleanup; must be in a normal bb, may be in the first bb. */ + kRegInCleanupAndFirstbb = 3, /* interval is in cleanup and the first bb. */ + kRegInCleanupAndNormalbb = 4, /* interval is in cleanup and a non-first bb. */ + kRegAllInCleanup = 5 /* interval is inside cleanup, except for bb 1 */ + }; +
+ class LinearRange { + public: + LinearRange() = default; +
+ LinearRange(uint32 start, uint32 end) : start(start), end(end) {} +
+ ~LinearRange() = default; +
+ uint32 GetStart() const { + return start; + } +
+ void SetStart(uint32 position) { + start = position; + } +
+ uint32 GetEnd() const { + return end; + } +
+ void SetEnd(uint32 position) { + end = position; + } +
+ uint32 GetEhStart() const { + return ehStart; + } +
+ void SetEhStart(uint32 position) { + ehStart = position; + } +
+ private: + uint32 start = 0; + uint32 end = 0; + uint32 ehStart = 0; + }; +
+ class LiveInterval { + public: + explicit LiveInterval(MemPool &memPool) + : memPool(&memPool), + alloc(&memPool), + ranges(alloc.Adapter()), + usePositions(alloc.Adapter()) {} +
+ virtual ~LiveInterval() = default; +
+ void AddRange(uint32 from, uint32 to); + void AddUsePos(uint32 pos); +
+ LiveInterval *SplitAt(uint32 pos); +
+ LiveInterval *SplitBetween(const BB &startBB, const BB &endBB); +
+ uint32 GetUsePosAfter(uint32 pos) const; +
+ void InitRangeFinder() { + rangeFinder = ranges.begin(); + } +
+ MapleVector<LinearRange>::iterator FindPosRange(uint32 pos); +
+ uint32 GetSplitPos() const { + return splitSafePos; + } +
+ void SetSplitPos(uint32 position) { + splitSafePos = position; + } +
+ const Insn *GetIsCall() const { + return isCall; + } +
+ void SetIsCall(Insn &newIsCall) { + isCall = &newIsCall; + } +
+ uint32 GetPhysUse() const { + return physUse; + } +
+ void SetPhysUse(uint32 newPhysUse) { + physUse = newPhysUse; + } +
+ uint32 GetLastUse() const { + return lastUse; + } +
+ void SetLastUse(uint32 newLastUse) { + lastUse = newLastUse; + } +
+ uint32 GetRegNO() const { + return regNO; + } +
+ void SetRegNO(uint32 newRegNO) { + regNO = newRegNO; + } +
+ uint32 GetAssignedReg() const { + return assignedReg; + } +
+ void SetAssignedReg(uint32 newAssignedReg) { + assignedReg = newAssignedReg; + } +
+ uint32 GetFirstDef() const { + return firstDef; + } +
+ void SetFirstDef(uint32 newFirstDef) { + firstDef = newFirstDef; + } +
+ uint32 GetStackSlot() const { + return stackSlot; + } +
+ void SetStackSlot(uint32
newStkSlot) { + stackSlot = newStkSlot; + } +
+ RegType GetRegType() const { + return regType; + } +
+ void SetRegType(RegType newRegType) { + regType = newRegType; + } +
+ uint32 GetRegSize() const { + return regSize; + } +
+ void SetRegSize(uint32 size) { + if (size > regSize) { + regSize = size; + } + } +
+ uint32 GetFirstAcrossedCall() const { + return firstAcrossedCall; + } +
+ void SetFirstAcrossedCall(uint32 newFirstAcrossedCall) { + firstAcrossedCall = newFirstAcrossedCall; + } +
+ bool IsEndByCall() const { + return endByCall; + } +
+ void SetEndByMov(bool isEndByMov) { + endByMov = isEndByMov; + } +
+ bool IsEndByMov() const { + return endByMov; + } +
+ void SetPrefer(uint32 preg) { + prefer = preg; + } +
+ uint32 GetPrefer() const { + return prefer; + } +
+ bool IsUseBeforeDef() const { + return useBeforeDef; + } +
+ void SetUseBeforeDef(bool newUseBeforeDef) { + useBeforeDef = newUseBeforeDef; + } +
+ bool IsShouldSave() const { + return shouldSave; + } +
+ void SetShouldSave(bool newShouldSave) { + shouldSave = newShouldSave; + } +
+ bool IsMultiUseInBB() const { + return multiUseInBB; + } +
+ void SetMultiUseInBB(bool newMultiUseInBB) { + multiUseInBB = newMultiUseInBB; + } +
+ bool IsThrowVal() const { + return isThrowVal; + } +
+ bool IsCallerSpilled() const { + return isCallerSpilled; + } +
+ void SetIsCallerSpilled(bool newIsCallerSpilled) { + isCallerSpilled = newIsCallerSpilled; + } +
+ bool IsMustAllocate() const { + return mustAllocate; + } +
+ void SetMustAllocate(bool newMustAllocate) { + mustAllocate = newMustAllocate; + } +
+ bool IsSplitForbidden() const { + return splitForbidden; + } +
+ void SetSplitForbid(bool isForbidden) { + splitForbidden = isForbidden; + } +
+ uint32 GetRefCount() const { + return refCount; + } +
+ void SetRefCount(uint32 newRefCount) { + refCount = newRefCount; + } +
+ void AddUsePositions(uint32 insertId) { + (void)usePositions.push_back(insertId); + } +
+ LiveInterval *GetSplitNext() { + return splitNext; + } +
+ const LiveInterval *GetSplitNext() const { + return splitNext; + } +
+ const LiveInterval *GetSplitParent() const { + return splitParent; + } + void SetSplitParent(LiveInterval &li) { + splitParent = &li; + } +
+ float GetPriority() const { + return priority; + } +
+ void SetOverlapPhyRegSet(regno_t regNo) { + overlapPhyRegSet.insert(regNo); + } +
+ bool IsOverlapPhyReg(regno_t regNo) { + return overlapPhyRegSet.find(regNo) != overlapPhyRegSet.end(); + } +
+ void SetPriority(float newPriority) { + priority = newPriority; + } +
+ const MapleVector<LinearRange> &GetRanges() const { + return ranges; + } +
+ MapleVector<LinearRange> &GetRanges() { + return ranges; + } +
+ size_t GetRangesSize() const { + return ranges.size(); + } +
+ const LiveInterval *GetLiParent() const { + return liveParent; + } +
+ void SetLiParent(LiveInterval *newLiParent) { + liveParent = newLiParent; + } +
+ void SetLiParentChild(LiveInterval *child) const { + liveParent->SetLiChild(child); + } +
+ const LiveInterval *GetLiChild() const { + return liveChild; + } +
+ void SetLiChild(LiveInterval *newLiChild) { + liveChild = newLiChild; + } +
+ uint32 GetResultCount() const { + return resultCount; + } +
+ void SetResultCount(uint32 newResultCount) { + resultCount = newResultCount; + } +
+ void SetInCatchState() { + /* + * Once in kRegNOtInCatch the state is irreversible: once any part of an interval + * is outside of a catch block, the interval is not completely inside a catch.
+ */ + if (inCatchState == kRegNOtInCatch) { + return; + } + inCatchState = kRegAllInCatch; + } +
+ void SetNotInCatchState() { + inCatchState = kRegNOtInCatch; + } +
+ bool IsAllInCatch() const { + return (inCatchState == kRegAllInCatch); + } +
+ void SetInCleanupState() { + switch (inCleanUpState) { + case kRegCleanupNotInit: + inCleanUpState = kRegAllInCleanup; + break; + case kRegAllInFirstbb: + inCleanUpState = kRegInCleanupAndFirstbb; + break; + case kRegAllOutCleanup: + inCleanUpState = kRegInCleanupAndNormalbb; + break; + case kRegInCleanupAndFirstbb: + break; + case kRegInCleanupAndNormalbb: + break; + case kRegAllInCleanup: + break; + default: + DEBUG_ASSERT(false, "CG Internal error."); + break; + } + } +
+ void SetNotInCleanupState(bool isFirstBB) { + switch (inCleanUpState) { + case kRegCleanupNotInit: { + if (isFirstBB) { + inCleanUpState = kRegAllInFirstbb; + } else { + inCleanUpState = kRegAllOutCleanup; + } + break; + } + case kRegAllInFirstbb: { + if (!isFirstBB) { + inCleanUpState = kRegAllOutCleanup; + } + break; + } + case kRegAllOutCleanup: + break; + case kRegInCleanupAndFirstbb: { + if (!isFirstBB) { + inCleanUpState = kRegInCleanupAndNormalbb; + } + break; + } + case kRegInCleanupAndNormalbb: + break; + case kRegAllInCleanup: { + if (isFirstBB) { + inCleanUpState = kRegInCleanupAndFirstbb; + } else { + inCleanUpState = kRegInCleanupAndNormalbb; + } + break; + } + default: + DEBUG_ASSERT(false, "CG Internal error."); + break; + } + } +
+ bool IsAllInCleanupOrFirstBB() const { + return (inCleanUpState == kRegAllInCleanup) || (inCleanUpState == kRegInCleanupAndFirstbb); + } +
+ bool IsAllOutCleanup() const { + return (inCleanUpState == kRegAllInFirstbb) || (inCleanUpState == kRegAllOutCleanup); + } +
+ private: + MemPool *memPool; + MapleAllocator alloc; + Insn *isCall = nullptr; + uint32 firstDef = 0; + uint32 lastUse = 0; + uint32 splitSafePos = 0; /* splitNext's start position */ + uint32 physUse = 0; + uint32 regNO = 0; + /* physical register, using cg defined reg based on R0/V0. */ + uint32 assignedReg = 0; + uint32 stackSlot = -1; + RegType regType = kRegTyUndef; + uint32 regSize = 0; + uint32 firstAcrossedCall = 0; + bool endByCall = false; + bool endByMov = false; /* do move coalesce */ + uint32 prefer = 0; /* preferred register */ + bool useBeforeDef = false; + bool shouldSave = false; + bool multiUseInBB = false; /* vreg has more than 1 use in bb */ + bool isThrowVal = false; + bool isCallerSpilled = false; /* only for R0(R1?) which are used for explicit incoming value of throwval; */ + bool mustAllocate = false; /* The register cannot be spilled (clinit pair) */ + bool splitForbidden = false; /* can not split if true */ + uint32 refCount = 0; + float priority = 0.0; + MapleVector<LinearRange> ranges; + MapleVector<LinearRange>::iterator rangeFinder; + std::unordered_set<regno_t> overlapPhyRegSet; + MapleVector<uint32> usePositions; + LiveInterval *splitNext = nullptr; /* next split part */ + LiveInterval *splitParent = nullptr; /* parent split part */ + LiveInterval *liveParent = nullptr; /* Current li is in another li's hole. */ + LiveInterval *liveChild = nullptr; /* Another li is in current li's hole.
*/ + uint32 resultCount = 0; /* number of times this vreg has been written */ + uint8 inCatchState = kRegCatchNotInit; /* part or all of live interval is outside of catch blocks */ + uint8 inCleanUpState = kRegCleanupNotInit; /* part or all of live interval is outside of cleanup blocks */ + }; + + /* used to resolve Phi and Split */ + class MoveInfo { + public: + MoveInfo() = default; + MoveInfo(LiveInterval &fromLi, LiveInterval &toLi, BB &bb, bool isEnd) + : fromLi(&fromLi), + toLi(&toLi), + bb(&bb), + isEnd(isEnd) {} + ~MoveInfo() = default; + + void Init(LiveInterval &firstLi, LiveInterval &secondLi, BB &targetBB, bool endMove) { + fromLi = &firstLi; + toLi = &secondLi; + bb = &targetBB; + isEnd = endMove; + } + + const LiveInterval *GetFromLi() const { + return fromLi; + } + + const LiveInterval *GetToLi() const { + return toLi; + } + + BB *GetBB() { + return bb; + } + + const BB *GetBB() const { + return bb; + } + + bool IsEndMove() const { + return isEnd; + } + + void Dump() const { + LogInfo::MapleLogger() << "from:R" << fromLi->GetRegNO() << " to:R" << toLi->GetRegNO() << " in " + << bb->GetId() << "\n"; + } + + private: + LiveInterval *fromLi = nullptr; + LiveInterval *toLi = nullptr; + BB *bb = nullptr; + bool isEnd = true; + }; + + struct ActiveCmp { + bool operator()(const LiveInterval *lhs, const LiveInterval *rhs) const { + CHECK_NULL_FATAL(lhs); + CHECK_NULL_FATAL(rhs); + /* elements considered equal if return false */ + if (lhs == rhs) { + return false; + } + if (lhs->GetFirstDef() == rhs->GetFirstDef() && lhs->GetLastUse() == rhs->GetLastUse() && + lhs->GetRegNO() == rhs->GetRegNO() && lhs->GetRegType() == rhs->GetRegType() && + lhs->GetAssignedReg() == rhs->GetAssignedReg()) { + return false; + } + if (lhs->GetFirstDef() == rhs->GetFirstDef() && lhs->GetLastUse() == rhs->GetLastUse() && + lhs->GetPhysUse() == rhs->GetPhysUse() && lhs->GetRegType() == rhs->GetRegType()) { + return lhs->GetRegNO() < rhs->GetRegNO(); + } + if (lhs->GetPhysUse() != 0 && rhs->GetPhysUse() != 0) { + if (lhs->GetFirstDef() == rhs->GetFirstDef()) { + return lhs->GetPhysUse() < rhs->GetPhysUse(); + } else { + return lhs->GetFirstDef() < rhs->GetFirstDef(); + } + } + /* At this point, lhs != rhs */ + if (lhs->GetLastUse() == rhs->GetLastUse()) { + return lhs->GetFirstDef() <= rhs->GetFirstDef(); + } + return lhs->GetLastUse() < rhs->GetLastUse(); + } + }; + + public: + LSRALinearScanRegAllocator(CGFunc &cgFunc, MemPool &memPool, Bfs *bbSort) + : RegAllocator(cgFunc, memPool), + liveIntervalsArray(alloc.Adapter()), + initialQue(alloc.Adapter()), + intParamQueue(alloc.Adapter()), + fpParamQueue(alloc.Adapter()), + callQueue(alloc.Adapter()), + active(alloc.Adapter()), + freeUntilPos(alloc.Adapter()), + intCallerRegSet(alloc.Adapter()), + intCalleeRegSet(alloc.Adapter()), + intParamRegSet(alloc.Adapter()), + intSpillRegSet(alloc.Adapter()), + fpCallerRegSet(alloc.Adapter()), + fpCalleeRegSet(alloc.Adapter()), + fpParamRegSet(alloc.Adapter()), + fpSpillRegSet(alloc.Adapter()), + calleeUseCnt(alloc.Adapter()), + bfs(bbSort), + liQue(alloc.Adapter()), + splitPosMap(alloc.Adapter()), + splitInsnMap(alloc.Adapter()), + moveInfoVec(alloc.Adapter()) { + regInfo = cgFunc.GetTargetRegInfo(); + regInfo->Init(); + for (int32 i = 0; i < regInfo->GetIntRegs().size(); ++i) { + intParamQueue.push_back(initialQue); + } + for (int32 i = 0; i < regInfo->GetFpRegs().size(); ++i) { + fpParamQueue.push_back(initialQue); + } + firstIntReg = *regInfo->GetIntRegs().begin(); + firstFpReg = *regInfo->GetFpRegs().begin(); + } 
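The `ActiveCmp` comparator above determines the iteration order of the `active` set. One caveat worth noting: the final `return lhs->GetFirstDef() <= rhs->GetFirstDef();` branch can report both `a < b` and `b < a` for two distinct intervals that tie on last use and first def, which violates the strict weak ordering a sorted set requires. A minimal standalone sketch of the intended ordering, using a toy interval type rather than the MapleBE classes, with strict tie-breakers:

```
// Sketch of an ActiveCmp-style comparator: order the active set by interval
// end point, with strict tie-breakers so distinct intervals never compare
// equal both ways. Toy types, not the MapleBE LiveInterval.
#include <cstdint>
#include <iostream>
#include <set>

struct Interval {
    uint32_t regNO;
    uint32_t firstDef;
    uint32_t lastUse;
};

struct ActiveCmpSketch {
    bool operator()(const Interval *lhs, const Interval *rhs) const {
        // Primary key: lastUse, so the earliest-expiring interval comes first.
        if (lhs->lastUse != rhs->lastUse) {
            return lhs->lastUse < rhs->lastUse;
        }
        if (lhs->firstDef != rhs->firstDef) {
            return lhs->firstDef < rhs->firstDef;
        }
        // regNO is unique per interval, so equality only holds for lhs == rhs.
        return lhs->regNO < rhs->regNO;
    }
};

int main() {
    Interval a{100, 1, 9}, b{101, 2, 5}, c{102, 3, 12};
    std::set<const Interval*, ActiveCmpSketch> active{&a, &b, &c};
    // Intervals retire from the front as the scan position advances.
    for (const Interval *li : active) {
        std::cout << "R" << li->regNO << " ends at " << li->lastUse << "\n";
    }
    return 0;  // prints R101, R100, R102
}
```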
+ ~LSRALinearScanRegAllocator() override = default; + + bool AllocateRegisters() override; + void PreWork(); + bool CheckForReg(Operand &opnd, const Insn &insn, const LiveInterval &li, regno_t regNO, bool isDef) const; + void PrintRegSet(const MapleSet &set, const std::string &str) const; + void PrintLiveInterval(const LiveInterval &li, const std::string &str) const; + void PrintLiveRanges(const LiveInterval &li) const; + void PrintAllLiveRanges() const; + void PrintLiveRangesGraph() const; + void PrintParamQueue(const std::string &str); + void PrintCallQueue(const std::string &str) const; + void PrintActiveList(const std::string &str, uint32 len = 0) const; + void PrintActiveListSimple() const; + void PrintLiveIntervals() const; + void DebugCheckActiveList() const; + void InitFreeRegPool(); + void RecordCall(Insn &insn); + void RecordPhysRegs(const RegOperand ®Opnd, uint32 insnNum, bool isDef); + void UpdateLiveIntervalState(const BB &bb, LiveInterval &li) const; + void UpdateRegUsedInfo(LiveInterval &li, regno_t regNO); + void SetupLiveInterval(Operand &opnd, Insn &insn, bool isDef, uint32 &nUses); + void UpdateLiveIntervalByLiveIn(const BB &bb, uint32 insnNum); + void UpdateParamLiveIntervalByLiveIn(const BB &bb, uint32 insnNum); + void ComputeLiveIn(BB &bb, uint32 insnNum); + void ComputeLiveOut(BB &bb, uint32 insnNum); + void ComputeLiveIntervalForEachOperand(Insn &insn); + void ComputeLiveInterval(); + void FindLowestPrioInActive(LiveInterval *&targetLi, LiveInterval *li = nullptr, RegType regType = kRegTyInt); + void LiveIntervalAnalysis(); + void UpdateCallQueueAtRetirement(uint32 insnID); + void UpdateActiveAllocateInfo(const LiveInterval &li); + void UpdateParamAllocateInfo(const LiveInterval &li); + void RetireActive(LiveInterval &li, uint32 insnID); + void AssignPhysRegsForLi(LiveInterval &li); + LiveInterval *GetLiveIntervalAt(uint32 regNO, uint32 pos); + bool OpndNeedAllocation(const Insn &insn, Operand &opnd, bool isDef, uint32 insnNum); + void InsertParamToActive(Operand &opnd); + void InsertToActive(Operand &opnd, uint32 insnNum); + void ReleasePregToSet(const LiveInterval &li, uint32 preg); + void UpdateActiveAtRetirement(uint32 insnID); + void RetireFromActive(const Insn &insn); + void AssignPhysRegsForInsn(Insn &insn); + RegOperand *GetReplaceOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx, bool isDef); + RegOperand *GetReplaceUdOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx); + void ResolveSplitBBEdge(BB &bb); + void SetAllocMode(); + void CheckSpillCallee(); + void LinearScanRegAllocator(); + void FinalizeRegisters(); + void ResolveMoveVec(); + void SpillOperand(Insn &insn, Operand &opnd, bool isDef, uint32 spillIdx); + void SetOperandSpill(Operand &opnd); + regno_t HandleSpillForLi(LiveInterval &li); + RegOperand *HandleSpillForInsn(const Insn &insn, Operand &opnd); + MemOperand *GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, regno_t regNO, + bool &isOutOfRange, uint32 bitSize) const; + void InsertCallerSave(Insn &insn, Operand &opnd, bool isDef); + uint32 GetRegFromMask(uint32 mask, regno_t offset, const LiveInterval &li); + uint32 GetSpecialPhysRegPattern(const LiveInterval &li); + uint32 GetRegFromSet(MapleSet &set, regno_t offset, LiveInterval &li, regno_t forcedReg = 0) const; + uint32 AssignSpecialPhysRegPattern(const Insn &insn, LiveInterval &li); + uint32 FindAvailablePhyReg(LiveInterval &li); + uint32 AssignPhysRegs(LiveInterval &li); + void SetupIntervalRangesByOperand(Operand &opnd, const Insn &insn, uint32 blockFrom, bool isDef); + void 
BuildIntervalRangesForEachOperand(const Insn &insn, uint32 blockFrom);
+  void BuildIntervalRanges();
+  bool SplitLiveInterval(LiveInterval &li, uint32 pos);
+  void LiveIntervalQueueInsert(LiveInterval &li);
+  void ComputeLoopLiveIntervalPriority(const CGFuncLoops &loop);
+  void ComputeLoopLiveIntervalPriorityInInsn(const Insn &insn);
+  uint32 FillInHole(const LiveInterval &li);
+  void SetLiSpill(LiveInterval &li);
+
+ private:
+  uint32 FindAvailablePhyRegByFastAlloc(LiveInterval &li);
+  bool NeedSaveAcrossCall(LiveInterval &li);
+  uint32 FindAvailablePhyReg(LiveInterval &li, bool isIntReg);
+
+  RegisterInfo *regInfo = nullptr;
+  regno_t firstIntReg = 0;
+  regno_t firstFpReg = 0;
+
+  /* limits on the registers reserved for spilling */
+  static constexpr uint32 kMaxSpillRegNum = 3;
+  static constexpr uint32 kMaxFpSpill = 2;
+  MapleVector liveIntervalsArray;
+  MapleQueue initialQue;
+  using SingleQue = MapleQueue;
+  MapleVector intParamQueue;
+  MapleVector fpParamQueue;
+  MapleQueue callQueue;
+  MapleSet<LiveInterval*, ActiveCmp> active;
+  MapleSet<LiveInterval*, ActiveCmp>::iterator itFinded;
+  MapleVector freeUntilPos;
+
+  /* Change these into vectors so they can be added and deleted easily. */
+  MapleSet intCallerRegSet;   /* integer caller saved */
+  MapleSet intCalleeRegSet;   /* callee */
+  MapleSet intParamRegSet;    /* parameters */
+  MapleVector intSpillRegSet; /* integer regs put aside for spills */
+
+  /* bit masks for the corresponding register classes */
+  uint32 intCallerMask = 0;   /* bit mask for all possible caller int regs */
+  uint32 intCalleeMask = 0;   /* callee */
+  uint32 intParamMask = 0;    /* (physical-register) parameter */
+  MapleSet fpCallerRegSet;    /* float caller saved */
+  MapleSet fpCalleeRegSet;    /* callee */
+  MapleSet fpParamRegSet;     /* parameter */
+  MapleVector fpSpillRegSet;  /* float regs put aside for spills */
+  MapleVector calleeUseCnt;   /* number of times a callee reg is seen */
+  std::unordered_set loopBBRegSet;
+  Bfs *bfs = nullptr;
+  uint32 fpCallerMask = 0;    /* bit mask for all possible caller fp regs */
+  uint32 fpCalleeMask = 0;    /* callee */
+  uint32 fpParamMask = 0;     /* (physical-register) parameter */
+  uint32 intBBDefMask = 0;    /* locally which int physical reg is defined */
+  uint32 fpBBDefMask = 0;     /* locally which float physical reg is defined */
+  uint64 blockForbiddenMask = 0; /* bit mask for forbidden physical regs */
+  uint32 debugSpillCnt = 0;
+  std::vector regUsedInBB;
+  uint32 maxInsnNum = 0;
+  regno_t minVregNum = 0xFFFFFFFF;
+  regno_t maxVregNum = 0;
+  bool fastAlloc = false;
+  bool spillAll = false;
+  bool needExtraSpillReg = false;
+  bool isSpillZero = false;
+  bool shouldOptIntCallee = false;
+  bool shouldOptFpCallee = false;
+  uint64 spillCount = 0;
+  uint64 reloadCount = 0;
+  uint64 callerSaveSpillCount = 0;
+  uint64 callerSaveReloadCount = 0;
+  MapleQueue liQue;
+  MapleMultiMap splitPosMap;      /* LiveInterval split position */
+  MapleUnorderedMap splitInsnMap; /* split insn */
+  MapleVector moveInfoVec;        /* insertion of move(PHI&Split) is based on this */
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_LSRA_H */
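For orientation, here is a compact, self-contained sketch of the classic linear-scan algorithm (Poletto and Sarkar) that `LSRALinearScanRegAllocator` elaborates on with interval splitting, caller-save handling, and priorities. All names and types below are illustrative, not the MapleBE API:

```
// Classic linear scan: walk intervals sorted by start, retire expired
// actives, and when no register is free, spill whichever candidate lives
// longest. Toy types only; the real pass works on LiveInterval/MapleSet.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <string>
#include <vector>

struct Interval { uint32_t vreg, start, end; int phys = -1; bool spilled = false; };

void LinearScan(std::vector<Interval> &intervals, uint32_t numRegs) {
    std::sort(intervals.begin(), intervals.end(),
              [](const Interval &a, const Interval &b) { return a.start < b.start; });
    std::multimap<uint32_t, Interval*> active;  // keyed by interval end
    std::vector<int> freeRegs;
    for (int r = static_cast<int>(numRegs) - 1; r >= 0; --r) freeRegs.push_back(r);

    for (Interval &li : intervals) {
        // Retire intervals that ended before this one starts.
        for (auto it = active.begin(); it != active.end() && it->first < li.start;) {
            freeRegs.push_back(it->second->phys);
            it = active.erase(it);
        }
        if (!freeRegs.empty()) {
            li.phys = freeRegs.back();
            freeRegs.pop_back();
            active.emplace(li.end, &li);
        } else {
            // Spill whichever of {li, furthest-ending active} lives longest.
            auto last = std::prev(active.end());
            if (last->first > li.end) {
                li.phys = last->second->phys;
                last->second->spilled = true;
                last->second->phys = -1;
                active.erase(last);
                active.emplace(li.end, &li);
            } else {
                li.spilled = true;
            }
        }
    }
}

int main() {
    std::vector<Interval> ivs{{1, 0, 8}, {2, 1, 4}, {3, 2, 10}, {4, 5, 6}};
    LinearScan(ivs, 2);
    for (const Interval &li : ivs) {
        std::cout << "v" << li.vreg
                  << (li.spilled ? std::string(" spilled")
                                 : " -> r" + std::to_string(li.phys)) << "\n";
    }
    return 0;  // v1 -> r0, v2 -> r1, v3 spilled, v4 -> r1
}
```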
diff --git a/ecmascript/mapleall/maple_be/include/cg/reg_coalesce.h b/ecmascript/mapleall/maple_be/include/cg/reg_coalesce.h
new file mode 100644
index 0000000000000000000000000000000000000000..66e066c9acc459d2a40f9d27783c1b621aae2035
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/reg_coalesce.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_REGCOALESCE_H
+#define MAPLEBE_INCLUDE_CG_REGCOALESCE_H
+#include "live.h"
+
+namespace maplebe {
+
+using posPair = std::pair<uint32, uint32>;
+class LiveInterval {
+public:
+  explicit LiveInterval(MapleAllocator &allocator)
+      : ranges(allocator.Adapter()),
+        conflict(allocator.Adapter()),
+        defPoints(allocator.Adapter()),
+        usePoints(allocator.Adapter()),
+        alloc(allocator) {}
+
+  void IncNumCall() {
+    ++numCall;
+  }
+
+  MapleMap<uint32, MapleVector<posPair>> GetRanges() {
+    return ranges;
+  }
+
+  void AddRange(uint32 bbid, uint32 end, bool alreadyLive) {
+    auto it = ranges.find(bbid);
+    if (it == ranges.end()) {
+      MapleVector<posPair> posVec(alloc.Adapter());
+      posVec.emplace_back(std::pair<uint32, uint32>(end, end));
+      ranges.emplace(bbid, posVec);
+    } else {
+      MapleVector<posPair> &posVec = it->second;
+      if (alreadyLive) {
+        posPair lastPos = posVec[posVec.size() - 1];
+        posVec[posVec.size() - 1] = std::pair<uint32, uint32>(end, lastPos.second);
+      } else {
+        posVec.emplace_back(std::pair<uint32, uint32>(end, end));
+      }
+    }
+  }
+
+  void MergeRanges(LiveInterval &lr) {
+    auto lrDestRanges = lr.GetRanges();
+    for (auto destRange : lrDestRanges) {
+      uint32 bbid = destRange.first;
+      auto &destPosVec = destRange.second;
+      auto it = ranges.find(bbid);
+      if (it == ranges.end()) {
+        /* directly add it */
+        MapleVector<posPair> posVec(alloc.Adapter());
+        for (auto pos : destPosVec) {
+          posVec.emplace_back(std::pair<uint32, uint32>(pos.first, pos.second));
+        }
+        ranges.emplace(bbid, posVec);
+      } else {
+        /* merge in place; merged sub-ranges may still overlap afterwards */
+        auto &srcPosVec = it->second;
+        for (auto pos1 : destPosVec) {
+          bool merged = false;
+          for (auto &pos2 : srcPosVec) {
+            if (!((pos1.first < pos2.first && pos1.second < pos2.first)
+                || (pos2.first < pos1.second && pos2.second < pos1.first))) {
+              uint32 bgn = pos1.first < pos2.first ? pos1.first : pos2.first;
+              uint32 end = pos1.second > pos2.second ? pos1.second : pos2.second;
+              pos2 = std::pair<uint32, uint32>(bgn, end);
+              merged = true;
+            }
+          }
+          /* add it directly when there is no overlap */
+          if (!merged) {
+            srcPosVec.emplace_back(std::pair<uint32, uint32>(pos1.first, pos1.second));
+          }
+        }
+      }
+    }
+  }
+
+  void MergeConflict(LiveInterval &lr) {
+    for (auto reg : lr.conflict) {
+      conflict.insert(reg);
+    }
+  }
+
+  void MergeRefPoints(LiveInterval &lr) {
+    if (lr.GetDefPoint().size() != k1ByteSize) {
+      for (auto insn : lr.defPoints) {
+        defPoints.insert(insn);
+      }
+    }
+    for (auto insn : lr.usePoints) {
+      usePoints.insert(insn);
+    }
+  }
+
+  void AddConflict(regno_t val) {
+    conflict.insert(val);
+  }
+
+  MapleSet<regno_t> GetConflict() {
+    return conflict;
+  }
+
+  void AddRefPoint(Insn *val, bool isDef) {
+    if (isDef) {
+      defPoints.insert(val);
+    } else {
+      usePoints.insert(val);
+    }
+  }
+
+  InsnMapleSet &GetDefPoint() {
+    return defPoints;
+  }
+
+  InsnMapleSet &GetUsePoint() {
+    return usePoints;
+  }
+
+  bool IsConflictWith(regno_t val) {
+    return conflict.find(val) != conflict.end();
+  }
+
+  RegType GetRegType() const {
+    return regType;
+  }
+
+  void SetRegType(RegType val) {
+    this->regType = val;
+  }
+
+  regno_t GetRegNO() const {
+    return regno;
+  }
+
+  void SetRegNO(regno_t val) {
+    this->regno = val;
+  }
+
+  void Dump() {
+    std::cout << "R" << regno << ": ";
+    for (auto range : ranges) {
+      uint32 bbid = range.first;
+      std::cout << "BB" << bbid << ": < ";
+      for (auto pos : range.second) {
+        std::cout << "[" << pos.first << ", " << pos.second << ") ";
+      }
+      std::cout << " > ";
+    }
+    std::cout << "\n";
+  }
+  void DumpDefs() {
+    std::cout << "R" << regno << ": ";
+    for (auto def : defPoints) {
+      def->Dump();
+    }
+    std::cout << "\n";
+  }
+  void DumpUses() {
+    std::cout << "R" << regno << ": ";
+    for (auto use : usePoints) {
+      use->Dump();
+    }
+    std::cout << "\n";
+  }
+
+private:
+  MapleMap<uint32, MapleVector<posPair>> ranges;
+  MapleSet<regno_t> conflict;
+  InsnMapleSet defPoints;
+  InsnMapleSet usePoints;
+  uint32 numCall = 0;
+  RegType regType = kRegTyUndef;
+  regno_t regno = 0;
+  MapleAllocator &alloc;
+};
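`MergeRanges` above folds the per-bb `[begin, end]` position pairs of one interval into another, widening pairs that overlap. A standalone sketch of the same idea using plain std containers (the real code uses allocator-aware Maple containers and a slightly different overlap test):

```
// Sketch of per-bb live-range merging in the style of LiveInterval::MergeRanges:
// each bb maps to a list of [begin, end] position pairs, and merging unions
// overlapping pairs in place.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

using PosPair = std::pair<uint32_t, uint32_t>;
using RangeMap = std::map<uint32_t, std::vector<PosPair>>;  // bbid -> ranges

void MergeInto(RangeMap &dst, const RangeMap &src) {
    for (const auto &[bbid, srcVec] : src) {
        auto &dstVec = dst[bbid];  // creates an empty vector if bbid is absent
        for (const PosPair &p : srcVec) {
            bool merged = false;
            for (PosPair &q : dstVec) {
                // Overlapping: widen the existing pair to cover both.
                if (p.first <= q.second && q.first <= p.second) {
                    q = {std::min(p.first, q.first), std::max(p.second, q.second)};
                    merged = true;
                }
            }
            if (!merged) {
                dstVec.push_back(p);  // disjoint: append as a new range
            }
        }
    }
}

int main() {
    RangeMap a{{1, {{2, 6}}}}, b{{1, {{5, 9}}}, {2, {{1, 3}}}};
    MergeInto(a, b);
    for (const auto &[bbid, vec] : a) {
        for (const PosPair &p : vec) {
            std::cout << "BB" << bbid << " [" << p.first << ", " << p.second << "]\n";
        }
    }
    return 0;  // BB1 [2, 9], BB2 [1, 3]
}
```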
+
+class LiveIntervalAnalysis {
+ public:
+  LiveIntervalAnalysis(CGFunc &func, MemPool &memPool)
+      : cgFunc(&func),
+        memPool(&memPool),
+        alloc(&memPool),
+        vregIntervals(alloc.Adapter()) {}
+
+  virtual ~LiveIntervalAnalysis() {
+    cgFunc = nullptr;
+    memPool = nullptr;
+    bfs = nullptr;
+  }
+
+  virtual void ComputeLiveIntervals() = 0;
+  virtual void CoalesceRegisters() = 0;
+  void Run();
+  void Analysis();
+  void DoAnalysis();
+  void ClearBFS();
+  void Dump();
+  void CoalesceLiveIntervals(LiveInterval &lrDest, LiveInterval &lrSrc);
+  LiveInterval *GetLiveInterval(regno_t regno) {
+    auto it = vregIntervals.find(regno);
+    if (it == vregIntervals.end()) {
+      return nullptr;
+    } else {
+      return it->second;
+    }
+  }
+
+ protected:
+  CGFunc *cgFunc;
+  MemPool *memPool;
+  MapleAllocator alloc;
+  MapleMap<regno_t, LiveInterval*> vregIntervals;
+  Bfs *bfs = nullptr;
+  bool runAnalysis = false;
+};
+
+
+MAPLE_FUNC_PHASE_DECLARE(CgRegCoalesce, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CGliveIntervalAnalysis, maplebe::CGFunc)
+  LiveIntervalAnalysis *GetResult() {
+    return liveInterval;
+  }
+  LiveIntervalAnalysis *liveInterval = nullptr;
+  OVERRIDE_DEPENDENCE
+MAPLE_FUNC_PHASE_DECLARE_END
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_REGCOALESCE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/reg_info.h b/ecmascript/mapleall/maple_be/include/cg/reg_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..868af5655008bb83756229222cbd6dc870fbd5d7
--- /dev/null
+++ 
b/ecmascript/mapleall/maple_be/include/cg/reg_info.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_INFO_H +#define MAPLEBE_INCLUDE_CG_REG_INFO_H + +#include +#include +#include "isa.h" +#include "insn.h" + +namespace maplebe { + +class RegisterInfo { + public: + explicit RegisterInfo(MapleAllocator &mallocator): + allIntRegs(mallocator.Adapter()), + allFpRegs(mallocator.Adapter()), + allregs(mallocator.Adapter()) {} + + virtual ~RegisterInfo() { + cgFunc = nullptr; + } + + virtual void Init() = 0; + virtual void Fini() = 0; + void AddToAllRegs(regno_t regNo) { + (void)allregs.insert(regNo); + } + const MapleSet &GetAllRegs() const { + return allregs; + } + void AddToIntRegs(regno_t regNo) { + (void)allIntRegs.insert(regNo); + } + const MapleSet &GetIntRegs() const { + return allIntRegs; + } + void AddToFpRegs(regno_t regNo) { + (void)allFpRegs.insert(regNo); + } + const MapleSet &GetFpRegs() const { + return allFpRegs; + } + void SetCurrFunction(CGFunc &func) { + cgFunc = &func; + } + CGFunc *GetCurrFunction() const { + return cgFunc; + } + virtual RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag = 0) = 0; + virtual ListOperand *CreateListOperand() = 0; + virtual Insn *BuildMovInstruction(Operand &opnd0, Operand &opnd1) = 0; + virtual bool IsGPRegister(regno_t regNO) const = 0; + virtual bool IsPreAssignedReg(regno_t regNO) const = 0; + virtual uint32 GetIntParamRegIdx(regno_t regNO) const = 0; + virtual uint32 GetFpParamRegIdx(regno_t regNO) const = 0; + virtual bool IsSpecialReg(regno_t regno) const = 0; + virtual bool IsAvailableReg(regno_t regNO) const = 0; + virtual bool IsCalleeSavedReg(regno_t regno) const = 0; + virtual bool IsYieldPointReg(regno_t regNO) const = 0; + virtual bool IsUntouchableReg(uint32 regNO) const = 0; + virtual bool IsUnconcernedReg(regno_t regNO) const = 0; + virtual bool IsUnconcernedReg(const RegOperand ®Opnd) const = 0; + virtual bool IsVirtualRegister(const RegOperand ®Opnd) = 0; + virtual bool IsVirtualRegister(regno_t regno) = 0; + virtual void SaveCalleeSavedReg(MapleSet savedRegs) = 0; + virtual uint32 GetIntRegsParmsNum() = 0; + virtual uint32 GetIntRetRegsNum() = 0; + virtual uint32 GetFpRetRegsNum() = 0; + virtual uint32 GetFloatRegsParmsNum() = 0; + virtual regno_t GetLastParamsIntReg() = 0; + virtual regno_t GetLastParamsFpReg() = 0; + virtual regno_t GetIntRetReg(uint32 idx) = 0; + virtual regno_t GetFpRetReg(uint32 idx) = 0; + virtual uint32 GetReservedSpillReg() = 0; + virtual uint32 GetSecondReservedSpillReg() = 0; + virtual uint32 GetAllRegNum() = 0; + virtual uint32 GetNormalUseOperandNum() = 0; + virtual regno_t GetInvalidReg() = 0; + virtual bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) = 0; + virtual Insn *BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) = 0; + virtual Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) = 0; + virtual Insn 
*BuildCommentInsn(const std::string &comment) = 0; + virtual MemOperand *GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) = 0; + virtual MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) = 0; + virtual void FreeSpillRegMem(regno_t vrNum) = 0; + private: + MapleSet allIntRegs; + MapleSet allFpRegs; + MapleSet allregs; + CGFunc *cgFunc = nullptr; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REG_INFO_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/regsaves.h b/ecmascript/mapleall/maple_be/include/cg/regsaves.h new file mode 100644 index 0000000000000000000000000000000000000000..5f20313a9ec66c01957725e113a60512bcf47f45 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/regsaves.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REGSAVES_OPT_H +#define MAPLEBE_INCLUDE_CG_REGSAVES_OPT_H + +#include "cgfunc.h" +#include "cg_phase.h" + +namespace maplebe { +class RegSavesOpt { + public: + RegSavesOpt(CGFunc &func, MemPool &pool) + : cgFunc(&func), + memPool(&pool), + alloc(&pool) {} + + virtual ~RegSavesOpt() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "regsavesopt"; + } + + CGFunc *GetCGFunc() const { + return cgFunc; + } + + MemPool *GetMemPool() const { + return memPool; + } + + bool GetEnabledDebug() const { + return enabledDebug; + } + + void SetEnabledDebug(bool d) { + enabledDebug = d; + } + + protected: + CGFunc *cgFunc; + MemPool *memPool; + MapleAllocator alloc; + bool enabledDebug = false; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgRegSavesOpt, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REGSAVES_OPT_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/schedule.h b/ecmascript/mapleall/maple_be/include/cg/schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..4ebf6fde7071e4a4d0f040d02da0a53b44cc38e3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/schedule.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_SCHEDULE_H + +#include "insn.h" +#include "mad.h" +#include "dependence.h" +#include "live.h" + +namespace maplebe { +#define LIST_SCHED_DUMP_NEWPM CG_DEBUG_FUNC(f) +#define LIST_SCHED_DUMP_REF CG_DEBUG_FUNC(cgFunc) + +class RegPressureSchedule { + public: + RegPressureSchedule (CGFunc &func, MapleAllocator &alloc) + : cgFunc(func), liveReg(alloc.Adapter()), scheduledNode(alloc.Adapter()), + originalNodeSeries(alloc.Adapter()), readyList(alloc.Adapter()), + partialList(alloc.Adapter()), partialSet(alloc.Adapter()), + partialScheduledNode(alloc.Adapter()), optimisticScheduledNodes(alloc.Adapter()), + splitterIndexes(alloc.Adapter()),liveInRegNO(alloc.Adapter()), liveOutRegNO(alloc.Adapter()) {} + virtual ~RegPressureSchedule() = default; + + void InitBBInfo(BB &b, MemPool &memPool, const MapleVector &nodes); + void BuildPhyRegInfo(const std::vector ®NumVec); + void initPartialSplitters(const MapleVector &nodes); + void Init(const MapleVector &nodes); + void UpdateBBPressure(const DepNode &node); + void CalculatePressure(DepNode &node, regno_t reg, bool def); + void SortReadyList(); + static bool IsLastUse(const DepNode &node, regno_t regNO) ; + void ReCalculateDepNodePressure(DepNode &node); + void UpdateLiveReg(const DepNode &node, regno_t reg, bool def); + bool CanSchedule(const DepNode &node) const; + void UpdateReadyList(const DepNode &node); + void BruteUpdateReadyList(const DepNode &node, std::vector &changedToReady); + void RestoreReadyList(DepNode &node, std::vector &changedToReady); + void UpdatePriority(DepNode &node); + void CalculateMaxDepth(const MapleVector &nodes); + void CalculateNear(const DepNode &node); + static bool DepNodePriorityCmp(const DepNode *node1, const DepNode *node2); + DepNode *ChooseNode(); + void DoScheduling(MapleVector &nodes); + void HeuristicScheduling(MapleVector &nodes); + int CalculateRegisterPressure(MapleVector &nodes); + void PartialScheduling(MapleVector &nodes); + void BruteForceScheduling(); + void CalculatePredSize(DepNode &node); + void InitBruteForceScheduling(MapleVector &nodes); + void EmitSchedulingSeries(MapleVector &nodes); + private: + void DumpBBPressureInfo() const; + void DumpBBLiveInfo() const; + void DumpReadyList() const; + void DumpSelectInfo(const DepNode &node) const; + static void DumpDependencyInfo(const MapleVector &nodes); + void ReportScheduleError() const; + void ReportScheduleOutput() const; + RegType GetRegisterType(regno_t reg) const; + + CGFunc &cgFunc; + BB *bb = nullptr; + int32 *maxPressure = nullptr; + int32 *curPressure = nullptr; + MapleUnorderedSet liveReg; + /* save node that has been scheduled. */ + MapleVector scheduledNode; + MapleVector originalNodeSeries; + MapleVector readyList; + /* save partial nodes to be scheduled */ + MapleVector partialList; + MapleSet partialSet; + /* save partial nodes which have been scheduled. */ + MapleVector partialScheduledNode; + /* optimistic schedule series with minimum register pressure */ + MapleVector optimisticScheduledNodes; + /* save split points */ + MapleVector splitterIndexes; + /* save integer register pressure */ + std::vector integerRegisterPressureList; + /* save the amount of every type register. 
*/ + int32 *physicalRegNum = nullptr; + int32 maxPriority = 0; + int32 scheduleSeriesCount = 0; + /* live in register set */ + MapleSet liveInRegNO; + /* live out register set */ + MapleSet liveOutRegNO; + /* register pressure without pre-scheduling */ + int originalPressure = 0; + /* register pressure after pre-scheduling */ + int scheduledPressure = 0; + /* minimum pressure ever met */ + int minPressure = -1; +}; + +enum SimulateType : uint8 { + kListSchedule, + kBruteForce, + kSimulateOnly +}; + +class Schedule { + public: + Schedule(CGFunc &func, MemPool &memPool, LiveAnalysis &liveAnalysis, const std::string &phase) + : phaseName(phase), + cgFunc(func), + memPool(memPool), + alloc(&memPool), + live(liveAnalysis), + considerRegPressure(false), + nodes(alloc.Adapter()), + readyList(alloc.Adapter()), + liveInRegNo(alloc.Adapter()), + liveOutRegNo(alloc.Adapter()) {} + + virtual ~Schedule() = default; + virtual void MemoryAccessPairOpt() = 0; + virtual void ClinitPairOpt() = 0; + virtual uint32 DoSchedule() = 0; + virtual uint32 DoBruteForceSchedule() = 0; + virtual uint32 SimulateOnly() = 0; + virtual void UpdateBruteForceSchedCycle() = 0; + virtual void IterateBruteForce(DepNode &targetNode, MapleVector &readyList, uint32 currCycle, + MapleVector &scheduledNodes, uint32 &maxCycleCount, + MapleVector &optimizedScheduledNodes) = 0; + virtual void UpdateReadyList(DepNode &targetNode, MapleVector &readyList, bool updateEStart) = 0; + virtual void ListScheduling(bool beforeRA) = 0; + virtual void FinalizeScheduling(BB &bb, const DepAnalysis &depAnalysis) = 0; + + protected: + virtual void Init() = 0; + virtual uint32 ComputeEstart(uint32 cycle) = 0; + virtual void ComputeLstart(uint32 maxEstart) = 0; + virtual void UpdateELStartsOnCycle(uint32 cycle) = 0; + virtual void RandomTest() = 0; + virtual void EraseNodeFromReadyList(const DepNode &target) = 0; + virtual void EraseNodeFromNodeList(const DepNode &target, MapleVector &nodeList) = 0; + virtual uint32 GetNextSepIndex() const = 0; + virtual void CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const = 0; + virtual void FindAndCombineMemoryAccessPair(const std::vector &memList) = 0; + virtual void RegPressureScheduling(BB &bb, MapleVector &nodes) = 0; + virtual bool CanCombine(const Insn &insn) const = 0; + void SetConsiderRegPressure() { + considerRegPressure = true; + } + bool GetConsiderRegPressure() const { + return considerRegPressure; + } + void InitIDAndLoc(); + void RestoreFirstLoc(); + std::string PhaseName() const { + return phaseName; + } + + const std::string phaseName; + CGFunc &cgFunc; + MemPool &memPool; + MapleAllocator alloc; + LiveAnalysis &live; + DepAnalysis *depAnalysis = nullptr; + MAD *mad = nullptr; + uint32 lastSeparatorIndex = 0; + uint32 nodeSize = 0; + bool considerRegPressure = false; + MapleVector nodes; /* Dependence graph */ + MapleVector readyList; /* Ready list. 
*/
+  MapleSet<regno_t> liveInRegNo;
+  MapleSet<regno_t> liveOutRegNo;
+};
+
+MAPLE_FUNC_PHASE_DECLARE(CgPreScheduling, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE(CgScheduling, maplebe::CGFunc)
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_SCHEDULE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/sparse_datainfo.h b/ecmascript/mapleall/maple_be/include/cg/sparse_datainfo.h
new file mode 100644
index 0000000000000000000000000000000000000000..4faf43cec92710dd24a876b785b094ce3e4ff05f
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/sparse_datainfo.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_SPARSE_DATAINFO_H
+#define MAPLEBE_INCLUDE_CG_SPARSE_DATAINFO_H
+#include "maple_string.h"
+#include "common_utils.h"
+#include "mempool.h"
+#include "mempool_allocator.h"
+
+namespace maplebe {
+class SparseDataInfo {
+  /*
+   * SparseDataInfo provides the same interface as DataInfo.
+   * It can be used when the number of live members is small while the
+   * data range is big, as in live analysis: in some extreme cases the
+   * vreg number range is 10k while each bb holds at most about 30.
+   */
+ public:
+  SparseDataInfo(uint32 bitNum, MapleAllocator &alloc)
+      : info(alloc.Adapter()),
+        maxRegNum(bitNum) {}
+
+  SparseDataInfo(const SparseDataInfo &other, MapleAllocator &alloc)
+      : info(other.info, alloc.Adapter()),
+        maxRegNum(other.maxRegNum) {}
+
+  SparseDataInfo &Clone(MapleAllocator &alloc) {
+    auto *dataInfo = alloc.New<SparseDataInfo>(*this, alloc);
+    return *dataInfo;
+  }
+
+  ~SparseDataInfo() = default;
+
+  void SetBit(uint32 bitNO) {
+    info.insert(bitNO);
+  }
+
+  void ResetBit(uint32 bitNO) {
+    info.erase(bitNO);
+  }
+
+  bool TestBit(uint32 bitNO) const {
+    return info.find(bitNO) != info.end();
+  }
+
+  bool NoneBit() const {
+    return info.empty();
+  }
+
+  size_t Size() const {
+    return maxRegNum;
+  }
+
+  const MapleSet<uint32> &GetInfo() const {
+    return info;
+  }
+
+  bool IsEqual(const SparseDataInfo &secondInfo) const {
+    return info == secondInfo.GetInfo();
+  }
+
+  bool IsEqual(const MapleSet<uint32> &liveInfoBak) const {
+    return info == liveInfoBak;
+  }
+
+  void AndBits(const SparseDataInfo &secondInfo) {
+    for (auto it = info.begin(); it != info.end();) {
+      if (!secondInfo.TestBit(*it)) {
+        it = info.erase(it);
+      } else {
+        ++it;
+      }
+    }
+  }
+
+  void OrBits(const SparseDataInfo &secondInfo) {
+    for (auto data : secondInfo.GetInfo()) {
+      info.insert(data);
+    }
+  }
+
+  /* if a bit is set in secondInfo, clear it in the current DataInfo */
+  void Difference(const SparseDataInfo &secondInfo) {
+    for (auto it = info.begin(); it != info.end();) {
+      if (secondInfo.TestBit(*it)) {
+        it = info.erase(it);
+      } else {
+        ++it;
+      }
+    }
+  }
+
+  void ResetAllBit() {
+    info.clear();
+  }
+
+  void EnlargeCapacityToAdaptSize(uint32 bitNO) {
+    /* the set-based storage grows on demand, so there is nothing to enlarge */
+  }
+
+  void ClearDataInfo() {
+    info.clear();
+  }
+
+  template <typename T>
+  void GetBitsOfInfo(T &wordRes) const {
+    wordRes = info;
+  }
+
+ private:
+  /* long type has 8 bytes, 64 bits */
+  static constexpr int32 kWordSize = 64;
+  MapleSet<uint32> info;
+  uint32 maxRegNum;
+};
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_SPARSE_DATAINFO_H */
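`AndBits`, `OrBits`, and `Difference` above are plain set intersection, union, and difference over live vreg numbers, which is why a sparse set wins when only a handful of a function's 10k+ vregs are live in any one bb. A minimal sketch with `std::set` standing in for `MapleSet`:

```
// Sketch of the SparseDataInfo set operations using std::set in place of
// MapleSet. The dense-bitvector DataInfo would touch every word; the sparse
// variant only touches the few members that are actually live.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <set>

using Sparse = std::set<uint32_t>;

void AndBits(Sparse &info, const Sparse &other) {        // intersection
    for (auto it = info.begin(); it != info.end();) {
        it = other.count(*it) ? std::next(it) : info.erase(it);
    }
}

void OrBits(Sparse &info, const Sparse &other) {         // union
    info.insert(other.begin(), other.end());
}

void Difference(Sparse &info, const Sparse &other) {     // info \ other
    for (uint32_t bit : other) {
        info.erase(bit);
    }
}

int main() {
    Sparse liveOut{3, 7, 9001};
    Difference(liveOut, {7});            // kill defs:   {3, 9001}
    OrBits(liveOut, {42});               // add uses:    {3, 42, 9001}
    AndBits(liveOut, {3, 42, 9001, 8});  // mask:        {3, 42, 9001}
    for (uint32_t v : liveOut) std::cout << "V" << v << " ";
    std::cout << "\n";
    return 0;
}
```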
diff --git a/ecmascript/mapleall/maple_be/include/cg/standardize.h b/ecmascript/mapleall/maple_be/include/cg/standardize.h
new file mode 100644
index 0000000000000000000000000000000000000000..6df1d12dca8be24849f342469c293d7be6e29c18
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/standardize.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_STANDARDIZE_H
+#define MAPLEBE_INCLUDE_STANDARDIZE_H
+
+#include "cgfunc.h"
+namespace maplebe {
+class Standardize {
+ public:
+  explicit Standardize(CGFunc &f) : cgFunc(&f) {}
+
+  virtual ~Standardize() {
+    cgFunc = nullptr;
+  }
+
+  /*
+   * For CPU instructions that take different numbers of operands,
+   * maple provides a default lowering from three-address to two-address form.
+   * The conversion rule is:
+   *   mop(dest, src1, src2) -> mov(dest, src1)
+   *                            mop(dest, src2)
+   * maple also provides a default lowering from two-address to one-address
+   * form for unary ops. The conversion rule is:
+   *   mop(dest, src) -> mov(dest, src)
+   *                     mop(dest)
+   */
+  void AddressMapping(Insn &insn);
+
+  void DoStandardize();
+
+ protected:
+  void SetAddressMapping(bool needMapping) {
+    needAddrMapping = needMapping;
+  }
+  bool NeedAddressMapping(const Insn &insn) {
+    /* operand number is 2 for two-address mode and 3 for three-address mode */
+    needAddrMapping = (insn.GetOperandSize() > 2) || (insn.IsUnaryOp());
+    return needAddrMapping;
+  }
+ private:
+  virtual void StdzMov(Insn &insn) = 0;
+  virtual void StdzStrLdr(Insn &insn) = 0;
+  virtual void StdzBasicOp(Insn &insn) = 0;
+  virtual void StdzUnaryOp(Insn &insn) = 0;
+  virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0;
+  virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0;
+  CGFunc *cgFunc;
+  bool needAddrMapping = false;
+};
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_STANDARDIZE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/strldr.h b/ecmascript/mapleall/maple_be/include/cg/strldr.h
new file mode 100644
index 0000000000000000000000000000000000000000..c36e5772923e79c0c686674fe5c7ccf89be502ae
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/strldr.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_STRLDR_H +#define MAPLEBE_INCLUDE_CG_STRLDR_H +#include "cg_phase.h" +#include "maple_phase.h" + +namespace maplebe { +class StoreLoadOpt { + public: + StoreLoadOpt(CGFunc &func, MemPool &memPool) : cgFunc(func), memPool(memPool) {} + virtual ~StoreLoadOpt() = default; + virtual void Run() = 0; + std::string PhaseName() const { + return "storeloadopt"; + } + + protected: + CGFunc &cgFunc; + MemPool &memPool; + /* if the number of bbs is more than 500 or the number of insns is more than 9000, don't optimize. */ + static constexpr uint32 kMaxBBNum = 500; + static constexpr uint32 kMaxInsnNum = 9000; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgStoreLoadOpt, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_STRLDR_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/valid_bitmask_imm.txt b/ecmascript/mapleall/maple_be/include/cg/valid_bitmask_imm.txt new file mode 100755 index 0000000000000000000000000000000000000000..53a6135b6ebd4f570d728df66a9b2584a3a677ef --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/valid_bitmask_imm.txt @@ -0,0 +1,5372 @@ +0x5555555555555555, +0xaaaaaaaaaaaaaaaa, +0x1111111111111111, +0x2222222222222222, +0x4444444444444444, +0x8888888888888888, +0x3333333333333333, +0x6666666666666666, +0xcccccccccccccccc, +0x9999999999999999, +0x7777777777777777, +0xeeeeeeeeeeeeeeee, +0xdddddddddddddddd, +0xbbbbbbbbbbbbbbbb, +0x0101010101010101, +0x0202020202020202, +0x0404040404040404, +0x0808080808080808, +0x1010101010101010, +0x2020202020202020, +0x4040404040404040, +0x8080808080808080, +0x0303030303030303, +0x0606060606060606, +0x0c0c0c0c0c0c0c0c, +0x1818181818181818, +0x3030303030303030, +0x6060606060606060, +0xc0c0c0c0c0c0c0c0, +0x8181818181818181, +0x0707070707070707, +0x0e0e0e0e0e0e0e0e, +0x1c1c1c1c1c1c1c1c, +0x3838383838383838, +0x7070707070707070, +0xe0e0e0e0e0e0e0e0, +0xc1c1c1c1c1c1c1c1, +0x8383838383838383, +0x0f0f0f0f0f0f0f0f, +0x1e1e1e1e1e1e1e1e, +0x3c3c3c3c3c3c3c3c, +0x7878787878787878, +0xf0f0f0f0f0f0f0f0, +0xe1e1e1e1e1e1e1e1, +0xc3c3c3c3c3c3c3c3, +0x8787878787878787, +0x1f1f1f1f1f1f1f1f, +0x3e3e3e3e3e3e3e3e, +0x7c7c7c7c7c7c7c7c, +0xf8f8f8f8f8f8f8f8, +0xf1f1f1f1f1f1f1f1, +0xe3e3e3e3e3e3e3e3, +0xc7c7c7c7c7c7c7c7, +0x8f8f8f8f8f8f8f8f, +0x3f3f3f3f3f3f3f3f, +0x7e7e7e7e7e7e7e7e, +0xfcfcfcfcfcfcfcfc, +0xf9f9f9f9f9f9f9f9, +0xf3f3f3f3f3f3f3f3, +0xe7e7e7e7e7e7e7e7, +0xcfcfcfcfcfcfcfcf, +0x9f9f9f9f9f9f9f9f, +0x7f7f7f7f7f7f7f7f, +0xfefefefefefefefe, +0xfdfdfdfdfdfdfdfd, +0xfbfbfbfbfbfbfbfb, +0xf7f7f7f7f7f7f7f7, +0xefefefefefefefef, +0xdfdfdfdfdfdfdfdf, +0xbfbfbfbfbfbfbfbf, +0x0001000100010001, +0x0002000200020002, +0x0004000400040004, +0x0008000800080008, +0x0010001000100010, +0x0020002000200020, +0x0040004000400040, +0x0080008000800080, +0x0100010001000100, +0x0200020002000200, +0x0400040004000400, +0x0800080008000800, +0x1000100010001000, +0x2000200020002000, +0x4000400040004000, +0x8000800080008000, +0x0003000300030003, +0x0006000600060006, +0x000c000c000c000c, +0x0018001800180018, +0x0030003000300030, +0x0060006000600060, +0x00c000c000c000c0, +0x0180018001800180, +0x0300030003000300, +0x0600060006000600, +0x0c000c000c000c00, +0x1800180018001800, +0x3000300030003000, +0x6000600060006000, +0xc000c000c000c000, +0x8001800180018001, +0x0007000700070007, +0x000e000e000e000e, +0x001c001c001c001c, +0x0038003800380038, +0x0070007000700070, +0x00e000e000e000e0, +0x01c001c001c001c0, +0x0380038003800380, +0x0700070007000700, +0x0e000e000e000e00, +0x1c001c001c001c00, +0x3800380038003800, 
+0x7000700070007000, +0xe000e000e000e000, +0xc001c001c001c001, +0x8003800380038003, +0x000f000f000f000f, +0x001e001e001e001e, +0x003c003c003c003c, +0x0078007800780078, +0x00f000f000f000f0, +0x01e001e001e001e0, +0x03c003c003c003c0, +0x0780078007800780, +0x0f000f000f000f00, +0x1e001e001e001e00, +0x3c003c003c003c00, +0x7800780078007800, +0xf000f000f000f000, +0xe001e001e001e001, +0xc003c003c003c003, +0x8007800780078007, +0x001f001f001f001f, +0x003e003e003e003e, +0x007c007c007c007c, +0x00f800f800f800f8, +0x01f001f001f001f0, +0x03e003e003e003e0, +0x07c007c007c007c0, +0x0f800f800f800f80, +0x1f001f001f001f00, +0x3e003e003e003e00, +0x7c007c007c007c00, +0xf800f800f800f800, +0xf001f001f001f001, +0xe003e003e003e003, +0xc007c007c007c007, +0x800f800f800f800f, +0x003f003f003f003f, +0x007e007e007e007e, +0x00fc00fc00fc00fc, +0x01f801f801f801f8, +0x03f003f003f003f0, +0x07e007e007e007e0, +0x0fc00fc00fc00fc0, +0x1f801f801f801f80, +0x3f003f003f003f00, +0x7e007e007e007e00, +0xfc00fc00fc00fc00, +0xf801f801f801f801, +0xf003f003f003f003, +0xe007e007e007e007, +0xc00fc00fc00fc00f, +0x801f801f801f801f, +0x007f007f007f007f, +0x00fe00fe00fe00fe, +0x01fc01fc01fc01fc, +0x03f803f803f803f8, +0x07f007f007f007f0, +0x0fe00fe00fe00fe0, +0x1fc01fc01fc01fc0, +0x3f803f803f803f80, +0x7f007f007f007f00, +0xfe00fe00fe00fe00, +0xfc01fc01fc01fc01, +0xf803f803f803f803, +0xf007f007f007f007, +0xe00fe00fe00fe00f, +0xc01fc01fc01fc01f, +0x803f803f803f803f, +0x00ff00ff00ff00ff, +0x01fe01fe01fe01fe, +0x03fc03fc03fc03fc, +0x07f807f807f807f8, +0x0ff00ff00ff00ff0, +0x1fe01fe01fe01fe0, +0x3fc03fc03fc03fc0, +0x7f807f807f807f80, +0xff00ff00ff00ff00, +0xfe01fe01fe01fe01, +0xfc03fc03fc03fc03, +0xf807f807f807f807, +0xf00ff00ff00ff00f, +0xe01fe01fe01fe01f, +0xc03fc03fc03fc03f, +0x807f807f807f807f, +0x01ff01ff01ff01ff, +0x03fe03fe03fe03fe, +0x07fc07fc07fc07fc, +0x0ff80ff80ff80ff8, +0x1ff01ff01ff01ff0, +0x3fe03fe03fe03fe0, +0x7fc07fc07fc07fc0, +0xff80ff80ff80ff80, +0xff01ff01ff01ff01, +0xfe03fe03fe03fe03, +0xfc07fc07fc07fc07, +0xf80ff80ff80ff80f, +0xf01ff01ff01ff01f, +0xe03fe03fe03fe03f, +0xc07fc07fc07fc07f, +0x80ff80ff80ff80ff, +0x03ff03ff03ff03ff, +0x07fe07fe07fe07fe, +0x0ffc0ffc0ffc0ffc, +0x1ff81ff81ff81ff8, +0x3ff03ff03ff03ff0, +0x7fe07fe07fe07fe0, +0xffc0ffc0ffc0ffc0, +0xff81ff81ff81ff81, +0xff03ff03ff03ff03, +0xfe07fe07fe07fe07, +0xfc0ffc0ffc0ffc0f, +0xf81ff81ff81ff81f, +0xf03ff03ff03ff03f, +0xe07fe07fe07fe07f, +0xc0ffc0ffc0ffc0ff, +0x81ff81ff81ff81ff, +0x07ff07ff07ff07ff, +0x0ffe0ffe0ffe0ffe, +0x1ffc1ffc1ffc1ffc, +0x3ff83ff83ff83ff8, +0x7ff07ff07ff07ff0, +0xffe0ffe0ffe0ffe0, +0xffc1ffc1ffc1ffc1, +0xff83ff83ff83ff83, +0xff07ff07ff07ff07, +0xfe0ffe0ffe0ffe0f, +0xfc1ffc1ffc1ffc1f, +0xf83ff83ff83ff83f, +0xf07ff07ff07ff07f, +0xe0ffe0ffe0ffe0ff, +0xc1ffc1ffc1ffc1ff, +0x83ff83ff83ff83ff, +0x0fff0fff0fff0fff, +0x1ffe1ffe1ffe1ffe, +0x3ffc3ffc3ffc3ffc, +0x7ff87ff87ff87ff8, +0xfff0fff0fff0fff0, +0xffe1ffe1ffe1ffe1, +0xffc3ffc3ffc3ffc3, +0xff87ff87ff87ff87, +0xff0fff0fff0fff0f, +0xfe1ffe1ffe1ffe1f, +0xfc3ffc3ffc3ffc3f, +0xf87ff87ff87ff87f, +0xf0fff0fff0fff0ff, +0xe1ffe1ffe1ffe1ff, +0xc3ffc3ffc3ffc3ff, +0x87ff87ff87ff87ff, +0x1fff1fff1fff1fff, +0x3ffe3ffe3ffe3ffe, +0x7ffc7ffc7ffc7ffc, +0xfff8fff8fff8fff8, +0xfff1fff1fff1fff1, +0xffe3ffe3ffe3ffe3, +0xffc7ffc7ffc7ffc7, +0xff8fff8fff8fff8f, +0xff1fff1fff1fff1f, +0xfe3ffe3ffe3ffe3f, +0xfc7ffc7ffc7ffc7f, +0xf8fff8fff8fff8ff, +0xf1fff1fff1fff1ff, +0xe3ffe3ffe3ffe3ff, +0xc7ffc7ffc7ffc7ff, +0x8fff8fff8fff8fff, +0x3fff3fff3fff3fff, +0x7ffe7ffe7ffe7ffe, +0xfffcfffcfffcfffc, +0xfff9fff9fff9fff9, +0xfff3fff3fff3fff3, 
+0xffe7ffe7ffe7ffe7, +0xffcfffcfffcfffcf, +0xff9fff9fff9fff9f, +0xff3fff3fff3fff3f, +0xfe7ffe7ffe7ffe7f, +0xfcfffcfffcfffcff, +0xf9fff9fff9fff9ff, +0xf3fff3fff3fff3ff, +0xe7ffe7ffe7ffe7ff, +0xcfffcfffcfffcfff, +0x9fff9fff9fff9fff, +0x7fff7fff7fff7fff, +0xfffefffefffefffe, +0xfffdfffdfffdfffd, +0xfffbfffbfffbfffb, +0xfff7fff7fff7fff7, +0xffefffefffefffef, +0xffdfffdfffdfffdf, +0xffbfffbfffbfffbf, +0xff7fff7fff7fff7f, +0xfefffefffefffeff, +0xfdfffdfffdfffdff, +0xfbfffbfffbfffbff, +0xf7fff7fff7fff7ff, +0xefffefffefffefff, +0xdfffdfffdfffdfff, +0xbfffbfffbfffbfff, +0x0000000100000001, +0x0000000200000002, +0x0000000400000004, +0x0000000800000008, +0x0000001000000010, +0x0000002000000020, +0x0000004000000040, +0x0000008000000080, +0x0000010000000100, +0x0000020000000200, +0x0000040000000400, +0x0000080000000800, +0x0000100000001000, +0x0000200000002000, +0x0000400000004000, +0x0000800000008000, +0x0001000000010000, +0x0002000000020000, +0x0004000000040000, +0x0008000000080000, +0x0010000000100000, +0x0020000000200000, +0x0040000000400000, +0x0080000000800000, +0x0100000001000000, +0x0200000002000000, +0x0400000004000000, +0x0800000008000000, +0x1000000010000000, +0x2000000020000000, +0x4000000040000000, +0x8000000080000000, +0x0000000300000003, +0x0000000600000006, +0x0000000c0000000c, +0x0000001800000018, +0x0000003000000030, +0x0000006000000060, +0x000000c0000000c0, +0x0000018000000180, +0x0000030000000300, +0x0000060000000600, +0x00000c0000000c00, +0x0000180000001800, +0x0000300000003000, +0x0000600000006000, +0x0000c0000000c000, +0x0001800000018000, +0x0003000000030000, +0x0006000000060000, +0x000c0000000c0000, +0x0018000000180000, +0x0030000000300000, +0x0060000000600000, +0x00c0000000c00000, +0x0180000001800000, +0x0300000003000000, +0x0600000006000000, +0x0c0000000c000000, +0x1800000018000000, +0x3000000030000000, +0x6000000060000000, +0xc0000000c0000000, +0x8000000180000001, +0x0000000700000007, +0x0000000e0000000e, +0x0000001c0000001c, +0x0000003800000038, +0x0000007000000070, +0x000000e0000000e0, +0x000001c0000001c0, +0x0000038000000380, +0x0000070000000700, +0x00000e0000000e00, +0x00001c0000001c00, +0x0000380000003800, +0x0000700000007000, +0x0000e0000000e000, +0x0001c0000001c000, +0x0003800000038000, +0x0007000000070000, +0x000e0000000e0000, +0x001c0000001c0000, +0x0038000000380000, +0x0070000000700000, +0x00e0000000e00000, +0x01c0000001c00000, +0x0380000003800000, +0x0700000007000000, +0x0e0000000e000000, +0x1c0000001c000000, +0x3800000038000000, +0x7000000070000000, +0xe0000000e0000000, +0xc0000001c0000001, +0x8000000380000003, +0x0000000f0000000f, +0x0000001e0000001e, +0x0000003c0000003c, +0x0000007800000078, +0x000000f0000000f0, +0x000001e0000001e0, +0x000003c0000003c0, +0x0000078000000780, +0x00000f0000000f00, +0x00001e0000001e00, +0x00003c0000003c00, +0x0000780000007800, +0x0000f0000000f000, +0x0001e0000001e000, +0x0003c0000003c000, +0x0007800000078000, +0x000f0000000f0000, +0x001e0000001e0000, +0x003c0000003c0000, +0x0078000000780000, +0x00f0000000f00000, +0x01e0000001e00000, +0x03c0000003c00000, +0x0780000007800000, +0x0f0000000f000000, +0x1e0000001e000000, +0x3c0000003c000000, +0x7800000078000000, +0xf0000000f0000000, +0xe0000001e0000001, +0xc0000003c0000003, +0x8000000780000007, +0x0000001f0000001f, +0x0000003e0000003e, +0x0000007c0000007c, +0x000000f8000000f8, +0x000001f0000001f0, +0x000003e0000003e0, +0x000007c0000007c0, +0x00000f8000000f80, +0x00001f0000001f00, +0x00003e0000003e00, +0x00007c0000007c00, +0x0000f8000000f800, +0x0001f0000001f000, +0x0003e0000003e000, 
+0x0007c0000007c000, +0x000f8000000f8000, +0x001f0000001f0000, +0x003e0000003e0000, +0x007c0000007c0000, +0x00f8000000f80000, +0x01f0000001f00000, +0x03e0000003e00000, +0x07c0000007c00000, +0x0f8000000f800000, +0x1f0000001f000000, +0x3e0000003e000000, +0x7c0000007c000000, +0xf8000000f8000000, +0xf0000001f0000001, +0xe0000003e0000003, +0xc0000007c0000007, +0x8000000f8000000f, +0x0000003f0000003f, +0x0000007e0000007e, +0x000000fc000000fc, +0x000001f8000001f8, +0x000003f0000003f0, +0x000007e0000007e0, +0x00000fc000000fc0, +0x00001f8000001f80, +0x00003f0000003f00, +0x00007e0000007e00, +0x0000fc000000fc00, +0x0001f8000001f800, +0x0003f0000003f000, +0x0007e0000007e000, +0x000fc000000fc000, +0x001f8000001f8000, +0x003f0000003f0000, +0x007e0000007e0000, +0x00fc000000fc0000, +0x01f8000001f80000, +0x03f0000003f00000, +0x07e0000007e00000, +0x0fc000000fc00000, +0x1f8000001f800000, +0x3f0000003f000000, +0x7e0000007e000000, +0xfc000000fc000000, +0xf8000001f8000001, +0xf0000003f0000003, +0xe0000007e0000007, +0xc000000fc000000f, +0x8000001f8000001f, +0x0000007f0000007f, +0x000000fe000000fe, +0x000001fc000001fc, +0x000003f8000003f8, +0x000007f0000007f0, +0x00000fe000000fe0, +0x00001fc000001fc0, +0x00003f8000003f80, +0x00007f0000007f00, +0x0000fe000000fe00, +0x0001fc000001fc00, +0x0003f8000003f800, +0x0007f0000007f000, +0x000fe000000fe000, +0x001fc000001fc000, +0x003f8000003f8000, +0x007f0000007f0000, +0x00fe000000fe0000, +0x01fc000001fc0000, +0x03f8000003f80000, +0x07f0000007f00000, +0x0fe000000fe00000, +0x1fc000001fc00000, +0x3f8000003f800000, +0x7f0000007f000000, +0xfe000000fe000000, +0xfc000001fc000001, +0xf8000003f8000003, +0xf0000007f0000007, +0xe000000fe000000f, +0xc000001fc000001f, +0x8000003f8000003f, +0x000000ff000000ff, +0x000001fe000001fe, +0x000003fc000003fc, +0x000007f8000007f8, +0x00000ff000000ff0, +0x00001fe000001fe0, +0x00003fc000003fc0, +0x00007f8000007f80, +0x0000ff000000ff00, +0x0001fe000001fe00, +0x0003fc000003fc00, +0x0007f8000007f800, +0x000ff000000ff000, +0x001fe000001fe000, +0x003fc000003fc000, +0x007f8000007f8000, +0x00ff000000ff0000, +0x01fe000001fe0000, +0x03fc000003fc0000, +0x07f8000007f80000, +0x0ff000000ff00000, +0x1fe000001fe00000, +0x3fc000003fc00000, +0x7f8000007f800000, +0xff000000ff000000, +0xfe000001fe000001, +0xfc000003fc000003, +0xf8000007f8000007, +0xf000000ff000000f, +0xe000001fe000001f, +0xc000003fc000003f, +0x8000007f8000007f, +0x000001ff000001ff, +0x000003fe000003fe, +0x000007fc000007fc, +0x00000ff800000ff8, +0x00001ff000001ff0, +0x00003fe000003fe0, +0x00007fc000007fc0, +0x0000ff800000ff80, +0x0001ff000001ff00, +0x0003fe000003fe00, +0x0007fc000007fc00, +0x000ff800000ff800, +0x001ff000001ff000, +0x003fe000003fe000, +0x007fc000007fc000, +0x00ff800000ff8000, +0x01ff000001ff0000, +0x03fe000003fe0000, +0x07fc000007fc0000, +0x0ff800000ff80000, +0x1ff000001ff00000, +0x3fe000003fe00000, +0x7fc000007fc00000, +0xff800000ff800000, +0xff000001ff000001, +0xfe000003fe000003, +0xfc000007fc000007, +0xf800000ff800000f, +0xf000001ff000001f, +0xe000003fe000003f, +0xc000007fc000007f, +0x800000ff800000ff, +0x000003ff000003ff, +0x000007fe000007fe, +0x00000ffc00000ffc, +0x00001ff800001ff8, +0x00003ff000003ff0, +0x00007fe000007fe0, +0x0000ffc00000ffc0, +0x0001ff800001ff80, +0x0003ff000003ff00, +0x0007fe000007fe00, +0x000ffc00000ffc00, +0x001ff800001ff800, +0x003ff000003ff000, +0x007fe000007fe000, +0x00ffc00000ffc000, +0x01ff800001ff8000, +0x03ff000003ff0000, +0x07fe000007fe0000, +0x0ffc00000ffc0000, +0x1ff800001ff80000, +0x3ff000003ff00000, +0x7fe000007fe00000, +0xffc00000ffc00000, 
+0xff800001ff800001, +0xff000003ff000003, +0xfe000007fe000007, +0xfc00000ffc00000f, +0xf800001ff800001f, +0xf000003ff000003f, +0xe000007fe000007f, +0xc00000ffc00000ff, +0x800001ff800001ff, +0x000007ff000007ff, +0x00000ffe00000ffe, +0x00001ffc00001ffc, +0x00003ff800003ff8, +0x00007ff000007ff0, +0x0000ffe00000ffe0, +0x0001ffc00001ffc0, +0x0003ff800003ff80, +0x0007ff000007ff00, +0x000ffe00000ffe00, +0x001ffc00001ffc00, +0x003ff800003ff800, +0x007ff000007ff000, +0x00ffe00000ffe000, +0x01ffc00001ffc000, +0x03ff800003ff8000, +0x07ff000007ff0000, +0x0ffe00000ffe0000, +0x1ffc00001ffc0000, +0x3ff800003ff80000, +0x7ff000007ff00000, +0xffe00000ffe00000, +0xffc00001ffc00001, +0xff800003ff800003, +0xff000007ff000007, +0xfe00000ffe00000f, +0xfc00001ffc00001f, +0xf800003ff800003f, +0xf000007ff000007f, +0xe00000ffe00000ff, +0xc00001ffc00001ff, +0x800003ff800003ff, +0x00000fff00000fff, +0x00001ffe00001ffe, +0x00003ffc00003ffc, +0x00007ff800007ff8, +0x0000fff00000fff0, +0x0001ffe00001ffe0, +0x0003ffc00003ffc0, +0x0007ff800007ff80, +0x000fff00000fff00, +0x001ffe00001ffe00, +0x003ffc00003ffc00, +0x007ff800007ff800, +0x00fff00000fff000, +0x01ffe00001ffe000, +0x03ffc00003ffc000, +0x07ff800007ff8000, +0x0fff00000fff0000, +0x1ffe00001ffe0000, +0x3ffc00003ffc0000, +0x7ff800007ff80000, +0xfff00000fff00000, +0xffe00001ffe00001, +0xffc00003ffc00003, +0xff800007ff800007, +0xff00000fff00000f, +0xfe00001ffe00001f, +0xfc00003ffc00003f, +0xf800007ff800007f, +0xf00000fff00000ff, +0xe00001ffe00001ff, +0xc00003ffc00003ff, +0x800007ff800007ff, +0x00001fff00001fff, +0x00003ffe00003ffe, +0x00007ffc00007ffc, +0x0000fff80000fff8, +0x0001fff00001fff0, +0x0003ffe00003ffe0, +0x0007ffc00007ffc0, +0x000fff80000fff80, +0x001fff00001fff00, +0x003ffe00003ffe00, +0x007ffc00007ffc00, +0x00fff80000fff800, +0x01fff00001fff000, +0x03ffe00003ffe000, +0x07ffc00007ffc000, +0x0fff80000fff8000, +0x1fff00001fff0000, +0x3ffe00003ffe0000, +0x7ffc00007ffc0000, +0xfff80000fff80000, +0xfff00001fff00001, +0xffe00003ffe00003, +0xffc00007ffc00007, +0xff80000fff80000f, +0xff00001fff00001f, +0xfe00003ffe00003f, +0xfc00007ffc00007f, +0xf80000fff80000ff, +0xf00001fff00001ff, +0xe00003ffe00003ff, +0xc00007ffc00007ff, +0x80000fff80000fff, +0x00003fff00003fff, +0x00007ffe00007ffe, +0x0000fffc0000fffc, +0x0001fff80001fff8, +0x0003fff00003fff0, +0x0007ffe00007ffe0, +0x000fffc0000fffc0, +0x001fff80001fff80, +0x003fff00003fff00, +0x007ffe00007ffe00, +0x00fffc0000fffc00, +0x01fff80001fff800, +0x03fff00003fff000, +0x07ffe00007ffe000, +0x0fffc0000fffc000, +0x1fff80001fff8000, +0x3fff00003fff0000, +0x7ffe00007ffe0000, +0xfffc0000fffc0000, +0xfff80001fff80001, +0xfff00003fff00003, +0xffe00007ffe00007, +0xffc0000fffc0000f, +0xff80001fff80001f, +0xff00003fff00003f, +0xfe00007ffe00007f, +0xfc0000fffc0000ff, +0xf80001fff80001ff, +0xf00003fff00003ff, +0xe00007ffe00007ff, +0xc0000fffc0000fff, +0x80001fff80001fff, +0x00007fff00007fff, +0x0000fffe0000fffe, +0x0001fffc0001fffc, +0x0003fff80003fff8, +0x0007fff00007fff0, +0x000fffe0000fffe0, +0x001fffc0001fffc0, +0x003fff80003fff80, +0x007fff00007fff00, +0x00fffe0000fffe00, +0x01fffc0001fffc00, +0x03fff80003fff800, +0x07fff00007fff000, +0x0fffe0000fffe000, +0x1fffc0001fffc000, +0x3fff80003fff8000, +0x7fff00007fff0000, +0xfffe0000fffe0000, +0xfffc0001fffc0001, +0xfff80003fff80003, +0xfff00007fff00007, +0xffe0000fffe0000f, +0xffc0001fffc0001f, +0xff80003fff80003f, +0xff00007fff00007f, +0xfe0000fffe0000ff, +0xfc0001fffc0001ff, +0xf80003fff80003ff, +0xf00007fff00007ff, +0xe0000fffe0000fff, +0xc0001fffc0001fff, +0x80003fff80003fff, 
+0x0000ffff0000ffff, +0x0001fffe0001fffe, +0x0003fffc0003fffc, +0x0007fff80007fff8, +0x000ffff0000ffff0, +0x001fffe0001fffe0, +0x003fffc0003fffc0, +0x007fff80007fff80, +0x00ffff0000ffff00, +0x01fffe0001fffe00, +0x03fffc0003fffc00, +0x07fff80007fff800, +0x0ffff0000ffff000, +0x1fffe0001fffe000, +0x3fffc0003fffc000, +0x7fff80007fff8000, +0xffff0000ffff0000, +0xfffe0001fffe0001, +0xfffc0003fffc0003, +0xfff80007fff80007, +0xfff0000ffff0000f, +0xffe0001fffe0001f, +0xffc0003fffc0003f, +0xff80007fff80007f, +0xff0000ffff0000ff, +0xfe0001fffe0001ff, +0xfc0003fffc0003ff, +0xf80007fff80007ff, +0xf0000ffff0000fff, +0xe0001fffe0001fff, +0xc0003fffc0003fff, +0x80007fff80007fff, +0x0001ffff0001ffff, +0x0003fffe0003fffe, +0x0007fffc0007fffc, +0x000ffff8000ffff8, +0x001ffff0001ffff0, +0x003fffe0003fffe0, +0x007fffc0007fffc0, +0x00ffff8000ffff80, +0x01ffff0001ffff00, +0x03fffe0003fffe00, +0x07fffc0007fffc00, +0x0ffff8000ffff800, +0x1ffff0001ffff000, +0x3fffe0003fffe000, +0x7fffc0007fffc000, +0xffff8000ffff8000, +0xffff0001ffff0001, +0xfffe0003fffe0003, +0xfffc0007fffc0007, +0xfff8000ffff8000f, +0xfff0001ffff0001f, +0xffe0003fffe0003f, +0xffc0007fffc0007f, +0xff8000ffff8000ff, +0xff0001ffff0001ff, +0xfe0003fffe0003ff, +0xfc0007fffc0007ff, +0xf8000ffff8000fff, +0xf0001ffff0001fff, +0xe0003fffe0003fff, +0xc0007fffc0007fff, +0x8000ffff8000ffff, +0x0003ffff0003ffff, +0x0007fffe0007fffe, +0x000ffffc000ffffc, +0x001ffff8001ffff8, +0x003ffff0003ffff0, +0x007fffe0007fffe0, +0x00ffffc000ffffc0, +0x01ffff8001ffff80, +0x03ffff0003ffff00, +0x07fffe0007fffe00, +0x0ffffc000ffffc00, +0x1ffff8001ffff800, +0x3ffff0003ffff000, +0x7fffe0007fffe000, +0xffffc000ffffc000, +0xffff8001ffff8001, +0xffff0003ffff0003, +0xfffe0007fffe0007, +0xfffc000ffffc000f, +0xfff8001ffff8001f, +0xfff0003ffff0003f, +0xffe0007fffe0007f, +0xffc000ffffc000ff, +0xff8001ffff8001ff, +0xff0003ffff0003ff, +0xfe0007fffe0007ff, +0xfc000ffffc000fff, +0xf8001ffff8001fff, +0xf0003ffff0003fff, +0xe0007fffe0007fff, +0xc000ffffc000ffff, +0x8001ffff8001ffff, +0x0007ffff0007ffff, +0x000ffffe000ffffe, +0x001ffffc001ffffc, +0x003ffff8003ffff8, +0x007ffff0007ffff0, +0x00ffffe000ffffe0, +0x01ffffc001ffffc0, +0x03ffff8003ffff80, +0x07ffff0007ffff00, +0x0ffffe000ffffe00, +0x1ffffc001ffffc00, +0x3ffff8003ffff800, +0x7ffff0007ffff000, +0xffffe000ffffe000, +0xffffc001ffffc001, +0xffff8003ffff8003, +0xffff0007ffff0007, +0xfffe000ffffe000f, +0xfffc001ffffc001f, +0xfff8003ffff8003f, +0xfff0007ffff0007f, +0xffe000ffffe000ff, +0xffc001ffffc001ff, +0xff8003ffff8003ff, +0xff0007ffff0007ff, +0xfe000ffffe000fff, +0xfc001ffffc001fff, +0xf8003ffff8003fff, +0xf0007ffff0007fff, +0xe000ffffe000ffff, +0xc001ffffc001ffff, +0x8003ffff8003ffff, +0x000fffff000fffff, +0x001ffffe001ffffe, +0x003ffffc003ffffc, +0x007ffff8007ffff8, +0x00fffff000fffff0, +0x01ffffe001ffffe0, +0x03ffffc003ffffc0, +0x07ffff8007ffff80, +0x0fffff000fffff00, +0x1ffffe001ffffe00, +0x3ffffc003ffffc00, +0x7ffff8007ffff800, +0xfffff000fffff000, +0xffffe001ffffe001, +0xffffc003ffffc003, +0xffff8007ffff8007, +0xffff000fffff000f, +0xfffe001ffffe001f, +0xfffc003ffffc003f, +0xfff8007ffff8007f, +0xfff000fffff000ff, +0xffe001ffffe001ff, +0xffc003ffffc003ff, +0xff8007ffff8007ff, +0xff000fffff000fff, +0xfe001ffffe001fff, +0xfc003ffffc003fff, +0xf8007ffff8007fff, +0xf000fffff000ffff, +0xe001ffffe001ffff, +0xc003ffffc003ffff, +0x8007ffff8007ffff, +0x001fffff001fffff, +0x003ffffe003ffffe, +0x007ffffc007ffffc, +0x00fffff800fffff8, +0x01fffff001fffff0, +0x03ffffe003ffffe0, +0x07ffffc007ffffc0, +0x0fffff800fffff80, +0x1fffff001fffff00, 
+0x3ffffe003ffffe00, +0x7ffffc007ffffc00, +0xfffff800fffff800, +0xfffff001fffff001, +0xffffe003ffffe003, +0xffffc007ffffc007, +0xffff800fffff800f, +0xffff001fffff001f, +0xfffe003ffffe003f, +0xfffc007ffffc007f, +0xfff800fffff800ff, +0xfff001fffff001ff, +0xffe003ffffe003ff, +0xffc007ffffc007ff, +0xff800fffff800fff, +0xff001fffff001fff, +0xfe003ffffe003fff, +0xfc007ffffc007fff, +0xf800fffff800ffff, +0xf001fffff001ffff, +0xe003ffffe003ffff, +0xc007ffffc007ffff, +0x800fffff800fffff, +0x003fffff003fffff, +0x007ffffe007ffffe, +0x00fffffc00fffffc, +0x01fffff801fffff8, +0x03fffff003fffff0, +0x07ffffe007ffffe0, +0x0fffffc00fffffc0, +0x1fffff801fffff80, +0x3fffff003fffff00, +0x7ffffe007ffffe00, +0xfffffc00fffffc00, +0xfffff801fffff801, +0xfffff003fffff003, +0xffffe007ffffe007, +0xffffc00fffffc00f, +0xffff801fffff801f, +0xffff003fffff003f, +0xfffe007ffffe007f, +0xfffc00fffffc00ff, +0xfff801fffff801ff, +0xfff003fffff003ff, +0xffe007ffffe007ff, +0xffc00fffffc00fff, +0xff801fffff801fff, +0xff003fffff003fff, +0xfe007ffffe007fff, +0xfc00fffffc00ffff, +0xf801fffff801ffff, +0xf003fffff003ffff, +0xe007ffffe007ffff, +0xc00fffffc00fffff, +0x801fffff801fffff, +0x007fffff007fffff, +0x00fffffe00fffffe, +0x01fffffc01fffffc, +0x03fffff803fffff8, +0x07fffff007fffff0, +0x0fffffe00fffffe0, +0x1fffffc01fffffc0, +0x3fffff803fffff80, +0x7fffff007fffff00, +0xfffffe00fffffe00, +0xfffffc01fffffc01, +0xfffff803fffff803, +0xfffff007fffff007, +0xffffe00fffffe00f, +0xffffc01fffffc01f, +0xffff803fffff803f, +0xffff007fffff007f, +0xfffe00fffffe00ff, +0xfffc01fffffc01ff, +0xfff803fffff803ff, +0xfff007fffff007ff, +0xffe00fffffe00fff, +0xffc01fffffc01fff, +0xff803fffff803fff, +0xff007fffff007fff, +0xfe00fffffe00ffff, +0xfc01fffffc01ffff, +0xf803fffff803ffff, +0xf007fffff007ffff, +0xe00fffffe00fffff, +0xc01fffffc01fffff, +0x803fffff803fffff, +0x00ffffff00ffffff, +0x01fffffe01fffffe, +0x03fffffc03fffffc, +0x07fffff807fffff8, +0x0ffffff00ffffff0, +0x1fffffe01fffffe0, +0x3fffffc03fffffc0, +0x7fffff807fffff80, +0xffffff00ffffff00, +0xfffffe01fffffe01, +0xfffffc03fffffc03, +0xfffff807fffff807, +0xfffff00ffffff00f, +0xffffe01fffffe01f, +0xffffc03fffffc03f, +0xffff807fffff807f, +0xffff00ffffff00ff, +0xfffe01fffffe01ff, +0xfffc03fffffc03ff, +0xfff807fffff807ff, +0xfff00ffffff00fff, +0xffe01fffffe01fff, +0xffc03fffffc03fff, +0xff807fffff807fff, +0xff00ffffff00ffff, +0xfe01fffffe01ffff, +0xfc03fffffc03ffff, +0xf807fffff807ffff, +0xf00ffffff00fffff, +0xe01fffffe01fffff, +0xc03fffffc03fffff, +0x807fffff807fffff, +0x01ffffff01ffffff, +0x03fffffe03fffffe, +0x07fffffc07fffffc, +0x0ffffff80ffffff8, +0x1ffffff01ffffff0, +0x3fffffe03fffffe0, +0x7fffffc07fffffc0, +0xffffff80ffffff80, +0xffffff01ffffff01, +0xfffffe03fffffe03, +0xfffffc07fffffc07, +0xfffff80ffffff80f, +0xfffff01ffffff01f, +0xffffe03fffffe03f, +0xffffc07fffffc07f, +0xffff80ffffff80ff, +0xffff01ffffff01ff, +0xfffe03fffffe03ff, +0xfffc07fffffc07ff, +0xfff80ffffff80fff, +0xfff01ffffff01fff, +0xffe03fffffe03fff, +0xffc07fffffc07fff, +0xff80ffffff80ffff, +0xff01ffffff01ffff, +0xfe03fffffe03ffff, +0xfc07fffffc07ffff, +0xf80ffffff80fffff, +0xf01ffffff01fffff, +0xe03fffffe03fffff, +0xc07fffffc07fffff, +0x80ffffff80ffffff, +0x03ffffff03ffffff, +0x07fffffe07fffffe, +0x0ffffffc0ffffffc, +0x1ffffff81ffffff8, +0x3ffffff03ffffff0, +0x7fffffe07fffffe0, +0xffffffc0ffffffc0, +0xffffff81ffffff81, +0xffffff03ffffff03, +0xfffffe07fffffe07, +0xfffffc0ffffffc0f, +0xfffff81ffffff81f, +0xfffff03ffffff03f, +0xffffe07fffffe07f, +0xffffc0ffffffc0ff, +0xffff81ffffff81ff, +0xffff03ffffff03ff, +0xfffe07fffffe07ff, 
+0xfffc0ffffffc0fff, +0xfff81ffffff81fff, +0xfff03ffffff03fff, +0xffe07fffffe07fff, +0xffc0ffffffc0ffff, +0xff81ffffff81ffff, +0xff03ffffff03ffff, +0xfe07fffffe07ffff, +0xfc0ffffffc0fffff, +0xf81ffffff81fffff, +0xf03ffffff03fffff, +0xe07fffffe07fffff, +0xc0ffffffc0ffffff, +0x81ffffff81ffffff, +0x07ffffff07ffffff, +0x0ffffffe0ffffffe, +0x1ffffffc1ffffffc, +0x3ffffff83ffffff8, +0x7ffffff07ffffff0, +0xffffffe0ffffffe0, +0xffffffc1ffffffc1, +0xffffff83ffffff83, +0xffffff07ffffff07, +0xfffffe0ffffffe0f, +0xfffffc1ffffffc1f, +0xfffff83ffffff83f, +0xfffff07ffffff07f, +0xffffe0ffffffe0ff, +0xffffc1ffffffc1ff, +0xffff83ffffff83ff, +0xffff07ffffff07ff, +0xfffe0ffffffe0fff, +0xfffc1ffffffc1fff, +0xfff83ffffff83fff, +0xfff07ffffff07fff, +0xffe0ffffffe0ffff, +0xffc1ffffffc1ffff, +0xff83ffffff83ffff, +0xff07ffffff07ffff, +0xfe0ffffffe0fffff, +0xfc1ffffffc1fffff, +0xf83ffffff83fffff, +0xf07ffffff07fffff, +0xe0ffffffe0ffffff, +0xc1ffffffc1ffffff, +0x83ffffff83ffffff, +0x0fffffff0fffffff, +0x1ffffffe1ffffffe, +0x3ffffffc3ffffffc, +0x7ffffff87ffffff8, +0xfffffff0fffffff0, +0xffffffe1ffffffe1, +0xffffffc3ffffffc3, +0xffffff87ffffff87, +0xffffff0fffffff0f, +0xfffffe1ffffffe1f, +0xfffffc3ffffffc3f, +0xfffff87ffffff87f, +0xfffff0fffffff0ff, +0xffffe1ffffffe1ff, +0xffffc3ffffffc3ff, +0xffff87ffffff87ff, +0xffff0fffffff0fff, +0xfffe1ffffffe1fff, +0xfffc3ffffffc3fff, +0xfff87ffffff87fff, +0xfff0fffffff0ffff, +0xffe1ffffffe1ffff, +0xffc3ffffffc3ffff, +0xff87ffffff87ffff, +0xff0fffffff0fffff, +0xfe1ffffffe1fffff, +0xfc3ffffffc3fffff, +0xf87ffffff87fffff, +0xf0fffffff0ffffff, +0xe1ffffffe1ffffff, +0xc3ffffffc3ffffff, +0x87ffffff87ffffff, +0x1fffffff1fffffff, +0x3ffffffe3ffffffe, +0x7ffffffc7ffffffc, +0xfffffff8fffffff8, +0xfffffff1fffffff1, +0xffffffe3ffffffe3, +0xffffffc7ffffffc7, +0xffffff8fffffff8f, +0xffffff1fffffff1f, +0xfffffe3ffffffe3f, +0xfffffc7ffffffc7f, +0xfffff8fffffff8ff, +0xfffff1fffffff1ff, +0xffffe3ffffffe3ff, +0xffffc7ffffffc7ff, +0xffff8fffffff8fff, +0xffff1fffffff1fff, +0xfffe3ffffffe3fff, +0xfffc7ffffffc7fff, +0xfff8fffffff8ffff, +0xfff1fffffff1ffff, +0xffe3ffffffe3ffff, +0xffc7ffffffc7ffff, +0xff8fffffff8fffff, +0xff1fffffff1fffff, +0xfe3ffffffe3fffff, +0xfc7ffffffc7fffff, +0xf8fffffff8ffffff, +0xf1fffffff1ffffff, +0xe3ffffffe3ffffff, +0xc7ffffffc7ffffff, +0x8fffffff8fffffff, +0x3fffffff3fffffff, +0x7ffffffe7ffffffe, +0xfffffffcfffffffc, +0xfffffff9fffffff9, +0xfffffff3fffffff3, +0xffffffe7ffffffe7, +0xffffffcfffffffcf, +0xffffff9fffffff9f, +0xffffff3fffffff3f, +0xfffffe7ffffffe7f, +0xfffffcfffffffcff, +0xfffff9fffffff9ff, +0xfffff3fffffff3ff, +0xffffe7ffffffe7ff, +0xffffcfffffffcfff, +0xffff9fffffff9fff, +0xffff3fffffff3fff, +0xfffe7ffffffe7fff, +0xfffcfffffffcffff, +0xfff9fffffff9ffff, +0xfff3fffffff3ffff, +0xffe7ffffffe7ffff, +0xffcfffffffcfffff, +0xff9fffffff9fffff, +0xff3fffffff3fffff, +0xfe7ffffffe7fffff, +0xfcfffffffcffffff, +0xf9fffffff9ffffff, +0xf3fffffff3ffffff, +0xe7ffffffe7ffffff, +0xcfffffffcfffffff, +0x9fffffff9fffffff, +0x7fffffff7fffffff, +0xfffffffefffffffe, +0xfffffffdfffffffd, +0xfffffffbfffffffb, +0xfffffff7fffffff7, +0xffffffefffffffef, +0xffffffdfffffffdf, +0xffffffbfffffffbf, +0xffffff7fffffff7f, +0xfffffefffffffeff, +0xfffffdfffffffdff, +0xfffffbfffffffbff, +0xfffff7fffffff7ff, +0xffffefffffffefff, +0xffffdfffffffdfff, +0xffffbfffffffbfff, +0xffff7fffffff7fff, +0xfffefffffffeffff, +0xfffdfffffffdffff, +0xfffbfffffffbffff, +0xfff7fffffff7ffff, +0xffefffffffefffff, +0xffdfffffffdfffff, +0xffbfffffffbfffff, +0xff7fffffff7fffff, +0xfefffffffeffffff, +0xfdfffffffdffffff, 
+0xfbfffffffbffffff, +0xf7fffffff7ffffff, +0xefffffffefffffff, +0xdfffffffdfffffff, +0xbfffffffbfffffff, +0x0000000000000001, +0x0000000000000002, +0x0000000000000004, +0x0000000000000008, +0x0000000000000010, +0x0000000000000020, +0x0000000000000040, +0x0000000000000080, +0x0000000000000100, +0x0000000000000200, +0x0000000000000400, +0x0000000000000800, +0x0000000000001000, +0x0000000000002000, +0x0000000000004000, +0x0000000000008000, +0x0000000000010000, +0x0000000000020000, +0x0000000000040000, +0x0000000000080000, +0x0000000000100000, +0x0000000000200000, +0x0000000000400000, +0x0000000000800000, +0x0000000001000000, +0x0000000002000000, +0x0000000004000000, +0x0000000008000000, +0x0000000010000000, +0x0000000020000000, +0x0000000040000000, +0x0000000080000000, +0x0000000100000000, +0x0000000200000000, +0x0000000400000000, +0x0000000800000000, +0x0000001000000000, +0x0000002000000000, +0x0000004000000000, +0x0000008000000000, +0x0000010000000000, +0x0000020000000000, +0x0000040000000000, +0x0000080000000000, +0x0000100000000000, +0x0000200000000000, +0x0000400000000000, +0x0000800000000000, +0x0001000000000000, +0x0002000000000000, +0x0004000000000000, +0x0008000000000000, +0x0010000000000000, +0x0020000000000000, +0x0040000000000000, +0x0080000000000000, +0x0100000000000000, +0x0200000000000000, +0x0400000000000000, +0x0800000000000000, +0x1000000000000000, +0x2000000000000000, +0x4000000000000000, +0x8000000000000000, +0x0000000000000003, +0x0000000000000006, +0x000000000000000c, +0x0000000000000018, +0x0000000000000030, +0x0000000000000060, +0x00000000000000c0, +0x0000000000000180, +0x0000000000000300, +0x0000000000000600, +0x0000000000000c00, +0x0000000000001800, +0x0000000000003000, +0x0000000000006000, +0x000000000000c000, +0x0000000000018000, +0x0000000000030000, +0x0000000000060000, +0x00000000000c0000, +0x0000000000180000, +0x0000000000300000, +0x0000000000600000, +0x0000000000c00000, +0x0000000001800000, +0x0000000003000000, +0x0000000006000000, +0x000000000c000000, +0x0000000018000000, +0x0000000030000000, +0x0000000060000000, +0x00000000c0000000, +0x0000000180000000, +0x0000000300000000, +0x0000000600000000, +0x0000000c00000000, +0x0000001800000000, +0x0000003000000000, +0x0000006000000000, +0x000000c000000000, +0x0000018000000000, +0x0000030000000000, +0x0000060000000000, +0x00000c0000000000, +0x0000180000000000, +0x0000300000000000, +0x0000600000000000, +0x0000c00000000000, +0x0001800000000000, +0x0003000000000000, +0x0006000000000000, +0x000c000000000000, +0x0018000000000000, +0x0030000000000000, +0x0060000000000000, +0x00c0000000000000, +0x0180000000000000, +0x0300000000000000, +0x0600000000000000, +0x0c00000000000000, +0x1800000000000000, +0x3000000000000000, +0x6000000000000000, +0xc000000000000000, +0x8000000000000001, +0x0000000000000007, +0x000000000000000e, +0x000000000000001c, +0x0000000000000038, +0x0000000000000070, +0x00000000000000e0, +0x00000000000001c0, +0x0000000000000380, +0x0000000000000700, +0x0000000000000e00, +0x0000000000001c00, +0x0000000000003800, +0x0000000000007000, +0x000000000000e000, +0x000000000001c000, +0x0000000000038000, +0x0000000000070000, +0x00000000000e0000, +0x00000000001c0000, +0x0000000000380000, +0x0000000000700000, +0x0000000000e00000, +0x0000000001c00000, +0x0000000003800000, +0x0000000007000000, +0x000000000e000000, +0x000000001c000000, +0x0000000038000000, +0x0000000070000000, +0x00000000e0000000, +0x00000001c0000000, +0x0000000380000000, +0x0000000700000000, +0x0000000e00000000, +0x0000001c00000000, +0x0000003800000000, 
+0x0000007000000000, +0x000000e000000000, +0x000001c000000000, +0x0000038000000000, +0x0000070000000000, +0x00000e0000000000, +0x00001c0000000000, +0x0000380000000000, +0x0000700000000000, +0x0000e00000000000, +0x0001c00000000000, +0x0003800000000000, +0x0007000000000000, +0x000e000000000000, +0x001c000000000000, +0x0038000000000000, +0x0070000000000000, +0x00e0000000000000, +0x01c0000000000000, +0x0380000000000000, +0x0700000000000000, +0x0e00000000000000, +0x1c00000000000000, +0x3800000000000000, +0x7000000000000000, +0xe000000000000000, +0xc000000000000001, +0x8000000000000003, +0x000000000000000f, +0x000000000000001e, +0x000000000000003c, +0x0000000000000078, +0x00000000000000f0, +0x00000000000001e0, +0x00000000000003c0, +0x0000000000000780, +0x0000000000000f00, +0x0000000000001e00, +0x0000000000003c00, +0x0000000000007800, +0x000000000000f000, +0x000000000001e000, +0x000000000003c000, +0x0000000000078000, +0x00000000000f0000, +0x00000000001e0000, +0x00000000003c0000, +0x0000000000780000, +0x0000000000f00000, +0x0000000001e00000, +0x0000000003c00000, +0x0000000007800000, +0x000000000f000000, +0x000000001e000000, +0x000000003c000000, +0x0000000078000000, +0x00000000f0000000, +0x00000001e0000000, +0x00000003c0000000, +0x0000000780000000, +0x0000000f00000000, +0x0000001e00000000, +0x0000003c00000000, +0x0000007800000000, +0x000000f000000000, +0x000001e000000000, +0x000003c000000000, +0x0000078000000000, +0x00000f0000000000, +0x00001e0000000000, +0x00003c0000000000, +0x0000780000000000, +0x0000f00000000000, +0x0001e00000000000, +0x0003c00000000000, +0x0007800000000000, +0x000f000000000000, +0x001e000000000000, +0x003c000000000000, +0x0078000000000000, +0x00f0000000000000, +0x01e0000000000000, +0x03c0000000000000, +0x0780000000000000, +0x0f00000000000000, +0x1e00000000000000, +0x3c00000000000000, +0x7800000000000000, +0xf000000000000000, +0xe000000000000001, +0xc000000000000003, +0x8000000000000007, +0x000000000000001f, +0x000000000000003e, +0x000000000000007c, +0x00000000000000f8, +0x00000000000001f0, +0x00000000000003e0, +0x00000000000007c0, +0x0000000000000f80, +0x0000000000001f00, +0x0000000000003e00, +0x0000000000007c00, +0x000000000000f800, +0x000000000001f000, +0x000000000003e000, +0x000000000007c000, +0x00000000000f8000, +0x00000000001f0000, +0x00000000003e0000, +0x00000000007c0000, +0x0000000000f80000, +0x0000000001f00000, +0x0000000003e00000, +0x0000000007c00000, +0x000000000f800000, +0x000000001f000000, +0x000000003e000000, +0x000000007c000000, +0x00000000f8000000, +0x00000001f0000000, +0x00000003e0000000, +0x00000007c0000000, +0x0000000f80000000, +0x0000001f00000000, +0x0000003e00000000, +0x0000007c00000000, +0x000000f800000000, +0x000001f000000000, +0x000003e000000000, +0x000007c000000000, +0x00000f8000000000, +0x00001f0000000000, +0x00003e0000000000, +0x00007c0000000000, +0x0000f80000000000, +0x0001f00000000000, +0x0003e00000000000, +0x0007c00000000000, +0x000f800000000000, +0x001f000000000000, +0x003e000000000000, +0x007c000000000000, +0x00f8000000000000, +0x01f0000000000000, +0x03e0000000000000, +0x07c0000000000000, +0x0f80000000000000, +0x1f00000000000000, +0x3e00000000000000, +0x7c00000000000000, +0xf800000000000000, +0xf000000000000001, +0xe000000000000003, +0xc000000000000007, +0x800000000000000f, +0x000000000000003f, +0x000000000000007e, +0x00000000000000fc, +0x00000000000001f8, +0x00000000000003f0, +0x00000000000007e0, +0x0000000000000fc0, +0x0000000000001f80, +0x0000000000003f00, +0x0000000000007e00, +0x000000000000fc00, +0x000000000001f800, +0x000000000003f000, 
+0x000000000007e000, +0x00000000000fc000, +0x00000000001f8000, +0x00000000003f0000, +0x00000000007e0000, +0x0000000000fc0000, +0x0000000001f80000, +0x0000000003f00000, +0x0000000007e00000, +0x000000000fc00000, +0x000000001f800000, +0x000000003f000000, +0x000000007e000000, +0x00000000fc000000, +0x00000001f8000000, +0x00000003f0000000, +0x00000007e0000000, +0x0000000fc0000000, +0x0000001f80000000, +0x0000003f00000000, +0x0000007e00000000, +0x000000fc00000000, +0x000001f800000000, +0x000003f000000000, +0x000007e000000000, +0x00000fc000000000, +0x00001f8000000000, +0x00003f0000000000, +0x00007e0000000000, +0x0000fc0000000000, +0x0001f80000000000, +0x0003f00000000000, +0x0007e00000000000, +0x000fc00000000000, +0x001f800000000000, +0x003f000000000000, +0x007e000000000000, +0x00fc000000000000, +0x01f8000000000000, +0x03f0000000000000, +0x07e0000000000000, +0x0fc0000000000000, +0x1f80000000000000, +0x3f00000000000000, +0x7e00000000000000, +0xfc00000000000000, +0xf800000000000001, +0xf000000000000003, +0xe000000000000007, +0xc00000000000000f, +0x800000000000001f, +0x000000000000007f, +0x00000000000000fe, +0x00000000000001fc, +0x00000000000003f8, +0x00000000000007f0, +0x0000000000000fe0, +0x0000000000001fc0, +0x0000000000003f80, +0x0000000000007f00, +0x000000000000fe00, +0x000000000001fc00, +0x000000000003f800, +0x000000000007f000, +0x00000000000fe000, +0x00000000001fc000, +0x00000000003f8000, +0x00000000007f0000, +0x0000000000fe0000, +0x0000000001fc0000, +0x0000000003f80000, +0x0000000007f00000, +0x000000000fe00000, +0x000000001fc00000, +0x000000003f800000, +0x000000007f000000, +0x00000000fe000000, +0x00000001fc000000, +0x00000003f8000000, +0x00000007f0000000, +0x0000000fe0000000, +0x0000001fc0000000, +0x0000003f80000000, +0x0000007f00000000, +0x000000fe00000000, +0x000001fc00000000, +0x000003f800000000, +0x000007f000000000, +0x00000fe000000000, +0x00001fc000000000, +0x00003f8000000000, +0x00007f0000000000, +0x0000fe0000000000, +0x0001fc0000000000, +0x0003f80000000000, +0x0007f00000000000, +0x000fe00000000000, +0x001fc00000000000, +0x003f800000000000, +0x007f000000000000, +0x00fe000000000000, +0x01fc000000000000, +0x03f8000000000000, +0x07f0000000000000, +0x0fe0000000000000, +0x1fc0000000000000, +0x3f80000000000000, +0x7f00000000000000, +0xfe00000000000000, +0xfc00000000000001, +0xf800000000000003, +0xf000000000000007, +0xe00000000000000f, +0xc00000000000001f, +0x800000000000003f, +0x00000000000000ff, +0x00000000000001fe, +0x00000000000003fc, +0x00000000000007f8, +0x0000000000000ff0, +0x0000000000001fe0, +0x0000000000003fc0, +0x0000000000007f80, +0x000000000000ff00, +0x000000000001fe00, +0x000000000003fc00, +0x000000000007f800, +0x00000000000ff000, +0x00000000001fe000, +0x00000000003fc000, +0x00000000007f8000, +0x0000000000ff0000, +0x0000000001fe0000, +0x0000000003fc0000, +0x0000000007f80000, +0x000000000ff00000, +0x000000001fe00000, +0x000000003fc00000, +0x000000007f800000, +0x00000000ff000000, +0x00000001fe000000, +0x00000003fc000000, +0x00000007f8000000, +0x0000000ff0000000, +0x0000001fe0000000, +0x0000003fc0000000, +0x0000007f80000000, +0x000000ff00000000, +0x000001fe00000000, +0x000003fc00000000, +0x000007f800000000, +0x00000ff000000000, +0x00001fe000000000, +0x00003fc000000000, +0x00007f8000000000, +0x0000ff0000000000, +0x0001fe0000000000, +0x0003fc0000000000, +0x0007f80000000000, +0x000ff00000000000, +0x001fe00000000000, +0x003fc00000000000, +0x007f800000000000, +0x00ff000000000000, +0x01fe000000000000, +0x03fc000000000000, +0x07f8000000000000, +0x0ff0000000000000, +0x1fe0000000000000, 
+0x3fc0000000000000, +0x7f80000000000000, +0xff00000000000000, +0xfe00000000000001, +0xfc00000000000003, +0xf800000000000007, +0xf00000000000000f, +0xe00000000000001f, +0xc00000000000003f, +0x800000000000007f, +0x00000000000001ff, +0x00000000000003fe, +0x00000000000007fc, +0x0000000000000ff8, +0x0000000000001ff0, +0x0000000000003fe0, +0x0000000000007fc0, +0x000000000000ff80, +0x000000000001ff00, +0x000000000003fe00, +0x000000000007fc00, +0x00000000000ff800, +0x00000000001ff000, +0x00000000003fe000, +0x00000000007fc000, +0x0000000000ff8000, +0x0000000001ff0000, +0x0000000003fe0000, +0x0000000007fc0000, +0x000000000ff80000, +0x000000001ff00000, +0x000000003fe00000, +0x000000007fc00000, +0x00000000ff800000, +0x00000001ff000000, +0x00000003fe000000, +0x00000007fc000000, +0x0000000ff8000000, +0x0000001ff0000000, +0x0000003fe0000000, +0x0000007fc0000000, +0x000000ff80000000, +0x000001ff00000000, +0x000003fe00000000, +0x000007fc00000000, +0x00000ff800000000, +0x00001ff000000000, +0x00003fe000000000, +0x00007fc000000000, +0x0000ff8000000000, +0x0001ff0000000000, +0x0003fe0000000000, +0x0007fc0000000000, +0x000ff80000000000, +0x001ff00000000000, +0x003fe00000000000, +0x007fc00000000000, +0x00ff800000000000, +0x01ff000000000000, +0x03fe000000000000, +0x07fc000000000000, +0x0ff8000000000000, +0x1ff0000000000000, +0x3fe0000000000000, +0x7fc0000000000000, +0xff80000000000000, +0xff00000000000001, +0xfe00000000000003, +0xfc00000000000007, +0xf80000000000000f, +0xf00000000000001f, +0xe00000000000003f, +0xc00000000000007f, +0x80000000000000ff, +0x00000000000003ff, +0x00000000000007fe, +0x0000000000000ffc, +0x0000000000001ff8, +0x0000000000003ff0, +0x0000000000007fe0, +0x000000000000ffc0, +0x000000000001ff80, +0x000000000003ff00, +0x000000000007fe00, +0x00000000000ffc00, +0x00000000001ff800, +0x00000000003ff000, +0x00000000007fe000, +0x0000000000ffc000, +0x0000000001ff8000, +0x0000000003ff0000, +0x0000000007fe0000, +0x000000000ffc0000, +0x000000001ff80000, +0x000000003ff00000, +0x000000007fe00000, +0x00000000ffc00000, +0x00000001ff800000, +0x00000003ff000000, +0x00000007fe000000, +0x0000000ffc000000, +0x0000001ff8000000, +0x0000003ff0000000, +0x0000007fe0000000, +0x000000ffc0000000, +0x000001ff80000000, +0x000003ff00000000, +0x000007fe00000000, +0x00000ffc00000000, +0x00001ff800000000, +0x00003ff000000000, +0x00007fe000000000, +0x0000ffc000000000, +0x0001ff8000000000, +0x0003ff0000000000, +0x0007fe0000000000, +0x000ffc0000000000, +0x001ff80000000000, +0x003ff00000000000, +0x007fe00000000000, +0x00ffc00000000000, +0x01ff800000000000, +0x03ff000000000000, +0x07fe000000000000, +0x0ffc000000000000, +0x1ff8000000000000, +0x3ff0000000000000, +0x7fe0000000000000, +0xffc0000000000000, +0xff80000000000001, +0xff00000000000003, +0xfe00000000000007, +0xfc0000000000000f, +0xf80000000000001f, +0xf00000000000003f, +0xe00000000000007f, +0xc0000000000000ff, +0x80000000000001ff, +0x00000000000007ff, +0x0000000000000ffe, +0x0000000000001ffc, +0x0000000000003ff8, +0x0000000000007ff0, +0x000000000000ffe0, +0x000000000001ffc0, +0x000000000003ff80, +0x000000000007ff00, +0x00000000000ffe00, +0x00000000001ffc00, +0x00000000003ff800, +0x00000000007ff000, +0x0000000000ffe000, +0x0000000001ffc000, +0x0000000003ff8000, +0x0000000007ff0000, +0x000000000ffe0000, +0x000000001ffc0000, +0x000000003ff80000, +0x000000007ff00000, +0x00000000ffe00000, +0x00000001ffc00000, +0x00000003ff800000, +0x00000007ff000000, +0x0000000ffe000000, +0x0000001ffc000000, +0x0000003ff8000000, +0x0000007ff0000000, +0x000000ffe0000000, +0x000001ffc0000000, 
+0x000003ff80000000, +0x000007ff00000000, +0x00000ffe00000000, +0x00001ffc00000000, +0x00003ff800000000, +0x00007ff000000000, +0x0000ffe000000000, +0x0001ffc000000000, +0x0003ff8000000000, +0x0007ff0000000000, +0x000ffe0000000000, +0x001ffc0000000000, +0x003ff80000000000, +0x007ff00000000000, +0x00ffe00000000000, +0x01ffc00000000000, +0x03ff800000000000, +0x07ff000000000000, +0x0ffe000000000000, +0x1ffc000000000000, +0x3ff8000000000000, +0x7ff0000000000000, +0xffe0000000000000, +0xffc0000000000001, +0xff80000000000003, +0xff00000000000007, +0xfe0000000000000f, +0xfc0000000000001f, +0xf80000000000003f, +0xf00000000000007f, +0xe0000000000000ff, +0xc0000000000001ff, +0x80000000000003ff, +0x0000000000000fff, +0x0000000000001ffe, +0x0000000000003ffc, +0x0000000000007ff8, +0x000000000000fff0, +0x000000000001ffe0, +0x000000000003ffc0, +0x000000000007ff80, +0x00000000000fff00, +0x00000000001ffe00, +0x00000000003ffc00, +0x00000000007ff800, +0x0000000000fff000, +0x0000000001ffe000, +0x0000000003ffc000, +0x0000000007ff8000, +0x000000000fff0000, +0x000000001ffe0000, +0x000000003ffc0000, +0x000000007ff80000, +0x00000000fff00000, +0x00000001ffe00000, +0x00000003ffc00000, +0x00000007ff800000, +0x0000000fff000000, +0x0000001ffe000000, +0x0000003ffc000000, +0x0000007ff8000000, +0x000000fff0000000, +0x000001ffe0000000, +0x000003ffc0000000, +0x000007ff80000000, +0x00000fff00000000, +0x00001ffe00000000, +0x00003ffc00000000, +0x00007ff800000000, +0x0000fff000000000, +0x0001ffe000000000, +0x0003ffc000000000, +0x0007ff8000000000, +0x000fff0000000000, +0x001ffe0000000000, +0x003ffc0000000000, +0x007ff80000000000, +0x00fff00000000000, +0x01ffe00000000000, +0x03ffc00000000000, +0x07ff800000000000, +0x0fff000000000000, +0x1ffe000000000000, +0x3ffc000000000000, +0x7ff8000000000000, +0xfff0000000000000, +0xffe0000000000001, +0xffc0000000000003, +0xff80000000000007, +0xff0000000000000f, +0xfe0000000000001f, +0xfc0000000000003f, +0xf80000000000007f, +0xf0000000000000ff, +0xe0000000000001ff, +0xc0000000000003ff, +0x80000000000007ff, +0x0000000000001fff, +0x0000000000003ffe, +0x0000000000007ffc, +0x000000000000fff8, +0x000000000001fff0, +0x000000000003ffe0, +0x000000000007ffc0, +0x00000000000fff80, +0x00000000001fff00, +0x00000000003ffe00, +0x00000000007ffc00, +0x0000000000fff800, +0x0000000001fff000, +0x0000000003ffe000, +0x0000000007ffc000, +0x000000000fff8000, +0x000000001fff0000, +0x000000003ffe0000, +0x000000007ffc0000, +0x00000000fff80000, +0x00000001fff00000, +0x00000003ffe00000, +0x00000007ffc00000, +0x0000000fff800000, +0x0000001fff000000, +0x0000003ffe000000, +0x0000007ffc000000, +0x000000fff8000000, +0x000001fff0000000, +0x000003ffe0000000, +0x000007ffc0000000, +0x00000fff80000000, +0x00001fff00000000, +0x00003ffe00000000, +0x00007ffc00000000, +0x0000fff800000000, +0x0001fff000000000, +0x0003ffe000000000, +0x0007ffc000000000, +0x000fff8000000000, +0x001fff0000000000, +0x003ffe0000000000, +0x007ffc0000000000, +0x00fff80000000000, +0x01fff00000000000, +0x03ffe00000000000, +0x07ffc00000000000, +0x0fff800000000000, +0x1fff000000000000, +0x3ffe000000000000, +0x7ffc000000000000, +0xfff8000000000000, +0xfff0000000000001, +0xffe0000000000003, +0xffc0000000000007, +0xff8000000000000f, +0xff0000000000001f, +0xfe0000000000003f, +0xfc0000000000007f, +0xf8000000000000ff, +0xf0000000000001ff, +0xe0000000000003ff, +0xc0000000000007ff, +0x8000000000000fff, +0x0000000000003fff, +0x0000000000007ffe, +0x000000000000fffc, +0x000000000001fff8, +0x000000000003fff0, +0x000000000007ffe0, +0x00000000000fffc0, +0x00000000001fff80, 
+0x00000000003fff00, +0x00000000007ffe00, +0x0000000000fffc00, +0x0000000001fff800, +0x0000000003fff000, +0x0000000007ffe000, +0x000000000fffc000, +0x000000001fff8000, +0x000000003fff0000, +0x000000007ffe0000, +0x00000000fffc0000, +0x00000001fff80000, +0x00000003fff00000, +0x00000007ffe00000, +0x0000000fffc00000, +0x0000001fff800000, +0x0000003fff000000, +0x0000007ffe000000, +0x000000fffc000000, +0x000001fff8000000, +0x000003fff0000000, +0x000007ffe0000000, +0x00000fffc0000000, +0x00001fff80000000, +0x00003fff00000000, +0x00007ffe00000000, +0x0000fffc00000000, +0x0001fff800000000, +0x0003fff000000000, +0x0007ffe000000000, +0x000fffc000000000, +0x001fff8000000000, +0x003fff0000000000, +0x007ffe0000000000, +0x00fffc0000000000, +0x01fff80000000000, +0x03fff00000000000, +0x07ffe00000000000, +0x0fffc00000000000, +0x1fff800000000000, +0x3fff000000000000, +0x7ffe000000000000, +0xfffc000000000000, +0xfff8000000000001, +0xfff0000000000003, +0xffe0000000000007, +0xffc000000000000f, +0xff8000000000001f, +0xff0000000000003f, +0xfe0000000000007f, +0xfc000000000000ff, +0xf8000000000001ff, +0xf0000000000003ff, +0xe0000000000007ff, +0xc000000000000fff, +0x8000000000001fff, +0x0000000000007fff, +0x000000000000fffe, +0x000000000001fffc, +0x000000000003fff8, +0x000000000007fff0, +0x00000000000fffe0, +0x00000000001fffc0, +0x00000000003fff80, +0x00000000007fff00, +0x0000000000fffe00, +0x0000000001fffc00, +0x0000000003fff800, +0x0000000007fff000, +0x000000000fffe000, +0x000000001fffc000, +0x000000003fff8000, +0x000000007fff0000, +0x00000000fffe0000, +0x00000001fffc0000, +0x00000003fff80000, +0x00000007fff00000, +0x0000000fffe00000, +0x0000001fffc00000, +0x0000003fff800000, +0x0000007fff000000, +0x000000fffe000000, +0x000001fffc000000, +0x000003fff8000000, +0x000007fff0000000, +0x00000fffe0000000, +0x00001fffc0000000, +0x00003fff80000000, +0x00007fff00000000, +0x0000fffe00000000, +0x0001fffc00000000, +0x0003fff800000000, +0x0007fff000000000, +0x000fffe000000000, +0x001fffc000000000, +0x003fff8000000000, +0x007fff0000000000, +0x00fffe0000000000, +0x01fffc0000000000, +0x03fff80000000000, +0x07fff00000000000, +0x0fffe00000000000, +0x1fffc00000000000, +0x3fff800000000000, +0x7fff000000000000, +0xfffe000000000000, +0xfffc000000000001, +0xfff8000000000003, +0xfff0000000000007, +0xffe000000000000f, +0xffc000000000001f, +0xff8000000000003f, +0xff0000000000007f, +0xfe000000000000ff, +0xfc000000000001ff, +0xf8000000000003ff, +0xf0000000000007ff, +0xe000000000000fff, +0xc000000000001fff, +0x8000000000003fff, +0x000000000000ffff, +0x000000000001fffe, +0x000000000003fffc, +0x000000000007fff8, +0x00000000000ffff0, +0x00000000001fffe0, +0x00000000003fffc0, +0x00000000007fff80, +0x0000000000ffff00, +0x0000000001fffe00, +0x0000000003fffc00, +0x0000000007fff800, +0x000000000ffff000, +0x000000001fffe000, +0x000000003fffc000, +0x000000007fff8000, +0x00000000ffff0000, +0x00000001fffe0000, +0x00000003fffc0000, +0x00000007fff80000, +0x0000000ffff00000, +0x0000001fffe00000, +0x0000003fffc00000, +0x0000007fff800000, +0x000000ffff000000, +0x000001fffe000000, +0x000003fffc000000, +0x000007fff8000000, +0x00000ffff0000000, +0x00001fffe0000000, +0x00003fffc0000000, +0x00007fff80000000, +0x0000ffff00000000, +0x0001fffe00000000, +0x0003fffc00000000, +0x0007fff800000000, +0x000ffff000000000, +0x001fffe000000000, +0x003fffc000000000, +0x007fff8000000000, +0x00ffff0000000000, +0x01fffe0000000000, +0x03fffc0000000000, +0x07fff80000000000, +0x0ffff00000000000, +0x1fffe00000000000, +0x3fffc00000000000, +0x7fff800000000000, +0xffff000000000000, 
+0xfffe000000000001, +0xfffc000000000003, +0xfff8000000000007, +0xfff000000000000f, +0xffe000000000001f, +0xffc000000000003f, +0xff8000000000007f, +0xff000000000000ff, +0xfe000000000001ff, +0xfc000000000003ff, +0xf8000000000007ff, +0xf000000000000fff, +0xe000000000001fff, +0xc000000000003fff, +0x8000000000007fff, +0x000000000001ffff, +0x000000000003fffe, +0x000000000007fffc, +0x00000000000ffff8, +0x00000000001ffff0, +0x00000000003fffe0, +0x00000000007fffc0, +0x0000000000ffff80, +0x0000000001ffff00, +0x0000000003fffe00, +0x0000000007fffc00, +0x000000000ffff800, +0x000000001ffff000, +0x000000003fffe000, +0x000000007fffc000, +0x00000000ffff8000, +0x00000001ffff0000, +0x00000003fffe0000, +0x00000007fffc0000, +0x0000000ffff80000, +0x0000001ffff00000, +0x0000003fffe00000, +0x0000007fffc00000, +0x000000ffff800000, +0x000001ffff000000, +0x000003fffe000000, +0x000007fffc000000, +0x00000ffff8000000, +0x00001ffff0000000, +0x00003fffe0000000, +0x00007fffc0000000, +0x0000ffff80000000, +0x0001ffff00000000, +0x0003fffe00000000, +0x0007fffc00000000, +0x000ffff800000000, +0x001ffff000000000, +0x003fffe000000000, +0x007fffc000000000, +0x00ffff8000000000, +0x01ffff0000000000, +0x03fffe0000000000, +0x07fffc0000000000, +0x0ffff80000000000, +0x1ffff00000000000, +0x3fffe00000000000, +0x7fffc00000000000, +0xffff800000000000, +0xffff000000000001, +0xfffe000000000003, +0xfffc000000000007, +0xfff800000000000f, +0xfff000000000001f, +0xffe000000000003f, +0xffc000000000007f, +0xff800000000000ff, +0xff000000000001ff, +0xfe000000000003ff, +0xfc000000000007ff, +0xf800000000000fff, +0xf000000000001fff, +0xe000000000003fff, +0xc000000000007fff, +0x800000000000ffff, +0x000000000003ffff, +0x000000000007fffe, +0x00000000000ffffc, +0x00000000001ffff8, +0x00000000003ffff0, +0x00000000007fffe0, +0x0000000000ffffc0, +0x0000000001ffff80, +0x0000000003ffff00, +0x0000000007fffe00, +0x000000000ffffc00, +0x000000001ffff800, +0x000000003ffff000, +0x000000007fffe000, +0x00000000ffffc000, +0x00000001ffff8000, +0x00000003ffff0000, +0x00000007fffe0000, +0x0000000ffffc0000, +0x0000001ffff80000, +0x0000003ffff00000, +0x0000007fffe00000, +0x000000ffffc00000, +0x000001ffff800000, +0x000003ffff000000, +0x000007fffe000000, +0x00000ffffc000000, +0x00001ffff8000000, +0x00003ffff0000000, +0x00007fffe0000000, +0x0000ffffc0000000, +0x0001ffff80000000, +0x0003ffff00000000, +0x0007fffe00000000, +0x000ffffc00000000, +0x001ffff800000000, +0x003ffff000000000, +0x007fffe000000000, +0x00ffffc000000000, +0x01ffff8000000000, +0x03ffff0000000000, +0x07fffe0000000000, +0x0ffffc0000000000, +0x1ffff80000000000, +0x3ffff00000000000, +0x7fffe00000000000, +0xffffc00000000000, +0xffff800000000001, +0xffff000000000003, +0xfffe000000000007, +0xfffc00000000000f, +0xfff800000000001f, +0xfff000000000003f, +0xffe000000000007f, +0xffc00000000000ff, +0xff800000000001ff, +0xff000000000003ff, +0xfe000000000007ff, +0xfc00000000000fff, +0xf800000000001fff, +0xf000000000003fff, +0xe000000000007fff, +0xc00000000000ffff, +0x800000000001ffff, +0x000000000007ffff, +0x00000000000ffffe, +0x00000000001ffffc, +0x00000000003ffff8, +0x00000000007ffff0, +0x0000000000ffffe0, +0x0000000001ffffc0, +0x0000000003ffff80, +0x0000000007ffff00, +0x000000000ffffe00, +0x000000001ffffc00, +0x000000003ffff800, +0x000000007ffff000, +0x00000000ffffe000, +0x00000001ffffc000, +0x00000003ffff8000, +0x00000007ffff0000, +0x0000000ffffe0000, +0x0000001ffffc0000, +0x0000003ffff80000, +0x0000007ffff00000, +0x000000ffffe00000, +0x000001ffffc00000, +0x000003ffff800000, +0x000007ffff000000, +0x00000ffffe000000, 
+0x00001ffffc000000, +0x00003ffff8000000, +0x00007ffff0000000, +0x0000ffffe0000000, +0x0001ffffc0000000, +0x0003ffff80000000, +0x0007ffff00000000, +0x000ffffe00000000, +0x001ffffc00000000, +0x003ffff800000000, +0x007ffff000000000, +0x00ffffe000000000, +0x01ffffc000000000, +0x03ffff8000000000, +0x07ffff0000000000, +0x0ffffe0000000000, +0x1ffffc0000000000, +0x3ffff80000000000, +0x7ffff00000000000, +0xffffe00000000000, +0xffffc00000000001, +0xffff800000000003, +0xffff000000000007, +0xfffe00000000000f, +0xfffc00000000001f, +0xfff800000000003f, +0xfff000000000007f, +0xffe00000000000ff, +0xffc00000000001ff, +0xff800000000003ff, +0xff000000000007ff, +0xfe00000000000fff, +0xfc00000000001fff, +0xf800000000003fff, +0xf000000000007fff, +0xe00000000000ffff, +0xc00000000001ffff, +0x800000000003ffff, +0x00000000000fffff, +0x00000000001ffffe, +0x00000000003ffffc, +0x00000000007ffff8, +0x0000000000fffff0, +0x0000000001ffffe0, +0x0000000003ffffc0, +0x0000000007ffff80, +0x000000000fffff00, +0x000000001ffffe00, +0x000000003ffffc00, +0x000000007ffff800, +0x00000000fffff000, +0x00000001ffffe000, +0x00000003ffffc000, +0x00000007ffff8000, +0x0000000fffff0000, +0x0000001ffffe0000, +0x0000003ffffc0000, +0x0000007ffff80000, +0x000000fffff00000, +0x000001ffffe00000, +0x000003ffffc00000, +0x000007ffff800000, +0x00000fffff000000, +0x00001ffffe000000, +0x00003ffffc000000, +0x00007ffff8000000, +0x0000fffff0000000, +0x0001ffffe0000000, +0x0003ffffc0000000, +0x0007ffff80000000, +0x000fffff00000000, +0x001ffffe00000000, +0x003ffffc00000000, +0x007ffff800000000, +0x00fffff000000000, +0x01ffffe000000000, +0x03ffffc000000000, +0x07ffff8000000000, +0x0fffff0000000000, +0x1ffffe0000000000, +0x3ffffc0000000000, +0x7ffff80000000000, +0xfffff00000000000, +0xffffe00000000001, +0xffffc00000000003, +0xffff800000000007, +0xffff00000000000f, +0xfffe00000000001f, +0xfffc00000000003f, +0xfff800000000007f, +0xfff00000000000ff, +0xffe00000000001ff, +0xffc00000000003ff, +0xff800000000007ff, +0xff00000000000fff, +0xfe00000000001fff, +0xfc00000000003fff, +0xf800000000007fff, +0xf00000000000ffff, +0xe00000000001ffff, +0xc00000000003ffff, +0x800000000007ffff, +0x00000000001fffff, +0x00000000003ffffe, +0x00000000007ffffc, +0x0000000000fffff8, +0x0000000001fffff0, +0x0000000003ffffe0, +0x0000000007ffffc0, +0x000000000fffff80, +0x000000001fffff00, +0x000000003ffffe00, +0x000000007ffffc00, +0x00000000fffff800, +0x00000001fffff000, +0x00000003ffffe000, +0x00000007ffffc000, +0x0000000fffff8000, +0x0000001fffff0000, +0x0000003ffffe0000, +0x0000007ffffc0000, +0x000000fffff80000, +0x000001fffff00000, +0x000003ffffe00000, +0x000007ffffc00000, +0x00000fffff800000, +0x00001fffff000000, +0x00003ffffe000000, +0x00007ffffc000000, +0x0000fffff8000000, +0x0001fffff0000000, +0x0003ffffe0000000, +0x0007ffffc0000000, +0x000fffff80000000, +0x001fffff00000000, +0x003ffffe00000000, +0x007ffffc00000000, +0x00fffff800000000, +0x01fffff000000000, +0x03ffffe000000000, +0x07ffffc000000000, +0x0fffff8000000000, +0x1fffff0000000000, +0x3ffffe0000000000, +0x7ffffc0000000000, +0xfffff80000000000, +0xfffff00000000001, +0xffffe00000000003, +0xffffc00000000007, +0xffff80000000000f, +0xffff00000000001f, +0xfffe00000000003f, +0xfffc00000000007f, +0xfff80000000000ff, +0xfff00000000001ff, +0xffe00000000003ff, +0xffc00000000007ff, +0xff80000000000fff, +0xff00000000001fff, +0xfe00000000003fff, +0xfc00000000007fff, +0xf80000000000ffff, +0xf00000000001ffff, +0xe00000000003ffff, +0xc00000000007ffff, +0x80000000000fffff, +0x00000000003fffff, +0x00000000007ffffe, +0x0000000000fffffc, 
+0x0000000001fffff8, +0x0000000003fffff0, +0x0000000007ffffe0, +0x000000000fffffc0, +0x000000001fffff80, +0x000000003fffff00, +0x000000007ffffe00, +0x00000000fffffc00, +0x00000001fffff800, +0x00000003fffff000, +0x00000007ffffe000, +0x0000000fffffc000, +0x0000001fffff8000, +0x0000003fffff0000, +0x0000007ffffe0000, +0x000000fffffc0000, +0x000001fffff80000, +0x000003fffff00000, +0x000007ffffe00000, +0x00000fffffc00000, +0x00001fffff800000, +0x00003fffff000000, +0x00007ffffe000000, +0x0000fffffc000000, +0x0001fffff8000000, +0x0003fffff0000000, +0x0007ffffe0000000, +0x000fffffc0000000, +0x001fffff80000000, +0x003fffff00000000, +0x007ffffe00000000, +0x00fffffc00000000, +0x01fffff800000000, +0x03fffff000000000, +0x07ffffe000000000, +0x0fffffc000000000, +0x1fffff8000000000, +0x3fffff0000000000, +0x7ffffe0000000000, +0xfffffc0000000000, +0xfffff80000000001, +0xfffff00000000003, +0xffffe00000000007, +0xffffc0000000000f, +0xffff80000000001f, +0xffff00000000003f, +0xfffe00000000007f, +0xfffc0000000000ff, +0xfff80000000001ff, +0xfff00000000003ff, +0xffe00000000007ff, +0xffc0000000000fff, +0xff80000000001fff, +0xff00000000003fff, +0xfe00000000007fff, +0xfc0000000000ffff, +0xf80000000001ffff, +0xf00000000003ffff, +0xe00000000007ffff, +0xc0000000000fffff, +0x80000000001fffff, +0x00000000007fffff, +0x0000000000fffffe, +0x0000000001fffffc, +0x0000000003fffff8, +0x0000000007fffff0, +0x000000000fffffe0, +0x000000001fffffc0, +0x000000003fffff80, +0x000000007fffff00, +0x00000000fffffe00, +0x00000001fffffc00, +0x00000003fffff800, +0x00000007fffff000, +0x0000000fffffe000, +0x0000001fffffc000, +0x0000003fffff8000, +0x0000007fffff0000, +0x000000fffffe0000, +0x000001fffffc0000, +0x000003fffff80000, +0x000007fffff00000, +0x00000fffffe00000, +0x00001fffffc00000, +0x00003fffff800000, +0x00007fffff000000, +0x0000fffffe000000, +0x0001fffffc000000, +0x0003fffff8000000, +0x0007fffff0000000, +0x000fffffe0000000, +0x001fffffc0000000, +0x003fffff80000000, +0x007fffff00000000, +0x00fffffe00000000, +0x01fffffc00000000, +0x03fffff800000000, +0x07fffff000000000, +0x0fffffe000000000, +0x1fffffc000000000, +0x3fffff8000000000, +0x7fffff0000000000, +0xfffffe0000000000, +0xfffffc0000000001, +0xfffff80000000003, +0xfffff00000000007, +0xffffe0000000000f, +0xffffc0000000001f, +0xffff80000000003f, +0xffff00000000007f, +0xfffe0000000000ff, +0xfffc0000000001ff, +0xfff80000000003ff, +0xfff00000000007ff, +0xffe0000000000fff, +0xffc0000000001fff, +0xff80000000003fff, +0xff00000000007fff, +0xfe0000000000ffff, +0xfc0000000001ffff, +0xf80000000003ffff, +0xf00000000007ffff, +0xe0000000000fffff, +0xc0000000001fffff, +0x80000000003fffff, +0x0000000000ffffff, +0x0000000001fffffe, +0x0000000003fffffc, +0x0000000007fffff8, +0x000000000ffffff0, +0x000000001fffffe0, +0x000000003fffffc0, +0x000000007fffff80, +0x00000000ffffff00, +0x00000001fffffe00, +0x00000003fffffc00, +0x00000007fffff800, +0x0000000ffffff000, +0x0000001fffffe000, +0x0000003fffffc000, +0x0000007fffff8000, +0x000000ffffff0000, +0x000001fffffe0000, +0x000003fffffc0000, +0x000007fffff80000, +0x00000ffffff00000, +0x00001fffffe00000, +0x00003fffffc00000, +0x00007fffff800000, +0x0000ffffff000000, +0x0001fffffe000000, +0x0003fffffc000000, +0x0007fffff8000000, +0x000ffffff0000000, +0x001fffffe0000000, +0x003fffffc0000000, +0x007fffff80000000, +0x00ffffff00000000, +0x01fffffe00000000, +0x03fffffc00000000, +0x07fffff800000000, +0x0ffffff000000000, +0x1fffffe000000000, +0x3fffffc000000000, +0x7fffff8000000000, +0xffffff0000000000, +0xfffffe0000000001, +0xfffffc0000000003, +0xfffff80000000007, 
+0xfffff0000000000f, +0xffffe0000000001f, +0xffffc0000000003f, +0xffff80000000007f, +0xffff0000000000ff, +0xfffe0000000001ff, +0xfffc0000000003ff, +0xfff80000000007ff, +0xfff0000000000fff, +0xffe0000000001fff, +0xffc0000000003fff, +0xff80000000007fff, +0xff0000000000ffff, +0xfe0000000001ffff, +0xfc0000000003ffff, +0xf80000000007ffff, +0xf0000000000fffff, +0xe0000000001fffff, +0xc0000000003fffff, +0x80000000007fffff, +0x0000000001ffffff, +0x0000000003fffffe, +0x0000000007fffffc, +0x000000000ffffff8, +0x000000001ffffff0, +0x000000003fffffe0, +0x000000007fffffc0, +0x00000000ffffff80, +0x00000001ffffff00, +0x00000003fffffe00, +0x00000007fffffc00, +0x0000000ffffff800, +0x0000001ffffff000, +0x0000003fffffe000, +0x0000007fffffc000, +0x000000ffffff8000, +0x000001ffffff0000, +0x000003fffffe0000, +0x000007fffffc0000, +0x00000ffffff80000, +0x00001ffffff00000, +0x00003fffffe00000, +0x00007fffffc00000, +0x0000ffffff800000, +0x0001ffffff000000, +0x0003fffffe000000, +0x0007fffffc000000, +0x000ffffff8000000, +0x001ffffff0000000, +0x003fffffe0000000, +0x007fffffc0000000, +0x00ffffff80000000, +0x01ffffff00000000, +0x03fffffe00000000, +0x07fffffc00000000, +0x0ffffff800000000, +0x1ffffff000000000, +0x3fffffe000000000, +0x7fffffc000000000, +0xffffff8000000000, +0xffffff0000000001, +0xfffffe0000000003, +0xfffffc0000000007, +0xfffff8000000000f, +0xfffff0000000001f, +0xffffe0000000003f, +0xffffc0000000007f, +0xffff8000000000ff, +0xffff0000000001ff, +0xfffe0000000003ff, +0xfffc0000000007ff, +0xfff8000000000fff, +0xfff0000000001fff, +0xffe0000000003fff, +0xffc0000000007fff, +0xff8000000000ffff, +0xff0000000001ffff, +0xfe0000000003ffff, +0xfc0000000007ffff, +0xf8000000000fffff, +0xf0000000001fffff, +0xe0000000003fffff, +0xc0000000007fffff, +0x8000000000ffffff, +0x0000000003ffffff, +0x0000000007fffffe, +0x000000000ffffffc, +0x000000001ffffff8, +0x000000003ffffff0, +0x000000007fffffe0, +0x00000000ffffffc0, +0x00000001ffffff80, +0x00000003ffffff00, +0x00000007fffffe00, +0x0000000ffffffc00, +0x0000001ffffff800, +0x0000003ffffff000, +0x0000007fffffe000, +0x000000ffffffc000, +0x000001ffffff8000, +0x000003ffffff0000, +0x000007fffffe0000, +0x00000ffffffc0000, +0x00001ffffff80000, +0x00003ffffff00000, +0x00007fffffe00000, +0x0000ffffffc00000, +0x0001ffffff800000, +0x0003ffffff000000, +0x0007fffffe000000, +0x000ffffffc000000, +0x001ffffff8000000, +0x003ffffff0000000, +0x007fffffe0000000, +0x00ffffffc0000000, +0x01ffffff80000000, +0x03ffffff00000000, +0x07fffffe00000000, +0x0ffffffc00000000, +0x1ffffff800000000, +0x3ffffff000000000, +0x7fffffe000000000, +0xffffffc000000000, +0xffffff8000000001, +0xffffff0000000003, +0xfffffe0000000007, +0xfffffc000000000f, +0xfffff8000000001f, +0xfffff0000000003f, +0xffffe0000000007f, +0xffffc000000000ff, +0xffff8000000001ff, +0xffff0000000003ff, +0xfffe0000000007ff, +0xfffc000000000fff, +0xfff8000000001fff, +0xfff0000000003fff, +0xffe0000000007fff, +0xffc000000000ffff, +0xff8000000001ffff, +0xff0000000003ffff, +0xfe0000000007ffff, +0xfc000000000fffff, +0xf8000000001fffff, +0xf0000000003fffff, +0xe0000000007fffff, +0xc000000000ffffff, +0x8000000001ffffff, +0x0000000007ffffff, +0x000000000ffffffe, +0x000000001ffffffc, +0x000000003ffffff8, +0x000000007ffffff0, +0x00000000ffffffe0, +0x00000001ffffffc0, +0x00000003ffffff80, +0x00000007ffffff00, +0x0000000ffffffe00, +0x0000001ffffffc00, +0x0000003ffffff800, +0x0000007ffffff000, +0x000000ffffffe000, +0x000001ffffffc000, +0x000003ffffff8000, +0x000007ffffff0000, +0x00000ffffffe0000, +0x00001ffffffc0000, +0x00003ffffff80000, +0x00007ffffff00000, 
+0x0000ffffffe00000, +0x0001ffffffc00000, +0x0003ffffff800000, +0x0007ffffff000000, +0x000ffffffe000000, +0x001ffffffc000000, +0x003ffffff8000000, +0x007ffffff0000000, +0x00ffffffe0000000, +0x01ffffffc0000000, +0x03ffffff80000000, +0x07ffffff00000000, +0x0ffffffe00000000, +0x1ffffffc00000000, +0x3ffffff800000000, +0x7ffffff000000000, +0xffffffe000000000, +0xffffffc000000001, +0xffffff8000000003, +0xffffff0000000007, +0xfffffe000000000f, +0xfffffc000000001f, +0xfffff8000000003f, +0xfffff0000000007f, +0xffffe000000000ff, +0xffffc000000001ff, +0xffff8000000003ff, +0xffff0000000007ff, +0xfffe000000000fff, +0xfffc000000001fff, +0xfff8000000003fff, +0xfff0000000007fff, +0xffe000000000ffff, +0xffc000000001ffff, +0xff8000000003ffff, +0xff0000000007ffff, +0xfe000000000fffff, +0xfc000000001fffff, +0xf8000000003fffff, +0xf0000000007fffff, +0xe000000000ffffff, +0xc000000001ffffff, +0x8000000003ffffff, +0x000000000fffffff, +0x000000001ffffffe, +0x000000003ffffffc, +0x000000007ffffff8, +0x00000000fffffff0, +0x00000001ffffffe0, +0x00000003ffffffc0, +0x00000007ffffff80, +0x0000000fffffff00, +0x0000001ffffffe00, +0x0000003ffffffc00, +0x0000007ffffff800, +0x000000fffffff000, +0x000001ffffffe000, +0x000003ffffffc000, +0x000007ffffff8000, +0x00000fffffff0000, +0x00001ffffffe0000, +0x00003ffffffc0000, +0x00007ffffff80000, +0x0000fffffff00000, +0x0001ffffffe00000, +0x0003ffffffc00000, +0x0007ffffff800000, +0x000fffffff000000, +0x001ffffffe000000, +0x003ffffffc000000, +0x007ffffff8000000, +0x00fffffff0000000, +0x01ffffffe0000000, +0x03ffffffc0000000, +0x07ffffff80000000, +0x0fffffff00000000, +0x1ffffffe00000000, +0x3ffffffc00000000, +0x7ffffff800000000, +0xfffffff000000000, +0xffffffe000000001, +0xffffffc000000003, +0xffffff8000000007, +0xffffff000000000f, +0xfffffe000000001f, +0xfffffc000000003f, +0xfffff8000000007f, +0xfffff000000000ff, +0xffffe000000001ff, +0xffffc000000003ff, +0xffff8000000007ff, +0xffff000000000fff, +0xfffe000000001fff, +0xfffc000000003fff, +0xfff8000000007fff, +0xfff000000000ffff, +0xffe000000001ffff, +0xffc000000003ffff, +0xff8000000007ffff, +0xff000000000fffff, +0xfe000000001fffff, +0xfc000000003fffff, +0xf8000000007fffff, +0xf000000000ffffff, +0xe000000001ffffff, +0xc000000003ffffff, +0x8000000007ffffff, +0x000000001fffffff, +0x000000003ffffffe, +0x000000007ffffffc, +0x00000000fffffff8, +0x00000001fffffff0, +0x00000003ffffffe0, +0x00000007ffffffc0, +0x0000000fffffff80, +0x0000001fffffff00, +0x0000003ffffffe00, +0x0000007ffffffc00, +0x000000fffffff800, +0x000001fffffff000, +0x000003ffffffe000, +0x000007ffffffc000, +0x00000fffffff8000, +0x00001fffffff0000, +0x00003ffffffe0000, +0x00007ffffffc0000, +0x0000fffffff80000, +0x0001fffffff00000, +0x0003ffffffe00000, +0x0007ffffffc00000, +0x000fffffff800000, +0x001fffffff000000, +0x003ffffffe000000, +0x007ffffffc000000, +0x00fffffff8000000, +0x01fffffff0000000, +0x03ffffffe0000000, +0x07ffffffc0000000, +0x0fffffff80000000, +0x1fffffff00000000, +0x3ffffffe00000000, +0x7ffffffc00000000, +0xfffffff800000000, +0xfffffff000000001, +0xffffffe000000003, +0xffffffc000000007, +0xffffff800000000f, +0xffffff000000001f, +0xfffffe000000003f, +0xfffffc000000007f, +0xfffff800000000ff, +0xfffff000000001ff, +0xffffe000000003ff, +0xffffc000000007ff, +0xffff800000000fff, +0xffff000000001fff, +0xfffe000000003fff, +0xfffc000000007fff, +0xfff800000000ffff, +0xfff000000001ffff, +0xffe000000003ffff, +0xffc000000007ffff, +0xff800000000fffff, +0xff000000001fffff, +0xfe000000003fffff, +0xfc000000007fffff, +0xf800000000ffffff, +0xf000000001ffffff, +0xe000000003ffffff, 
+0xc000000007ffffff, +0x800000000fffffff, +0x000000003fffffff, +0x000000007ffffffe, +0x00000000fffffffc, +0x00000001fffffff8, +0x00000003fffffff0, +0x00000007ffffffe0, +0x0000000fffffffc0, +0x0000001fffffff80, +0x0000003fffffff00, +0x0000007ffffffe00, +0x000000fffffffc00, +0x000001fffffff800, +0x000003fffffff000, +0x000007ffffffe000, +0x00000fffffffc000, +0x00001fffffff8000, +0x00003fffffff0000, +0x00007ffffffe0000, +0x0000fffffffc0000, +0x0001fffffff80000, +0x0003fffffff00000, +0x0007ffffffe00000, +0x000fffffffc00000, +0x001fffffff800000, +0x003fffffff000000, +0x007ffffffe000000, +0x00fffffffc000000, +0x01fffffff8000000, +0x03fffffff0000000, +0x07ffffffe0000000, +0x0fffffffc0000000, +0x1fffffff80000000, +0x3fffffff00000000, +0x7ffffffe00000000, +0xfffffffc00000000, +0xfffffff800000001, +0xfffffff000000003, +0xffffffe000000007, +0xffffffc00000000f, +0xffffff800000001f, +0xffffff000000003f, +0xfffffe000000007f, +0xfffffc00000000ff, +0xfffff800000001ff, +0xfffff000000003ff, +0xffffe000000007ff, +0xffffc00000000fff, +0xffff800000001fff, +0xffff000000003fff, +0xfffe000000007fff, +0xfffc00000000ffff, +0xfff800000001ffff, +0xfff000000003ffff, +0xffe000000007ffff, +0xffc00000000fffff, +0xff800000001fffff, +0xff000000003fffff, +0xfe000000007fffff, +0xfc00000000ffffff, +0xf800000001ffffff, +0xf000000003ffffff, +0xe000000007ffffff, +0xc00000000fffffff, +0x800000001fffffff, +0x000000007fffffff, +0x00000000fffffffe, +0x00000001fffffffc, +0x00000003fffffff8, +0x00000007fffffff0, +0x0000000fffffffe0, +0x0000001fffffffc0, +0x0000003fffffff80, +0x0000007fffffff00, +0x000000fffffffe00, +0x000001fffffffc00, +0x000003fffffff800, +0x000007fffffff000, +0x00000fffffffe000, +0x00001fffffffc000, +0x00003fffffff8000, +0x00007fffffff0000, +0x0000fffffffe0000, +0x0001fffffffc0000, +0x0003fffffff80000, +0x0007fffffff00000, +0x000fffffffe00000, +0x001fffffffc00000, +0x003fffffff800000, +0x007fffffff000000, +0x00fffffffe000000, +0x01fffffffc000000, +0x03fffffff8000000, +0x07fffffff0000000, +0x0fffffffe0000000, +0x1fffffffc0000000, +0x3fffffff80000000, +0x7fffffff00000000, +0xfffffffe00000000, +0xfffffffc00000001, +0xfffffff800000003, +0xfffffff000000007, +0xffffffe00000000f, +0xffffffc00000001f, +0xffffff800000003f, +0xffffff000000007f, +0xfffffe00000000ff, +0xfffffc00000001ff, +0xfffff800000003ff, +0xfffff000000007ff, +0xffffe00000000fff, +0xffffc00000001fff, +0xffff800000003fff, +0xffff000000007fff, +0xfffe00000000ffff, +0xfffc00000001ffff, +0xfff800000003ffff, +0xfff000000007ffff, +0xffe00000000fffff, +0xffc00000001fffff, +0xff800000003fffff, +0xff000000007fffff, +0xfe00000000ffffff, +0xfc00000001ffffff, +0xf800000003ffffff, +0xf000000007ffffff, +0xe00000000fffffff, +0xc00000001fffffff, +0x800000003fffffff, +0x00000000ffffffff, +0x00000001fffffffe, +0x00000003fffffffc, +0x00000007fffffff8, +0x0000000ffffffff0, +0x0000001fffffffe0, +0x0000003fffffffc0, +0x0000007fffffff80, +0x000000ffffffff00, +0x000001fffffffe00, +0x000003fffffffc00, +0x000007fffffff800, +0x00000ffffffff000, +0x00001fffffffe000, +0x00003fffffffc000, +0x00007fffffff8000, +0x0000ffffffff0000, +0x0001fffffffe0000, +0x0003fffffffc0000, +0x0007fffffff80000, +0x000ffffffff00000, +0x001fffffffe00000, +0x003fffffffc00000, +0x007fffffff800000, +0x00ffffffff000000, +0x01fffffffe000000, +0x03fffffffc000000, +0x07fffffff8000000, +0x0ffffffff0000000, +0x1fffffffe0000000, +0x3fffffffc0000000, +0x7fffffff80000000, +0xffffffff00000000, +0xfffffffe00000001, +0xfffffffc00000003, +0xfffffff800000007, +0xfffffff00000000f, +0xffffffe00000001f, +0xffffffc00000003f, 
+0xffffff800000007f, +0xffffff00000000ff, +0xfffffe00000001ff, +0xfffffc00000003ff, +0xfffff800000007ff, +0xfffff00000000fff, +0xffffe00000001fff, +0xffffc00000003fff, +0xffff800000007fff, +0xffff00000000ffff, +0xfffe00000001ffff, +0xfffc00000003ffff, +0xfff800000007ffff, +0xfff00000000fffff, +0xffe00000001fffff, +0xffc00000003fffff, +0xff800000007fffff, +0xff00000000ffffff, +0xfe00000001ffffff, +0xfc00000003ffffff, +0xf800000007ffffff, +0xf00000000fffffff, +0xe00000001fffffff, +0xc00000003fffffff, +0x800000007fffffff, +0x00000001ffffffff, +0x00000003fffffffe, +0x00000007fffffffc, +0x0000000ffffffff8, +0x0000001ffffffff0, +0x0000003fffffffe0, +0x0000007fffffffc0, +0x000000ffffffff80, +0x000001ffffffff00, +0x000003fffffffe00, +0x000007fffffffc00, +0x00000ffffffff800, +0x00001ffffffff000, +0x00003fffffffe000, +0x00007fffffffc000, +0x0000ffffffff8000, +0x0001ffffffff0000, +0x0003fffffffe0000, +0x0007fffffffc0000, +0x000ffffffff80000, +0x001ffffffff00000, +0x003fffffffe00000, +0x007fffffffc00000, +0x00ffffffff800000, +0x01ffffffff000000, +0x03fffffffe000000, +0x07fffffffc000000, +0x0ffffffff8000000, +0x1ffffffff0000000, +0x3fffffffe0000000, +0x7fffffffc0000000, +0xffffffff80000000, +0xffffffff00000001, +0xfffffffe00000003, +0xfffffffc00000007, +0xfffffff80000000f, +0xfffffff00000001f, +0xffffffe00000003f, +0xffffffc00000007f, +0xffffff80000000ff, +0xffffff00000001ff, +0xfffffe00000003ff, +0xfffffc00000007ff, +0xfffff80000000fff, +0xfffff00000001fff, +0xffffe00000003fff, +0xffffc00000007fff, +0xffff80000000ffff, +0xffff00000001ffff, +0xfffe00000003ffff, +0xfffc00000007ffff, +0xfff80000000fffff, +0xfff00000001fffff, +0xffe00000003fffff, +0xffc00000007fffff, +0xff80000000ffffff, +0xff00000001ffffff, +0xfe00000003ffffff, +0xfc00000007ffffff, +0xf80000000fffffff, +0xf00000001fffffff, +0xe00000003fffffff, +0xc00000007fffffff, +0x80000000ffffffff, +0x00000003ffffffff, +0x00000007fffffffe, +0x0000000ffffffffc, +0x0000001ffffffff8, +0x0000003ffffffff0, +0x0000007fffffffe0, +0x000000ffffffffc0, +0x000001ffffffff80, +0x000003ffffffff00, +0x000007fffffffe00, +0x00000ffffffffc00, +0x00001ffffffff800, +0x00003ffffffff000, +0x00007fffffffe000, +0x0000ffffffffc000, +0x0001ffffffff8000, +0x0003ffffffff0000, +0x0007fffffffe0000, +0x000ffffffffc0000, +0x001ffffffff80000, +0x003ffffffff00000, +0x007fffffffe00000, +0x00ffffffffc00000, +0x01ffffffff800000, +0x03ffffffff000000, +0x07fffffffe000000, +0x0ffffffffc000000, +0x1ffffffff8000000, +0x3ffffffff0000000, +0x7fffffffe0000000, +0xffffffffc0000000, +0xffffffff80000001, +0xffffffff00000003, +0xfffffffe00000007, +0xfffffffc0000000f, +0xfffffff80000001f, +0xfffffff00000003f, +0xffffffe00000007f, +0xffffffc0000000ff, +0xffffff80000001ff, +0xffffff00000003ff, +0xfffffe00000007ff, +0xfffffc0000000fff, +0xfffff80000001fff, +0xfffff00000003fff, +0xffffe00000007fff, +0xffffc0000000ffff, +0xffff80000001ffff, +0xffff00000003ffff, +0xfffe00000007ffff, +0xfffc0000000fffff, +0xfff80000001fffff, +0xfff00000003fffff, +0xffe00000007fffff, +0xffc0000000ffffff, +0xff80000001ffffff, +0xff00000003ffffff, +0xfe00000007ffffff, +0xfc0000000fffffff, +0xf80000001fffffff, +0xf00000003fffffff, +0xe00000007fffffff, +0xc0000000ffffffff, +0x80000001ffffffff, +0x00000007ffffffff, +0x0000000ffffffffe, +0x0000001ffffffffc, +0x0000003ffffffff8, +0x0000007ffffffff0, +0x000000ffffffffe0, +0x000001ffffffffc0, +0x000003ffffffff80, +0x000007ffffffff00, +0x00000ffffffffe00, +0x00001ffffffffc00, +0x00003ffffffff800, +0x00007ffffffff000, +0x0000ffffffffe000, +0x0001ffffffffc000, +0x0003ffffffff8000, 
+0x0007ffffffff0000, +0x000ffffffffe0000, +0x001ffffffffc0000, +0x003ffffffff80000, +0x007ffffffff00000, +0x00ffffffffe00000, +0x01ffffffffc00000, +0x03ffffffff800000, +0x07ffffffff000000, +0x0ffffffffe000000, +0x1ffffffffc000000, +0x3ffffffff8000000, +0x7ffffffff0000000, +0xffffffffe0000000, +0xffffffffc0000001, +0xffffffff80000003, +0xffffffff00000007, +0xfffffffe0000000f, +0xfffffffc0000001f, +0xfffffff80000003f, +0xfffffff00000007f, +0xffffffe0000000ff, +0xffffffc0000001ff, +0xffffff80000003ff, +0xffffff00000007ff, +0xfffffe0000000fff, +0xfffffc0000001fff, +0xfffff80000003fff, +0xfffff00000007fff, +0xffffe0000000ffff, +0xffffc0000001ffff, +0xffff80000003ffff, +0xffff00000007ffff, +0xfffe0000000fffff, +0xfffc0000001fffff, +0xfff80000003fffff, +0xfff00000007fffff, +0xffe0000000ffffff, +0xffc0000001ffffff, +0xff80000003ffffff, +0xff00000007ffffff, +0xfe0000000fffffff, +0xfc0000001fffffff, +0xf80000003fffffff, +0xf00000007fffffff, +0xe0000000ffffffff, +0xc0000001ffffffff, +0x80000003ffffffff, +0x0000000fffffffff, +0x0000001ffffffffe, +0x0000003ffffffffc, +0x0000007ffffffff8, +0x000000fffffffff0, +0x000001ffffffffe0, +0x000003ffffffffc0, +0x000007ffffffff80, +0x00000fffffffff00, +0x00001ffffffffe00, +0x00003ffffffffc00, +0x00007ffffffff800, +0x0000fffffffff000, +0x0001ffffffffe000, +0x0003ffffffffc000, +0x0007ffffffff8000, +0x000fffffffff0000, +0x001ffffffffe0000, +0x003ffffffffc0000, +0x007ffffffff80000, +0x00fffffffff00000, +0x01ffffffffe00000, +0x03ffffffffc00000, +0x07ffffffff800000, +0x0fffffffff000000, +0x1ffffffffe000000, +0x3ffffffffc000000, +0x7ffffffff8000000, +0xfffffffff0000000, +0xffffffffe0000001, +0xffffffffc0000003, +0xffffffff80000007, +0xffffffff0000000f, +0xfffffffe0000001f, +0xfffffffc0000003f, +0xfffffff80000007f, +0xfffffff0000000ff, +0xffffffe0000001ff, +0xffffffc0000003ff, +0xffffff80000007ff, +0xffffff0000000fff, +0xfffffe0000001fff, +0xfffffc0000003fff, +0xfffff80000007fff, +0xfffff0000000ffff, +0xffffe0000001ffff, +0xffffc0000003ffff, +0xffff80000007ffff, +0xffff0000000fffff, +0xfffe0000001fffff, +0xfffc0000003fffff, +0xfff80000007fffff, +0xfff0000000ffffff, +0xffe0000001ffffff, +0xffc0000003ffffff, +0xff80000007ffffff, +0xff0000000fffffff, +0xfe0000001fffffff, +0xfc0000003fffffff, +0xf80000007fffffff, +0xf0000000ffffffff, +0xe0000001ffffffff, +0xc0000003ffffffff, +0x80000007ffffffff, +0x0000001fffffffff, +0x0000003ffffffffe, +0x0000007ffffffffc, +0x000000fffffffff8, +0x000001fffffffff0, +0x000003ffffffffe0, +0x000007ffffffffc0, +0x00000fffffffff80, +0x00001fffffffff00, +0x00003ffffffffe00, +0x00007ffffffffc00, +0x0000fffffffff800, +0x0001fffffffff000, +0x0003ffffffffe000, +0x0007ffffffffc000, +0x000fffffffff8000, +0x001fffffffff0000, +0x003ffffffffe0000, +0x007ffffffffc0000, +0x00fffffffff80000, +0x01fffffffff00000, +0x03ffffffffe00000, +0x07ffffffffc00000, +0x0fffffffff800000, +0x1fffffffff000000, +0x3ffffffffe000000, +0x7ffffffffc000000, +0xfffffffff8000000, +0xfffffffff0000001, +0xffffffffe0000003, +0xffffffffc0000007, +0xffffffff8000000f, +0xffffffff0000001f, +0xfffffffe0000003f, +0xfffffffc0000007f, +0xfffffff8000000ff, +0xfffffff0000001ff, +0xffffffe0000003ff, +0xffffffc0000007ff, +0xffffff8000000fff, +0xffffff0000001fff, +0xfffffe0000003fff, +0xfffffc0000007fff, +0xfffff8000000ffff, +0xfffff0000001ffff, +0xffffe0000003ffff, +0xffffc0000007ffff, +0xffff8000000fffff, +0xffff0000001fffff, +0xfffe0000003fffff, +0xfffc0000007fffff, +0xfff8000000ffffff, +0xfff0000001ffffff, +0xffe0000003ffffff, +0xffc0000007ffffff, +0xff8000000fffffff, +0xff0000001fffffff, 
+0xfe0000003fffffff, +0xfc0000007fffffff, +0xf8000000ffffffff, +0xf0000001ffffffff, +0xe0000003ffffffff, +0xc0000007ffffffff, +0x8000000fffffffff, +0x0000003fffffffff, +0x0000007ffffffffe, +0x000000fffffffffc, +0x000001fffffffff8, +0x000003fffffffff0, +0x000007ffffffffe0, +0x00000fffffffffc0, +0x00001fffffffff80, +0x00003fffffffff00, +0x00007ffffffffe00, +0x0000fffffffffc00, +0x0001fffffffff800, +0x0003fffffffff000, +0x0007ffffffffe000, +0x000fffffffffc000, +0x001fffffffff8000, +0x003fffffffff0000, +0x007ffffffffe0000, +0x00fffffffffc0000, +0x01fffffffff80000, +0x03fffffffff00000, +0x07ffffffffe00000, +0x0fffffffffc00000, +0x1fffffffff800000, +0x3fffffffff000000, +0x7ffffffffe000000, +0xfffffffffc000000, +0xfffffffff8000001, +0xfffffffff0000003, +0xffffffffe0000007, +0xffffffffc000000f, +0xffffffff8000001f, +0xffffffff0000003f, +0xfffffffe0000007f, +0xfffffffc000000ff, +0xfffffff8000001ff, +0xfffffff0000003ff, +0xffffffe0000007ff, +0xffffffc000000fff, +0xffffff8000001fff, +0xffffff0000003fff, +0xfffffe0000007fff, +0xfffffc000000ffff, +0xfffff8000001ffff, +0xfffff0000003ffff, +0xffffe0000007ffff, +0xffffc000000fffff, +0xffff8000001fffff, +0xffff0000003fffff, +0xfffe0000007fffff, +0xfffc000000ffffff, +0xfff8000001ffffff, +0xfff0000003ffffff, +0xffe0000007ffffff, +0xffc000000fffffff, +0xff8000001fffffff, +0xff0000003fffffff, +0xfe0000007fffffff, +0xfc000000ffffffff, +0xf8000001ffffffff, +0xf0000003ffffffff, +0xe0000007ffffffff, +0xc000000fffffffff, +0x8000001fffffffff, +0x0000007fffffffff, +0x000000fffffffffe, +0x000001fffffffffc, +0x000003fffffffff8, +0x000007fffffffff0, +0x00000fffffffffe0, +0x00001fffffffffc0, +0x00003fffffffff80, +0x00007fffffffff00, +0x0000fffffffffe00, +0x0001fffffffffc00, +0x0003fffffffff800, +0x0007fffffffff000, +0x000fffffffffe000, +0x001fffffffffc000, +0x003fffffffff8000, +0x007fffffffff0000, +0x00fffffffffe0000, +0x01fffffffffc0000, +0x03fffffffff80000, +0x07fffffffff00000, +0x0fffffffffe00000, +0x1fffffffffc00000, +0x3fffffffff800000, +0x7fffffffff000000, +0xfffffffffe000000, +0xfffffffffc000001, +0xfffffffff8000003, +0xfffffffff0000007, +0xffffffffe000000f, +0xffffffffc000001f, +0xffffffff8000003f, +0xffffffff0000007f, +0xfffffffe000000ff, +0xfffffffc000001ff, +0xfffffff8000003ff, +0xfffffff0000007ff, +0xffffffe000000fff, +0xffffffc000001fff, +0xffffff8000003fff, +0xffffff0000007fff, +0xfffffe000000ffff, +0xfffffc000001ffff, +0xfffff8000003ffff, +0xfffff0000007ffff, +0xffffe000000fffff, +0xffffc000001fffff, +0xffff8000003fffff, +0xffff0000007fffff, +0xfffe000000ffffff, +0xfffc000001ffffff, +0xfff8000003ffffff, +0xfff0000007ffffff, +0xffe000000fffffff, +0xffc000001fffffff, +0xff8000003fffffff, +0xff0000007fffffff, +0xfe000000ffffffff, +0xfc000001ffffffff, +0xf8000003ffffffff, +0xf0000007ffffffff, +0xe000000fffffffff, +0xc000001fffffffff, +0x8000003fffffffff, +0x000000ffffffffff, +0x000001fffffffffe, +0x000003fffffffffc, +0x000007fffffffff8, +0x00000ffffffffff0, +0x00001fffffffffe0, +0x00003fffffffffc0, +0x00007fffffffff80, +0x0000ffffffffff00, +0x0001fffffffffe00, +0x0003fffffffffc00, +0x0007fffffffff800, +0x000ffffffffff000, +0x001fffffffffe000, +0x003fffffffffc000, +0x007fffffffff8000, +0x00ffffffffff0000, +0x01fffffffffe0000, +0x03fffffffffc0000, +0x07fffffffff80000, +0x0ffffffffff00000, +0x1fffffffffe00000, +0x3fffffffffc00000, +0x7fffffffff800000, +0xffffffffff000000, +0xfffffffffe000001, +0xfffffffffc000003, +0xfffffffff8000007, +0xfffffffff000000f, +0xffffffffe000001f, +0xffffffffc000003f, +0xffffffff8000007f, +0xffffffff000000ff, +0xfffffffe000001ff, 
+0xfffffffc000003ff, +0xfffffff8000007ff, +0xfffffff000000fff, +0xffffffe000001fff, +0xffffffc000003fff, +0xffffff8000007fff, +0xffffff000000ffff, +0xfffffe000001ffff, +0xfffffc000003ffff, +0xfffff8000007ffff, +0xfffff000000fffff, +0xffffe000001fffff, +0xffffc000003fffff, +0xffff8000007fffff, +0xffff000000ffffff, +0xfffe000001ffffff, +0xfffc000003ffffff, +0xfff8000007ffffff, +0xfff000000fffffff, +0xffe000001fffffff, +0xffc000003fffffff, +0xff8000007fffffff, +0xff000000ffffffff, +0xfe000001ffffffff, +0xfc000003ffffffff, +0xf8000007ffffffff, +0xf000000fffffffff, +0xe000001fffffffff, +0xc000003fffffffff, +0x8000007fffffffff, +0x000001ffffffffff, +0x000003fffffffffe, +0x000007fffffffffc, +0x00000ffffffffff8, +0x00001ffffffffff0, +0x00003fffffffffe0, +0x00007fffffffffc0, +0x0000ffffffffff80, +0x0001ffffffffff00, +0x0003fffffffffe00, +0x0007fffffffffc00, +0x000ffffffffff800, +0x001ffffffffff000, +0x003fffffffffe000, +0x007fffffffffc000, +0x00ffffffffff8000, +0x01ffffffffff0000, +0x03fffffffffe0000, +0x07fffffffffc0000, +0x0ffffffffff80000, +0x1ffffffffff00000, +0x3fffffffffe00000, +0x7fffffffffc00000, +0xffffffffff800000, +0xffffffffff000001, +0xfffffffffe000003, +0xfffffffffc000007, +0xfffffffff800000f, +0xfffffffff000001f, +0xffffffffe000003f, +0xffffffffc000007f, +0xffffffff800000ff, +0xffffffff000001ff, +0xfffffffe000003ff, +0xfffffffc000007ff, +0xfffffff800000fff, +0xfffffff000001fff, +0xffffffe000003fff, +0xffffffc000007fff, +0xffffff800000ffff, +0xffffff000001ffff, +0xfffffe000003ffff, +0xfffffc000007ffff, +0xfffff800000fffff, +0xfffff000001fffff, +0xffffe000003fffff, +0xffffc000007fffff, +0xffff800000ffffff, +0xffff000001ffffff, +0xfffe000003ffffff, +0xfffc000007ffffff, +0xfff800000fffffff, +0xfff000001fffffff, +0xffe000003fffffff, +0xffc000007fffffff, +0xff800000ffffffff, +0xff000001ffffffff, +0xfe000003ffffffff, +0xfc000007ffffffff, +0xf800000fffffffff, +0xf000001fffffffff, +0xe000003fffffffff, +0xc000007fffffffff, +0x800000ffffffffff, +0x000003ffffffffff, +0x000007fffffffffe, +0x00000ffffffffffc, +0x00001ffffffffff8, +0x00003ffffffffff0, +0x00007fffffffffe0, +0x0000ffffffffffc0, +0x0001ffffffffff80, +0x0003ffffffffff00, +0x0007fffffffffe00, +0x000ffffffffffc00, +0x001ffffffffff800, +0x003ffffffffff000, +0x007fffffffffe000, +0x00ffffffffffc000, +0x01ffffffffff8000, +0x03ffffffffff0000, +0x07fffffffffe0000, +0x0ffffffffffc0000, +0x1ffffffffff80000, +0x3ffffffffff00000, +0x7fffffffffe00000, +0xffffffffffc00000, +0xffffffffff800001, +0xffffffffff000003, +0xfffffffffe000007, +0xfffffffffc00000f, +0xfffffffff800001f, +0xfffffffff000003f, +0xffffffffe000007f, +0xffffffffc00000ff, +0xffffffff800001ff, +0xffffffff000003ff, +0xfffffffe000007ff, +0xfffffffc00000fff, +0xfffffff800001fff, +0xfffffff000003fff, +0xffffffe000007fff, +0xffffffc00000ffff, +0xffffff800001ffff, +0xffffff000003ffff, +0xfffffe000007ffff, +0xfffffc00000fffff, +0xfffff800001fffff, +0xfffff000003fffff, +0xffffe000007fffff, +0xffffc00000ffffff, +0xffff800001ffffff, +0xffff000003ffffff, +0xfffe000007ffffff, +0xfffc00000fffffff, +0xfff800001fffffff, +0xfff000003fffffff, +0xffe000007fffffff, +0xffc00000ffffffff, +0xff800001ffffffff, +0xff000003ffffffff, +0xfe000007ffffffff, +0xfc00000fffffffff, +0xf800001fffffffff, +0xf000003fffffffff, +0xe000007fffffffff, +0xc00000ffffffffff, +0x800001ffffffffff, +0x000007ffffffffff, +0x00000ffffffffffe, +0x00001ffffffffffc, +0x00003ffffffffff8, +0x00007ffffffffff0, +0x0000ffffffffffe0, +0x0001ffffffffffc0, +0x0003ffffffffff80, +0x0007ffffffffff00, +0x000ffffffffffe00, +0x001ffffffffffc00, 
+0x003ffffffffff800, +0x007ffffffffff000, +0x00ffffffffffe000, +0x01ffffffffffc000, +0x03ffffffffff8000, +0x07ffffffffff0000, +0x0ffffffffffe0000, +0x1ffffffffffc0000, +0x3ffffffffff80000, +0x7ffffffffff00000, +0xffffffffffe00000, +0xffffffffffc00001, +0xffffffffff800003, +0xffffffffff000007, +0xfffffffffe00000f, +0xfffffffffc00001f, +0xfffffffff800003f, +0xfffffffff000007f, +0xffffffffe00000ff, +0xffffffffc00001ff, +0xffffffff800003ff, +0xffffffff000007ff, +0xfffffffe00000fff, +0xfffffffc00001fff, +0xfffffff800003fff, +0xfffffff000007fff, +0xffffffe00000ffff, +0xffffffc00001ffff, +0xffffff800003ffff, +0xffffff000007ffff, +0xfffffe00000fffff, +0xfffffc00001fffff, +0xfffff800003fffff, +0xfffff000007fffff, +0xffffe00000ffffff, +0xffffc00001ffffff, +0xffff800003ffffff, +0xffff000007ffffff, +0xfffe00000fffffff, +0xfffc00001fffffff, +0xfff800003fffffff, +0xfff000007fffffff, +0xffe00000ffffffff, +0xffc00001ffffffff, +0xff800003ffffffff, +0xff000007ffffffff, +0xfe00000fffffffff, +0xfc00001fffffffff, +0xf800003fffffffff, +0xf000007fffffffff, +0xe00000ffffffffff, +0xc00001ffffffffff, +0x800003ffffffffff, +0x00000fffffffffff, +0x00001ffffffffffe, +0x00003ffffffffffc, +0x00007ffffffffff8, +0x0000fffffffffff0, +0x0001ffffffffffe0, +0x0003ffffffffffc0, +0x0007ffffffffff80, +0x000fffffffffff00, +0x001ffffffffffe00, +0x003ffffffffffc00, +0x007ffffffffff800, +0x00fffffffffff000, +0x01ffffffffffe000, +0x03ffffffffffc000, +0x07ffffffffff8000, +0x0fffffffffff0000, +0x1ffffffffffe0000, +0x3ffffffffffc0000, +0x7ffffffffff80000, +0xfffffffffff00000, +0xffffffffffe00001, +0xffffffffffc00003, +0xffffffffff800007, +0xffffffffff00000f, +0xfffffffffe00001f, +0xfffffffffc00003f, +0xfffffffff800007f, +0xfffffffff00000ff, +0xffffffffe00001ff, +0xffffffffc00003ff, +0xffffffff800007ff, +0xffffffff00000fff, +0xfffffffe00001fff, +0xfffffffc00003fff, +0xfffffff800007fff, +0xfffffff00000ffff, +0xffffffe00001ffff, +0xffffffc00003ffff, +0xffffff800007ffff, +0xffffff00000fffff, +0xfffffe00001fffff, +0xfffffc00003fffff, +0xfffff800007fffff, +0xfffff00000ffffff, +0xffffe00001ffffff, +0xffffc00003ffffff, +0xffff800007ffffff, +0xffff00000fffffff, +0xfffe00001fffffff, +0xfffc00003fffffff, +0xfff800007fffffff, +0xfff00000ffffffff, +0xffe00001ffffffff, +0xffc00003ffffffff, +0xff800007ffffffff, +0xff00000fffffffff, +0xfe00001fffffffff, +0xfc00003fffffffff, +0xf800007fffffffff, +0xf00000ffffffffff, +0xe00001ffffffffff, +0xc00003ffffffffff, +0x800007ffffffffff, +0x00001fffffffffff, +0x00003ffffffffffe, +0x00007ffffffffffc, +0x0000fffffffffff8, +0x0001fffffffffff0, +0x0003ffffffffffe0, +0x0007ffffffffffc0, +0x000fffffffffff80, +0x001fffffffffff00, +0x003ffffffffffe00, +0x007ffffffffffc00, +0x00fffffffffff800, +0x01fffffffffff000, +0x03ffffffffffe000, +0x07ffffffffffc000, +0x0fffffffffff8000, +0x1fffffffffff0000, +0x3ffffffffffe0000, +0x7ffffffffffc0000, +0xfffffffffff80000, +0xfffffffffff00001, +0xffffffffffe00003, +0xffffffffffc00007, +0xffffffffff80000f, +0xffffffffff00001f, +0xfffffffffe00003f, +0xfffffffffc00007f, +0xfffffffff80000ff, +0xfffffffff00001ff, +0xffffffffe00003ff, +0xffffffffc00007ff, +0xffffffff80000fff, +0xffffffff00001fff, +0xfffffffe00003fff, +0xfffffffc00007fff, +0xfffffff80000ffff, +0xfffffff00001ffff, +0xffffffe00003ffff, +0xffffffc00007ffff, +0xffffff80000fffff, +0xffffff00001fffff, +0xfffffe00003fffff, +0xfffffc00007fffff, +0xfffff80000ffffff, +0xfffff00001ffffff, +0xffffe00003ffffff, +0xffffc00007ffffff, +0xffff80000fffffff, +0xffff00001fffffff, +0xfffe00003fffffff, +0xfffc00007fffffff, +0xfff80000ffffffff, 
+0xfff00001ffffffff, +0xffe00003ffffffff, +0xffc00007ffffffff, +0xff80000fffffffff, +0xff00001fffffffff, +0xfe00003fffffffff, +0xfc00007fffffffff, +0xf80000ffffffffff, +0xf00001ffffffffff, +0xe00003ffffffffff, +0xc00007ffffffffff, +0x80000fffffffffff, +0x00003fffffffffff, +0x00007ffffffffffe, +0x0000fffffffffffc, +0x0001fffffffffff8, +0x0003fffffffffff0, +0x0007ffffffffffe0, +0x000fffffffffffc0, +0x001fffffffffff80, +0x003fffffffffff00, +0x007ffffffffffe00, +0x00fffffffffffc00, +0x01fffffffffff800, +0x03fffffffffff000, +0x07ffffffffffe000, +0x0fffffffffffc000, +0x1fffffffffff8000, +0x3fffffffffff0000, +0x7ffffffffffe0000, +0xfffffffffffc0000, +0xfffffffffff80001, +0xfffffffffff00003, +0xffffffffffe00007, +0xffffffffffc0000f, +0xffffffffff80001f, +0xffffffffff00003f, +0xfffffffffe00007f, +0xfffffffffc0000ff, +0xfffffffff80001ff, +0xfffffffff00003ff, +0xffffffffe00007ff, +0xffffffffc0000fff, +0xffffffff80001fff, +0xffffffff00003fff, +0xfffffffe00007fff, +0xfffffffc0000ffff, +0xfffffff80001ffff, +0xfffffff00003ffff, +0xffffffe00007ffff, +0xffffffc0000fffff, +0xffffff80001fffff, +0xffffff00003fffff, +0xfffffe00007fffff, +0xfffffc0000ffffff, +0xfffff80001ffffff, +0xfffff00003ffffff, +0xffffe00007ffffff, +0xffffc0000fffffff, +0xffff80001fffffff, +0xffff00003fffffff, +0xfffe00007fffffff, +0xfffc0000ffffffff, +0xfff80001ffffffff, +0xfff00003ffffffff, +0xffe00007ffffffff, +0xffc0000fffffffff, +0xff80001fffffffff, +0xff00003fffffffff, +0xfe00007fffffffff, +0xfc0000ffffffffff, +0xf80001ffffffffff, +0xf00003ffffffffff, +0xe00007ffffffffff, +0xc0000fffffffffff, +0x80001fffffffffff, +0x00007fffffffffff, +0x0000fffffffffffe, +0x0001fffffffffffc, +0x0003fffffffffff8, +0x0007fffffffffff0, +0x000fffffffffffe0, +0x001fffffffffffc0, +0x003fffffffffff80, +0x007fffffffffff00, +0x00fffffffffffe00, +0x01fffffffffffc00, +0x03fffffffffff800, +0x07fffffffffff000, +0x0fffffffffffe000, +0x1fffffffffffc000, +0x3fffffffffff8000, +0x7fffffffffff0000, +0xfffffffffffe0000, +0xfffffffffffc0001, +0xfffffffffff80003, +0xfffffffffff00007, +0xffffffffffe0000f, +0xffffffffffc0001f, +0xffffffffff80003f, +0xffffffffff00007f, +0xfffffffffe0000ff, +0xfffffffffc0001ff, +0xfffffffff80003ff, +0xfffffffff00007ff, +0xffffffffe0000fff, +0xffffffffc0001fff, +0xffffffff80003fff, +0xffffffff00007fff, +0xfffffffe0000ffff, +0xfffffffc0001ffff, +0xfffffff80003ffff, +0xfffffff00007ffff, +0xffffffe0000fffff, +0xffffffc0001fffff, +0xffffff80003fffff, +0xffffff00007fffff, +0xfffffe0000ffffff, +0xfffffc0001ffffff, +0xfffff80003ffffff, +0xfffff00007ffffff, +0xffffe0000fffffff, +0xffffc0001fffffff, +0xffff80003fffffff, +0xffff00007fffffff, +0xfffe0000ffffffff, +0xfffc0001ffffffff, +0xfff80003ffffffff, +0xfff00007ffffffff, +0xffe0000fffffffff, +0xffc0001fffffffff, +0xff80003fffffffff, +0xff00007fffffffff, +0xfe0000ffffffffff, +0xfc0001ffffffffff, +0xf80003ffffffffff, +0xf00007ffffffffff, +0xe0000fffffffffff, +0xc0001fffffffffff, +0x80003fffffffffff, +0x0000ffffffffffff, +0x0001fffffffffffe, +0x0003fffffffffffc, +0x0007fffffffffff8, +0x000ffffffffffff0, +0x001fffffffffffe0, +0x003fffffffffffc0, +0x007fffffffffff80, +0x00ffffffffffff00, +0x01fffffffffffe00, +0x03fffffffffffc00, +0x07fffffffffff800, +0x0ffffffffffff000, +0x1fffffffffffe000, +0x3fffffffffffc000, +0x7fffffffffff8000, +0xffffffffffff0000, +0xfffffffffffe0001, +0xfffffffffffc0003, +0xfffffffffff80007, +0xfffffffffff0000f, +0xffffffffffe0001f, +0xffffffffffc0003f, +0xffffffffff80007f, +0xffffffffff0000ff, +0xfffffffffe0001ff, +0xfffffffffc0003ff, +0xfffffffff80007ff, +0xfffffffff0000fff, 
+0xffffffffe0001fff, +0xffffffffc0003fff, +0xffffffff80007fff, +0xffffffff0000ffff, +0xfffffffe0001ffff, +0xfffffffc0003ffff, +0xfffffff80007ffff, +0xfffffff0000fffff, +0xffffffe0001fffff, +0xffffffc0003fffff, +0xffffff80007fffff, +0xffffff0000ffffff, +0xfffffe0001ffffff, +0xfffffc0003ffffff, +0xfffff80007ffffff, +0xfffff0000fffffff, +0xffffe0001fffffff, +0xffffc0003fffffff, +0xffff80007fffffff, +0xffff0000ffffffff, +0xfffe0001ffffffff, +0xfffc0003ffffffff, +0xfff80007ffffffff, +0xfff0000fffffffff, +0xffe0001fffffffff, +0xffc0003fffffffff, +0xff80007fffffffff, +0xff0000ffffffffff, +0xfe0001ffffffffff, +0xfc0003ffffffffff, +0xf80007ffffffffff, +0xf0000fffffffffff, +0xe0001fffffffffff, +0xc0003fffffffffff, +0x80007fffffffffff, +0x0001ffffffffffff, +0x0003fffffffffffe, +0x0007fffffffffffc, +0x000ffffffffffff8, +0x001ffffffffffff0, +0x003fffffffffffe0, +0x007fffffffffffc0, +0x00ffffffffffff80, +0x01ffffffffffff00, +0x03fffffffffffe00, +0x07fffffffffffc00, +0x0ffffffffffff800, +0x1ffffffffffff000, +0x3fffffffffffe000, +0x7fffffffffffc000, +0xffffffffffff8000, +0xffffffffffff0001, +0xfffffffffffe0003, +0xfffffffffffc0007, +0xfffffffffff8000f, +0xfffffffffff0001f, +0xffffffffffe0003f, +0xffffffffffc0007f, +0xffffffffff8000ff, +0xffffffffff0001ff, +0xfffffffffe0003ff, +0xfffffffffc0007ff, +0xfffffffff8000fff, +0xfffffffff0001fff, +0xffffffffe0003fff, +0xffffffffc0007fff, +0xffffffff8000ffff, +0xffffffff0001ffff, +0xfffffffe0003ffff, +0xfffffffc0007ffff, +0xfffffff8000fffff, +0xfffffff0001fffff, +0xffffffe0003fffff, +0xffffffc0007fffff, +0xffffff8000ffffff, +0xffffff0001ffffff, +0xfffffe0003ffffff, +0xfffffc0007ffffff, +0xfffff8000fffffff, +0xfffff0001fffffff, +0xffffe0003fffffff, +0xffffc0007fffffff, +0xffff8000ffffffff, +0xffff0001ffffffff, +0xfffe0003ffffffff, +0xfffc0007ffffffff, +0xfff8000fffffffff, +0xfff0001fffffffff, +0xffe0003fffffffff, +0xffc0007fffffffff, +0xff8000ffffffffff, +0xff0001ffffffffff, +0xfe0003ffffffffff, +0xfc0007ffffffffff, +0xf8000fffffffffff, +0xf0001fffffffffff, +0xe0003fffffffffff, +0xc0007fffffffffff, +0x8000ffffffffffff, +0x0003ffffffffffff, +0x0007fffffffffffe, +0x000ffffffffffffc, +0x001ffffffffffff8, +0x003ffffffffffff0, +0x007fffffffffffe0, +0x00ffffffffffffc0, +0x01ffffffffffff80, +0x03ffffffffffff00, +0x07fffffffffffe00, +0x0ffffffffffffc00, +0x1ffffffffffff800, +0x3ffffffffffff000, +0x7fffffffffffe000, +0xffffffffffffc000, +0xffffffffffff8001, +0xffffffffffff0003, +0xfffffffffffe0007, +0xfffffffffffc000f, +0xfffffffffff8001f, +0xfffffffffff0003f, +0xffffffffffe0007f, +0xffffffffffc000ff, +0xffffffffff8001ff, +0xffffffffff0003ff, +0xfffffffffe0007ff, +0xfffffffffc000fff, +0xfffffffff8001fff, +0xfffffffff0003fff, +0xffffffffe0007fff, +0xffffffffc000ffff, +0xffffffff8001ffff, +0xffffffff0003ffff, +0xfffffffe0007ffff, +0xfffffffc000fffff, +0xfffffff8001fffff, +0xfffffff0003fffff, +0xffffffe0007fffff, +0xffffffc000ffffff, +0xffffff8001ffffff, +0xffffff0003ffffff, +0xfffffe0007ffffff, +0xfffffc000fffffff, +0xfffff8001fffffff, +0xfffff0003fffffff, +0xffffe0007fffffff, +0xffffc000ffffffff, +0xffff8001ffffffff, +0xffff0003ffffffff, +0xfffe0007ffffffff, +0xfffc000fffffffff, +0xfff8001fffffffff, +0xfff0003fffffffff, +0xffe0007fffffffff, +0xffc000ffffffffff, +0xff8001ffffffffff, +0xff0003ffffffffff, +0xfe0007ffffffffff, +0xfc000fffffffffff, +0xf8001fffffffffff, +0xf0003fffffffffff, +0xe0007fffffffffff, +0xc000ffffffffffff, +0x8001ffffffffffff, +0x0007ffffffffffff, +0x000ffffffffffffe, +0x001ffffffffffffc, +0x003ffffffffffff8, +0x007ffffffffffff0, +0x00ffffffffffffe0, 
+0x01ffffffffffffc0, +0x03ffffffffffff80, +0x07ffffffffffff00, +0x0ffffffffffffe00, +0x1ffffffffffffc00, +0x3ffffffffffff800, +0x7ffffffffffff000, +0xffffffffffffe000, +0xffffffffffffc001, +0xffffffffffff8003, +0xffffffffffff0007, +0xfffffffffffe000f, +0xfffffffffffc001f, +0xfffffffffff8003f, +0xfffffffffff0007f, +0xffffffffffe000ff, +0xffffffffffc001ff, +0xffffffffff8003ff, +0xffffffffff0007ff, +0xfffffffffe000fff, +0xfffffffffc001fff, +0xfffffffff8003fff, +0xfffffffff0007fff, +0xffffffffe000ffff, +0xffffffffc001ffff, +0xffffffff8003ffff, +0xffffffff0007ffff, +0xfffffffe000fffff, +0xfffffffc001fffff, +0xfffffff8003fffff, +0xfffffff0007fffff, +0xffffffe000ffffff, +0xffffffc001ffffff, +0xffffff8003ffffff, +0xffffff0007ffffff, +0xfffffe000fffffff, +0xfffffc001fffffff, +0xfffff8003fffffff, +0xfffff0007fffffff, +0xffffe000ffffffff, +0xffffc001ffffffff, +0xffff8003ffffffff, +0xffff0007ffffffff, +0xfffe000fffffffff, +0xfffc001fffffffff, +0xfff8003fffffffff, +0xfff0007fffffffff, +0xffe000ffffffffff, +0xffc001ffffffffff, +0xff8003ffffffffff, +0xff0007ffffffffff, +0xfe000fffffffffff, +0xfc001fffffffffff, +0xf8003fffffffffff, +0xf0007fffffffffff, +0xe000ffffffffffff, +0xc001ffffffffffff, +0x8003ffffffffffff, +0x000fffffffffffff, +0x001ffffffffffffe, +0x003ffffffffffffc, +0x007ffffffffffff8, +0x00fffffffffffff0, +0x01ffffffffffffe0, +0x03ffffffffffffc0, +0x07ffffffffffff80, +0x0fffffffffffff00, +0x1ffffffffffffe00, +0x3ffffffffffffc00, +0x7ffffffffffff800, +0xfffffffffffff000, +0xffffffffffffe001, +0xffffffffffffc003, +0xffffffffffff8007, +0xffffffffffff000f, +0xfffffffffffe001f, +0xfffffffffffc003f, +0xfffffffffff8007f, +0xfffffffffff000ff, +0xffffffffffe001ff, +0xffffffffffc003ff, +0xffffffffff8007ff, +0xffffffffff000fff, +0xfffffffffe001fff, +0xfffffffffc003fff, +0xfffffffff8007fff, +0xfffffffff000ffff, +0xffffffffe001ffff, +0xffffffffc003ffff, +0xffffffff8007ffff, +0xffffffff000fffff, +0xfffffffe001fffff, +0xfffffffc003fffff, +0xfffffff8007fffff, +0xfffffff000ffffff, +0xffffffe001ffffff, +0xffffffc003ffffff, +0xffffff8007ffffff, +0xffffff000fffffff, +0xfffffe001fffffff, +0xfffffc003fffffff, +0xfffff8007fffffff, +0xfffff000ffffffff, +0xffffe001ffffffff, +0xffffc003ffffffff, +0xffff8007ffffffff, +0xffff000fffffffff, +0xfffe001fffffffff, +0xfffc003fffffffff, +0xfff8007fffffffff, +0xfff000ffffffffff, +0xffe001ffffffffff, +0xffc003ffffffffff, +0xff8007ffffffffff, +0xff000fffffffffff, +0xfe001fffffffffff, +0xfc003fffffffffff, +0xf8007fffffffffff, +0xf000ffffffffffff, +0xe001ffffffffffff, +0xc003ffffffffffff, +0x8007ffffffffffff, +0x001fffffffffffff, +0x003ffffffffffffe, +0x007ffffffffffffc, +0x00fffffffffffff8, +0x01fffffffffffff0, +0x03ffffffffffffe0, +0x07ffffffffffffc0, +0x0fffffffffffff80, +0x1fffffffffffff00, +0x3ffffffffffffe00, +0x7ffffffffffffc00, +0xfffffffffffff800, +0xfffffffffffff001, +0xffffffffffffe003, +0xffffffffffffc007, +0xffffffffffff800f, +0xffffffffffff001f, +0xfffffffffffe003f, +0xfffffffffffc007f, +0xfffffffffff800ff, +0xfffffffffff001ff, +0xffffffffffe003ff, +0xffffffffffc007ff, +0xffffffffff800fff, +0xffffffffff001fff, +0xfffffffffe003fff, +0xfffffffffc007fff, +0xfffffffff800ffff, +0xfffffffff001ffff, +0xffffffffe003ffff, +0xffffffffc007ffff, +0xffffffff800fffff, +0xffffffff001fffff, +0xfffffffe003fffff, +0xfffffffc007fffff, +0xfffffff800ffffff, +0xfffffff001ffffff, +0xffffffe003ffffff, +0xffffffc007ffffff, +0xffffff800fffffff, +0xffffff001fffffff, +0xfffffe003fffffff, +0xfffffc007fffffff, +0xfffff800ffffffff, +0xfffff001ffffffff, +0xffffe003ffffffff, +0xffffc007ffffffff, 
+0xffff800fffffffff, +0xffff001fffffffff, +0xfffe003fffffffff, +0xfffc007fffffffff, +0xfff800ffffffffff, +0xfff001ffffffffff, +0xffe003ffffffffff, +0xffc007ffffffffff, +0xff800fffffffffff, +0xff001fffffffffff, +0xfe003fffffffffff, +0xfc007fffffffffff, +0xf800ffffffffffff, +0xf001ffffffffffff, +0xe003ffffffffffff, +0xc007ffffffffffff, +0x800fffffffffffff, +0x003fffffffffffff, +0x007ffffffffffffe, +0x00fffffffffffffc, +0x01fffffffffffff8, +0x03fffffffffffff0, +0x07ffffffffffffe0, +0x0fffffffffffffc0, +0x1fffffffffffff80, +0x3fffffffffffff00, +0x7ffffffffffffe00, +0xfffffffffffffc00, +0xfffffffffffff801, +0xfffffffffffff003, +0xffffffffffffe007, +0xffffffffffffc00f, +0xffffffffffff801f, +0xffffffffffff003f, +0xfffffffffffe007f, +0xfffffffffffc00ff, +0xfffffffffff801ff, +0xfffffffffff003ff, +0xffffffffffe007ff, +0xffffffffffc00fff, +0xffffffffff801fff, +0xffffffffff003fff, +0xfffffffffe007fff, +0xfffffffffc00ffff, +0xfffffffff801ffff, +0xfffffffff003ffff, +0xffffffffe007ffff, +0xffffffffc00fffff, +0xffffffff801fffff, +0xffffffff003fffff, +0xfffffffe007fffff, +0xfffffffc00ffffff, +0xfffffff801ffffff, +0xfffffff003ffffff, +0xffffffe007ffffff, +0xffffffc00fffffff, +0xffffff801fffffff, +0xffffff003fffffff, +0xfffffe007fffffff, +0xfffffc00ffffffff, +0xfffff801ffffffff, +0xfffff003ffffffff, +0xffffe007ffffffff, +0xffffc00fffffffff, +0xffff801fffffffff, +0xffff003fffffffff, +0xfffe007fffffffff, +0xfffc00ffffffffff, +0xfff801ffffffffff, +0xfff003ffffffffff, +0xffe007ffffffffff, +0xffc00fffffffffff, +0xff801fffffffffff, +0xff003fffffffffff, +0xfe007fffffffffff, +0xfc00ffffffffffff, +0xf801ffffffffffff, +0xf003ffffffffffff, +0xe007ffffffffffff, +0xc00fffffffffffff, +0x801fffffffffffff, +0x007fffffffffffff, +0x00fffffffffffffe, +0x01fffffffffffffc, +0x03fffffffffffff8, +0x07fffffffffffff0, +0x0fffffffffffffe0, +0x1fffffffffffffc0, +0x3fffffffffffff80, +0x7fffffffffffff00, +0xfffffffffffffe00, +0xfffffffffffffc01, +0xfffffffffffff803, +0xfffffffffffff007, +0xffffffffffffe00f, +0xffffffffffffc01f, +0xffffffffffff803f, +0xffffffffffff007f, +0xfffffffffffe00ff, +0xfffffffffffc01ff, +0xfffffffffff803ff, +0xfffffffffff007ff, +0xffffffffffe00fff, +0xffffffffffc01fff, +0xffffffffff803fff, +0xffffffffff007fff, +0xfffffffffe00ffff, +0xfffffffffc01ffff, +0xfffffffff803ffff, +0xfffffffff007ffff, +0xffffffffe00fffff, +0xffffffffc01fffff, +0xffffffff803fffff, +0xffffffff007fffff, +0xfffffffe00ffffff, +0xfffffffc01ffffff, +0xfffffff803ffffff, +0xfffffff007ffffff, +0xffffffe00fffffff, +0xffffffc01fffffff, +0xffffff803fffffff, +0xffffff007fffffff, +0xfffffe00ffffffff, +0xfffffc01ffffffff, +0xfffff803ffffffff, +0xfffff007ffffffff, +0xffffe00fffffffff, +0xffffc01fffffffff, +0xffff803fffffffff, +0xffff007fffffffff, +0xfffe00ffffffffff, +0xfffc01ffffffffff, +0xfff803ffffffffff, +0xfff007ffffffffff, +0xffe00fffffffffff, +0xffc01fffffffffff, +0xff803fffffffffff, +0xff007fffffffffff, +0xfe00ffffffffffff, +0xfc01ffffffffffff, +0xf803ffffffffffff, +0xf007ffffffffffff, +0xe00fffffffffffff, +0xc01fffffffffffff, +0x803fffffffffffff, +0x00ffffffffffffff, +0x01fffffffffffffe, +0x03fffffffffffffc, +0x07fffffffffffff8, +0x0ffffffffffffff0, +0x1fffffffffffffe0, +0x3fffffffffffffc0, +0x7fffffffffffff80, +0xffffffffffffff00, +0xfffffffffffffe01, +0xfffffffffffffc03, +0xfffffffffffff807, +0xfffffffffffff00f, +0xffffffffffffe01f, +0xffffffffffffc03f, +0xffffffffffff807f, +0xffffffffffff00ff, +0xfffffffffffe01ff, +0xfffffffffffc03ff, +0xfffffffffff807ff, +0xfffffffffff00fff, +0xffffffffffe01fff, +0xffffffffffc03fff, +0xffffffffff807fff, 
+0xffffffffff00ffff, +0xfffffffffe01ffff, +0xfffffffffc03ffff, +0xfffffffff807ffff, +0xfffffffff00fffff, +0xffffffffe01fffff, +0xffffffffc03fffff, +0xffffffff807fffff, +0xffffffff00ffffff, +0xfffffffe01ffffff, +0xfffffffc03ffffff, +0xfffffff807ffffff, +0xfffffff00fffffff, +0xffffffe01fffffff, +0xffffffc03fffffff, +0xffffff807fffffff, +0xffffff00ffffffff, +0xfffffe01ffffffff, +0xfffffc03ffffffff, +0xfffff807ffffffff, +0xfffff00fffffffff, +0xffffe01fffffffff, +0xffffc03fffffffff, +0xffff807fffffffff, +0xffff00ffffffffff, +0xfffe01ffffffffff, +0xfffc03ffffffffff, +0xfff807ffffffffff, +0xfff00fffffffffff, +0xffe01fffffffffff, +0xffc03fffffffffff, +0xff807fffffffffff, +0xff00ffffffffffff, +0xfe01ffffffffffff, +0xfc03ffffffffffff, +0xf807ffffffffffff, +0xf00fffffffffffff, +0xe01fffffffffffff, +0xc03fffffffffffff, +0x807fffffffffffff, +0x01ffffffffffffff, +0x03fffffffffffffe, +0x07fffffffffffffc, +0x0ffffffffffffff8, +0x1ffffffffffffff0, +0x3fffffffffffffe0, +0x7fffffffffffffc0, +0xffffffffffffff80, +0xffffffffffffff01, +0xfffffffffffffe03, +0xfffffffffffffc07, +0xfffffffffffff80f, +0xfffffffffffff01f, +0xffffffffffffe03f, +0xffffffffffffc07f, +0xffffffffffff80ff, +0xffffffffffff01ff, +0xfffffffffffe03ff, +0xfffffffffffc07ff, +0xfffffffffff80fff, +0xfffffffffff01fff, +0xffffffffffe03fff, +0xffffffffffc07fff, +0xffffffffff80ffff, +0xffffffffff01ffff, +0xfffffffffe03ffff, +0xfffffffffc07ffff, +0xfffffffff80fffff, +0xfffffffff01fffff, +0xffffffffe03fffff, +0xffffffffc07fffff, +0xffffffff80ffffff, +0xffffffff01ffffff, +0xfffffffe03ffffff, +0xfffffffc07ffffff, +0xfffffff80fffffff, +0xfffffff01fffffff, +0xffffffe03fffffff, +0xffffffc07fffffff, +0xffffff80ffffffff, +0xffffff01ffffffff, +0xfffffe03ffffffff, +0xfffffc07ffffffff, +0xfffff80fffffffff, +0xfffff01fffffffff, +0xffffe03fffffffff, +0xffffc07fffffffff, +0xffff80ffffffffff, +0xffff01ffffffffff, +0xfffe03ffffffffff, +0xfffc07ffffffffff, +0xfff80fffffffffff, +0xfff01fffffffffff, +0xffe03fffffffffff, +0xffc07fffffffffff, +0xff80ffffffffffff, +0xff01ffffffffffff, +0xfe03ffffffffffff, +0xfc07ffffffffffff, +0xf80fffffffffffff, +0xf01fffffffffffff, +0xe03fffffffffffff, +0xc07fffffffffffff, +0x80ffffffffffffff, +0x03ffffffffffffff, +0x07fffffffffffffe, +0x0ffffffffffffffc, +0x1ffffffffffffff8, +0x3ffffffffffffff0, +0x7fffffffffffffe0, +0xffffffffffffffc0, +0xffffffffffffff81, +0xffffffffffffff03, +0xfffffffffffffe07, +0xfffffffffffffc0f, +0xfffffffffffff81f, +0xfffffffffffff03f, +0xffffffffffffe07f, +0xffffffffffffc0ff, +0xffffffffffff81ff, +0xffffffffffff03ff, +0xfffffffffffe07ff, +0xfffffffffffc0fff, +0xfffffffffff81fff, +0xfffffffffff03fff, +0xffffffffffe07fff, +0xffffffffffc0ffff, +0xffffffffff81ffff, +0xffffffffff03ffff, +0xfffffffffe07ffff, +0xfffffffffc0fffff, +0xfffffffff81fffff, +0xfffffffff03fffff, +0xffffffffe07fffff, +0xffffffffc0ffffff, +0xffffffff81ffffff, +0xffffffff03ffffff, +0xfffffffe07ffffff, +0xfffffffc0fffffff, +0xfffffff81fffffff, +0xfffffff03fffffff, +0xffffffe07fffffff, +0xffffffc0ffffffff, +0xffffff81ffffffff, +0xffffff03ffffffff, +0xfffffe07ffffffff, +0xfffffc0fffffffff, +0xfffff81fffffffff, +0xfffff03fffffffff, +0xffffe07fffffffff, +0xffffc0ffffffffff, +0xffff81ffffffffff, +0xffff03ffffffffff, +0xfffe07ffffffffff, +0xfffc0fffffffffff, +0xfff81fffffffffff, +0xfff03fffffffffff, +0xffe07fffffffffff, +0xffc0ffffffffffff, +0xff81ffffffffffff, +0xff03ffffffffffff, +0xfe07ffffffffffff, +0xfc0fffffffffffff, +0xf81fffffffffffff, +0xf03fffffffffffff, +0xe07fffffffffffff, +0xc0ffffffffffffff, +0x81ffffffffffffff, +0x07ffffffffffffff, 
+0x0ffffffffffffffe, +0x1ffffffffffffffc, +0x3ffffffffffffff8, +0x7ffffffffffffff0, +0xffffffffffffffe0, +0xffffffffffffffc1, +0xffffffffffffff83, +0xffffffffffffff07, +0xfffffffffffffe0f, +0xfffffffffffffc1f, +0xfffffffffffff83f, +0xfffffffffffff07f, +0xffffffffffffe0ff, +0xffffffffffffc1ff, +0xffffffffffff83ff, +0xffffffffffff07ff, +0xfffffffffffe0fff, +0xfffffffffffc1fff, +0xfffffffffff83fff, +0xfffffffffff07fff, +0xffffffffffe0ffff, +0xffffffffffc1ffff, +0xffffffffff83ffff, +0xffffffffff07ffff, +0xfffffffffe0fffff, +0xfffffffffc1fffff, +0xfffffffff83fffff, +0xfffffffff07fffff, +0xffffffffe0ffffff, +0xffffffffc1ffffff, +0xffffffff83ffffff, +0xffffffff07ffffff, +0xfffffffe0fffffff, +0xfffffffc1fffffff, +0xfffffff83fffffff, +0xfffffff07fffffff, +0xffffffe0ffffffff, +0xffffffc1ffffffff, +0xffffff83ffffffff, +0xffffff07ffffffff, +0xfffffe0fffffffff, +0xfffffc1fffffffff, +0xfffff83fffffffff, +0xfffff07fffffffff, +0xffffe0ffffffffff, +0xffffc1ffffffffff, +0xffff83ffffffffff, +0xffff07ffffffffff, +0xfffe0fffffffffff, +0xfffc1fffffffffff, +0xfff83fffffffffff, +0xfff07fffffffffff, +0xffe0ffffffffffff, +0xffc1ffffffffffff, +0xff83ffffffffffff, +0xff07ffffffffffff, +0xfe0fffffffffffff, +0xfc1fffffffffffff, +0xf83fffffffffffff, +0xf07fffffffffffff, +0xe0ffffffffffffff, +0xc1ffffffffffffff, +0x83ffffffffffffff, +0x0fffffffffffffff, +0x1ffffffffffffffe, +0x3ffffffffffffffc, +0x7ffffffffffffff8, +0xfffffffffffffff0, +0xffffffffffffffe1, +0xffffffffffffffc3, +0xffffffffffffff87, +0xffffffffffffff0f, +0xfffffffffffffe1f, +0xfffffffffffffc3f, +0xfffffffffffff87f, +0xfffffffffffff0ff, +0xffffffffffffe1ff, +0xffffffffffffc3ff, +0xffffffffffff87ff, +0xffffffffffff0fff, +0xfffffffffffe1fff, +0xfffffffffffc3fff, +0xfffffffffff87fff, +0xfffffffffff0ffff, +0xffffffffffe1ffff, +0xffffffffffc3ffff, +0xffffffffff87ffff, +0xffffffffff0fffff, +0xfffffffffe1fffff, +0xfffffffffc3fffff, +0xfffffffff87fffff, +0xfffffffff0ffffff, +0xffffffffe1ffffff, +0xffffffffc3ffffff, +0xffffffff87ffffff, +0xffffffff0fffffff, +0xfffffffe1fffffff, +0xfffffffc3fffffff, +0xfffffff87fffffff, +0xfffffff0ffffffff, +0xffffffe1ffffffff, +0xffffffc3ffffffff, +0xffffff87ffffffff, +0xffffff0fffffffff, +0xfffffe1fffffffff, +0xfffffc3fffffffff, +0xfffff87fffffffff, +0xfffff0ffffffffff, +0xffffe1ffffffffff, +0xffffc3ffffffffff, +0xffff87ffffffffff, +0xffff0fffffffffff, +0xfffe1fffffffffff, +0xfffc3fffffffffff, +0xfff87fffffffffff, +0xfff0ffffffffffff, +0xffe1ffffffffffff, +0xffc3ffffffffffff, +0xff87ffffffffffff, +0xff0fffffffffffff, +0xfe1fffffffffffff, +0xfc3fffffffffffff, +0xf87fffffffffffff, +0xf0ffffffffffffff, +0xe1ffffffffffffff, +0xc3ffffffffffffff, +0x87ffffffffffffff, +0x1fffffffffffffff, +0x3ffffffffffffffe, +0x7ffffffffffffffc, +0xfffffffffffffff8, +0xfffffffffffffff1, +0xffffffffffffffe3, +0xffffffffffffffc7, +0xffffffffffffff8f, +0xffffffffffffff1f, +0xfffffffffffffe3f, +0xfffffffffffffc7f, +0xfffffffffffff8ff, +0xfffffffffffff1ff, +0xffffffffffffe3ff, +0xffffffffffffc7ff, +0xffffffffffff8fff, +0xffffffffffff1fff, +0xfffffffffffe3fff, +0xfffffffffffc7fff, +0xfffffffffff8ffff, +0xfffffffffff1ffff, +0xffffffffffe3ffff, +0xffffffffffc7ffff, +0xffffffffff8fffff, +0xffffffffff1fffff, +0xfffffffffe3fffff, +0xfffffffffc7fffff, +0xfffffffff8ffffff, +0xfffffffff1ffffff, +0xffffffffe3ffffff, +0xffffffffc7ffffff, +0xffffffff8fffffff, +0xffffffff1fffffff, +0xfffffffe3fffffff, +0xfffffffc7fffffff, +0xfffffff8ffffffff, +0xfffffff1ffffffff, +0xffffffe3ffffffff, +0xffffffc7ffffffff, +0xffffff8fffffffff, +0xffffff1fffffffff, +0xfffffe3fffffffff, 
+0xfffffc7fffffffff, +0xfffff8ffffffffff, +0xfffff1ffffffffff, +0xffffe3ffffffffff, +0xffffc7ffffffffff, +0xffff8fffffffffff, +0xffff1fffffffffff, +0xfffe3fffffffffff, +0xfffc7fffffffffff, +0xfff8ffffffffffff, +0xfff1ffffffffffff, +0xffe3ffffffffffff, +0xffc7ffffffffffff, +0xff8fffffffffffff, +0xff1fffffffffffff, +0xfe3fffffffffffff, +0xfc7fffffffffffff, +0xf8ffffffffffffff, +0xf1ffffffffffffff, +0xe3ffffffffffffff, +0xc7ffffffffffffff, +0x8fffffffffffffff, +0x3fffffffffffffff, +0x7ffffffffffffffe, +0xfffffffffffffffc, +0xfffffffffffffff9, +0xfffffffffffffff3, +0xffffffffffffffe7, +0xffffffffffffffcf, +0xffffffffffffff9f, +0xffffffffffffff3f, +0xfffffffffffffe7f, +0xfffffffffffffcff, +0xfffffffffffff9ff, +0xfffffffffffff3ff, +0xffffffffffffe7ff, +0xffffffffffffcfff, +0xffffffffffff9fff, +0xffffffffffff3fff, +0xfffffffffffe7fff, +0xfffffffffffcffff, +0xfffffffffff9ffff, +0xfffffffffff3ffff, +0xffffffffffe7ffff, +0xffffffffffcfffff, +0xffffffffff9fffff, +0xffffffffff3fffff, +0xfffffffffe7fffff, +0xfffffffffcffffff, +0xfffffffff9ffffff, +0xfffffffff3ffffff, +0xffffffffe7ffffff, +0xffffffffcfffffff, +0xffffffff9fffffff, +0xffffffff3fffffff, +0xfffffffe7fffffff, +0xfffffffcffffffff, +0xfffffff9ffffffff, +0xfffffff3ffffffff, +0xffffffe7ffffffff, +0xffffffcfffffffff, +0xffffff9fffffffff, +0xffffff3fffffffff, +0xfffffe7fffffffff, +0xfffffcffffffffff, +0xfffff9ffffffffff, +0xfffff3ffffffffff, +0xffffe7ffffffffff, +0xffffcfffffffffff, +0xffff9fffffffffff, +0xffff3fffffffffff, +0xfffe7fffffffffff, +0xfffcffffffffffff, +0xfff9ffffffffffff, +0xfff3ffffffffffff, +0xffe7ffffffffffff, +0xffcfffffffffffff, +0xff9fffffffffffff, +0xff3fffffffffffff, +0xfe7fffffffffffff, +0xfcffffffffffffff, +0xf9ffffffffffffff, +0xf3ffffffffffffff, +0xe7ffffffffffffff, +0xcfffffffffffffff, +0x9fffffffffffffff, +0x7fffffffffffffff, +0xfffffffffffffffe, +0xfffffffffffffffd, +0xfffffffffffffffb, +0xfffffffffffffff7, +0xffffffffffffffef, +0xffffffffffffffdf, +0xffffffffffffffbf, +0xffffffffffffff7f, +0xfffffffffffffeff, +0xfffffffffffffdff, +0xfffffffffffffbff, +0xfffffffffffff7ff, +0xffffffffffffefff, +0xffffffffffffdfff, +0xffffffffffffbfff, +0xffffffffffff7fff, +0xfffffffffffeffff, +0xfffffffffffdffff, +0xfffffffffffbffff, +0xfffffffffff7ffff, +0xffffffffffefffff, +0xffffffffffdfffff, +0xffffffffffbfffff, +0xffffffffff7fffff, +0xfffffffffeffffff, +0xfffffffffdffffff, +0xfffffffffbffffff, +0xfffffffff7ffffff, +0xffffffffefffffff, +0xffffffffdfffffff, +0xffffffffbfffffff, +0xffffffff7fffffff, +0xfffffffeffffffff, +0xfffffffdffffffff, +0xfffffffbffffffff, +0xfffffff7ffffffff, +0xffffffefffffffff, +0xffffffdfffffffff, +0xffffffbfffffffff, +0xffffff7fffffffff, +0xfffffeffffffffff, +0xfffffdffffffffff, +0xfffffbffffffffff, +0xfffff7ffffffffff, +0xffffefffffffffff, +0xffffdfffffffffff, +0xffffbfffffffffff, +0xffff7fffffffffff, +0xfffeffffffffffff, +0xfffdffffffffffff, +0xfffbffffffffffff, +0xfff7ffffffffffff, +0xffefffffffffffff, +0xffdfffffffffffff, +0xffbfffffffffffff, +0xff7fffffffffffff, +0xfeffffffffffffff, +0xfdffffffffffffff, +0xfbffffffffffffff, +0xf7ffffffffffffff, +0xefffffffffffffff, +0xdfffffffffffffff, +0xbfffffffffffffff,
+/*
+#include <stdio.h>
+#include <stdint.h>
+
+// Dumps all legal bitmask immediates for ARM64
+// Total number of unique 64-bit patterns:
+// 1*2 + 3*4 + 7*8 + 15*16 + 31*32 + 63*64 = 5334
+
+const char *uint64_to_binary(uint64_t x) {
+  static char b[65];
+  unsigned i;
+  for (i = 0; i < 64; i++, x <<= 1)
+    b[i] = (0x8000000000000000ULL & x)? '1' : '0';
+  b[64] = '\0';
+  return b;
+}
+
+int main() {
+  uint64_t result;
+  unsigned size, length, rotation, e;
+  for (size = 2; size <= 64; size *= 2)
+    for (length = 1; length < size; ++length) {
+      result = 0xffffffffffffffffULL >> (64 - length);
+      for (e = size; e < 64; e *= 2)
+        result |= result << e;
+      for (rotation = 0; rotation < size; ++rotation) {
+#if 0
+        printf("0x%016llx %s (size=%u, length=%u, rotation=%u)\n",
+               (unsigned long long)result, uint64_to_binary(result),
+               size, length, rotation);
+#endif
+        printf("0x%016llx\n", (unsigned long long)result);
+        result = (result >> 63) | (result << 1);
+      }
+    }
+  return 0;
+}
+*/
diff --git a/ecmascript/mapleall/maple_be/include/cg/visitor_common.h b/ecmascript/mapleall/maple_be/include/cg/visitor_common.h
new file mode 100644
index 0000000000000000000000000000000000000000..9572dc1b9b18d76a82d1aa00d162862037083279
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/visitor_common.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H
+#define MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H
+namespace maplebe {
+class OperandVisitorBase {
+ public:
+  virtual ~OperandVisitorBase() = default;
+};
+
+template <typename Visitable>
+class OperandVisitor {
+ public:
+  virtual ~OperandVisitor() = default;
+  virtual void Visit(Visitable *v) = 0;
+};
+
+template <typename... Visitables>
+class OperandVisitors {
+ public:
+  virtual ~OperandVisitors() = default;
+};
+
+template <typename Visitable, typename SecondVisitable, typename... OtherVisitables>
+class OperandVisitors<Visitable, SecondVisitable, OtherVisitables...> :
+    public OperandVisitor<Visitable>,
+    public OperandVisitor<SecondVisitable>,
+    public OperandVisitor<OtherVisitables> ...
+{};
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/asm_assembler.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/asm_assembler.h
new file mode 100644
index 0000000000000000000000000000000000000000..26f09a99f9ce47b11323c755d1dafd6847a28fb5
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/asm_assembler.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef ASM_ASSEMBLER_H
+#define ASM_ASSEMBLER_H
+
+#include "assembler.h"
+
+namespace assembler {
+
+#if TARGRISCV64
+#define CMNT "\t# "
+#else
+#define CMNT "\t// "
+#endif
+#define TEXT_BEGIN text0
+#define TEXT_END etext0
+#define DEBUG_INFO_0 debug_info0
+#define DEBUG_ABBREV_0 debug_abbrev0
+#define DEBUG_LINE_0 debug_line0
+#define DEBUG_STR_LABEL ASF
+
+#define XSTR(s) #s
+
+enum Directive : uint8 {
+  kAlign,
+  kComm,
+  kFile,
+  kFuncType,
+  kHidden,
+  kName,
+  kObjType,
+  kSection,
+  kSize,
+  kZero
+};
+
+class AsmAssembler : public Assembler {
+ public:
+  explicit AsmAssembler(const std::string &outputFileName) : Assembler() {
+    outStream.open(outputFileName, std::ios::trunc);
+  }
+
+  ~AsmAssembler() = default;
+
+  void InitialFileInfo(const std::string &inputFileName) override;
+  void EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName) override;
+  void EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo, uint32 freq, const std::string *mirName) override;
+  void EmitJmpTableElem(int64 jmpLabelIdx, const std::vector<int64> &labelIdxs) override;
+  void EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr) override;
+  /* emit variable's value */
+  void EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr,
+                    SectionKind sectionKind) override;
+  void EmitDirectString(const std::string &ustr, bool belongsToDataSec, int64 strSymIdx, bool emitAscii) override;
+  void EmitIndirectString(int64 strSymIdx, bool belongsToDataSec) override;
+  void EmitIntValue(int64 value, size_t valueSize, bool belongsToDataSec) override;
+  void EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec) override;
+  void EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec) override;
+  void EmitLabelValue(int64 symIdx, bool belongsToDataSec) override;
+  void EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec) override;
+  void EmitNull(uint64 sizeInByte) override;
+  void PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte) override;
+  void FinalizeFileInfo() override {}
+
+  /* emit debug info */
+  void EmitDIHeader() override;
+  void EmitDIFooter() override;
+  void EmitDIHeaderFileInfo() override;
+  void EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) override;
+  void EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName, uint32 offset,
+                                      uint32 size) override;
+  void EmitDIFormSpecification(unsigned int dwform) override;
+  /* EmitDIAttrValue */
+  void EmitDwFormString(const std::string &name) override;
+  void EmitDwFormStrp(uint32 strLabelId, size_t strTableSize) override;
+  void EmitDwFormData(int32 attrValue, uint8 sizeInByte) override;
+  void EmitDwFormData8() override;
+  void EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx,
+                       uint32 endLabelIdx, uint32 startLabelIdx) override;
+  void EmitLabel(uint32 funcPuIdx, uint32 labIdx) override;
+  void EmitDwFormSecOffset() override;
+  void EmitDwFormAddr(bool emitTextBegin) override;
+  void EmitDwFormRef4(uint64 offsetOrValue, bool unknownType, bool emitOffset) override;
+  void EmitDwFormExprlocCfa(uint32 dwOp) override;
+  void EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) override;
+  void EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) override;
+  void EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) override; /* n=0~7 */
+  void EmitDwFormExprloc(uintptr elp) override;
+
+  void EmitDIDwName(const std::string &dwAtName, const std::string &dwForName) override;
+  void EmitDIDWFormStr(const std::string &formStr) override {
+    Emit(" : ");
+    Emit(formStr);
+  }
+
+  void EmitDIDWDataMemberLocaltion(unsigned int lableIdx, uintptr_t attr) override {
+    Emit(" : ");
+    Emit(lableIdx);
+    Emit(" attr= ");
+    EmitHexUnsigned(attr);
+  }
+
+  void EmitDIDebugAbbrevSectionHeader() override {
+    Emit("\t.section\t.debug_abbrev,\"\",@progbits\n");
+    Emit(".L" XSTR(DEBUG_ABBREV_0) ":\n");
+  }
+
+  void EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName,
+                             bool withChildren) override;
+  void EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, const std::string &dwAtName,
+                                     const std::string &dwFromName) override;
+
+  void EmitDIDebugSectionEnd(SectionKind secKind) override {
+    Emit("\t.byte 0x0\n");
+  }
+
+  void EmitDIDebugARangesSection() override {
+    Emit("\t.section\t.debug_aranges,\"\",@progbits\n");
+  }
+
+  void EmitDIDebugRangesSection() override {
+    Emit("\t.section\t.debug_ranges,\"\",@progbits\n");
+  }
+
+  void EmitDIDebugLineSection() override {
+    Emit("\t.section\t.debug_line,\"\",@progbits\n");
+    Emit(".L" XSTR(DEBUG_LINE_0) ":\n");
+  }
+
+  void EmitDIDebugStrSection(const std::vector<uint32> &strps, const std::vector<std::string> &debugStrs,
+                             uint64 size, size_t strTableSize) override;
+  void EmitHexUnsigned(uint64 num);
+  void EmitDecUnsigned(uint64 num);
+  void EmitDecSigned(int64 num);
+
+  void EmitLine() override {
+    Emit("\n");
+  }
+
+  /* start of X64 instructions */
+  /* mov */
+  void Mov(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Mov(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Mov(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* movabs */
+  void Movabs(const ImmOpnd &immOpnd, Reg reg) override;
+  void Movabs(int64 symIdx, Reg reg) override;
+  /* push */
+  void Push(InsnSize insnSize, Reg reg) override;
+  /* pop */
+  void Pop(InsnSize insnSize, Reg reg) override;
+  /* lea */
+  void Lea(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  /* movzx */
+  void MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) override;
+  void MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) override;
+  /* movsx */
+  void MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) override;
+  void MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) override;
+  /* add */
+  void Add(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Add(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Add(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* sub */
+  void Sub(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Sub(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Sub(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* and */
+  void And(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void And(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void And(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* or */
+  void Or(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Or(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Or(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* xor */
+  void Xor(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Xor(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Xor(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* not */
+  void Not(InsnSize insnSize, Reg reg) override;
+  void Not(InsnSize insnSize, const Mem &mem) override;
+  /* neg */
+  void Neg(InsnSize insnSize, Reg reg) override;
+  void Neg(InsnSize insnSize, const Mem &mem) override;
+  /* div & cwd, cdq, cqo */
+  void Idiv(InsnSize insnSize, Reg reg) override;
+  void Idiv(InsnSize insnSize, const Mem &mem) override;
+  void Div(InsnSize insnSize, Reg reg) override;
+  void Div(InsnSize insnSize, const Mem &mem) override;
+  void Cwd() override;
+  void Cdq() override;
+  void Cqo() override;
+  /* shl */
+  void Shl(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Shl(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Shl(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* sar */
+  void Sar(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Sar(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* shr */
+  void Shr(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Shr(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* jmp */
+  void Jmp(Reg reg) override;
+  void Jmp(const Mem &mem) override;
+  void Jmp(int64 symIdx) override;
+  /* jump condition */
+  void Je(int64 symIdx) override;
+  void Ja(int64 symIdx) override;
+  void Jae(int64 symIdx) override;
+  void Jne(int64 symIdx) override;
+  void Jb(int64 symIdx) override;
+  void Jbe(int64 symIdx) override;
+  void Jg(int64 symIdx) override;
+  void Jge(int64 symIdx) override;
+  void Jl(int64 symIdx) override;
+  void Jle(int64 symIdx) override;
+  /* cmp */
+  void Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmp(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmp(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* test */
+  void Test(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  /* set */
+  void Setbe(Reg reg) override;
+  void Setbe(const Mem &mem) override;
+  void Setle(Reg reg) override;
+  void Setle(const Mem &mem) override;
+  void Setae(Reg reg) override;
+  void Setae(const Mem &mem) override;
+  void Setge(Reg reg) override;
+  void Setge(const Mem &mem) override;
+  void Setne(Reg reg) override;
+  void Setne(const Mem &mem) override;
+  void Setb(Reg reg) override;
+  void Setb(const Mem &mem) override;
+  void Setl(Reg reg) override;
+  void Setl(const Mem &mem) override;
+  void Seta(Reg reg) override;
+  void Seta(const Mem &mem) override;
+  void Setg(Reg reg) override;
+  void Setg(const Mem &mem) override;
+  void Sete(Reg reg) override;
+  void Sete(const Mem &mem) override;
+  /* cmov */
+  void Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmova(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovbe(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmove(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovle(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  /* call */
+  void Call(InsnSize insnSize, Reg reg) override;
+  void Call(InsnSize insnSize, const Mem &mem) override;
+  void Call(InsnSize insnSize, int64 symIdx) override;
+  /* ret */
+  void Ret() override;
+  /* leave */
+  void Leave() override;
+  /* imul */
+  void Imul(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  /* nop */
+  void Nop(InsnSize insnSize, const Mem &mem) override;
+  void Nop() override;
+  /* byte swap */
+  void Bswap(InsnSize insnSize, Reg reg) override;
+  void Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  /* pseudo insn */
+  void DealWithPseudoInst(const std::string &insn) override;
+  /* end of X64 instructions */
+
+ private:
+  void EmitComment(std::string comment) {
+    Emit("\t// ");
+    Emit(comment);
+    Emit("\n");
+  }
+
+  void EmitSizeDirective(uint8 elemSize, int64 value, bool isSymbol, bool isLocal = false) {
+    std::unordered_map<uint8, std::string> symSizeDirMap = {{k1Byte, ".byte"}, {k2Bytes, ".value"},
+        {k4Bytes, ".long"}, {k8Bytes, ".quad"}};
+    Emit("\t");
+    Emit(symSizeDirMap.at(elemSize));
+    Emit("\t");
+    if (isSymbol) {
+      std::string name = GetNameFromSymMap(value, isLocal);
+      Emit(name);
+    } else {
+      Emit(value);
+    }
+    Emit("\n");
+  }
+
+  void EmitSectionDirective(SectionKind sectionKind) {
+    std::unordered_map<SectionKind, std::string> secDirMap = {{kSBss, ".bss\t"}, {kSData, ".data\n"},
+        {kSRodata, ".rodata\n"}, {kSTbss, ".tbss\t"}, {kSTdata, ".tdata\n"}, {kSText, ".text\n"}};
+    Emit("\t.section\t");
+    Emit(secDirMap.at(sectionKind));
+  }
+
+  void EmitSymbolAttrDirective(SymbolAttr symAttr, int64 symIdx, bool isLocal = false) {
+    std::unordered_map<SymbolAttr, std::string> symAttrDirMap = {{kSAGlobal, ".global"}, {kSALocal, ".local"},
+        {kSAHidden, ".hidden"}, {kSAStatic, ".local"}, {kSAWeak, ".weak"}};
+    std::string name = GetNameFromSymMap(symIdx, isLocal);
+    Emit("\t");
+    Emit(symAttrDirMap.at(symAttr));
+    Emit("\t");
+    Emit(name);
+    Emit("\n");
+  }
+
+  void EmitDirective(Directive directive, int64 symIdx = 0, bool isLocal = false, uint8 alignInByte = 0) {
+    std::string name = "";
+    if (symIdx != 0) {
+      name = GetNameFromSymMap(symIdx, isLocal);
+    }
+    switch (directive) {
+      case kAlign: {
+        if (alignInByte > 0) {
+          Emit("\t.align\t");
+          Emit(alignInByte);
+          Emit("\n");
+        }
+        break;
+      }
+      case kFile:
+        Emit("\t.file\t");
+        break;
+      case kFuncType:
+        Emit("\t.type\t");
+        Emit(name);
+        Emit(", @function\n");
+        break;
+      case kHidden:
+        Emit("\t.hidden\t");
+        Emit(name);
+        Emit("\n");
+        break;
+      case kName:
+        Emit(name);
+        Emit(":\n");
+        break;
+      case kObjType:
+        Emit("\t.type\t");
+        Emit(name);
+        Emit(", @object\n");
+        break;
+      case kSection:
+        Emit("\t.section\t");
+        break;
+      case kSize:
+        Emit("\t.size\t");
+        Emit(name);
+        Emit(", .-");
+        Emit(name);
+        Emit("\n");
+        break;
+      case kZero:
+        Emit("\t.zero\t");
+        break;
+      default:
+        assert(false && "EmitDirective: unsupported directive");
+        break;
+    }
+  }
+
+  void EmitInsnSuffix(InsnSize insnSize) {
+    std::unordered_map<InsnSize, std::string> insnSuffixDirMap = {{kB, "b"}, {kW, "w"}, {kL, "l"}, {kQ, "q"}};
+    Emit(insnSuffixDirMap.at(insnSize));
+  }
+
+  void EmitReg(Reg reg) {
+    std::string regStr = kRegStrMap.at(reg);
+    Emit("%");
+    Emit(regStr);
+  }
+
+  void EmitSymbol(int64 symIdx) {
+    std::string symbolName = GetNameFromSymMap(symIdx);
+    Emit("$");
+    Emit(symbolName);
+  }
+
+  void EmitMem(const Mem &mem) {
+    /* emit displacement */
+    if (mem.disp.first != 0) {
+      std::string dispSymName = GetNameFromSymMap(mem.disp.first);
+      Emit(dispSymName);
+      if (mem.disp.second != 0) {
+        Emit("+");
+        Emit(mem.disp.second);
+      }
+    } else {
+      Emit(mem.disp.second);
+    }
+    /* emit base & index registers */
+    Emit("(");
+    if (mem.base != ERR) {
+      EmitReg(mem.base);
+    }
+    if (mem.index != ERR) {
+      Emit(", ");
+      EmitReg(mem.index);
+      Emit(", ");
+      Emit(mem.s);
+    }
+    Emit(")");
+  }
+
+  void EmitImm(int64 imm) {
+    Emit("$");
+    Emit(imm);
+  }
+
+  void EmitLabel(int64 symIdx) {
+    std::string labelName = GetNameFromSymMap(symIdx);
+    if (symIdx < 0 && labelName[0] != '.') {
+      Emit("$");
+    }
+    Emit(labelName);
+  }
+
+  void EmitRegReg(Reg srcReg, Reg destReg) {
+    EmitReg(srcReg);
+    Emit(",\t");
+    EmitReg(destReg);
+  }
+
+  void EmitImmOrSymbolReg(int64 val, bool isSymbol, Reg reg) {
+    if (isSymbol) {
+      EmitSymbol(val);
+    } else {
+      EmitImm(val);
+    }
+    Emit(",\t");
+    EmitReg(reg);
+  }
+
+  void EmitLabelReg(int64 labelIdx, Reg reg) {
+    EmitLabel(labelIdx);
+    Emit(",\t");
+    EmitReg(reg);
+  }
+
+  void EmitMemReg(const Mem &mem, Reg reg) {
+    EmitMem(mem);
+    Emit(",\t");
+    EmitReg(reg);
+  }
+
+  void EmitRegMem(Reg reg, const Mem &mem) {
+    EmitReg(reg);
+    Emit(",\t");
+    EmitMem(mem);
+  }
+
+  void EmitImmOrSymbolMem(int64 val, bool isSymbol, Mem mem) {
+    if (isSymbol) {
+      EmitSymbol(val);
+    } else {
+      EmitImm(val);
+    }
+    Emit(",\t");
+    EmitMem(mem);
+  }
+
+  void EmitLabelMem(int64 labelIdx, const Mem &mem) {
+    EmitLabel(labelIdx);
+    Emit(",\t");
+    EmitMem(mem);
+  }
+}; /* class AsmAssembler */
+} /* namespace assembler */
+
+#endif /* ASM_ASSEMBLER_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/assembler.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/assembler.h
new file mode 100644
index 0000000000000000000000000000000000000000..573c33f2d67ab81d284ebc9f5398914b2512a3ed
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/assembler.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef X64_ASSEMBLER_H
+#define X64_ASSEMBLER_H
+
+#define NDEBUG
+#include <cassert>
+#include <fstream>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "operand.h"
+
+namespace assembler {
+enum SymbolAttr {
+  kSAGlobal,
+  kSALocal,
+  kSAWeak,
+  kSAHidden,
+  kSAStatic,
+};
+
+enum SectionKind {
+  kSBss,
+  kSComm,
+  kSData,
+  kSRodata,
+  kSTbss,
+  kSTdata,
+  kSText,
+  kSDebugInfo,
+  kSDebugAbbrev,
+  kSDebugStr,
+};
+
+constexpr uint32 kDwarfVersion = 4;
+constexpr uint32 kSizeOfPTR = 8;
+constexpr uint32 k9ByteSize = 9;
+
+class Assembler {
+ public:
+  Assembler() = default;
+  virtual ~Assembler() = default;
+
+  void CloseOutput() {
+    if (outStream.is_open()) {
+      outStream.close();
+    }
+  }
+
+  void Emit(int64 val) {
+    outStream << val;
+  }
+
+  void Emit(const std::string &str) {
+    outStream << str;
+  }
+
+  void Emit(const void *data, uint64 sizeInByte) {
+    outStream.write(reinterpret_cast<const char *>(data), sizeInByte);
+  }
+
+  void StoreNameIntoSymMap(int64 symIdx, const std::string &name, bool isLocal = false) {
+    if (isLocal) {
+      (void)localSymMap.emplace(symIdx, name);
+    } else {
+      (void)globalSymMap.emplace(symIdx, name);
+    }
+  }
+
+  const std::string &GetNameFromSymMap(int64 symIdx, bool isLocal = false) {
+    if (isLocal) {
+      return localSymMap.at(symIdx);
+    } else {
+      return globalSymMap.at(symIdx);
+    }
+  }
+
+  void ClearLocalSymMap() {
+    localSymMap.clear();
+  }
+
+  virtual void InitialFileInfo(const std::string &inputFileName) = 0;
+  virtual void EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr,
+                            SectionKind sectionKind) = 0;
+  virtual void EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr) = 0;
+  virtual void EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName = nullptr) = 0;
+  virtual void EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo = false, uint32 freq = 0,
+                           const std::string *mirName = nullptr) = 0;
+  virtual void EmitJmpTableElem(int64 jmpLabelIdx, const std::vector<int64> &labelIdxs) = 0;
+  virtual void EmitDirectString(const std::string &ustr, bool belongsToDataSec = true,
+                                int64 strSymIdx = 0, bool emitAscii = false) = 0;
+  /* Indirect strings refer to string pointers, such as "char *buf". */
+  virtual void EmitIndirectString(int64 strSymIdx, bool belongsToDataSec = true) = 0;
+  virtual void EmitIntValue(int64 value, size_t valueSize, bool belongsToDataSec = true) = 0;
+  virtual void EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec = true) = 0;
+  virtual void EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec = true) = 0;
+  virtual void EmitLabelValue(int64 symIdx, bool belongsToDataSec = true) = 0;
+  virtual void EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec = true) = 0;
+  virtual void EmitNull(uint64 sizeInByte) = 0;
+  virtual void PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte) = 0;
+  virtual void FinalizeFileInfo() = 0;
+
+  /* emit debug info */
+  virtual void EmitDIHeader() = 0;
+  virtual void EmitDIFooter() = 0;
+  virtual void EmitDIHeaderFileInfo() = 0;
+  virtual void EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) = 0;
+  virtual void EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName,
+                                              uint32 offset, uint32 size) = 0;
+  virtual void EmitDIFormSpecification(unsigned int dwform) = 0;
+  /* EmitDIAttrValue */
+  virtual void EmitDwFormString(const std::string &name) = 0;
+  /* strTableSize is used to calculate unique id for the debug string */
+  virtual void EmitDwFormStrp(uint32 strLabelId, size_t strTableSize) = 0;
+  virtual void EmitDwFormData(int32 attrValue, uint8 sizeInByte) = 0;
+  virtual void EmitDwFormData8() = 0;
+  virtual void EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx,
+                               uint32 endLabelIdx, uint32 startLabelIdx) = 0;
+  virtual void EmitLabel(uint32 funcPuIdx, uint32 labIdx) = 0;
+  virtual void EmitDwFormSecOffset() = 0;
+  virtual void EmitDwFormAddr(bool emitTextBegin = false) = 0;
+  virtual void EmitDwFormRef4(uint64 offsetOrValue, bool unknownType = false, bool emitOffset = false) = 0;
+  virtual void EmitDwFormExprlocCfa(uint32 dwOp) = 0;
+  virtual void EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) = 0;
+  virtual void EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) = 0;
+  virtual void EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) = 0; /* n=0~7 */
+  virtual void EmitDwFormExprloc(uintptr elp) = 0;
+
+  virtual void EmitDIDwName(const std::string &dwAtName, const std::string &dwForName) = 0;
+  virtual void EmitDIDWFormStr(const std::string &formStr) = 0;
+  virtual void EmitDIDWDataMemberLocaltion(unsigned int lableIdx, uintptr_t attr) = 0;
+  virtual void EmitDIDebugAbbrevSectionHeader() = 0;
+  virtual void EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName,
+                                     bool withChildren) = 0;
+  virtual void EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, const std::string &dwAtName,
+                                             const std::string &dwFromName) = 0;
+  virtual void EmitDIDebugSectionEnd(SectionKind secKind) = 0;
+  virtual void EmitDIDebugARangesSection() = 0;
+  virtual void EmitDIDebugRangesSection() = 0;
+  virtual void EmitDIDebugLineSection() = 0;
+  virtual void EmitDIDebugStrSection(const std::vector<uint32> &strps, const std::vector<std::string> &debugStrs,
+                                     uint64 size, size_t strTableSize) = 0;
+  virtual void EmitLine() = 0;
+
+  /* start of X64 instructions */
+  /* mov */
+  virtual void Mov(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Mov(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void Mov(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
+  /* movabs */
+  virtual void Movabs(const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Movabs(int64 symIdx, Reg reg) = 0;
+  /* push */
+  virtual void Push(InsnSize insnSize, Reg reg) = 0;
+  /* pop */
+  virtual void Pop(InsnSize insnSize, Reg reg) = 0;
+  /* lea */
+  virtual void Lea(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  /* movzx */
+  virtual void MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) = 0;
+  virtual void MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) = 0;
+  /* movsx */
+  virtual void MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) = 0;
+  virtual void MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) = 0;
+  /* add */
+  virtual void Add(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Add(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void Add(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
+  /* sub */
+  virtual void Sub(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Sub(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void Sub(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
+  /* and */
+  virtual void And(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void And(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void And(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
+  /* or */
+  virtual void Or(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Or(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Or(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
+  /* xor */
+  virtual void Xor(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Xor(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void Xor(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
+  /* not */
+  virtual void Not(InsnSize insnSize, Reg reg) = 0;
+  virtual void Not(InsnSize insnSize, const Mem &mem) = 0;
+  /* neg */
+  virtual void Neg(InsnSize insnSize, Reg reg) = 0;
+  virtual void Neg(InsnSize insnSize, const Mem &mem) = 0;
+  /* div & cwd, cdq, cqo */
+  virtual void Idiv(InsnSize insnSize, Reg reg) = 0;
+  virtual void Idiv(InsnSize insnSize, const Mem &mem) = 0;
+  virtual void Div(InsnSize insnSize, Reg reg) = 0;
+  virtual void Div(InsnSize insnSize, const Mem &mem) = 0;
+  virtual void Cwd() = 0;
+  virtual void Cdq() = 0;
+  virtual void Cqo() = 0;
+  /* shl */
+  virtual void Shl(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0;
+  virtual void Shl(InsnSize insnSize, Reg reg, const Mem &mem) = 0;
+  virtual void Shl(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0;
/* sar */ + virtual void Sar(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0; + virtual void Sar(InsnSize insnSize, Reg reg, const Mem &mem) = 0; + virtual void Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0; + /* shr */ + virtual void Shr(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0; + virtual void Shr(InsnSize insnSize, Reg reg, const Mem &mem) = 0; + virtual void Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0; + /* jmp */ + virtual void Jmp(Reg reg) = 0; + virtual void Jmp(const Mem &mem) = 0; + virtual void Jmp(int64 symIdx) = 0; + /* jump condition */ + virtual void Je(int64 symIdx) = 0; + virtual void Ja(int64 symIdx) = 0; + virtual void Jae(int64 symIdx) = 0; + virtual void Jne(int64 symIdx) = 0; + virtual void Jb(int64 symIdx) = 0; + virtual void Jbe(int64 symIdx) = 0; + virtual void Jg(int64 symIdx) = 0; + virtual void Jge(int64 symIdx) = 0; + virtual void Jl(int64 symIdx) = 0; + virtual void Jle(int64 symIdx) = 0; + /* cmp */ + virtual void Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmp(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmp(InsnSize insnSize, Reg reg, const Mem &mem) = 0; + virtual void Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) = 0; + virtual void Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) = 0; + /* test */ + virtual void Test(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + /* set */ + virtual void Setbe(Reg reg) = 0; + virtual void Setbe(const Mem &mem) = 0; + virtual void Setle(Reg reg) = 0; + virtual void Setle(const Mem &mem) = 0; + virtual void Setae(Reg reg) = 0; + virtual void Setae(const Mem &mem) = 0; + virtual void Setge(Reg reg) = 0; + virtual void Setge(const Mem &mem) = 0; + virtual void Setne(Reg reg) = 0; + virtual void Setne(const Mem &mem) = 0; + virtual void Setb(Reg reg) = 0; + virtual void Setb(const Mem &mem) = 0; + virtual void Setl(Reg reg) = 0; + virtual void Setl(const Mem &mem) = 0; + virtual void Seta(Reg reg) = 0; + virtual void Seta(const Mem &mem) = 0; + virtual void Setg(Reg reg) = 0; + virtual void Setg(const Mem &mem) = 0; + virtual void Sete(Reg reg) = 0; + virtual void Sete(const Mem &mem) = 0; + /* cmov */ + virtual void Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmova(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovbe(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmove(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) = 0; + virtual void Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) = 0; + virtual void Cmovle(InsnSize insnSize, Reg srcReg, 
Reg destReg) = 0;
+  virtual void Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  virtual void Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  virtual void Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) = 0;
+  /* call */
+  virtual void Call(InsnSize insnSize, Reg reg) = 0;
+  virtual void Call(InsnSize insnSize, const Mem &mem) = 0;
+  virtual void Call(InsnSize insnSize, int64 symIdx) = 0;
+  /* ret */
+  virtual void Ret() = 0;
+  /* leave */
+  virtual void Leave() = 0;
+  /* imul */
+  virtual void Imul(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  /* nop */
+  virtual void Nop(InsnSize insnSize, const Mem &mem) = 0;
+  virtual void Nop() = 0;
+  /* byte swap */
+  virtual void Bswap(InsnSize insnSize, Reg reg) = 0;
+  virtual void Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) = 0;
+  /* pseudo insn */
+  virtual void DealWithPseudoInst(const std::string &insn) = 0;
+  /* end of X64 instructions */
+
+ protected:
+  std::ofstream outStream;
+  std::string fileName;
+  std::unordered_map<int64, std::string> globalSymMap; /* store global variable symbols */
+  std::unordered_map<int64, std::string> localSymMap;  /* store local variable symbols for each function */
+};
+} /* namespace assembler */
+
+#endif /* X64_ASSEMBLER_H */
\ No newline at end of file
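For orientation, here is a minimal sketch of how a backend could drive this pure-virtual interface to emit a trivial frame setup/teardown. `EmitFrameSkeleton` is a hypothetical helper written for illustration; only the `Push`/`Mov`/`Leave`/`Ret` signatures come from the class above, and any concrete `Assembler` subclass (such as the ELF emitter below) could be passed in.

```cpp
#include "assembler.h"

namespace assembler {
// Hypothetical helper: emits "push %rbp; mov %rsp, %rbp; ...; leave; ret"
// through any concrete Assembler implementation.
void EmitFrameSkeleton(Assembler &as) {
  as.Push(kQ, RBP);      // push %rbp
  as.Mov(kQ, RSP, RBP);  // mov %rsp, %rbp  (srcReg, destReg order per the API)
  as.Leave();            // restore %rsp/%rbp
  as.Ret();
}
} /* namespace assembler */
```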
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/elf_assembler.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/elf_assembler.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bdd16e2db171afc9ee81a6561c92abc9975746c
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/elf_assembler.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef ELF_ASSEMBLER_H
+#define ELF_ASSEMBLER_H
+
+#include "assembler.h"
+#include "elf_file.h"
+#include "cg_option.h"
+
+namespace assembler {
+const uint8 kLeftShift6Bits = 6;
+const uint8 kLeftShift3Bits = 3;
+const uint8 kGetLow3Bits = 0b111;
+const uint8 kLeftShift32Bits = 32;
+const uint8 kLeftShift4Bits = 4;
+
+class ElfAssembler : public Assembler {
+ public:
+  ElfAssembler(const std::string &outputFileName, const maplebe::CGOptions::EmitMemoryManager &emitMemoryManager)
+      : Assembler(), emitMemoryManager(emitMemoryManager) {
+    outStream.open(outputFileName, std::ios::trunc | std::ios::binary);
+  }
+
+  ~ElfAssembler() = default;
+
+  /* override functions from the base class */
+  void InitialFileInfo(const std::string &inputFileName) override;
+  void EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte,
+                    SymbolAttr symAttr, SectionKind sectionKind) override;
+  void EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName) override;
+  void EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo, uint32 freq, const std::string *mirName) override;
+  void EmitJmpTableElem(int64 jmpLabelIdx, const std::vector<int64> &labelSymIdxs) override;
+  void EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr) override;
+  void EmitDirectString(const std::string &ustr, bool belongsToDataSec, int64 strSymIdx, bool emitAscii) override;
+  void EmitIndirectString(int64 strSymIdx, bool belongsToDataSec) override;
+  void EmitIntValue(int64 value, size_t valueSize, bool belongsToDataSec) override;
+  void EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec) override;
+  void EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec) override;
+  void EmitLabelValue(int64 symIdx, bool belongsToDataSec) override;
+  void EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec) override;
+  void EmitNull(uint64 sizeInByte) override;
+  void PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte) override;
+  void FinalizeFileInfo() override;
+  void EmitBssSectionVar(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr);
+  void EmitDataSectionVar(int64 symIdx);
+
+  /* emit debug info */
+  void EmitDIHeader() override {};
+  void EmitDIFooter() override {};
+  void EmitDIHeaderFileInfo() override {};
+  void EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) override;
+  void EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName, uint32 offset,
+                                      uint32 size) override;
+  /* nothing to do in ElfAssembler. */
+  void EmitDIFormSpecification(unsigned int dwform) override {};
+  /* EmitDIAttrValue */
+  void EmitDwFormString(const std::string &name) override;
+  void EmitDwFormStrp(uint32 strLabelId, size_t strTableSize) override;
+  void EmitDwFormData(int32 attrValue, uint8 sizeInByte) override;
+  void EmitDwFormData8() override;
+  void EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx,
+                       uint32 endLabelIdx, uint32 startLabelIdx) override;
+  void EmitLabel(uint32 funcPuIdx, uint32 labIdx) override;
+  void EmitDwFormSecOffset() override;
+  void EmitDwFormAddr(bool emitTextBegin) override;
+  void EmitDwFormRef4(uint64 offsetOrValue, bool unknownType, bool emitOffset) override;
+  void EmitDwFormExprlocCfa(uint32 dwOp) override;
+  void EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) override;
+  void EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) override;
+  void EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) override; /* n=0~7 */
+  void EmitDwFormExprloc(uintptr elp) override;
+
+  void EmitDIDwName(const std::string &dwAtName, const std::string &dwForName) override {};
+  void EmitDIDWFormStr(const std::string &formStr) override {};
+  void EmitDIDWDataMemberLocaltion(unsigned int lableIdx, uintptr_t attr) override {};
+  void EmitDIDebugAbbrevSectionHeader() override {};
+  void EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName,
+                             bool withChildren) override;
+  void EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, const std::string &dwAtName,
+                                     const std::string &dwFromName) override;
+  void EmitDIDebugSectionEnd(SectionKind secKind) override;
+  void EmitDIDebugARangesSection() override {
+    debugARangesSection = new DataSection(".debug_aranges", SHT_PROGBITS, 0, 1);
+    RegisterSection(*debugARangesSection);
+  }
+
+  void EmitDIDebugRangesSection() override {
+    debugRangesSection = new DataSection(".debug_ranges", SHT_PROGBITS, 0, 1);
+    RegisterSection(*debugRangesSection);
+  }
+
+  void EmitDIDebugLineSection() override {};
+  void EmitDIDebugStrSection(const std::vector<uint32> &strps, const std::vector<std::string> &debugStrs,
+                             uint64 size, size_t strTableSize) override;
+  void EmitLine() override {};
+  void HandleDebugInfoSectionFixup();
+
+  const std::vector<uint8>& EncodeULEB128(uint64 value) {
+    static std::vector<uint8> uleb128Value;
+    uleb128Value.clear(); /* the buffer is reused across calls; stale bytes must not leak in */
+    if (value == 0) {
+      uleb128Value.push_back(0);
+    }
+    int rightShift7Bits = 7;
+    while (value != 0) {
+      uint8 byte = value & 0x7F;
+      value >>= rightShift7Bits;
+      if (value != 0) {
+        byte |= 0x80; /* continuation bit: more bytes follow */
+      }
+      uleb128Value.push_back(byte);
+    }
+    return uleb128Value;
+  }
+
+  const std::vector<uint8>& EncodeSLEB128(int64 value) {
+    static std::vector<uint8> sleb128Value;
+    sleb128Value.clear(); /* see EncodeULEB128 */
+    int more = 1;
+    int rightShift7Bits = 7;
+    while (more != 0) {
+      uint8 byte = value & 0x7F;
+      uint8 sign = value & 0x40;
+      value >>= rightShift7Bits;
+      if ((value == 0 && sign == 0) || (value == -1 && sign != 0)) {
+        more = 0;
+      } else {
+        byte |= 0x80;
+      }
+      sleb128Value.push_back(byte);
+    }
+    return sleb128Value;
+  }
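ULEB128/SLEB128 are DWARF's variable-length integer encodings: seven payload bits per byte, with the high bit marking that more bytes follow. A standalone mirror of the unsigned encoder above, checked against the worked example from the DWARF specification (624485 encodes to `E5 8E 26`); this is illustrative only, not part of the patch:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Standalone re-implementation of the EncodeULEB128 logic above.
std::vector<uint8_t> Uleb128(uint64_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t byte = value & 0x7F;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80;  // continuation bit: more bytes follow
    }
    out.push_back(byte);
  } while (value != 0);
  return out;
}

int main() {
  // Worked example from the DWARF specification: 624485 -> E5 8E 26.
  assert((Uleb128(624485) == std::vector<uint8_t>{0xE5, 0x8E, 0x26}));
  assert((Uleb128(0) == std::vector<uint8_t>{0x00}));
  return 0;
}
```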
+
+  /* start of X64 instructions */
+  /* mov */
+  void Mov(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Mov(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Mov(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* movabs */
+  void Movabs(const ImmOpnd &immOpnd, Reg reg) override;
+  void Movabs(int64 symIdx, Reg reg) override;
+  /* push */
+  void Push(InsnSize insnSize, Reg reg) override;
+  /* pop */
+  void Pop(InsnSize insnSize, Reg reg) override;
+  /* lea */
+  void Lea(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  /* movzx */
+  void MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) override;
+  void MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) override;
+  /* movsx */
+  void MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) override;
+  void MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) override;
+  /* add */
+  void Add(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Add(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Add(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* sub */
+  void Sub(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Sub(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Sub(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* and */
+  void And(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void And(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void And(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* or */
+  void Or(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Or(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Or(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* xor */
+  void Xor(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override;
+  void Xor(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Xor(InsnSize insnSize, Reg reg, const Mem &mem) override;
+  void Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override;
+  /* not */
+  void Not(InsnSize insnSize, Reg reg) override;
+  void Not(InsnSize insnSize, const Mem &mem) override;
+  /* neg */
+  void Neg(InsnSize insnSize, Reg reg) override;
+  void Neg(InsnSize
insnSize, const Mem &mem) override; + /* div & cwd, cdq, cqo */ + void Idiv(InsnSize insnSize, Reg reg) override; + void Idiv(InsnSize insnSize, const Mem &mem) override; + void Div(InsnSize insnSize, Reg reg) override; + void Div(InsnSize insnSize, const Mem &mem) override; + void Cwd() override; + void Cdq() override; + void Cqo() override; + /* shl */ + void Shl(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override; + void Shl(InsnSize insnSize, Reg reg, const Mem &mem) override; + void Shl(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override; + /* sar */ + void Sar(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override; + void Sar(InsnSize insnSize, Reg reg, const Mem &mem) override; + void Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override; + /* shr */ + void Shr(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override; + void Shr(InsnSize insnSize, Reg reg, const Mem &mem) override; + void Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override; + /* jmp */ + void Jmp(Reg reg) override; + void Jmp(const Mem &mem) override; + void Jmp(int64 symIdx) override; + /* jump condition */ + void Je(int64 symIdx) override; + void Ja(int64 symIdx) override; + void Jae(int64 symIdx) override; + void Jne(int64 symIdx) override; + void Jb(int64 symIdx) override; + void Jbe(int64 symIdx) override; + void Jg(int64 symIdx) override; + void Jge(int64 symIdx) override; + void Jl(int64 symIdx) override; + void Jle(int64 symIdx) override; + /* cmp */ + void Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Cmp(InsnSize insnSize, const Mem &mem, Reg reg) override; + void Cmp(InsnSize insnSize, Reg reg, const Mem &mem) override; + void Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) override; + void Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) override; + /* test */ + void Test(InsnSize insnSize, Reg srcReg, Reg destReg) override; + /* set */ + void Setbe(Reg reg) override; + void Setbe(const Mem &mem) override; + void Setle(Reg reg) override; + void Setle(const Mem &mem) override; + void Setae(Reg reg) override; + void Setae(const Mem &mem) override; + void Setge(Reg reg) override; + void Setge(const Mem &mem) override; + void Setne(Reg reg) override; + void Setne(const Mem &mem) override; + void Setb(Reg reg) override; + void Setb(const Mem &mem) override; + void Setl(Reg reg) override; + void Setl(const Mem &mem) override; + void Seta(Reg reg) override; + void Seta(const Mem &mem) override; + void Setg(Reg reg) override; + void Setg(const Mem &mem) override; + void Sete(Reg reg) override; + void Sete(const Mem &mem) override; + /* cmov */ + void Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Cmova(InsnSize insnSize, const Mem &mem, Reg reg) override; + void Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) override; + void Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) override; + void Cmovbe(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) override; + void Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) override; + void Cmove(InsnSize insnSize, const Mem &mem, Reg reg) override; + 
void Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovle(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  void Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  void Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) override;
+  /* call */
+  void Call(InsnSize insnSize, Reg reg) override;
+  void Call(InsnSize insnSize, const Mem &mem) override;
+  void Call(InsnSize insnSize, int64 symIdx) override;
+  /* ret */
+  void Ret() override;
+  /* leave */
+  void Leave() override;
+  /* imul */
+  void Imul(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  /* nop */
+  void Nop(InsnSize insnSize, const Mem &mem) override;
+  void Nop() override;
+  /* byte swap */
+  void Bswap(InsnSize insnSize, Reg reg) override;
+  void Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) override;
+  /* pseudo insn */
+  void DealWithPseudoInst(const std::string &insn) override {}
+  /* end of X64 instructions */
+
+ private:
+  /* insn encode functions */
+  void Encodeb(uint64 code) {
+    codeBuff.push_back(static_cast<uint8>(code));
+  }
+
+  void Encodeb(const uint8 *code, size_t codeSize) {
+    for (size_t i = 0; i < codeSize; i++) {
+      Encodeb(code[i]);
+    }
+  }
+
+  void Encodeb(uint64 code, size_t codeSize) {
+    for (size_t i = 0; i < codeSize; i++) {
+      /* Use little endian for immediate value or displacement. */
+      Encodeb(static_cast<uint8>(code >> (i * k8Bits)));
+    }
+  }
+
+  void Encodew(uint64 code) {
+    Encodeb(code, k16Bits / k8Bits); /* 2 bytes, 16 bits */
+  }
+
+  void Encoded(uint64 code) {
+    Encodeb(code, k32Bits / k8Bits); /* 4 bytes, 32 bits */
+  }
+
+  void Encodeq(uint64 code) {
+    Encodeb(code, k64Bits / k8Bits); /* 8 bytes, 64 bits */
+  }
+
+  void FixupEncode(size_t position, uint32 relOffset, size_t codeSize) {
+    uint64 code = static_cast<uint64>(relOffset - position - codeSize);
+    for (size_t i = 0; i < codeSize; i++) {
+      codeBuff[position + i] = static_cast<uint8>(code >> (i * k8Bits));
+    }
+  }
+
+  uint8 GetRegSize(Reg reg) const {
+    const uint8 kRegSizeBits = 8; /* The high 8 bits hold the register's size. */
+    return static_cast<uint8>(reg >> kRegSizeBits);
+  }
+
+  uint8 GetRegId(Reg reg) const {
+    return static_cast<uint8>(reg & 0xF);
+  }
+
+  bool IsRegExt(Reg reg) const {
+    /* 0xF is 4 bits; the lower 4 bits hold the id of the register. */
+    uint8 regId = reg & 0xF;
+    const uint8 kStartExtId = 8;
+    return (regId & kStartExtId) == kStartExtId;
+  }
+
+  uint8 GetRegCodeId(Reg reg) const {
+    /* The lower 3 bits store the encoding id of the register. */
+    return reg & kGetLow3Bits;
+  }
+
+  bool Need8BitRegPrefix(Reg reg) const {
+    if (reg == ERR || GetRegSize(reg) != k8Bits) {
+      return false;
+    }
+    uint8 regCodeId = GetRegCodeId(reg);
+    /* From right to left, the eighth bit in reg being 1 marks a low 8-bit register. */
+    const int kEighthBit = 8;
+    bool isLow8BitReg = ((((reg & 0xFF) >> (kEighthBit - 1)) & 1) == 1);
+    const uint8 kMinHigh8BitRegId = 4;
+    const uint8 kMaxHigh8BitRegId = 7;
+    return isLow8BitReg && (regCodeId >= kMinHigh8BitRegId && regCodeId <= kMaxHigh8BitRegId);
+  }
+
+  bool HasOpndSizePrefix(Reg reg) const {
+    return GetRegSize(reg) == k16Bits;
+  }
+
+  bool HasOpndSizePrefix(const Mem &mem) const {
+    return mem.size == k16Bits;
+  }
+
+  bool HasAddrSizePrefix(const Mem &mem) const {
+    Reg reg = mem.base != ERR ? mem.base : (mem.index != ERR ? mem.index : ERR);
+    uint8 regSize = reg == ERR ? k64Bits : GetRegSize(reg);
+    return regSize == k32Bits;
+  }
+
+  uint8 GetRex(Reg reg) const {
+    uint8 rexW = GetRegSize(reg) == k64Bits ? 8 : 0;
+    uint8 rexB = IsRegExt(reg) ? 1 : 0;
+    uint8 rex = rexW | rexB;
+    if (rex || Need8BitRegPrefix(reg)) {
+      rex |= 0x40; /* 0x40 is the fixed REX prefix. */
+    }
+    return rex;
+  }
+
+  uint8 GetRex(Reg reg1, Reg reg2) const {
+    uint8 rexW = GetRegSize(reg1) == k64Bits || GetRegSize(reg2) == k64Bits ? 8 : 0;
+    uint8 rexR = IsRegExt(reg1) ? 4 : 0;
+    uint8 rexB = IsRegExt(reg2) ? 1 : 0;
+    uint8 rex = rexW | rexR | rexB;
+    if (rex || Need8BitRegPrefix(reg1) || Need8BitRegPrefix(reg2)) {
+      rex |= 0x40; /* 0x40 is the fixed REX prefix. */
+    }
+    return rex;
+  }
+
+  uint8 GetRex(const Mem &mem, Reg reg = ERR) const {
+    uint8 rexW = (reg != ERR && GetRegSize(reg) == k64Bits) || mem.size == k64Bits ? 8 : 0;
+    uint8 rexR = reg != ERR && IsRegExt(reg) ? 4 : 0;
+    uint8 rexX = mem.index != ERR && IsRegExt(mem.index) ? 2 : 0;
+    uint8 rexB = mem.base != ERR && IsRegExt(mem.base) ? 1 : 0;
+    uint8 rex = rexW | rexR | rexX | rexB;
+    if (rex || Need8BitRegPrefix(reg)) {
+      rex |= 0x40; /* 0x40 is the fixed REX prefix. */
+    }
+    return rex;
+  }
+
+  uint8 GetMod(Reg reg) const { return 0b11; } /* mod=b11, register direct addressing. */
+
+  uint8 GetMod(const Mem &mem) const {
+    int64 symIdx = mem.disp.first;
+    int64 offset = mem.disp.second;
+    if ((symIdx == 0 && offset != 0 && Is8Bits(offset) && mem.base != RIP) ||
+        (mem.memType == kOnlyBase && (mem.base == RBP || mem.base == R13))) {
+      return 0b01; /* mod=b01: an 8-bit displacement follows */
+    } else if (symIdx != 0 || (offset != 0 && Is32Bits(offset))) {
+      if (mem.base == ERR || mem.base == RIP) {
+        return 0b00; /* mod=b00: RIP-relative or absolute 32-bit displacement */
+      } else {
+        return 0b10; /* mod=b10: a 32-bit displacement follows */
+      }
+    }
+    return 0b00; /* mod=b00: no displacement */
+  }
+
+  uint8 GetModRM(Reg reg1, Reg reg2) const {
+    uint8 modReg = GetRegCodeId(reg1);
+    uint8 mod = GetMod(reg1);
+    uint8 modrm = GetRegCodeId(reg2);
+    return ((mod << kLeftShift6Bits) | (modReg << kLeftShift3Bits) | (modrm & kGetLow3Bits));
+  }
+
+  uint8 GetModRM(Reg reg, const Mem &mem) const {
+    uint8 modReg = GetRegCodeId(reg);
+    uint8 mod = GetMod(mem);
+    uint8 modrm = !HasSIB(mem) ? GetRegCodeId(mem.base) : 0b100; /* r/m=b100 selects a SIB byte */
+    return ((mod << kLeftShift6Bits) | (modReg << kLeftShift3Bits) | (modrm & kGetLow3Bits));
+  }
+
+  void SetModRM(uint8 mod, uint8 modReg, uint8 modrm) {
+    uint8 modRM = (mod << kLeftShift6Bits) | (modReg << kLeftShift3Bits) | (modrm & kGetLow3Bits);
+    Encodeb(modRM);
+  }
+
+  bool HasSIB(const Mem &mem) const {
+    if (mem.memType == kBaseAndDisp || mem.memType == kOnlyBase) {
+      return GetRegCodeId(mem.base) == 0b100;
+    }
+    return mem.memType != kBaseAndDisp && mem.memType != kOnlyBase && mem.memType != kNone;
+  }
+
+  uint8 GetSIB(const Mem &mem) const {
+    std::unordered_map<uint8, uint8> log2Map = {{1, 0}, {2, 1}, {4, 2}, {8, 3}};
+    uint8 scale = log2Map[mem.s];
+    uint8 index = mem.index != ERR ? GetRegCodeId(mem.index) : 0b100;
+    uint8 base = mem.base != ERR ? GetRegCodeId(mem.base) : 0b101;
+    return ((scale << kLeftShift6Bits) | ((index & kGetLow3Bits) << kLeftShift3Bits) | (base & kGetLow3Bits));
+  }
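To make the REX/ModRM arithmetic above concrete, the following standalone sketch assembles `movq %rax, %rbx` (AT&T operand order) by hand using the same bit layout: the fixed REX prefix 0x40 with W set for 64-bit operands, the 0x89 `MOV r/m64, r64` opcode, and a ModRM byte with mod=0b11 for register-direct addressing. The register ids mirror the low bits of the `Reg` enum; everything else is illustrative.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Hardware encoding ids (the low bits of the Reg enum above):
  const uint8_t rax = 0;  // source, goes into ModRM.reg
  const uint8_t rbx = 3;  // destination, goes into ModRM.rm
  // REX: fixed 0x40 prefix | W (8) for 64-bit operands; R/X/B stay 0
  // because neither register is one of the extended ones (r8-r15).
  uint8_t rex = 0x40 | 0x08;
  // ModRM: mod=0b11 (register direct) | reg << 3 | rm.
  uint8_t modrm = (0b11 << 6) | (rax << 3) | rbx;
  // Full encoding of "movq %rax, %rbx": 48 89 C3.
  assert(rex == 0x48);
  assert(modrm == 0xC3);
  return 0;
}
```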
+
+  void OpReg(Reg reg, uint8 opCode1, uint8 opCode2, uint8 modReg);
+  void OpMem(const Mem &mem, uint8 opCode1, uint8 opCode2, uint8 modReg);
+  void OpDisp(const Mem &mem);
+  void OpRR(Reg reg1, Reg reg2, uint8 opCode1, uint8 opCode2 = 0, bool extInsn = false);
+  void OpRM(Reg reg, const Mem &mem, uint8 opCode1, uint8 opCode2 = 0, bool extInsn = false);
+  void OpImmAndReg(const ImmOpnd &immOpnd, Reg reg, uint8 opCode, uint8 modReg);
+  void OpImmAndMem(const ImmOpnd &immOpnd, const Mem &mem, uint8 modReg);
+  void MovRegAndDisp(Reg reg, const Mem &mem, uint8 opCode);
+  void OpPushPop(Reg reg, uint8 opCode);
+  void JmpToLabel(int64 labelIdx, uint8 opCode1, uint8 opCode2 = 0, size_t offsetSize = 4);
+  void OpCmovcc(Reg srcReg, Reg dstReg, uint8 opCode1, uint8 opCode2);
+  void UpdateLabel(int64 labelIdx, LabelType type = LabelType::kLNone, uint32 relOffset = 0xFFFFFFFFU);
+  bool CanEncodeLabel(int64 labelIdx);
+  uint32 GetLabelSize(int64 labelIdx) const;
+  uint32 GetLabelRelOffset(int64 labelIdx) const;
+  void AppendFixup(int64 labelIdx, FixupKind kind, const std::pair<uint32, size_t> &offsetPair,
+                   std::vector<Fixup> &tmpFixups, int64 disp = 0);
+  /* elf file */
+  void InitElfHeader();
+  void RegisterSection(Section &section);
+  void LayoutSections();
+  void UpdateSectionOffset(Section &section);
+  void UpdateGlobalOffset(Section &section);
+  void SetFileOffset(uint64 offset);
+  /* symIdx is the key used to get the symbol's index in .symtab */
+  void AddSymToSymTab(const Symbol &symbol, int64 symIdx);
+  void AppendRela(const Label &label, const std::pair<uint32, size_t> &offsetPair, uint64 type, Sxword addend);
+  uint64 GetRelaType(FixupKind kind) const;
+  void HandleTextSectionFixup();
+  void HandleDataSectionFixup();
+  void HandleRodataSectionFixup();
+  void WriteElfFile();
+  void AppendSecSymsToSymTabSec();
+  void AppendSymsToSymTabSec();
+
+ private:
+  std::vector<uint8> codeBuff;
+  std::unordered_map<int64, Label*> labelManager;
+  std::vector<std::pair<int64, Symbol>> localSymTab;
+  std::vector<std::pair<int64, Symbol>> symTab;
+  std::vector<Fixup> fixups;
+  std::vector<Fixup> dataFixups;
+  std::vector<Fixup> rodataFixups;
+  std::vector<Fixup> debugInfoFixups;
+  std::vector<Section*> sections;
+  Offset globalOffset = 0; /* global offset of the elf file */
+  ElfFileHeader header {};
+  DataSection *textSection = nullptr;
+  DataSection *rodataSection = nullptr;
+  DataSection *dataSection = nullptr;
+  DataSection *bssSection = nullptr;
+  RelaSection *relaSection = nullptr;
+  RelaSection *relaDataSection = nullptr;
+  RelaSection *relaRodataSection = nullptr;
+  StringSection *strTabSection = nullptr;
+  SymbolSection *symbolTabSection = nullptr;
+  /* debug sections */
+  DataSection *debugInfoSection = nullptr;
+  RelaSection *relaDebugInfoSection = nullptr;
+  DataSection *debugAbbrevSection = nullptr;
+  DataSection *debugStrSection = nullptr;
+  DataSection *debugARangesSection = nullptr;
+  DataSection *debugRangesSection = nullptr;
+  DataSection *debugLineSection = nullptr;
+  const maplebe::CGOptions::EmitMemoryManager &emitMemoryManager;
+}; /* class ElfAssembler */
+} /* namespace assembler */
+
+#endif /* ELF_ASSEMBLER_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/elf_file.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/elf_file.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7633aff1caa4bf4b9f48d9e99c8e787dbf0b716
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/elf_file.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#ifndef X64_ASSEMBLER_ELF_FILE_H
+#define X64_ASSEMBLER_ELF_FILE_H
+
+#include <elf.h>
+#include <fstream>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "operand.h"
+
+namespace assembler {
+using ElfFileHeader = Elf64_Ehdr;
+using SectionHeader = Elf64_Shdr;
+using SegmentHeader = Elf64_Phdr;
+using Address = Elf64_Addr;
+using Offset = Elf64_Off;
+using Word = Elf64_Word;
+using Xword = Elf64_Xword;
+using Sxword = Elf64_Sxword;
+using SectionIndex = Elf64_Section;
+using Symbol = Elf64_Sym;
+using Rela = Elf64_Rela;
+
+class Alignment {
+ public:
+  template <typename T>
+  static T Align(T offset, T align) {
+    if (align <= 1) {
+      return offset;
+    }
+    return (offset + align - 1) & (~(align - 1));
+  }
+}; /* class Alignment */
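`Alignment::Align` rounds an offset up to the next multiple of a power-of-two alignment by adding `align - 1` and masking off the low bits. A quick standalone illustration (mirroring the template above, valid only for power-of-two alignments):

```cpp
#include <cassert>
#include <cstdint>

// Same bit trick as Alignment::Align above.
static uint64_t AlignUp(uint64_t offset, uint64_t align) {
  if (align <= 1) {
    return offset;
  }
  return (offset + align - 1) & ~(align - 1);
}

int main() {
  assert(AlignUp(13, 8) == 16);  // 13 -> next multiple of 8
  assert(AlignUp(16, 8) == 16);  // already aligned values are unchanged
  assert(AlignUp(5, 1) == 5);    // align <= 1 is a no-op
  return 0;
}
```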
+
+class Section {
+ public:
+  Section(const std::string &name, Word type, Xword flags, Xword align) : name(name) {
+    sectionHeader.sh_type = type;
+    sectionHeader.sh_flags = flags;
+    sectionHeader.sh_addralign = align;
+  }
+
+  virtual ~Section() = default;
+  virtual void GenerateData() = 0;
+  virtual void WriteSection(std::ofstream &outStream) = 0;
+
+  virtual void ClearData() {
+    return;
+  }
+
+  void SetIndex(SectionIndex idx) {
+    index = idx;
+  }
+
+  SectionIndex GetIndex() const {
+    return index;
+  }
+
+  void SetInfo(Word value) {
+    sectionHeader.sh_info = value;
+  }
+
+  void SetLink(const Section &section) {
+    sectionHeader.sh_link = static_cast<Word>(section.GetIndex());
+  }
+
+  void SetEntSize(Xword value) {
+    sectionHeader.sh_entsize = value;
+  }
+
+  void SetSectionSize(Xword size) {
+    sectionHeader.sh_size = size;
+  }
+
+  virtual Xword GetSectionSize() {
+    return sectionHeader.sh_size;
+  }
+
+  void SetAddr(Address addr) {
+    sectionHeader.sh_addr = addr;
+  }
+
+  Address GetAddr() const {
+    return sectionHeader.sh_addr;
+  }
+
+  Xword GetFlags() const {
+    return sectionHeader.sh_flags;
+  }
+
+  void SetOffset(Offset value) {
+    sectionHeader.sh_offset = value;
+  }
+
+  Offset GetOffset() const {
+    return sectionHeader.sh_offset;
+  }
+
+  Xword GetAlign() const {
+    return sectionHeader.sh_addralign;
+  }
+
+  const std::string &GetName() const {
+    return name;
+  }
+
+  void SetSectionHeaderNameIndex(Word index) {
+    sectionHeader.sh_name = index;
+  }
+
+  Word GetType() const {
+    return sectionHeader.sh_type;
+  }
+
+  const SectionHeader &GetSectionHeader() const {
+    return sectionHeader;
+  }
+
+ private:
+  std::string name;
+  SectionIndex index {};
+  SectionHeader sectionHeader {};
+}; /* class Section */
+
+class DataSection : public Section {
+ public:
+  DataSection(const std::string &name, Word type, Xword flags, Xword align)
+      : Section(name, type, flags, align) {}
+
+  ~DataSection() = default;
+
+  virtual void GenerateData() override {
+    SetSectionSize(data.size());
+  }
+
+  virtual void WriteSection(std::ofstream &outStream) override {
+    outStream.write(reinterpret_cast<const char *>(data.data()), data.size());
+  }
+
+  void AppendData(const void *value, size_t size) {
+    auto pdata = reinterpret_cast<const uint8 *>(value);
+    data.insert(data.end(), pdata, pdata + size);
+  }
+
+  void AppendData(int64 value, size_t size) {
+    for (size_t i = 0; i < size; i++) {
+      auto pdata = static_cast<uint8>(value >> (i * k8Bits));
+      data.push_back(pdata);
+    }
+  }
+
+  void ClearData() override {
+    data.clear();
+  }
+
+  uint32 GetDataSize() const {
+    return static_cast<uint32>(data.size());
+  }
+
+  const std::vector<uint8> &GetData() const {
+    return data;
+  }
+
+ protected:
+  std::vector<uint8> data;
+}; /* class DataSection */
+
+class StringSection : public DataSection {
+ public:
+  StringSection(const std::string &name, Word type, Xword flags, Xword align)
+      : DataSection(name, type, flags, align) {
+    AddString("\0"); /* an ELF string table always starts with a NUL byte */
+  }
+
+  ~StringSection() = default;
+
+  size_t AddString(const std::string &str) {
+    size_t pos = data.size();
+    AppendData(str.c_str(), str.size() + 1);
+    return pos;
+  }
+}; /* class StringSection */
+
+class RelaSection : public Section {
+ public:
+  RelaSection(const std::string &name, Word type, Xword flags, Word info, Xword align, const Section &link)
+      : Section(name, type, flags, align) {
+    SetEntSize(sizeof(Rela));
+    SetInfo(info);
+    SetLink(link);
+  }
+
+  ~RelaSection() = default;
+
+  void GenerateData() override {
+    SetSectionSize(relas.size() * sizeof(Rela));
+  }
+
+  void WriteSection(std::ofstream &outStream) override {
+    outStream.write(reinterpret_cast<const char *>(relas.data()), relas.size() * sizeof(Rela));
+  }
+
+  void AppendRela(Rela rela) {
+    relas.push_back(rela);
+  }
+
+ private:
+  std::vector<Rela> relas;
+}; /* class RelaSection */
+
+class SymbolSection : public Section {
+ public:
+  SymbolSection(const std::string &name, Word type, Xword flags, Xword align, const Section &link)
+      : Section(name, type, flags, align) {
+    SetEntSize(sizeof(Symbol));
+    SetLink(link);
+    SetInfo(1);
+    AppendSymbol({ 0, 0, 0, 0, 0, 0 });
+  }
+
+  ~SymbolSection() = default;
+
+  void GenerateData() override {
+    SetSectionSize(symbols.size() * sizeof(Symbol));
+  }
+
+  void WriteSection(std::ofstream &outStream) override {
+    outStream.write(reinterpret_cast<const char *>(symbols.data()), symbols.size() * sizeof(Symbol));
+  }
+
+  void AppendSymbol(const Symbol &symbol) {
+    symbols.push_back(symbol);
+  }
+
+  uint32 GetSymbolsSize() const {
+    return static_cast<uint32>(symbols.size());
+  }
+
+  uint64 GetIdxInSymbols(int64 symIdx) const {
+    return symbolIdxMap.at(symIdx);
+  }
+
+  void AppendIdxInSymbols(int64 symIdx) {
+    symbolIdxMap[symIdx] = static_cast<uint64>(GetSymbolsSize() - 1);
+  }
+
+  bool ExistSymInSymbols(int64 symIdx) {
+    return symbolIdxMap.count(symIdx) != 0;
+  }
+
+ private:
+  std::vector<Symbol> symbols;
+  std::unordered_map<int64, uint64> symbolIdxMap;
+}; /* class SymbolSection */
+} /* namespace assembler */
+
+#endif /* X64_ASSEMBLER_ELF_FILE_H */
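ELF string tables are NUL-separated blobs indexed by byte offset, which is why `AddString` returns the insertion position: those offsets are what end up in `sh_name`/`st_name` fields elsewhere in the writer. An illustrative sketch (`StrTabOffsets` is hypothetical; `SHT_STRTAB` comes from `<elf.h>`):

```cpp
#include <elf.h>
#include "elf_file.h"

void StrTabOffsets() {
  assembler::StringSection strTab(".strtab", SHT_STRTAB, 0, 1);
  // The constructor seeds the table with the mandatory leading NUL,
  // so the first real string lands at offset 1.
  size_t mainOfs = strTab.AddString("main");    // == 1
  size_t printfOfs = strTab.AddString("printf"); // == 6 (1 + "main" + its NUL)
  (void)mainOfs;
  (void)printfOfs;
}
```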
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/operand.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/operand.h
new file mode 100644
index 0000000000000000000000000000000000000000..a62272ef9bc43e25f95ec74c33494eae06431047
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/operand.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef X64_ASSEMBLER_OPERAND_H
+#define X64_ASSEMBLER_OPERAND_H
+
+#include <array>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include "util.h"
+
+namespace assembler {
+using ImmOpnd = std::pair<int64, int64>;
+
+/* Use 16 bits to represent a register:
+   the high 8 bits hold the register's size,
+   the low 4 bits hold the register's id,
+   the fifth bit from right to left is used to identify the register rip,
+   and the eighth bit from right to left determines whether
+   it is the high 8-bit or the low 8-bit register.
+   The specific distribution of the 16 bits is shown below:
+   +-----------------------------------+-----------+-----------+-------+-------------------+
+   |  15  14  13  12  11  10   9   8   |     7     |   6   5   |   4   |   3   2   1   0   |
+   +-----------------------------------+-----------+-----------+-------+-------------------+
+   |        Reg's size in bits         | H/L8-reg  |  unused   | IsRIP |     Reg's id      |
+   +-----------------------------------+-----------+-----------+-------+-------------------+
+   |   0   0   0   0   0   0   0   0   |     0     |   0   0   |   0   |   0   0   0   0   |
+   +-----------------------------------+-----------+-----------+-------+-------------------+
+*/
+enum Reg : uint16 {
+  XMM0 = 0x8000, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15,
+  RAX = 0x4000, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15,
+  RIP = 0x4015,
+  EAX = 0x2000, ECX, EDX, EBX, ESP, EBP, ESI, EDI, R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D,
+  AX = 0x1000, CX, DX, BX, SP, BP, SI, DI, R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W,
+  AL = 0x0880, CL, DL, BL, SPL, BPL, SIL, DIL, R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B,
+  AH = 0x0804, CH, DH, BH,
+  ES = 0, CS, SS, DS, FS, GS,
+  ERR = 0xFFFF
+};
+
+static const uint8 kMaxRegNum = 35;
+static const uint8 kRegSizeType = 5;
+static const std::array<std::array<Reg, kMaxRegNum>, kRegSizeType> kRegArray = {
+  {{ERR, AL, BL, CL, DL, SPL, BPL, SIL, DIL, R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B},
+   {ERR, AH, BH, CH, DH},
+   {ERR, AX, BX, CX, DX, SP, BP, SI, DI, R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W},
+   {ERR, EAX, EBX, ECX, EDX, ESP, EBP, ESI, EDI, R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D},
+   {ERR, RAX, RBX, RCX, RDX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15,
+    RIP,
+    XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15}}
+};
+
+static const std::unordered_map<Reg, std::string> kRegStrMap = {
+  {XMM0, "xmm0"}, {XMM1, "xmm1"}, {XMM2, "xmm2"}, {XMM3, "xmm3"}, {XMM4, "xmm4"}, {XMM5, "xmm5"}, {XMM6, "xmm6"},
+  {XMM7, "xmm7"}, {XMM8, "xmm8"}, {XMM9, "xmm9"}, {XMM10, "xmm10"}, {XMM11, "xmm11"}, {XMM12, "xmm12"},
+  {XMM13, "xmm13"}, {XMM14, "xmm14"}, {XMM15, "xmm15"},
+  {RAX, "rax"}, {RDX, "rdx"}, {RCX, "rcx"}, {RBX, "rbx"}, {RSP, "rsp"}, {RBP, "rbp"}, {RSI, "rsi"}, {RDI, "rdi"},
+  {R8, "r8"}, {R9, "r9"}, {R10, "r10"}, {R11, "r11"}, {R12, "r12"}, {R13, "r13"}, {R14, "r14"}, {R15, "r15"},
+  {RIP, "rip"},
+  {EAX, "eax"}, {ECX, "ecx"}, {EDX, "edx"}, {EBX, "ebx"}, {ESP, "esp"}, {EBP, "ebp"}, {ESI, "esi"}, {EDI, "edi"},
+  {R8D, "r8d"}, {R9D, "r9d"}, {R10D, "r10d"}, {R11D, "r11d"}, {R12D, "r12d"}, {R13D, "r13d"}, {R14D, "r14d"},
+  {R15D, "r15d"},
+  {AX, "ax"}, {CX, "cx"}, {DX, "dx"}, {BX, "bx"}, {SP, "sp"}, {BP, "bp"}, {SI, "si"}, {DI, "di"}, {R8W, "r8w"},
+  {R9W, "r9w"}, {R10W, "r10w"}, {R11W, "r11w"}, {R12W, "r12w"}, {R13W, "r13w"}, {R14W, "r14w"}, {R15W, "r15w"},
+  {AL, "al"}, {CL, "cl"}, {DL, "dl"}, {BL, "bl"}, {SPL, "spl"}, {BPL, "bpl"}, {SIL, "sil"}, {DIL, "dil"},
+  {R8B, "r8b"}, {R9B, "r9b"}, {R10B, "r10b"}, {R11B, "r11b"}, {R12B, "r12b"}, {R13B, "r13b"}, {R14B, "r14b"},
+  {R15B, "r15b"},
+  {AH, "ah"}, {CH, "ch"}, {DH, "dh"}, {BH, "bh"},
+  {ES, "es"}, {CS, "cs"}, {SS, "ss"}, {DS, "ds"}, {FS, "fs"}, {GS, "gs"},
+  {ERR, "err"}
+};
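Decoding a few enumerators against the bit layout documented above makes the scheme tangible (standalone and illustrative; the raw values are copied from the enum):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // R8 = 0x4008: size byte 0x40 = 64 bits, id 8 => needs a REX extension bit.
  const uint16_t r8 = 0x4008;
  assert((r8 >> 8) == 64);   // size in bits lives in the high byte
  assert((r8 & 0xF) == 8);   // ids 8..15 mark the extended registers
  // AL = 0x0880: an 8-bit register with bit 7 set => one of the "low 8" regs.
  const uint16_t al = 0x0880;
  assert((al >> 8) == 8 && (al & 0x80) != 0);
  // AH = 0x0804: an 8-bit register with bit 7 clear => a legacy "high 8" reg.
  const uint16_t ah = 0x0804;
  assert((ah >> 8) == 8 && (ah & 0x80) == 0);
  // RIP = 0x4015: 64-bit, and bit 4 flags the instruction pointer.
  const uint16_t rip = 0x4015;
  assert((rip & 0x10) != 0);
  return 0;
}
```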
+
+enum MemType {
+  kNone,
+  kOnlyBase,
+  kOnlyDisp,
+  kBaseAndDisp,
+  kIndexAndDisp, /* If there is an index register in the mem, there must be a scale. */
+  kSIB,          /* SIB is an abbreviation of Scale, Index, Base. */
+  kSIBAndDisp
+};
+
+struct Mem {
+  Reg base = ERR;
+  Reg index = ERR;
+  uint8 s = 0; /* scale factor 1/2/4/8; the encoded SIB scale field is log2(s) */
+  std::pair<int64, int64> disp = {0, 0}; /* first: symbol id, second: offset */
+  uint32 size = 32;
+  MemType memType = kNone;
+
+  Mem() {}
+
+  void SetMemType() {
+    if (base != ERR && index != ERR && (disp.second != 0 || disp.first != 0)) {
+      memType = kSIBAndDisp;
+    } else if (base != ERR && index != ERR && disp.second == 0 && disp.first == 0) {
+      memType = kSIB;
+    } else if (base == ERR && index != ERR && (disp.second != 0 || disp.first != 0)) {
+      memType = kIndexAndDisp;
+    } else if (base != ERR && index == ERR && (disp.second != 0 || disp.first != 0)) {
+      memType = kBaseAndDisp;
+    } else if (base == ERR && index == ERR && (disp.second != 0 || disp.first != 0)) {
+      memType = kOnlyDisp;
+    } else if (base != ERR && index == ERR && disp.second == 0 && disp.first == 0) {
+      memType = kOnlyBase;
+    } else {
+      memType = kNone;
+    }
+  }
+}; /* struct Mem */
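How `SetMemType` classifies the two most common addressing shapes, as a standalone sketch using the struct above (illustrative only):

```cpp
#include <cassert>
#include "operand.h"

int main() {
  using namespace assembler;
  // [rbp - 8]: base register plus displacement.
  Mem local;
  local.base = RBP;
  local.disp = {0, -8};  // no symbol, offset -8
  local.SetMemType();
  assert(local.memType == kBaseAndDisp);
  // [rdi + rsi*4]: base plus scaled index, no displacement -> SIB form.
  Mem indexed;
  indexed.base = RDI;
  indexed.index = RSI;
  indexed.s = 4;
  indexed.SetMemType();
  assert(indexed.memType == kSIB);
  return 0;
}
```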
+
+enum FixupKind {
+  kFNone,
+  kRelative,
+  kRelative64,
+  kAbsolute,
+  kAbsolute64,
+  kPLT,
+};
+
+enum class LabelType {
+  kLNone,
+  kBBLabel,
+  kFunc,
+  kConst,
+  kGlobal,
+  kStatic,
+  kGlobalUninitialized,
+  kLocalUninitialized,
+  kStrLabel,
+  kJmpLabel,
+  /* for debug */
+  kDebugStrLabel,
+};
+
+class Label {
+ public:
+  Label(int64 index, uint32 relOffsetVal, LabelType type)
+      : labelIndex(index), relOffset(relOffsetVal), labelType(type) {}
+
+  ~Label() = default;
+
+  int64 GetlabelIdx() const {
+    return labelIndex;
+  }
+
+  uint32 GetRelOffset() const {
+    return relOffset;
+  }
+
+  LabelType GetLabelType() const {
+    return labelType;
+  }
+
+  void SetRelOffset(uint32 value) {
+    relOffset = value;
+  }
+
+  void SetLabelType(LabelType type) {
+    labelType = type;
+  }
+
+ private:
+  int64 labelIndex;    /* target label index */
+  uint32 relOffset;    /* record the label's position */
+  LabelType labelType; /* target label type */
+}; /* class Label */
+
+class Fixup {
+ public:
+  Fixup(int64 index, FixupKind kind, const std::pair<uint32, size_t> &offset, int64 dispVal)
+      : labelIndex(index), fixupKind(kind), offset(offset), disp(dispVal) {}
+
+  int64 GetlabelIdx() const {
+    return labelIndex;
+  }
+
+  void SetLabelIdx(int64 index) {
+    labelIndex = index;
+  }
+
+  FixupKind GetFixupKind() const {
+    return fixupKind;
+  }
+
+  void SetFixupKind(FixupKind kind) {
+    fixupKind = kind;
+  }
+
+  const std::pair<uint32, size_t> &GetOffset() const {
+    return offset;
+  }
+
+  void SetOffset(const std::pair<uint32, size_t> &fixupOffset) {
+    offset = fixupOffset;
+  }
+
+  int64 GetDisp() const {
+    return disp;
+  }
+
+  void SetDisp(int64 value) {
+    disp = value;
+  }
+
+ private:
+  int64 labelIndex;    /* record the label that needs to be fixed up */
+  FixupKind fixupKind; /* record how to fix it up */
+  std::pair<uint32, size_t> offset; /* record the location and size to fix up:
+                                       the first is the offset, the second is the offset's size */
+  int64 disp; /* record the symbol's addend for relocation */
+}; /* class Fixup */
+} /* namespace assembler */
+
+#endif /* X64_ASSEMBLER_OPERAND_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/util.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/util.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9ec43cc149d9ba9a63fccf8f659936d4735bfcf
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/assembler/util.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef X64_ASSEMBLER_UTIL_H
+#define X64_ASSEMBLER_UTIL_H
+
+#include <cstdint>
+
+namespace assembler {
+using uint8 = uint8_t;
+using uint16 = uint16_t;
+using uint32 = uint32_t;
+using uint64 = uint64_t;
+using int8 = int8_t;
+using int16 = int16_t;
+using int32 = int32_t;
+using int64 = int64_t;
+using uintptr = uintptr_t;
+
+/* InsnSize is in bytes. */
+enum InsnSize : uint8 {
+  kB = 1,
+  kW = 2,
+  kL = 4,
+  kQ = 8,
+};
+
+/* size in bytes */
+static const uint8 k1Byte = 1;
+static const uint8 k2Bytes = 2;
+static const uint8 k4Bytes = 4;
+static const uint8 k8Bytes = 8;
+
+/* size in bits */
+static const uint8 k8Bits = 8;
+static const uint8 k16Bits = 16;
+static const uint8 k32Bits = 32;
+static const uint8 k64Bits = 64;
+
+inline bool Is8Bits(uint32 val) {
+  return val >= 0xFFFFFF80 || val <= 0x7F;
+}
+
+inline bool Is16Bits(uint32 val) {
+  return val >= 0xFFFF8000 || val <= 0x7FFF;
+}
+
+inline bool Is32Bits(uint64 val) {
+  return val >= ~uint64(0x7FFFFFFFU) || val <= 0x7FFFFFFFU;
+}
+
+inline bool Is64Bits(uint64 val) {
+  return val >= ~uint64(0x7FFFFFFFFFFFFFFFU) || val <= 0x7FFFFFFFFFFFFFFFU;
+}
+
+inline int64 CalculateLabelSymIdx(int64 funcUniqueId, int64 labelIdx) {
+  /* 16: make sure stIdx is large enough to be unique */
+  const int kLeftShiftBits = 16;
+  return ((funcUniqueId << kLeftShiftBits) + labelIdx) * (-1); /* -1: BBLabel's stIdx is negative */
+}
+
+inline int64 CalculateStrLabelSymIdx(uint64 size, int64 labelIdx, size_t strTableSize = 0) {
+  const int kLeftShiftBits = 8;
+  return (static_cast<int64>(size + strTableSize) * kLeftShiftBits + labelIdx);
+}
+} /* namespace assembler */
+
+#endif /* X64_ASSEMBLER_UTIL_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h
new file mode 100644
index 0000000000000000000000000000000000000000..c75f70c28d14842128e6e1f40ea4edf8edd0661a
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +#ifndef MAPLEBE_INCLUDE_X64_MPISEL_H +#define MAPLEBE_INCLUDE_X64_MPISEL_H + +#include "isel.h" +#include "x64_call_conv.h" + +namespace maplebe { +class X64MPIsel : public MPISel { + public: + X64MPIsel(MemPool &mp, MapleAllocator &allocator, CGFunc &f) : MPISel(mp, allocator, f) {} + ~X64MPIsel() override = default; + void SelectReturn(NaryStmtNode &retNode, Operand &opnd) override; + void SelectReturn() override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + void SelectGoto(GotoNode &stmt) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) override; + void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + void SelectIgoto(Operand &opnd0) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) override; + /* Create the operand interface directly */ + MemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectAsm(AsmNode &node) override; + private: + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) override; + MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) override; + Insn &AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds); + void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds); + + /* Inline function implementation of va_start */ + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + + /* Subclass private instruction selector function */ + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds); + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType); + Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType 
primType, Opcode opcode);
+    void SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
+                      Opcode cmpOpcode, PrimType cmpPrimType);
+    RegOperand &GetTargetStackPointer(PrimType primType) override;
+    RegOperand &GetTargetBasicPointer(PrimType primType) override;
+    std::tuple<Operand*, size_t, MIRType*> GetMemOpndInfoFromAggregateNode(BaseNode &argExpr);
+    void SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused);
+    void CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum);
+    void CreateCallStructParamPassByStack(MemOperand &addrOpnd, int32 symSize, int32 baseOffset);
+    void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize);
+    uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const;
+    bool IsParamStructCopy(const MIRSymbol &symbol);
+    void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+    void SelectLibCallNoReturn(const std::string &funcName, std::vector<Operand*> &opndVec, PrimType primType);
+    void SelectLibCallNArg(const std::string &funcName, std::vector<Operand*> &opndVec,
+                           std::vector<PrimType> pt);
+    void SelectPseduoForReturn(std::vector<RegOperand*> &retRegs);
+    RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp);
+    RegOperand *PrepareMemcpyParm(uint64 copySize);
+    RegOperand &SelectSpecialRegread(PregIdx pregIdx, PrimType primType) override;
+
+    /* save params passed by register */
+    std::vector<std::pair<RegOperand*, PrimType>> paramPassByReg;
+};
+}
+
+#endif /* MAPLEBE_INCLUDE_X64_MPISEL_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_abi.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..21c4f29d929598cfa60137251ad5c83acfff8ac1
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_abi.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ABI_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_ABI_H
+
+#include "x64_isa.h"
+#include "types_def.h"
+#include "becommon.h"
+
+namespace maplebe {
+using namespace maple;
+
+namespace x64 {
+
+/*
+ * Refer to the x86-64 (System V) ABI.
+ */
+bool IsAvailableReg(X64reg reg);
+bool IsCalleeSavedReg(X64reg reg);
+bool IsCallerSaveReg(X64reg reg);
+bool IsParamReg(X64reg reg);
+bool IsSpillReg(X64reg reg);
+bool IsExtraSpillReg(X64reg reg);
+bool IsSpillRegInRA(X64reg regNO, bool has3RegOpnd);
+PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize);
+} /* namespace x64 */
+
+/*
+ * x86-64 argument classification.
+ * After the argument values have been computed, they are placed either in registers
+ * or pushed on the stack. How values are passed is described in the
+ * following sections.
+ * - INTEGER This class consists of integral types that fit into one of the general
+     purpose registers.
   - SSE The class consists of types that fit into a vector register.
+ - SSEUP The class consists of types that fit into a vector register and can be passed
+   and returned in the upper bytes of it.
+ - X87, X87UP These classes consist of types that will be returned via the x87 FPU.
+ - COMPLEX_X87 This class consists of types that will be returned via the x87 FPU.
+ - NO_CLASS This class is used as an initializer in the algorithms. It will be used for
+   padding and empty structures and unions.
+ - MEMORY This class consists of types that will be passed and returned in memory via the stack.
+ *
+ */
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ABI_H */
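As a concrete illustration of the INTEGER class: under the System V x86-64 convention the first six integer/pointer arguments travel in RDI, RSI, RDX, RCX, R8, R9, and anything beyond that is passed on the stack (MEMORY). The sketch below is hedged and standalone; the real allocation in this patch lives in `X64CallConvImpl`, and `NthIntArgReg` is a hypothetical helper:

```cpp
#include <cstdint>
#include <optional>

enum class IntArgReg : uint8_t { RDI, RSI, RDX, RCX, R8, R9 };

// Returns the register for the n-th (0-based) INTEGER-class argument,
// or std::nullopt when the argument is passed on the stack (MEMORY).
std::optional<IntArgReg> NthIntArgReg(unsigned n) {
  constexpr IntArgReg regs[] = {IntArgReg::RDI, IntArgReg::RSI, IntArgReg::RDX,
                                IntArgReg::RCX, IntArgReg::R8,  IntArgReg::R9};
  if (n < sizeof(regs) / sizeof(regs[0])) {
    return regs[n];
  }
  return std::nullopt;
}
```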
x64::MOP_andw_r_r) +DEFINE_MAPPING(abstract::MOP_and_32, x64::MOP_andl_r_r) +DEFINE_MAPPING(abstract::MOP_and_64, x64::MOP_andq_r_r) +DEFINE_MAPPING(abstract::MOP_or_8, x64::MOP_orb_r_r) +DEFINE_MAPPING(abstract::MOP_or_16, x64::MOP_orw_r_r) +DEFINE_MAPPING(abstract::MOP_or_32, x64::MOP_orl_r_r) +DEFINE_MAPPING(abstract::MOP_or_64, x64::MOP_orq_r_r) +DEFINE_MAPPING(abstract::MOP_xor_8, x64::MOP_xorb_r_r) +DEFINE_MAPPING(abstract::MOP_xor_16, x64::MOP_xorw_r_r) +DEFINE_MAPPING(abstract::MOP_xor_32, x64::MOP_xorl_r_r) +DEFINE_MAPPING(abstract::MOP_xor_64, x64::MOP_xorq_r_r) +DEFINE_MAPPING(abstract::MOP_add_8, x64::MOP_addb_r_r) +DEFINE_MAPPING(abstract::MOP_add_16, x64::MOP_addw_r_r) +DEFINE_MAPPING(abstract::MOP_add_32, x64::MOP_addl_r_r) +DEFINE_MAPPING(abstract::MOP_add_64, x64::MOP_addq_r_r) +DEFINE_MAPPING(abstract::MOP_sub_8, x64::MOP_subb_r_r) +DEFINE_MAPPING(abstract::MOP_sub_16, x64::MOP_subw_r_r) +DEFINE_MAPPING(abstract::MOP_sub_32, x64::MOP_subl_r_r) +DEFINE_MAPPING(abstract::MOP_sub_64, x64::MOP_subq_r_r) + +/* UnaryOp */ +DEFINE_MAPPING(abstract::MOP_not_8, x64::MOP_notb_r) +DEFINE_MAPPING(abstract::MOP_not_16, x64::MOP_notw_r) +DEFINE_MAPPING(abstract::MOP_not_32, x64::MOP_notl_r) +DEFINE_MAPPING(abstract::MOP_not_64, x64::MOP_notq_r) +DEFINE_MAPPING(abstract::MOP_neg_8, x64::MOP_negb_r) +DEFINE_MAPPING(abstract::MOP_neg_16, x64::MOP_negw_r) +DEFINE_MAPPING(abstract::MOP_neg_32, x64::MOP_negl_r) +DEFINE_MAPPING(abstract::MOP_neg_64, x64::MOP_negq_r) + +/* CvtOp */ +DEFINE_MAPPING(abstract::MOP_zext_rr_16_8, x64::MOP_movzbw_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_16_8, x64::MOP_movsbw_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_32_8, x64::MOP_movsbl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_32_16, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_32_16, x64::MOP_movswl_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_64_8, x64::MOP_movsbq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r) +DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) +DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r) \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_args.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_args.h new file mode 100644 index 0000000000000000000000000000000000000000..49cce317b550f59953d6cef1f23d9d44c8643785 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_args.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H
+
+#include "args.h"
+#include "x64_isa.h"
+#include "x64_cgfunc.h"
+#include "x64_call_conv.h"
+
+namespace maplebe {
+using namespace maple;
+using namespace x64;
+
+struct ArgInfo {
+  X64reg reg;
+  MIRType *mirTy;
+  uint32 symSize;
+  RegType regType;
+  MIRSymbol *sym;
+  const X64SymbolAlloc *symLoc;
+  uint8 memPairSecondRegSize;  /* struct arg requiring two regs, size of 2nd reg */
+  bool doMemPairOpt;
+  bool createTwoStores;
+  bool isTwoRegParm;
+};
+
+class X64MoveRegArgs : public MoveRegArgs {
+ public:
+  explicit X64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) {}
+  ~X64MoveRegArgs() override = default;
+  void Run() override;
+
+ private:
+  void CollectRegisterArgs(std::map<uint32, X64reg> &argsList, std::vector<uint32> &indexList,
+                           std::map<uint32, X64reg> &pairReg, std::vector<uint32> &numFpRegs,
+                           std::vector<uint32> &fpSize) const;
+  ArgInfo GetArgInfo(std::map<uint32, X64reg> &argsList, uint32 argIndex,
+                     std::vector<uint32> &numFpRegs, std::vector<uint32> &fpSize) const;
+  void GenerateMovInsn(ArgInfo &argInfo, X64reg reg2);
+  void MoveRegisterArgs();
+  void MoveVRegisterArgs();
+  void LoadStackArgsToVReg(MIRSymbol &mirSym);
+  void MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym);
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dc5d70f12b7b77a7c712003fa2614b72532b6f4
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H
+
+#include "types_def.h"
+#include "becommon.h"
+#include "call_conv.h"
+#include "abi.h"
+#include "x64_abi.h"
+#include "x64_isa.h"
+#include <vector>
+
+namespace maplebe {
+using namespace maple;
+using namespace x64;
+constexpr uint32 kMaxStructParamByReg = 4;
+
+enum CallConvKind {
+  kCCall,
+  kWebKitJS,
+  kGHC
+};
+
+class CallConventionInfo {
+ public:
+  virtual const std::vector<X64reg> &GetIntParamRegs() const = 0;
+  virtual size_t GetIntParamRegsNum() const = 0;
+  virtual const std::vector<X64reg> &GetIntReturnRegs() const = 0;
+  virtual size_t GetIntReturnRegsNum() const = 0;
+  virtual const std::vector<X64reg> &GetFloatParamRegs() const = 0;
+  virtual size_t GetFloatParamRegsNum() const = 0;
+  virtual const std::vector<X64reg> &GetFloatReturnRegs() const = 0;
+  virtual size_t GetFloatReturnRegsNum() const = 0;
+  virtual int32 Classification(const BECommon &be, MIRType &mirType, std::vector<ArgumentClass> &classes) const = 0;
+};
+
+#define CALL_CONVENTION_INFO_SUBCLASS_DECLARE_BEGIN(CLASSNAME) \
+class CLASSNAME : public CallConventionInfo { \
+ public: \
+  const std::vector<X64reg> &GetIntParamRegs() const override { \
+    return intParmRegs; \
+  } \
+  size_t GetIntParamRegsNum() const override { \
+    return intParmRegs.size(); \
+  } \
+  const std::vector<X64reg> &GetIntReturnRegs() const override { \
+    return intReturnRegs; \
+  } \
+  size_t GetIntReturnRegsNum() const override { \
+    return intReturnRegs.size(); \
+  } \
+  const std::vector<X64reg> &GetFloatParamRegs() const override { \
+    return floatParmRegs; \
+  } \
+  size_t GetFloatParamRegsNum() const override { \
+    return floatParmRegs.size(); \
+  } \
+  const std::vector<X64reg> &GetFloatReturnRegs() const override { \
+    return floatReturnRegs; \
+  } \
+  size_t GetFloatReturnRegsNum() const override { \
+    return floatReturnRegs.size(); \
+  } \
+  static const CLASSNAME &GetCallConvInfo() { \
+    static CLASSNAME callConvInfo; \
+    return callConvInfo; \
+  } \
+  int32 Classification(const BECommon &be, MIRType &mirType, std::vector<ArgumentClass> &classes) const override; \
+ private: \
+  CLASSNAME() {} \
+  ~CLASSNAME() {} \
+  CLASSNAME& operator= (const CLASSNAME &); \
+  CLASSNAME(const CLASSNAME&);
+
+#define CALL_CONVENTION_INFO_SUBCLASS_DECLARE_END \
+};
+
+CALL_CONVENTION_INFO_SUBCLASS_DECLARE_BEGIN(WebKitJSCallConventionInfo)
+  const std::vector<X64reg> intParmRegs = { R0 };
+  const std::vector<X64reg> intReturnRegs = { R0 };
+  // TODO:
+  const std::vector<X64reg> floatParmRegs = {};
+  // TODO:
+  const std::vector<X64reg> floatReturnRegs = {};
+CALL_CONVENTION_INFO_SUBCLASS_DECLARE_END
+
+CALL_CONVENTION_INFO_SUBCLASS_DECLARE_BEGIN(CCallConventionInfo)
+  const std::vector<X64reg> intParmRegs = { R7, R6, R3, R2, R8, R9 };
+  const std::vector<X64reg> intReturnRegs = { R0, R3 };
+  /* per the x86-64 SysV ABI (and x64_fp_simd_regs.def), %xmm0-%xmm7 pass and %xmm0-%xmm1 return */
+  const std::vector<X64reg> floatParmRegs = { V0, V1, V2, V3, V4, V5, V6, V7 };
+  const std::vector<X64reg> floatReturnRegs = { V0, V1 };
+
+  int32 ClassifyAggregate(MIRType &mirType, uint64 sizeOfTy, std::vector<ArgumentClass> &classes) const;
+CALL_CONVENTION_INFO_SUBCLASS_DECLARE_END
+
+CALL_CONVENTION_INFO_SUBCLASS_DECLARE_BEGIN(GHCCallConventionInfo)
+  const std::vector<X64reg> intParmRegs = { R0 };
+  const std::vector<X64reg> intReturnRegs = { R0 };
+  // TODO:
+  const std::vector<X64reg> floatParmRegs = {};
+  // TODO:
+  const std::vector<X64reg> floatReturnRegs = {};
+CALL_CONVENTION_INFO_SUBCLASS_DECLARE_END
+
+class X64CallConvImpl {
+ public:
+  X64CallConvImpl(BECommon &be) : beCommon(be) {
+    convKind = GetCallConvKind(*(be.GetMIRModule().CurFunction()));
+  }
+  X64CallConvImpl(BECommon &be, CallConvKind convKind) : beCommon(be), convKind(convKind) {}
+
+  ~X64CallConvImpl() = default;
+
+  const CallConventionInfo &GetCallConvInfo() const {
+    return GetCallConvInfo(convKind);
+  }
+
+  static const CallConventionInfo &GetCallConvInfo(CallConvKind convKind_) {
+    switch (convKind_) {
+      case kCCall:
+        return CCallConventionInfo::GetCallConvInfo();
+      case kWebKitJS:
+        return WebKitJSCallConventionInfo::GetCallConvInfo();
+      case kGHC:
+        return GHCCallConventionInfo::GetCallConvInfo();
+      default:
+        return CCallConventionInfo::GetCallConvInfo();
+    }
+  }
+
+  static CallConvKind GetCallConvKind(MIRFunction &mirFunction) {
+    if (mirFunction.GetAttr(FUNCATTR_ccall)) {
+      return kCCall;
+    } else if (mirFunction.GetAttr(FUNCATTR_webkitjscall)) {
+      return kWebKitJS;
+    } else if (mirFunction.GetAttr(FUNCATTR_ghcall)) {
+      return kGHC;
+    } else {
+      return kCCall;
+    }
+  }
+
+  static CallConvKind GetCallConvKind(StmtNode &node) {
+    if (node.GetAttr(STMTATTR_ccall)) {
+      return kCCall;
+    } else if (node.GetAttr(STMTATTR_webkitjscall)) {
+      return kWebKitJS;
+    } else if (node.GetAttr(STMTATTR_ghcall)) {
+      return kGHC;
+    } else {
+      return kCCall;
+    }
+  }
+
+  void InitCCLocInfo(CCLocInfo &pLoc) const;
+
+  /* Passing value related */
+  int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr);
+
+  /* return value related */
+  int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc);
+
+ private:
+  X64reg AllocateGPParmRegister() {
+    const std::vector<X64reg> &intParamRegs = GetCallConvInfo().GetIntParamRegs();
+    return (nextGeneralParmRegNO < intParamRegs.size()) ? intParamRegs[nextGeneralParmRegNO++] : kRinvalid;
+  }
+
+  void AllocateTwoGPParmRegisters(CCLocInfo &pLoc) {
+    const std::vector<X64reg> &intParamRegs = GetCallConvInfo().GetIntParamRegs();
+    if ((nextGeneralParmRegNO + 1) < intParamRegs.size()) {
+      pLoc.reg0 = intParamRegs[nextGeneralParmRegNO++];
+      pLoc.reg1 = intParamRegs[nextGeneralParmRegNO++];
+    } else {
+      pLoc.reg0 = kRinvalid;
+    }
+  }
+
+  X64reg AllocateGPReturnRegister() {
+    const std::vector<X64reg> &intReturnRegs = GetCallConvInfo().GetIntReturnRegs();
+    return (nextGeneralReturnRegNO < intReturnRegs.size()) ?
+        intReturnRegs[nextGeneralReturnRegNO++] : kRinvalid;
+  }
+
+  void AllocateTwoGPReturnRegisters(CCLocInfo &pLoc) {
+    const std::vector<X64reg> &intReturnRegs = GetCallConvInfo().GetIntReturnRegs();
+    if ((nextGeneralReturnRegNO + 1) < intReturnRegs.size()) {
+      pLoc.reg0 = intReturnRegs[nextGeneralReturnRegNO++];
+      pLoc.reg1 = intReturnRegs[nextGeneralReturnRegNO++];
+    } else {
+      pLoc.reg0 = kRinvalid;
+    }
+  }
+
+  BECommon &beCommon;
+  CallConvKind convKind = kCCall;
+  uint64 paramNum = 0;             /* number of all types of parameters processed so far */
+  int32 nextGeneralParmRegNO = 0;  /* number of integer parameters processed so far */
+  int32 nextGeneralReturnRegNO = 0;  /* number of integer return registers processed so far */
+  int32 nextStackArgAdress = 0;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cfgo.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cfgo.h
new file mode 100644
index 0000000000000000000000000000000000000000..78aba629f8db858f2edb25944a475b82266a716e
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cfgo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_CFGO_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_CFGO_H
+
+#include "cfgo.h"
+
+namespace maplebe {
+class X64CFGOptimizer : public CFGOptimizer {
+ public:
+  X64CFGOptimizer(CGFunc &func, MemPool &memPool) : CFGOptimizer(func, memPool) {}
+  ~X64CFGOptimizer() = default;
+  void InitOptimizePatterns() override;
+};
+
+class X64FlipBRPattern : public FlipBRPattern {
+ public:
+  explicit X64FlipBRPattern(CGFunc &func) : FlipBRPattern(func) {}
+  ~X64FlipBRPattern() = default;
+
+ private:
+  uint32 GetJumpTargetIdx(const Insn &insn) override;
+  MOperator FlipConditionOp(MOperator flippedOp) override;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_CFGO_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cg.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cg.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6a35becf5444dda3b4a1e7f969d0b9e3acd25a0
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cg.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+/* sub-target info & implementation */
+#ifndef MAPLEBE_INCLUDE_CG_X86_64_CG_H
+#define MAPLEBE_INCLUDE_CG_X86_64_CG_H
+
+#include "cg.h"
+#include "x64_isa.h"
+#include "x64_live.h"
+#include "x64_reaching.h"
+#include "x64_MPISel.h"
+#include "x64_standardize.h"
+#include "x64_args.h"
+#include "x64_local_opt.h"
+#include "x64_cfgo.h"
+
+namespace maplebe {
+constexpr int32 kIntRegTypeNum = 5;
+
+class X64CG : public CG {
+ public:
+  X64CG(MIRModule &mod, const CGOptions &opts) : CG(mod, opts) {}
+
+  static const InsnDesc kMd[x64::kMopLast];
+  void EnrollTargetPhases(MaplePhaseManager *pm) const override;
+  /* Init SubTarget phase */
+  /* AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const override;*/
+
+  LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const override {
+    return mp.New<X64LiveAnalysis>(f, mp);
+  }
+  ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const override {
+    return mp.New<X64ReachingDefinition>(f, mp);
+  }
+  LocalOpt *CreateLocalOpt(MemPool &mp, CGFunc &f, ReachingDefinition& rd) const override {
+    return mp.New<X64LocalOpt>(mp, f, rd);
+  }
+  MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override {
+    return mp.New<X64MoveRegArgs>(f);
+  }
+
+  MPISel *CreateMPIsel(MemPool &mp, MapleAllocator &allocator, CGFunc &f) const override {
+    return mp.New<X64MPIsel>(mp, allocator, f);
+  }
+
+  Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override {
+    return mp.New<X64Standardize>(f);
+  }
+
+  CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const override {
+    return mp.New<X64CFGOptimizer>(f, mp);
+  }
+
+  /* Init SubTarget optimization */
+
+  Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) override;
+
+  PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) override;
+
+  CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool,
+                       StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) override;
+
+  bool IsExclusiveFunc(MIRFunction &mirFunc) override;
+
+  /* NOTE: Consider making be_common a field of CG. */
+  void GenerateObjectMaps(BECommon &beCommon) override;
+
+  /* Used for GCTIB pattern merging */
+  std::string FindGCTIBPatternName(const std::string &name) const override;
+  enum : uint8 {
+    kR8LowList,
+    kR8HighList,
+    kR16List,
+    kR32List,
+    kR64List
+  };
+  bool IsEffectiveCopy(Insn &insn) const final;
+  bool IsTargetInsn(MOperator mOp) const final;
+  bool IsClinitInsn(MOperator mOp) const final;
+  bool IsPseudoInsn(MOperator mOp) const final;
+  void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const final;
+  const InsnDesc &GetTargetMd(MOperator mOp) const final {
+    return kMd[mOp];
+  }
+};
+}  // namespace maplebe
+#endif /* MAPLEBE_INCLUDE_CG_X86_64_CG_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h
new file mode 100644
index 0000000000000000000000000000000000000000..96a3ad1d0bdfc84ddaef39aade171d4295a2a1a6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H
+#define MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H
+
+#include "cgfunc.h"
+#include "x64_memlayout.h"
+#include "x64_isa.h"
+#include "x64_reg_info.h"
+#include "x64_optimize_common.h"
+
+namespace maplebe {
+class X64CGFunc : public CGFunc {
+ public:
+  X64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b,
+            MemPool &memPool, StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId)
+      : CGFunc(mod, c, f, b, memPool, stackMp, mallocator, funcId),
+        calleeSavedRegs(mallocator.Adapter()) {
+    CGFunc::SetMemlayout(*memPool.New<X64MemLayout>(b, f, mallocator));
+    CGFunc::GetMemlayout()->SetCurrFunction(*this);
+    CGFunc::SetTargetRegInfo(*memPool.New<X64RegInfo>(mallocator, X64CallConvImpl::GetCallConvKind(f)));
+    CGFunc::GetTargetRegInfo()->SetCurrFunction(*this);
+  }
+  /* null implementation yet */
+  InsnVisitor *NewInsnModifier() override {
+    return memPool->New<X64InsnVisitor>(*this);
+  }
+  void GenSaveMethodInfoCode(BB &bb) override;
+  void GenerateCleanupCode(BB &bb) override;
+  bool NeedCleanup() override;
+  void GenerateCleanupCodeForExtEpilog(BB &bb) override;
+  uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) override;
+  void AssignLmbcFormalParams() override;
+  void LmbcGenSaveSpForAlloca() override;
+  void MergeReturn() override;
+  void DetermineReturnTypeofCall() override;
+  void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) override;
+  void HandleRetCleanup(NaryStmtNode &retNode) override;
+  void SelectDassign(DassignNode &stmt, Operand &opnd0) override;
+  void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) override;
+  void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override;
+  void SelectAbort() override;
+  void SelectAssertNull(UnaryStmtNode &stmt) override;
+  void SelectAsm(AsmNode &node) override;
+  void SelectAggDassign(DassignNode &stmt) override;
+  void SelectIassign(IassignNode &stmt) override;
+  void SelectIassignoff(IassignoffNode &stmt) override;
+  void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override;
+  void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override;
+  void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override;
+  void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) override;
+  void SelectReturnSendOfStructInRegs(BaseNode *x) override;
+  void SelectReturn(Operand *opnd) override;
+  void SelectIgoto(Operand *opnd0) override;
+  void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override;
+  void SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) override;
+  void SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) override;
+  void SelectGoto(GotoNode &stmt) override;
+  void SelectCall(CallNode &callNode) override;
+  void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) override;
+  void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override;
+  Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) override;
+  Operand *SelectCclz(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCctz(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCpopcount(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCparity(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCclrsb(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCisaligned(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCalignup(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCaligndown(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) override;
+  Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) override;
+  Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) override;
+  Operand *SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) override;
+  Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) override;
+  Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) override;
+  Operand *SelectCReturnAddress(IntrinsicopNode &intrinopNode) override;
+  void SelectMembar(StmtNode &membar) override;
+  void SelectComment(CommentNode &comment) override;
+  void HandleCatch() override;
+  Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) override;
+  RegOperand *SelectRegread(RegreadNode &expr) override;
+  Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) override;
+  Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) override;
+  Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override;
+  Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override;
+  Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0,
+                       PrimType finalBitFieldDestType = kPtyInvalid) override;
+  Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override;
+  Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) override;
+  Operand *SelectIntConst(MIRIntConst &intConst) override;
+  Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override;
+  Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) override;
+  Operand *SelectStrConst(MIRStrConst &strConst) override;
+  Operand *SelectStr16Const(MIRStr16Const &strConst) override;
+  void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) override;
+  Operand *SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1,
+                      const BaseNode &parent) override;
+  Operand *SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand &SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) override;
+  Operand *SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand *SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectLand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand *SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent,
+                     bool parentIsBr = false) override;
+  void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand *SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override;
+  Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override;
+  Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) override;
+  Operand *SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) override;
+  Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) override;
+  Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override;
+  Operand *SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1,
+                        const BaseNode &parent, bool hasCompare = false) override;
+  Operand *SelectMalloc(UnaryNode &call, Operand &opnd0) override;
+  RegOperand &SelectCopy(Operand &src, PrimType srcType, PrimType dstType) override;
+  Operand *SelectAlloca(UnaryNode &call, Operand &opnd0) override;
+  Operand *SelectGCMalloc(GCMallocNode &call) override;
+  Operand *SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) override;
+  void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) override;
+  Operand *SelectLazyLoad(Operand &opnd0, PrimType primType) override;
+  Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) override;
+  Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) override;
+  void GenerateYieldpoint(BB &bb) override;
+  Operand &ProcessReturnReg(PrimType primType, int32 sReg) override;
+  Operand &GetOrCreateRflag() override;
+  const Operand *GetRflag() const override;
+  const Operand *GetFloatRflag() const override;
+  const LabelOperand *GetLabelOperand(LabelIdx labIdx) const override;
+  LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override;
+  LabelOperand &GetOrCreateLabelOperand(BB &bb) override;
+  RegOperand &CreateVirtualRegisterOperand(regno_t vRegNO) override;
+  RegOperand &GetOrCreateVirtualRegisterOperand(regno_t vRegNO) override;
+  RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand &regOpnd) override;
+  RegOperand &GetOrCreateFramePointerRegOperand() override;
+  RegOperand &GetOrCreateStackBaseRegOperand() override;
+  RegOperand &GetZeroOpnd(uint32 size) override;
+  Operand &CreateCfiRegOperand(uint32 reg, uint32 size) override;
+  Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override;
+  Operand &CreateImmOperand(PrimType primType, int64 val) override;
+  void ReplaceOpndInInsn(RegOperand &regDest, RegOperand &regSrc, Insn &insn, regno_t regno) override;
+  void CleanupDeadMov(bool dump = false) override;
+  void GetRealCallerSaveRegs(const Insn &insn, std::set<regno_t> &realCallerSave) override;
+  bool IsFrameReg(const RegOperand &opnd) const override;
+  RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) override;
+  RegOperand *SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) override;
+  RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override;
+  RegOperand *SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2,
+                                PrimType oTyp2, Opcode opc) override;
+  RegOperand *SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2,
+                                    PrimType oty2, Opcode opc) override;
+  RegOperand *SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) override;
+  RegOperand *SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) override;
+  RegOperand *SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) override;
+  RegOperand *SelectVectorDup(PrimType rType, Operand *src, bool getLow) override;
+  RegOperand *SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) override;
+  RegOperand *SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, bool isLow) override;
+  RegOperand *SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3,
+                               PrimType oTyp3) override;
+  RegOperand *SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) override;
+  RegOperand *SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2,
+                               bool isLow) override;
+  RegOperand *SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) override;
+  RegOperand *SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) override;
+  RegOperand *SelectVectorNeg(PrimType rType, Operand *o1) override;
+  RegOperand *SelectVectorNot(PrimType rType, Operand *o1) override;
+  RegOperand *SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) override;
+  RegOperand *SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) override;
+  RegOperand *SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) override;
+  RegOperand *SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp,
+                                     int32 lane) override;
+  RegOperand *SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2,
+                                Opcode opc) override;
+  RegOperand *SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) override;
+  RegOperand *SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, bool isLow) override;
+  RegOperand *SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2,
+                                   bool isLow, bool isWide) override;
+  RegOperand *SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) override;
+  RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) override;
+  RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override;
+  Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType,
+                                        const std::string &name) override;
+  void ProcessLazyBinding() override;
+  void DBGFixCallFrameLocationOffsets() override;
+  MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx idx) override;
+
+  int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) override;
+  RegOperand *GetBaseReg(const SymbolAlloc &symAlloc);
+
+  void AddtoCalleeSaved(regno_t reg) override {
+    const auto &[_, flag] = calleeSavedRegs.insert(static_cast<X64reg>(reg));
+    DEBUG_ASSERT((IsGPRegister(static_cast<X64reg>(reg)) ||
+                  IsFPSIMDRegister(static_cast<X64reg>(reg))), "Int or FP registers are expected");
+    if (flag) {
+      if (IsGPRegister(static_cast<X64reg>(reg))) {
+        ++numIntregToCalleeSave;
+      } else {
+        ++numFpregToCalleeSave;
+      }
+    }
+  }
+
+  const MapleSet<X64reg> &GetCalleeSavedRegs() const {
+    return calleeSavedRegs;
+  }
+
+  uint32 SizeOfCalleeSaved() const {
+    uint32 size = numIntregToCalleeSave * kIntregBytelen + numFpregToCalleeSave * kFpregBytelen;
+    return RoundUp(size, GetMemlayout()->GetStackPtrAlignment());
+  }
+
+  MemOperand *GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize);
+  void FreeSpillRegMem(regno_t vrNum);
+  int64 GetOrCreatSpillRegLocation(regno_t vrNum) {
+    auto symLoc = GetMemlayout()->GetLocOfSpillRegister(vrNum);
+    return static_cast<int64>(GetBaseOffset(*symLoc));
+  }
+ private:
+  MapleSet<X64reg> calleeSavedRegs;
+  uint32 numIntregToCalleeSave = 0;
+  uint32 numFpregToCalleeSave = 0;
+};
+
+class X64OpndDumpVisitor : public OpndDumpVisitor {
+ public:
+  explicit X64OpndDumpVisitor(const OpndDesc &operandDesc) : OpndDumpVisitor(operandDesc) {}
+  ~X64OpndDumpVisitor() override = default;
+
+ private:
+  void Visit(RegOperand *v) final;
+  void Visit(ImmOperand *v) final;
+  void Visit(MemOperand *v) final;
+  void Visit(ListOperand *v) final;
+  void Visit(CondOperand *v) final;
+  void Visit(CommentOperand *v) final;
+  void Visit(StImmOperand *v) final;
+  void Visit(BitShiftOperand *v) final;
+  void Visit(ExtendShiftOperand *v) final;
+  void Visit(LabelOperand *v) final;
+  void Visit(FuncNameOperand *v) final;
+  void Visit(PhiOperand *v) final;
+  void DumpRegInfo(RegOperand &v);
+};
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_emitter.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_emitter.h
new file mode 100644
index 0000000000000000000000000000000000000000..2398de553ade2107169426c0c36871ef08d54af8
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_emitter.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H
+#define MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H
+
+#include "emit.h"
+#include "assembler/asm_assembler.h"
+#include "assembler/elf_assembler.h"
+
+namespace maplebe {
+class X64Emitter : public Emitter {
+ public:
+  X64Emitter(CG &cg, assembler::Assembler &newAssembler) :
+      Emitter(cg, ""), assmbler(newAssembler) {}
+  ~X64Emitter() = default;
+
+  assembler::Assembler &GetAssembler() const {
+    return assmbler;
+  }
+
+  assembler::Reg TransferReg(Operand *opnd) const;
+  std::pair TransferImm(Operand *opnd);
+  assembler::Mem TransferMem(Operand *opnd, uint32 funcUniqueId);
+  int64 TransferLabel(Operand *opnd, uint32 funcUniqueId);
+  uint32 TransferFuncName(Operand *opnd);
+
+  void EmitFunctionHeader(maplebe::CGFunc &cgFunc);
+  void EmitBBHeaderLabel(maplebe::CGFunc &cgFunc, LabelIdx labIdx, uint32 freq);
+  void EmitInsn(Insn &insn, uint32 funcUniqueId);
+  void EmitJmpTable(const maplebe::CGFunc &cgFunc);
+  void EmitFunctionFoot(maplebe::CGFunc &cgFunc);
+  uint8 GetSymbolAlign(const maple::MIRSymbol &mirSymbol, bool isComm = false);
+  uint64 GetSymbolSize(maple::TyIdx typeIndex);
+  void EmitLocalVariable(maplebe::CGFunc &cgFunc);
+  void EmitGlobalVariable(maplebe::CG& cg);
+  uint64 EmitStructure(maple::MIRConst &mirConst, maplebe::CG& cg, bool belongsToDataSec = true);
+  uint64 EmitStructure(maple::MIRConst &mirConst, maplebe::CG& cg, uint32 &subStructFieldCounts,
+                       bool belongsToDataSec = true);
+  uint64 EmitVector(maple::MIRConst &mirConst, bool belongsToDataSec = true);
+  uint64 EmitArray(maple::MIRConst &mirConst, maplebe::CG& cg, bool belongsToDataSec = true);
+  void EmitAddrofElement(MIRConst &mirConst, bool belongsToDataSec);
+  uint32 EmitSingleElement(maple::MIRConst &mirConst, bool belongsToDataSec = true, bool isIndirect = false);
+  void EmitBitField(maplebe::StructEmitInfo &structEmitInfo, maple::MIRConst &mirConst, const maple::MIRType *nextType,
+                    uint64 fieldOffset, bool belongsToDataSec = true);
+  void EmitCombineBfldValue(maplebe::StructEmitInfo &structEmitInfo, bool belongsToDataSec = true);
+  void EmitStringPointers();
+  void Run(maplebe::CGFunc &cgFunc);
+
+  /* Dwarf debug info */
+  void EmitDIHeaderFileInfo();
+  void UpdateAttrAndEmit(const std::string& sfile, DebugInfo &mirdi, DBGAbbrevEntry &diae, DBGDie &die,
+                         const std::string& spath);
+  void EmitDIDebugInfoSection(maplebe::DebugInfo &mirdi);
+  void EmitDwFormAddr(const DBGDie &die, const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di);
+  void EmitDwFormRef4(DBGDie &die, const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di);
+  void EmitDwFormData8(const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di,
+                       MapleVector<DBGDieAttr*> &attrvec);
+  void EmitDIAttrValue(DBGDie &die, DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di);
+  void EmitDIDebugAbbrevSection(maplebe::DebugInfo &mirdi);
+  void EmitDIDebugStrSection();
+  void EmitDebugInfo(maplebe::CG& cg);
+
+ private:
+  assembler::Assembler &assmbler;
+  std::vector stringPtr;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def
new file mode 100644
index 0000000000000000000000000000000000000000..7c4dafda4ac80fec5f9967d03eae1ab7256ebeec
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/*
+ * - %xmm0-%xmm1 used to pass and return floating point arguments
+ * - %xmm2-%xmm7 used to pass floating point arguments
+ */
+
+/*
+ * ID, prefixes: 8-bit, 16-bit, 32-bit, 64-bit, 128-bit, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill
+ */
+/* XMM0 ~ XMM15 */
+FP_SIMD_REG(0 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(1 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(2 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(3 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(4 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(5 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(6 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(7 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(8 , "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(9 , "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(10, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(11, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(12, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(13, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(14, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(15, "B", "H", "S", "D", "Q", true, false, false, false, false)
+/* ST0 ~ ST7 */
+FP_SIMD_REG(16, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(17, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(18, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(19, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(20, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(21, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(22, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(23, "B", "H", "S", "D", "Q", true, false, false, false, false)
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def
new file mode 100644
index 0000000000000000000000000000000000000000..10426073d7c4daf8338c6b0e21bb56f118fde866
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/*
+ * Registers in x86-64
+ *
+ * - caller-save registers: %rax, %rcx, %rdx, %rdi, %rsi, and %r8-r11
+ * - callee-saved registers: %r12, %r13, %r14, %r15, %rbx, %rsp, %rbp.
+ * - In contrast to the Intel386 ABI, %rdi and %rsi in x86-64 belong to the called function,
+ *   not the caller, so they are caller-save registers.
+ * - User-level applications pass integer arguments in the sequence %rdi, %rsi, %rdx, %rcx,
+ *   %r8 and %r9. The kernel interface uses %rdi, %rsi, %rdx, %r10, %r8 and %r9.
+ * - the sequence %rax, %rdx is used to return INTEGER values.
+ * - %rdx is used to pass the 3rd argument to functions and is the 2nd return register.
+ * - %r11 is neither required to be preserved, nor is it used to pass arguments.
+ */
+/* ID, 8-bit prefix, 8-16 bit prefix, 16-bit prefix, 32-bit prefix, 64-bit prefix, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill */
+INT_REG(0 , "BL", "BH", "W", "L", "Q", true, false, false, false, false)
+INT_REG(1 , "BL", "BH", "W", "L", "Q", true, true, false, false, false)
+INT_REG(2 , "BL", "BH", "W", "L", "Q", true, false, true, false, false)
+INT_REG(3 , "BL", "BH", "W", "L", "Q", true, false, true, false, false)
+INT_REG(4 , "B", "", "W", "L", "Q", false, false, false, false, false)
+INT_REG(5 , "B", "", "W", "L", "Q", false, true, false, false, false)
+INT_REG(6 , "B", "", "W", "L", "Q", true, false, true, false, false)
+INT_REG(7 , "B", "", "W", "L", "Q", true, false, true, false, false)
+INT_REG(8 , "B", "", "W", "L", "Q", true, false, true, false, false)
+INT_REG(9 , "B", "", "W", "L", "Q", true, false, true, false, false)
+INT_REG(10, "B", "", "W", "L", "Q", true, false, false, true, false)
+INT_REG(11, "B", "", "W", "L", "Q", true, false, false, true, false)
+INT_REG(12, "B", "", "W", "L", "Q", true, true, false, false, false)
+INT_REG(13, "B", "", "W", "L", "Q", true, true, false, false, false)
+INT_REG(14, "B", "", "W", "L", "Q", true, true, false, false, false)
+INT_REG(15, "B", "", "W", "L", "Q", true, true, false, false, false)
+/* instruction pointer */
+INT_REG(16, "B", "", "W", "L", "Q", false, false, false, false, false)
+
+/* Alias */
+INT_REG_ALIAS(AX, 0)
+INT_REG_ALIAS(BX, 1)
+INT_REG_ALIAS(CX, 2)
+INT_REG_ALIAS(DX, 3)
+INT_REG_ALIAS(SP, 4)
+INT_REG_ALIAS(BP, 5)
+INT_REG_ALIAS(SI, 6)
+INT_REG_ALIAS(DI, 7)
+
+INT_REG_ALIAS(FP, 5)
+INT_REG_ALIAS(YP, 12)
+INT_REG_ALIAS(IP, 16)
+INT_REG_ALIAS(LAST_GP_REG, 16)
+
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_isa.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_isa.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c72f9803d910f277cedacbe1865135a3f5bb208
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_isa.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ISA_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_ISA_H
+
+#include "operand.h"
+#include "mad.h"
+#include "isa.h"
+
+namespace maplebe {
+/*
+ * X64 Architecture Reference Manual
+ */
+constexpr int kX64StackPtrAlignment = 16;
+
+constexpr int32 kOffsetAlign = 8;
+constexpr uint32 kIntregBytelen = 8;  /* 64-bit */
+constexpr uint32 kFpregBytelen = 8;   /* only lower 64 bits are used */
+constexpr int kSizeOfFplr = 16;
+
+class Insn;
+
+namespace x64 {
+/* machine instruction description */
+#define DEFINE_MOP(op, ...) op,
+enum X64MOP_t : maple::uint32 {
+#include "x64_md.def"
+  kMopLast
+};
+#undef DEFINE_MOP
+
+/* Registers in x64 state */
+enum X64reg : uint32 {
+  kRinvalid = kInvalidRegNO,
+/* integer registers */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \
+                isParam, isSpill, isExtraSpill) R##ID,
+#define INT_REG_ALIAS(ALIAS, ID)
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \
+                    isParam, isSpill, isExtraSpill) V##ID,
+#include "x64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+  kMaxRegNum,
+  kRFLAG,
+  kAllRegNum,
+/* integer registers alias */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \
+                isParam, isSpill, isExtraSpill)
+#define INT_REG_ALIAS(ALIAS, ID) R##ALIAS = R##ID,
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+};
+
+static inline bool IsGPRegister(X64reg r) {
+  return R0 <= r && r <= RLAST_GP_REG;
+}
+
+static inline bool IsFPSIMDRegister(X64reg r) {
+  return V0 <= r && r <= V23;
+}
+
+static inline bool IsFPRegister(X64reg r) {
+  return V0 <= r && r <= V7;
+}
+
+static inline bool IsSIMDRegister(X64reg r) {
+  return V8 <= r && r <= V23;
+}
+
+static inline bool IsPhysicalRegister(regno_t r) {
+  return r < kMaxRegNum;
+}
+
+static inline RegType GetRegType(X64reg r) {
+  if (IsGPRegister(r)) {
+    return kRegTyInt;
+  }
+  if (IsFPSIMDRegister(r)) {
+    return kRegTyFloat;
+  }
+  DEBUG_ASSERT(false, "No suitable register type to return?");
+  return kRegTyUndef;
+}
+/*
+ * Precondition: The given insn is a jump instruction.
+ * Get the jump target label operand index from the given instruction.
+ * Note: MOP_jmp_m and MOP_jmp_r are jump instructions, but their targets are unknown at compile time.
+ */
+uint32 GetJumpTargetIdx(const Insn &insn);
+
+MOperator FlipConditionOp(MOperator flippedOp);
+} /* namespace x64 */
+
+/*
+ * We save callee-saved registers from lower stack area to upper stack area.
+ * If possible, we store a pair of registers (int/int and fp/fp) on the stack.
+ * The Stack Pointer has to be aligned at a 16-byte boundary.
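+ * Illustrative example (not normative): after saving two pairs of callee-saved
+ * registers, GetNextOffsetCalleeSaved below has advanced the offset 0 -> 16 -> 32,
+ * i.e. each step reserves one 16-byte-aligned slot holding two 8-byte registers.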
+ * On X64, kIntregBytelen == 8 (see above)
+ */
+inline void GetNextOffsetCalleeSaved(int &offset) {
+  offset += (kIntregBytelen << 1);
+}
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ISA_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h
new file mode 100644
index 0000000000000000000000000000000000000000..009f3c836a641edcd59522fc3941f35090dec6e7
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H
+
+#include "x64_isa.h"
+#include "operand.h"
+
+namespace maplebe {
+
+namespace x64 {
+/* register, imm, memory, cond */
+#define DEF_X64_CMP_MAPPING_INT(SIZE) \
+static const X64MOP_t cmpIselMap##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \
+  {MOP_cmp##SIZE##_r_r, MOP_begin, MOP_cmp##SIZE##_r_m, MOP_begin}, \
+  {MOP_cmp##SIZE##_i_r, MOP_begin, MOP_cmp##SIZE##_i_m, MOP_begin}, \
+  {MOP_cmp##SIZE##_m_r, MOP_begin, MOP_begin, MOP_begin}, \
+  {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \
+};
+DEF_X64_CMP_MAPPING_INT(b)
+DEF_X64_CMP_MAPPING_INT(w)
+DEF_X64_CMP_MAPPING_INT(l)
+DEF_X64_CMP_MAPPING_INT(q)
+
+static inline X64MOP_t GetCmpMop(Operand::OperandType dTy, Operand::OperandType sTy, PrimType primType) {
+  X64MOP_t cmpOp = MOP_begin;
+  switch (GetPrimTypeBitSize(primType)) {
+    case k8BitSize:
+      cmpOp = cmpIselMapb[sTy][dTy];
+      break;
+    case k16BitSize:
+      cmpOp = cmpIselMapw[sTy][dTy];
+      break;
+    case k32BitSize:
+      cmpOp = cmpIselMapl[sTy][dTy];
+      break;
+    case k64BitSize:
+      cmpOp = cmpIselMapq[sTy][dTy];
+      break;
+    default:
+      cmpOp = MOP_begin;
+      break;
+  }
+  return cmpOp;
+}
+
+/* {OPCODE, {register, imm, memory, cond}} */
+#define DEF_X64_SET_MAPPING_INT(OPCODE, TYPE) \
+{OPCODE, {x64::MOP_##TYPE##_r, x64::MOP_begin, x64::MOP_##TYPE##_m, x64::MOP_begin}}
+
+using SetIselMappingType = std::unordered_map<maple::Opcode,
+                                              std::array<X64MOP_t, Operand::OperandType::kOpdPhi>>;
+static const SetIselMappingType setUnsignedIselMapping = {
+  DEF_X64_SET_MAPPING_INT(OP_le, setbe),
+  DEF_X64_SET_MAPPING_INT(OP_ge, setae),
+  DEF_X64_SET_MAPPING_INT(OP_gt, seta),
+  DEF_X64_SET_MAPPING_INT(OP_lt, setb),
+  DEF_X64_SET_MAPPING_INT(OP_ne, setne),
+  DEF_X64_SET_MAPPING_INT(OP_eq, sete),
+};
+static const SetIselMappingType setSignedIselMapping = {
+  DEF_X64_SET_MAPPING_INT(OP_le, setle),
+  DEF_X64_SET_MAPPING_INT(OP_ge, setge),
+  DEF_X64_SET_MAPPING_INT(OP_gt, setg),
+  DEF_X64_SET_MAPPING_INT(OP_lt, setl),
+  DEF_X64_SET_MAPPING_INT(OP_ne, setne),
+  DEF_X64_SET_MAPPING_INT(OP_eq, sete),
+};
+#undef DEF_X64_SET_MAPPING_INT
+
+static inline X64MOP_t GetSetCCMop(maple::Opcode opcode, Operand::OperandType dTy, bool isSigned) {
+  DEBUG_ASSERT(dTy < Operand::OperandType::kOpdPhi, "illegal operand type");
+  const SetIselMappingType &setIselMapping = isSigned ? setSignedIselMapping : setUnsignedIselMapping;
+  auto iter = setIselMapping.find(opcode);
+  if (iter == setIselMapping.end()) {
+    return x64::MOP_begin;
+  }
+  return iter->second[dTy];
+}
+
+#define DEF_X64_CMOV_MAPPING_INT(OPCODE, TYPE) \
+{OPCODE, {x64::MOP_begin, x64::MOP_##TYPE##w_r_r, x64::MOP_##TYPE##l_r_r, x64::MOP_##TYPE##q_r_r}}
+using CMovIselMappingType = std::unordered_map<maple::Opcode, std::array<X64MOP_t, 4>>;
+static const CMovIselMappingType cmovUnsignedIselMapping = {
+  DEF_X64_CMOV_MAPPING_INT(OP_le, cmovbe),
+  DEF_X64_CMOV_MAPPING_INT(OP_ge, cmovae),
+  DEF_X64_CMOV_MAPPING_INT(OP_gt, cmova),
+  DEF_X64_CMOV_MAPPING_INT(OP_lt, cmovb),
+  DEF_X64_CMOV_MAPPING_INT(OP_ne, cmovne),
+  DEF_X64_CMOV_MAPPING_INT(OP_eq, cmove),
+};
+static const CMovIselMappingType cmovSignedIselMapping = {
+  DEF_X64_CMOV_MAPPING_INT(OP_le, cmovle),
+  DEF_X64_CMOV_MAPPING_INT(OP_ge, cmovge),
+  DEF_X64_CMOV_MAPPING_INT(OP_gt, cmovg),
+  DEF_X64_CMOV_MAPPING_INT(OP_lt, cmovl),
+  DEF_X64_CMOV_MAPPING_INT(OP_ne, cmovne),
+  DEF_X64_CMOV_MAPPING_INT(OP_eq, cmove),
+};
+#undef DEF_X64_CMOV_MAPPING_INT
+
+static inline X64MOP_t GetCMovCCMop(maple::Opcode opcode, int32 bitSize, bool isSigned) {
+  const auto &cmovIselMapping = isSigned ? cmovSignedIselMapping : cmovUnsignedIselMapping;
+  auto iter = cmovIselMapping.find(opcode);
+  if (iter == cmovIselMapping.end()) {
+    return x64::MOP_begin;
+  }
+  return iter->second[GetBitIndex(bitSize)];
+}
+} /* namespace x64 */
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_live.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_live.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ccdb9c1c89d9feb70a4f10ad471145b519ce33f
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_live.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_LIVE_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_LIVE_H
+
+#include "live.h"
+
+namespace maplebe {
+class X64LiveAnalysis : public LiveAnalysis {
+ public:
+  X64LiveAnalysis(CGFunc &func, MemPool &memPool) : LiveAnalysis(func, memPool) {}
+  ~X64LiveAnalysis() override = default;
+  bool CleanupBBIgnoreReg(regno_t reg) override;
+  void InitEhDefine(BB &bb) override {}
+  void GenerateReturnBBDefUse(BB &bb) const override {}
+  void ProcessCallInsnParam(BB &bb, const Insn &insn) const override {}
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_LIVE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h
new file mode 100644
index 0000000000000000000000000000000000000000..af7df9c4c72d13f4295eada723f4ea470a8e534e
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_X64_LOCALO_H
+#define MAPLEBE_INCLUDE_X64_LOCALO_H
+
+#include "local_opt.h"
+namespace maplebe {
+class X64LocalOpt : public LocalOpt {
+ public:
+  X64LocalOpt(MemPool &memPool, CGFunc &func, ReachingDefinition& rd)
+      : LocalOpt(memPool, func, rd) {}
+  ~X64LocalOpt() = default;
+ private:
+  void DoLocalCopyProp() override;
+};
+
+class CopyRegProp : public LocalPropOptimizePattern {
+ public:
+  CopyRegProp(CGFunc &cgFunc, ReachingDefinition &rd) : LocalPropOptimizePattern(cgFunc, rd) {}
+  ~CopyRegProp() override = default;
+  bool CheckCondition(Insn &insn) final;
+  void Optimize(BB &bb, Insn &insn) final;
+ private:
+  bool propagateOperand(Insn &insn, RegOperand& oldOpnd, RegOperand& replaceOpnd);
+};
+
+class X64RedundantDefRemove : public RedundantDefRemove {
+ public:
+  X64RedundantDefRemove(CGFunc &cgFunc, ReachingDefinition &rd) : RedundantDefRemove(cgFunc, rd) {}
+  ~X64RedundantDefRemove() override = default;
+  void Optimize(BB &bb, Insn &insn) final;
+};
+
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_X64_LOCALO_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_md.def b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_md.def
new file mode 100644
index 0000000000000000000000000000000000000000..229f27285979d2a2a9cff274b0257ee84eef7d50
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_md.def
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/* {mop, opnds, prop, latency, name, format, length} */
+/* begin machine operation code of X86_64 instructions */
+DEFINE_MOP(MOP_begin, {},0,0,"","",0)
+
+/* # Definitions
+ * use x64 style b/w/l/q for 8b/16b/32b/64b operations
+ * and use AT&T style assembly
+ */
+
+/* X64 MOVES */
+// TODO: fix instruction opnds, prop, latency, format and length
+// TODO: the encoding and enumeration seem too verbose
+// TODO: understand how other systems represent these MOPs (especially for x86-64)
+// TODO: this is still an experiment
+// TODO: should make sure the convention is consistent with (AT&T style?)
+// TODO: how the general machine instruction is designed?
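+
+/*
+ * How to read an entry below (an explanatory sketch only, not consumed by the build):
+ *   DEFINE_MOP(MOP_movl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1)
+ * declares AT&T-style "movl $imm, %reg32": operand 0 is a 32-bit immediate source and
+ * operand 1 is a 32-bit integer register destination; ISMOVE marks the insn as a copy
+ * for the optimizer, kLtAlu is its latency class, "movl" is the emitted mnemonic,
+ * "0,1" is the operand print order, and the final 1 is the length field.
+ */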
+ +/* mov */ +DEFINE_MOP(MOP_movb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8ID},ISMOVE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8ID},ISMOVE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8ID},ISLOAD,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISSTORE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISSTORE,kLtAlu,"movb","0,1",1) + +DEFINE_MOP(MOP_movw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16ID},ISMOVE,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16ID},ISMOVE,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16ID},ISLOAD,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISSTORE,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISSTORE,kLtAlu,"movw","0,1",1) + +DEFINE_MOP(MOP_movl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32ID},ISLOAD,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISSTORE,kLtAlu,"movl","0,1",1) + +DEFINE_MOP(MOP_movq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_i_r, {&OpndDesc::Imm64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64ID},ISLOAD,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE,kLtAlu,"movq","0,1",1) + +/* movabs */ +//The movabs instruction to load arbitrary 64-bit constant into register and to load/store integer register from/to arbitrary constant 64-bit address is available +DEFINE_MOP(MOP_movabs_i_r, {&OpndDesc::StImm64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movabs","0,1",1) +DEFINE_MOP(MOP_movabs_l_r, {&OpndDesc::Lbl64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movabs","0,1",1) + +/* push & pop & lea */ +DEFINE_MOP(MOP_pushq_r, {&OpndDesc::Reg64IS},0,kLtAlu,"pushq","0",1) +DEFINE_MOP(MOP_popq_r, {&OpndDesc::Reg32IS},0,kLtAlu,"popq","0",1) + +DEFINE_MOP(MOP_leaq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) +DEFINE_MOP(MOP_leal_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) +DEFINE_MOP(MOP_leaw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) + +/* Moving from a smaller data size to 32 bits */ +/* zero extension */ +DEFINE_MOP(MOP_movzbw_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movzbw","0,1",1) +DEFINE_MOP(MOP_movzbw_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movzbw","0,1",1) +DEFINE_MOP(MOP_movzbl_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzbl","0,1",1) +DEFINE_MOP(MOP_movzbl_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzbl","0,1",1) +DEFINE_MOP(MOP_movzwl_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzwl","0,1",1) +DEFINE_MOP(MOP_movzwl_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzwl","0,1",1) +/* sign extension */ +DEFINE_MOP(MOP_movsbw_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movsbw","0,1",1) +DEFINE_MOP(MOP_movsbw_r_r, 
{&OpndDesc::Reg8IS,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movsbw","0,1",1) +DEFINE_MOP(MOP_movsbl_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movsbl","0,1",1) +DEFINE_MOP(MOP_movsbl_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movsbl","0,1",1) +DEFINE_MOP(MOP_movswl_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movswl","0,1",1) +DEFINE_MOP(MOP_movswl_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movswl","0,1",1) + +/* Moving from a smaller data size to 64 bits */ +/* zero extension */ +/* + * Perhaps unexpectedly, instructions that move or generate 32-bit register values also set the upper 32 bits of the register to zero. + * Consequently, there is no need for an instruction movzlq. + */ +DEFINE_MOP(MOP_movzbq_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzbq","0,1",1) +DEFINE_MOP(MOP_movzbq_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzbq","0,1",1) +DEFINE_MOP(MOP_movzwq_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzwq","0,1",1) +DEFINE_MOP(MOP_movzwq_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzwq","0,1",1) +/* sign extension */ +DEFINE_MOP(MOP_movsbq_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movsbq","0,1",1) +DEFINE_MOP(MOP_movsbq_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movsbq","0,1",1) +DEFINE_MOP(MOP_movswq_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movswq","0,1",1) +DEFINE_MOP(MOP_movswq_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movswq","0,1",1) +DEFINE_MOP(MOP_movslq_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movslq","0,1",1) +DEFINE_MOP(MOP_movslq_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movslq","0,1",1) + +/* BasicOp */ +/* add */ +DEFINE_MOP(MOP_addb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"addb","0,1",1) 
+DEFINE_MOP(MOP_addw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"addq","0,1",1) +/* sub */ +DEFINE_MOP(MOP_subb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"subq","0,1",1) +/* and */ +DEFINE_MOP(MOP_andb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"andb","0,1",1) 
+DEFINE_MOP(MOP_andw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"andq","0,1",1) +/* or */ +DEFINE_MOP(MOP_orb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"orq","0,1",1) +/* xor */ +DEFINE_MOP(MOP_xorb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_i_r, 
{&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"xorq","0,1",1) + +/* UnaryOp */ +/* neg */ +DEFINE_MOP(MOP_negb_r, {&OpndDesc::Reg8IDS},ISUNARYOP,kLtAlu,"negb","0",1) +DEFINE_MOP(MOP_negw_r, {&OpndDesc::Reg16IDS},ISUNARYOP,kLtAlu,"negw","0",1) +DEFINE_MOP(MOP_negl_r, {&OpndDesc::Reg32IDS},ISUNARYOP,kLtAlu,"negl","0",1) +DEFINE_MOP(MOP_negq_r, {&OpndDesc::Reg64IDS},ISUNARYOP,kLtAlu,"negq","0",1) +DEFINE_MOP(MOP_negb_m, {&OpndDesc::Mem8S},ISUNARYOP,kLtAlu,"negb","0",1) +DEFINE_MOP(MOP_negw_m, {&OpndDesc::Mem16S},ISUNARYOP,kLtAlu,"negw","0",1) +DEFINE_MOP(MOP_negl_m, {&OpndDesc::Mem32S},ISUNARYOP,kLtAlu,"negl","0",1) +DEFINE_MOP(MOP_negq_m, {&OpndDesc::Mem64S},ISUNARYOP,kLtAlu,"negq","0",1) +/* not */ +DEFINE_MOP(MOP_notb_r, {&OpndDesc::Reg8IDS},ISUNARYOP,kLtAlu,"notb","0",1) +DEFINE_MOP(MOP_notw_r, {&OpndDesc::Reg16IDS},ISUNARYOP,kLtAlu,"notw","0",1) +DEFINE_MOP(MOP_notl_r, {&OpndDesc::Reg32IDS},ISUNARYOP,kLtAlu,"notl","0",1) +DEFINE_MOP(MOP_notq_r, {&OpndDesc::Reg64IDS},ISUNARYOP,kLtAlu,"notq","0",1) +DEFINE_MOP(MOP_notb_m, {&OpndDesc::Mem8S},ISUNARYOP,kLtAlu,"notb","0",1) +DEFINE_MOP(MOP_notw_m, {&OpndDesc::Mem16S},ISUNARYOP,kLtAlu,"notw","0",1) +DEFINE_MOP(MOP_notl_m, {&OpndDesc::Mem32S},ISUNARYOP,kLtAlu,"notl","0",1) +DEFINE_MOP(MOP_notq_m, {&OpndDesc::Mem64S},ISUNARYOP,kLtAlu,"notq","0",1) + +/* shift -- shl/sar/shr reg8, use cl */ +/* shl */ +DEFINE_MOP(MOP_shlb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_shlb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_shlb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_shlb_i_m, {&OpndDesc::Imm8, 
&OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shlq","0,1",1) +/* sar */ +DEFINE_MOP(MOP_sarb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_sarb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_sarb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_sarb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"sarq","0,1",1) +/* shr */ +DEFINE_MOP(MOP_shrb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shrq","0,1",1) +DEFINE_MOP(MOP_shrb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shrq","0,1",1) +DEFINE_MOP(MOP_shrb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shrq","0,1",1) +DEFINE_MOP(MOP_shrb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shrq","0,1",1) + +/* idiv, div -- opnd(use), rax(def,use), rdx(def,use) */ +DEFINE_MOP(MOP_idivw_r, {&OpndDesc::Reg16IS, &OpndDesc::Reg16IDS, 
&OpndDesc::Reg16IDS},0,kLtAlu,"idivw","0",1) +DEFINE_MOP(MOP_idivl_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"idivl","0",1) +DEFINE_MOP(MOP_idivq_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"idivq","0",1) +DEFINE_MOP(MOP_idivw_m, {&OpndDesc::Mem16S, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"idivw","0",1) +DEFINE_MOP(MOP_idivl_m, {&OpndDesc::Mem32S, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"idivl","0",1) +DEFINE_MOP(MOP_idivq_m, {&OpndDesc::Mem64S, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"idivq","0",1) +DEFINE_MOP(MOP_divw_r, {&OpndDesc::Reg16IS, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"divw","0",1) +DEFINE_MOP(MOP_divl_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"divl","0",1) +DEFINE_MOP(MOP_divq_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"divq","0",1) +DEFINE_MOP(MOP_divw_m, {&OpndDesc::Mem16S, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"divw","0",1) +DEFINE_MOP(MOP_divl_m, {&OpndDesc::Mem32S, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"divl","0",1) +DEFINE_MOP(MOP_divq_m, {&OpndDesc::Mem64S, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"divq","0",1) +/* cwd, cdq, cqo -- rax(def use), rdx(def) */ +DEFINE_MOP(MOP_cwd, {&OpndDesc::Reg16IDS, &OpndDesc::Reg16ID},0,kLtAlu,"cwd","",1) +DEFINE_MOP(MOP_cdq, {&OpndDesc::Reg32IDS, &OpndDesc::Reg32ID},0,kLtAlu,"cdq","",1) +DEFINE_MOP(MOP_cqo, {&OpndDesc::Reg64IDS, &OpndDesc::Reg64ID},0,kLtAlu,"cqo","",1) + +/* jmp, je, jne */ +DEFINE_MOP(MOP_jmpq_r, {&OpndDesc::Reg64IS},ISUNCONDBRANCH,kLtAlu,"jmp","0",1) +DEFINE_MOP(MOP_jmpq_m, {&OpndDesc::Mem64S},ISUNCONDBRANCH,kLtAlu,"jmp","0",1) +DEFINE_MOP(MOP_jmpq_l, {&OpndDesc::Lbl64},ISUNCONDBRANCH,kLtAlu,"jmp","0",1) // ip relative + +DEFINE_MOP(MOP_je_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"je","0",1) +DEFINE_MOP(MOP_ja_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"ja","0",1) // unsigned > +DEFINE_MOP(MOP_jae_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jae","0",1) // unsigned >= +DEFINE_MOP(MOP_jne_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jne","0",1) +DEFINE_MOP(MOP_jb_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jb","0",1) // unsigned < +DEFINE_MOP(MOP_jbe_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jbe","0",1) // unsigned <= +DEFINE_MOP(MOP_jg_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jg","0",1) // signed > +DEFINE_MOP(MOP_jge_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jge","0",1) // signed >= +DEFINE_MOP(MOP_jl_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jl","0",1) // signed < +DEFINE_MOP(MOP_jle_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jle","0",1) // signed <= + +/* cmp */ +DEFINE_MOP(MOP_cmpb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_m_r, {&OpndDesc::Mem8S, &OpndDesc::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8S},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8S},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpw_r_r, {&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_m_r, {&OpndDesc::Mem16S, &OpndDesc::Reg16IS},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_i_r, {&OpndDesc::Imm16, &OpndDesc::Reg16IS},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_r_m, {&OpndDesc::Reg16IS, &OpndDesc::Mem16S},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_i_m, {&OpndDesc::Imm16, 
&OpndDesc::Mem16S},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpl_r_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_m_r, {&OpndDesc::Mem32S, &OpndDesc::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_i_r, {&OpndDesc::Imm32, &OpndDesc::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_r_m, {&OpndDesc::Reg32IS, &OpndDesc::Mem32S},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_i_m, {&OpndDesc::Imm32, &OpndDesc::Mem32S},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpq_r_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_m_r, {&OpndDesc::Mem64S, &OpndDesc::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_i_r, {&OpndDesc::Imm32, &OpndDesc::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_r_m, {&OpndDesc::Reg64IS, &OpndDesc::Mem64S},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_i_m, {&OpndDesc::Imm32, &OpndDesc::Mem64S},0,kLtAlu,"cmpq","0,1",1) + +/* test */ +DEFINE_MOP(MOP_testq_r_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},0,kLtAlu,"testq","0,1",1) + +/* setcc -- use ccreg(CF/ZF/SF/OF) */ +DEFINE_MOP(MOP_setbe_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setbe","0",1) +DEFINE_MOP(MOP_setle_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setle","0",1) +DEFINE_MOP(MOP_setae_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setae","0",1) +DEFINE_MOP(MOP_setge_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setge","0",1) +DEFINE_MOP(MOP_setne_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setne","0",1) +DEFINE_MOP(MOP_setb_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setb","0",1) +DEFINE_MOP(MOP_setl_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setl","0",1) +DEFINE_MOP(MOP_seta_r, {&OpndDesc::Reg8ID},0,kLtAlu,"seta","0",1) +DEFINE_MOP(MOP_setg_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setg","0",1) +DEFINE_MOP(MOP_sete_r, {&OpndDesc::Reg8ID},0,kLtAlu,"sete","0",1) +DEFINE_MOP(MOP_setbe_m, {&OpndDesc::Mem8D},0,kLtAlu,"setbe","0",1) +DEFINE_MOP(MOP_setle_m, {&OpndDesc::Mem8D},0,kLtAlu,"setle","0",1) +DEFINE_MOP(MOP_setae_m, {&OpndDesc::Mem8D},0,kLtAlu,"setae","0",1) +DEFINE_MOP(MOP_setge_m, {&OpndDesc::Mem8D},0,kLtAlu,"setge","0",1) +DEFINE_MOP(MOP_setne_m, {&OpndDesc::Mem8D},0,kLtAlu,"setne","0",1) +DEFINE_MOP(MOP_setb_m, {&OpndDesc::Mem8D},0,kLtAlu,"setb","0",1) +DEFINE_MOP(MOP_setl_m, {&OpndDesc::Mem8D},0,kLtAlu,"setl","0",1) +DEFINE_MOP(MOP_seta_m, {&OpndDesc::Mem8D},0,kLtAlu,"seta","0",1) +DEFINE_MOP(MOP_setg_m, {&OpndDesc::Mem8D},0,kLtAlu,"setg","0",1) +DEFINE_MOP(MOP_sete_m, {&OpndDesc::Mem8D},0,kLtAlu,"sete","0",1) + +/* cmov */ +/* condition move if below or equal */ +DEFINE_MOP(MOP_cmovbew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbew","0,1",1) +DEFINE_MOP(MOP_cmovbel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbel","0,1",1) +DEFINE_MOP(MOP_cmovbeq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbeq","0,1",1) +DEFINE_MOP(MOP_cmovbew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbew","0,1",1) +DEFINE_MOP(MOP_cmovbel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbel","0,1",1) +DEFINE_MOP(MOP_cmovbeq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbeq","0,1",1) +/* condition move if less or equal */ +DEFINE_MOP(MOP_cmovlew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlew","0,1",1) +DEFINE_MOP(MOP_cmovlel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovlel","0,1",1) +DEFINE_MOP(MOP_cmovleq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovleq","0,1",1) +DEFINE_MOP(MOP_cmovlew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlew","0,1",1) +DEFINE_MOP(MOP_cmovlel_m_r, 
{&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovlel","0,1",1) +DEFINE_MOP(MOP_cmovleq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovleq","0,1",1) +/* condition move if above or equal */ +DEFINE_MOP(MOP_cmovaew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaew","0,1",1) +DEFINE_MOP(MOP_cmovael_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovael","0,1",1) +DEFINE_MOP(MOP_cmovaeq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaeq","0,1",1) +DEFINE_MOP(MOP_cmovaew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaew","0,1",1) +DEFINE_MOP(MOP_cmovael_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovael","0,1",1) +DEFINE_MOP(MOP_cmovaeq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaeq","0,1",1) +/* condition move if greater or equal */ +DEFINE_MOP(MOP_cmovgew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgew","0,1",1) +DEFINE_MOP(MOP_cmovgel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgel","0,1",1) +DEFINE_MOP(MOP_cmovgeq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgeq","0,1",1) +DEFINE_MOP(MOP_cmovgew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgew","0,1",1) +DEFINE_MOP(MOP_cmovgel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgel","0,1",1) +DEFINE_MOP(MOP_cmovgeq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgeq","0,1",1) +/* condition move if not equal */ +DEFINE_MOP(MOP_cmovnew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovnew","0,1",1) +DEFINE_MOP(MOP_cmovnel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovnel","0,1",1) +DEFINE_MOP(MOP_cmovneq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovneq","0,1",1) +DEFINE_MOP(MOP_cmovnew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovnew","0,1",1) +DEFINE_MOP(MOP_cmovnel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovnel","0,1",1) +DEFINE_MOP(MOP_cmovneq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovneq","0,1",1) +/* condition move if below */ +DEFINE_MOP(MOP_cmovbw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbw","0,1",1) +DEFINE_MOP(MOP_cmovbl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbl","0,1",1) +DEFINE_MOP(MOP_cmovbq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbq","0,1",1) +DEFINE_MOP(MOP_cmovbw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbw","0,1",1) +DEFINE_MOP(MOP_cmovbl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbl","0,1",1) +DEFINE_MOP(MOP_cmovbq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbq","0,1",1) +/* condition move if less */ +DEFINE_MOP(MOP_cmovlw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlw","0,1",1) +DEFINE_MOP(MOP_cmovll_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovll","0,1",1) +DEFINE_MOP(MOP_cmovlq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovlq","0,1",1) +DEFINE_MOP(MOP_cmovlw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlw","0,1",1) +DEFINE_MOP(MOP_cmovll_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovll","0,1",1) +DEFINE_MOP(MOP_cmovlq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovlq","0,1",1) +/* condition move if above */ +DEFINE_MOP(MOP_cmovaw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaw","0,1",1) +DEFINE_MOP(MOP_cmoval_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmoval","0,1",1) +DEFINE_MOP(MOP_cmovaq_r_r, 
{&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaq","0,1",1) +DEFINE_MOP(MOP_cmovaw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaw","0,1",1) +DEFINE_MOP(MOP_cmoval_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmoval","0,1",1) +DEFINE_MOP(MOP_cmovaq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaq","0,1",1) +/* condition move if greater */ +DEFINE_MOP(MOP_cmovgw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgw","0,1",1) +DEFINE_MOP(MOP_cmovgl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgl","0,1",1) +DEFINE_MOP(MOP_cmovgq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgq","0,1",1) +DEFINE_MOP(MOP_cmovgw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgw","0,1",1) +DEFINE_MOP(MOP_cmovgl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgl","0,1",1) +DEFINE_MOP(MOP_cmovgq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgq","0,1",1) +/* condition move if equal */ +DEFINE_MOP(MOP_cmovew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovew","0,1",1) +DEFINE_MOP(MOP_cmovel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovel","0,1",1) +DEFINE_MOP(MOP_cmoveq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmoveq","0,1",1) +DEFINE_MOP(MOP_cmovew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovew","0,1",1) +DEFINE_MOP(MOP_cmovel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovel","0,1",1) +DEFINE_MOP(MOP_cmoveq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmoveq","0,1",1) + +/* call, ret, leave */ +DEFINE_MOP(MOP_callq_l, {&OpndDesc::Lbl64,&OpndDesc::ListSrc,&OpndDesc::ListDest},ISCALL,kLtAlu,"callq","0",1) +DEFINE_MOP(MOP_callq_m, {&OpndDesc::Mem64S,&OpndDesc::ListSrc,&OpndDesc::ListDest},ISCALL,kLtAlu,"callq","0",1) +DEFINE_MOP(MOP_callq_r, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc,&OpndDesc::ListDest},ISCALL,kLtAlu,"callq","0",1) + +DEFINE_MOP(MOP_retq, {},CANTHROW,kLtBranch,"ret","",1) + +DEFINE_MOP(MOP_leaveq, {},CANTHROW,kLtBranch,"leave","",1) + +/* imul */ +DEFINE_MOP(MOP_imulw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"imulw","0,1",1) +DEFINE_MOP(MOP_imull_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"imull","0,1",1) +DEFINE_MOP(MOP_imulq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"imulq","0,1",1) + +/* nop */ +// TODO: still not clear why we need so many forms of nop (except for patch) +DEFINE_MOP(MOP_nopb, {&OpndDesc::Mem8S},0,kLtAlu,"nopb","",1) +DEFINE_MOP(MOP_nopw, {&OpndDesc::Mem16S},0,kLtAlu,"nopw","",1) +DEFINE_MOP(MOP_nopl, {&OpndDesc::Mem32S},0,kLtAlu,"nopl","",1) +DEFINE_MOP(MOP_nop, {},0,0,"nop","",1) + +/* Byte Swap */ +DEFINE_MOP(MOP_bswapl_r, {&OpndDesc::Reg32IDS},0,kLtAlu,"bswapl","0",1) +DEFINE_MOP(MOP_bswapq_r, {&OpndDesc::Reg64IDS},0,kLtAlu,"bswapq","0",1) + +/* xchg */ +DEFINE_MOP(MOP_xchgb_r_r, {&OpndDesc::Reg8IDS,&OpndDesc::Reg8IDS},0,kLtAlu,"xchgb","0,1",1) + +/* end of X64 instructions */ + +/* invalid operation */ +DEFINE_MOP(MOP_movq_i_m, {&OpndDesc::Imm64,&OpndDesc::Mem64D},0,kLtAlu,"invalid","0,1",1) + +/* pseudo operation */ +DEFINE_MOP(MOP_pseudo_ret_int, {&OpndDesc::Reg64IS},0,kLtUndef,"//MOP_pseudo_ret_int","", 0) diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h new file mode 100644 index 0000000000000000000000000000000000000000..8e51132d63b81e3653786cc596798e6e3872ef64 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h @@ -0,0 
+1,155 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H +#define MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H + +#include "memlayout.h" +#include "x64_abi.h" + +namespace maplebe { +class X64SymbolAlloc : public SymbolAlloc { + public: + X64SymbolAlloc() = default; + + ~X64SymbolAlloc() = default; + + void SetRegisters(bool isR) { + isRegister = isR; + } + + inline bool IsRegister() const { + return isRegister; + } + + private: + bool isRegister = false; +}; +/* + * On X64, stack frames are structured as follows: + * + * The stack grows downward -- full descending (SP points + * to a filled slot). + * + * Any of the parts of a frame is optional, i.e., it is + * possible to write a caller-callee pair in such a way + * that the particular part is absent in the frame. + * + * Before a call is made, the frame looks like: + * | | + * ||----------------------------| + * | args passed on the stack | (we call them up-formals) + * ||----------------------------|<- Stack Pointer + * | | + * + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * | GR Arg Save Area | + * ||----------------------------| + * | VR Arg Save Area | + * ||----------------------------| + * | callee-saved registers | + * ||----------------------------| + * | empty space. should have | + * | at least 16-byte alignment | + * ||----------------------------| + * | local variables | + * ||----------------------------|<- Stack Pointer + * | red zone | + * + * callee-saved registers include + * 1. rbx rbp r12 r14 r14 r15 + * 2. 
XMM0-XMM7 + */ + +class X64MemLayout : public MemLayout { + public: + X64MemLayout(BECommon &b, MIRFunction &f, MapleAllocator &mallocator) + : MemLayout(b, f, mallocator, kX64StackPtrAlignment) {} + + ~X64MemLayout() override = default; + + uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmtNode, int32 &aggCopySize, bool isIcall) override; + void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override; + + uint64 StackFrameSize() const; + + const MemSegment &locals() const { + return segLocals; + } + /* + * "Pseudo-registers can be regarded as local variables of a + * primitive type whose addresses are never taken" + */ + virtual void AssignSpillLocationsToPseudoRegisters() override; + + virtual SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override; + + uint32 GetSizeOfSpillReg() const { + return segSpillReg.GetSize(); + } + + uint32 GetSizeOfLocals() const { + return segLocals.GetSize(); + } + + void SetSizeOfGRSaveArea(uint32 sz) { + segGrSaveArea.SetSize(sz); + } + + uint32 GetSizeOfGRSaveArea() const { + return segGrSaveArea.GetSize(); + } + + inline void SetSizeOfVRSaveArea(uint32 sz) { + segVrSaveArea.SetSize(sz); + } + + uint32 GetSizeOfVRSaveArea() const { + return segVrSaveArea.GetSize(); + } + + int32 GetGRSaveAreaBaseLoc(); + int32 GetVRSaveAreaBaseLoc(); + private: + /* Layout function */ + void LayoutFormalParams(); + void LayoutLocalVariables(); + void LayoutVarargParams(); + + /* util function */ + void SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const; + void LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize); + + MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */ + MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea); + MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea); + MemSegment segSpillReg = MemSegment(kMsSpillReg); +}; +} +#endif // MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h new file mode 100644 index 0000000000000000000000000000000000000000..17eb40c65ff7d7a22b5366a1c2ee8b7d2476dcdd --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_OPTIMIZE_COMMON_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_OPTIMIZE_COMMON_H
+
+#include "x64_isa.h"
+#include "optimize_common.h"
+
+namespace maplebe {
+using namespace maple;
+
+class X64InsnVisitor : public InsnVisitor {
+ public:
+  explicit X64InsnVisitor(CGFunc &func) : InsnVisitor(func) {}
+
+  ~X64InsnVisitor() = default;
+
+  void ModifyJumpTarget(LabelIdx targetLabel, BB &bb) override;
+  void ModifyJumpTarget(Operand &targetOperand, BB &bb) override;
+  void ModifyJumpTarget(BB &newTarget, BB &bb) override;
+  /* Check whether extra gotos are required when relocating a bb */
+  Insn *CloneInsn(Insn &originalInsn) override;
+  LabelIdx GetJumpLabel(const Insn &insn) const override;
+  bool IsCompareInsn(const Insn &insn) const override;
+  bool IsCompareAndBranchInsn(const Insn &insn) const override;
+  bool IsAddOrSubInsn(const Insn &insn) const override;
+  RegOperand *CreateVregFromReg(const RegOperand &pReg) override;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_OPTIMIZE_COMMON_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_peep.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_peep.h
new file mode 100644
index 0000000000000000000000000000000000000000..488c82ad455d5816d2d563b96026aa7598c25e7c
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_peep.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_PEEP_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_PEEP_H
+
+#include <string>
+#include "peep.h"
+
+namespace maplebe {
+class X64CGPeepHole : public CGPeepHole {
+ public:
+  /* normal constructor */
+  X64CGPeepHole(CGFunc &f, MemPool *memPool) : CGPeepHole(f, memPool) {}
+  /* constructor for ssa */
+  X64CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) : CGPeepHole(f, memPool, cgssaInfo) {}
+  ~X64CGPeepHole() = default;
+  void Run() override;
+  bool DoSSAOptimize(BB &bb, Insn &insn) override;
+  void DoNormalOptimize(BB &bb, Insn &insn) override;
+};
+
+class RemoveMovingtoSameRegPattern : public CGPeepPattern {
+ public:
+  RemoveMovingtoSameRegPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn)
+      : CGPeepPattern(cgFunc, currBB, currInsn) {}
+  ~RemoveMovingtoSameRegPattern() override = default;
+  void Run(BB &bb, Insn &insn) override;
+  bool CheckCondition(Insn &insn) override;
+  std::string GetPatternName() override {
+    return "RemoveMovingtoSameRegPattern";
+  }
+};
+
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_PEEP_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_phases.def b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_phases.def
new file mode 100644
index 0000000000000000000000000000000000000000..c873072e6a4f7c0dca88b84e4b0958fdad787806
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_phases.def
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + + ADDTARGETPHASE("layoutstackframe", true); + ADDTARGETPHASE("createstartendlabel", true); + ADDTARGETPHASE("buildehfunc", GetMIRModule()->GetSrcLang() != kSrcLangC && false); + ADDTARGETPHASE("instructionselector", true); + ADDTARGETPHASE("moveargs", true); + ADDTARGETPHASE("cfgo", false); + ADDTARGETPHASE("localcopyprop", true); + ADDTARGETPHASE("regalloc", true); + ADDTARGETPHASE("postcfgo", false); + ADDTARGETPHASE("cgpostpeephole", true); + ADDTARGETPHASE("generateproepilog", true); + /* ASM EMIT */ + ADDTARGETPHASE("cgemit", true); diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..22896194a919b18dc5d8c7649a57247bd0a8265f --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H + +#include "proepilog.h" +#include "x64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +class X64GenProEpilog : public GenProEpilog { + public: + explicit X64GenProEpilog(CGFunc &func) : GenProEpilog(func) { + } + ~X64GenProEpilog() override = default; + + bool TailCallOpt() override; + bool NeedProEpilog() override; + void Run() override; + private: + void GenerateProlog(BB &bb); + void GenerateEpilog(BB &bb); + void GenerateCalleeSavedRegs(bool isPush); + void GeneratePushCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize); + void GeneratePopCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize); + void GeneratePushUnnamedVarargRegs(); + void GeneratePushRbpInsn(); + void GenerateMovRspToRbpInsn(); + void GenerateSubFrameSizeFromRspInsn(); + void GenerateAddFrameSizeToRspInsn(); + void GeneratePopInsn(); + void GenerateRetInsn(); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H */ diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_reaching.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_reaching.h new file mode 100644 index 0000000000000000000000000000000000000000..f53023c23e64aa73aa29a813b3da60c7fae34dc1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_reaching.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_REACHING_H
+#define MAPLEBE_INCLUDE_CG_X64_REACHING_H
+
+#include "reaching.h"
+
+namespace maplebe {
+class X64ReachingDefinition : public ReachingDefinition {
+ public:
+  X64ReachingDefinition(CGFunc &func, MemPool &memPool) : ReachingDefinition(func, memPool) {}
+  ~X64ReachingDefinition() override = default;
+  bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet &useInsnSet) const final;
+  std::vector<Insn*> FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const final;
+  std::vector<Insn*> FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const final;
+  bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB *movBB) const final;
+  bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn,
+                             InsnSet &useInsnSet) const final;
+  bool HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn);
+  bool DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO,
+                              std::vector<bool> &visitedBB) const;
+  InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const final;
+  InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const final;
+  InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const final;
+  bool FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const;
+ protected:
+  void InitStartGen() final;
+  void InitEhDefine(BB &bb) final;
+  void InitGenUse(BB &bb, bool firstTime = true) final;
+  void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) final;
+  void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) final;
+  void GenAllCallerSavedRegs(BB &bb, Insn &insn) final;
+  bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const final;
+  bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const final;
+  void AddRetPseudoInsn(BB &bb) final;
+  void AddRetPseudoInsns() final;
+  bool IsCallerSavedReg(uint32 regNO) const final;
+  void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const final;
+  void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const final;
+  void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector<bool> &visitedBB,
+                            InsnSet &defInsnSet) const final;
+  void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector<bool> &visitedBB,
+                            InsnSet &defInsnSet) const final;
+  int32 GetStackSize() const final;
+ private:
+  bool IsDiv(const Insn &insn) const;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_REACHING_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a11e8d8b6cb2aa9dec040455503a37a127f01df
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H
+#include "reg_info.h"
+#include "x64_abi.h"
+#include "x64_cg.h"
+
+namespace maplebe {
+class X64CallConvImpl;
+static const std::map<x64::X64reg, uint32> x64IntParamsRegIdx =
+    {{x64::RAX, 0}, {x64::RDI, 1}, {x64::RSI, 2}, {x64::RDX, 3}, {x64::RCX, 4}, {x64::R8, 5}, {x64::R9, 6}};
+
+class X64RegInfo : public RegisterInfo {
+ public:
+  X64RegInfo(MapleAllocator &mallocator, CallConvKind callConv) : RegisterInfo(mallocator), callConv(callConv) {
+  }
+
+  ~X64RegInfo() override = default;
+
+  void Init() override;
+  void Fini() override;
+  void SaveCalleeSavedReg(MapleSet<regno_t> savedRegs) override;
+  bool IsSpecialReg(regno_t regno) const override;
+  bool IsCalleeSavedReg(regno_t regno) const override;
+  bool IsYieldPointReg(regno_t regNO) const override;
+  bool IsUnconcernedReg(regno_t regNO) const override;
+  bool IsUnconcernedReg(const RegOperand &regOpnd) const override;
+  RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag) override;
+  ListOperand *CreateListOperand() override;
+  Insn *BuildMovInstruction(Operand &opnd0, Operand &opnd1) override;
+  Insn *BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override;
+  Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override;
+  Insn *BuildCommentInsn(const std::string &comment) override;
+  void FreeSpillRegMem(regno_t vrNum) override;
+  MemOperand *GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) override;
+  MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum,
+      bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) override;
+  bool IsGPRegister(regno_t regNO) const override {
+    return x64::IsGPRegister(static_cast<x64::X64reg>(regNO));
+  }
+  /* Those registers cannot be overwritten. */
+  bool IsUntouchableReg(regno_t regNO) const override {
+    return false;
+  }
+  /* Refactor later: integrate parameter and return regs */
+  uint32 GetIntRegsParmsNum() override {
+    /* Parms: rdi, rsi, rdx, rcx, r8, r9; Ret: rax, rdx */
+    return X64CallConvImpl::GetCallConvInfo(callConv).GetIntParamRegsNum() + 1;
+  }
+  uint32 GetIntRetRegsNum() override {
+    return X64CallConvImpl::GetCallConvInfo(callConv).GetIntReturnRegsNum();
+  }
+  uint32 GetFpRetRegsNum() override {
+    return X64CallConvImpl::GetCallConvInfo(callConv).GetFloatReturnRegsNum();
+  }
+  regno_t GetLastParamsIntReg() override {
+    return static_cast<regno_t>(X64CallConvImpl::GetCallConvInfo(callConv).GetIntParamRegs().back());
+  }
+  uint32 GetNormalUseOperandNum() override {
+    return 0;
+  }
+  regno_t GetIntRetReg(uint32 idx) override {
+    CHECK_FATAL(idx <= GetIntRetRegsNum(), "index out of range in IntRetReg");
+    return static_cast<regno_t>(X64CallConvImpl::GetCallConvInfo(callConv).GetIntReturnRegs()[idx]);
+  }
+  regno_t GetFpRetReg(uint32 idx) override {
+    CHECK_FATAL(idx <= GetFpRetRegsNum(), "index out of range in FpRetReg");
+    return static_cast<regno_t>(X64CallConvImpl::GetCallConvInfo(callConv).GetFloatReturnRegs()[idx]);
+  }
+  /* phys regs which can be pre-assigned:
+   * INT param regs -- rdi, rsi, rdx, rcx, r8, r9
+   * INT return regs -- rdx, rax
+   * FP param regs -- xmm0 ~ xmm7
+   * FP return regs -- xmm0 ~ xmm1
+   */
+  bool IsPreAssignedReg(regno_t regNO) const override {
+    return x64::IsParamReg(static_cast<x64::X64reg>(regNO)) ||
+           regNO == x64::RAX || regNO == x64::RDX;
+  }
+  uint32 GetIntParamRegIdx(regno_t regNO) const override {
+    CHECK_FATAL(GetIntRegs().size(), "should be initialized first");
+    return static_cast<uint32>(regNO - *GetIntRegs().begin());
+  }
+  uint32 GetFpParamRegIdx(regno_t regNO) const override {
+    CHECK_FATAL(GetFpRegs().size(), "should be initialized first");
+    return static_cast<uint32>(regNO - *GetFpRegs().begin());
+  }
+  regno_t GetLastParamsFpReg() override {
+    return x64::kRinvalid;
+  }
+  uint32 GetFloatRegsParmsNum() override {
+    return X64CallConvImpl::GetCallConvInfo(callConv).GetFloatParamRegsNum();
+  }
+  uint32 GetFloatRegsRetsNum() {
+    return X64CallConvImpl::GetCallConvInfo(callConv).GetFloatReturnRegsNum();
+  }
+  uint32 GetAllRegNum() override {
+    return x64::kAllRegNum;
+  }
+  regno_t GetInvalidReg() override {
+    return x64::kRinvalid;
+  }
+  bool IsAvailableReg(regno_t regNO) const override {
+    return x64::IsAvailableReg(static_cast<x64::X64reg>(regNO));
+  }
+  bool IsVirtualRegister(const RegOperand &regOpnd) override {
+    return regOpnd.GetRegisterNumber() > kAllRegNum;
+  }
+  bool IsVirtualRegister(regno_t regno) override {
+    return regno > kAllRegNum;
+  }
+  uint32 GetReservedSpillReg() override {
+    return x64::kRinvalid;
+  }
+  uint32 GetSecondReservedSpillReg() override {
+    return x64::kRinvalid;
+  }
+  bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) override {
+    return x64::IsSpillRegInRA(static_cast<x64::X64reg>(regNO), has3RegOpnd);
+  }
+ private:
+  CallConvKind callConv;
+};
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_standardize.h
new file mode 100644
index 0000000000000000000000000000000000000000..c94c5dc56b432d9957bcd1457464dc38f5d0f69e
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/x86_64/x64_standardize.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLEBE_INCLUDE_X64_STANDARDIZE_H
+#define MAPLEBE_INCLUDE_X64_STANDARDIZE_H
+
+#include "standardize.h"
+
+namespace maplebe {
+class X64Standardize : public Standardize {
+ public:
+  explicit X64Standardize(CGFunc &f) : Standardize(f) {
+    SetAddressMapping(true);
+  }
+
+  ~X64Standardize() override = default;
+
+ private:
+  void StdzMov(Insn &insn) override;
+  void StdzStrLdr(Insn &insn) override;
+  void StdzBasicOp(Insn &insn) override;
+  void StdzUnaryOp(Insn &insn) override;
+  void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override;
+  void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override;
+};
+}  /* namespace maplebe */
+#endif  /* MAPLEBE_INCLUDE_X64_STANDARDIZE_H */
diff --git a/ecmascript/mapleall/maple_be/include/cg/yieldpoint.h b/ecmascript/mapleall/maple_be/include/cg/yieldpoint.h
new file mode 100644
index 0000000000000000000000000000000000000000..d66103c59e93c1f4d0192098ccf64d7550235917
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/cg/yieldpoint.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_YIELDPOINT_H
+#define MAPLEBE_INCLUDE_CG_YIELDPOINT_H
+
+#include "cgfunc.h"
+#include "cg_phase.h"
+
+namespace maplebe {
+class YieldPointInsertion {
+ public:
+  explicit YieldPointInsertion(CGFunc &func) : cgFunc(&func) {}
+
+  virtual ~YieldPointInsertion() = default;
+
+  virtual void Run() {}
+
+  std::string PhaseName() const {
+    return "yieldpoint";
+  }
+
+ protected:
+  CGFunc *cgFunc;
+};
+
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgYieldPointInsertion, maplebe::CGFunc)
+MAPLE_FUNC_PHASE_DECLARE_END
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_YIELDPOINT_H */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/include/litecg/litecg.h b/ecmascript/mapleall/maple_be/include/litecg/litecg.h
new file mode 100644
index 0000000000000000000000000000000000000000..abbbe8d76bcdf8bf8ebc31512bf50aa85ba87a89
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/litecg/litecg.h
@@ -0,0 +1,63 @@
+#ifndef __MAPLE_LITECG_LITECG_H
+#define __MAPLE_LITECG_LITECG_H
+
+#include "lmir_builder.h"
+#include "cg_option.h"
+
+namespace maple {
+namespace litecg {
+
+enum OutputType {
+  kAsm,  // AT&T assembly file
+  kElf,  // ELF object file
+};
+
+enum TargetType {
+  kX86_64,
+  kAarch64,
+};
+
+enum DebugType {
+  kDebug,
+  kNoDebug,
+};
+
+enum InfoType {
+  kQuiet,
+  kVerbose
+};
+
+class LiteCG {
+public:
+  LiteCG(Module& mirModule);
+  ~LiteCG() = default;
+
+  // configurations API.
+  // If not specified, the defaults are:
+  // Ofastcompile, Elf, X86_64, NoDebug, Quiet.
+
+  // O0/O1/Ofastcompile
+  // LiteCG& SetOptLevel();  // defaults to fastcompile
+
+  // Returning LiteCG& enables chaining of the config functions.
+  LiteCG& SetOutputType(OutputType config);
+  LiteCG& SetTargetType(TargetType config);
+  LiteCG& SetDebugType(DebugType config);
+  LiteCG& SetVerbose(InfoType config);
+  LiteCG& SetupLiteCGEmitMemoryManager(void *codeSpace,
+                                       maplebe::MemoryManagerAllocateDataSectionCallback dataSectionAllocator,
+                                       maplebe::MemoryManagerSaveFunc2AddressInfoCallback funcInfoSaver);
+
+  void DumpIRToFile(const std::string& fileName);
+  void DoCG();
+
+private:
+  Module &module;
+  maplebe::CGOptions *cgOptions;
+};
+
+}  // namespace litecg
+}  // namespace maple
+
+#endif  // __MAPLE_LITECG_LITECG_H
diff --git a/ecmascript/mapleall/maple_be/include/litecg/lmir_builder.h b/ecmascript/mapleall/maple_be/include/litecg/lmir_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6d86a582f3b7a393a5496076fa06acf7118ee7f
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/include/litecg/lmir_builder.h
@@ -0,0 +1,703 @@
+#ifndef __MAPLE_LITECG_LMIR_BUILDER_H
+#define __MAPLE_LITECG_LMIR_BUILDER_H
+
+#include <cstdint>
+#include <map>
+#include <string>
+#include <string_view>
+#include <vector>
+
+/*
+  LMIR API exported.
+
+  LMIR, the low-level MIR, will serve as the canonical input for LiteCG.
+
+  Currently, it contains only the minimum set of features, enough to
+  support eCompiler scenarios. Later it will grow into a complete core
+  set, but still with "just enough" features for general cases.
+
+  Additional features will be defined with an x-extension strategy.
+
+  The motivation of LMIR is to hide the whole complexity of the MIR
+  interfaces, which should make integration of maple components easier.
+ */
+namespace maple {
+
+/* Imported types from MIR: this is basically a simplification.
+   We need only the declarations, not the headers.
+ */
+class MIRBuilder;  // currently we just delegate to MIRBuilder
+class MIRModule;
+class MIRFunction;
+class MIRType;
+class MIRStructType;
+class MIRArrayType;
+class MIRFuncType;
+class MIRConst;
+class MIRAggConst;
+class MIRSymbol;
+class StmtNode;
+class BaseNode;
+class BlockNode;
+class MIRPreg;
+
+namespace litecg {
+
+// Our type abstraction: currently delegates to MIR.
+
+using String = std::string;
+
+using Module = MIRModule;
+using Function = MIRFunction;
+
+// Note: Type is the base class of all other Types
+using Type = MIRType;              // base class of all Types
+using StructType = MIRStructType;  // |__ StructType
+using ArrayType = MIRArrayType;
+using Const = MIRConst;
+using StructConst = MIRAggConst;
+using ArrayConst = MIRAggConst;
+using Var = MIRSymbol;
+using Stmt = StmtNode;
+using BB = BlockNode;  // a temporary fake BB
+
+using Param = std::pair<String, Type*>;
+using Params = std::vector<Param>;
+using FieldOffset = std::pair<int32_t, int32_t>;  // (byteoffset, bitoffset)
+using PregIdx = int32_t;
+
+// enumerations
+enum class MIRIntrinsic {
+#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) INTRN_##STR,
+#include "intrinsics.def"
+#undef DEF_MIR_INTRINSIC
+};
+using IntrinsicId = MIRIntrinsic;
+/* Available intrinsics should be listed here, like:
+   INTRN_memcpy, INTRN_memset, etc.
+ */
+class Expr {
+public:
+  Expr(BaseNode *baseNode, Type *nodeType) : node(baseNode), type(nodeType) {}
+
+  Expr() : node(nullptr), type(nullptr) {}
+
+  ~Expr() = default;
+
+  BaseNode *GetNode()
+  {
+    return node;
+  }
+
+  BaseNode *GetNode() const
+  {
+    return node;
+  }
+
+  Type *GetType()
+  {
+    return type;
+  }
+
+  Type *GetType() const
+  {
+    return type;
+  }
+private:
+  BaseNode *node;
+  Type *type;
+};
+
+using Args = std::vector<Expr>;
+
+enum FuncAttr {   // visibility of the defined function
+  FUNC_global,    // function has global visibility
+  FUNC_weak,      // weak function defined in this module
+  FUNC_internal,  // function defined and only used in this module
+};
+
+enum GlobalRegister {
+  kSregSp = -1,
+  kSregFp = -2,
+};
+
+enum ConvAttr {
+  CCall,
+  Web_Kit_JS_Call,
+  GHC_Call,
+};
+
+enum GlobalVarAttr {
+  VAR_external,  // global variable declaration (no definition)
+  VAR_weak,      // weak variable defined in this module
+  VAR_internal,  // variable defined and only used in this module
+  VAR_global,    // exported variable defined in this module
+  VAR_readonly,  // TODO: this is an additional flag; the default is R/W
+};
+
+enum IntCmpCondition {
+  kEQ,
+  kNE,
+  kULT,
+  kULE,
+  kUGT,
+  kUGE,
+  kSLT,
+  kSLE,
+  kSGT,
+  kSGE,
+};
+
+enum LiteCGTypeKind {
+  kLiteCGTypeInvalid,
+  kLiteCGTypeUnknown,
+  kLiteCGTypeScalar,
+  kLiteCGTypeBitField,
+  kLiteCGTypeArray,
+  kLiteCGTypeFArray,
+  kLiteCGTypeJArray,
+  kLiteCGTypeStruct,
+  kLiteCGTypeUnion,
+  kLiteCGTypeClass,
+  kLiteCGTypeInterface,
+  kLiteCGTypeStructIncomplete,
+  kLiteCGTypeClassIncomplete,
+  kLiteCGTypeConstString,
+  kLiteCGTypeInterfaceIncomplete,
+  kLiteCGTypePointer,
+  kLiteCGTypeFunction,
+  kLiteCGTypeVoid,
+  kLiteCGTypeByName,
+  kLiteCGTypeParam,
+  kLiteCGTypeInstantVector,
+  kLiteCGTypeGenericInstant,
+};
+
+// TODO: FieldAttr: do we need to support volatile here?
+// using FieldAttr = AttrKind;
+/* attribute used for a field:
+   ATTR_volatile
+ */
+
+using FieldId = uint32_t;
+/* Use FieldId from MIR directly: it's a uint32_t, but with a special meaning
+   for FieldId == 0: it refers to the "whole" of the type.
+   To avoid conflict with the MIR type definition, the type name "FieldId" is used here.
+ */
+
+Module *CreateModuleWithName(const std::string &name);
+void ReleaseModule(Module *module);
+void LiteCGSetDeoptBundleInfo(Stmt &icallNode, const std::map &deoptBundleInfo);
+
+/**
+ * A simplified, specialized MapleIR builder for LiteCG.
+ * The basic IR set.
+ *
+ * General rules for the interface:
+ * + if a function returns a value type, then the return type is a value type
+ * + otherwise, if the return value can be null, the return type is a pointer,
+ *   and the caller should check for null
+ * + otherwise the return type is a reference.
+ *
+ * + for compound IR (which needs to be constructed with a sequence of calls,
+ *   e.g., struct type/const, array const, function, switch), a specific
+ *   builder class is used to do the "chained" construction.
+ */
+class LMIRBuilder {
+ public:
+  LMIRBuilder(Module& module);
+  ~LMIRBuilder() = default;
+
+  void DumpIRToFile(const std::string fileName);
+
+  LiteCGTypeKind LiteCGGetTypeKind(Type *type) const;
+
+  // Type creation (currently all in global scope)
+  /*
+    For primitive types, use LMIRBuilder's public members:
+    i8Type, u8Type, etc.
+   */
+
+  // derived type creation
+  Type *CreatePtrType(Type *mirType);
+  Type *CreateRefType(Type *mirType);
+
+  bool IsHeapPointerType(Type *mirType) const;
+
+  // (multi-dim) array of fixed size
+  ArrayType *CreateArrayType(Type* elemType, std::vector<uint32_t>& dimSize);
+
+  /* Use the StructTypeBuilder interface for StructType creation:
+     auto structType = CreateStructType("mystruct")
+                           .Field("field1", i32Type)
+                           .Field("field2", i64Type)
+                           .Done();
+   */
+  Type *GetStructType(const String& name);  // query for an existing struct type
+
+  // TODO: usage of this function has a precondition; it should be documented
+  FieldOffset GetFieldOffset(StructType *structType, FieldId fieldId);
+
+  // for function pointers
+  Type *CreateFuncType(std::vector<Type*> params, Type *retType, bool isVarg);
+
+  Type *LiteCGGetPointedType(Type *type);
+
+  std::vector<Type*> LiteCGGetFuncParamTypes(Type *type);
+
+  Type *LiteCGGetFuncReturnType(Type *type);
+  // TODO: still need an interface for AddressOfFunction
+
+  // Function declaration and definition
+  /* Use the FunctionBuilder interface for Function creation:
+     // i32 myfunc(i32 param1, i64 param2) { }
+     auto &myFunc = DefineFunction("myfunc")
+                        .Param(i32Type, "param1")
+                        .Param(i64Type, "param2")
+                        .Return(i32Type)  // optional for void
+                        .Done();
+
+     // i32 myfunc1(i32 param1, i64 param2);
+     auto &myFunc1 = DeclareFunction("myfunc1")
+                         .Param(i32Type, "param1")
+                         .Param(i64Type, "param2")
+                         .Return(i32Type)
+                         .Done();
+   */
+
+  // This is to enable a forwarded call before its definition.
+  // Can return null; the caller should check for null.
+  Function *GetFunc(const String& name);  // get a function by its unique name
+
+  // When a function is set as the current function (of the module), local
+  // declarations and statements are inserted into it.
+  void SetCurFunc(Function& function);
+
+  Function& GetCurFunction() const;
+
+  MIRPreg *LiteCGGetPreg(Function& func, int32_t pRegNo);
+  Expr LiteCGGetPregFP(Function& func);
+  Expr LiteCGGetPregSP();
+
+  // var creation
+  // TODO: refine the interface for attributes here. and also storage-class?
+  // initialized to zero if defined here; by default not exported
+  Var& CreateGlobalVar(Type *type, const String& name,
+                       GlobalVarAttr attr = VAR_internal);
+  // initialized to const; by default not exported
+  Var& CreateGlobalVar(Type *type, const String& name, Const& init,
+                       GlobalVarAttr attr = VAR_internal);
+  Var *GetGlobalVar(const String& name);
+
+  Var& CreateLocalVar(Type *type, const String& name);
+  Var* GetLocalVar(const String& name);
+  Var* GetLocalVarFromExpr(Expr inExpr);
+  Var& GetParam(Function& function, size_t index) const;
+
+  Const& CreateIntConst(Type *type, int64_t val);
+  Const& CreateFloatConst(float val);
+  Const& CreateDoubleConst(double val);
+  Const& CreateStrConst(const String& constStr);
+
+  // In MIR, the const for struct & array is the same. But we separate them here.
+  /* Use the StructConstBuilder interface for StructConst creation:
+     auto structConst = CreateStructConst(structType)
+                            .Field(1, CreateIntConst(i32Type, 0))
+                            .Field(2, CreateIntConst(i64Type, 0))
+                            .Done();
+   */
+
+  /* Use the ArrayConstBuilder interface for ArrayConst creation:
+     Note: the elements should be added in order and match the dim size.
+     auto arrayConst = CreateArrayConst(arrayType)
+                           .Element(CreateIntConst(i32Type, 0))
+                           .Element(CreateIntConst(i32Type, 0))
+                           .Done();
+     or using the following form:
+     auto arrayConst = CreateArrayConst(arrayType).Dim({0, 0}).Done();
+   */
+
+  /*
+    BB is the container node for a sequence of linear statements.
+    If needLabel == true, a label node is implicitly created as its first statement.
+    BB also serves as the target for gotos; when it is a goto target, it
+    should have needLabel == true.
+   */
+  BB& CreateBB(bool needLabel = true);
+  void AppendStmt(BB& bb, Stmt& stmt);  // append stmt to the back of BB
+  void AppendStmtBeforeBranch(BB& bb, Stmt& stmt);  // append stmt after the first non-jump stmt in the back of BB
+  bool IsEmptyBB(BB& bb);
+  void AppendBB(BB& bb);  // append BB to the back of the current function
+  BB& GetLastAppendedBB();  // get the last appended BB of the current function
+
+  void SetStmtCallConv(Stmt& stmt, ConvAttr convAttr);
+
+  // statements
+  Stmt& Goto(BB& dest);  // jump without condition
+  /* conditional goto:
+     when inverseCond == true, (!cond) is used as the condition
+
+     1. if(cond)-then form code should be generated this way:
+
+        if(!cond) goto BB_end      // CondGoto(cond, BB_end, true);
+        BB_ifTrue: {...}
+        BB_end: {...}
+
+     2. if-then-else form code should be generated this way:
+        if(cond) goto BB_ifTrue    // CondGoto(cond, BB_ifTrue);
+        BB_ifFalse: {
+          ...
+          goto BB_end              // should be generated in BB_ifFalse
+        }
+        BB_ifTrue: {...}
+        BB_end: {...}
+   */
+  Stmt& CondGoto(Var& cond, BB& target, bool inverseCond = false);
+  Stmt& CondGoto(Expr cond, BB& target, bool inverseCond = false);
+
+  /* Use the SwitchBuilder interface for switch statement creation:
+     auto switchStmt = Switch(type, cond, defaultBB)
+                           .Case(0, bb1)
+                           .Case(1, bb2)
+                           .Done();
+   */
+
+  // when result is nullptr, the result is not needed (or there is none)
+  Stmt& Call(Function& func, Args& args, Var *result = nullptr);
+
+  Stmt& ICall(Expr funcAddr, Args& args, Var *result = nullptr);
+
+  // when result is nullptr, the result is not needed (or there is none)
+  Stmt& IntrinsicCall(IntrinsicId func, Args& valueArgs, Var *result = nullptr);
+
+  Stmt& Return(Expr returnVal);
+
+  // debug info
+  Stmt& Comment(std::string comment);
+
+  Stmt& Dassign(Expr src, Var& var, FieldId fieldId = 0);
+  Stmt& Iassign(Expr src, Expr addr, Type *baseType, FieldId fieldId = 0);
+
+  // expressions
+  Expr Dread(Var& var);  // TODO: do we need other forms?
+  inline Expr Dread(Var *var) {  // shortcut for reading from a local var
+    return Dread(*var);
+  }
+
+  Expr DreadWithField(Var& var, FieldId id);
+
+  Expr Iread(Type *type, Expr addr, Type *baseType, FieldId fieldId = 0);
+  PregIdx CreatePreg(Type *mtype);
+  Stmt& Regassign(Expr src, PregIdx reg);
+  Expr Regread(PregIdx pregIdx);
+  Expr Addrof(Var& var);  // TODO: do we need other forms?
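+  /* Illustrative sketch only (the variables a, b, sum, bb and the builder
+     instance lmir are hypothetical, not part of the API): reading two locals,
+     adding them, and storing the result into a third local:
+       Expr add = lmir.Add(lmir.i32Type, lmir.Dread(a), lmir.Dread(b));
+       lmir.AppendStmt(bb, lmir.Dassign(add, sum));
+   */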
+  Expr ConstVal(Const& constVal);  // a const operand
+
+  Expr Not(Type *type, Expr src);
+  Expr Sqrt(Type *type, Expr src);
+
+  Expr Add(Type *type, Expr src1, Expr src2);
+  Expr Sub(Type *type, Expr src1, Expr src2);
+  Expr Mul(Type *type, Expr src1, Expr src2);
+  Expr UDiv(Type *type, Expr src1, Expr src2);  // unsigned
+  Expr SDiv(Type *type, Expr src1, Expr src2);  // signed
+  Expr URem(Type *type, Expr src1, Expr src2);  // unsigned
+  Expr SRem(Type *type, Expr src1, Expr src2);  // signed
+  Expr Shl(Type *type, Expr src1, Expr src2);
+  Expr LShr(Type *type, Expr src1, Expr src2);
+  Expr AShr(Type *type, Expr src1, Expr src2);
+  Expr And(Type *type, Expr src1, Expr src2);
+  Expr Or(Type *type, Expr src1, Expr src2);
+  Expr Xor(Type *type, Expr src1, Expr src2);
+
+#if 0  // move support for floating point to an optional set of APIs
+  Expr FAdd(Type *type, Expr src1, Expr src2);
+  Expr FSub(Type *type, Expr src1, Expr src2);
+  Expr FMul(Type *type, Expr src1, Expr src2);
+  Expr FDiv(Type *type, Expr src1, Expr src2);
+  Expr FRem(Type *type, Expr src1, Expr src2);
+#endif
+
+  Expr ICmpEQ(Type *type, Expr src1, Expr src2);
+  Expr ICmpNE(Type *type, Expr src1, Expr src2);
+  // unsigned compares
+  Expr ICmpULT(Type *type, Expr src1, Expr src2);
+  Expr ICmpULE(Type *type, Expr src1, Expr src2);
+  Expr ICmpUGT(Type *type, Expr src1, Expr src2);
+  Expr ICmpUGE(Type *type, Expr src1, Expr src2);
+  // signed compares
+  Expr ICmpSLT(Type *type, Expr src1, Expr src2);
+  Expr ICmpSLE(Type *type, Expr src1, Expr src2);
+  Expr ICmpSGT(Type *type, Expr src1, Expr src2);
+  Expr ICmpSGE(Type *type, Expr src1, Expr src2);
+  Expr ICmp(Type *type, Expr src1, Expr src2, IntCmpCondition cond);
+
+#if 0  // move support for floating point to an optional set of APIs
+  Expr FCmpOLT(Type *type, Expr src1, Expr src2);  // FCmpO* = ordered compares
+  Expr FCmpOEQ(Type *type, Expr src1, Expr src2);
+  Expr FCmpOLE(Type *type, Expr src1, Expr src2);
+  Expr FCmpOGT(Type *type, Expr src1, Expr src2);
+  Expr FCmpONE(Type *type, Expr src1, Expr src2);
+  Expr FCmpOGE(Type *type, Expr src1, Expr src2);
+  Expr FCmpORD(Type *type, Expr src1, Expr src2);
+  Expr FCmpUNO(Type *type, Expr src1, Expr src2);  // FCmpU* = unordered compares
+  Expr FCmpULT(Type *type, Expr src1, Expr src2);
+  Expr FCmpUEQ(Type *type, Expr src1, Expr src2);
+  Expr FCmpULE(Type *type, Expr src1, Expr src2);
+  Expr FCmpUGT(Type *type, Expr src1, Expr src2);
+  Expr FCmpUNE(Type *type, Expr src1, Expr src2);
+  Expr FCmpUGE(Type *type, Expr src1, Expr src2);
+#endif
+
+  // Type conversion
+  // The type of opnd should be consistent with fromType: no implicit conversion
+  Expr Trunc(Type *fromType, Type *toType, Expr opnd);
+  Expr ZExt(Type *fromType, Type *toType, Expr opnd);
+  Expr SExt(Type *fromType, Type *toType, Expr opnd);
+  Expr BitCast(Type *fromType, Type *toType, Expr opnd);
+  Expr Cvt(Type *fromType, Type *toType, Expr opnd);
+
+#if 0  // move support for floating point to an optional set of APIs
+  Expr FTrunc(Type *fromType, Type *toType, Expr opnd);
+  Expr FExt(Type *fromType, Type *toType, Expr opnd);
+  Expr FToUI(Type *fromType, Type *toType, Expr opnd);
+  Expr FToSI(Type *fromType, Type *toType, Expr opnd);
+  Expr UIToF(Type *fromType, Type *toType, Expr opnd);
+  Expr SIToF(Type *fromType, Type *toType, Expr opnd);
+#endif
+
+  Expr Select(Type *type, Expr cond, Expr ifTrue, Expr ifFalse);
+
+  void SetFuncFrameResverdSlot(int slot);
+  void SetFuncFramePointer(const String& val);
+
+ public:
+  // helper classes for compound IR entity building
+  class SwitchBuilder {
+   public:
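+    /* Accumulates (case value, target BB) pairs; Done() asks the owning
+       LMIRBuilder to materialize the switch statement. */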
+    SwitchBuilder(LMIRBuilder& builder_, Type *type_, Expr cond_, BB& defaultBB_)
+        : builder(builder_), type(type_), cond(cond_), defaultBB(defaultBB_) {}
+
+    SwitchBuilder& Case(int64_t value, BB& bb) {
+      cases.push_back(std::make_pair(value, &bb));
+      return *this;
+    }
+
+    Stmt &Done() {
+      return builder.CreateSwitchInternal(type, cond, defaultBB, cases);
+    }
+
+   private:
+    LMIRBuilder& builder;
+    Type *type;
+    Expr cond;
+    BB& defaultBB;
+    std::vector<std::pair<int64_t, BB*>> cases;
+  };
+
+  SwitchBuilder Switch(Type *type, Expr cond, BB& defaultBB) {
+    return SwitchBuilder(*this, type, cond, defaultBB);
+  }
+
+  class StructTypeBuilder {
+   public:
+    StructTypeBuilder(LMIRBuilder& builder_, const String& name_)
+        : builder(builder_), name(name_) {}
+
+    StructTypeBuilder& Field(std::string_view fieldName, Type *fieldType) {
+      // TODO: field type attribute?
+      fields.push_back(std::make_pair(fieldName, fieldType));
+      return *this;
+    }
+
+    Type *Done() {
+      return builder.CreateStructTypeInternal(name, fields);
+    }
+
+   private:
+    LMIRBuilder& builder;
+    const String& name;
+    std::vector<std::pair<std::string_view, Type*>> fields;
+  };
+
+  StructTypeBuilder CreateStructType(const String& name) {
+    return StructTypeBuilder(*this, name);
+  }
+
+  class StructConstBuilder {
+   public:
+    StructConstBuilder(LMIRBuilder& builder_, StructType *type_)
+        : builder(builder_) {
+      structConst = &builder_.CreateStructConstInternal(type_);
+    }
+
+    StructConstBuilder& Field(FieldId fieldId, Const& field) {
+      builder.AddConstItemInternal(*structConst, fieldId, field);
+      return *this;
+    }
+
+    StructConst &Done() {
+      return *structConst;
+    }
+
+   private:
+    LMIRBuilder& builder;
+    StructConst *structConst;
+  };
+
+  StructConstBuilder CreateStructConst(StructType *type) {
+    return StructConstBuilder(*this, type);
+  }
+
+  class ArrayConstBuilder {
+   public:
+    ArrayConstBuilder(LMIRBuilder& builder_, ArrayType *type_)
+        : builder(builder_) {
+      arrayConst = &builder.CreateArrayConstInternal(type_);
+    }
+
+    ArrayConstBuilder& Element(Const& element) {
+      builder.AddConstItemInternal(*arrayConst, element);
+      return *this;
+    }
+
+    template <typename T>
+    ArrayConstBuilder& Dim(const std::vector<T> init) {
+      for (const auto& value : init) {
+        // TODO: fix the element type.
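+        // Note: Dim() currently materializes every element as an i32 const;
+        // use Element() with explicitly typed consts for other element types.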
+        Const& element = builder.CreateIntConst(builder.i32Type, static_cast<int64_t>(value));
+        Element(element);
+      }
+      return *this;
+    }
+
+    template <typename T>
+    ArrayConstBuilder& Dim(std::initializer_list<T> literal) {
+      return Dim(std::vector<T>(literal));
+    }
+
+    ArrayConst &Done() {
+      return *arrayConst;
+    }
+
+   private:
+    LMIRBuilder& builder;
+    ArrayConst *arrayConst;
+    // ArrayType *type;
+  };
+
+  ArrayConstBuilder CreateArrayConst(ArrayType *type) {
+    return ArrayConstBuilder(*this, type);
+  }
+
+  class FunctionBuilder {
+   public:
+    FunctionBuilder(LMIRBuilder& builder_, const String& name_, bool needBody_)
+        : builder(builder_), name(name_), needBody(needBody_) {
+      // retType = voidType;
+      attr = FUNC_internal;
+      convAttr = CCall;
+      isVargs = false;
+    }
+
+    // optional: indicates that the function has variable args
+    FunctionBuilder& Vargs() {
+      isVargs = true;
+      return *this;
+    }
+
+    FunctionBuilder& Param(Type *type, const String paramName) {
+      params.push_back(std::make_pair(paramName, type));
+      return *this;
+    }
+
+    // optional: if not called, the function returns nothing (returns void)
+    FunctionBuilder& Return(Type *type) {
+      retType = type;
+      return *this;
+    }
+
+    // optional: if not called, defaults to FUNC_internal
+    FunctionBuilder& Attribute(FuncAttr attr_) {
+      attr = attr_;
+      return *this;
+    }
+
+    // optional: if not called, defaults to CCall
+    FunctionBuilder& CallConvAttribute(ConvAttr convAttr_) {
+      convAttr = convAttr_;
+      return *this;
+    }
+
+    Function &Done() {
+      return builder.CreateFunctionInternal(name, retType, params,
+                                            isVargs, needBody, attr, convAttr);
+    }
+
+   private:
+    LMIRBuilder& builder;
+    const String& name;
+    Type *retType;
+    FuncAttr attr;
+    ConvAttr convAttr;
+    bool isVargs;
+    bool needBody;  // indicates a definition (true) or a declaration (false)
+    Params params;
+  };
+
+  // only declares the function in the current module (i.e. an external function)
+  FunctionBuilder DeclareFunction(const String& name) {
+    return FunctionBuilder(*this, name, false);
+  }
+
+  // defines the function in the current module
+  FunctionBuilder DefineFunction(const String& name) {
+    return FunctionBuilder(*this, name, true);
+  }
+
+ public:
+  // builtin types: primitive types (all MIR primitive types except PTY_ptr)
+  Type *i8Type;
+  Type *i16Type;
+  Type *i32Type;
+  Type *i64Type;
+  Type *i128Type;
+  Type *u1Type;
+  Type *u8Type;
+  Type *u16Type;
+  Type *u32Type;
+  Type *u64Type;
+  Type *u128Type;
+  Type *voidType;
+  Type *f32Type;
+  Type *f64Type;
+
+  // builtin types: commonly used derived types
+  Type *strType;
+  Type *i64PtrType;
+  Type *i64RefType;
+
+ private:
+  Stmt& CreateSwitchInternal(Type *type, Expr cond, BB& defaultBB,
+                             std::vector<std::pair<int64_t, BB*>>& cases);
+  Type *CreateStructTypeInternal(const String& name,
+                                 std::vector<std::pair<std::string_view, Type*>>& fields);
+  StructConst& CreateStructConstInternal(StructType *type);
+  void AddConstItemInternal(StructConst& structConst, FieldId fieldId, Const& field);
+  void AddConstItemInternal(ArrayConst& arrayConst, Const& element);
+  ArrayConst& CreateArrayConstInternal(ArrayType *type);
+  Function& CreateFunctionInternal(const String& name, Type *retType, Params& params,
+                                   bool isVargs, bool needBody, FuncAttr attr, ConvAttr convAttr);
+
+ private:
+  MIRBuilder& mirBuilder;  // the real IR builder in the current implementation
+  Module& module;          // and the module to process
+};
+
+}  // namespace litecg
+}  // namespace maple
+#endif  // __MAPLE_LITECG_LMIR_BUILDER_H
diff --git a/ecmascript/mapleall/maple_be/mdgen/gendef.py b/ecmascript/mapleall/maple_be/mdgen/gendef.py
new file mode 100755
index
0000000000000000000000000000000000000000..a49862a0b8de538f732651ca65edf3d8c8c8d7fb
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/mdgen/gendef.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# coding=utf-8
+#
+# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+#
+# OpenArkCompiler is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#     http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+# FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+#
+import os, sys, subprocess, shlex, re, argparse
+
+
+def Gendef(execTool, mdFiles, outputDir, asanLib=None):
+    tdList = []
+    for mdFile in mdFiles:
+        if mdFile.find('sched') >= 0:
+            schedInfo = mdFile
+            mdCmd = "%s --genSchdInfo %s -o %s" % (execTool, schedInfo, outputDir)
+            isMatch = re.search(r'[;\\|\\&\\$\\>\\<`]', mdCmd, re.M|re.I)
+            if (isMatch):
+                print("Command injection detected, refusing to run!")
+                return
+            print("[*] %s" % (mdCmd))
+            localEnv = os.environ
+            if asanLib is not None:
+                asanEnv = asanLib.split("=")
+                localEnv[asanEnv[0]] = asanEnv[1]
+                print("env :" + str(asanEnv))
+            subprocess.check_call(shlex.split(mdCmd), shell = False, env = localEnv)
+        else:
+            tdList.append(mdFile)
+    return
+
+
+def Process(execTool, mdFileDir, outputDir, asanLib=None):
+    if not (os.path.exists(execTool)):
+        print("maplegen is required before generating def files automatically")
+        return
+    if not (os.path.exists(mdFileDir)):
+        print("td/md files are required as input!!!")
+        print("Generate def files FAILED!!!")
+        return
+
+    mdFiles = []
+    for root, dirs, allfiles in os.walk(mdFileDir):
+        for mdFile in allfiles:
+            mdFiles.append("%s/%s" % (mdFileDir, mdFile))
+
+    if not (os.path.exists(outputDir)):
+        print("Create the " + outputDir)
+        os.makedirs(outputDir)
+    Gendef(execTool, mdFiles, outputDir, asanLib)
+
+    defFile = "%s/mplad_arch_define.def" % (outputDir)
+    if not (os.path.exists(defFile)):
+        Gendef(execTool, mdFiles, outputDir, asanLib)
+    for mdfile in mdFiles:
+        if (os.stat(mdfile).st_mtime > os.stat(defFile).st_mtime):
+            Gendef(execTool, mdFiles, outputDir, asanLib)
+    if (os.stat(execTool).st_mtime > os.stat(defFile).st_mtime):
+        Gendef(execTool, mdFiles, outputDir, asanLib)
+
+
+def get_arg_parser():
+    parser = argparse.ArgumentParser(
+        description="maplegen")
+    parser.add_argument('-e', '--exe',
+                        help='maplegen_exe_directory')
+    parser.add_argument('-m', '--md',
+                        help='mdfiles_directory')
+    parser.add_argument('-o', '--out',
+                        help='output_def_files_directory')
+    parser.add_argument('-a', '--asan',
+                        help='enable asan, followed by the env to preload, e.g. LD_PRELOAD=xxxx')
+    return parser
+
+
+def main():
+    parser = get_arg_parser()
+    args = parser.parse_args()
+    if (args.exe is None or args.md is None or args.out is None):
+        print(str(args))
+        parser.print_help()
+        exit(-1)
+
+    Process(args.exe, args.md, args.out, args.asan)
+
+if __name__ == "__main__":
+    main()
diff --git a/ecmascript/mapleall/maple_be/mdgen/include/mdgenerator.h b/ecmascript/mapleall/maple_be/mdgen/include/mdgenerator.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8877ba79f55589b7aa0b719682318762536dd1b
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/mdgen/include/mdgenerator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDGENERATOR_H +#define MAPLEBE_MDGEN_INCLUDE_MDGENERATOR_H +#include +#include "mdrecord.h" +#include "mpl_logging.h" + +namespace MDGen { +class MDCodeGen { + public: + MDCodeGen(const MDClassRange &inputRange, const std::string &oFileDirArg) + : curKeeper (inputRange), + outputFileDir(oFileDirArg) {} + virtual ~MDCodeGen() = default; + + const std::string &GetOFileDir() const { + return outputFileDir; + } + void SetTargetArchName(const std::string &archName) const { + targetArchName = archName; + } + + void EmitCheckPtr(std::ofstream &outputFile, const std::string &emitName, const std::string &name, + const std::string &ptrType) const; + void EmitFileHead(std::ofstream &outputFile, const std::string &headInfo) const; + MDClass GetSpecificClass (const std::string &className); + + protected: + MDClassRange curKeeper; + + private: + static std::string targetArchName; + std::string outputFileDir; +}; + +class SchedInfoGen : public MDCodeGen { + public: + SchedInfoGen(const MDClassRange &inputRange, const std::string &oFileDirArg) + : MDCodeGen(inputRange, oFileDirArg) {} + ~SchedInfoGen() override { + if (outFile.is_open()) { + outFile.close(); + } + } + + void EmitArchDef(); + const std::string &GetArchName(); + void EmitUnitIdDef(); + void EmitUnitDef(); + void EmitUnitNameDef(); + void EmitLatencyDef(); + void EmitResvDef(); + void EmitBypassDef(); + void Run(); + + private: + std::ofstream outFile; +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDGENERATOR_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/mdgen/include/mdlexer.h b/ecmascript/mapleall/maple_be/mdgen/include/mdlexer.h new file mode 100644 index 0000000000000000000000000000000000000000..adebf6dca4717a17956dc015911f6c8ba4deb6e9 --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/include/mdlexer.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDLEXER_H +#define MAPLEBE_MDGEN_INCLUDE_MDLEXER_H + +#include +#include +#include +#include +#include "stdio.h" +#include "mdtokens.h" +#include "mpl_logging.h" +#include "types_def.h" + +namespace MDGen{ +using namespace maple; +class MDLexer { + public: + MDLexer() { + keywords.clear(); + /* input can be improved */ + (void)keywords.insert(std::make_pair("Def", kMDDef)); + (void)keywords.insert(std::make_pair("Class", kMDClass)); + (void)keywords.insert(std::make_pair("DefType", kMDDefType)); + }; + ~MDLexer() { + if (mdFileInternal.is_open()) { + mdFileInternal.close(); + } + }; + + MDTokenKind ReturnError() const; + MDTokenKind NextToken(); + MDTokenKind LexToken(); + MDTokenKind GetTokenIdentifier(); + MDTokenKind GetTokenConstVal(); + int ReadOneLine(); + bool SkipCComment(); + void SkipALineComment(); + + void PrepareFile(const std::string &mdfileName); + const std::string &GetStrToken() const { + return strToken; + } + int64_t GetIntVal() const { + return intVal; + } + const std::string &GetStrLine() const { + return strLine; + } + size_t GetStrLineSize() const { + return strLine.size(); + } + void RemoveInValidAtBack() { + if (strLine.length() == 0) { + return; + } + if (strLine.back() == '\n') { + strLine.pop_back(); + } + if (strLine.back() == '\r') { + strLine.pop_back(); + } + } + MDTokenKind GetCurKind() const { + return curKind; + } + char GetCurChar() { + return curPos < GetStrLineSize() ? strLine[curPos] : 0; + } + char GetNextChar() { + ++curPos; + return curPos < GetStrLineSize() ? strLine[curPos] : 0; + } + char ViewNextChar() const { + return curPos < GetStrLineSize() ? strLine[curPos] : 0; + } + char GetCharAt(uint32 pos) { + if (pos >= GetStrLineSize()) { + return 0; + } + return strLine[pos]; + } + int GetLineNumber() const { + return lineNumber; + } + + MDTokenKind GetHexConst(uint32 startPos, bool isNegative); + MDTokenKind GetIntConst(uint32 digitStartPos, bool isNegative); + MDTokenKind GetFloatConst(); + + private: + static constexpr int maxNumLength = 10; + std::ifstream *mdFile = nullptr; + std::ifstream mdFileInternal; + uint32 lineNumber = 0; /* current Processing Line */ + uint32 curPos = 0; /* Position in a line */ + std::string strLine = ""; /* current token line */ + std::string strToken = ""; /* store ID,keywords ... */ + int32 intVal = 0; /* store integer when token */ + float floatVal = 0; /* store float value when token */ + MDTokenKind curKind = kMDInvalid; /* current token kind */ + std::unordered_map keywords; /* store keywords defined for md files */ +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDLEXER_H */ diff --git a/ecmascript/mapleall/maple_be/mdgen/include/mdparser.h b/ecmascript/mapleall/maple_be/mdgen/include/mdparser.h new file mode 100644 index 0000000000000000000000000000000000000000..964995110bc63b131dc240fde80226d75db45d8a --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/include/mdparser.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDPARSER_H +#define MAPLEBE_MDGEN_INCLUDE_MDPARSER_H + +#include "mdlexer.h" +#include "mdrecord.h" +#include "mempool.h" + +namespace MDGen{ +class MDParser { + public: + MDParser(MDClassRange &newKeeper, maple::MemPool *memPool) : dataKeeper(newKeeper), mdMemPool(memPool) { + } + ~MDParser() = default; + + bool ParseFile(const std::string &inputFile); + bool ParseObjectStart(); + bool ParseObject(); + bool IsObjectStart(MDTokenKind k) const; + bool ParseDefType(); + bool ParseMDClass(); + bool ParseMDClassBody(MDClass &oneClass); + bool ParseMDObject(); + bool ParseMDObjBody(MDObject &curObj); + bool ParseIntElement(MDObject &curObj, bool isVec); + bool ParseStrElement(MDObject &curObj, bool isVec); + bool ParseDefTyElement(MDObject &curObj, bool isVec, const std::set &childSet); + bool ParseDefObjElement(MDObject &curObj, bool isVec, const MDClass &pClass); + + /* error process */ + bool EmitError(const std::string &errMsg); + + private: + MDLexer lexer; + MDClassRange &dataKeeper; + maple::MemPool *mdMemPool; +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDPARSER_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/mdgen/include/mdrecord.h b/ecmascript/mapleall/maple_be/mdgen/include/mdrecord.h new file mode 100644 index 0000000000000000000000000000000000000000..190821b13dbd6079a7bb15b12cf13c93e46e47df --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/include/mdrecord.h @@ -0,0 +1,292 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_MDGEN_INCLUDE_MDRECORD_H
+#define MAPLEBE_MDGEN_INCLUDE_MDRECORD_H
+
+#include <climits>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#include "mempool_allocator.h"
+#include "mempool.h"
+#include "mpl_logging.h"
+#include "types_def.h"
+
+/* Define the base data structures used to store information from .md files */
+namespace MDGen {
+class MDClass;  /* circular dependency */
+
+enum RecordType : maple::uint32 {
+  kClassName,
+  kAnonClassName,
+  kObjectName,
+  kElementName,
+  kIntType,
+  kStringType,
+  kTypeName,
+  kTypeMemberName,
+  kUndefinedStr
+};
+
+struct StrInfo {
+  unsigned int idx;
+  RecordType sType;
+  StrInfo(unsigned int curIdx, RecordType curTy) : idx(curIdx), sType(curTy) {}
+};
+
+class MDElement {
+ public:
+  MDElement() = default;
+  virtual ~MDElement() = default;
+  enum ElementTy : maple::uint32 {
+    kEleIntTy,
+    kEleStrTy,
+    kEleDefTyTy,
+    kEleDefObjTy,
+    kEleVecTy,
+    kEleDefaultTy,
+    kEleInValidTy
+  };
+
+  unsigned int GetContent() const {
+    return DoGetContent();
+  }
+
+  ElementTy GetRecDataTy() const {
+    return eleType;
+  }
+
+ protected:
+  ElementTy eleType = kEleInValidTy;
+
+ private:
+  virtual unsigned int DoGetContent() const = 0;
+};
+
+class DefaultElement : public MDElement {
+ public:
+  DefaultElement() {
+    eleType = kEleDefaultTy;
+  }
+
+  ~DefaultElement() override = default;
+
+ private:
+  unsigned int DoGetContent() const override {
+    CHECK_FATAL(false, "Cannot load a default element's content");
+    return UINT_MAX;
+  }
+};
+
+class IntElement : public MDElement {
+ public:
+  explicit IntElement(unsigned int curVal) : intEleVal(curVal) {
+    eleType = kEleIntTy;
+  }
+
+  ~IntElement() override = default;
+
+ private:
+  unsigned int intEleVal;
+  unsigned int DoGetContent() const override {
+    return intEleVal;
+  }
+};
+
+class StringElement : public MDElement {
+ public:
+  explicit StringElement(unsigned int curIdx) : strElementIdx(curIdx) {
+    eleType = kEleStrTy;
+  }
+
+  ~StringElement() override = default;
+
+ private:
+  unsigned int strElementIdx;
+  unsigned int DoGetContent() const override {
+    return strElementIdx;
+  }
+};
+
+class DefTyElement : public MDElement {
+ public:
+  DefTyElement() {
+    eleType = kEleDefTyTy;
+  }
+
+  ~DefTyElement() override = default;
+
+  bool SetContent(const StrInfo curInfo, const std::set<unsigned int> &childSet);
+
+ private:
+  unsigned int elementIdx = UINT_MAX;
+  unsigned int DoGetContent() const override {
+    return elementIdx;
+  }
+};
+
+class DefObjElement : public MDElement {
+ public:
+  DefObjElement() {
+    eleType = kEleDefObjTy;
+  }
+
+  ~DefObjElement() override = default;
+
+  bool SetContent(const StrInfo curInfo, const MDClass &parentClass);
+
+ private:
+  unsigned int elementIdx = UINT_MAX;
+  unsigned int DoGetContent() const override {
+    return elementIdx;
+  }
+};
+
+class VecElement : public MDElement {
+ public:
+  explicit VecElement(maple::MemPool &mem) : alloc(&mem), vecData(alloc.Adapter()) {
+    eleType = kEleVecTy;
+  }
+
+  ~VecElement() override = default;
+
+  void appendElement(MDElement *curElement) {
+    vecData.emplace_back(curElement);
+  }
+
+  const maple::MapleVector<MDElement*> GetVecData() const {
+    return vecData;
+  }
+
+  size_t GetVecDataSize() const {
+    return vecData.size();
+  }
+
+ private:
+  maple::MapleAllocator alloc;
+  maple::MapleVector<MDElement*> vecData;
+
+  unsigned int DoGetContent() const override {
+    CHECK_FATAL(false, "A vector element does not have a single content");
+    return UINT_MAX;
+  }
+};
+
+class MDObject {
+ public:
+  MDObject(unsigned int curIdx, MDClass &pClass, maple::MemPool &memPool)
+      : objectIdx(curIdx), parentClass(&pClass), alloc(&memPool), mdElements(alloc.Adapter()) {}
+
+  ~MDObject() = default;
+
+  const MDElement *GetOneMDElement(size_t index) const;
+
+  void AddMDElements(MDElement* curElement) {
+    mdElements.emplace_back(curElement);
+  }
+
+  unsigned int GetIdx() const {
+    return objectIdx;
+  }
+
+  const MDClass *GetParentClass() const {
+    return parentClass;
+  }
+
+ private:
+  unsigned int objectIdx;
+  MDClass *parentClass;
+  maple::MapleAllocator alloc;
+  maple::MapleVector<MDElement*> mdElements;
+};
+
+class MDClass {
+ public:
+  MDClass(unsigned int classIdx, bool isAnonymous) {
+    this->classIdx = classIdx;
+    this->isAnonymous = isAnonymous;
+  }
+  ~MDClass() = default;
+
+  const MDObject &GetOneMDObject(size_t index) const;
+  void AddClassMember(MDObject inputObj);
+  bool IsClassMember(unsigned int curIdx) const;
+  bool IsValidStructEle(RecordType curTy) const;
+  unsigned int GetClassIdx() const {
+    return classIdx;
+  }
+  bool IsAnonymousClass() const {
+    return isAnonymous;
+  }
+  const std::vector<std::pair<unsigned int, bool>> GetFormalTypes() const {
+    return formalTypes;
+  }
+  const std::set<unsigned int> GetchildObjNames() const {
+    return childObjNames;
+  }
+  size_t GetFormalTypeSize() const {
+    return formalTypes.size();
+  }
+  size_t GetMDObjectSize() const {
+    return mdObjects.size();
+  }
+  void BuildFormalTypes(unsigned int memberIdx, bool isVec);
+
+ private:
+  unsigned int classIdx;
+  bool isAnonymous;
+  std::vector<MDObject> mdObjects;
+  std::vector<std::pair<unsigned int, bool>> formalTypes;
+  std::set<unsigned int> childObjNames;
+};
+
+class MDClassRange {
+ public:
+  explicit MDClassRange(std::string module) : moduleName(module) {
+    stringTable.clear();
+    stringHashTable.clear();
+    /* init common types such as unsigned int, string, float */
+    std::set<unsigned int> initTypes;
+    AddDefinedType(CreateStrInTable("int", kIntType), initTypes);
+    AddDefinedType(CreateStrInTable("string", kStringType), initTypes);
+  }
+  ~MDClassRange() = default;
+
+  StrInfo GetStrInTable(const std::string &inStr);
+  RecordType GetStrTyByIdx(size_t curIdx);
+  const std::string &GetStrByIdx(size_t curIdx);
+  void AddMDClass(MDClass curClass);
+  MDClass GetOneMDClass(unsigned int givenIdx);
+  std::set<unsigned int> GetOneSpcType(unsigned int givenTyIdx);
+  size_t GetStringTableSize() const {
+    return stringTable.size();
+  }
+  unsigned int CreateStrInTable(const std::string &inStr, RecordType curTy);
+  void ModifyStrTyInTable(const std::string &inStr, RecordType newTy);
+  void AddDefinedType(unsigned int typesName, std::set<unsigned int> typesSet);
+  void FillMDClass(unsigned int givenIdx, const MDObject &insertObj);
+
+ private:
+  std::string moduleName;
+  std::unordered_map<std::string, StrInfo> stringHashTable;
+  std::vector<std::string> stringTable;
+  unsigned int totalStr = 0;
+  std::unordered_map<unsigned int, std::set<unsigned int>> definedTypes;
+  std::unordered_map<unsigned int, MDClass> allClasses;
+};
+}  /* namespace MDGen */
+
+#endif  /* MAPLEBE_MDGEN_INCLUDE_MDRECORD_H */
diff --git a/ecmascript/mapleall/maple_be/mdgen/include/mdtokens.h b/ecmascript/mapleall/maple_be/mdgen/include/mdtokens.h
new file mode 100644
index 0000000000000000000000000000000000000000..41a703feead3d2add9f6ee522d429112860e3dfd
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/mdgen/include/mdtokens.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDTOKENS_H +#define MAPLEBE_MDGEN_INCLUDE_MDTOKENS_H +enum MDTokenKind { + /* special symbols */ + kMDError, + kMDEOF, + kMDInvalid, + + /* normal symbols */ + kMDOpenParen, /* ( */ + kMDCloseParen, /* ) */ + kMDOpenBrace, /* { */ + kMDCloseBrace, /* } */ + kMDOpenSquare, /* [ */ + kMDCloseSquare, /* ] */ + kMDEqual, /* = */ + kMDSemi, /* ; */ + kMDComma, /* , */ + kMDColon, /* : */ + kMDLess, /* < */ + kMDGreater, /* > */ + kMDLgAnd, /* & */ + kMDLgOr, /* | */ + + kMDIdentifier, + /* const values */ + kMDIntVal, + kMDFloatVal, + kMDDoubleVal, + + /* keywords */ + kMDDef, + kMDClass, + kMDAnonClass, + kMDDefType, +}; + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDTOKENS_H */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/mdgen/src/mdgenerator.cpp b/ecmascript/mapleall/maple_be/mdgen/src/mdgenerator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..29a3fdca5f4077c01b97109eaada060135adc55d --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/src/mdgenerator.cpp @@ -0,0 +1,238 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include +#include +#include "mdgenerator.h" + +namespace MDGen { +std::string MDCodeGen::targetArchName = ""; + +void MDCodeGen::EmitCheckPtr(std::ofstream &outputFile, const std::string &emitName, const std::string &name, + const std::string &ptrType) const { + outputFile << "if(" << emitName << " == nullptr) {\n" << + " maple::LogInfo::MapleLogger(maple::kLlErr) << \"" << ptrType << " allocation for " << name << + " failed.\" << std::endl;\n" << "}\n" << + "DEBUG_ASSERT(" << emitName << ", \"" << ptrType << " allocation for " << name << + " failed.\");\n" << "\n"; +} + +void MDCodeGen::EmitFileHead(std::ofstream &outputFile, const std::string &headInfo) const { + outputFile << "/* " << targetArchName << " " << headInfo << " definition : */\n"; +} + +MDClass MDCodeGen::GetSpecificClass(const std::string &className) { + unsigned int classIdx = curKeeper.GetStrInTable(className).idx; + CHECK_FATAL(classIdx != UINT_MAX, "Load Class Failed!"); + return curKeeper.GetOneMDClass(classIdx); +} + +const std::string &SchedInfoGen::GetArchName() { + MDClass archClass = GetSpecificClass("ArchitectureName"); + const MDObject &archObj = archClass.GetOneMDObject(0); + auto *archStrEle = static_cast(archObj.GetOneMDElement(0)); + return curKeeper.GetStrByIdx(archStrEle->GetContent()); +} + +void SchedInfoGen::EmitArchDef() { + MDClass parallelClass = GetSpecificClass("Parallelism"); + CHECK_FATAL(parallelClass.GetMDObjectSize() > 0, "specific class failed, maybe illegal input"); + const MDObject ¶lleObj = parallelClass.GetOneMDObject(0); + auto *parallelEle = static_cast(paralleObj.GetOneMDElement(0)); + outFile.open(GetOFileDir() + "/mplad_arch_define.def", std::ios::out); + EmitFileHead(outFile, "Architecture"); + outFile << "SetMaxParallelism(" << parallelEle->GetContent() << ");\n"; + outFile.close(); +} + +void SchedInfoGen::EmitUnitIdDef() { + MDClass unitClass = GetSpecificClass("Unit"); + outFile.open(GetOFileDir() + "/mplad_unit_id.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_unit_id.def", GetOFileDir().c_str()); + EmitFileHead(outFile, "function unit ID"); + for (auto unitIdx : unitClass.GetchildObjNames()) { + outFile << " " << curKeeper.GetStrByIdx(unitIdx) << ",\n"; + } + outFile.close(); +} + +void SchedInfoGen::EmitUnitNameDef() { + MDClass unitClass = GetSpecificClass("Unit"); + outFile.open(GetOFileDir() + "/mplad_unit_name.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_unit_name.def", GetOFileDir().c_str()); + EmitFileHead(outFile, "function unit name"); + for (auto unitIdx : unitClass.GetchildObjNames()) { + std::string unitPureName = curKeeper.GetStrByIdx(unitIdx); + std::string unitPrefix = "kUnitId"; + if (unitPrefix.length() < unitPureName.length()) { + unitPureName = unitPureName.substr(unitPrefix.length()); + outFile << "\"" << unitPureName << "\",\n"; + } + } + outFile.close(); +} + +void SchedInfoGen::EmitUnitDef() { + MDClass unitClass = GetSpecificClass("Unit"); + outFile.open(GetOFileDir() + "/mplad_unit_define.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_unit_define.def", GetOFileDir().c_str()); + EmitFileHead(outFile, "function units "); + bool isUnitNumDef = false; + for (size_t i = 0; i < unitClass.GetMDObjectSize(); ++i) { + const MDObject &singleUnit = unitClass.GetOneMDObject(i); + if (singleUnit.GetOneMDElement(0)->GetRecDataTy() == MDElement::kEleDefaultTy) { + continue; + } + auto 
*curUnitTy = static_cast(singleUnit.GetOneMDElement(0)); + std::string curUnitName = curKeeper.GetStrByIdx(singleUnit.GetIdx()); + std::string emitUnitName = "instance" + curUnitName; + std::string unitPrefix = "Unit *" + emitUnitName + " = new Unit("; + if (!isUnitNumDef) { + outFile << "\n"; + outFile << "const unsigned int kunitNum = 2;\n"; + isUnitNumDef = true; + } + outFile << unitPrefix; + if (curUnitTy->GetContent() == curKeeper.GetStrInTable("Primary").idx) { + outFile << curUnitName << ");\n"; + } else { + std::string unitTypeStr = ""; + if (curUnitTy->GetContent() == curKeeper.GetStrInTable("And").idx) { + unitTypeStr = "kUnitTypeAnd"; + } else if (curUnitTy->GetContent() == curKeeper.GetStrInTable("Or").idx) { + unitTypeStr = "kUnitTypeOr"; + } + CHECK_FATAL(unitTypeStr.size() != 0, "Haven't support this kind of Unit yet"); + outFile << unitTypeStr << ", " << curUnitName << ", kunitNum,\n"; + outFile << std::setiosflags(std::ios::right) << std::setw(unitPrefix.length()) << std::setfill(' ') << " "; + unsigned int dependUnitsIndex = 1; + auto *dependUnitEle = static_cast(singleUnit.GetOneMDElement(dependUnitsIndex)); + for (size_t k = 0; k < dependUnitEle->GetVecDataSize(); ++k) { + auto *dependUnit = static_cast(dependUnitEle->GetVecData()[k]); + outFile << "instance" << curKeeper.GetStrByIdx(dependUnit->GetContent()); + if (k != dependUnitEle->GetVecDataSize() - 1) { + outFile << ", "; + } + } + outFile << ");\n"; + } + EmitCheckPtr(outFile, emitUnitName, curUnitName, "Unit"); + } + outFile.close(); +} + +void SchedInfoGen::EmitLatencyDef() { + MDClass resvClass = GetSpecificClass("Reservation"); + outFile.open(GetOFileDir() + "/mplad_latency_type.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_latency_type.def", GetOFileDir().c_str()); + EmitFileHead(outFile, " latency type definition "); + for (auto resvIdx : resvClass.GetchildObjNames()) { + outFile << " " << curKeeper.GetStrByIdx(resvIdx) << ",\n"; + } + outFile.close(); +} + +void SchedInfoGen::EmitResvDef() { + MDClass resvClass = GetSpecificClass("Reservation"); + outFile.open(GetOFileDir() + "/mplad_reservation_define.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_reservation_define.def", + GetOFileDir().c_str()); + EmitFileHead(outFile, "reservations"); + for (size_t i = 0; i < resvClass.GetMDObjectSize(); ++i) { + const MDObject &singleResv = resvClass.GetOneMDObject(i); + if (singleResv.GetOneMDElement(0)->GetRecDataTy() == MDElement::kEleDefaultTy) { + continue; + } + auto *curResvLatency = static_cast(singleResv.GetOneMDElement(0)); + std::string curResvName = curKeeper.GetStrByIdx(singleResv.GetIdx()); + std::string emitResvName = "resvInst" + curResvName; + std::string resvPrefix = "Reservation *" + emitResvName + " = new Reservation("; + outFile << resvPrefix << curResvName << ", " << curResvLatency->GetContent() << ", "; + if (singleResv.GetOneMDElement(1)->GetRecDataTy() == MDElement::kEleDefaultTy) { + outFile << "0);\n"; + } else { + size_t dependUnitsIndex = 1; + auto *dependUnitEle = static_cast(singleResv.GetOneMDElement(dependUnitsIndex)); + outFile << dependUnitEle->GetVecDataSize() << ",\n"; + for (size_t k = 0; k < dependUnitEle->GetVecDataSize(); ++k) { + auto *dependUnit = static_cast(dependUnitEle->GetVecData()[k]); + if (curKeeper.GetStrByIdx(dependUnit->GetContent()) != "nothing") { + outFile << std::setiosflags(std::ios::right) << std::setw(resvPrefix.length()) << std::setfill(' ') + << "GetUnitByUnitId(" 
<< curKeeper.GetStrByIdx(dependUnit->GetContent()) << ")"; + } else { + outFile << std::setiosflags(std::ios::right) << std::setw(resvPrefix.length()) << std::setfill(' ') + << "nullptr"; + } + if (k < dependUnitEle->GetVecDataSize() - 1) { + outFile << ",\n"; + } + } + outFile << ");\n"; + } + EmitCheckPtr(outFile, emitResvName, curResvName, "Reservation"); + } + outFile.close(); +} + +void SchedInfoGen::EmitBypassDef() { + MDClass bypassClass = GetSpecificClass("Bypass"); + outFile.open(GetOFileDir() + "/mplad_bypass_define.def", std::ios::out); + for (size_t i = 0; i < bypassClass.GetMDObjectSize(); ++i) { + const MDObject &singleBypass = bypassClass.GetOneMDObject(i); + if (singleBypass.GetOneMDElement(0)->GetRecDataTy() == MDElement::kEleDefaultTy) { + continue; + } + constexpr size_t fromVecIndex = 1; + constexpr size_t toVecIndex = 2; + constexpr size_t curBpTyIndex = 3; + auto *bpTyEle = singleBypass.GetOneMDElement(curBpTyIndex); + std::string curBypassTy = (bpTyEle->GetRecDataTy() == MDElement::kEleDefaultTy) ? + "" : curKeeper.GetStrByIdx(bpTyEle->GetContent()); + transform(curBypassTy.begin(), curBypassTy.end(), curBypassTy.begin(), ::toupper); + + CHECK_FATAL(singleBypass.GetOneMDElement(0)->GetRecDataTy() == MDElement::ElementTy::kEleIntTy, "Bypass illegal"); + CHECK_FATAL(singleBypass.GetOneMDElement(fromVecIndex)->GetRecDataTy() == MDElement::ElementTy::kEleVecTy, + "Bypass illegal"); + CHECK_FATAL(singleBypass.GetOneMDElement(toVecIndex)->GetRecDataTy() == MDElement::ElementTy::kEleVecTy, + "Bypass illegal"); + + unsigned int bypassNum = static_cast(singleBypass.GetOneMDElement(0))->GetContent(); + auto *fromVec = static_cast(singleBypass.GetOneMDElement(fromVecIndex)); + auto *toVec = static_cast(singleBypass.GetOneMDElement(toVecIndex)); + for (auto itTo : toVec->GetVecData()) { + for (auto itFrom : fromVec->GetVecData()) { + auto *fromResv = static_cast(itFrom); + auto *toResv = static_cast(itTo); + outFile << "ADD" << curBypassTy << "BYPASS(" << curKeeper.GetStrByIdx(fromResv->GetContent()) << ", " + << curKeeper.GetStrByIdx(toResv->GetContent()) << ", " << bypassNum <<");\n"; + } + } + } + outFile.close(); +} + +void SchedInfoGen::Run() { + SetTargetArchName(GetArchName()); + EmitArchDef(); + EmitResvDef(); + EmitBypassDef(); + EmitUnitDef(); + EmitUnitNameDef(); + EmitLatencyDef(); + EmitUnitIdDef(); +} +} /* namespace MDGen */ diff --git a/ecmascript/mapleall/maple_be/mdgen/src/mdlexer.cpp b/ecmascript/mapleall/maple_be/mdgen/src/mdlexer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3bf1b807c38f1744ee96b624bb2ededb4d717b93 --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/src/mdlexer.cpp @@ -0,0 +1,290 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include "mdlexer.h" + +namespace MDGen { +void MDLexer::PrepareFile(const std::string &mdfileName) { + mdFileInternal.open(mdfileName); + if (!mdFileInternal.is_open()) { + CHECK_FATAL(false, "Open target file failed"); + } + mdFile = &mdFileInternal; +} + +MDTokenKind MDLexer::ReturnError() const { + maple::LogInfo::MapleLogger() << "Unexpected character at line " << lineNumber << "\n"; + return kMDError; +} + +int MDLexer::ReadOneLine() { + if (mdFile == nullptr) { + strLine = ""; + return -1; + } + curPos = 0; + if (!std::getline(*mdFile, strLine)) { /* EOF */ + strLine = ""; + mdFile = nullptr; + return -1; + } + RemoveInValidAtBack(); + return GetStrLineSize(); +} + +MDTokenKind MDLexer::NextToken() { + curKind = LexToken(); + return curKind; +} + +MDTokenKind MDLexer::LexToken() { + char c = GetCurChar(); + while (c == ' ' || c == '\t') { /* skip space && tab */ + c = GetNextChar(); + } + while (c == 0) { + if (ReadOneLine() < 0) { + return kMDEOF; + } + lineNumber++; + c = GetCurChar(); + while (c == ' ' || c == '\t') { + c = GetNextChar(); + } + } + curPos++; + switch (c) { + case '(': + return kMDOpenParen; + case ')': + return kMDCloseParen; + case '{': + return kMDOpenBrace; + case '}': + return kMDCloseBrace; + case '[': + return kMDOpenSquare; + case ']': + return kMDCloseSquare; + case '<': + return kMDLess; + case '>': + return kMDGreater; + case ';': + return kMDSemi; + case ',': + return kMDComma; + case ':': + return kMDColon; + case '=': + return kMDEqual; + case '&': + return kMDLgAnd; + case '|': + return kMDLgOr; + case '0': /* start handling number */ + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '-': + curPos--; /* support HEX and INTEGER at present */ + return GetTokenConstVal(); + case '/': { /* handle comment; */ + char cn = GetCurChar(); + if (cn == '/') { + SkipALineComment(); + } else if (cn == '*') { + if (!SkipCComment()) { + return kMDError; + } + } else { + return ReturnError(); + } + return LexToken(); + } + default: + if (isalpha(c) || c == '_') { + return GetTokenIdentifier(); /* identifier handling may need to be modified */ + } + return ReturnError(); + } +} + +MDTokenKind MDLexer::GetTokenConstVal() { + bool negative = false; + char curC = GetCurChar(); + if (curC == '-') { + curC = GetNextChar(); + /* have Special Float const? */ + negative = true; + } + const uint32 hexPrefixLength = 2; + if (strLine.compare(curPos, hexPrefixLength, "0x") == 0) { + curPos += hexPrefixLength; + return GetHexConst(curPos, negative); + } + uint32 digitStartPos = curPos; + char digitStartC = GetCurChar(); + while (isdigit(curC)) { + curC = GetNextChar(); + } + if (!isdigit(digitStartC) && curC != '.') { + return kMDInvalid; + } + if (curC != '.' && curC != 'e' && curC != 'E') { + return GetIntConst(digitStartPos, negative); + } + return GetFloatConst(); +} + +MDTokenKind MDLexer::GetHexConst(uint32 digitStartPos, bool isNegative) { + if (digitStartPos >= strLine.length()) { + return ReturnError(); + } + char c = GetCurChar(); + if (!isxdigit(c)) { + return kMDInvalid; + } + int loopDepth = 0; + while (isxdigit(c)) { + c = GetNextChar(); + ++loopDepth; + if (loopDepth > maxNumLength) { + return ReturnError(); + } + } + std::string hexStr = strLine.substr(digitStartPos, curPos - digitStartPos); + const char *hexStrPtr = hexStr.c_str(); + errno = 0; + constexpr int hexInDec = 16; + intVal = static_cast<int64>(std::strtoll(hexStrPtr, nullptr, hexInDec)); + if (errno == EINVAL) { /* Invalid hexadecimal number */ + return ReturnError(); + } + if (errno == ERANGE) { + errno = 0; + intVal = static_cast<int64>(std::strtoll(hexStrPtr, nullptr, hexInDec)); + if (errno == EINVAL) { /* Invalid hexadecimal number */ + return ReturnError(); + } + if (errno == ERANGE) { /* input number is out of range */ + return ReturnError(); + } + } + if (isNegative) { + intVal = -intVal; + } + return kMDIntVal; +} + +MDTokenKind MDLexer::GetIntConst(uint32 digitStartPos, bool isNegative) { + char c = GetCharAt(digitStartPos); + /* no ULL LL suffix at present */ + int loopDepth = 0; + while (isdigit(c)) { + c = GetNextChar(); + ++loopDepth; + if (loopDepth > maxNumLength) { + return ReturnError(); + } + } + curPos--; + if (digitStartPos >= strLine.length() || digitStartPos > curPos) { + return ReturnError(); + } + std::string intStr = strLine.substr(digitStartPos, curPos - digitStartPos); + const char *intStrPtr = intStr.c_str(); + errno = 0; + constexpr int decInDec = 10; + intVal = static_cast<int64>(std::strtoll(intStrPtr, nullptr, decInDec)); + if (errno == ERANGE) { + return ReturnError(); + } + if (isNegative) { + intVal = -intVal; + } + return kMDIntVal; +} + +MDTokenKind MDLexer::GetFloatConst() { + floatVal = 0; + return kMDInvalid; +} + +MDTokenKind MDLexer::GetTokenIdentifier() { + --curPos; + uint32 startPos = curPos; + char curC = GetCurChar(); + + while (isalnum(curC) || curC == '_' || curC == '-' || curC < 0) { + curC = GetNextChar(); + } + if (startPos >= strLine.length()) { + return ReturnError(); + } + strToken = strLine.substr(startPos, curPos - startPos); + auto it = keywords.find(strToken); + if (it != keywords.end()) { + return it->second; + } + return kMDIdentifier; +} + +void MDLexer::SkipALineComment() { + while (curPos < GetStrLineSize()) { + curPos++; + } + /* if the comment is required to be stored, it can be done here */ +}
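A quick standalone sanity check of the hex path in `GetHexConst` above; this is editor-added illustration, not patch code (the `0x` prefix is consumed by the lexer before `strtoll` sees the digits):

```
#include <cerrno>
#include <cstdio>
#include <cstdlib>

int main() {
  errno = 0;
  // Same idiom as GetHexConst: base-16 strtoll with errno checked afterwards.
  long long v = std::strtoll("1F", nullptr, 16);
  if (errno == ERANGE || errno == EINVAL) {
    return 1;  // out of range / invalid, as GetHexConst reports via ReturnError()
  }
  std::printf("%lld\n", v);  // prints 31
  return 0;
}
```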
+ +bool MDLexer::SkipCComment() { + bool startAnewLine = false; + char commentNext; + while (true) { + if (!startAnewLine) { + commentNext = GetNextChar(); + } else { + commentNext = GetCurChar(); + startAnewLine = false; + } + switch (commentNext) { + case 0: + if (ReadOneLine() < 0) { + DEBUG_ASSERT(false, "Unterminated comment"); + return false; + } + ++lineNumber; + startAnewLine = true; + break; + case '*': + commentNext = GetNextChar(); + if (commentNext == '/') { + ++curPos; + return true; + } + break; + default: + break; + } + } + return false; +} +} diff --git a/ecmascript/mapleall/maple_be/mdgen/src/mdmain.cpp b/ecmascript/mapleall/maple_be/mdgen/src/mdmain.cpp new file mode 100644 index 0000000000000000000000000000000000000000..628943ff9d14e1d27873cf336c5d1f47737ce835 --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/src/mdmain.cpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include <getopt.h> +#include "mdparser.h" +#include "mdgenerator.h" + +using namespace MDGen; +namespace { + bool isGenSched = false; + std::string schedSrcPath = ""; + std::string oFileDir = ""; +} + +static int PrintHelpAndExit() { + maple::LogInfo::MapleLogger() << "Maplegen is used to process md files and " << + "generate architecture-specific information in def files\n" << + "usage: ./mplgen xxx.md outputDirectory\n"; + return 1; +} + +void ParseCommandLine(int argc, char **argv) { + int opt; + int gOptionIndex = 0; + std::string optStr = "s:o:"; + static struct option longOptions[] = { + {"genSchdInfo", required_argument, NULL, 's'}, + {"outDirectory", required_argument, NULL, 'o'}, + {0, 0, 0, 0} + }; + while ((opt = getopt_long(argc, argv, optStr.c_str(), longOptions, &gOptionIndex)) != -1) { + switch (opt) { + case 's': + isGenSched = true; + schedSrcPath = optarg; + break; + case 'o': + oFileDir = optarg; + break; + default: + break; + } + } +} + +bool GenSchedFiles(const std::string &fileName, const std::string &fileDir) { + maple::MemPool *schedInfoMemPool = memPoolCtrler.NewMemPool("schedInfoMp", false /* isLocalPool */); + MDClassRange moduleData("Schedule"); + MDParser parser(moduleData, schedInfoMemPool); + if (!parser.ParseFile(fileName)) { + delete schedInfoMemPool; + return false; + } + SchedInfoGen schedEmitter(moduleData, fileDir); + schedEmitter.Run(); + delete schedInfoMemPool; + return true; +} + +int main(int argc, char **argv) { + constexpr int minimumArgNum = 2; + if (argc <= minimumArgNum) { + return PrintHelpAndExit(); + } + ParseCommandLine(argc, argv); + if (isGenSched) { + if (!GenSchedFiles(schedSrcPath, oFileDir)) { + return 1; + } + } + return 0; +}
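Given the option table above, a hedged usage sketch (paths are placeholders):

```
./mplgen -s /path/to/target.md -o /path/to/output_dir
# long form: ./mplgen --genSchdInfo /path/to/target.md --outDirectory /path/to/output_dir
```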
diff --git a/ecmascript/mapleall/maple_be/mdgen/src/mdparser.cpp b/ecmascript/mapleall/maple_be/mdgen/src/mdparser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1bd058c6d93388c2d0b49ef1f6253f65c8ea364 --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/src/mdparser.cpp @@ -0,0 +1,380 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mdparser.h" + +namespace MDGen { +bool MDParser::ParseFile(const std::string &inputFile) { + lexer.PrepareFile(inputFile); + if (!ParseObjectStart()) { + return false; + } + if (lexer.GetCurKind() == kMDEOF) { + return true; + } + return EmitError("Unexpected input at the beginning"); +} + +bool MDParser::IsObjectStart(MDTokenKind k) const { + return (k == kMDDef || k == kMDClass || k == kMDDefType); +} + +bool MDParser::ParseObjectStart() { + while (IsObjectStart(lexer.NextToken())) { + if (!ParseObject()) { + return false; + } + } + return true; +} + +bool MDParser::ParseObject() { + switch (lexer.GetCurKind()) { + case kMDDefType: + return ParseDefType(); + case kMDClass: + return ParseMDClass(); + case kMDDef: + return ParseMDObject(); + default: + return EmitError("Unexpected keyword at start"); + } +} + +bool MDParser::ParseDefType() { + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific type is defined"); + } + unsigned int defTypeIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kTypeName); + if (defTypeIdx == UINT_MAX) { + return EmitError("Invalid defType is defined"); + } + if (lexer.NextToken() != kMDEqual) { + return EmitError("Expect an equal sign when a specific type is going to be instantiated"); + } + std::set<unsigned int> defTypeMembers; + while (lexer.NextToken() != kMDSemi) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + unsigned int defTypeMemberIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kTypeMemberName); + if (defTypeMemberIdx == UINT_MAX || !defTypeMembers.insert(defTypeMemberIdx).second) { + return EmitError("Invalid defType member is defined"); + } + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + dataKeeper.AddDefinedType(defTypeIdx, defTypeMembers); + return (lexer.GetCurKind() == kMDSemi) ? true : EmitError("Expected an ending with a semicolon"); +} + +bool MDParser::ParseMDClass() { + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific class is defined"); + } + unsigned int classIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kClassName); + if (classIdx == UINT_MAX) { + return EmitError("Invalid class name. Please change the class name"); + } + bool isAnon = true; + if (lexer.NextToken() == kMDColon) { + isAnon = false; + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific class is defined"); + } + if (lexer.GetStrToken() != "string") { + return EmitError("Only support string as the class name type at the current stage"); + } + static_cast<void>(lexer.NextToken()); + } + if (isAnon) { + dataKeeper.ModifyStrTyInTable(lexer.GetStrToken(), kAnonClassName); + } + MDClass oneMDclass(classIdx, isAnon); + if (lexer.GetCurKind() != kMDLess) { + return EmitError("Expect a '<' before the class structure is defined"); + } + + while (lexer.NextToken() != kMDGreater) { + if (!ParseMDClassBody(oneMDclass)) { + return false; + } + } + dataKeeper.AddMDClass(oneMDclass); + return (lexer.NextToken() == kMDSemi) ? true : EmitError("Expected an ending with a semicolon"); +} + +bool MDParser::ParseMDClassBody(MDClass &oneClass) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + StrInfo defTypeInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + if (defTypeInfo.idx == UINT_MAX || !oneClass.IsValidStructEle(defTypeInfo.sType)) { + return EmitError("Expect a defined type to be a member of a class"); + } + bool isVec = false; + if (lexer.ViewNextChar() == '[') { + if (lexer.NextToken() != kMDOpenSquare || lexer.NextToken() != kMDCloseSquare) { + return EmitError("Expect a \"[]\" to represent a list element"); + } + isVec = true; + } + oneClass.BuildFormalTypes(defTypeInfo.idx, isVec); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + return true; +} + +bool MDParser::ParseMDObject() { + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific object is defined"); + } + StrInfo parentInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + if (parentInfo.idx == UINT_MAX || (parentInfo.sType != kClassName && parentInfo.sType != kAnonClassName)) { + return EmitError("A new object should belong to a defined class"); + } + MDClass parentClass = dataKeeper.GetOneMDClass(parentInfo.idx); + unsigned int objectIdx = UINT_MAX; + if (!parentClass.IsAnonymousClass()) { + if (lexer.NextToken() != kMDColon) { + return EmitError("Expect a colon when an object name is going to be defined"); + } + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name for a specific object"); + } + objectIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kObjectName); + if (objectIdx == UINT_MAX) { + return EmitError("Invalid object name!"); + } + } + MDObject *curObj = mdMemPool->New<MDObject>(objectIdx, parentClass, *mdMemPool); + if (lexer.NextToken() != kMDOpenBrace) { + return EmitError("Expect an OpenBrace before an object body is defined"); + } + if (!ParseMDObjBody(*curObj)) { + return false; + } + dataKeeper.FillMDClass(parentInfo.idx, *curObj); + return (lexer.NextToken() == kMDSemi) ? true : EmitError("Expected an ending with a semicolon"); +}
true : EmitError("Expected an ending with a semicolon"); +} + +bool MDParser::ParseMDObjBody(MDObject &curObj) { + bool hasDefault = false; + for (size_t i = 0; i < curObj.GetParentClass()->GetFormalTypeSize(); ++i) { + if (hasDefault) { + DefaultElement *defaultEle = mdMemPool->New(); + curObj.AddMDElements(defaultEle); + continue; + } + MDTokenKind curKind = lexer.NextToken(); + if (i != 0 && (curKind != kMDComma && curKind != kMDCloseBrace)) { + return EmitError("Unexpected Gramma when define a object"); + } + if (curKind == kMDComma) { + curKind = lexer.NextToken(); + } + if (curKind == kMDCloseBrace) { + hasDefault = true; + DefaultElement *defaultEle = mdMemPool->New(); + curObj.AddMDElements(defaultEle); + continue; + } + unsigned int typeIdx = curObj.GetParentClass()->GetFormalTypes().at(i).first; + bool isVec = curObj.GetParentClass()->GetFormalTypes().at(i).second; + if (dataKeeper.GetStrTyByIdx(typeIdx) == kIntType) { + if (!ParseIntElement(curObj, isVec)) { + return false; + } + } else if (dataKeeper.GetStrTyByIdx(typeIdx) == kStringType) { + if (!ParseStrElement(curObj, isVec)) { + return false; + } + } else if (dataKeeper.GetStrTyByIdx(typeIdx) == kTypeName) { + std::set childSet = dataKeeper.GetOneSpcType(typeIdx); + if (!ParseDefTyElement(curObj, isVec, childSet)) { + return false; + } + } else if (dataKeeper.GetStrTyByIdx(typeIdx) == kClassName) { + MDClass pClass = dataKeeper.GetOneMDClass(typeIdx); + if (!ParseDefObjElement(curObj, isVec, pClass)) { + return false; + } + } + } + if (lexer.GetCurKind() == kMDCloseBrace) { + return true; + } + return (lexer.NextToken() != kMDCloseBrace) ? EmitError("Expect a CloseBrace as end of object definition") : true; +} + +bool MDParser::ParseIntElement(MDObject &curObj, bool isVec) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIntVal: { + IntElement *singleEle = mdMemPool->New(lexer.GetIntVal()); + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIntVal) { + return EmitError("Expect a integer elemet as defined"); + } + IntElement *curEle = mdMemPool->New(lexer.GetIntVal()); + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::ParseStrElement(MDObject &curObj, bool isVec) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + unsigned int elementIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kElementName); + if (elementIdx == UINT_MAX) { + return EmitError("Duplicate string name has already been defined"); + } + StringElement *singleEle = mdMemPool->New(elementIdx); + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIdentifier) { + return EmitError("Expect a string elemet as defined"); + } + unsigned int elementIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kElementName); + if (elementIdx == UINT_MAX) { + return 
EmitError("Duplicate string name has already been defined"); + } + StringElement *curEle = mdMemPool->New(elementIdx); + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::ParseDefTyElement(MDObject &curObj, bool isVec, const std::set &childSet) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + StrInfo defTypeInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefTyElement *singleEle = mdMemPool->New(); + if (!singleEle->SetContent(defTypeInfo, childSet)) { + return EmitError("Expect a input element which has been defined as a type"); + } + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIdentifier) { + return EmitError("Expect a string elemet as defined"); + } + StrInfo defTypeInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefTyElement *curEle = mdMemPool->New(); + if (!curEle->SetContent(defTypeInfo, childSet)) { + return EmitError("Expect a input element which has been defined as a type"); + } + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::ParseDefObjElement(MDObject &curObj, bool isVec, const MDClass &pClass) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + StrInfo defObjInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefObjElement *singleEle = mdMemPool->New(); + if (!singleEle->SetContent(defObjInfo, pClass)) { + return EmitError("Expect a input element which has been defined as a object"); + } + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIdentifier) { + return EmitError("Expect a integer elemet as defined"); + } + StrInfo defObjInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefObjElement *curEle = mdMemPool->New(); + if (!curEle->SetContent(defObjInfo, pClass)) { + return EmitError("Expect a input element which has been defined as a object"); + } + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::EmitError(const std::string &errMsg) { + maple::LogInfo::MapleLogger() << errMsg << "\n"; + maple::LogInfo::MapleLogger() << "A Error Appear At Line " << lexer.GetLineNumber() << "\n"; + maple::LogInfo::MapleLogger() << "Source code : " << lexer.GetStrLine() << "\n"; + return false; +} +} /* namespace MDGen */ diff --git a/ecmascript/mapleall/maple_be/mdgen/src/mdrecord.cpp b/ecmascript/mapleall/maple_be/mdgen/src/mdrecord.cpp new file mode 100644 index 0000000000000000000000000000000000000000..50037bbad74ba5a8da0b32b3708f39f7007cd3e1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/src/mdrecord.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
diff --git a/ecmascript/mapleall/maple_be/mdgen/src/mdrecord.cpp b/ecmascript/mapleall/maple_be/mdgen/src/mdrecord.cpp new file mode 100644 index 0000000000000000000000000000000000000000..50037bbad74ba5a8da0b32b3708f39f7007cd3e1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/mdgen/src/mdrecord.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mdrecord.h" + +namespace MDGen { +constexpr unsigned int kInValidStrIdx = UINT_MAX; + +bool DefTyElement::SetContent(const StrInfo curInfo, const std::set<unsigned int> &childTySet) { + if (!childTySet.count(curInfo.idx)) { + return false; + } + elementIdx = curInfo.idx; + return true; +} + +bool DefObjElement::SetContent(const StrInfo curInfo, const MDClass &parentClass) { + if (!parentClass.IsClassMember(curInfo.idx)) { + return false; + } + elementIdx = curInfo.idx; + return true; +} + +const MDElement *MDObject::GetOneMDElement(size_t index) const { + CHECK_FATAL(index < mdElements.size(), "Array boundary check failed"); + return mdElements[index]; +} + +const MDObject &MDClass::GetOneMDObject(size_t index) const { + CHECK_FATAL(index < mdObjects.size(), "Array boundary check failed"); + return mdObjects[index]; +} + +void MDClass::AddClassMember(MDObject inputObj) { + mdObjects.emplace_back(inputObj); + (void)childObjNames.insert(inputObj.GetIdx()); +} + +bool MDClass::IsClassMember(unsigned int curIdx) const { + return childObjNames.count(curIdx); +} + +void MDClass::BuildFormalTypes(unsigned int memberIdx, bool isVec) { + formalTypes.emplace_back(std::make_pair(memberIdx, isVec)); +} + +bool MDClass::IsValidStructEle(RecordType curTy) const { + return (curTy == kTypeName || curTy == kClassName || curTy == kIntType || curTy == kStringType); +} + +unsigned int MDClassRange::CreateStrInTable(const std::string &inStr, RecordType curTy) { + unsigned int result = kInValidStrIdx; + StrInfo curInfo(totalStr, curTy); + auto ret = stringHashTable.insert(std::make_pair(inStr, curInfo)); + if (ret.second) { + unsigned int temp = totalStr; + stringTable.emplace_back(inStr); + ++totalStr; + return temp; + } + return result; +} + +StrInfo MDClassRange::GetStrInTable(const std::string &inStr) { + auto ret = stringHashTable.find(inStr); + StrInfo inValidInfo(UINT_MAX, kUndefinedStr); + return (ret != stringHashTable.end()) ? ret->second : inValidInfo; +} + +RecordType MDClassRange::GetStrTyByIdx(size_t curIdx) { + CHECK_FATAL(curIdx < stringTable.size(), "Array boundary check failed"); + return GetStrInTable(stringTable[curIdx]).sType; +} + +const std::string &MDClassRange::GetStrByIdx(size_t curIdx) { + CHECK_FATAL(curIdx < stringTable.size(), "Array boundary check failed"); + return stringTable[curIdx]; +} + +void MDClassRange::ModifyStrTyInTable(const std::string &inStr, RecordType newTy) { + auto ret = stringHashTable.find(inStr); + CHECK_FATAL(ret != stringHashTable.end(), "find string failed!"); + ret->second.sType = newTy; +} + +void MDClassRange::AddDefinedType(unsigned int typesName, std::set<unsigned int> typesSet) { + (void)definedTypes.insert(std::make_pair(typesName, typesSet)); +} + +void MDClassRange::AddMDClass(MDClass curClass) { + (void)allClasses.insert(std::make_pair(curClass.GetClassIdx(), curClass)); +} + +void MDClassRange::FillMDClass(unsigned int givenIdx, const MDObject &insertObj) { + auto ret = allClasses.find(givenIdx); + CHECK_FATAL(ret != allClasses.end(), "Cannot find target MD Class"); + ret->second.AddClassMember(insertObj); +} + +MDClass MDClassRange::GetOneMDClass(unsigned int givenIdx) { + auto ret = allClasses.find(givenIdx); + CHECK_FATAL(ret != allClasses.end(), "Cannot find target MD Class"); + return ret->second; +} + +std::set<unsigned int> MDClassRange::GetOneSpcType(unsigned int givenTyIdx) { + auto ret = definedTypes.find(givenTyIdx); + CHECK_FATAL(ret != definedTypes.end(), "Cannot find a defined type"); + return ret->second; +} +} /* namespace MDGen */ diff --git a/ecmascript/mapleall/maple_be/src/ad/mad.cpp b/ecmascript/mapleall/maple_be/src/ad/mad.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c53940c4a1a5d28b32595c2d2cce2c2592b41839 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/ad/mad.cpp @@ -0,0 +1,389 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mad.h" +#include +#if TARGAARCH64 +#include "aarch64_operand.h" +#elif TARGRISCV64 +#include "riscv64_operand.h" +#endif +#include "schedule.h" +#include "insn.h" + +namespace maplebe { +const std::string kUnitName[] = { +#include "mplad_unit_name.def" + "None", +}; +/* Unit */ +Unit::Unit(enum UnitId theUnitId) + : unitId(theUnitId), unitType(kUnitTypePrimart), occupancyTable(0), compositeUnits() { + MAD::AddUnit(*this); +}
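The variadic constructor below assembles AND/OR composites out of previously constructed units; a hypothetical registration in the spirit of the generated mplad_unit_define.def (the unit pointers and the pairing of ids are invented for illustration):

```
// Illustrative only: an OR-composite over two issue slots.
// slot0Unit / slot1Unit are assumed to be previously created Unit* values.
//   new Unit(kUnitTypeOr, kUnitIdSlotS, 2, slot0Unit, slot1Unit);
// Per IsFree() below: kUnitTypeAnd needs all sub-units free, kUnitTypeOr any one.
```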
+ +Unit::Unit(enum UnitType theUnitType, enum UnitId theUnitId, int numOfUnits, ...) + : unitId(theUnitId), unitType(theUnitType), occupancyTable(0) { + DEBUG_ASSERT(numOfUnits > 1, "CG internal error, composite unit with less than 2 unit elements."); + va_list ap; + va_start(ap, numOfUnits); + + for (int i = 0; i < numOfUnits; ++i) { + compositeUnits.emplace_back(static_cast<Unit*>(va_arg(ap, Unit*))); + } + va_end(ap); + + MAD::AddUnit(*this); +} + +/* return name of unit */ +std::string Unit::GetName() const { + DEBUG_ASSERT(GetUnitId() <= kUnitIdLast, "Unexpected UnitID"); + return kUnitName[GetUnitId()]; +} + +/* Check if unit is free at next "cycle" cycle. */ +bool Unit::IsFree(uint32 cycle) const { + if (GetUnitType() == kUnitTypeOr) { + for (auto unit : compositeUnits) { + if (unit->IsFree(cycle)) { + return true; + } + } + return false; + } else if (GetUnitType() == kUnitTypeAnd) { + for (auto unit : compositeUnits) { + if (!unit->IsFree(cycle)) { + return false; + } + } + return true; + } + if ((occupancyTable & (1u << cycle)) != 0) { + return false; + } + return true; +} + +/* Occupy unit at next "cycle" cycle. */ +void Unit::Occupy(const Insn &insn, uint32 cycle) { + if (GetUnitType() == kUnitTypeOr) { + for (auto unit : GetCompositeUnits()) { + if (unit->IsFree(cycle)) { + unit->Occupy(insn, cycle); + return; + } + } + + DEBUG_ASSERT(false, "CG internal error, should not reach here."); + return; + } else if (GetUnitType() == kUnitTypeAnd) { + for (auto unit : GetCompositeUnits()) { + unit->Occupy(insn, cycle); + } + return; + } + occupancyTable |= (1u << cycle); +} + +/* Advance all units one cycle */ +void Unit::AdvanceCycle() { + if (GetUnitType() != kUnitTypePrimart) { + return; + } + occupancyTable = (occupancyTable >> 1); +} + +/* Release all units. */ +void Unit::Release() { + if (GetUnitType() != kUnitTypePrimart) { + return; + } + occupancyTable = 0; +} + +const std::vector<Unit*> &Unit::GetCompositeUnits() const { + return compositeUnits; +} + +void Unit::PrintIndent(int indent) const { + for (int i = 0; i < indent; ++i) { + LogInfo::MapleLogger() << " "; + } +} + +void Unit::Dump(int indent) const { + PrintIndent(indent); + LogInfo::MapleLogger() << "Unit " << GetName() << " (ID " << GetUnitId() << "): "; + LogInfo::MapleLogger() << "occupancyTable = " << occupancyTable << '\n'; +} + +uint32 Unit::GetOccupancyTable() const { + return occupancyTable; +} + +/* MAD */ +int MAD::parallelism; +std::vector<Unit*> MAD::allUnits; +std::vector<Reservation*> MAD::allReservations; +std::array<std::array<BypassVector, kLtLast>, kLtLast> MAD::bypassArrays; + +MAD::~MAD() { + for (auto unit : allUnits) { + delete unit; + } + for (auto rev : allReservations) { + delete rev; + } + for (auto &bypassArray : bypassArrays) { + for (auto &bypassVector : bypassArray) { + for (auto *bypass : bypassVector) { + delete bypass; + } + } + } + allUnits.clear(); + allReservations.clear(); +} + +void MAD::InitUnits() const { +#include "mplad_unit_define.def" +} + +void MAD::InitReservation() const { +#include "mplad_reservation_define.def" +} + +void MAD::InitParallelism() const { +#include "mplad_arch_define.def" +} + +/* get a reservation according to the insn's insnType */ +Reservation *MAD::FindReservation(const Insn &insn) const { + uint32 insnType = insn.GetLatencyType(); + for (auto reservation : allReservations) { + if (reservation->IsEqual(insnType)) { + return reservation; + } + } + return nullptr; +} + +/* Get the latency from def insn to use insn */ +int MAD::GetLatency(const Insn &def, const Insn &use) const { + int latency = BypassLatency(def, use); + if (latency < 0) { + latency = DefaultLatency(def); + } + return latency; +} + +/* Get the bypass latency from def insn to use insn */ +int MAD::BypassLatency(const Insn &def, const Insn &use) const { + int latency = -1; + DEBUG_ASSERT(def.GetLatencyType() < kLtLast, "out of range"); + DEBUG_ASSERT(use.GetLatencyType() < kLtLast, "out of range"); + BypassVector &bypassVec = bypassArrays[def.GetLatencyType()][use.GetLatencyType()]; + for (auto bypass : bypassVec) { + if (bypass->CanBypass(def, use)) { + latency = bypass->GetLatency(); + break; + } + } + return latency; +} + +/* Get insn's default latency */ +int MAD::DefaultLatency(const Insn &insn) const { + Reservation *res = insn.GetDepNode()->GetReservation(); + return res != nullptr ? res->GetLatency() : 0; +} + +void MAD::AdvanceCycle() const { + for (auto unit : allUnits) { + unit->AdvanceCycle(); + } +} + +void MAD::ReleaseAllUnits() const { + for (auto unit : allUnits) { + unit->Release(); + } +} + +void MAD::SaveStates(std::vector<uint32> &occupyTable, int size) const { + int i = 0; + for (auto unit : allUnits) { + CHECK_FATAL(i < size, "unit number error"); + occupyTable[i] = unit->GetOccupancyTable(); + ++i; + } +} + +#define ADDBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new Bypass(DEFLTTY, USELTTY, LT))) +#define ADDALUSHIFTBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new AluShiftBypass(DEFLTTY, USELTTY, LT))) +#define ADDACCUMULATORBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new AccumulatorBypass(DEFLTTY, USELTTY, LT))) +#define ADDSTOREBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new StoreBypass(DEFLTTY, USELTTY, LT))) + +void MAD::InitBypass() const { +#include "mplad_bypass_define.def" +} + +bool MAD::IsSlot0Free() const { + if (GetUnitByUnitId(kUnitIdSlot0)->IsFree(0)) { + return false; + } + return true; +} + +bool MAD::IsFullIssued() const { + if (GetUnitByUnitId(kUnitIdSlot0)->IsFree(0) || GetUnitByUnitId(kUnitIdSlot1)->IsFree(0)) { + return false; + } + return true; +} + +void MAD::RestoreStates(std::vector<uint32> &occupyTable, int size) const { + int i = 0; + for (auto unit : allUnits) { + CHECK_FATAL(i < size, "unit number error"); + unit->SetOccupancyTable(occupyTable[i]); + ++i; + } +} + +bool Bypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + (void)defInsn; + (void)useInsn; + return true; +} + +bool AluShiftBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + /* + * hook condition + * true: r1=r2+x1 -> r3=r2<<0x2+r1 + * false:r1=r2+x1 -> r3=r1<<0x2+r2 + */ + return &(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnSecondOpnd)); +} + +bool AccumulatorBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + /* + * hook condition + * true: r98=x0*x1 -> x0=x2*x3+r98 + * false:r98=x0*x1 -> x0=x2*r98+x3 + */ + return (&(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnSecondOpnd)) && + &(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnThirdOpnd))); +} + +bool StoreBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + /* + * hook condition + * true: r96=r92+x2 -> str r96, [r92] + * false:r96=r92+x2 -> str r92, [r96] + * false:r96=r92+x2 -> str r92, [r94, r96] + */ +#if TARGAARCH64 + switch (useInsn.GetMachineOpcode()) { + case MOP_wstrb: + case MOP_wstrh: + case MOP_wstr: + case MOP_xstr: + case MOP_sstr: + case MOP_dstr: { + auto &useMemOpnd =
static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + return (&(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetOffset() && + &(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetBaseRegister()); + } + + default: + return false; + } +#endif + return false; +} + +/* Reservation */ +Reservation::Reservation(LatencyType t, int l, int n, ...) : type(t), latency(l), unitNum(n) { + DEBUG_ASSERT(l >= 0, "CG internal error, latency and unitNum should not be less than 0."); + DEBUG_ASSERT(n >= 0, "CG internal error, latency and unitNum should not be less than 0."); + + errno_t ret = memset_s(units, sizeof(Unit*) * kMaxUnit, 0, sizeof(Unit*) * kMaxUnit); + CHECK_FATAL(ret == EOK, "call memset_s failed in Reservation"); + + va_list ap; + va_start(ap, n); + for (uint32 i = 0; i < unitNum; ++i) { + units[i] = static_cast(va_arg(ap, Unit*)); + } + va_end(ap); + + MAD::AddReservation(*this); + /* init slot */ + if (n > 0) { + /* if there are units, init slot by units[0] */ + slot = GetSlotType(units[0]->GetUnitId()); + } else { + slot = kSlotNone; + } +} + +const std::string kSlotName[] = { + "SlotNone", + "Slot0", + "Slot1", + "SlotAny", + "Slots", +}; + +const std::string &Reservation::GetSlotName() const { + DEBUG_ASSERT(GetSlot() <= kSlots, "Unexpected slot"); + return kUnitName[GetSlot()]; +} + +/* Get slot type by unit id */ +SlotType Reservation::GetSlotType(UnitId unitID) const { + switch (unitID) { + case kUnitIdSlot0: + case kUnitIdSlot0LdAgu: + case kUnitIdSlot0StAgu: + return kSlot0; + + case kUnitIdSlot1: + return kSlot1; + + case kUnitIdSlotS: + case kUnitIdSlotSHazard: + case kUnitIdSlotSMul: + case kUnitIdSlotSBranch: + case kUnitIdSlotSAgen: + return kSlotAny; + + case kUnitIdSlotD: + case kUnitIdSlotDAgen: + return kSlots; + + default: + DEBUG_ASSERT(false, "unknown slot type!"); + return kSlotNone; + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/be/bbt.cpp b/ecmascript/mapleall/maple_be/src/be/bbt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bc00a8b515ecc5d0059a4de275bb7ce1eefdb897 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/be/bbt.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bbt.h" +namespace maplebe { +#if DEBUG +void BBT::Dump(const MIRModule &mod) const { + if (IsTry()) { + LogInfo::MapleLogger() << "Try" << '\n'; + } else if (IsEndTry()) { + LogInfo::MapleLogger() << "EndTry" << '\n'; + } else if (IsCatch()) { + LogInfo::MapleLogger() << "Catch" << '\n'; + } else { + LogInfo::MapleLogger() << "Plain" << '\n'; + } + if (firstStmt != nullptr) { + firstStmt->Dump(0); + LogInfo::MapleLogger() << '\n'; + if (keyStmt != nullptr) { + keyStmt->Dump(0); + LogInfo::MapleLogger() << '\n'; + } else { + LogInfo::MapleLogger() << "<>" << '\n'; + } + if (lastStmt != nullptr) { + lastStmt->Dump(0); + } + LogInfo::MapleLogger() << '\n'; + } else { + LogInfo::MapleLogger() << "<>" << '\n'; + } +} + +void BBT::ValidateStmtList(StmtNode *head, StmtNode *detached) { + static int nStmts = 0; + int n = 0; + int m = 0; + if (head == nullptr && detached == nullptr) { + nStmts = 0; + return; + } + for (StmtNode *s = head; s != nullptr; s = s->GetNext()) { + if (s->GetNext() != nullptr) { + CHECK_FATAL(s->GetNext()->GetPrev() == s, "make sure the prev node of s' next is s"); + } + if (s->GetPrev() != nullptr) { + CHECK_FATAL(s->GetPrev()->GetNext() == s, "make sure the next node of s' prev is s"); + } + ++n; + } + for (StmtNode *s = detached; s != nullptr; s = s->GetNext()) { + if (s->GetNext() != nullptr) { + CHECK_FATAL(s->GetNext()->GetPrev() == s, "make sure the prev node of s' next is s"); + } + if (s->GetPrev() != nullptr) { + CHECK_FATAL(s->GetPrev()->GetNext() == s, "make sure the next node of s' prev is s"); + } + ++m; + } + CHECK_FATAL(nStmts <= n + m, "make sure nStmts <= n + m"); + nStmts = n + m; +} +#endif +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/be/becommon.cpp b/ecmascript/mapleall/maple_be/src/be/becommon.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e20b1f9bb0a577fc789e3da4cffa492837c05e44 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/be/becommon.cpp @@ -0,0 +1,843 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "becommon.h" +#include "rt.h" +#include "cg_option.h" +#include "mir_builder.h" +#include "mpl_logging.h" +#include +#include + +namespace maplebe { +using namespace maple; + +BECommon::BECommon(MIRModule &mod) + : mirModule(mod), + typeSizeTable(GlobalTables::GetTypeTable().GetTypeTable().size(), 0, mirModule.GetMPAllocator().Adapter()), + typeAlignTable(GlobalTables::GetTypeTable().GetTypeTable().size(), static_cast(mirModule.IsCModule()), + mirModule.GetMPAllocator().Adapter()), + typeHasFlexibleArray(GlobalTables::GetTypeTable().GetTypeTable().size(), 0, mirModule.GetMPAllocator().Adapter()), + structFieldCountTable(GlobalTables::GetTypeTable().GetTypeTable().size(), + 0, mirModule.GetMPAllocator().Adapter()), + jClassLayoutTable(mirModule.GetMPAllocator().Adapter()), + funcReturnType(mirModule.GetMPAllocator().Adapter()) { + for (uint32 i = 1; i < GlobalTables::GetTypeTable().GetTypeTable().size(); ++i) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[i]; + ComputeTypeSizesAligns(*ty); + LowerTypeAttribute(*ty); + } + + if (mirModule.IsJavaModule()) { + for (uint32 i = 0; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbol(i); + if (sym == nullptr) { + continue; + } + LowerJavaVolatileForSymbol(*sym); + } + } +} + +/* + * try to find an available padding slot, and allocate the given field in it. + * return the offset of the allocated memory. 0 if not available + * Note: this will update lists in paddingSlots + * Note: padding slots is a list of un-occupied (small size) slots + * available to allocate new fields. so far, just for 1, 2, 4 bytes + * types (map to array index 0, 1, 2) + */ +static uint32 TryAllocInPaddingSlots(std::list paddingSlots[], + uint32 fieldSize, + uint32 fieldAlign, + size_t paddingSlotsLength) { + CHECK_FATAL(paddingSlotsLength > 0, "expect paddingSlotsLength > 0"); + if (fieldSize > 4) { + return 0; /* padding slots are for size 1/2/4 bytes */ + } + + uint32 fieldOffset = 0; + /* here is a greedy search */ + for (size_t freeSlot = static_cast(fieldSize >> 1); freeSlot < paddingSlotsLength; ++freeSlot) { + if (!paddingSlots[freeSlot].empty()) { + uint32 paddingOffset = paddingSlots[freeSlot].front(); + if (IsAlignedTo(paddingOffset, fieldAlign)) { + /* reuse one padding slot */ + paddingSlots[freeSlot].pop_front(); + fieldOffset = paddingOffset; + /* check whether there're still space left in this slot */ + uint32 leftSize = (1u << freeSlot) - fieldSize; + if (leftSize != 0) { + uint32 leftOffset = paddingOffset + fieldSize; + if (leftSize & 0x1) { /* check whether the last bit is 1 */ + paddingSlots[0].push_front(leftOffset); + leftOffset += 1; + } + if (leftSize & 0x2) { /* check whether the penultimate bit is 1 */ + paddingSlots[1].push_front(leftOffset); + } + } + break; + } + } + } + return fieldOffset; +} + +static void AddPaddingSlot(std::list paddingSlots[], uint32 offset, uint32 size, size_t paddingSlotsLength) { + CHECK_FATAL(paddingSlotsLength > 0, "expect paddingSlotsLength > 0"); + /* + * decompose the padding into 1/2/4 bytes slots. + * to satisfy alignment constraints. 
+ */ + for (size_t i = 0; i < paddingSlotsLength; ++i) { + if (size & (1u << i)) { + paddingSlots[i].push_front(offset); + offset += (1u << i); + } + } +} + +void BECommon::AddNewTypeAfterBecommon(uint32 oldTypeTableSize, uint32 newTypeTableSize) { + for (auto i = oldTypeTableSize; i < newTypeTableSize; ++i) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(i); + CHECK_NULL_FATAL(ty); + typeSizeTable.emplace_back(0); + typeAlignTable.emplace_back(static_cast(mirModule.IsCModule())); + typeHasFlexibleArray.emplace_back(0); + structFieldCountTable.emplace_back(0); + ComputeTypeSizesAligns(*ty); + LowerTypeAttribute(*ty); + } +} + +void BECommon::ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { + auto &structType = static_cast(ty); + const FieldVector &fields = structType.GetFields(); + uint64 allocedSize = 0; + uint64 allocedSizeInBits = 0; + SetStructFieldCount(structType.GetTypeIndex(), fields.size()); + if (fields.size() == 0) { + if (structType.IsCPlusPlus()) { + SetTypeSize(tyIdx.GetIdx(), 1); /* empty struct in C++ has size 1 */ + SetTypeAlign(tyIdx.GetIdx(), 1); + } else { + SetTypeSize(tyIdx.GetIdx(), 0); + SetTypeAlign(tyIdx.GetIdx(), k8ByteSize); + } + return; + } + auto structAttr = structType.GetTypeAttrs(); + auto structPack = static_cast(structAttr.GetPack()); + for (uint32 j = 0; j < fields.size(); ++j) { + TyIdx fieldTyIdx = fields[j].second.first; + auto fieldAttr = fields[j].second.second; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + uint32 fieldTypeSize = GetTypeSize(fieldTyIdx); + if (fieldTypeSize == 0) { + ComputeTypeSizesAligns(*fieldType); + fieldTypeSize = GetTypeSize(fieldTyIdx); + } + uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte; + auto attrAlign = static_cast(fieldAttr.GetAlign()); + auto originAlign = std::max(attrAlign, GetTypeAlign(fieldTyIdx)); + uint8 fieldAlign = fieldAttr.IsPacked() ? 1 : std::min(originAlign, structPack); + uint64 fieldAlignBits = fieldAlign * kBitsPerByte; + CHECK_FATAL(fieldAlign != 0, "expect fieldAlign not equal 0"); + MIRStructType *subStructType = fieldType->EmbeddedStructType(); + if (subStructType != nullptr) { + AppendStructFieldCount(structType.GetTypeIndex(), GetStructFieldCount(subStructType->GetTypeIndex())); + } + if (structType.GetKind() != kTypeUnion) { + if (fieldType->GetKind() == kTypeBitField) { + uint32 fieldSize = static_cast(fieldType)->GetFieldSize(); + /* is this field is crossing the align boundary of its base type? */ + if ((!structAttr.IsPacked() && + ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) || + fieldSize == 0) { + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits); + } + /* allocate the bitfield */ + allocedSizeInBits += fieldSize; + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); + } else { + bool leftoverbits = false; + + if (allocedSizeInBits == allocedSize * kBitsPerByte) { + allocedSize = RoundUp(allocedSize, fieldAlign); + } else { + /* still some leftover bits on allocated words, we calculate things based on bits then. 
*/ + if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fieldSizeBits - 1) / fieldAlignBits) { + /* the field is crossing the align boundary of its base type */ + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits); + } + leftoverbits = true; + } + if (leftoverbits) { + allocedSizeInBits += fieldSizeBits; + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); + } else { + /* pad alloced_size according to the field alignment */ + allocedSize = RoundUp(allocedSize, fieldAlign); + allocedSize += fieldTypeSize; + allocedSizeInBits = allocedSize * kBitsPerByte; + } + } + } else { /* for unions, bitfields are treated as non-bitfields */ + allocedSize = std::max(allocedSize, static_cast(fieldTypeSize)); + } + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), fieldAlign)); + /* C99 + * Last struct element of a struct with more than one member + * is a flexible array if it is an array of size 0. + */ + if ((j != 0) && ((j + 1) == fields.size()) && + (fieldType->GetKind() == kTypeArray) && + (GetTypeSize(fieldTyIdx.GetIdx()) == 0)) { + SetHasFlexibleArray(tyIdx.GetIdx(), true); + } + } + SetTypeSize(tyIdx, RoundUp(allocedSize, GetTypeAlign(tyIdx.GetIdx()))); +} + +void BECommon::ComputeClassTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx, uint8 align) { + uint64 allocedSize = 0; + const FieldVector &fields = static_cast(ty).GetFields(); + + auto &classType = static_cast(ty); + TyIdx prntTyIdx = classType.GetParentTyIdx(); + /* process parent class */ + if (prntTyIdx != 0u) { + MIRClassType *parentType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(prntTyIdx)); + uint32 prntSize = GetTypeSize(prntTyIdx); + if (prntSize == 0) { + ComputeTypeSizesAligns(*parentType); + prntSize = GetTypeSize(prntTyIdx); + } + uint8 prntAlign = GetTypeAlign(prntTyIdx); + AppendStructFieldCount(tyIdx, GetStructFieldCount(prntTyIdx) + 1); + /* pad alloced_size according to the field alignment */ + allocedSize = RoundUp(allocedSize, prntAlign); + + JClassLayout *layout = mirModule.GetMemPool()->New(mirModule.GetMPAllocator().Adapter()); + /* add parent's record to the front */ + layout->emplace_back(JClassFieldInfo(false, false, false, allocedSize)); + /* copy parent's layout plan into my plan */ + if (HasJClassLayout(*parentType)) { /* parent may have incomplete type definition. 
*/ + const JClassLayout &parentLayout = GetJClassLayout(*parentType); + layout->insert(layout->end(), parentLayout.begin(), parentLayout.end()); + allocedSize += prntSize; + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), prntAlign)); + } else { + LogInfo::MapleLogger() << "Warning:try to layout class with incomplete type:" << parentType->GetName() << "\n"; + } + jClassLayoutTable[&classType] = layout; + } else { + /* This is the root class, say, The Object */ + jClassLayoutTable[&classType] = mirModule.GetMemPool()->New(mirModule.GetMPAllocator().Adapter()); + } + + /* + * a list of un-occupied (small size) slots available for insertion + * so far, just for 1, 2, 4 bytes types (map to array index 0, 1, 2) + */ + std::list paddingSlots[3]; + /* process fields */ + AppendStructFieldCount(tyIdx, fields.size()); + if (fields.size() == 0 && mirModule.IsCModule()) { + SetTypeAlign(tyIdx.GetIdx(), 1); + SetTypeSize(tyIdx.GetIdx(), 1); + return; + } + for (uint32 j = 0; j < fields.size(); ++j) { + TyIdx fieldTyIdx = fields[j].second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + FieldAttrs fieldAttr = fields[j].second.second; + uint32 fieldSize = GetTypeSize(fieldTyIdx); + if (fieldSize == 0) { + ComputeTypeSizesAligns(*fieldType); + fieldSize = GetTypeSize(fieldTyIdx); + } + uint8 fieldAlign = GetTypeAlign(fieldTyIdx); + + if ((fieldType->GetKind() == kTypePointer) && (fieldType->GetPrimType() == PTY_a64)) { + /* handle class reference field */ + fieldSize = static_cast(RTSupport::GetRTSupportInstance().GetFieldSize()); + fieldAlign = RTSupport::GetRTSupportInstance().GetFieldAlign(); + } + + /* try to alloc the field in one of previously created padding slots */ + uint32 currentFieldOffset = TryAllocInPaddingSlots(paddingSlots, fieldSize, fieldAlign, + sizeof(paddingSlots) / sizeof(paddingSlots[0])); + /* cannot reuse one padding slot. 
layout to current end */ + if (currentFieldOffset == 0) { + /* pad alloced_size according to the field alignment */ + currentFieldOffset = RoundUp(allocedSize, fieldAlign); + if (currentFieldOffset != allocedSize) { + /* rounded up, create one padding-slot */ + uint32 paddingSize = currentFieldOffset - allocedSize; + AddPaddingSlot(paddingSlots, allocedSize, paddingSize, + sizeof(paddingSlots) / sizeof(paddingSlots[0])); + allocedSize = currentFieldOffset; + } + /* need new memory for this field */ + allocedSize += fieldSize; + } + AddElementToJClassLayout(classType, JClassFieldInfo(fieldType->GetKind() == kTypePointer, + fieldAttr.GetAttr(FLDATTR_rcunowned), + fieldAttr.GetAttr(FLDATTR_rcweak), + currentFieldOffset)); + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), fieldAlign)); + } + SetTypeSize(tyIdx, RoundUp(allocedSize, align)); +} + +void BECommon::ComputeArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { + MIRArrayType &arrayType = static_cast<MIRArrayType&>(ty); + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType.GetElemTyIdx()); + uint32 elemSize = GetTypeSize(elemType->GetTypeIndex()); + if (elemSize == 0) { + ComputeTypeSizesAligns(*elemType); + elemSize = GetTypeSize(elemType->GetTypeIndex()); + } + if (!mirModule.IsCModule()) { + CHECK_FATAL(elemSize != 0, "elemSize should not equal 0"); + CHECK_FATAL(elemType->GetTypeIndex() != 0u, "elemType's idx should not equal 0"); + } + uint32 arrayAlign = arrayType.GetTypeAttrs().GetAlign(); + elemSize = std::max(elemSize, static_cast<uint32>(GetTypeAlign(elemType->GetTypeIndex()))); + elemSize = std::max(elemSize, arrayAlign); + /* compute total number of elements from the multiple dimensions */ + uint64 numElems = 1; + for (int d = 0; d < arrayType.GetDim(); ++d) { + numElems *= arrayType.GetSizeArrayItem(d); + } + auto typeSize = elemSize * numElems; + SetTypeSize(tyIdx, typeSize); + if (typeSize == 0) { + SetTypeAlign(tyIdx, static_cast<uint8>(arrayAlign)); + } else { + auto maxAlign = std::max(static_cast<uint32>(GetTypeAlign(elemType->GetTypeIndex())), arrayAlign); + SetTypeAlign(tyIdx, static_cast<uint8>(maxAlign)); + } +} + +void BECommon::ComputeFArrayOrJArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { + MIRFarrayType &arrayType = static_cast<MIRFarrayType&>(ty); + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType.GetElemTyIdx()); + uint32 elemSize = GetTypeSize(elemType->GetTypeIndex()); + if (elemSize == 0) { + ComputeTypeSizesAligns(*elemType); + elemSize = GetTypeSize(elemType->GetTypeIndex()); + } + CHECK_FATAL(elemSize != 0, "elemSize should not equal 0"); + CHECK_FATAL(GetTypeAlign(elemType->GetTypeIndex()) != 0u, "GetTypeAlign return 0 is not expected"); + elemSize = std::max(elemSize, static_cast<uint32>(GetTypeAlign(elemType->GetTypeIndex()))); + SetTypeSize(tyIdx, 0); + SetTypeAlign(tyIdx, GetTypeAlign(elemType->GetTypeIndex())); +}
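A quick worked example of the array computation above (numbers are illustrative only):

```
// Sketch: a 2 x 3 array of 4-byte elements under ComputeArrayTypeSizesAligns:
//   elemSize = 4, dims = {2, 3}  =>  numElems = 2 * 3 = 6
//   typeSize = elemSize * numElems = 24
// If any dimension is 0, typeSize == 0 and the alignment falls back to the
// declared arrayAlign rather than the element alignment.
```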
+ +/* Note: also do java class layout */ +void BECommon::ComputeTypeSizesAligns(MIRType &ty, uint8 align) { + TyIdx tyIdx = ty.GetTypeIndex(); + if ((structFieldCountTable.size() > tyIdx) && (GetStructFieldCount(tyIdx) != 0)) { + return; /* processed before */ + } + + if ((ty.GetPrimType() == PTY_ptr) || (ty.GetPrimType() == PTY_ref)) { + ty.SetPrimType(GetLoweredPtrType()); + } + + switch (ty.GetKind()) { + case kTypeScalar: + case kTypePointer: + case kTypeBitField: + case kTypeFunction: + SetTypeSize(tyIdx, GetPrimTypeSize(ty.GetPrimType())); + SetTypeAlign(tyIdx, GetTypeSize(tyIdx)); + break; + case kTypeArray: { + ComputeArrayTypeSizesAligns(ty, tyIdx); + break; + } + case kTypeFArray: + case kTypeJArray: { + ComputeFArrayOrJArrayTypeSizesAligns(ty, tyIdx); + break; + } + case kTypeUnion: + case kTypeStruct: { + ComputeStructTypeSizesAligns(ty, tyIdx); + break; + } + case kTypeInterface: { /* interface shouldn't have instance fields */ + SetTypeAlign(tyIdx, 0); + SetTypeSize(tyIdx, 0); + SetStructFieldCount(tyIdx, 0); + break; + } + case kTypeClass: { /* cannot have union or bitfields */ + ComputeClassTypeSizesAligns(ty, tyIdx, align); + break; + } + case kTypeByName: + case kTypeVoid: + default: + SetTypeSize(tyIdx, 0); + break; + } + /* there may be passed-in align attribute declared with the symbol */ + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), align)); +} + +void BECommon::LowerTypeAttribute(MIRType &ty) { + if (mirModule.IsJavaModule()) { + LowerJavaTypeAttribute(ty); + } +} + +void BECommon::LowerJavaTypeAttribute(MIRType &ty) { + /* we process volatile only for now */ + switch (ty.GetKind()) { + case kTypeClass: /* cannot have union or bitfields */ + LowerJavaVolatileInClassType(static_cast<MIRClassType&>(ty)); + break; + + default: + break; + } +} + +void BECommon::LowerJavaVolatileInClassType(MIRClassType &ty) { + for (auto &field : ty.GetFields()) { + if (field.second.second.GetAttr(FLDATTR_volatile)) { + field.second.second.SetAttr(FLDATTR_memory_order_acquire); + field.second.second.SetAttr(FLDATTR_memory_order_release); + } else { + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(field.second.first); + if (fieldType->GetKind() == kTypeClass) { + LowerJavaVolatileInClassType(static_cast<MIRClassType&>(*fieldType)); + } + } + } +} + +bool BECommon::IsRefField(MIRStructType &structType, FieldID fieldID) const { + if (structType.GetKind() == kTypeClass) { + CHECK_FATAL(HasJClassLayout(static_cast<MIRClassType&>(structType)), "Cannot find java class layout information"); + const JClassLayout &layout = GetJClassLayout(static_cast<MIRClassType&>(structType)); + if (layout.empty()) { + ERR(kLncErr, "layout is null in BECommon::IsRefField"); + return false; + } + return layout[fieldID - 1].IsRef(); + } + return false; +} + +void BECommon::LowerJavaVolatileForSymbol(MIRSymbol &sym) const { + /* type attr is associated with symbol */ + if (sym.GetAttr(ATTR_volatile)) { + sym.SetAttr(ATTR_memory_order_acquire); + sym.SetAttr(ATTR_memory_order_release); + } +} + +void BECommon::GenFieldOffsetMap(const std::string &className) { + MIRType *type = GlobalTables::GetTypeTable().GetOrCreateClassType(className, mirModule); + CHECK_FATAL(type != nullptr, "unknown class, type should not be nullptr"); + MIRClassType *classType = static_cast<MIRClassType*>(type); + for (FieldID i = 1; i <= GetStructFieldCount(classType->GetTypeIndex()); ++i) { + FieldID fieldID = i; + FieldPair fp = classType->TraverseToFieldRef(fieldID); + GStrIdx strIdx = fp.first; + if (strIdx == 0u) { + continue; + } + + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + + TyIdx fieldTyIdx = fp.second.first; + uint64 fieldSize = GetTypeSize(fieldTyIdx); + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + + if ((fieldType->GetKind() == kTypePointer) && (fieldType->GetPrimType() == PTY_a64)) { + /* handle class reference field */ + fieldSize = RTSupport::GetRTSupportInstance().GetFieldSize(); + } + + std::pair<int32, int32> p = GetFieldOffset(*classType, i); + CHECK_FATAL(p.second == 0, "expect p.second equals 0"); + LogInfo::MapleLogger() << "CLASS_FIELD_OFFSET_MAP(" << className.c_str() << "," << fieldName.c_str() << "," + << p.first << "," << fieldSize << ")\n"; + } +}
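For reference, the generator above prints one `CLASS_FIELD_OFFSET_MAP` line per instance field; a hypothetical output line in the format of the `LogInfo` call (class and field names invented):

```
CLASS_FIELD_OFFSET_MAP(MyClass,myField,8,4)
```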
+void BECommon::GenFieldOffsetMap(MIRClassType &classType, FILE &outFile) { + const std::string &className = classType.GetName(); + + /* + * We only enumerate fields defined in the current class. There are cases + * where parent classes may define private fields that have the same name as + * a field in the current class. This table is generated for the convenience of + * C programmers. If the C programmer wants to access parent class fields, + * the programmer should access them as `Parent.field`. + */ + FieldID myEnd = structFieldCountTable.at(classType.GetTypeIndex()); + FieldID myBegin = (myEnd - static_cast<FieldID>(classType.GetFieldsSize())) + 1; + + for (FieldID i = myBegin; i <= myEnd; ++i) { + FieldID fieldID = i; + FieldPair fp = classType.TraverseToFieldRef(fieldID); + GStrIdx strIdx = fp.first; + if (strIdx == 0u) { + continue; + } + FieldAttrs attrs = fp.second.second; + if (attrs.GetAttr(FLDATTR_static)) { + continue; + } + + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + + TyIdx fieldTyIdx = fp.second.first; + uint64 fieldSize = GetTypeSize(fieldTyIdx); + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + + if ((fieldType->GetKind() == kTypePointer) && (fieldType->GetPrimType() == PTY_a64)) { + /* handle class reference field */ + fieldSize = RTSupport::GetRTSupportInstance().GetFieldSize(); + } + + std::pair<int32, int32> p = GetFieldOffset(classType, i); + CHECK_FATAL(p.second == 0, "expect p.second equals 0"); + (void)fprintf(&outFile, "__MRT_CLASS_FIELD(%s, %s, %d, %lu)\n", className.c_str(), fieldName.c_str(), + p.first, fieldSize); + } +} + +void BECommon::GenObjSize(const MIRClassType &classType, FILE &outFile) { + const std::string &className = classType.GetName(); + uint64_t objSize = GetTypeSize(classType.GetTypeIndex()); + if (objSize == 0) { + return; + } + + TyIdx parentTypeIdx = classType.GetParentTyIdx(); + MIRType *parentType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(parentTypeIdx); + const char *parentName = nullptr; + if (parentType != nullptr) { + MIRClassType *parentClass = static_cast<MIRClassType*>(parentType); + parentName = parentClass->GetName().c_str(); + } else { + parentName = "THIS_IS_ROOT"; + } + fprintf(&outFile, "__MRT_CLASS(%s, %" PRIu64 ", %s)\n", className.c_str(), objSize, parentName); +} + +/* + * compute the offset of the field given by fieldID within the structure type + * structType; it returns the answer in the pair (byteoffset, bitoffset) such that + * if it is a bitfield, byteoffset gives the offset of the container for + * extracting the bitfield and bitoffset is with respect to the container + */ +std::pair<int32, int32> BECommon::GetFieldOffset(MIRStructType &structType, FieldID fieldID) { + CHECK_FATAL(fieldID <= GetStructFieldCount(structType.GetTypeIndex()), "GetFieldOffset: fieldID too large"); + uint64 allocedSize = 0; + uint64 allocedSizeInBits = 0; + FieldID curFieldID = 1; + if (fieldID == 0) { + return std::pair<int32, int32>(0, 0); + } + + if (structType.GetKind() == kTypeClass) { + CHECK_FATAL(HasJClassLayout(static_cast<MIRClassType&>(structType)), "Cannot find java class layout information"); + const JClassLayout &layout = GetJClassLayout(static_cast<MIRClassType&>(structType)); + CHECK_FATAL(static_cast<size_t>(fieldID) - 1 < layout.size(), "subscript out of range"); + return std::pair<int32, int32>(static_cast<int32>(layout[fieldID - 1].GetOffset()), 0); + } + + /* process the struct fields */ + FieldVector fields = structType.GetFields(); + auto structPack = static_cast<uint8>(structType.GetTypeAttrs().GetPack()); + for (uint32 j = 0; j < fields.size(); ++j) { + TyIdx
+    TyIdx fieldTyIdx = fields[j].second.first;
+    auto fieldAttr = fields[j].second.second;
+    MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx);
+    uint32 fieldTypeSize = GetTypeSize(fieldTyIdx);
+    uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte;
+    auto originAlign = GetTypeAlign(fieldTyIdx);
+    auto fieldAlign = fieldAttr.IsPacked() ? 1 : std::min(originAlign, structPack);
+    uint64 fieldAlignBits = fieldAlign * kBitsPerByte;
+    CHECK_FATAL(fieldAlign != 0, "fieldAlign should not equal 0");
+    if (structType.GetKind() != kTypeUnion) {
+      if (fieldType->GetKind() == kTypeBitField) {
+        uint32 fieldSize = static_cast<MIRBitFieldType*>(fieldType)->GetFieldSize();
+        /*
+         * Is this field crossing the align boundary of its base type? Or,
+         * is the field a zero-width bitfield?
+         * Refer to the C99 standard (§6.7.2.1):
+         * > As a special case, a bit-field structure member with a width of 0 indicates that no further
+         * > bit-field is to be packed into the unit in which the previous bit-field, if any, was placed.
+         *
+         * We know that a zero-width bitfield can cause the next field to be aligned on the next container
+         * boundary, where the container is the same size as the underlying type of the bitfield.
+         */
+        if ((!structType.GetTypeAttrs().IsPacked() &&
+             ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) ||
+            fieldSize == 0) {
+          /*
+           * the field is crossing the align boundary of its base type;
+           * round allocedSizeInBits up to the container boundary
+           */
+          allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits);
+        }
+        /* allocate the bitfield */
+        if (curFieldID == fieldID) {
+          return std::pair<int32, int32>((allocedSizeInBits / fieldAlignBits) * fieldAlign,
+                                         allocedSizeInBits % fieldAlignBits);
+        } else {
+          ++curFieldID;
+        }
+        allocedSizeInBits += fieldSize;
+        allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte);
+      } else {
+        bool leftOverBits = false;
+        uint64 offset = 0;
+
+        if (allocedSizeInBits == allocedSize * k8BitSize) {
+          allocedSize = RoundUp(allocedSize, fieldAlign);
+          offset = allocedSize;
+        } else {
+          /* still some leftover bits in the allocated bytes, so compute in bits */
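+          /*
+           * Worked example (hypothetical, assuming a 4-byte int32 container):
+           * for `struct { int32 a : 20; int32 b : 20; }`, after `a` we have
+           * allocedSizeInBits == 20; `b` would straddle the 32-bit container
+           * (20 / 32 != 39 / 32), so allocedSizeInBits is rounded up to 32 and
+           * `b` is returned as (byteOffset 4, bitOffset 0). A non-bitfield
+           * member following `b` then reaches this branch with
+           * allocedSizeInBits == 52, not a whole number of bytes, so its
+           * offset must be computed in bits before rounding.
+           */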
+          if (allocedSizeInBits / fieldAlignBits !=
+              (allocedSizeInBits + fieldSizeBits - k1BitSize) / fieldAlignBits) {
+            /* the field is crossing the align boundary of its base type */
+            allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits);
+          }
+          allocedSize = RoundUp(allocedSize, fieldAlign);
+          offset = (allocedSizeInBits / fieldAlignBits) * fieldAlign;
+          leftOverBits = true;
+        }
+
+        if (curFieldID == fieldID) {
+          return std::pair<int32, int32>(offset, 0);
+        } else {
+          MIRStructType *subStructType = fieldType->EmbeddedStructType();
+          if (subStructType == nullptr) {
+            ++curFieldID;
+          } else {
+            if ((curFieldID + GetStructFieldCount(subStructType->GetTypeIndex())) < fieldID) {
+              curFieldID += GetStructFieldCount(subStructType->GetTypeIndex()) + 1;
+            } else {
+              std::pair<int32, int32> result = GetFieldOffset(*subStructType, fieldID - curFieldID);
+              return std::pair<int32, int32>(result.first + allocedSize, result.second);
+            }
+          }
+        }
+
+        if (leftOverBits) {
+          allocedSizeInBits += fieldSizeBits;
+          allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte);
+        } else {
+          allocedSize += fieldTypeSize;
+          allocedSizeInBits = allocedSize * kBitsPerByte;
+        }
+      }
+    } else {  /* for unions, bitfields are treated as non-bitfields */
+      if (curFieldID == fieldID) {
+        return std::pair<int32, int32>(0, 0);
+      } else {
+        MIRStructType *subStructType = fieldType->EmbeddedStructType();
+        if (subStructType == nullptr) {
+          curFieldID++;
+        } else {
+          if ((curFieldID + GetStructFieldCount(subStructType->GetTypeIndex())) < fieldID) {
+            curFieldID += GetStructFieldCount(subStructType->GetTypeIndex()) + 1;
+          } else {
+            return GetFieldOffset(*subStructType, fieldID - curFieldID);
+          }
+        }
+      }
+    }
+  }
+  CHECK_FATAL(false, "GetFieldOffset() fails to find field");
+  return std::pair<int32, int32>(0, 0);
+}
+
+bool BECommon::TyIsInSizeAlignTable(const MIRType &ty) const {
+  if (typeSizeTable.size() != typeAlignTable.size()) {
+    return false;
+  }
+  return ty.GetTypeIndex() < typeSizeTable.size();
+}
+
+void BECommon::AddAndComputeSizeAlign(MIRType &ty) {
+  FinalizeTypeTable(ty);
+  typeAlignTable.emplace_back(mirModule.IsCModule());
+  typeSizeTable.emplace_back(0);
+  ComputeTypeSizesAligns(ty);
+}
+
+void BECommon::AddElementToJClassLayout(MIRClassType &klass, JClassFieldInfo info) {
+  JClassLayout &layout = *(jClassLayoutTable.at(&klass));
+  layout.emplace_back(info);
+}
+
+void BECommon::AddElementToFuncReturnType(MIRFunction &func, const TyIdx tyIdx) {
+  funcReturnType[&func] = tyIdx;
+}
+
+MIRType *BECommon::BeGetOrCreatePointerType(const MIRType &pointedType) {
+  MIRType *newType = GlobalTables::GetTypeTable().GetOrCreatePointerType(pointedType, GetLoweredPtrType());
+  if (TyIsInSizeAlignTable(*newType)) {
+    return newType;
+  }
+  AddAndComputeSizeAlign(*newType);
+  return newType;
+}
+
+MIRType *BECommon::BeGetOrCreateFunctionType(TyIdx tyIdx, const std::vector<TyIdx> &vecTy,
+                                             const std::vector<TypeAttrs> &vecAt) {
+  MIRType *newType = GlobalTables::GetTypeTable().GetOrCreateFunctionType(tyIdx, vecTy, vecAt);
+  if (TyIsInSizeAlignTable(*newType)) {
+    return newType;
+  }
+  AddAndComputeSizeAlign(*newType);
+  return newType;
+}
+
+void BECommon::FinalizeTypeTable(const MIRType &ty) {
+  if (ty.GetTypeIndex() > GetSizeOfTypeSizeTable()) {
+    if (mirModule.GetSrcLang() == kSrcLangC) {
+      for (uint32 i = GetSizeOfTypeSizeTable(); i < ty.GetTypeIndex(); ++i) {
+        MIRType *tyTmp = GlobalTables::GetTypeTable().GetTypeFromTyIdx(i);
+        AddAndComputeSizeAlign(*tyTmp);
+      }
+    } else {
+      CHECK_FATAL(ty.GetTypeIndex() == typeSizeTable.size(), "make sure the ty idx is exactly the table size");
+    }
+  }
+}
+
+BaseNode *BECommon::GetAddressOfNode(const BaseNode &node) {
+  switch (node.GetOpCode()) {
+    case OP_dread: {
+      const DreadNode &dNode = static_cast<const DreadNode&>(node);
+      const StIdx &index = dNode.GetStIdx();
+      return mirModule.GetMIRBuilder()->CreateAddrof(*mirModule.CurFunction()->GetLocalOrGlobalSymbol(index));
+    }
+    case OP_iread: {
+      const IreadNode &iNode = static_cast<const IreadNode&>(node);
+      if (iNode.GetFieldID() == 0) {
+        return iNode.Opnd(0);
+      }
+
+      uint32 index = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeTable().at(
+          iNode.GetTyIdx()))->GetPointedTyIdx();
+      MIRType *pointedType = GlobalTables::GetTypeTable().GetTypeTable().at(index);
+      std::pair<int32, int32> byteBitOffset =
+          GetFieldOffset(static_cast<MIRStructType&>(*pointedType), iNode.GetFieldID());
+#if TARGAARCH64 || TARGRISCV64
+      DEBUG_ASSERT(GetAddressPrimType() == GetLoweredPtrType(),
+                   "incorrect address type, expect a GetLoweredPtrType()");
+#endif
+      return mirModule.GetMIRBuilder()->CreateExprBinary(
+          OP_add, *GlobalTables::GetTypeTable().GetPrimType(GetAddressPrimType()),
+          static_cast<BaseNode*>(iNode.Opnd(0)),
+          mirModule.GetMIRBuilder()->CreateIntConst(byteBitOffset.first, PTY_u32));
+    }
+    default:
+      return nullptr;
+  }
+}
+
+bool BECommon::CallIsOfAttr(FuncAttrKind attr, const StmtNode *narynode) const {
+  (void) attr;
+  (void) narynode;
+  return false;
+
+  /* For now, 64x1_t-typed objects are not propagated to become pregs by mplme, so the following
+     is not needed. We need to revisit this later when types are enhanced with attributes. */
+#if TO_BE_RESURRECTED
+  bool attrFunc = false;
+  if (narynode->GetOpCode() == OP_call) {
+    CallNode *callNode = static_cast<CallNode*>(narynode);
+    MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx());
+    attrFunc = (mirModule.GetSrcLang() == kSrcLangC && func->GetAttr(attr)) ? true : false;
+  } else if (narynode->GetOpCode() == OP_icall) {
+    IcallNode *icallNode = static_cast<IcallNode*>(narynode);
+    BaseNode *fNode = icallNode->Opnd(0);
+    MIRFuncType *fType = nullptr;
+    MIRPtrType *pType = nullptr;
+    if (fNode->GetOpCode() == OP_dread) {
+      DreadNode *dNode = static_cast<DreadNode*>(fNode);
+      MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dNode->GetStIdx());
+      pType = static_cast<MIRPtrType*>(symbol->GetType());
+      MIRType *ty = pType;
+      if (dNode->GetFieldID() != 0) {
+        DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, "");
+        FieldPair thepair;
+        if (ty->GetKind() == kTypeStruct) {
+          thepair = static_cast<MIRStructType*>(ty)->TraverseToField(dNode->GetFieldID());
+        } else {
+          thepair = static_cast<MIRClassType*>(ty)->TraverseToField(dNode->GetFieldID());
+        }
+        pType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first));
+      }
+      fType = static_cast<MIRFuncType*>(pType->GetPointedType());
+    } else if (fNode->GetOpCode() == OP_iread) {
+      IreadNode *iNode = static_cast<IreadNode*>(fNode);
+      MIRPtrType *pointerty = static_cast<MIRPtrType*>(GlobalTables::
+          GetTypeTable().GetTypeFromTyIdx(iNode->GetTyIdx()));
+      MIRType *pointedType = pointerty->GetPointedType();
+      if (iNode->GetFieldID() != 0) {
+        pointedType = static_cast<MIRStructType*>(pointedType)->GetFieldType(iNode->GetFieldID());
+      }
+      if (pointedType->GetKind() == kTypeFunction) {
+        fType = static_cast<MIRFuncType*>(pointedType);
+      } else if (pointedType->GetKind() == kTypePointer) {
+        return false;  /* assert? */
+      }
+    } else if (fNode->GetOpCode() == OP_select) {
+      TernaryNode *sNode = static_cast<TernaryNode*>(fNode);
+      BaseNode *expr = sNode->Opnd(1);
+      // both function pointers under the select should have the same signature, so check operand 1 only
+      AddroffuncNode *afNode = static_cast<AddroffuncNode*>(expr);
+      MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(afNode->GetPUIdx());
+      attrFunc = mirModule.GetSrcLang() == kSrcLangC && func->GetAttr(attr);
+    } else if (fNode->GetOpCode() == OP_regread) {
+      RegreadNode *rNode = static_cast<RegreadNode*>(fNode);
+      PregIdx pregidx = rNode->GetRegIdx();
+      MIRPreg *preg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(pregidx);
+      MIRType *type = preg->GetMIRType();
+      if (type == nullptr) {
+        return false;
+      }
+      MIRPtrType *pType = static_cast<MIRPtrType*>(type);
+      type = pType->GetPointedType();
+      if (type == nullptr) {
+        return false;
+      }
+    } else if (fNode->GetOpCode() == OP_retype) {
+      RetypeNode *rNode = static_cast<RetypeNode*>(fNode);
+      pType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rNode->GetTyIdx()));
+      fType = static_cast<MIRFuncType*>(pType->GetPointedType());
+    } else {
+      return false;  /* assert? */
+    }
+  }
+  return attrFunc;
+#endif
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/be/lower.cpp b/ecmascript/mapleall/maple_be/src/be/lower.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2b5c427615110f526702f5ba57353bd2943514d1
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/be/lower.cpp
@@ -0,0 +1,4069 @@
+/*
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "lower.h" +#include +#include +#include +#include "mir_symbol.h" +#include "mir_function.h" +#include "cg_option.h" +#include "switch_lowerer.h" +#include "try_catch.h" +#include "intrinsic_op.h" +#include "mir_builder.h" +#include "opcode_info.h" +#include "rt.h" +#include "securec.h" +#include "string_utils.h" +#include "cast_opt.h" +#include "simplify.h" +#include "me_safety_warning.h" + +namespace maplebe { +namespace arrayNameForLower { +const std::set kArrayKlassName{ +#include "array_klass_name.def" +}; + +const std::set kArrayBaseName{ +#include "array_base_name.def" +}; +} + +using namespace maple; + +#define JAVALANG (mirModule.IsJavaModule()) +#define TARGARM32 0 + +enum ExtFuncT : uint8 { kFmodDouble, kFmodFloat }; + +struct ExtFuncDescrT { + ExtFuncT fid; + const char *name; + PrimType retType; + PrimType argTypes[kMaxModFuncArgSize]; +}; + +namespace { +std::pair cgBuiltins[] = { + { INTRN_JAVA_ARRAY_LENGTH, "MCC_DexArrayLength" }, + { INTRN_JAVA_ARRAY_FILL, "MCC_DexArrayFill" }, + { INTRN_JAVA_CHECK_CAST, "MCC_DexCheckCast" }, + { INTRN_JAVA_INSTANCE_OF, "MCC_DexInstanceOf" }, + { INTRN_JAVA_INTERFACE_CALL, "MCC_DexInterfaceCall" }, + { INTRN_JAVA_POLYMORPHIC_CALL, "MCC_DexPolymorphicCall" }, + { INTRN_MCC_DeferredFillNewArray, "MCC_DeferredFillNewArray" }, + { INTRN_MCC_DeferredInvoke, "MCC_DeferredInvoke" }, + { INTRN_JAVA_CONST_CLASS, "MCC_GetReferenceToClass" }, + { INTRN_JAVA_GET_CLASS, "MCC_GetClass" }, + { INTRN_MPL_SET_CLASS, "MCC_SetJavaClass" }, + { INTRN_MPL_MEMSET_LOCALVAR, "memset_s" }, +}; + +ExtFuncDescrT extFnDescrs[] = { + { kFmodDouble, "fmod", PTY_f64, { PTY_f64, PTY_f64, kPtyInvalid } }, + { kFmodFloat, "fmodf", PTY_f32, { PTY_f32, PTY_f32, kPtyInvalid } }, +}; + +std::vector> extFuncs; +const std::string kOpAssertge = "OP_assertge"; +const std::string kOpAssertlt = "OP_assertlt"; +const std::string kOpCallAssertle = "OP_callassertle"; +const std::string kOpReturnAssertle = "OP_returnassertle"; +const std::string kOpAssignAssertle = "OP_assignassertle"; +const std::string kFileSymbolNamePrefix = "symname"; +} + +const std::string CGLowerer::kIntrnRetValPrefix = "__iret"; +const std::string CGLowerer::kUserRetValPrefix = "__uret"; + +std::string CGLowerer::GetFileNameSymbolName(const std::string &fileName) const { + return kFileSymbolNamePrefix + std::regex_replace(fileName, std::regex("-"), "_"); +} + +MIRSymbol *CGLowerer::CreateNewRetVar(const MIRType &ty, const std::string &prefix) { + const uint32 bufSize = 257; + char buf[bufSize] = {'\0'}; + MIRFunction *func = GetCurrentFunc(); + MIRSymbol *var = func->GetSymTab()->CreateSymbol(kScopeLocal); + int eNum = sprintf_s(buf, bufSize - 1, "%s%" PRId64, prefix.c_str(), ++seed); + if (eNum == -1) { + FATAL(kLncFatal, "sprintf_s failed"); + } + std::string strBuf(buf); + var->SetNameStrIdx(mirModule.GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + var->SetTyIdx(ty.GetTypeIndex()); + var->SetStorageClass(kScAuto); + var->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*var); + return var; +} + +void CGLowerer::RegisterExternalLibraryFunctions() { + for (uint32 i = 0; i < sizeof(extFnDescrs) / sizeof(extFnDescrs[0]); ++i) { + ExtFuncT id = extFnDescrs[i].fid; + CHECK_FATAL(id == i, "make sure id equal i"); + + MIRFunction *func = mirModule.GetMIRBuilder()->GetOrCreateFunction(extFnDescrs[i].name, + TyIdx(extFnDescrs[i].retType)); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + MIRSymbol *funcSym = func->GetFuncSymbol(); + 
funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); + /* return type */ + MIRType *retTy = GlobalTables::GetTypeTable().GetPrimType(extFnDescrs[i].retType); + + /* use void* for PTY_dynany */ + if (retTy->GetPrimType() == PTY_dynany) { + retTy = GlobalTables::GetTypeTable().GetPtr(); + } + + std::vector formals; + for (uint32 j = 0; extFnDescrs[i].argTypes[j] != kPtyInvalid; ++j) { + PrimType primTy = extFnDescrs[i].argTypes[j]; + MIRType *argTy = GlobalTables::GetTypeTable().GetPrimType(primTy); + /* use void* for PTY_dynany */ + if (argTy->GetPrimType() == PTY_dynany) { + argTy = GlobalTables::GetTypeTable().GetPtr(); + } + MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal); + const uint32 bufSize = 18; + char buf[bufSize] = {'\0'}; + int eNum = sprintf_s(buf, bufSize - 1, "p%u", j); + if (eNum == -1) { + FATAL(kLncFatal, "sprintf_s failed"); + } + std::string strBuf(buf); + argSt->SetNameStrIdx(mirModule.GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + argSt->SetTyIdx(argTy->GetTypeIndex()); + argSt->SetStorageClass(kScFormal); + argSt->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*argSt); + formals.emplace_back(argSt); + } + func->UpdateFuncTypeAndFormalsAndReturnType(formals, retTy->GetTypeIndex(), false); + auto *funcType = func->GetMIRFuncType(); + DEBUG_ASSERT(funcType != nullptr, "null ptr check"); + beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); + extFuncs.emplace_back(std::pair(id, func->GetPuidx())); + } +} + +BaseNode *CGLowerer::NodeConvert(PrimType mType, BaseNode &expr) { + PrimType srcType = expr.GetPrimType(); + if (GetPrimTypeSize(mType) == GetPrimTypeSize(srcType)) { + return &expr; + } + TypeCvtNode *cvtNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt); + cvtNode->SetFromType(srcType); + cvtNode->SetPrimType(mType); + cvtNode->SetOpnd(&expr, 0); + return cvtNode; +} + +BaseNode *CGLowerer::LowerIaddrof(const IreadNode &iaddrof) { + if (iaddrof.GetFieldID() == 0) { + return iaddrof.Opnd(0); + } + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iaddrof.GetTyIdx()); + MIRPtrType *pointerTy = static_cast(type); + CHECK_FATAL(pointerTy != nullptr, "LowerIaddrof: expect a pointer type at iaddrof node"); + MIRStructType *structTy = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx())); + CHECK_FATAL(structTy != nullptr, "LowerIaddrof: non-zero fieldID for non-structure"); + int32 offset = beCommon.GetFieldOffset(*structTy, iaddrof.GetFieldID()).first; + if (offset == 0) { + return iaddrof.Opnd(0); + } + uint32 loweredPtrType = static_cast(GetLoweredPtrType()); + MIRIntConst *offsetConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst( + offset, *GlobalTables::GetTypeTable().GetTypeTable().at(loweredPtrType)); + BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New(offsetConst); + offsetNode->SetPrimType(GetLoweredPtrType()); + + BinaryNode *addNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + addNode->SetPrimType(GetLoweredPtrType()); + addNode->SetBOpnd(iaddrof.Opnd(0), 0); + addNode->SetBOpnd(offsetNode, 1); + return addNode; +} + +BaseNode *CGLowerer::SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + return &bNode; + } + MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); + static uint32 val = 0; + std::string name("bnaryTmp"); + name.append(std::to_string(val++)); + + BaseNode *opnd1 = bNode.Opnd(1); + MIRType *ty = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(opnd1->GetPrimType())); + MIRSymbol *dnodeSt = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *ty); + DassignNode *dnode = mirbuilder->CreateStmtDassign(const_cast(*dnodeSt), 0, opnd1); + blkNode.InsertAfter(blkNode.GetLast(), dnode); + + BaseNode *dreadNode = mirbuilder->CreateExprDread(*dnodeSt); + bNode.SetOpnd(dreadNode, 1); + + return &bNode; +} + +BaseNode *CGLowerer::SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + return &tNode; + } + MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); + static uint32 val = 0; + std::string name("tnaryTmp"); + name.append(std::to_string(val++)); + + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(tNode.GetPrimType())); + MIRSymbol *dassignNodeSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *ty); + DassignNode *dassignNode = mirbuilder->CreateStmtDassign(const_cast(*dassignNodeSym), 0, &tNode); + blkNode.InsertAfter(blkNode.GetLast(), dassignNode); + + BaseNode *dreadNode = mirbuilder->CreateExprDread(*dassignNodeSym); + for (size_t i = 0; i < parent.NumOpnds(); i++) { + if (parent.Opnd(i) == &tNode) { + parent.SetOpnd(dreadNode, i); + break; + } + } + + return dreadNode; +} + +/* Check if the operand of the select node is complex enough for either + * functionality or performance reason so we need to lower it to if-then-else. + */ +bool CGLowerer::IsComplexSelect(const TernaryNode &tNode) const { + if (tNode.GetPrimType() == PTY_agg) { + return true; + } + /* Iread may have side effect which may cause correctness issue. */ + if (HasIreadExpr(tNode.Opnd(1)) || HasIreadExpr(tNode.Opnd(2))) { + return true; + } + // it will be generated many insn for complex expr, leading to + // worse performance than punishment of branch prediction error + constexpr size_t maxDepth = 3; + if (MaxDepth(tNode.Opnd(1)) > maxDepth || MaxDepth(tNode.Opnd(1)) > maxDepth) { + return true; + } + return false; +} + +int32 CGLowerer::FindTheCurrentStmtFreq(const StmtNode *stmt) const { + while (stmt != nullptr) { + int32 freq = mirModule.CurFunction()->GetFreqFromLastStmt(stmt->GetStmtID()); + if (freq != -1) { + return freq; + } + stmt = stmt->GetPrev(); + } + return -1; +} + +/* Lower agg select node back to if-then-else stmt. 
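+ *
+ * Sketch of the rewrite (label and temporary names here are made up):
+ *   x = select(cond, a, b)   // agg result, or operands too complex
+ * is emitted as
+ *   brfalse cond -> @lfalse
+ *   tmp = a
+ *   goto @lend
+ * @lfalse:
+ *   tmp = b
+ * @lend:
+ *   ...parent operand of the select replaced by a read of tmp...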
*/ +/* + 0(brfalse) + | \ + 1 2 + \ | + \ | + 3 +*/ +BaseNode *CGLowerer::LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode) { + MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); + + MIRType *resultTy = 0; + MIRFunction *func = mirModule.CurFunction(); + if (tNode.GetPrimType() == PTY_agg) { + if (tNode.Opnd(1)->op == OP_dread) { + DreadNode *trueNode = static_cast(tNode.Opnd(1)); + resultTy = mirModule.CurFunction()->GetLocalOrGlobalSymbol(trueNode->GetStIdx())->GetType(); + } else if (tNode.Opnd(1)->op == OP_iread) { + IreadNode *trueNode = static_cast(tNode.Opnd(1)); + MIRPtrType *ptrty = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(trueNode->GetTyIdx())); + resultTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrty->GetPointedTyIdx())); + if (trueNode->GetFieldID() != 0) { + MIRStructType *structty = static_cast(resultTy); + resultTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(trueNode->GetFieldID())); + } + } else { + CHECK_FATAL(false, "NYI: LowerComplexSelect"); + } + } else { + resultTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(tNode.GetPrimType())); + } + + CondGotoNode *brTargetStmt = mirModule.CurFuncCodeMemPool()->New(OP_brfalse); + brTargetStmt->SetOpnd(tNode.Opnd(0), 0); + LabelIdx targetIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(targetIdx); + brTargetStmt->SetOffset(targetIdx); + // Update the current stmt frequence + int32 currentStmtFreq = 0; + if (kOpcodeInfo.IsStmt(parent.GetOpCode())) { + currentStmtFreq = FindTheCurrentStmtFreq(static_cast(&parent)); + } + currentStmtFreq = currentStmtFreq == -1 ? 0 : currentStmtFreq; + func->SetLastFreqMap(brTargetStmt->GetStmtID(), static_cast(currentStmtFreq)); + blkNode.InsertAfter(blkNode.GetLast(), brTargetStmt); + union { + MIRSymbol *resSym; + PregIdx resPreg; + } cplxSelRes; // complex select result + uint32 fallthruStmtFreq = static_cast((currentStmtFreq + 1) / 2); + if (tNode.GetPrimType() == PTY_agg) { + static uint32 val = 0; + std::string name("ComplexSelectTmp"); + name.append(std::to_string(val++)); + cplxSelRes.resSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *resultTy); + DassignNode *dassignTrue = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(1)); + // Fallthru: update the frequence 1 + func->SetFirstFreqMap(dassignTrue->GetStmtID(), fallthruStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), dassignTrue); + } else { + cplxSelRes.resPreg = mirbuilder->GetCurrentFunction()->GetPregTab()->CreatePreg(tNode.GetPrimType()); + RegassignNode *regassignTrue = + mirbuilder->CreateStmtRegassign(tNode.GetPrimType(), cplxSelRes.resPreg, tNode.Opnd(1)); + // Update the frequence first opnd + func->SetFirstFreqMap(regassignTrue->GetStmtID(), fallthruStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), regassignTrue); + } + + GotoNode *gotoStmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); + LabelIdx EndIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(EndIdx); + gotoStmt->SetOffset(EndIdx); + // Update the frequence first opnd + func->SetLastFreqMap(gotoStmt->GetStmtID(), fallthruStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), gotoStmt); + + uint32 targetStmtFreq = static_cast(currentStmtFreq / 2); + LabelNode *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(targetIdx); + 
func->SetFirstFreqMap(lableStmt->GetStmtID(), targetStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), lableStmt); + + if (tNode.GetPrimType() == PTY_agg) { + DassignNode *dassignFalse = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(2)); + // Update the frequence second opnd + func->SetLastFreqMap(dassignFalse->GetStmtID(), targetStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), dassignFalse); + } else { + RegassignNode *regassignFalse = + mirbuilder->CreateStmtRegassign(tNode.GetPrimType(), cplxSelRes.resPreg, tNode.Opnd(2)); + // Update the frequence 2 + func->SetLastFreqMap(regassignFalse->GetStmtID(), targetStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), regassignFalse); + } + + lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(EndIdx); + // Update the frequence third opnd + func->SetFirstFreqMap(lableStmt->GetStmtID(), static_cast(currentStmtFreq)); + blkNode.InsertAfter(blkNode.GetLast(), lableStmt); + + BaseNode *exprNode = (tNode.GetPrimType() == PTY_agg) ? + static_cast(mirbuilder->CreateExprDread(*cplxSelRes.resSym)) : + static_cast(mirbuilder->CreateExprRegread(tNode.GetPrimType(), cplxSelRes.resPreg)); + for (size_t i = 0; i < parent.NumOpnds(); i++) { + if (parent.Opnd(i) == &tNode) { + parent.SetOpnd(exprNode, i); + break; + } + } + + return exprNode; +} + +BaseNode *CGLowerer::LowerFarray(ArrayNode &array) { + auto *farrayType = static_cast(array.GetArrayType(GlobalTables::GetTypeTable())); + size_t eSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx())->GetSize(); + if (farrayType->GetKind() == kTypeJArray) { + if (farrayType->GetElemType()->GetKind() != kTypeScalar) { + /* not the last dimension of primitive array */ + eSize = RTSupport::GetRTSupportInstance().GetObjectAlignment(); + } + } + + MIRType &arrayType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType())); + /* how about multi-dimension array? 
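+   Only index 0 is consumed here: a flat kTypeFArray/kTypeJArray access lowers
+   to base + index * eSize, plus the array-content header offset for Java
+   arrays; true multi-dimensional arrays take the LowerArrayDim/LowerCArray
+   paths below instead.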
*/ + if (array.GetIndex(0)->GetOpCode() == OP_constval) { + const ConstvalNode *constvalNode = static_cast(array.GetIndex(0)); + if (constvalNode->GetConstVal()->GetKind() == kConstInt) { + const MIRIntConst *pIntConst = static_cast(constvalNode->GetConstVal()); + CHECK_FATAL(JAVALANG || !pIntConst->IsNegative(), "Array index should >= 0."); + uint64 eleOffset = pIntConst->GetExtValue() * eSize; + + if (farrayType->GetKind() == kTypeJArray) { + eleOffset += RTSupport::GetRTSupportInstance().GetArrayContentOffset(); + } + + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + if (eleOffset == 0) { + return baseNode; + } + + MIRIntConst *eleConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(eleOffset, arrayType); + BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New(eleConst); + offsetNode->SetPrimType(array.GetPrimType()); + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(offsetNode, 1); + return rAdd; + } + } + + BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(0)); + BaseNode *rMul = nullptr; + + if ((farrayType->GetKind() == kTypeJArray) && (resNode->GetOpCode() == OP_constval)) { + ConstvalNode *idxNode = static_cast(resNode); + uint64 idx = safe_cast(idxNode->GetConstVal())->GetExtValue(); + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(idx * eSize, arrayType); + rMul = mirModule.CurFuncCodeMemPool()->New(eConst); + rMul->SetPrimType(array.GetPrimType()); + } else { + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(eSize), arrayType); + BaseNode *eSizeNode = mirModule.CurFuncCodeMemPool()->New(eConst); + eSizeNode->SetPrimType(array.GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + rMul->SetPrimType(array.GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSizeNode, 1); + } + + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + + if (farrayType->GetKind() == kTypeJArray) { + BaseNode *jarrayBaseNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + MIRIntConst *arrayHeaderNode = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + RTSupport::GetRTSupportInstance().GetArrayContentOffset(), arrayType); + BaseNode *arrayHeaderCstNode = mirModule.CurFuncCodeMemPool()->New(arrayHeaderNode); + arrayHeaderCstNode->SetPrimType(array.GetPrimType()); + jarrayBaseNode->SetPrimType(array.GetPrimType()); + jarrayBaseNode->SetOpnd(baseNode, 0); + jarrayBaseNode->SetOpnd(arrayHeaderCstNode, 1); + baseNode = jarrayBaseNode; + } + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + return rAdd; +} + +BaseNode *CGLowerer::LowerArrayDim(ArrayNode &array, int32 dim) { + BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(dim - 1)); + /* process left dimension index, resNode express the last dim, so dim need sub 2 */ + CHECK_FATAL(dim > (std::numeric_limits::min)() + 1, "out of range"); + int leftDim = dim - 2; + MIRType *aType = array.GetArrayType(GlobalTables::GetTypeTable()); + MIRArrayType *arrayType = static_cast(aType); + for (int i = leftDim; i >= 0; --i) { + BaseNode *mpyNode = mirModule.CurFuncCodeMemPool()->New(OP_mul); + BaseNode *item = NodeConvert(array.GetPrimType(), *array.GetDim(mirModule, GlobalTables::GetTypeTable(), dim - 1)); + if (mirModule.IsCModule()) { + item = 
NodeConvert(array.GetPrimType(), *array.GetIndex(static_cast(static_cast(i)))); + int64 offsetSize = 1; + for (int32 j = i + 1; j < dim; ++j) { + offsetSize *= arrayType->GetSizeArrayItem(static_cast(j)); + } + MIRIntConst *offsetCst = mirModule.CurFuncCodeMemPool()->New( + offsetSize, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(array.GetPrimType())); + BaseNode *eleOffset = mirModule.CurFuncCodeMemPool()->New(offsetCst); + eleOffset->SetPrimType(array.GetPrimType()); + mpyNode->SetPrimType(array.GetPrimType()); + mpyNode->SetOpnd(eleOffset, 0); + mpyNode->SetOpnd(item, 1); + } else { + for (int j = leftDim; j > i; --j) { + BaseNode *mpyNodes = mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNodes->SetPrimType(array.GetPrimType()); + mpyNodes->SetOpnd(item, 0); + mpyNodes->SetOpnd( + NodeConvert(array.GetPrimType(), *array.GetDim(mirModule, GlobalTables::GetTypeTable(), j)), 1); + item = mpyNodes; + } + mpyNode->SetPrimType(array.GetPrimType()); + mpyNode->SetOpnd(NodeConvert(array.GetPrimType(), *array.GetIndex(i)), 0); + mpyNode->SetOpnd(item, 1); + } + + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array.GetPrimType()); + newResNode->SetOpnd(resNode, 0); + newResNode->SetOpnd(mpyNode, 1); + resNode = newResNode; + } + return resNode; +} + +BaseNode *CGLowerer::LowerArrayForLazyBiding(BaseNode &baseNode, BaseNode &offsetNode, const BaseNode &parent) { + if (parent.GetOpCode() == OP_iread && (baseNode.GetOpCode() == maple::OP_addrof)) { + const MIRSymbol *st = + mirModule.CurFunction()->GetLocalOrGlobalSymbol(static_cast(baseNode).GetStIdx()); + if (StringUtils::StartsWith(st->GetName(), namemangler::kDecoupleStaticValueStr) || + ((StringUtils::StartsWith(st->GetName(), namemangler::kMuidFuncUndefTabPrefixStr) || + StringUtils::StartsWith(st->GetName(), namemangler::kMuidFuncDefTabPrefixStr) || + StringUtils::StartsWith(st->GetName(), namemangler::kMuidDataDefTabPrefixStr) || + StringUtils::StartsWith(st->GetName(), namemangler::kMuidDataUndefTabPrefixStr)) && + CGOptions::IsLazyBinding())) { + /* for decouple static or lazybinding def/undef tables, replace it with intrinsic */ + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(&baseNode); + args.emplace_back(&offsetNode); + return mirBuilder->CreateExprIntrinsicop(INTRN_MPL_READ_STATIC_OFFSET_TAB, OP_intrinsicop, + *GlobalTables::GetTypeTable().GetPrimType(parent.GetPrimType()), args); + } + } + return nullptr; +} + +BaseNode *CGLowerer::LowerArray(ArrayNode &array, const BaseNode &parent) { + MIRType *aType = array.GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) { + return LowerFarray(array); + } + MIRArrayType *arrayType = static_cast(aType); + int32 dim = arrayType->GetDim(); + BaseNode *resNode = LowerArrayDim(array, dim); + BaseNode *rMul = nullptr; + size_t eSize = beCommon.GetTypeSize(arrayType->GetElemTyIdx().GetIdx()); + Opcode opAdd = OP_add; + MIRType &arrayTypes = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType())); + if (resNode->GetOpCode() == OP_constval) { + /* index is a constant, we can calculate the offset now */ + ConstvalNode *idxNode = static_cast(resNode); + uint64 idx = safe_cast(idxNode->GetConstVal())->GetExtValue(); + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(idx * eSize, arrayTypes); + rMul = mirModule.CurFuncCodeMemPool()->New(eConst); + rMul->SetPrimType(array.GetPrimType()); + if (dim 
== 1) { + opAdd = OP_CG_array_elem_add; + } + } else { + MIRIntConst *eConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(eSize), arrayTypes); + BaseNode *tmpNode = mirModule.CurFuncCodeMemPool()->New(eConst); + tmpNode->SetPrimType(array.GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + rMul->SetPrimType(array.GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(tmpNode, 1); + } + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + if (rMul->GetOpCode() == OP_constval) { + BaseNode *intrnNode = LowerArrayForLazyBiding(*baseNode, *rMul, parent); + if (intrnNode != nullptr) { + return intrnNode; + } + } + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opAdd); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + return rAdd; +} + +BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { + MIRType *aType = array.GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) { + return LowerFarray(array); + } + + MIRArrayType *arrayType = static_cast(aType); + /* There are two cases where dimension > 1. + * 1) arrayType->dim > 1. Process the current arrayType. (nestedArray = false) + * 2) arrayType->dim == 1, but arraytype->eTyIdx is another array. (nestedArray = true) + * Assume at this time 1) and 2) cannot mix. + * Along with the array dimension, there is the array indexing. + * It is allowed to index arrays less than the dimension. + * This is dictated by the number of indexes. + */ + bool nestedArray = false; + int dim = arrayType->GetDim(); + MIRType *innerType = nullptr; + MIRArrayType *innerArrayType = nullptr; + uint64 elemSize = 0; + if (dim == 1) { + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + if (innerType->GetKind() == kTypeArray) { + nestedArray = true; + do { + innerArrayType = static_cast(innerType); + elemSize = RoundUp(beCommon.GetTypeSize(innerArrayType->GetElemTyIdx().GetIdx()), + beCommon.GetTypeAlign(arrayType->GetElemTyIdx().GetIdx())); + dim++; + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } while (innerType->GetKind() == kTypeArray); + } + } + + int32 numIndex = static_cast(array.NumOpnds()) - 1; + MIRArrayType *curArrayType = arrayType; + BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(0)); + if (dim > 1) { + BaseNode *prevNode = nullptr; + for (int i = 0; (i < dim) && (i < numIndex); i++) { + uint32 mpyDim = 1; + if (nestedArray) { + CHECK_FATAL(arrayType->GetSizeArrayItem(0) > 0, "Zero size array dimension"); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curArrayType->GetElemTyIdx()); + curArrayType = static_cast(innerType); + while (innerType->GetKind() == kTypeArray) { + innerArrayType = static_cast(innerType); + mpyDim *= innerArrayType->GetSizeArrayItem(0); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } + } else { + CHECK_FATAL(arrayType->GetSizeArrayItem(static_cast(i)) > 0, "Zero size array dimension"); + for (int j = i + 1; j < dim; j++) { + mpyDim *= arrayType->GetSizeArrayItem(static_cast(j)); + } + } + + BaseNode *index = static_cast(array.GetIndex(static_cast(i))); + bool isConst = false; + uint64 indexVal = 0; + if (index->op == OP_constval) { + ConstvalNode *constNode = static_cast(index); + indexVal = (static_cast(constNode->GetConstVal()))->GetExtValue(); + isConst = true; + MIRIntConst 
*newConstNode = mirModule.GetMemPool()->New( + indexVal * mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *newValNode = mirModule.CurFuncCodeMemPool()->New(newConstNode); + newValNode->SetPrimType(array.GetPrimType()); + if (i == 0) { + prevNode = newValNode; + continue; + } else { + resNode = newValNode; + } + } + if (i > 0 && !isConst) { + resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(static_cast(i))); + } + + BaseNode *mpyNode; + if (isConst) { + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim * indexVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array.GetPrimType()); + mpyNode = mulSize; + } else if (mpyDim == 1 && prevNode) { + mpyNode = prevNode; + prevNode = resNode; + } else { + mpyNode = mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNode->SetPrimType(array.GetPrimType()); + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array.GetPrimType()); + mpyNode->SetOpnd(NodeConvert(array.GetPrimType(), *mulSize), 0); + mpyNode->SetOpnd(resNode, 1); + } + if (i == 0) { + prevNode = mpyNode; + continue; + } + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array.GetPrimType()); + newResNode->SetOpnd(mpyNode, 0); + newResNode->SetOpnd(prevNode, 1); + prevNode = newResNode; + } + resNode = prevNode; + } + + BaseNode *rMul = nullptr; + // esize is the size of the array element (eg. int = 4 long = 8) + uint64 esize; + if (nestedArray) { + esize = elemSize; + } else { + esize = beCommon.GetTypeSize(arrayType->GetElemTyIdx().GetIdx()); + } + Opcode opadd = OP_add; + if (resNode->op == OP_constval) { + // index is a constant, we can calculate the offset now + ConstvalNode *idxNode = static_cast(resNode); + uint64 idx = static_cast(idxNode->GetConstVal())->GetExtValue(); + MIRIntConst *econst = mirModule.GetMemPool()->New( + idx * esize, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + rMul = mirModule.CurFuncCodeMemPool()->New(econst); + rMul->SetPrimType(array.GetPrimType()); + if (dim == 1 && array.GetBase()->op == OP_addrof && static_cast(array.GetBase())->GetFieldID() == 0) { + opadd = OP_CG_array_elem_add; + } + } else { + MIRIntConst *econst = mirModule.GetMemPool()->New(esize, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *eSize = mirModule.CurFuncCodeMemPool()->New(econst); + eSize->SetPrimType(array.GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + rMul->SetPrimType(array.GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSize, 1); + } + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opadd); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + return rAdd; +} + +StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr, BaseNode *rhs, BlockNode *block) { + auto bitSize = fieldType->GetFieldSize(); + auto primType = fieldType->GetPrimType(); + auto byteOffset = byteBitOffsets.first; + auto bitOffset = byteBitOffsets.second; + auto *builder = mirModule.GetMIRBuilder(); 
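+  /*
+   * A bitfield store is a read-modify-write: if the field fits in a single
+   * container of primType (e.g. a hypothetical `f : 5` at bit 3 of a 32-bit
+   * unit), one depositbits + iassignoff pair below is enough; a field that
+   * straddles the container boundary is split, writing the low bits into the
+   * current container and the remaining bits into the next one.
+   */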
+ auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + if ((static_cast(bitOffset) + bitSize) <= primTypeBitSize) { + if (CGOptions::IsBigEndian()) { + bitOffset = (static_cast(beCommon.GetTypeSize(fieldType->GetTypeIndex()) * kBitsPerByte) + - bitOffset) - bitSize; + } + auto depositBits = builder->CreateExprDepositbits(OP_depositbits, primType, static_cast(bitOffset), + bitSize, bitField, rhs); + return builder->CreateStmtIassignoff(primType, byteOffset, baseAddr, depositBits); + } + // if space not enough in the unit with size of primType, we would make an extra assignment from next bound + auto bitsRemained = (bitOffset + bitSize) - primTypeBitSize; + auto bitsExtracted = primTypeBitSize - bitOffset; + if (CGOptions::IsBigEndian()) { + bitOffset = 0; + } + auto *depositedLowerBits = builder->CreateExprDepositbits(OP_depositbits, primType, + static_cast(bitOffset), bitsExtracted, bitField, rhs); + auto *assignedLowerBits = builder->CreateStmtIassignoff(primType, byteOffset, baseAddr, depositedLowerBits); + block->AddStatement(assignedLowerBits); + auto *extractedHigherBits = + builder->CreateExprExtractbits(OP_extractbits, primType, bitsExtracted, bitsRemained, rhs); + auto *bitFieldRemained = builder->CreateExprIreadoff(primType, + byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr); + auto *depositedHigherBits = + builder->CreateExprDepositbits(OP_depositbits, primType, 0, bitsRemained, bitFieldRemained, extractedHigherBits); + auto *assignedHigherBits = builder->CreateStmtIassignoff(primType, + byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr, depositedHigherBits); + return assignedHigherBits; +} + +BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr) { + auto bitSize = fieldType->GetFieldSize(); + auto primType = fieldType->GetPrimType(); + auto byteOffset = byteBitOffsets.first; + auto bitOffset = byteBitOffsets.second; + auto *builder = mirModule.GetMIRBuilder(); + auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + if ((static_cast(bitOffset) + bitSize) <= primTypeBitSize) { + if (CGOptions::IsBigEndian()) { + bitOffset = (static_cast(beCommon.GetTypeSize(fieldType->GetTypeIndex()) * kBitsPerByte) + - bitOffset) - bitSize; + } + return builder->CreateExprExtractbits(OP_extractbits, primType, static_cast(bitOffset), bitSize, bitField); + } + // if space not enough in the unit with size of primType, the result would be binding of two exprs of load + auto bitsRemained = (bitOffset + bitSize) - primTypeBitSize; + if (CGOptions::IsBigEndian()) { + bitOffset = 0; + } + auto *extractedLowerBits = builder->CreateExprExtractbits(OP_extractbits, primType, + static_cast(bitOffset), bitSize - bitsRemained, bitField); + auto *bitFieldRemained = builder->CreateExprIreadoff(primType, + byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr); + auto *result = builder->CreateExprDepositbits(OP_depositbits, primType, bitSize - bitsRemained, bitsRemained, + extractedLowerBits, bitFieldRemained); + return result; +} + +BaseNode *CGLowerer::LowerDreadBitfield(DreadNode &dread) { + auto *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + auto *structTy = static_cast(symbol->GetType()); + auto fTyIdx = structTy->GetFieldTyIdx(dread.GetFieldID()); + auto *fType = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &dread; + } + auto *builder = mirModule.GetMIRBuilder(); + auto *baseAddr = builder->CreateExprAddrof(0, dread.GetStIdx()); + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dread.GetFieldID()); + return ReadBitField(byteBitOffsets, static_cast(fType), baseAddr); +} + +BaseNode *CGLowerer::LowerIreadBitfield(IreadNode &iread) { + uint32 index = iread.GetTyIdx(); + MIRPtrType *pointerTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(index)); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + /* Here pointed type can be Struct or JArray */ + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + TyIdx fTyIdx = structTy->GetFieldTyIdx(iread.GetFieldID()); + MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &iread; + } + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iread.GetFieldID()); + return ReadBitField(byteBitOffsets, static_cast(fType), iread.Opnd(0)); +} + +// input node must be cvt, retype, zext or sext +BaseNode *CGLowerer::LowerCastExpr(BaseNode &expr) { + if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2) { + BaseNode *simplified = MapleCastOpt::SimplifyCast(*mirBuilder, &expr); + return simplified != nullptr ? simplified : &expr; + } + return &expr; +} + +void CGLowerer::LowerTypePtr(BaseNode &node) const { + if ((node.GetPrimType() == PTY_ptr) || (node.GetPrimType() == PTY_ref)) { + node.SetPrimType(GetLoweredPtrType()); + } + + if (kOpcodeInfo.IsTypeCvt(node.GetOpCode())) { + auto &cvt = static_cast(node); + if ((cvt.FromType() == PTY_ptr) || (cvt.FromType() == PTY_ref)) { + cvt.SetFromType(GetLoweredPtrType()); + } + } else if (kOpcodeInfo.IsCompare(node.GetOpCode())) { + auto &cmp = static_cast(node); + if ((cmp.GetOpndType() == PTY_ptr) || (cmp.GetOpndType() == PTY_ref)) { + cmp.SetOpndType(GetLoweredPtrType()); + } + } +} + + +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 +BlockNode *CGLowerer::LowerReturnStructUsingFakeParm(NaryStmtNode &retNode) { + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + for (size_t i = 0; i < retNode.GetNopndSize(); ++i) { + retNode.SetOpnd(LowerExpr(retNode, *retNode.GetNopndAt(i), *blk), i); + } + BaseNode *opnd0 = retNode.Opnd(0); + if (!(opnd0 && opnd0->GetPrimType() == PTY_agg)) { + /* It is possible function never returns and have a dummy return const instead of a struct. */ + maple::LogInfo::MapleLogger(kLlWarn) << "return struct should have a kid" << std::endl; + } + + MIRFunction *curFunc = GetCurrentFunc(); + MIRSymbol *retSt = curFunc->GetFormal(0); + MIRPtrType *retTy = static_cast(retSt->GetType()); + IassignNode *iassign = mirModule.CurFuncCodeMemPool()->New(); + iassign->SetTyIdx(retTy->GetTypeIndex()); + DEBUG_ASSERT(opnd0 != nullptr, "opnd0 should not be nullptr"); + if ((beCommon.GetTypeSize(retTy->GetPointedTyIdx().GetIdx()) <= k16ByteSize) && (opnd0->GetPrimType() == PTY_agg)) { + /* struct goes into register. 
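+       Aggregates of at most k16ByteSize bytes are assumed returnable in
+       registers (the usual two-register limit on the 64-bit targets this
+       block is compiled for); larger ones are written through the fake
+       first formal that carries the result address.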
*/ + curFunc->SetStructReturnedInRegs(); + } + iassign->SetFieldID(0); + iassign->SetRHS(opnd0); + if (retSt->IsPreg()) { + RegreadNode *regNode = mirModule.GetMIRBuilder()->CreateExprRegread( + GetLoweredPtrType(), + curFunc->GetPregTab()->GetPregIdxFromPregno(retSt->GetPreg()->GetPregNo())); + iassign->SetOpnd(regNode, 0); + } else { + AddrofNode *dreadNode = mirModule.CurFuncCodeMemPool()->New(OP_dread); + dreadNode->SetPrimType(GetLoweredPtrType()); + dreadNode->SetStIdx(retSt->GetStIdx()); + iassign->SetOpnd(dreadNode, 0); + } + blk->AddStatement(iassign); + retNode.GetNopnd().clear(); + retNode.SetNumOpnds(0); + blk->AddStatement(&retNode); + return blk; +} + +#endif /* TARGARM32 || TARGAARCH64 || TARGX86_64 */ + +BlockNode *CGLowerer::LowerReturn(NaryStmtNode &retNode) { + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + if (retNode.NumOpnds() != 0) { + BaseNode *expr = retNode.Opnd(0); + Opcode opr = expr->GetOpCode(); + if (opr == OP_dread) { + AddrofNode *retExpr = static_cast(expr); + MIRFunction *mirFunc = mirModule.CurFunction(); + MIRSymbol *sym = mirFunc->GetLocalOrGlobalSymbol(retExpr->GetStIdx()); + if (sym->GetAttr(ATTR_localrefvar)) { + mirFunc->InsertMIRSymbol(sym); + } + } + } + for (size_t i = 0; i < retNode.GetNopndSize(); ++i) { + retNode.SetOpnd(LowerExpr(retNode, *retNode.GetNopndAt(i), *blk), i); + } + blk->AddStatement(&retNode); + return blk; +} + +StmtNode *CGLowerer::LowerDassignBitfield(DassignNode &dassign, BlockNode &newBlk) { + dassign.SetRHS(LowerExpr(dassign, *dassign.GetRHS(), newBlk)); + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + MIRStructType *structTy = static_cast(symbol->GetType()); + CHECK_FATAL(structTy != nullptr, "LowerDassignBitfield: non-zero fieldID for non-structure"); + TyIdx fTyIdx = structTy->GetFieldTyIdx(dassign.GetFieldID()); + CHECK_FATAL(fTyIdx != 0u, "LowerDassignBitField: field id out of range for the structure"); + MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &dassign; + } + auto *builder = mirModule.GetMIRBuilder(); + auto *baseAddr = builder->CreateExprAddrof(0, dassign.GetStIdx()); + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dassign.GetFieldID()); + return WriteBitField(byteBitOffsets, static_cast(fType), baseAddr, dassign.GetRHS(), &newBlk); +} + +StmtNode *CGLowerer::LowerIassignBitfield(IassignNode &iassign, BlockNode &newBlk) { + DEBUG_ASSERT(iassign.Opnd(0) != nullptr, "iassign.Opnd(0) should not be nullptr"); + iassign.SetOpnd(LowerExpr(iassign, *iassign.Opnd(0), newBlk), 0); + iassign.SetRHS(LowerExpr(iassign, *iassign.GetRHS(), newBlk)); + + CHECK_FATAL(iassign.GetTyIdx() < GlobalTables::GetTypeTable().GetTypeTable().size(), + "LowerIassignBitField: subscript out of range"); + uint32 index = iassign.GetTyIdx(); + MIRPtrType *pointerTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(index)); + CHECK_FATAL(pointerTy != nullptr, "LowerIassignBitField: type in iassign should be pointer type"); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + /* + * Here pointed type can be Struct or JArray + * We should seriously consider make JArray also a Struct type + */ + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + + TyIdx fTyIdx = structTy->GetFieldTyIdx(iassign.GetFieldID()); + MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &iassign; + } + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iassign.GetFieldID()); + auto *bitFieldType = static_cast(fType); + return WriteBitField(byteBitOffsets, bitFieldType, iassign.Opnd(0), iassign.GetRHS(), &newBlk); +} + +void CGLowerer::LowerIassign(IassignNode &iassign, BlockNode &newBlk) { + StmtNode *newStmt = nullptr; + if (iassign.GetFieldID() != 0) { + newStmt = LowerIassignBitfield(iassign, newBlk); + } else { + CHECK_FATAL(iassign.GetPrimType() != PTY_ptr, "should have been lowered already"); + CHECK_FATAL(iassign.GetPrimType() != PTY_ref, "should have been lowered already"); + LowerStmt(iassign, newBlk); + newStmt = &iassign; + } + newBlk.AddStatement(newStmt); +} + +static GStrIdx NewAsmTempStrIdx() { + static uint32 strIdxCount = 0; // to create unique temporary symbol names + std::string asmTempStr("asm_tempvar"); + (void)asmTempStr.append(std::to_string(++strIdxCount)); + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(asmTempStr); +} + +void CGLowerer::LowerAsmStmt(AsmNode *asmNode, BlockNode *newBlk) { + for (size_t i = 0; i < asmNode->NumOpnds(); i++) { + BaseNode *opnd = LowerExpr(*asmNode, *asmNode->Opnd(i), *newBlk); + if (opnd->NumOpnds() == 0) { + asmNode->SetOpnd(opnd, i); + continue; + } + // introduce a temporary to store the expression tree operand + TyIdx tyIdxUsed = static_cast(opnd->GetPrimType()); + if (opnd->op == OP_iread) { + IreadNode *ireadNode = static_cast(opnd); + tyIdxUsed = ireadNode->GetType()->GetTypeIndex(); + } + StmtNode *assignNode = nullptr; + BaseNode *readOpnd = nullptr; + PrimType type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdxUsed)->GetPrimType(); + if ((type != PTY_agg) && CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2) { + PregIdx pregIdx = mirModule.CurFunction()->GetPregTab()->CreatePreg(type); + assignNode = mirBuilder->CreateStmtRegassign(type, pregIdx, opnd); + readOpnd = mirBuilder->CreateExprRegread(type, pregIdx); + } else { + MIRSymbol *st = mirModule.GetMIRBuilder()->CreateSymbol(tyIdxUsed, NewAsmTempStrIdx(), + kStVar, kScAuto, mirModule.CurFunction(), kScopeLocal); + assignNode = mirModule.GetMIRBuilder()->CreateStmtDassign(*st, 0, opnd); + readOpnd = mirBuilder->CreateExprDread(*st); + } + newBlk->AddStatement(assignNode); + asmNode->SetOpnd(readOpnd, i); + } + newBlk->AddStatement(asmNode); +} + +DassignNode *CGLowerer::SaveReturnValueInLocal(StIdx stIdx, uint16 fieldID) { + MIRSymbol *var; + if (stIdx.IsGlobal()) { + var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } else { + var = GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx()); + } + CHECK_FATAL(var != nullptr, "var should not be nullptr"); + PrimType pType; + if (var->GetAttr(ATTR_oneelem_simd)) { + pType = PTY_f64; + } else { + pType = GlobalTables::GetTypeTable().GetTypeTable().at(var->GetTyIdx())->GetPrimType(); + } + RegreadNode *regRead = mirModule.GetMIRBuilder()->CreateExprRegread(pType, -kSregRetval0); + return mirModule.GetMIRBuilder()->CreateStmtDassign(*var, fieldID, regRead); +} + +BaseNode *CGLowerer::LowerRem(BaseNode &expr, BlockNode &blk) { + auto &remExpr = static_cast(expr); + if (!IsPrimitiveFloat(remExpr.GetPrimType())) { + return &expr; + } + ExtFuncT fmodFunc = 
remExpr.GetPrimType() == PTY_f32 ? kFmodFloat : kFmodDouble; + uint32 i = 0; + for (; i < extFuncs.size(); ++i) { + if (extFuncs[i].first == fmodFunc) { + break; + } + } + CHECK_FATAL(i < extFuncs.size(), "rem expression primtype is not PTY_f32 nor PTY_f64."); + MIRSymbol *ret = CreateNewRetVar(*GlobalTables::GetTypeTable().GetPrimType(remExpr.GetPrimType()), + kIntrnRetValPrefix); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(remExpr.Opnd(0)); + args.emplace_back(remExpr.Opnd(1)); + CallNode *callStmt = mirModule.GetMIRBuilder()->CreateStmtCallAssigned(extFuncs[i].second, args, ret); + blk.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callStmt)); + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(extFnDescrs[fmodFunc].retType); + return mirModule.GetMIRBuilder()->CreateExprDread(*type, 0, *ret); +} + +/* to lower call (including icall) and intrinsicall statements */ +void CGLowerer::LowerCallStmt(StmtNode &stmt, StmtNode *&nextStmt, BlockNode &newBlk, MIRType *retty, bool uselvar, + bool isIntrinAssign) { + StmtNode *newStmt = nullptr; + if (stmt.GetOpCode() == OP_intrinsiccall) { + auto &intrnNode = static_cast(stmt); + newStmt = LowerIntrinsiccall(intrnNode, newBlk); + } else { + /* We note the function has a user-defined (i.e., not an intrinsic) call. */ + GetCurrentFunc()->SetHasCall(); + newStmt = &stmt; + } + + if (newStmt == nullptr) { + return; + } + + if (newStmt->GetOpCode() == OP_call || newStmt->GetOpCode() == OP_icall || newStmt->GetOpCode() == OP_icallproto) { + newStmt = LowerCall(static_cast(*newStmt), nextStmt, newBlk, retty, uselvar); + } + newStmt->SetSrcPos(stmt.GetSrcPos()); + newBlk.AddStatement(newStmt); + if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2 && stmt.GetOpCode() == OP_intrinsiccall) { + /* Try to expand memset and memcpy call lowered from intrinsiccall */ + /* Skip expansion if call returns a value that is used later. */ + BlockNode *blkLowered = isIntrinAssign ? 
nullptr : LowerMemop(*newStmt); + if (blkLowered != nullptr) { + newBlk.RemoveStmt(newStmt); + newBlk.AppendStatementsFromBlock(*blkLowered); + } + } +} + +StmtNode *CGLowerer::GenCallNode(const StmtNode &stmt, PUIdx &funcCalled, CallNode& origCall) { + CallNode *newCall = nullptr; + if (stmt.GetOpCode() == OP_callassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } else if (stmt.GetOpCode() == OP_virtualcallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtVirtualCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } else if (stmt.GetOpCode() == OP_superclasscallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtSuperclassCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } else if (stmt.GetOpCode() == OP_interfacecallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtInterfaceCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } + newCall->SetDeoptBundleInfo(origCall.GetDeoptBundleInfo()); + newCall->SetSrcPos(stmt.GetSrcPos()); + CHECK_FATAL(newCall != nullptr, "nullptr is not expected"); + funcCalled = origCall.GetPUIdx(); + CHECK_FATAL((newCall->GetOpCode() == OP_call || newCall->GetOpCode() == OP_interfacecall), + "virtual call or super class call are not expected"); + if (newCall->GetOpCode() == OP_interfacecall) { + std::cerr << "interfacecall found\n"; + } + newCall->SetStmtAttrs(stmt.GetStmtAttrs()); + return newCall; +} + +StmtNode *CGLowerer::GenIntrinsiccallNode(const StmtNode &stmt, PUIdx &funcCalled, bool &handledAtLowerLevel, + IntrinsiccallNode &origCall) { + StmtNode *newCall = nullptr; + handledAtLowerLevel = IsIntrinsicCallHandledAtLowerLevel(origCall.GetIntrinsic()); + if (handledAtLowerLevel) { + /* If the lower level can handle the intrinsic, just let it pass through. 
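+     Otherwise it is rewritten below: either into a plain call to a runtime
+     builtin resolved via GetBuiltinToUse (e.g. the INTRN_JAVA_* entries
+     mapped to MCC_* helpers in cgBuiltins), or kept as a regular
+     intrinsiccall statement.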
+    newCall = &origCall;
+  } else {
+    PUIdx bFunc = GetBuiltinToUse(origCall.GetIntrinsic());
+    if (bFunc != kFuncNotFound) {
+      newCall = mirModule.GetMIRBuilder()->CreateStmtCall(bFunc, origCall.GetNopnd());
+    } else {
+      if (stmt.GetOpCode() == OP_intrinsiccallassigned) {
+        newCall = mirModule.GetMIRBuilder()->CreateStmtIntrinsicCall(origCall.GetIntrinsic(), origCall.GetNopnd());
+      } else if (stmt.GetOpCode() == OP_xintrinsiccallassigned) {
+        newCall = mirModule.GetMIRBuilder()->CreateStmtXintrinsicCall(origCall.GetIntrinsic(), origCall.GetNopnd());
+      } else {
+        newCall = mirModule.GetMIRBuilder()->CreateStmtIntrinsicCall(origCall.GetIntrinsic(), origCall.GetNopnd(),
+                                                                     origCall.GetTyIdx());
+      }
+    }
+    newCall->SetSrcPos(stmt.GetSrcPos());
+    funcCalled = bFunc;
+    CHECK_FATAL((newCall->GetOpCode() == OP_call || newCall->GetOpCode() == OP_intrinsiccall),
+                "xintrinsic and intrinsiccallwithtype call is not expected");
+  }
+  return newCall;
+}
+
+StmtNode *CGLowerer::GenIcallNode(PUIdx &funcCalled, IcallNode &origCall) {
+  IcallNode *newCall = nullptr;
+  if (origCall.GetOpCode() == OP_icallassigned) {
+    newCall = mirModule.GetMIRBuilder()->CreateStmtIcall(origCall.GetNopnd());
+  } else {
+    newCall = mirModule.GetMIRBuilder()->CreateStmtIcallproto(origCall.GetNopnd());
+    newCall->SetRetTyIdx(static_cast<IcallNode&>(origCall).GetRetTyIdx());
+  }
+  /* check before the first dereference, not after it */
+  CHECK_FATAL(newCall != nullptr, "nullptr is not expected");
+  newCall->SetDeoptBundleInfo(origCall.GetDeoptBundleInfo());
+  newCall->SetStmtAttrs(origCall.GetStmtAttrs());
+  newCall->SetSrcPos(origCall.GetSrcPos());
+  funcCalled = kFuncNotFound;
+  return newCall;
+}
+
+BlockNode *CGLowerer::GenBlockNode(StmtNode &newCall, const CallReturnVector &p2nRets, const Opcode &opcode,
+                                   const PUIdx &funcCalled, bool handledAtLowerLevel, bool uselvar) {
+  BlockNode *blk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  blk->AddStatement(&newCall);
+  if (!handledAtLowerLevel) {
+    CHECK_FATAL(p2nRets.size() <= 1, "make sure p2nRets size <= 1");
+    /* Create DassignStmt to save kSregRetval0. */
+    StmtNode *dStmt = nullptr;
+    MIRType *retType = nullptr;
+    if (p2nRets.size() == 1) {
+      MIRSymbol *sym = nullptr;
+      StIdx stIdx = p2nRets[0].first;
+      if (stIdx.IsGlobal()) {
+        sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+      } else {
+        sym = GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx());
+      }
+      bool sizeIs0 = false;
+      if (sym != nullptr) {
+        retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx());
+        if (beCommon.GetTypeSize(retType->GetTypeIndex().GetIdx()) == 0) {
+          sizeIs0 = true;
+        }
+      }
+      if (!sizeIs0) {
+        RegFieldPair regFieldPair = p2nRets[0].second;
+        if (!regFieldPair.IsReg()) {
+          uint16 fieldID = static_cast<uint16>(regFieldPair.GetFieldID());
+          DassignNode *dn = SaveReturnValueInLocal(stIdx, fieldID);
+          CHECK_FATAL(dn->GetFieldID() == 0, "make sure dn's fieldID return 0");
+          LowerDassign(*dn, *blk);
+          CHECK_FATAL(&newCall == blk->GetLast() || newCall.GetNext() == blk->GetLast(), "");
+          dStmt = (&newCall == blk->GetLast()) ? nullptr : blk->GetLast();
+          CHECK_FATAL(newCall.GetNext() == dStmt, "make sure newCall's next equal dStmt");
+        } else {
+          PregIdx pregIdx = static_cast<PregIdx>(regFieldPair.GetPregIdx());
+          MIRPreg *mirPreg = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(pregIdx);
+          bool is64x1vec = beCommon.CallIsOfAttr(FUNCATTR_oneelem_simd, &newCall);
+          PrimType pType = is64x1vec ? PTY_f64 : mirPreg->GetPrimType();
+          RegreadNode *regNode = mirModule.GetMIRBuilder()->CreateExprRegread(pType, -kSregRetval0);
+          RegassignNode *regAssign;
+          if (is64x1vec && IsPrimitiveInteger(mirPreg->GetPrimType())) {  // not f64
+            MIRType *to;
+            if (IsUnsignedInteger(mirPreg->GetPrimType())) {
+              to = GlobalTables::GetTypeTable().GetUInt64();
+            } else {
+              to = GlobalTables::GetTypeTable().GetInt64();
+            }
+            MIRType *from = GlobalTables::GetTypeTable().GetDouble();
+            BaseNode *rNode = mirModule.GetMIRBuilder()->CreateExprRetype(*to, *from, regNode);
+            regAssign = mirModule.GetMIRBuilder()->CreateStmtRegassign(mirPreg->GetPrimType(),
+                                                                       regFieldPair.GetPregIdx(), rNode);
+          } else {
+            regAssign = mirModule.GetMIRBuilder()->CreateStmtRegassign(mirPreg->GetPrimType(),
+                                                                       regFieldPair.GetPregIdx(), regNode);
+          }
+          blk->AddStatement(regAssign);
+          dStmt = regAssign;
+        }
+      }
+    }
+    blk->ResetBlock();
+    /* if VerboseCG, insert a comment */
+    if (ShouldAddAdditionalComment()) {
+      CommentNode *cmnt = mirModule.CurFuncCodeMemPool()->New<CommentNode>(mirModule);
+      cmnt->SetComment(kOpcodeInfo.GetName(opcode).c_str());
+      if (funcCalled == kFuncNotFound) {
+        cmnt->Append(" : unknown");
+      } else {
+        cmnt->Append(" : ");
+        cmnt->Append(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcCalled)->GetName());
+      }
+      blk->AddStatement(cmnt);
+    }
+    CHECK_FATAL(dStmt == nullptr || dStmt->GetNext() == nullptr, "make sure dStmt or dStmt's next is nullptr");
+    LowerCallStmt(newCall, dStmt, *blk, retType, uselvar ? true : false, opcode == OP_intrinsiccallassigned);
+    if (!uselvar && dStmt != nullptr) {
+      dStmt->SetSrcPos(newCall.GetSrcPos());
+      blk->AddStatement(dStmt);
+    }
+  }
+  return blk;
+}
+
+// try to expand memset and memcpy
+BlockNode *CGLowerer::LowerMemop(StmtNode &stmt) {
+  auto memOpKind = SimplifyMemOp::ComputeMemOpKind(stmt);
+  if (memOpKind == MEM_OP_unknown) {
+    return nullptr;
+  }
+  auto *prev = stmt.GetPrev();
+  auto *next = stmt.GetNext();
+  auto *blk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  blk->AddStatement(&stmt);
+  uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize();
+  bool success = simplifyMemOp.AutoSimplify(stmt, *blk, true);
+  uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize();
+  if (newTypeTableSize != oldTypeTableSize) {
+    beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize);
+  }
+  stmt.SetPrev(prev);
+  stmt.SetNext(next);  // recover callStmt's position
+  if (!success) {
+    return nullptr;
+  }
+  // lower new generated stmts
+  auto *currStmt = blk->GetFirst();
+  while (currStmt != nullptr) {
+    auto *nextStmt = currStmt->GetNext();
+    for (uint32 i = 0; i < currStmt->NumOpnds(); ++i) {
+      currStmt->SetOpnd(LowerExpr(*currStmt, *currStmt->Opnd(i), *blk), i);
+    }
+    currStmt = nextStmt;
+  }
+  return blk;
+}
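Annotation (not part of the patch): `LowerMemop` above hands small `memset`/`memcpy` intrinsic calls to `SimplifyMemOp` so they can be expanded inline at -O2. A standalone sketch of the before/after effect; the struct and function names are invented for the example:

```
#include <cstdint>
#include <cstring>

struct Pair { uint64_t a; uint64_t b; };

void CopyWithCall(Pair &dst, const Pair &src) {
    std::memcpy(&dst, &src, sizeof(Pair));  // before expansion: a real memcpy call
}

void CopyExpanded(Pair &dst, const Pair &src) {
    dst.a = src.a;  // after expansion: two 8-byte stores, no call overhead
    dst.b = src.b;
}
```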
+
+BlockNode *CGLowerer::LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall) {
+  auto *builder = mirModule.GetMIRBuilder();
+  auto *block = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  auto intrinsicID = intrinsicCall.GetIntrinsic();
+  auto &opndVector = intrinsicCall.GetNopnd();
+  auto returnPair = intrinsicCall.GetReturnVec().begin();
+  auto regFieldPair = returnPair->second;
+  if (regFieldPair.IsReg()) {
+    auto regIdx = regFieldPair.GetPregIdx();
+    auto primType = mirModule.CurFunction()->GetPregItem(static_cast<PregIdx>(regIdx))->GetPrimType();
+    auto intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, primType, TyIdx(0), opndVector);
+    auto regAssign = builder->CreateStmtRegassign(primType, regIdx, intrinsicOp);
+    block->AddStatement(regAssign);
+  } else {
+    auto fieldID = regFieldPair.GetFieldID();
+    auto stIdx = returnPair->first;
+    auto *type = mirModule.CurFunction()->GetLocalOrGlobalSymbol(stIdx)->GetType();
+    auto intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, *type, opndVector);
+    auto dAssign = builder->CreateStmtDassign(stIdx, fieldID, intrinsicOp);
+    block->AddStatement(dAssign);
+  }
+  return LowerBlock(*block);
+}
+
+BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) {
+  StmtNode *newCall = nullptr;
+  CallReturnVector *p2nRets = nullptr;
+  PUIdx funcCalled = kFuncNotFound;
+  bool handledAtLowerLevel = false;
+  switch (stmt.GetOpCode()) {
+    case OP_callassigned:
+    case OP_virtualcallassigned:
+    case OP_superclasscallassigned:
+    case OP_interfacecallassigned: {
+      if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2) {
+        BlockNode *blkLowered = LowerMemop(stmt);
+        if (blkLowered != nullptr) {
+          return blkLowered;
+        }
+      }
+      auto &origCall = static_cast<CallNode&>(stmt);
+      newCall = GenCallNode(stmt, funcCalled, origCall);
+      p2nRets = &origCall.GetReturnVec();
+      static_cast<CallNode*>(newCall)->SetReturnVec(*p2nRets);
+      MIRFunction *curFunc = mirModule.CurFunction();
+      curFunc->SetLastFreqMap(newCall->GetStmtID(),
+                              static_cast<uint32>(curFunc->GetFreqFromLastStmt(stmt.GetStmtID())));
+      break;
+    }
+    case OP_intrinsiccallassigned:
+    case OP_xintrinsiccallassigned: {
+      IntrinsiccallNode &intrincall = static_cast<IntrinsiccallNode&>(stmt);
+      auto intrinsicID = intrincall.GetIntrinsic();
+      if (IntrinDesc::intrinTable[intrinsicID].IsAtomic()) {
+        return LowerIntrinsiccallAassignedToAssignStmt(intrincall);
+      }
+      if (intrinsicID == INTRN_JAVA_POLYMORPHIC_CALL) {
+        BaseNode *contextClassArg = GetBaseNodeFromCurFunc(*mirModule.CurFunction(), false);
+        constexpr int kContextIdx = 4; /* stable index in MCC_DexPolymorphicCall, never out of range */
+        intrincall.InsertOpnd(contextClassArg, kContextIdx);
+
+        BaseNode *firstArg = intrincall.GetNopndAt(0);
+        BaseNode *baseVal = mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetPtr(), firstArg,
+                                                         mirBuilder->CreateIntConst(1, PTY_ref));
+        intrincall.SetNOpndAt(0, baseVal);
+      }
+      newCall = GenIntrinsiccallNode(stmt, funcCalled, handledAtLowerLevel, intrincall);
+      p2nRets = &intrincall.GetReturnVec();
+      static_cast<IntrinsiccallNode*>(newCall)->SetReturnVec(*p2nRets);
+      break;
+    }
+    case OP_intrinsiccallwithtypeassigned: {
+      auto &origCall = static_cast<IntrinsiccallNode&>(stmt);
+      newCall = GenIntrinsiccallNode(stmt, funcCalled, handledAtLowerLevel, origCall);
+      p2nRets = &origCall.GetReturnVec();
+      static_cast<IntrinsiccallNode*>(newCall)->SetReturnVec(*p2nRets);
+      break;
+    }
+    case OP_icallprotoassigned:
+    case OP_icallassigned: {
+      auto &origCall = static_cast<IcallNode&>(stmt);
+      newCall = GenIcallNode(funcCalled, origCall);
+      p2nRets = &origCall.GetReturnVec();
+      static_cast<IcallNode*>(newCall)->SetReturnVec(*p2nRets);
+      break;
+    }
+    default:
+      CHECK_FATAL(false, "NIY");
+      return nullptr;
+  }
+
+  /* transfer srcPosition location info */
+  newCall->SetSrcPos(stmt.GetSrcPos());
+  return GenBlockNode(*newCall, *p2nRets, stmt.GetOpCode(), funcCalled, handledAtLowerLevel, uselvar);
+}
+
+#if TARGAARCH64
+static PrimType IsStructElementSame(MIRType *ty) {
+  if (ty->GetKind() == kTypeArray) {
+    MIRArrayType *arrtype = static_cast<MIRArrayType*>(ty);
+    MIRType *pty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrtype->GetElemTyIdx());
+    if (pty->GetKind() == kTypeArray || pty->GetKind() == kTypeStruct) {
+      return IsStructElementSame(pty);
+    }
+    return pty->GetPrimType();
+  } else if (ty->GetKind() == kTypeStruct) {
+    MIRStructType *sttype = static_cast<MIRStructType*>(ty);
+    FieldVector fields = sttype->GetFields();
+    PrimType oldtype = PTY_void;
+    for (uint32 fcnt = 0; fcnt < fields.size(); ++fcnt) {
+      TyIdx fieldtyidx = fields[fcnt].second.first;
+      MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx);
+      PrimType ptype = IsStructElementSame(fieldty);
+      if (oldtype != PTY_void && oldtype != ptype) {
+        return PTY_void;
+      } else {
+        oldtype = ptype;
+      }
+    }
+    return oldtype;
+  } else {
+    return ty->GetPrimType();
+  }
+}
+#endif
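Annotation (not part of the patch): `IsStructElementSame` reports the common primitive type of an aggregate's leaves. The struct-return lowering below bails out when that type is floating-point or a vector, because such homogeneous aggregates are returned in FP/SIMD registers on AArch64 rather than in x0/x1. A hedged illustration of the property, with types invented for the example:

```
// Leaf-type homogeneity as IsStructElementSame sees it:
struct AllF32 { float x, y, z; };        // every leaf is f32       -> PTY_f32
struct Nested { float a[2]; float b; };  // still only f32 leaves   -> PTY_f32
struct Mixed  { float x; int y; };       // f32 vs i32 disagreement -> PTY_void
```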
+
+// return true if successfully lowered; nextStmt is in/out, and is made to point
+// to its following statement if lowering of the struct return is successful
+bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode *stmt,
+                                  StmtNode *&nextStmt, bool &lvar, BlockNode *oldBlk) {
+  if (!nextStmt) {
+    return false;
+  }
+  CallReturnVector *p2nrets = stmt->GetCallReturnVector();
+  if (p2nrets->size() == 0) {
+    return false;
+  }
+  CallReturnPair retPair = (*p2nrets)[0];
+  if (retPair.second.IsReg()) {
+    return false;
+  }
+  MIRSymbol *retSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(retPair.first);
+  if (retSym->GetType()->GetPrimType() != PTY_agg) {
+    return false;
+  }
+  if (nextStmt->op != OP_dassign) {
+    // introduce a temporary and insert a dassign whose rhs is this temporary
+    // and whose lhs is retSym
+    MIRSymbol *temp = CreateNewRetVar(*retSym->GetType(), kUserRetValPrefix);
+    BaseNode *rhs = mirModule.GetMIRBuilder()->CreateExprDread(*temp->GetType(), 0, *temp);
+    DassignNode *dass = mirModule.GetMIRBuilder()->CreateStmtDassign(retPair.first,
+                                                                     retPair.second.GetFieldID(), rhs);
+    oldBlk->InsertBefore(nextStmt, dass);
+    nextStmt = dass;
+    // update CallReturnVector to the new temporary
+    (*p2nrets)[0].first = temp->GetStIdx();
+    (*p2nrets)[0].second.SetFieldID(0);
+  }
+  // now, it is certain that nextStmt is a dassign
+  BaseNode *bnode = static_cast<DassignNode*>(nextStmt)->GetRHS();
+  if (bnode->GetOpCode() != OP_dread) {
+    return false;
+  }
+  DreadNode *dnode = static_cast<DreadNode*>(bnode);
+  MIRType *dtype = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dnode->GetStIdx())->GetType();
+#if TARGAARCH64
+  PrimType ty = IsStructElementSame(dtype);
+  if (ty == PTY_f32 || ty == PTY_f64 || IsPrimitiveVector(ty)) {
+    return false;
+  }
+#endif
+  if (dnode->GetPrimType() != PTY_agg) {
+    return false;
+  }
+  CallReturnPair pair = (*p2nrets)[0];
+  if (pair.first != dnode->GetStIdx() || pair.second.GetFieldID() != dnode->GetFieldID()) {
+    return false;
+  }
+  auto *dnodeStmt = static_cast<DassignNode*>(nextStmt);
+  if (dnodeStmt->GetFieldID() != 0) {
+    return false;
+  }
+  if (dtype->GetSize() > k16ByteSize) {
+    (*p2nrets)[0].first = dnodeStmt->GetStIdx();
+    (*p2nrets)[0].second.SetFieldID(dnodeStmt->GetFieldID());
+    lvar = true;
+    // set ATTR_firstarg_return for callee
+    if (stmt->GetOpCode() == OP_callassigned) {
+      CallNode *callNode = static_cast<CallNode*>(stmt);
+      MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx());
+      f->SetFirstArgReturn();
+      f->GetMIRFuncType()->SetFirstArgReturn();
+    } else {
+      // for icall, front-end already set ATTR_firstarg_return
+    }
+  } else { /* struct <= 16 passed in regs lowered into
+              call &foo
+              regassign u64 %1 (regread u64 %%retval0)
+              regassign ptr %2 (addrof ptr $s)
+              iassign <* u64> 0 (regread ptr %2, regread u64 %1) */
+    MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dnodeStmt->GetStIdx());
+    auto *structType = static_cast<MIRStructType*>(symbol->GetType());
+    auto size = static_cast<uint32>(structType->GetSize());
+    if (stmt->GetOpCode() == OP_callassigned) {
+      auto *callNode = static_cast<CallNode*>(stmt);
+      for (size_t i = 0; i < callNode->GetNopndSize(); ++i) {
+        BaseNode *newOpnd = LowerExpr(*callNode, *callNode->GetNopndAt(i), newBlk);
+        callNode->SetOpnd(newOpnd, i);
+      }
+      CallNode *callStmt = mirModule.GetMIRBuilder()->CreateStmtCall(callNode->GetPUIdx(), callNode->GetNopnd());
+      callStmt->SetSrcPos(callNode->GetSrcPos());
+      newBlk.AddStatement(callStmt);
+    } else if (stmt->GetOpCode() == OP_icallassigned || stmt->GetOpCode() == OP_icallprotoassigned) {
+      auto *icallNode = static_cast<IcallNode*>(stmt);
+      for (size_t i = 0; i < icallNode->GetNopndSize(); ++i) {
+        BaseNode *newOpnd = LowerExpr(*icallNode, *icallNode->GetNopndAt(i), newBlk);
+        icallNode->SetOpnd(newOpnd, i);
+      }
+      IcallNode *icallStmt = nullptr;
+      if (stmt->GetOpCode() == OP_icallassigned) {
+        icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcall(icallNode->GetNopnd());
+      } else {
+        icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcallproto(icallNode->GetNopnd());
+        icallStmt->SetRetTyIdx(icallNode->GetRetTyIdx());
+      }
+      icallStmt->SetSrcPos(icallNode->GetSrcPos());
+      newBlk.AddStatement(icallStmt);
+    } else {
+      return false;
+    }
+
+    uint32 origSize = size;
+    PregIdx pIdxR, pIdx1R, pIdx2R;
+    StmtNode *aStmt = nullptr;
+    RegreadNode *reg = nullptr;
+
+    /* save x0 */
+    reg = mirBuilder->CreateExprRegread(PTY_u64, -kSregRetval0);
+    pIdx1R = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64);
+    aStmt = mirBuilder->CreateStmtRegassign(PTY_u64, pIdx1R, reg);
+    newBlk.AddStatement(aStmt);
+
+    /* save x1 */
+    if (origSize > k8ByteSize) {
+      reg = mirBuilder->CreateExprRegread(PTY_u64, -kSregRetval1);
+      pIdx2R = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64);
+      aStmt = mirBuilder->CreateStmtRegassign(PTY_u64, pIdx2R, reg);
+      newBlk.AddStatement(aStmt);
+    }
+
+    /* save &s */
+    BaseNode *regAddr = mirBuilder->CreateExprAddrof(0, *symbol);
+    LowerTypePtr(*regAddr);
+    PregIdx pIdxL = GetCurrentFunc()->GetPregTab()->CreatePreg(GetLoweredPtrType());
+    aStmt = mirBuilder->CreateStmtRegassign(PTY_a64, pIdxL, regAddr);
+    newBlk.AddStatement(aStmt);
+
+    uint32 curSize = 0;
+    PregIdx pIdxS;
+    while (size) {
+      pIdxR = pIdx1R;
+      if (curSize >= k8ByteSize) {
+        pIdxR = pIdx2R;
+      }
+      BaseNode *addr;
+      BaseNode *shift;
+      BaseNode *regreadExp;
+      if (origSize != size) {
+        MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetLoweredPtrType());
+        addr = mirBuilder->CreateExprBinary(OP_add, *addrType,
+                                            mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL),
+                                            mirBuilder->CreateIntConst(origSize - size, PTY_i32));
+      } else {
+        addr = mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL);
+      }
+      if (size >= k8ByteSize) {
+        aStmt = mirBuilder->CreateStmtIassign(
+            *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt64()),
+            0, addr, mirBuilder->CreateExprRegread(PTY_u64, pIdxR));
+        size -= k8ByteSize;
+        curSize += k8ByteSize;
+      } else if (size >= k4ByteSize) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64);
+
+        if (CGOptions::IsBigEndian()) {
+          regreadExp = mirBuilder->CreateExprBinary(
+              OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+              mirBuilder->CreateIntConst(k64BitSize - k32BitSize, PTY_i32));
+        } else {
+          regreadExp = mirBuilder->CreateExprRegread(PTY_u32, pIdxR);
+        }
+
+        aStmt = mirBuilder->CreateStmtIassign(
+            *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt32()),
+            0, addr, regreadExp);
+
+        if (CGOptions::IsBigEndian()) {
+          shift = mirBuilder->CreateExprBinary(OP_shl, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                               mirBuilder->CreateIntConst(k32BitSize, PTY_i32));
+        } else {
+          shift = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                               mirBuilder->CreateIntConst(k32BitSize, PTY_i32));
+        }
+
+        pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64);
+        StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift);
+
+        pIdx1R = pIdx2R = pIdxS;
+        newBlk.AddStatement(sStmp);
+        size -= k4ByteSize;
+        curSize += k4ByteSize;
+      } else if (size >= k2ByteSize) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64);
+
+        if (CGOptions::IsBigEndian()) {
+          regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                                    mirBuilder->CreateIntConst(k64BitSize - k16BitSize, PTY_i32));
+        } else {
+          regreadExp = mirBuilder->CreateExprRegread(PTY_u16, pIdxR);
+        }
+
+        aStmt = mirBuilder->CreateStmtIassign(
+            *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt16()),
+            0, addr, regreadExp);
+
+        if (CGOptions::IsBigEndian()) {
+          shift = mirBuilder->CreateExprBinary(OP_shl, *type,
+                                               mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                               mirBuilder->CreateIntConst(k64BitSize - k16BitSize, PTY_i32));
+        } else {
+          shift = mirBuilder->CreateExprBinary(OP_lshr, *type,
+                                               mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                               mirBuilder->CreateIntConst(k16BitSize, PTY_i32));
+        }
+
+        pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64);
+        StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift);
+
+        pIdx1R = pIdx2R = pIdxS;
+        newBlk.AddStatement(sStmp);
+        size -= k2ByteSize;
+        curSize += k2ByteSize;
+      } else {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64);
+
+        if (CGOptions::IsBigEndian()) {
+          regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                                    mirBuilder->CreateIntConst(k64BitSize - k8BitSize, PTY_i32));
+        } else {
+          regreadExp = mirBuilder->CreateExprRegread(PTY_u8, pIdxR);
+        }
+
+        aStmt = mirBuilder->CreateStmtIassign(
+            *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()),
+            0, addr, regreadExp);
+
+        if (CGOptions::IsBigEndian()) {
+          shift = mirBuilder->CreateExprBinary(OP_shl, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                               mirBuilder->CreateIntConst(k64BitSize - k8BitSize, PTY_i32));
+        } else {
+          shift = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR),
+                                               mirBuilder->CreateIntConst(k8BitSize, PTY_i32));
+        }
+
+        pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64);
+        StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift);
+
+        pIdx1R = pIdx2R = pIdxS;
+        newBlk.AddStatement(sStmp);
+        size -= k1ByteSize;
+        curSize += k1ByteSize;
+      }
+      newBlk.AddStatement(aStmt);
+    }
+  }
+  nextStmt = nextStmt->GetNext();  // skip the dassign
+  return true;
+}
+
+void CGLowerer::LowerStmt(StmtNode &stmt, BlockNode &newBlk) {
+  CHECK_FATAL(stmt.GetPrimType() != PTY_ptr, "should have been lowered already");
+  CHECK_FATAL(stmt.GetPrimType() != PTY_ref, "should have been lowered already");
+  for (size_t i = 0; i < stmt.NumOpnds(); ++i) {
+    stmt.SetOpnd(LowerExpr(stmt, *stmt.Opnd(i), newBlk), i);
+  }
+}
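Annotation (not part of the patch): the small-struct branch of `LowerStructReturn` copies the two 64-bit result registers into the destination in 8/4/2/1-byte chunks, shifting the register value down as bytes are consumed (little-endian case). A standalone C++ model of that loop, not the lowerer's actual API:

```
#include <cstdint>
#include <cstring>

// Models the while (size) loop above: regs[0]/regs[1] stand for x0/x1.
void StoreResult(unsigned char *dst, uint64_t x0, uint64_t x1, unsigned size) {
    uint64_t regs[2] = { x0, x1 };
    unsigned off = 0;
    while (size != 0) {
        uint64_t &r = regs[off >= 8 ? 1 : 0];
        unsigned chunk = (size >= 8) ? 8 : (size >= 4) ? 4 : (size >= 2) ? 2 : 1;
        std::memcpy(dst + off, &r, chunk);  // the iassign of the low chunk bits
        if (chunk < 8) {
            r >>= chunk * 8;  // the OP_lshr that re-positions the remaining bytes
        }
        off += chunk;
        size -= chunk;
    }
}
```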
+
+void CGLowerer::LowerSwitchOpnd(StmtNode &stmt, BlockNode &newBlk) {
+  BaseNode *opnd = LowerExpr(stmt, *stmt.Opnd(0), newBlk);
+  if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2 && opnd->GetOpCode() != OP_regread) {
+    PrimType ptyp = stmt.Opnd(0)->GetPrimType();
+    PregIdx pIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(ptyp);
+    RegassignNode *regAss = mirBuilder->CreateStmtRegassign(ptyp, pIdx, opnd);
+    newBlk.AddStatement(regAss);
+    GetCurrentFunc()->SetLastFreqMap(regAss->GetStmtID(),
+                                     static_cast<uint32>(GetCurrentFunc()->GetFreqFromLastStmt(stmt.GetStmtID())));
+    stmt.SetOpnd(mirBuilder->CreateExprRegread(ptyp, pIdx), 0);
+  } else {
+    stmt.SetOpnd(LowerExpr(stmt, *stmt.Opnd(0), newBlk), 0);
+  }
+}
+
+void CGLowerer::AddElemToPrintf(MapleVector<BaseNode*> &argsPrintf, int num, ...) const {
+  va_list argPtr;
+  va_start(argPtr, num);
+  for (int i = 0; i < num; ++i) {
+    argsPrintf.push_back(va_arg(argPtr, BaseNode*));
+  }
+  va_end(argPtr);
+}
+
+void CGLowerer::SwitchAssertBoundary(StmtNode &stmt, MapleVector<BaseNode*> &argsPrintf) {
+  MIRSymbol *errMsg;
+  MIRSymbol *fileNameSym;
+  ConstvalNode *lineNum;
+  fileNameSym = mirBuilder->CreateConstStringSymbol(GetFileNameSymbolName(AssertBoundaryGetFileName(stmt)),
+                                                    AssertBoundaryGetFileName(stmt));
+  lineNum = mirBuilder->CreateIntConst(stmt.GetSrcPos().LineNum(), PTY_u32);
+  if (kOpcodeInfo.IsAssertLowerBoundary(stmt.GetOpCode())) {
+    errMsg = mirBuilder->CreateConstStringSymbol(kOpAssertge,
+        "%s:%d error: the pointer < the lower bounds when accessing the memory!\n");
+    AddElemToPrintf(argsPrintf, 3, mirBuilder->CreateAddrof(*errMsg, PTY_a64),
+                    mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum);
+  } else {
+    if (kOpcodeInfo.IsAssertLeBoundary(stmt.GetOpCode())) {
+      if (stmt.GetOpCode() == OP_callassertle) {
+        auto &callStmt = static_cast<CallAssertBoundaryStmtNode&>(stmt);
+        std::string param;
+        MIRSymbol *funcName;
+        MIRSymbol *paramNum;
+        param = maple::GetNthStr(callStmt.GetParamIndex());
+        errMsg = mirBuilder->CreateConstStringSymbol(kOpCallAssertle,
+            "%s:%d error: the pointer's bounds does not match the function %s declaration for the %s argument!\n");
+        funcName = mirBuilder->CreateConstStringSymbol(callStmt.GetFuncName() + kOpCallAssertle,
+                                                       callStmt.GetFuncName());
+        paramNum = mirBuilder->CreateConstStringSymbol(kOpCallAssertle + param, param);
+        AddElemToPrintf(argsPrintf, 5, mirBuilder->CreateAddrof(*errMsg, PTY_a64),
+                        mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum,
+                        mirBuilder->CreateAddrof(*funcName, PTY_a64),
+                        mirBuilder->CreateAddrof(*paramNum, PTY_a64));
+      } else if (stmt.GetOpCode() == OP_returnassertle) {
+        auto &callStmt = static_cast<CallAssertBoundaryStmtNode&>(stmt);
+        MIRSymbol *funcName;
+        errMsg = mirBuilder->CreateConstStringSymbol(kOpReturnAssertle,
+            "%s:%d error: return value's bounds does not match the function declaration for %s\n");
+        funcName = mirBuilder->CreateConstStringSymbol(callStmt.GetFuncName() + kOpReturnAssertle,
+                                                       callStmt.GetFuncName());
+        AddElemToPrintf(argsPrintf, 4, mirBuilder->CreateAddrof(*errMsg, PTY_a64),
+                        mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum,
+                        mirBuilder->CreateAddrof(*funcName, PTY_a64));
+      } else {
+        errMsg = mirBuilder->CreateConstStringSymbol(kOpAssignAssertle,
+            "%s:%d error: l-value boundary should not be larger than r-value boundary!\n");
+        AddElemToPrintf(argsPrintf, 3, mirBuilder->CreateAddrof(*errMsg, PTY_a64),
+                        mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum);
+      }
+    } else {
+      errMsg = mirBuilder->CreateConstStringSymbol(kOpAssertlt,
+          "%s:%d error: the pointer >= the upper bounds when accessing the memory!\n");
+      AddElemToPrintf(argsPrintf, 3, mirBuilder->CreateAddrof(*errMsg, PTY_a64),
+                      mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum);
+    }
+  }
+}
+
+void CGLowerer::LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode &newBlk,
+                                    std::vector<StmtNode*> &abortNode) {
+  MIRFunction *curFunc = mirModule.CurFunction();
+  BaseNode *op0 = LowerExpr(stmt, *stmt.Opnd(0), block);
+  BaseNode *op1 = LowerExpr(stmt, *stmt.Opnd(1), block);
+  LabelIdx labIdx = GetLabelIdx(*curFunc);
+  LabelNode *labelBC = mirBuilder->CreateStmtLabel(labIdx);
+  Opcode op = OP_ge;
+  if (kOpcodeInfo.IsAssertUpperBoundary(stmt.GetOpCode())) {
+    op = (kOpcodeInfo.IsAssertLeBoundary(stmt.GetOpCode())) ? OP_le : OP_lt;
+  }
+  BaseNode *cond = mirBuilder->CreateExprCompare(op, *GlobalTables::GetTypeTable().GetUInt1(),
+                                                 *GlobalTables::GetTypeTable().GetPrimType(op0->GetPrimType()),
+                                                 op0, op1);
+  CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx);
+
+  MIRFunction *printf = mirBuilder->GetOrCreateFunction("printf", TyIdx(PTY_i32));
+  printf->GetFuncSymbol()->SetAppearsInCode(true);
+  beCommon.UpdateTypeTable(*printf->GetMIRFuncType());
+  MapleVector<BaseNode*> argsPrintf(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter());
+  uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize();
+  SwitchAssertBoundary(stmt, argsPrintf);
+  uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize();
+  if (newTypeTableSize != oldTypeTableSize) {
+    beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize);
+  }
+  StmtNode *callPrintf = mirBuilder->CreateStmtCall(printf->GetPuidx(), argsPrintf);
+  UnaryStmtNode *abortModeNode = mirBuilder->CreateStmtUnary(OP_abort, nullptr);
+
+  brFalseNode->SetSrcPos(stmt.GetSrcPos());
+  labelBC->SetSrcPos(stmt.GetSrcPos());
+  callPrintf->SetSrcPos(stmt.GetSrcPos());
+  abortModeNode->SetSrcPos(stmt.GetSrcPos());
+
+  newBlk.AddStatement(brFalseNode);
+  abortNode.emplace_back(labelBC);
+  abortNode.emplace_back(callPrintf);
+  abortNode.emplace_back(abortModeNode);
+}
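Annotation (not part of the patch): `LowerAssertBoundary` turns each boundary assertion into a compare-and-branch, with the failure path (label, printf, abort) collected in `abortNode` and emitted out-of-line at the end of the lowered block. The control flow for one `assertge`, written as plain C++ (the real code calls the registered `printf` symbol and raises `OP_abort`):

```
#include <cstdio>
#include <cstdlib>

// One assertge check: the pointer must not be below the lower bound.
void CheckedAccess(const int *p, const int *lower, const char *file, int line) {
    if (!(p >= lower)) {  // brfalse jumps past the failure path when the compare succeeds
        std::printf("%s:%d error: the pointer < the lower bounds when accessing the memory!\n", file, line);
        std::abort();
    }
    // __label_BC_n: execution continues here when the check passes
}
```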
+
+BlockNode *CGLowerer::LowerBlock(BlockNode &block) {
+  BlockNode *newBlk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  BlockNode *tmpBlockNode = nullptr;
+  std::vector<StmtNode*> abortNode;
+  if (block.GetFirst() == nullptr) {
+    return newBlk;
+  }
+
+  StmtNode *nextStmt = block.GetFirst();
+  do {
+    StmtNode *stmt = nextStmt;
+    nextStmt = stmt->GetNext();
+    stmt->SetNext(nullptr);
+    currentBlock = newBlk;
+
+    LowerTypePtr(*stmt);
+
+    switch (stmt->GetOpCode()) {
+      case OP_switch: {
+        LowerSwitchOpnd(*stmt, *newBlk);
+        auto switchMp = std::make_unique<ThreadLocalMemPool>(memPoolCtrler, "switchlowere");
+        MapleAllocator switchAllocator(switchMp.get());
+        SwitchLowerer switchLowerer(mirModule, static_cast<SwitchNode&>(*stmt), switchAllocator);
+        BlockNode *blk = switchLowerer.LowerSwitch();
+        if (blk->GetFirst() != nullptr) {
+          newBlk->AppendStatementsFromBlock(*blk);
+        }
+        needBranchCleanup = true;
+        break;
+      }
+      case OP_block:
+        tmpBlockNode = LowerBlock(static_cast<BlockNode&>(*stmt));
+        CHECK_FATAL(tmpBlockNode != nullptr, "nullptr is not expected");
+        newBlk->AppendStatementsFromBlock(*tmpBlockNode);
+        break;
+      case OP_dassign: {
+        LowerDassign(static_cast<DassignNode&>(*stmt), *newBlk);
+        break;
+      }
+      case OP_regassign: {
+        LowerRegassign(static_cast<RegassignNode&>(*stmt), *newBlk);
+        break;
+      }
+      CASE_OP_ASSERT_BOUNDARY {
+        LowerAssertBoundary(*stmt, block, *newBlk, abortNode);
+        break;
+      }
+      case OP_iassign: {
+        LowerIassign(static_cast<IassignNode&>(*stmt), *newBlk);
+        break;
+      }
+      case OP_callassigned:
+      case OP_icallassigned:
+      case OP_icallprotoassigned: {
+        // pass the addr of lvar if this is a struct call assignment
+        bool lvar = false;
+        // nextStmt could be changed by the call to LowerStructReturn
+        if (!LowerStructReturn(*newBlk, stmt, nextStmt, lvar, &block)) {
+          newBlk->AppendStatementsFromBlock(*LowerCallAssignedStmt(*stmt, lvar));
+        }
+        break;
+      }
+      case OP_virtualcallassigned:
+      case OP_superclasscallassigned:
+      case OP_interfacecallassigned:
+      case OP_intrinsiccallassigned:
+      case OP_xintrinsiccallassigned:
+      case OP_intrinsiccallwithtypeassigned:
+        newBlk->AppendStatementsFromBlock(*LowerCallAssignedStmt(*stmt));
+        break;
+      case OP_intrinsiccall:
+      case OP_call:
+      case OP_icall:
+      case OP_icallproto:
+#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64
+        // nextStmt could be changed by the call to LowerStructReturn
+        LowerCallStmt(*stmt, nextStmt, *newBlk);
+#else
+        LowerStmt(*stmt, *newBlk);
+#endif
+        break;
+      case OP_return: {
+#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64
+        if (GetCurrentFunc()->IsFirstArgReturn() && stmt->NumOpnds() > 0) {
+          newBlk->AppendStatementsFromBlock(*LowerReturnStructUsingFakeParm(static_cast<NaryStmtNode&>(*stmt)));
+        } else {
+#endif
+          NaryStmtNode *retNode = static_cast<NaryStmtNode*>(stmt);
+          if (retNode->GetNopndSize() == 0) {
+            newBlk->AddStatement(stmt);
+          } else {
+            tmpBlockNode = LowerReturn(*retNode);
+            CHECK_FATAL(tmpBlockNode != nullptr, "nullptr is not expected");
+            newBlk->AppendStatementsFromBlock(*tmpBlockNode);
+          }
+#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64
+        }
+#endif
+        break;
+      }
+      case OP_comment:
+        newBlk->AddStatement(stmt);
+        break;
+      case OP_try:
+        LowerStmt(*stmt, *newBlk);
+        newBlk->AddStatement(stmt);
+        hasTry = true;
+        break;
+      case OP_endtry:
+        LowerStmt(*stmt, *newBlk);
+        newBlk->AddStatement(stmt);
+        break;
+      case OP_catch:
+        LowerStmt(*stmt, *newBlk);
+        newBlk->AddStatement(stmt);
+        break;
+      case OP_throw:
+        if (mirModule.IsJavaModule()) {
+          if (GenerateExceptionHandlingCode()) {
+            LowerStmt(*stmt, *newBlk);
+            newBlk->AddStatement(stmt);
+          }
+        } else {
+          LowerStmt(*stmt, *newBlk);
+          newBlk->AddStatement(stmt);
+        }
+        break;
+      case OP_syncenter:
+      case OP_syncexit: {
+        LowerStmt(*stmt, *newBlk);
+        StmtNode *tmp = LowerSyncEnterSyncExit(*stmt);
+        CHECK_FATAL(tmp != nullptr, "nullptr is not expected");
+        newBlk->AddStatement(tmp);
+        break;
+      }
+      case OP_decrefreset: {
+        /*
+         * only gconly can reach here
+         * lower stmt (decrefreset (addrof ptr %RegX_RXXXX)) to (dassign %RegX_RXXXX 0 (constval ref 0))
+         */
+        CHECK_FATAL(CGOptions::IsGCOnly(), "OP_decrefreset is expected only in gconly.");
+        LowerResetStmt(*stmt, *newBlk);
+        break;
+      }
+      case OP_asm: {
+        LowerAsmStmt(static_cast<AsmNode*>(stmt), newBlk);
+        break;
+      }
+      default:
+        LowerStmt(*stmt, *newBlk);
+        newBlk->AddStatement(stmt);
+        break;
+    }
+    CHECK_FATAL(beCommon.GetSizeOfTypeSizeTable() == GlobalTables::GetTypeTable().GetTypeTableSize(), "Error!");
+  } while (nextStmt != nullptr);
+  for (auto node : abortNode) {
+    newBlk->AddStatement(node);
+  }
+  return newBlk;
+}
+
+void CGLowerer::SimplifyBlock(BlockNode &block) const {
+  if (block.GetFirst() == nullptr) {
+    return;
+  }
+  StmtNode *nextStmt = block.GetFirst();
+  do {
+    StmtNode *stmt = nextStmt;
+    nextStmt = stmt->GetNext();
+    Opcode op = stmt->GetOpCode();
+    switch (op) {
+      case OP_call: {
+        auto *callStmt = static_cast<CallNode*>(stmt);
+        if (CGOptions::IsDuplicateAsmFileEmpty()) {
+          break;
+        }
+        auto *oldFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt->GetPUIdx());
+        if (asmMap.find(oldFunc->GetName()) == asmMap.end()) {
+          break;
+        }
+        auto *newFunc = theMIRModule->GetMIRBuilder()->GetOrCreateFunction(asmMap.at(oldFunc->GetName()),
+                                                                           callStmt->GetTyIdx());
+        MIRSymbol *funcSym = newFunc->GetFuncSymbol();
+        funcSym->SetStorageClass(kScExtern);
+        funcSym->SetAppearsInCode(true);
+        callStmt->SetPUIdx(newFunc->GetPuidx());
+        break;
+      }
+      default: {
+        break;
+      }
+    }
+  } while (nextStmt != nullptr);
+  return;
+}
+
+MIRType *CGLowerer::GetArrayNodeType(BaseNode &baseNode) {
+  MIRType *baseType = nullptr;
+  auto curFunc = mirModule.CurFunction();
+  if (baseNode.GetOpCode() == OP_regread) {
+    RegreadNode *rrNode = static_cast<RegreadNode*>(&baseNode);
+    MIRPreg *pReg = curFunc->GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx());
+    if (pReg->IsRef()) {
+      baseType = pReg->GetMIRType();
+    }
+  }
+  if (baseNode.GetOpCode() == OP_dread) {
+    DreadNode *dreadNode = static_cast<DreadNode*>(&baseNode);
+    MIRSymbol *symbol = curFunc->GetLocalOrGlobalSymbol(dreadNode->GetStIdx());
+    baseType = symbol->GetType();
+  }
+  MIRType *arrayElemType = nullptr;
+  if (baseType != nullptr) {
+    MIRType *stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(
+        static_cast<MIRPtrType*>(baseType)->GetPointedTyIdx());
+    while (stType->GetKind() == kTypeJArray) {
+      MIRJarrayType *baseType1 = static_cast<MIRJarrayType*>(stType);
+      MIRType *elemType = baseType1->GetElemType();
+      if (elemType->GetKind() == kTypePointer) {
+        const TyIdx &index = static_cast<MIRPtrType*>(elemType)->GetPointedTyIdx();
+        stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(index);
+      } else {
+        stType = elemType;
+      }
+    }
+
+    arrayElemType = stType;
+  }
+  return arrayElemType;
+}
+
+void CGLowerer::SplitCallArg(CallNode &callNode, BaseNode *newOpnd, size_t i, BlockNode &newBlk) {
+  if (newOpnd->GetOpCode() != OP_regread && newOpnd->GetOpCode() != OP_constval &&
+      newOpnd->GetOpCode() != OP_dread && newOpnd->GetOpCode() != OP_addrof &&
+      newOpnd->GetOpCode() != OP_iaddrof && newOpnd->GetOpCode() != OP_constval &&
+      newOpnd->GetOpCode() != OP_conststr && newOpnd->GetOpCode() != OP_conststr16) {
+    if (CGOptions::GetInstance().GetOptimizeLevel() == CGOptions::kLevel0) {
+      MIRType *type = GlobalTables::GetTypeTable().GetPrimType(newOpnd->GetPrimType());
+      MIRSymbol *ret = CreateNewRetVar(*type, kIntrnRetValPrefix);
+      DassignNode *dassignNode = mirBuilder->CreateStmtDassign(*ret, 0, newOpnd);
+      newBlk.AddStatement(dassignNode);
+      callNode.SetOpnd(mirBuilder->CreateExprDread(*type, 0, *ret), i);
+    } else {
+      PregIdx pregIdx = mirModule.CurFunction()->GetPregTab()->CreatePreg(newOpnd->GetPrimType());
+      RegassignNode *temp = mirBuilder->CreateStmtRegassign(newOpnd->GetPrimType(), pregIdx, newOpnd);
+      newBlk.AddStatement(temp);
+      callNode.SetOpnd(mirBuilder->CreateExprRegread(newOpnd->GetPrimType(), pregIdx), i);
+    }
+  } else {
+    callNode.SetOpnd(newOpnd, i);
+  }
+}
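Annotation (not part of the patch): `SplitCallArg` keeps call operands trivial on targets that need it; anything more complex than a leaf read is first evaluated into a fresh temporary (a named local at -O0, a pseudo-register otherwise). The same transformation in source form, with names invented for the example:

```
int Callee(int);

int Caller(int a, int b) {
    // before lowering: Callee(a * b + 3) -- the operand is an expression tree
    int tmp = a * b + 3;  // dassign at -O0, regassign at higher optimization levels
    return Callee(tmp);   // the call now takes a simple dread/regread
}
```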
+
+StmtNode *CGLowerer::LowerCall(CallNode &callNode, StmtNode *&nextStmt, BlockNode &newBlk, MIRType *retTy,
+                               bool uselvar) {
+  /*
+   * nextStmt in-out
+   * call $foo(constval u32 128)
+   * dassign %jlt (dread agg %%retval)
+   */
+  bool isArrayStore = false;
+
+  if (callNode.GetOpCode() == OP_call) {
+    MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
+    if ((calleeFunc->GetName() == "MCC_WriteRefField") && (callNode.Opnd(1)->GetOpCode() == OP_iaddrof)) {
+      IreadNode *addrExpr = static_cast<IreadNode*>(callNode.Opnd(1));
+      if (addrExpr->Opnd(0)->GetOpCode() == OP_array) {
+        isArrayStore = true;
+      }
+    }
+  }
+
+  for (size_t i = 0; i < callNode.GetNopndSize(); ++i) {
+    BaseNode *newOpnd = LowerExpr(callNode, *callNode.GetNopndAt(i), newBlk);
+#if TARGAARCH64 || TARGRISCV64 || TARGX86_64
+    callNode.SetOpnd(newOpnd, i);
+#else
+    SplitCallArg(callNode, newOpnd, i, newBlk);
+#endif
+  }
+
+  if (isArrayStore && checkLoadStore) {
+    bool needCheckStore = true;
+    MIRType *arrayElemType = GetArrayNodeType(*callNode.Opnd(0));
+    MIRType *valueRealType = GetArrayNodeType(*callNode.Opnd(kNodeThirdOpnd));
+    if ((arrayElemType != nullptr) && (valueRealType != nullptr) && (arrayElemType->GetKind() == kTypeClass) &&
+        static_cast<MIRClassType*>(arrayElemType)->IsFinal() && (valueRealType->GetKind() == kTypeClass) &&
+        static_cast<MIRClassType*>(valueRealType)->IsFinal() &&
+        valueRealType->GetTypeIndex() == arrayElemType->GetTypeIndex()) {
+      needCheckStore = false;
+    }
+
+    if (needCheckStore) {
+      MIRFunction *fn = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_Reflect_Check_Arraystore",
+                                                                       TyIdx(PTY_void));
+      fn->GetFuncSymbol()->SetAppearsInCode(true);
+      beCommon.UpdateTypeTable(*fn->GetMIRFuncType());
+      fn->AllocSymTab();
+      MapleVector<BaseNode*> args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter());
+      args.emplace_back(callNode.Opnd(0));
+      args.emplace_back(callNode.Opnd(kNodeThirdOpnd));
+      StmtNode *checkStoreStmt = mirModule.GetMIRBuilder()->CreateStmtCall(fn->GetPuidx(), args);
+      newBlk.AddStatement(checkStoreStmt);
+    }
+  }
+
+  DassignNode *dassignNode = nullptr;
+  if ((nextStmt != nullptr) && (nextStmt->GetOpCode() == OP_dassign)) {
+    dassignNode = static_cast<DassignNode*>(nextStmt);
+  }
+
+  /* if nextStmt is not a dassign stmt, return */
+  if (dassignNode == nullptr) {
+    return &callNode;
+  }
+
+  if (!uselvar && retTy && beCommon.GetTypeSize(retTy->GetTypeIndex().GetIdx()) <= k16ByteSize) {
+    /* return structure fitting in one or two regs. */
+    return &callNode;
+  }
+
+  MIRType *retType = nullptr;
+  if (callNode.op == OP_icall || callNode.op == OP_icallproto) {
+    if (retTy == nullptr) {
+      return &callNode;
+    } else {
+      retType = retTy;
+    }
+  }
+
+  if (retType == nullptr) {
+    MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx());
+    retType = calleeFunc->GetReturnType();
+    if (calleeFunc->IsReturnStruct() && (retType->GetPrimType() == PTY_void)) {
+      MIRPtrType *pretType = static_cast<MIRPtrType*>((calleeFunc->GetNthParamType(0)));
+      CHECK_FATAL(pretType != nullptr, "nullptr is not expected");
+      retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pretType->GetPointedTyIdx());
+      CHECK_FATAL((retType->GetKind() == kTypeStruct) || (retType->GetKind() == kTypeUnion),
+                  "make sure retType is a struct type");
+    }
+  }
+
+  /* if return type is not of a struct, return */
+  if ((retType->GetKind() != kTypeStruct) && (retType->GetKind() != kTypeUnion)) {
+    return &callNode;
+  }
+
+  MIRSymbol *dsgnSt = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dassignNode->GetStIdx());
+  CHECK_FATAL(dsgnSt->GetType()->IsStructType(), "expects a struct type");
+  MIRStructType *structTy = static_cast<MIRStructType*>(dsgnSt->GetType());
+  if (structTy == nullptr) {
+    return &callNode;
+  }
+
+  RegreadNode *regReadNode = nullptr;
+  if (dassignNode->Opnd(0)->GetOpCode() == OP_regread) {
+    regReadNode = static_cast<RegreadNode*>(dassignNode->Opnd(0));
+  }
+  if (regReadNode == nullptr || (regReadNode->GetRegIdx() != -kSregRetval0)) {
+    return &callNode;
+  }
+
+  MapleVector<BaseNode*> newNopnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter());
+  AddrofNode *addrofNode = mirModule.CurFuncCodeMemPool()->New<AddrofNode>(OP_addrof);
+  addrofNode->SetPrimType(GetLoweredPtrType());
+  addrofNode->SetStIdx(dsgnSt->GetStIdx());
+  addrofNode->SetFieldID(0);
+
+  if (callNode.op == OP_icall || callNode.op == OP_icallproto) {
+    auto ond = callNode.GetNopnd().begin();
+    newNopnd.emplace_back(*ond);
+    newNopnd.emplace_back(addrofNode);
+    for (++ond; ond != callNode.GetNopnd().end(); ++ond) {
+      newNopnd.emplace_back(*ond);
+    }
+  } else {
+    newNopnd.emplace_back(addrofNode);
+    for (auto *opnd : callNode.GetNopnd()) {
+      newNopnd.emplace_back(opnd);
+    }
+  }
+
+  callNode.SetNOpnd(newNopnd);
+  callNode.SetNumOpnds(static_cast<uint8>(newNopnd.size()));
+  CHECK_FATAL(nextStmt != nullptr, "nullptr is not expected");
+  nextStmt = nextStmt->GetNext();
+  return &callNode;
+}
+
+void CGLowerer::LowerEntry(MIRFunction &func) {
+  // determine if needed to insert fake parameter to return struct for current function
+  if (func.IsReturnStruct()) {
+    MIRType *retType = func.GetReturnType();
+#if TARGAARCH64
+    PrimType pty = IsStructElementSame(retType);
+    if (pty == PTY_f32 || pty == PTY_f64 || IsPrimitiveVector(pty)) {
+      func.SetStructReturnedInRegs();
+      return;
+    }
+#endif
+    if (retType->GetPrimType() != PTY_agg) {
+      return;
+    }
+    if (retType->GetSize() > k16ByteSize) {
+      func.SetFirstArgReturn();
+      func.GetMIRFuncType()->SetFirstArgReturn();
+    } else {
+      func.SetStructReturnedInRegs();
+    }
+  }
+  if (func.IsFirstArgReturn() && func.GetReturnType()->GetPrimType() != PTY_void) {
+    MIRSymbol *retSt = func.GetSymTab()->CreateSymbol(kScopeLocal);
+    retSt->SetStorageClass(kScFormal);
+    retSt->SetSKind(kStVar);
+    std::string retName(".return.");
+    MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx());
+    retName.append(funcSt->GetName());
+    retSt->SetNameStrIdx(retName);
+    MIRType *pointType = beCommon.BeGetOrCreatePointerType(*func.GetReturnType());
+
+    retSt->SetTyIdx(pointType->GetTypeIndex());
+    std::vector<MIRSymbol*> formals;
+    formals.emplace_back(retSt);
+    for (uint32 i = 0; i < func.GetFormalCount(); ++i) {
+      auto formal = func.GetFormal(i);
+      formals.emplace_back(formal);
+    }
+    func.SetFirstArgReturn();
+
+    beCommon.AddElementToFuncReturnType(func, func.GetReturnTyIdx());
+
+    func.UpdateFuncTypeAndFormalsAndReturnType(formals, TyIdx(PTY_void), true);
+    auto *funcType = func.GetMIRFuncType();
+    DEBUG_ASSERT(funcType != nullptr, "null ptr check");
+    funcType->SetFirstArgReturn();
+    beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType()));
+  }
+}
+
+void CGLowerer::LowerPseudoRegs(const MIRFunction &func) const {
+  for (uint32 i = 1; i < func.GetPregTab()->Size(); ++i) {
+    MIRPreg *ipr = func.GetPregTab()->PregFromPregIdx(i);
+    PrimType primType = ipr->GetPrimType();
+    if ((primType == PTY_ptr) || (primType == PTY_ref)) {
+      ipr->SetPrimType(GetLoweredPtrType());
+    } else if (primType == PTY_u1) {
+      ipr->SetPrimType(PTY_u32);
+    }
+  }
+}
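Annotation (not part of the patch): `LowerEntry` implements the large-aggregate return convention: a struct bigger than 16 bytes is returned through a hidden first parameter (the `.return.<func>` formal), while smaller ones stay in registers. As a source-level sketch, with the struct name invented:

```
#include <cstdint>

struct Big { uint64_t w[4]; };  // 32 bytes: PTY_agg larger than k16ByteSize

// Original signature:            Big Make();
// After LowerEntry, effectively: void Make(Big *ret);  // hidden ".return.Make" formal
void Make(Big *ret) {
    for (int i = 0; i < 4; ++i) {
        ret->w[i] = static_cast<uint64_t>(i);  // callee writes through the fake parameter
    }
}
```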
+
+void CGLowerer::CleanupBranches(MIRFunction &func) const {
+  BlockNode *block = func.GetBody();
+  StmtNode *prev = nullptr;
+  StmtNode *next = nullptr;
+  for (StmtNode *curr = block->GetFirst(); curr != nullptr; curr = next) {
+    next = curr->GetNext();
+    if (next != nullptr) {
+      CHECK_FATAL(curr == next->GetPrev(), "unexpected node");
+    }
+    if ((next != nullptr) && (prev != nullptr) && (curr->GetOpCode() == OP_goto)) {
+      /*
+       * Skip until find a label.
+       * Note that the current 'goto' statement may be the last statement
+       * when discounting comment statements.
+       * Make sure we don't lose any comments.
+       */
+      StmtNode *cmtB = nullptr;
+      StmtNode *cmtE = nullptr;
+      bool isCleanable = true;
+      while ((next != nullptr) && (next->GetOpCode() != OP_label)) {
+        if ((next->GetOpCode() == OP_try) || (next->GetOpCode() == OP_endtry) || (next->GetOpCode() == OP_catch)) {
+          isCleanable = false;
+          break;
+        }
+        next = next->GetNext();
+      }
+      if ((next != nullptr) && (!isCleanable)) {
+        prev = next->GetPrev();
+        continue;
+      }
+
+      next = curr->GetNext();
+
+      while ((next != nullptr) && (next->GetOpCode() != OP_label)) {
+        if (next->GetOpCode() == OP_comment) {
+          if (cmtB == nullptr) {
+            cmtB = next;
+            cmtE = next;
+          } else {
+            CHECK_FATAL(cmtE != nullptr, "cmt_e is null in CGLowerer::CleanupBranches");
+            cmtE->SetNext(next);
+            next->SetPrev(cmtE);
+            cmtE = next;
+          }
+        }
+        next = next->GetNext();
+      }
+
+      curr->SetNext(next);
+
+      if (next != nullptr) {
+        next->SetPrev(curr);
+      }
+
+      StmtNode *insertAfter = nullptr;
+
+      if ((next != nullptr) &&
+          ((static_cast<GotoNode*>(curr))->GetOffset() == (static_cast<LabelNode*>(next))->GetLabelIdx())) {
+        insertAfter = prev;
+        prev->SetNext(next);    /* skip goto statement (which is pointed by curr) */
+        next->SetPrev(prev);
+        curr = next;            /* make curr point to the label statement */
+        next = next->GetNext(); /* advance next to the next statement of the label statement */
+      } else {
+        insertAfter = curr;
+      }
+
+      /* insert comments before 'curr' */
+      if (cmtB != nullptr) {
+        CHECK_FATAL(cmtE != nullptr, "nullptr is not expected");
+        StmtNode *iaNext = insertAfter->GetNext();
+        if (iaNext != nullptr) {
+          iaNext->SetPrev(cmtE);
+        }
+        cmtE->SetNext(iaNext);
+
+        insertAfter->SetNext(cmtB);
+        cmtB->SetPrev(insertAfter);
+
+        if (insertAfter == curr) {
+          curr = cmtE;
+        }
+      }
+      if (next == nullptr) {
+        func.GetBody()->SetLast(curr);
+      }
+    }
+    prev = curr;
+  }
+  CHECK_FATAL(func.GetBody()->GetLast() == prev, "make sure the return value of GetLast equal prev");
+}
+
+/*
+ * We want to place catch blocks so that they don't come before any of java trys that refer to them.
+ * In order to do that, we take advantage of the fact that the mpl. source we get is already flattened and
+ * no java-try-end-try block is enclosed in any other java-try-end-try block. they appear in the mpl file.
+ * We process each bb in bbList from the front to the end, and while doing so, we maintain a list of catch blocks
+ * we have seen. When we get to an end-try block, we examine each catch block label it has (offsets),
+ * and if we find any catch block in the "seen" list, we move the block after the end-try block.
+ * Note that we need to find a basic block which does not have 'fallthruBranch' control path.
+ * (Appending the catch block to any basic block that has the 'fallthruBranch' control path
+ * will alter the program semantics)
+ */
+void CGLowerer::LowerTryCatchBlocks(BlockNode &body) {
+  if (!hasTry) {
+    return;
+  }
+
+#if DEBUG
+  BBT::ValidateStmtList(nullptr, nullptr);
+#endif
+  auto memPool = std::make_unique<ThreadLocalMemPool>(memPoolCtrler, "CreateNewBB mempool");
+  TryCatchBlocksLower tryCatchLower(*memPool, body, mirModule);
+  tryCatchLower.RecoverBasicBlock();
+  bool generateEHCode = GenerateExceptionHandlingCode();
+  tryCatchLower.SetGenerateEHCode(generateEHCode);
+  tryCatchLower.TraverseBBList();
+#if DEBUG
+  tryCatchLower.CheckTryCatchPattern();
+#endif
+}
+
+inline bool IsAccessingTheSameMemoryLocation(const DassignNode &dassign,
+                                             const RegreadNode &rRead, const CGLowerer &cgLowerer) {
+  StIdx stIdx = cgLowerer.GetSymbolReferredToByPseudoRegister(rRead.GetRegIdx());
+  return ((dassign.GetStIdx() == stIdx) && (dassign.GetFieldID() == 0));
+}
+
+inline bool IsAccessingTheSameMemoryLocation(const DassignNode &dassign, const DreadNode &dread) {
+  return ((dassign.GetStIdx() == dread.GetStIdx()) && (dassign.GetFieldID() == dread.GetFieldID()));
+}
+
+inline bool IsDassignNOP(const DassignNode &dassign) {
+  if (dassign.GetRHS()->GetOpCode() == OP_dread) {
+    return IsAccessingTheSameMemoryLocation(dassign, static_cast<DreadNode&>(*dassign.GetRHS()));
+  }
+  return false;
+}
+
+inline bool IsConstvalZero(const BaseNode &n) {
+  return ((n.GetOpCode() == OP_constval) && static_cast<const ConstvalNode&>(n).GetConstVal()->IsZero());
+}
+
+#define NEXT_ID(x) ((x) + 1)
+#define INTRN_FIRST_SYNC_ENTER NEXT_ID(INTRN_LAST)
+#define INTRN_SECOND_SYNC_ENTER NEXT_ID(INTRN_FIRST_SYNC_ENTER)
+#define INTRN_THIRD_SYNC_ENTER NEXT_ID(INTRN_SECOND_SYNC_ENTER)
+#define INTRN_FOURTH_SYNC_ENTER NEXT_ID(INTRN_THIRD_SYNC_ENTER)
+#define INTRN_YNC_EXIT NEXT_ID(INTRN_FOURTH_SYNC_ENTER)
+
+std::vector<std::pair<BuiltinFunctionID, PUIdx>> CGLowerer::builtinFuncIDs;
+std::unordered_map<IntrinDesc*, PUIdx> CGLowerer::intrinFuncIDs;
+std::unordered_map<std::string, size_t> CGLowerer::arrayClassCacheIndex;
+
+MIRFunction *CGLowerer::RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, const std::string &name,
+                                                       const std::string &paramName) {
+  MIRFunction *func = mirBuilder->GetOrCreateFunction(name, GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex());
+  beCommon.UpdateTypeTable(*func->GetMIRFuncType());
+  func->AllocSymTab();
+  MIRSymbol *funcSym = func->GetFuncSymbol();
+  funcSym->SetStorageClass(kScExtern);
+  funcSym->SetAppearsInCode(true);
+  MIRType *argTy = GlobalTables::GetTypeTable().GetPtr();
+  MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal);
+  argSt->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex(paramName));
+  argSt->SetTyIdx(argTy->GetTypeIndex());
+  argSt->SetStorageClass(kScFormal);
+  argSt->SetSKind(kStVar);
+  func->GetSymTab()->AddToStringSymbolMap(*argSt);
+  std::vector<MIRSymbol*> formals;
+  formals.emplace_back(argSt);
+  if ((name == "MCC_SyncEnterFast0") || (name == "MCC_SyncEnterFast1") ||
+      (name == "MCC_SyncEnterFast2") || (name == "MCC_SyncEnterFast3") ||
+      (name == "MCC_SyncExitFast")) {
+    MIRSymbol *argStMatch = func->GetSymTab()->CreateSymbol(kScopeLocal);
+    argStMatch->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex("monitor_slot"));
+    argStMatch->SetTyIdx(argTy->GetTypeIndex());
+    argStMatch->SetStorageClass(kScFormal);
+    argStMatch->SetSKind(kStVar);
+    func->GetSymTab()->AddToStringSymbolMap(*argStMatch);
+    formals.emplace_back(argStMatch);
+  }
+  func->UpdateFuncTypeAndFormalsAndReturnType(formals, GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex(),
+                                              false);
+  auto *funcType = func->GetMIRFuncType();
+  DEBUG_ASSERT(funcType != nullptr, "null ptr check");
+  beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType()));
+
+  builtinFuncIDs.emplace_back(std::pair<BuiltinFunctionID, PUIdx>(id, func->GetPuidx()));
+  return func;
+}
+
+void CGLowerer::RegisterBuiltIns() {
+  for (uint32 i = 0; i < sizeof(cgBuiltins) / sizeof(cgBuiltins[0]); ++i) {
+    BuiltinFunctionID id = cgBuiltins[i].first;
+    IntrinDesc &desc = IntrinDesc::intrinTable[id];
+
+    MIRFunction *func = mirBuilder->GetOrCreateFunction(cgBuiltins[i].second,
+                                                        GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex());
+    beCommon.UpdateTypeTable(*func->GetMIRFuncType());
+    func->AllocSymTab();
+    MIRSymbol *funcSym = func->GetFuncSymbol();
+    funcSym->SetStorageClass(kScExtern);
+    funcSym->SetAppearsInCode(true);
+    /* return type */
+    MIRType *retTy = desc.GetReturnType();
+    CHECK_FATAL(retTy != nullptr, "retTy should not be nullptr");
+    /* use void* for PTY_dynany */
+    if (retTy->GetPrimType() == PTY_dynany) {
+      retTy = GlobalTables::GetTypeTable().GetPtr();
+    }
+
+    std::vector<MIRSymbol*> formals;
+    const std::string params[IntrinDesc::kMaxArgsNum] = { "p0", "p1", "p2", "p3", "p4", "p5" };
+    for (uint32 j = 0; j < IntrinDesc::kMaxArgsNum; ++j) {
+      MIRType *argTy = desc.GetArgType(j);
+      if (argTy == nullptr) {
+        break;
+      }
+      /* use void* for PTY_dynany */
+      if (argTy->GetPrimType() == PTY_dynany) {
+        argTy = GlobalTables::GetTypeTable().GetPtr();
+      }
+      MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal);
+      argSt->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex(params[j]));
+      argSt->SetTyIdx(argTy->GetTypeIndex());
+      argSt->SetStorageClass(kScFormal);
+      argSt->SetSKind(kStVar);
+      func->GetSymTab()->AddToStringSymbolMap(*argSt);
+      formals.emplace_back(argSt);
+    }
+    func->UpdateFuncTypeAndFormalsAndReturnType(formals, retTy->GetTypeIndex(), false);
+    auto *funcType = func->GetMIRFuncType();
+    DEBUG_ASSERT(funcType != nullptr, "null ptr check");
+    beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType()));
+
+    builtinFuncIDs.emplace_back(std::pair<BuiltinFunctionID, PUIdx>(id, func->GetPuidx()));
+  }
+
+  /* register __builtin_sync_enter */
+  static_cast<void>(RegisterFunctionVoidStarToVoid(INTRN_FIRST_SYNC_ENTER, "MCC_SyncEnterFast0", "obj"));
+  static_cast<void>(RegisterFunctionVoidStarToVoid(INTRN_SECOND_SYNC_ENTER, "MCC_SyncEnterFast1", "obj"));
+  static_cast<void>(RegisterFunctionVoidStarToVoid(INTRN_THIRD_SYNC_ENTER, "MCC_SyncEnterFast2", "obj"));
+  static_cast<void>(RegisterFunctionVoidStarToVoid(INTRN_FOURTH_SYNC_ENTER, "MCC_SyncEnterFast3", "obj"));
+  /* register __builtin_sync_exit */
+  static_cast<void>(RegisterFunctionVoidStarToVoid(INTRN_YNC_EXIT, "MCC_SyncExitFast", "obj"));
+}
+
+/*
+ * From Maple IR Document as of Apr 14, 2017
+ * Type Conversion Expression Opcodes
+ * Conversions between integer types of different sizes require the cvt opcode.
+ * Conversion between signed and unsigned integers of the same size does not
+ * require any operation, not even retype.
+ * cvt <to-type> <from-type> (<opnd>):
+ *   Convert the operand's value from <from-type> to <to-type>.
+ *   If the sizes of the two types are the same, the conversion must involve
+ *   altering the bits.
+ * retype <prim-type> <type> (<opnd>):
+ *   <opnd> is converted to <prim-type> which has derived type <type> without
+ *   changing any bits. The size of <opnd> and <prim-type> must be the same.
+ *   <opnd> may be of aggregate type.
+ */
+ */ +BaseNode *CGLowerer::MergeToCvtType(PrimType dType, PrimType sType, BaseNode &src) const { + CHECK_FATAL(IsPrimitiveInteger(dType) || IsPrimitiveFloat(dType), + "dtype should be primitiveInteger or primitiveFloat"); + CHECK_FATAL(IsPrimitiveInteger(sType) || IsPrimitiveFloat(sType), + "sType should be primitiveInteger or primitiveFloat"); + /* src i32, dest f32; src i64, dest f64 */ + CHECK_FATAL( + (IsPrimitiveInteger(sType) && IsPrimitiveFloat(dType) && + (GetPrimTypeBitSize(sType) == GetPrimTypeBitSize(dType))) || + (IsPrimitiveInteger(sType) && IsPrimitiveInteger(dType)), + "when sType is primitiveInteger and dType is primitiveFloat, sType's primTypeBitSize must equal dType's," + " or both sType and dType should primitiveInteger"); + + /* src & dest are both of float type */ + MIRType *toType = GlobalTables::GetTypeTable().GetPrimType(dType); + MIRType *fromType = GlobalTables::GetTypeTable().GetPrimType(sType); + if (IsPrimitiveInteger(sType) && IsPrimitiveFloat(dType) && + (GetPrimTypeBitSize(sType) == GetPrimTypeBitSize(dType))) { + return mirBuilder->CreateExprRetype(*toType, *fromType, &src); + } else if (IsPrimitiveInteger(sType) && IsPrimitiveInteger(dType)) { + if (GetPrimTypeBitSize(sType) >= GetPrimTypeBitSize(dType)) { + if (dType == PTY_u1) { /* e.g., type _Bool */ + toType = GlobalTables::GetTypeTable().GetPrimType(PTY_u8); + return mirBuilder->CreateExprCompare(OP_ne, *toType, *fromType, &src, mirBuilder->CreateIntConst(0, sType)); + } else if (GetPrimTypeBitSize(sType) > GetPrimTypeBitSize(dType)) { + return mirBuilder->CreateExprTypeCvt(OP_cvt, *toType, *fromType, &src); + } else if (IsSignedInteger(sType) != IsSignedInteger(dType)) { + return mirBuilder->CreateExprTypeCvt(OP_cvt, *toType, *fromType, &src); + } + src.SetPrimType(dType); + return &src; + /* + * Force type cvt here because we currently do not run constant folding + * or contanst propagation before CG. We may revisit this decision later. 
+
+IreadNode &CGLowerer::GetLenNode(BaseNode &opnd0) {
+  MIRIntConst *arrayHeaderNode = GlobalTables::GetIntConstTable().GetOrCreateIntConst(
+      RTSupport::GetRTSupportInstance().GetArrayLengthOffset(),
+      *GlobalTables::GetTypeTable().GetTypeFromTyIdx(opnd0.GetPrimType()));
+  BaseNode *arrayHeaderCstNode = mirModule.CurFuncCodeMemPool()->New<ConstvalNode>(arrayHeaderNode);
+  arrayHeaderCstNode->SetPrimType(opnd0.GetPrimType());
+  MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(opnd0.GetPrimType());
+  BaseNode *refLenAddr = mirBuilder->CreateExprBinary(OP_add, *addrType, &opnd0, arrayHeaderCstNode);
+  MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32();
+  MIRType *ptrType = beCommon.BeGetOrCreatePointerType(*infoLenType);
+  IreadNode *lenNode = mirBuilder->CreateExprIread(*infoLenType, *ptrType, 0, refLenAddr);
+  return (*lenNode);
+}
+
+LabelIdx CGLowerer::GetLabelIdx(MIRFunction &curFunc) const {
+  std::string suffix = std::to_string(curFunc.GetLabelTab()->GetLabelTableSize());
+  GStrIdx labelStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("__label_BC_" + suffix);
+  LabelIdx labIdx = curFunc.GetLabelTab()->AddLabel(labelStrIdx);
+  return labIdx;
+}
+
+void CGLowerer::ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode) {
+  bool needProcessArrayExpr = !ShouldOptarray() && mirModule.IsJavaModule();
+  if (!needProcessArrayExpr) {
+    return;
+  }
+  /* Array boundary check */
+  MIRFunction *curFunc = mirModule.CurFunction();
+  auto &arrayNode = static_cast<ArrayNode&>(expr);
+  StmtNode *boundaryCheckStmt = nullptr;
+  if (arrayNode.GetBoundsCheck()) {
+    CHECK_FATAL(arrayNode.GetNopndSize() == kOperandNumBinary, "unexpected nOpnd size");
+    BaseNode *opnd0 = arrayNode.GetNopndAt(0);
+    if (opnd0->GetOpCode() == OP_iread) {
+      PregIdx pregIdx = curFunc->GetPregTab()->CreatePreg(opnd0->GetPrimType());
+      RegassignNode *temp = mirBuilder->CreateStmtRegassign(opnd0->GetPrimType(), pregIdx, opnd0);
+      blkNode.InsertAfter(blkNode.GetLast(), temp);
+      arrayNode.SetNOpndAt(0, mirBuilder->CreateExprRegread(opnd0->GetPrimType(), pregIdx));
+    }
+    IreadNode &lenNode = GetLenNode(*opnd0);
+    PregIdx lenPregIdx = curFunc->GetPregTab()->CreatePreg(lenNode.GetPrimType());
+    RegassignNode *lenRegassignNode = mirBuilder->CreateStmtRegassign(lenNode.GetPrimType(), lenPregIdx, &lenNode);
+    BaseNode *lenRegreadNode = mirBuilder->CreateExprRegread(PTY_u32, lenPregIdx);
+
+    LabelIdx labIdx = GetLabelIdx(*curFunc);
+    LabelNode *labelBC = mirBuilder->CreateStmtLabel(labIdx);
+    BaseNode *cond = mirBuilder->CreateExprCompare(OP_ge, *GlobalTables::GetTypeTable().GetUInt1(),
+                                                   *GlobalTables::GetTypeTable().GetUInt32(),
+                                                   arrayNode.GetNopndAt(1), lenRegreadNode);
+    CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx);
+    MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Array_Boundary_Check", TyIdx(PTY_void));
+    fn->GetFuncSymbol()->SetAppearsInCode(true);
+    beCommon.UpdateTypeTable(*fn->GetMIRFuncType());
+    fn->AllocSymTab();
+    MapleVector<BaseNode*> args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter());
+    args.emplace_back(arrayNode.GetNopndAt(0));
+    args.emplace_back(arrayNode.GetNopndAt(1));
+    boundaryCheckStmt = mirBuilder->CreateStmtCall(fn->GetPuidx(), args);
+    blkNode.InsertAfter(blkNode.GetLast(), lenRegassignNode);
+    blkNode.InsertAfter(blkNode.GetLast(), brFalseNode);
+    blkNode.InsertAfter(blkNode.GetLast(), boundaryCheckStmt);
+    blkNode.InsertAfter(blkNode.GetLast(), labelBC);
+  }
+}
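Annotation (not part of the patch): `ProcessArrayExpr` guards each checked Java array access with a length comparison and a slow-path call; the `__label_BC_n` label from `GetLabelIdx` marks the in-bounds continuation. In rough source form; the helper's exact signature is an assumption here, only its name comes from the code above:

```
// Hypothetical declaration of the runtime helper named in the lowered call.
extern "C" void MCC_Array_Boundary_Check(void *arrayBase, unsigned index);

int ElementAt(int *payload, void *arrayBase, unsigned index, unsigned length) {
    if (index >= length) {                           // the OP_ge compare; brfalse skips the call
        MCC_Array_Boundary_Check(arrayBase, index);  // throws on out-of-bounds; does not return
    }
    return payload[index];                           // __label_BC_n: the actual access
}
```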
+
+BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkNode) {
+  bool isCvtU1Expr = (expr.GetOpCode() == OP_cvt && expr.GetPrimType() == PTY_u1 &&
+                      static_cast<TypeCvtNode&>(expr).FromType() != PTY_u1);
+  if (expr.GetPrimType() == PTY_u1) {
+    expr.SetPrimType(PTY_u8);
+  }
+  if (expr.GetOpCode() == OP_intrinsicopwithtype) {
+    return LowerIntrinsicopwithtype(parent, static_cast<IntrinsicopNode&>(expr), blkNode);
+  }
+
+  LowerTypePtr(expr);
+
+  if (expr.GetOpCode() == OP_iread && expr.Opnd(0)->GetOpCode() == OP_array) {
+    /* iread ptr <* <$MUIDDataDefTabEntry>> 1 (
+     *     array 0 ptr <* <[5] <$MUIDDataDefTabEntry>>> (addrof ...
+     * ==>
+     * intrinsicop a64 MPL_READ_STATIC_OFFSET_TAB (addrof ..
+     */
+    BaseNode *node = LowerExpr(expr, *expr.Opnd(0), blkNode);
+    if (node->GetOpCode() == OP_intrinsicop) {
+      auto *binNode = static_cast<IntrinsicopNode*>(node);
+      CHECK_FATAL(binNode->GetIntrinsic() == INTRN_MPL_READ_STATIC_OFFSET_TAB, "Something wrong here");
+      return binNode;
+    } else {
+      expr.SetOpnd(node, 0);
+    }
+  } else {
+    for (size_t i = 0; i < expr.NumOpnds(); ++i) {
+      expr.SetOpnd(LowerExpr(expr, *expr.Opnd(i), blkNode), i);
+    }
+  }
+  // Convert `cvt u1 xx <expr>` to `ne u8 xx (<expr>, constval xx 0)`
+  // No need to convert `cvt u1 u1 <expr>`
+  if (isCvtU1Expr) {
+    auto &cvtExpr = static_cast<TypeCvtNode&>(expr);
+    PrimType fromType = cvtExpr.FromType();
+    auto *fromMIRType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fromType));
+    // We use u8 instead of u1 because codegen can't recognize u1
+    auto *toMIRType = GlobalTables::GetTypeTable().GetUInt8();
+    auto *zero = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *fromMIRType);
+    auto *converted = mirBuilder->CreateExprCompare(OP_ne, *toMIRType, *fromMIRType, cvtExpr.Opnd(0),
+                                                    mirBuilder->CreateConstval(zero));
+    return converted;
+  }
+  switch (expr.GetOpCode()) {
+    case OP_array: {
+      ProcessArrayExpr(expr, blkNode);
+      if (!mirModule.IsCModule()) {
+        return LowerArray(static_cast<ArrayNode&>(expr), parent);
+      } else {
+        return LowerCArray(static_cast<ArrayNode&>(expr));
+      }
+    }
+
+    case OP_dread:
+      return LowerDread(static_cast<DreadNode&>(expr), blkNode);
+
+    case OP_addrof:
+      return LowerAddrof(static_cast<AddrofNode&>(expr));
+
+    case OP_iread:
+      return LowerIread(static_cast<IreadNode&>(expr));
+
+    case OP_iaddrof:
+      return LowerIaddrof(static_cast<IreadNode&>(expr));
+
+    case OP_select:
+      if (IsComplexSelect(static_cast<TernaryNode&>(expr))) {
+        return LowerComplexSelect(static_cast<TernaryNode&>(expr), parent, blkNode);
+      } else if (mirModule.GetFlavor() != kFlavorLmbc) {
+        return SplitTernaryNodeResult(static_cast<TernaryNode&>(expr), parent, blkNode);
+      } else {
+        return &expr;
+      }
+
+    case OP_sizeoftype: {
+      CHECK(static_cast<SizeoftypeNode&>(expr).GetTyIdx() < beCommon.GetSizeOfTypeSizeTable(),
+            "index out of range in CGLowerer::LowerExpr");
+      int64 typeSize = beCommon.GetTypeSize(static_cast<SizeoftypeNode&>(expr).GetTyIdx());
+      return mirModule.GetMIRBuilder()->CreateIntConst(typeSize, PTY_u32);
+    }
+
+    case OP_fieldsdist: {
+      auto &fdNode = static_cast<FieldsDistNode&>(expr);
+      CHECK(fdNode.GetTyIdx() < beCommon.GetSizeOfTypeSizeTable(),
+            "index out of range in CGLowerer::LowerExpr");
+      MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fdNode.GetTyIdx());
+      CHECK(ty->GetKind() == kTypeClass, "wrong type for FieldsDistNode");
+      MIRClassType *classType = static_cast<MIRClassType*>(ty);
+      const JClassLayout &layout = beCommon.GetJClassLayout(*classType);
+      DEBUG_ASSERT(!layout.empty(), "container should not be empty");
+      int32 i1 = fdNode.GetFiledID1() > 0 ? fdNode.GetFiledID1() - 1 : 0;
+      int32 i2 = fdNode.GetFiledID2() > 0 ? fdNode.GetFiledID2() - 1 : 0;
+      int64 offset = layout[i2].GetOffset() - layout[i1].GetOffset();
+      return mirModule.GetMIRBuilder()->CreateIntConst(offset, PTY_u32);
+    }
+
+    case OP_intrinsicop:
+      if (IsIntrinsicOpHandledAtLowerLevel(static_cast<IntrinsicopNode&>(expr).GetIntrinsic())) {
+        return &expr;
+      }
+      return LowerIntrinsicop(parent, static_cast<IntrinsicopNode&>(expr), blkNode);
+
+    case OP_alloca: {
+      GetCurrentFunc()->SetVlaOrAlloca(true);
+      return &expr;
+    }
+    case OP_rem:
+      return LowerRem(expr, blkNode);
+
+    case OP_cand:
+      expr.SetOpCode(OP_land);
+      return SplitBinaryNodeOpnd1(static_cast<BinaryNode&>(expr), blkNode);
+    case OP_cior:
+      expr.SetOpCode(OP_lior);
+      return SplitBinaryNodeOpnd1(static_cast<BinaryNode&>(expr), blkNode);
+    case OP_cvt:
+    case OP_retype:
+    case OP_zext:
+    case OP_sext:
+      return LowerCastExpr(expr);
+    default:
+      return &expr;
+  }
+}
+ DEBUG_ASSERT(!layout.empty(), "container should not be empty"); + int32 i1 = fdNode.GetFiledID1() > 0 ? fdNode.GetFiledID1() - 1 : 0; + int32 i2 = fdNode.GetFiledID2() > 0 ? fdNode.GetFiledID2() - 1 : 0; + int64 offset = layout[i2].GetOffset() - layout[i1].GetOffset(); + return mirModule.GetMIRBuilder()->CreateIntConst(offset, PTY_u32); + } + + case OP_intrinsicop: + if (IsIntrinsicOpHandledAtLowerLevel(static_cast(expr).GetIntrinsic())) { + return &expr; + } + return LowerIntrinsicop(parent, static_cast(expr), blkNode); + + case OP_alloca: { + GetCurrentFunc()->SetVlaOrAlloca(true); + return &expr; + } + case OP_rem: + return LowerRem(expr, blkNode); + + case OP_cand: + expr.SetOpCode(OP_land); + return SplitBinaryNodeOpnd1(static_cast(expr), blkNode); + case OP_cior: + expr.SetOpCode(OP_lior); + return SplitBinaryNodeOpnd1(static_cast(expr), blkNode); + case OP_cvt: + case OP_retype: + case OP_zext: + case OP_sext: + return LowerCastExpr(expr); + default: + return &expr; + } +} + +BaseNode *CGLowerer::LowerDread(DreadNode &dread, const BlockNode &block) { + /* use PTY_u8 for boolean type in dread/iread */ + if (dread.GetPrimType() == PTY_u1) { + dread.SetPrimType(PTY_u8); + } + return (dread.GetFieldID() == 0 ? LowerDreadToThreadLocal(dread, block) : LowerDreadBitfield(dread)); +} + +void CGLowerer::LowerRegassign(RegassignNode ®Node, BlockNode &newBlk) { + CHECK_FATAL(regNode.GetPrimType() != PTY_ptr, "should have been lowered already"); + CHECK_FATAL(regNode.GetPrimType() != PTY_ref, "should have been lowered already"); + BaseNode *rhsOpnd = regNode.Opnd(0); + Opcode op = rhsOpnd->GetOpCode(); + if ((op == OP_gcmalloc) || (op == OP_gcpermalloc)) { + LowerGCMalloc(regNode, static_cast(*rhsOpnd), newBlk, op == OP_gcpermalloc); + return; + } else if ((op == OP_gcmallocjarray) || (op == OP_gcpermallocjarray)) { + LowerJarrayMalloc(regNode, static_cast(*rhsOpnd), newBlk, op == OP_gcpermallocjarray); + return; + } else { + regNode.SetOpnd(LowerExpr(regNode, *rhsOpnd, newBlk), 0); + newBlk.AddStatement(®Node); + } +} + +BaseNode *CGLowerer::ExtractSymbolAddress(const StIdx &stIdx) { + auto builder = mirModule.GetMIRBuilder(); + return builder->CreateExprAddrof(0, stIdx); +} + +BaseNode *CGLowerer::LowerDreadToThreadLocal(BaseNode &expr, const BlockNode &block) { + auto *result = &expr; + if (expr.GetOpCode() != maple::OP_dread) { + return result; + } + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + auto dread = static_cast(expr); + StIdx stIdx = dread.GetStIdx(); + if (!stIdx.IsGlobal()) { + return result; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + + if (symbol->IsThreadLocal()) { + // iread <* u32> 0 (regread u64 %addr) + auto addr = ExtractSymbolAddress(stIdx); + auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*symbol->GetType()); + auto iread = mirModule.GetMIRBuilder()->CreateExprIread(*symbol->GetType(), *ptrType, dread.GetFieldID(), addr); + result = iread; + } + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + return result; +} + +StmtNode *CGLowerer::LowerDassignToThreadLocal(StmtNode &stmt, const BlockNode &block) { + StmtNode *result = &stmt; + if (stmt.GetOpCode() != maple::OP_dassign) { + return result; + } + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + auto dAssign = static_cast(stmt); + StIdx stIdx = 
dAssign.GetStIdx(); + if (!stIdx.IsGlobal()) { + return result; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + if (symbol->IsThreadLocal()) { + // iassign <* u32> 0 (regread u64 %addr, dread u32 $x) + auto addr = ExtractSymbolAddress(stIdx); + auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*symbol->GetType()); + auto iassign = mirModule.GetMIRBuilder()->CreateStmtIassign(*ptrType, dAssign.GetFieldID(), addr, dAssign.GetRHS()); + result = iassign; + } + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + return result; +} + +void CGLowerer::LowerDassign(DassignNode &dsNode, BlockNode &newBlk) { + StmtNode *newStmt = nullptr; + BaseNode *rhs = nullptr; + Opcode op = dsNode.GetRHS()->GetOpCode(); + if (dsNode.GetFieldID() != 0) { + newStmt = LowerDassignBitfield(dsNode, newBlk); + } else if (op == OP_intrinsicop) { + IntrinsicopNode *intrinNode = static_cast(dsNode.GetRHS()); + MIRType *retType = IntrinDesc::intrinTable[intrinNode->GetIntrinsic()].GetReturnType(); + CHECK_FATAL(retType != nullptr, "retType should not be nullptr"); + if (retType->GetKind() == kTypeStruct) { + newStmt = LowerIntrinsicopDassign(dsNode, *intrinNode, newBlk); + } else { + rhs = LowerExpr(dsNode, *intrinNode, newBlk); + dsNode.SetRHS(rhs); + CHECK_FATAL(dsNode.GetRHS() != nullptr, "dsNode->rhs is null in CGLowerer::LowerDassign"); + if (!IsDassignNOP(dsNode)) { + newStmt = &dsNode; + } + } + } else if ((op == OP_gcmalloc) || (op == OP_gcpermalloc)) { + LowerGCMalloc(dsNode, static_cast(*dsNode.GetRHS()), newBlk, op == OP_gcpermalloc); + return; + } else if ((op == OP_gcmallocjarray) || (op == OP_gcpermallocjarray)) { + LowerJarrayMalloc(dsNode, static_cast(*dsNode.GetRHS()), newBlk, op == OP_gcpermallocjarray); + return; + } else { + rhs = LowerExpr(dsNode, *dsNode.GetRHS(), newBlk); + dsNode.SetRHS(rhs); + newStmt = &dsNode; + } + + if (newStmt != nullptr) { + newBlk.AddStatement(LowerDassignToThreadLocal(*newStmt, newBlk)); + } +} + +// Lower stmt Form +// Initial form: decrefreset (addrof ptr %RegX_RXXXX) +// Convert to form: dassign %RegX_RXXXX 0 (constval ref 0) +// Final form: str xzr, [x29,#XX] +void CGLowerer::LowerResetStmt(StmtNode &stmt, BlockNode &block) { + UnaryStmtNode &unaryStmtNode = static_cast(stmt); + AddrofNode *addrofNode = static_cast(unaryStmtNode.GetRHS()); + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(PTY_ref); + MIRConst *constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, type); + ConstvalNode *exprConst = mirModule.CurFuncCodeMemPool()->New(); + exprConst->SetPrimType(type.GetPrimType()); + exprConst->SetConstVal(constVal); + DassignNode *dassignNode = mirModule.CurFuncCodeMemPool()->New(); + dassignNode->SetStIdx(addrofNode->GetStIdx()); + dassignNode->SetRHS(exprConst); + dassignNode->SetFieldID(addrofNode->GetFieldID()); + block.AddStatement(dassignNode); +} + +StmtNode *CGLowerer::LowerIntrinsicopDassign(const DassignNode &dsNode, + IntrinsicopNode &intrinNode, BlockNode &newBlk) { + for (size_t i = 0; i < intrinNode.GetNumOpnds(); ++i) { + DEBUG_ASSERT(intrinNode.Opnd(i) != nullptr, "intrinNode.Opnd(i) should not be nullptr"); + intrinNode.SetOpnd(LowerExpr(intrinNode, *intrinNode.Opnd(i), newBlk), i); + } + MIRIntrinsicID intrnID = intrinNode.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + MIRSymbol *st = 
GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+  CHECK_FATAL(intrinDesc->name != nullptr, "intrinDesc's name should not be nullptr");
+  const std::string name = intrinDesc->name;
+  st->SetNameStrIdx(name);
+  st->SetStorageClass(kScText);
+  st->SetSKind(kStFunc);
+  MIRFunction *fn = mirModule.GetMemPool()->New<MIRFunction>(&mirModule, st->GetStIdx());
+  MapleVector<BaseNode*> &nOpnds = intrinNode.GetNopnd();
+  st->SetFunction(fn);
+  std::vector<TyIdx> fnTyVec;
+  std::vector<TypeAttrs> fnTaVec;
+  CHECK_FATAL(intrinDesc->IsJsOp(), "intrinDesc should be JsOp");
+  /* setup parameters */
+  for (uint32 i = 0; i < nOpnds.size(); ++i) {
+    fnTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex());
+    fnTaVec.emplace_back(TypeAttrs());
+    BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]);
+    CHECK_FATAL(addrNode != nullptr, "addrNode should not be nullptr");
+    nOpnds[i] = addrNode;
+  }
+  MIRSymbol *dst = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dsNode.GetStIdx());
+  MIRType *ty = dst->GetType();
+  MIRType *fnType = beCommon.BeGetOrCreateFunctionType(ty->GetTypeIndex(), fnTyVec, fnTaVec);
+  st->SetTyIdx(fnType->GetTypeIndex());
+  fn->SetMIRFuncType(static_cast<MIRFuncType*>(fnType));
+  fn->SetReturnTyIdx(ty->GetTypeIndex());
+  CHECK_FATAL(ty->GetKind() == kTypeStruct, "ty's kind should be struct type");
+  CHECK_FATAL(dsNode.GetFieldID() == 0, "dsNode's fieldID should be 0");
+  AddrofNode *addrofNode = mirBuilder->CreateAddrof(*dst, PTY_a32);
+  MapleVector<BaseNode*> newOpnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter());
+  newOpnd.emplace_back(addrofNode);
+  (void)newOpnd.insert(newOpnd.end(), nOpnds.begin(), nOpnds.end());
+  CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New<CallNode>(mirModule, OP_call);
+  callStmt->SetPUIdx(st->GetFunction()->GetPuidx());
+  callStmt->SetNOpnd(newOpnd);
+  return callStmt;
+}
+
+/* From maple_ir/include/dex2mpl/dexintrinsic.def
+ * JAVA_ARRAY_LENGTH
+ * JAVA_ARRAY_FILL
+ * JAVA_FILL_NEW_ARRAY
+ * JAVA_CHECK_CAST
+ * JAVA_CONST_CLASS
+ * JAVA_INSTANCE_OF
+ * JAVA_MERGE
+ * JAVA_RANDOM
+ * #if DEXHACK
+ * JAVA_PRINTLN
+ * #endif
+ * INTRN_<> intrinsics
+ */
+BaseNode *CGLowerer::LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, const IntrinDesc &desc) {
+  MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+  CHECK_FATAL(desc.name != nullptr, "desc's name should not be nullptr");
+  const std::string name = desc.name;
+  st->SetNameStrIdx(name);
+  st->SetStorageClass(kScText);
+  st->SetSKind(kStFunc);
+  MIRFunction *fn = mirModule.GetMemPool()->New<MIRFunction>(&mirModule, st->GetStIdx());
+  MapleVector<BaseNode*> &nOpnds = intrinNode.GetNopnd();
+  st->SetFunction(fn);
+  std::vector<TyIdx> fnTyVec;
+  std::vector<TypeAttrs> fnTaVec;
+  CHECK_FATAL(desc.IsJsOp(), "desc should be JsOp");
+  /* setup parameters */
+  for (uint32 i = 0; i < nOpnds.size(); ++i) {
+    fnTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex());
+    fnTaVec.emplace_back(TypeAttrs());
+    BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]);
+    CHECK_FATAL(addrNode != nullptr, "cannot get address");
+    nOpnds[i] = addrNode;
+  }
+
+  MIRType *retType = desc.GetReturnType();
+  CHECK_FATAL(retType != nullptr, "retType should not be nullptr");
+  if (retType->GetKind() == kTypeStruct) {
+    /* create a local symbol and dread it */
+    std::string tmpstr("__ret_struct_tmp_st");
+    static uint32 tmpIdx = 0;
+    tmpstr.append(std::to_string(tmpIdx++));
+    MIRSymbol *tmpSt = mirBuilder->GetOrCreateDeclInFunc(tmpstr, *retType, *mirModule.CurFunction());
+    MIRType *fnType =
beCommon.BeGetOrCreateFunctionType(retType->GetTypeIndex(), fnTyVec, fnTaVec);
+    st->SetTyIdx(fnType->GetTypeIndex());
+    fn->SetMIRFuncType(static_cast<MIRFuncType*>(fnType));
+    AddrofNode *addrofNode = mirBuilder->CreateAddrof(*tmpSt, PTY_a32);
+    MapleVector<BaseNode*> newOpnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter());
+    newOpnd.emplace_back(addrofNode);
+    (void)newOpnd.insert(newOpnd.end(), nOpnds.begin(), nOpnds.end());
+    CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New<CallNode>(mirModule, OP_call);
+    callStmt->SetPUIdx(st->GetFunction()->GetPuidx());
+    callStmt->SetNOpnd(newOpnd);
+    currentBlock->AddStatement(callStmt);
+    /* return the dread */
+    AddrofNode *drRetSt = mirBuilder->CreateDread(*tmpSt, PTY_agg);
+    return drRetSt;
+  }
+  CHECK_FATAL(st->GetStIdx().FullIdx() != 0, "the fullIdx of st's stIdx should not equal 0");
+  CallNode *callStmt = static_cast<CallNode*>(mirBuilder->CreateStmtCall(st->GetStIdx().FullIdx(), nOpnds));
+  currentBlock->AddStatement(callStmt);
+  PrimType promotedPrimType = intrinNode.GetPrimType() == PTY_u1 ? PTY_u32 : intrinNode.GetPrimType();
+  BaseNode *drRetSt = mirBuilder->CreateExprRegread(promotedPrimType, -kSregRetval0);
+  /*
+   * For safety, dassign the return value to a register and return a dread of that register,
+   * to avoid code such as:
+   *   call $__js_int32 (addrof ptr %temp_var_8 0)
+   *   call $__jsop_getelem (addrof a32 %temp_var_9 0, addrof a32 $arr 0, dread i32 %%retval 0)
+   * On many targets the first actual parameter and the return value both use R0,
+   * which would make the above case fail.
+   */
+  PregIdx tmpRegIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(promotedPrimType);
+  RegassignNode *dstoReg = mirBuilder->CreateStmtRegassign(promotedPrimType, tmpRegIdx, drRetSt);
+  currentBlock->AddStatement(dstoReg);
+  RegreadNode *outDsNode = mirBuilder->CreateExprRegread(promotedPrimType, tmpRegIdx);
+  return outDsNode;
+}
+
+StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, const MIRSymbol &ret,
+                                                   PUIdx bFunc, BaseNode *extraInfo) const {
+  MapleVector<BaseNode*> args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter());
+  for (size_t i = 0; i < intrinNode.NumOpnds(); ++i) {
+    args.emplace_back(intrinNode.Opnd(i));
+  }
+  if (extraInfo != nullptr) {
+    args.emplace_back(extraInfo);
+  }
+  return mirBuilder->CreateStmtCallAssigned(bFunc, args, &ret, OP_callassigned);
+}
+
+StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, PregIdx retpIdx, PUIdx bFunc,
+                                                   BaseNode *extraInfo) const {
+  MapleVector<BaseNode*> args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter());
+  for (size_t i = 0; i < intrinNode.NumOpnds(); ++i) {
+    args.emplace_back(intrinNode.Opnd(i));
+  }
+  if (extraInfo != nullptr) {
+    args.emplace_back(extraInfo);
+  }
+  return mirBuilder->CreateStmtCallRegassigned(bFunc, args, retpIdx, OP_callassigned);
+}
+
+BaseNode *CGLowerer::LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNode &intrinNode) {
+  BaseNode *resNode = &intrinNode;
+  CHECK_FATAL(intrinNode.GetNumOpnds() > 0, "invalid JAVA_MERGE intrinsic node");
+  BaseNode *candidate = intrinNode.Opnd(0);
+  DEBUG_ASSERT(candidate != nullptr, "candidate should not be nullptr");
+  resNode = candidate;
+  if (parent.GetOpCode() == OP_regassign) {
+    PrimType sTyp = resNode->GetPrimType();
+    auto &regAssign = static_cast<const RegassignNode&>(parent);
+    PrimType pType = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(regAssign.GetRegIdx())->GetPrimType();
+    if (sTyp != pType) {
+      resNode = MergeToCvtType(pType, sTyp, *resNode);
+    }
+    return resNode;
+  }
+  if (parent.GetOpCode() ==
OP_dassign) { + auto &dassign = static_cast(parent); + if (candidate->GetOpCode() == OP_constval) { + MIRSymbol *dest = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + MIRType *toType = dest->GetType(); + PrimType dTyp = toType->GetPrimType(); + PrimType sTyp = resNode->GetPrimType(); + if (dTyp != sTyp) { + resNode = MergeToCvtType(dTyp, sTyp, *resNode); + } + return resNode; + } + CHECK_FATAL((candidate->GetOpCode() == OP_dread) || (candidate->GetOpCode() == OP_regread), + "candidate's opcode should be OP_dread or OP_regread"); + bool differentLocation = + (candidate->GetOpCode() == OP_dread) + ? !IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate)) + : !IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate), *this); + if (differentLocation) { + bool simpleMove = false; + /* res_node already contains the 0-th operand. */ + for (size_t i = 1; i < intrinNode.GetNumOpnds(); ++i) { + candidate = intrinNode.Opnd(i); + DEBUG_ASSERT(candidate != nullptr, "candidate should not be nullptr"); + bool sameLocation = + (candidate->GetOpCode() == OP_dread) + ? IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate)) + : IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate), *this); + if (sameLocation) { + simpleMove = true; + resNode = candidate; + break; + } + } + if (!simpleMove) { + /* if source and destination types don't match, insert 'retype' */ + MIRSymbol *dest = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + MIRType *toType = dest->GetType(); + PrimType dTyp = toType->GetPrimType(); + CHECK_FATAL((dTyp != PTY_agg) && (dassign.GetFieldID() <= 0), + "dType should not be PTY_agg and dassign's filedId <= 0"); + PrimType sType = resNode->GetPrimType(); + if (dTyp != sType) { + resNode = MergeToCvtType(dTyp, sType, *resNode); + } + } + } + return resNode; + } + CHECK_FATAL(false, "should not run here"); + return resNode; +} + +BaseNode *CGLowerer::LowerIntrinJavaArrayLength(const BaseNode &parent, IntrinsicopNode &intrinNode) { + BaseNode *resNode = &intrinNode; + PUIdx bFunc = GetBuiltinToUse(intrinNode.GetIntrinsic()); + CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should not be kFuncNotFound"); + MIRFunction *biFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(bFunc); + + BaseNode *arrAddr = intrinNode.Opnd(0); + DEBUG_ASSERT(arrAddr != nullptr, "arrAddr should not be nullptr"); + if (((arrAddr->GetPrimType() == PTY_a64) || (arrAddr->GetPrimType() == PTY_ref)) && + ((parent.GetOpCode() == OP_regassign) || (parent.GetOpCode() == OP_dassign) || (parent.GetOpCode() == OP_ge))) { + MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(arrAddr->GetPrimType())); + MIRIntConst *arrayHeaderNode = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + RTSupport::GetRTSupportInstance().GetArrayLengthOffset(), *addrType); + BaseNode *arrayHeaderCstNode = mirModule.CurFuncCodeMemPool()->New(arrayHeaderNode); + arrayHeaderCstNode->SetPrimType(arrAddr->GetPrimType()); + + BaseNode *refLenAddr = mirBuilder->CreateExprBinary(OP_add, *addrType, arrAddr, arrayHeaderCstNode); + MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32(); + MIRType *ptrType = beCommon.BeGetOrCreatePointerType(*infoLenType); + resNode = mirBuilder->CreateExprIread(*infoLenType, *ptrType, 0, refLenAddr); + auto curFunc = mirModule.CurFunction(); + std::string suffix = std::to_string(curFunc->GetLabelTab()->GetLabelTableSize()); + GStrIdx labelStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("__label_nonnull_" + 
suffix); + LabelIdx labIdx = curFunc->GetLabelTab()->AddLabel(labelStrIdx); + LabelNode *labelNonNull = mirBuilder->CreateStmtLabel(labIdx); + + BaseNode *cond = mirBuilder->CreateExprCompare(OP_ne, + *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetRef(), arrAddr, + mirBuilder->CreateIntConst(0, PTY_ref)); + CondGotoNode *brtureNode = mirBuilder->CreateStmtCondGoto(cond, OP_brtrue, labIdx); + + MIRFunction *newFunc = + mirBuilder->GetOrCreateFunction("MCC_ThrowNullArrayNullPointerException", + GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + newFunc->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*newFunc->GetMIRFuncType()); + newFunc->AllocSymTab(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + StmtNode *call = mirBuilder->CreateStmtCallAssigned(newFunc->GetPuidx(), args, nullptr, OP_callassigned); + + currentBlock->AddStatement(brtureNode); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*call)); + currentBlock->AddStatement(labelNonNull); + return resNode; + } + + if (parent.GetOpCode() == OP_regassign) { + auto ®Assign = static_cast(parent); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, regAssign.GetRegIdx(), bFunc); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + PrimType pType = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(regAssign.GetRegIdx())->GetPrimType(); + resNode = mirBuilder->CreateExprRegread(pType, regAssign.GetRegIdx()); + return resNode; + } + + if (parent.GetOpCode() == OP_dassign) { + auto &dassign = static_cast(parent); + MIRSymbol *ret = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, *ret, bFunc); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + resNode = mirBuilder->CreateExprDread(*biFunc->GetReturnType(), 0, *ret); + return resNode; + } + CHECK_FATAL(false, "should not run here"); + return resNode; +} + +BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode) { + BaseNode *resNode = &intrinNode; + if (intrinNode.GetIntrinsic() == INTRN_JAVA_MERGE) { + resNode = LowerIntrinJavaMerge(parent, intrinNode); + } else if (intrinNode.GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) { + resNode = LowerIntrinJavaArrayLength(parent, intrinNode); + } + + return resNode; +} + +void CGLowerer::ProcessClassInfo(MIRType &classType, bool &classInfoFromRt, std::string &classInfo) const { + MIRPtrType &ptrType = static_cast(classType); + MIRType *pType = ptrType.GetPointedType(); + CHECK_FATAL(pType != nullptr, "Class type not found for INTRN_JAVA_CONST_CLASS"); + MIRType *typeScalar = nullptr; + + if (pType->GetKind() == kTypeScalar) { + typeScalar = pType; + } else if (classType.GetKind() == kTypeScalar) { + typeScalar = &classType; + } + if (typeScalar != nullptr) { + std::string eName(GetPrimTypeJavaName(typeScalar->GetPrimType())); + classInfo = PRIMITIVECLASSINFO_PREFIX_STR + eName; + } + if ((pType->GetKind() == kTypeByName) || (pType->GetKind() == kTypeClass) || (pType->GetKind() == kTypeInterface)) { + MIRStructType *classTypeSecond = static_cast(pType); + classInfo = CLASSINFO_PREFIX_STR + classTypeSecond->GetName(); + } else if ((pType->GetKind() == kTypeArray) || (pType->GetKind() == kTypeJArray)) { + MIRJarrayType *jarrayType = static_cast(pType); + CHECK_FATAL(jarrayType != nullptr, "jarrayType is null in CGLowerer::LowerIntrinsicopWithType"); + std::string baseName = 
jarrayType->GetJavaName(); + if (jarrayType->IsPrimitiveArray() && (jarrayType->GetDim() <= kThreeDimArray)) { + classInfo = PRIMITIVECLASSINFO_PREFIX_STR + baseName; + } else if (arrayNameForLower::kArrayBaseName.find(baseName) != arrayNameForLower::kArrayBaseName.end()) { + classInfo = CLASSINFO_PREFIX_STR + baseName; + } else { + classInfoFromRt = true; + classInfo = baseName; + } + } +} + +BaseNode *CGLowerer::GetBaseNodeFromCurFunc(MIRFunction &curFunc, bool isFromJarray) { + BaseNode *baseNode = nullptr; + if (curFunc.IsStatic()) { + /* + * it's a static function. + * pass caller functions's classinfo directly + */ + std::string callerName = CLASSINFO_PREFIX_STR; + callerName += mirModule.CurFunction()->GetBaseClassName(); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(callerName); + MIRSymbol *callerClassInfoSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (callerClassInfoSym == nullptr) { + if (isFromJarray) { + MIRType *mType = GlobalTables::GetTypeTable().GetVoidPtr(); + CHECK_FATAL(mType != nullptr, "type is null in CGLowerer::LowerJarrayMalloc"); + callerClassInfoSym = mirBuilder->CreateGlobalDecl(callerName.c_str(), *mType); + callerClassInfoSym->SetStorageClass(kScExtern); + } else { + callerClassInfoSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + callerClassInfoSym->SetNameStrIdx(strIdx); + callerClassInfoSym->SetStorageClass(kScGlobal); + callerClassInfoSym->SetSKind(kStVar); + /* it must be a local symbol */ + GlobalTables::GetGsymTable().AddToStringSymbolMap(*callerClassInfoSym); + callerClassInfoSym->SetTyIdx(static_cast(PTY_ptr)); + } + } + + baseNode = mirBuilder->CreateExprAddrof(0, *callerClassInfoSym); + } else { + /* + * it's an instance function. + * pass caller function's this pointer + */ + CHECK_FATAL(curFunc.GetFormalCount() != 0, "index out of range in CGLowerer::GetBaseNodeFromCurFunc"); + MIRSymbol *formalSt = curFunc.GetFormal(0); + if (formalSt->IsPreg()) { + if (isFromJarray) { + baseNode = mirBuilder->CreateExprRegread(formalSt->GetType()->GetPrimType(), + curFunc.GetPregTab()->GetPregIdxFromPregno(formalSt->GetPreg()->GetPregNo())); + } else { + CHECK_FATAL(curFunc.GetParamSize() != 0, "index out of range in CGLowerer::GetBaseNodeFromCurFunc"); + baseNode = mirBuilder->CreateExprRegread((curFunc.GetNthParamType(0))->GetPrimType(), + curFunc.GetPregTab()->GetPregIdxFromPregno(formalSt->GetPreg()->GetPregNo())); + } + } else { + baseNode = mirBuilder->CreateExprDread(*formalSt); + } + } + return baseNode; +} + +BaseNode *CGLowerer::GetClassInfoExprFromRuntime(const std::string &classInfo) { + /* + * generate runtime call to get class information + * jclass __mrt_getclass(jobject caller, const char *name) + * if the calling function is an instance function, it's the calling obj + * if the calling function is a static function, it's the calling class + */ + BaseNode *classInfoExpr = nullptr; + PUIdx getClassFunc = GetBuiltinToUse(INTRN_JAVA_GET_CLASS); + CHECK_FATAL(getClassFunc != kFuncNotFound, "classfunc is not found"); + /* return jclass */ + MIRType *voidPtrType = GlobalTables::GetTypeTable().GetPtr(); + MIRSymbol *ret0 = CreateNewRetVar(*voidPtrType, kIntrnRetValPrefix); + + BaseNode *arg0 = GetBaseNodeFromCurFunc(*mirModule.CurFunction(), false); + BaseNode *arg1 = nullptr; + /* classname */ + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(classInfo, klassJavaDescriptor); + UStrIdx classNameStrIdx = 
GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(klassJavaDescriptor);
+  arg1 = mirModule.GetMemPool()->New<ConststrNode>(classNameStrIdx);
+  arg1->SetPrimType(PTY_ptr);
+
+  MapleVector<BaseNode*> args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter());
+  args.emplace_back(arg0);
+  args.emplace_back(arg1);
+  StmtNode *getClassCall = mirBuilder->CreateStmtCallAssigned(getClassFunc, args, ret0, OP_callassigned);
+  currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*getClassCall));
+  classInfoExpr = mirBuilder->CreateExprDread(*voidPtrType, 0, *ret0);
+  return classInfoExpr;
+}
+
+BaseNode *CGLowerer::GetClassInfoExprFromArrayClassCache(const std::string &classInfo) {
+  std::string klassJavaDescriptor;
+  namemangler::DecodeMapleNameToJavaDescriptor(classInfo, klassJavaDescriptor);
+  if (arrayClassCacheIndex.find(klassJavaDescriptor) == arrayClassCacheIndex.end()) {
+    return nullptr;
+  }
+  GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(
+      namemangler::kArrayClassCacheTable + mirModule.GetFileNameAsPostfix());
+  MIRSymbol *arrayClassSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx);
+  if (arrayClassSt == nullptr) {
+    return nullptr;
+  }
+  auto index = arrayClassCacheIndex[klassJavaDescriptor];
+#ifdef USE_32BIT_REF
+  const int32 width = 4;
+#else
+  const int32 width = 8;
+#endif /* USE_32BIT_REF */
+  int64 offset = static_cast<int64>(index) * width;
+  ConstvalNode *offsetExpr = mirBuilder->CreateIntConst(offset, PTY_u32);
+  AddrofNode *baseExpr = mirBuilder->CreateExprAddrof(0, *arrayClassSt, mirModule.GetMemPool());
+  MapleVector<BaseNode*> args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter());
+  args.emplace_back(baseExpr);
+  args.emplace_back(offsetExpr);
+  return mirBuilder->CreateExprIntrinsicop(INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY, OP_intrinsicop,
+                                           *GlobalTables::GetTypeTable().GetPrimType(PTY_ref), args);
+}
+
+BaseNode *CGLowerer::GetClassInfoExpr(const std::string &classInfo) const {
+  BaseNode *classInfoExpr = nullptr;
+  GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classInfo);
+  MIRSymbol *classInfoSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx);
+  if (classInfoSym != nullptr) {
+    classInfoExpr = mirBuilder->CreateExprAddrof(0, *classInfoSym);
+  } else {
+    classInfoSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+    classInfoSym->SetNameStrIdx(strIdx);
+    classInfoSym->SetStorageClass(kScGlobal);
+    classInfoSym->SetSKind(kStVar);
+    if (CGOptions::IsPIC()) {
+      classInfoSym->SetStorageClass(kScExtern);
+    } else {
+      classInfoSym->SetAttr(ATTR_weak);
+    }
+    GlobalTables::GetGsymTable().AddToStringSymbolMap(*classInfoSym);
+    classInfoSym->SetTyIdx(static_cast<TyIdx>(PTY_ptr));
+
+    classInfoExpr = mirBuilder->CreateExprAddrof(0, *classInfoSym);
+  }
+  return classInfoExpr;
+}
+
+BaseNode *CGLowerer::LowerIntrinsicopWithType(const BaseNode &parent, IntrinsicopNode &intrinNode) {
+  BaseNode *resNode = &intrinNode;
+  if ((intrinNode.GetIntrinsic() == INTRN_JAVA_CONST_CLASS) || (intrinNode.GetIntrinsic() == INTRN_JAVA_INSTANCE_OF)) {
+    PUIdx bFunc = GetBuiltinToUse(intrinNode.GetIntrinsic());
+    CHECK_FATAL(bFunc != kFuncNotFound, "bFunc not found");
+    MIRFunction *biFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(bFunc);
+    MIRType *classType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinNode.GetTyIdx());
+    std::string classInfo;
+    BaseNode *classInfoExpr = nullptr;
+    bool classInfoFromRt = false;  /* whether the classinfo is generated by RT */
+    ProcessClassInfo(*classType, classInfoFromRt, classInfo);
+    if
(classInfoFromRt) { + classInfoExpr = GetClassInfoExprFromArrayClassCache(classInfo); + if (classInfoExpr == nullptr) { + classInfoExpr = GetClassInfoExprFromRuntime(classInfo); + } + } else { + classInfoExpr = GetClassInfoExpr(classInfo); + LowerTypePtr(*classInfoExpr); + } + + if (intrinNode.GetIntrinsic() == INTRN_JAVA_CONST_CLASS) { + CHECK_FATAL(classInfoExpr != nullptr, "classInfoExpr should not be nullptr"); + if ((classInfoExpr->GetPrimType() == PTY_ptr) || (classInfoExpr->GetPrimType() == PTY_ref)) { + classInfoExpr->SetPrimType(GetLoweredPtrType()); + } + resNode = classInfoExpr; + return resNode; + } + + if (parent.GetOpCode() == OP_regassign) { + auto ®Assign = static_cast(parent); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, regAssign.GetRegIdx(), bFunc, classInfoExpr); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + PrimType pTyp = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(regAssign.GetRegIdx())->GetPrimType(); + resNode = mirBuilder->CreateExprRegread(pTyp, regAssign.GetRegIdx()); + return resNode; + } + + if (parent.GetOpCode() == OP_dassign) { + auto &dassign = static_cast(parent); + MIRSymbol *ret = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, *ret, bFunc, classInfoExpr); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + resNode = mirBuilder->CreateExprDread(*biFunc->GetReturnType(), 0, *ret); + return resNode; + } + CHECK_FATAL(false, "should not run here"); + } + CHECK_FATAL(false, "should not run here"); + return resNode; +} + +BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode, BlockNode &newBlk) { + for (size_t i = 0; i < intrinNode.GetNumOpnds(); ++i) { + intrinNode.SetOpnd(LowerExpr(intrinNode, *intrinNode.Opnd(i), newBlk), i); + } + + MIRIntrinsicID intrnID = intrinNode.GetIntrinsic(); + IntrinDesc &intrinDesc = IntrinDesc::intrinTable[intrnID]; + if (intrinDesc.IsJS()) { + return LowerJavascriptIntrinsicop(intrinNode, intrinDesc); + } + if (intrinDesc.IsJava()) { + return LowerIntrinsicop(parent, intrinNode); + } + if (intrinNode.GetIntrinsic() == INTRN_MPL_READ_OVTABLE_ENTRY_LAZY) { + return &intrinNode; + } + if (intrinNode.GetIntrinsic() == INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY) { + return &intrinNode; + } + if (intrnID == INTRN_C_constant_p) { + BaseNode *opnd = intrinNode.Opnd(0); + int64 val = (opnd->op == OP_constval || opnd->op == OP_sizeoftype || + opnd->op == OP_conststr || opnd->op == OP_conststr16) ? 
1 : 0; + return mirModule.GetMIRBuilder()->CreateIntConst(val, PTY_i32); + } + if (intrnID == INTRN_C___builtin_expect) { + return intrinNode.Opnd(0); + } + if (intrinDesc.IsVectorOp() || intrinDesc.IsAtomic()) { + return &intrinNode; + } + CHECK_FATAL(false, "unexpected intrinsic type in CGLowerer::LowerIntrinsicop"); + return &intrinNode; +} + +BaseNode *CGLowerer::LowerIntrinsicopwithtype(const BaseNode &parent, IntrinsicopNode &intrinNode, BlockNode &blk) { + for (size_t i = 0; i < intrinNode.GetNumOpnds(); ++i) { + intrinNode.SetOpnd(LowerExpr(intrinNode, *intrinNode.Opnd(i), blk), i); + } + MIRIntrinsicID intrnID = intrinNode.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + CHECK_FATAL(!intrinDesc->IsJS(), "intrinDesc should not be js"); + if (intrinDesc->IsJava()) { + return LowerIntrinsicopWithType(parent, intrinNode); + } + CHECK_FATAL(false, "should not run here"); + return &intrinNode; +} + +StmtNode *CGLowerer::LowerIntrinsicMplClearStack(const IntrinsiccallNode &intrincall, BlockNode &newBlk) { + StmtNode *newStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()), 0, + intrincall.Opnd(0), mirBuilder->GetConstUInt8(0)); + newBlk.AddStatement(newStmt); + + BaseNode *length = intrincall.Opnd(1); + PrimType pType = PTY_i64; + PregIdx pIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(pType); + newStmt = mirBuilder->CreateStmtRegassign(pType, pIdx, mirBuilder->CreateIntConst(1, pType)); + newBlk.AddStatement(newStmt); + MIRFunction *func = GetCurrentFunc(); + + const std::string &name = func->GetName() + std::string("_Lalloca_"); + LabelIdx label1 = GetCurrentFunc()->GetOrCreateLableIdxFromName(name + std::to_string(labelIdx++)); + LabelIdx label2 = GetCurrentFunc()->GetOrCreateLableIdxFromName(name + std::to_string(labelIdx++)); + + newStmt = mirBuilder->CreateStmtGoto(OP_goto, label2); + newBlk.AddStatement(newStmt); + LabelNode *ln = mirBuilder->CreateStmtLabel(label1); + newBlk.AddStatement(ln); + + RegreadNode *regLen = mirBuilder->CreateExprRegread(pType, pIdx); + + BinaryNode *addr = mirBuilder->CreateExprBinary(OP_add, + *GlobalTables::GetTypeTable().GetAddr64(), + intrincall.Opnd(0), regLen); + + newStmt = mirBuilder->CreateStmtIassign(*beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()), + 0, addr, mirBuilder->GetConstUInt8(0)); + newBlk.AddStatement(newStmt); + + BinaryNode *subLen = mirBuilder->CreateExprBinary( + OP_add, *GlobalTables::GetTypeTable().GetPrimType(pType), regLen, mirBuilder->CreateIntConst(1, pType)); + newStmt = mirBuilder->CreateStmtRegassign(pType, pIdx, subLen); + newBlk.AddStatement(newStmt); + + ln = mirBuilder->CreateStmtLabel(label2); + newBlk.AddStatement(ln); + + CompareNode *cmpExp = + mirBuilder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetUInt32(), + *GlobalTables::GetTypeTable().GetPrimType(pType), regLen, length); + newStmt = mirBuilder->CreateStmtCondGoto(cmpExp, OP_brtrue, label1); + + return newStmt; +} + +StmtNode *CGLowerer::LowerIntrinsicRCCall(const IntrinsiccallNode &intrincall) { + /* If GCONLY enabled, lowering RC intrinsics in another way. */ + MIRIntrinsicID intrnID = intrincall.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + + /* convert intrinsic call into function call. 
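+   * For illustration (an assumed example, not from the original comment): an
+   * RC intrinsiccall whose IntrinDesc name is, say, "MCC_IncRef" would become
+   * an ordinary `call &MCC_IncRef` with the same operands; the PUIdx created
+   * for it is cached in intrinFuncIDs so later occurrences reuse the callee.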
*/ + if (intrinFuncIDs.find(intrinDesc) == intrinFuncIDs.end()) { + /* add funcid into map */ + MIRFunction *fn = mirBuilder->GetOrCreateFunction(intrinDesc->name, TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + intrinFuncIDs[intrinDesc] = fn->GetPuidx(); + } + CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); + callStmt->SetPUIdx(intrinFuncIDs.at(intrinDesc)); + for (size_t i = 0; i < intrincall.GetNopndSize(); ++i) { + callStmt->GetNopnd().emplace_back(intrincall.GetNopndAt(i)); + callStmt->SetNumOpnds(callStmt->GetNumOpnds() + 1); + } + return callStmt; +} + +void CGLowerer::LowerArrayStore(const IntrinsiccallNode &intrincall, BlockNode &newBlk) { + bool needCheckStore = true; + BaseNode *arrayNode = intrincall.Opnd(0); + MIRType *arrayElemType = GetArrayNodeType(*arrayNode); + BaseNode *valueNode = intrincall.Opnd(kNodeThirdOpnd); + MIRType *valueRealType = GetArrayNodeType(*valueNode); + if ((arrayElemType != nullptr) && (valueRealType != nullptr) && (arrayElemType->GetKind() == kTypeClass) && + static_cast(arrayElemType)->IsFinal() && (valueRealType->GetKind() == kTypeClass) && + static_cast(valueRealType)->IsFinal() && + (valueRealType->GetTypeIndex() == arrayElemType->GetTypeIndex())) { + needCheckStore = false; + } + + if (needCheckStore) { + MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(intrincall.Opnd(0)); + args.emplace_back(intrincall.Opnd(kNodeThirdOpnd)); + StmtNode *checkStoreStmt = mirBuilder->CreateStmtCall(fn->GetPuidx(), args); + newBlk.AddStatement(checkStoreStmt); + } +} + +StmtNode *CGLowerer::LowerDefaultIntrinsicCall(IntrinsiccallNode &intrincall, MIRSymbol &st, MIRFunction &fn) { + MIRIntrinsicID intrnID = intrincall.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + std::vector funcTyVec; + std::vector fnTaVec; + MapleVector &nOpnds = intrincall.GetNopnd(); + MIRType *retTy = intrinDesc->GetReturnType(); + CHECK_FATAL(retTy != nullptr, "retTy should not be nullptr"); + if (retTy->GetKind() == kTypeStruct) { + funcTyVec.emplace_back(beCommon.BeGetOrCreatePointerType(*retTy)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + fn.SetReturnStruct(); + } + for (uint32 i = 0; i < nOpnds.size(); ++i) { + MIRType *argTy = intrinDesc->GetArgType(i); + CHECK_FATAL(argTy != nullptr, "argTy should not be nullptr"); + if (argTy->GetKind() == kTypeStruct) { + funcTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); + CHECK_FATAL(addrNode != nullptr, "can not get address"); + nOpnds[i] = addrNode; + } else { + funcTyVec.emplace_back(argTy->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + } + } + MIRType *funcType = beCommon.BeGetOrCreateFunctionType(retTy->GetTypeIndex(), funcTyVec, fnTaVec); + st.SetTyIdx(funcType->GetTypeIndex()); + fn.SetMIRFuncType(static_cast(funcType)); + if (retTy->GetKind() == kTypeStruct) { + fn.SetReturnTyIdx(static_cast(PTY_void)); + } else { + fn.SetReturnTyIdx(retTy->GetTypeIndex()); + } + return static_cast(mirBuilder->CreateStmtCall(fn.GetPuidx(), nOpnds)); +} + +StmtNode 
*CGLowerer::LowerIntrinsicMplCleanupLocalRefVarsSkip(IntrinsiccallNode &intrincall) {
+  MIRFunction *mirFunc = mirModule.CurFunction();
+  BaseNode *skipExpr = intrincall.Opnd(intrincall.NumOpnds() - 1);
+
+  CHECK_FATAL(skipExpr != nullptr, "skipExpr should not be nullptr");
+  CHECK_FATAL(skipExpr->GetOpCode() == OP_dread, "skipExpr should be a dread");
+  DreadNode *refNode = static_cast<DreadNode*>(skipExpr);
+  MIRSymbol *skipSym = mirFunc->GetLocalOrGlobalSymbol(refNode->GetStIdx());
+  if (skipSym->GetAttr(ATTR_localrefvar)) {
+    mirFunc->InsertMIRSymbol(skipSym);
+  }
+  return &intrincall;
+}
+
+StmtNode *CGLowerer::LowerIntrinsiccall(IntrinsiccallNode &intrincall, BlockNode &newBlk) {
+  MIRIntrinsicID intrnID = intrincall.GetIntrinsic();
+  for (size_t i = 0; i < intrincall.GetNumOpnds(); ++i) {
+    intrincall.SetOpnd(LowerExpr(intrincall, *intrincall.Opnd(i), newBlk), i);
+  }
+  if (intrnID == INTRN_MPL_CLEAR_STACK) {
+    return LowerIntrinsicMplClearStack(intrincall, newBlk);
+  }
+  if (intrnID == INTRN_C_va_start) {
+    return &intrincall;
+  }
+  IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID];
+  if (intrinDesc->IsSpecial() || intrinDesc->IsAtomic()) {
+    /* special intrinsics are left to CGFunc::SelectIntrinCall() */
+    return &intrincall;
+  }
+  /* by default, lower the intrinsic call to a real function call. */
+  MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+  CHECK_FATAL(intrinDesc->name != nullptr, "intrinsic's name should not be nullptr");
+  const std::string name = intrinDesc->name;
+  st->SetNameStrIdx(name);
+  st->SetStorageClass(kScText);
+  st->SetSKind(kStFunc);
+  MIRFunction *fn = mirBuilder->GetOrCreateFunction(intrinDesc->name, TyIdx(0));
+  beCommon.UpdateTypeTable(*fn->GetMIRFuncType());
+  fn->AllocSymTab();
+  st->SetFunction(fn);
+  st->SetAppearsInCode(true);
+  return LowerDefaultIntrinsicCall(intrincall, *st, *fn);
+}
+
+StmtNode *CGLowerer::LowerSyncEnterSyncExit(StmtNode &stmt) {
+  CHECK_FATAL(stmt.GetOpCode() == OP_syncenter || stmt.GetOpCode() == OP_syncexit,
+              "stmt's opcode should be OP_syncenter or OP_syncexit");
+
+  auto &nStmt = static_cast<NaryStmtNode&>(stmt);
+  BuiltinFunctionID id;
+  if (nStmt.GetOpCode() == OP_syncenter) {
+    if (nStmt.NumOpnds() == 1) {
+      /* just as ParseNaryStmt does for syncenter */
+      MIRType &intType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i32));
+      /* default 2 for __sync_enter_fast() */
+      MIRIntConst *intConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(2, intType);
+      ConstvalNode *exprConst = mirModule.GetMemPool()->New<ConstvalNode>();
+      exprConst->SetPrimType(PTY_i32);
+      exprConst->SetConstVal(intConst);
+      nStmt.GetNopnd().emplace_back(exprConst);
+      nStmt.SetNumOpnds(nStmt.GetNopndSize());
+    }
+    CHECK_FATAL(nStmt.NumOpnds() == kOperandNumBinary, "wrong args for syncenter");
+    CHECK_FATAL(nStmt.Opnd(1)->GetOpCode() == OP_constval, "wrong 2nd arg type for syncenter");
+    ConstvalNode *cst = static_cast<ConstvalNode*>(nStmt.GetNopndAt(1));
+    MIRIntConst *intConst = safe_cast<MIRIntConst>(cst->GetConstVal());
+    switch (intConst->GetExtValue()) {
+      case kMCCSyncEnterFast0:
+        id = INTRN_FIRST_SYNC_ENTER;
+        break;
+      case kMCCSyncEnterFast1:
+        id = INTRN_SECOND_SYNC_ENTER;
+        break;
+      case kMCCSyncEnterFast2:
+        id = INTRN_THIRD_SYNC_ENTER;
+        break;
+      case kMCCSyncEnterFast3:
+        id = INTRN_FOURTH_SYNC_ENTER;
+        break;
+      default:
+        CHECK_FATAL(false, "wrong kind for syncenter");
+        break;
+    }
+  } else {
+    CHECK_FATAL(nStmt.NumOpnds() == 1, "wrong args for syncexit");
+    id = INTRN_SYNC_EXIT;
+  }
+  PUIdx bFunc = GetBuiltinToUse(id);
+  CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should
be found"); + + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(nStmt.Opnd(0)); + return mirBuilder->CreateStmtCall(bFunc, args); +} + +PUIdx CGLowerer::GetBuiltinToUse(BuiltinFunctionID id) const { + /* + * use std::vector & linear search as the number of entries is small. + * we may revisit it if the number of entries gets larger. + */ + for (const auto &funcID : builtinFuncIDs) { + if (funcID.first == id) { + return funcID.second; + } + } + return kFuncNotFound; +} + +void CGLowerer::LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcmalloc, BlockNode &blkNode, bool perm) { + MIRFunction *func = mirBuilder->GetOrCreateFunction((perm ? "MCC_NewPermanentObject" : "MCC_NewObj_fixed_class"), + (TyIdx)(GetLoweredPtrType())); + func->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + /* Get the classinfo */ + MIRStructType *classType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(gcmalloc.GetTyIdx())); + std::string classInfoName = CLASSINFO_PREFIX_STR + classType->GetName(); + MIRSymbol *classSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(classInfoName)); + if (classSym == nullptr) { + MIRType *pointerType = beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetVoid()); + classSym = mirBuilder->CreateGlobalDecl(classInfoName, *pointerType); + classSym->SetStorageClass(kScExtern); + } + CallNode *callAssign = nullptr; + auto *curFunc = mirModule.CurFunction(); + if (classSym->GetAttr(ATTR_abstract) || classSym->GetAttr(ATTR_interface)) { + MIRFunction *funcSecond = mirBuilder->GetOrCreateFunction("MCC_Reflect_ThrowInstantiationError", + static_cast(GetLoweredPtrType())); + funcSecond->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*funcSecond->GetMIRFuncType()); + funcSecond->AllocSymTab(); + BaseNode *arg = mirBuilder->CreateExprAddrof(0, *classSym); + if (node.GetOpCode() == OP_dassign) { + auto &dsNode = static_cast(node); + MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(dsNode.GetStIdx()); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(arg); + callAssign = mirBuilder->CreateStmtCallAssigned(funcSecond->GetPuidx(), args, ret, OP_callassigned); + } else { + CHECK_FATAL(node.GetOpCode() == OP_regassign, "regassign expected"); + callAssign = mirBuilder->CreateStmtCallRegassigned( + funcSecond->GetPuidx(), static_cast(node).GetRegIdx(), OP_callassigned, arg); + } + blkNode.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callAssign)); + return; + } + BaseNode *arg = mirBuilder->CreateExprAddrof(0, *classSym); + + if (node.GetOpCode() == OP_dassign) { + MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(static_cast(node).GetStIdx()); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(arg); + callAssign = mirBuilder->CreateStmtCallAssigned(func->GetPuidx(), args, ret, OP_callassigned); + } else { + CHECK_FATAL(node.GetOpCode() == OP_regassign, "regassign expected"); + callAssign = mirBuilder->CreateStmtCallRegassigned( + func->GetPuidx(), static_cast(node).GetRegIdx(), OP_callassigned, arg); + } + blkNode.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callAssign)); +} + +std::string CGLowerer::GetNewArrayFuncName(const uint32 elemSize, const bool perm) const { + if (elemSize == 1) { + return perm ? 
"MCC_NewPermArray8" : "MCC_NewArray8"; + } + if (elemSize == 2) { + return perm ? "MCC_NewPermArray16" : "MCC_NewArray16"; + } + if (elemSize == 4) { + return perm ? "MCC_NewPermArray32" : "MCC_NewArray32"; + } + CHECK_FATAL((elemSize == 8), "Invalid elemSize."); + return perm ? "MCC_NewPermArray64" : "MCC_NewArray64"; +} + +void CGLowerer::LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode &node, BlockNode &blkNode, bool perm) { + /* Extract jarray type */ + TyIdx tyIdx = node.GetTyIdx(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type->GetKind() == kTypeJArray, "Type param of gcmallocjarray is not a MIRJarrayType"); + auto jaryType = static_cast(type); + CHECK_FATAL(jaryType != nullptr, "Type param of gcmallocjarray is not a MIRJarrayType"); + + /* Inspect element type */ + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jaryType->GetElemTyIdx()); + PrimType elemPrimType = elemType->GetPrimType(); + uint32 elemSize = GetPrimTypeSize(elemPrimType); + if (elemType->GetKind() != kTypeScalar) { /* element is reference */ + elemSize = static_cast(RTSupport::GetRTSupportInstance().GetFieldSize()); + } + + std::string klassName = jaryType->GetJavaName(); + std::string arrayClassInfoName; + bool isPredefinedArrayClass = false; + BaseNode *arrayCacheNode = nullptr; + if (jaryType->IsPrimitiveArray() && (jaryType->GetDim() <= kThreeDimArray)) { + arrayClassInfoName = PRIMITIVECLASSINFO_PREFIX_STR + klassName; + isPredefinedArrayClass = true; + } else if (arrayNameForLower::kArrayKlassName.find(klassName) != arrayNameForLower::kArrayKlassName.end()) { + arrayClassInfoName = CLASSINFO_PREFIX_STR + klassName; + isPredefinedArrayClass = true; + } else { + arrayCacheNode = GetClassInfoExprFromArrayClassCache(klassName); + } + + std::string funcName; + MapleVector args(mirModule.GetMPAllocator().Adapter()); + auto *curFunc = mirModule.CurFunction(); + if (isPredefinedArrayClass || (arrayCacheNode != nullptr)) { + funcName = GetNewArrayFuncName(elemSize, perm); + args.emplace_back(node.Opnd(0)); /* n_elems */ + if (isPredefinedArrayClass) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayClassInfoName); + MIRSymbol *arrayClassSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(arrayClassInfoName)); + if (arrayClassSym == nullptr) { + arrayClassSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + arrayClassSym->SetNameStrIdx(strIdx); + arrayClassSym->SetStorageClass(kScGlobal); + arrayClassSym->SetSKind(kStVar); + if (CGOptions::IsPIC()) { + arrayClassSym->SetStorageClass(kScExtern); + } else { + arrayClassSym->SetAttr(ATTR_weak); + } + GlobalTables::GetGsymTable().AddToStringSymbolMap(*arrayClassSym); + arrayClassSym->SetTyIdx(static_cast(PTY_ptr)); + } + args.emplace_back(mirBuilder->CreateExprAddrof(0, *arrayClassSym)); + } else { + args.emplace_back(arrayCacheNode); + } + } else { + funcName = perm ? 
"MCC_NewPermanentArray" : "MCC_NewObj_flexible_cname"; + args.emplace_back(mirBuilder->CreateIntConst(elemSize, PTY_u32)); /* elem_size */ + args.emplace_back(node.Opnd(0)); /* n_elems */ + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(klassName, klassJavaDescriptor); + UStrIdx classNameStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(klassJavaDescriptor); + ConststrNode *classNameExpr = mirModule.GetMemPool()->New(classNameStrIdx); + classNameExpr->SetPrimType(PTY_ptr); + args.emplace_back(classNameExpr); /* class_name */ + args.emplace_back(GetBaseNodeFromCurFunc(*curFunc, true)); + /* set class flag 0 */ + args.emplace_back(mirBuilder->CreateIntConst(0, PTY_u32)); + } + MIRFunction *func = mirBuilder->GetOrCreateFunction(funcName, static_cast(GetLoweredPtrType())); + func->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + CallNode *callAssign = nullptr; + if (stmt.GetOpCode() == OP_dassign) { + auto &dsNode = static_cast(stmt); + MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(dsNode.GetStIdx()); + + callAssign = mirBuilder->CreateStmtCallAssigned(func->GetPuidx(), args, ret, OP_callassigned); + } else { + auto ®Node = static_cast(stmt); + callAssign = mirBuilder->CreateStmtCallRegassigned(func->GetPuidx(), args, regNode.GetRegIdx(), OP_callassigned); + } + blkNode.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callAssign)); +} + +bool CGLowerer::IsIntrinsicCallHandledAtLowerLevel(MIRIntrinsicID intrinsic) const { + /* only INTRN_MPL_ATOMIC_EXCHANGE_PTR now. */ + return intrinsic == INTRN_MPL_ATOMIC_EXCHANGE_PTR; +} + +bool CGLowerer::IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const { + switch (intrinsic) { +#if TARGAARCH64 || TARGX86_64 + case INTRN_C_cos: + case INTRN_C_cosf: + case INTRN_C_cosh: + case INTRN_C_coshf: + case INTRN_C_acos: + case INTRN_C_acosf: + case INTRN_C_sin: + case INTRN_C_sinf: + case INTRN_C_sinh: + case INTRN_C_sinhf: + case INTRN_C_asin: + case INTRN_C_asinf: + case INTRN_C_atan: + case INTRN_C_atanf: + case INTRN_C_exp: + case INTRN_C_expf: + case INTRN_C_ffs: + case INTRN_C_log: + case INTRN_C_logf: + case INTRN_C_log10: + case INTRN_C_log10f: + case INTRN_C_clz32: + case INTRN_C_clz64: + case INTRN_C_ctz32: + case INTRN_C_ctz64: + case INTRN_C_popcount32: + case INTRN_C_popcount64: + case INTRN_C_parity32: + case INTRN_C_parity64: + case INTRN_C_clrsb32: + case INTRN_C_clrsb64: + case INTRN_C_isaligned: + case INTRN_C_alignup: + case INTRN_C_aligndown: + case INTRN_C___sync_add_and_fetch_1: + case INTRN_C___sync_add_and_fetch_2: + case INTRN_C___sync_add_and_fetch_4: + case INTRN_C___sync_add_and_fetch_8: + case INTRN_C___sync_sub_and_fetch_1: + case INTRN_C___sync_sub_and_fetch_2: + case INTRN_C___sync_sub_and_fetch_4: + case INTRN_C___sync_sub_and_fetch_8: + case INTRN_C___sync_fetch_and_add_1: + case INTRN_C___sync_fetch_and_add_2: + case INTRN_C___sync_fetch_and_add_4: + case INTRN_C___sync_fetch_and_add_8: + case INTRN_C___sync_fetch_and_sub_1: + case INTRN_C___sync_fetch_and_sub_2: + case INTRN_C___sync_fetch_and_sub_4: + case INTRN_C___sync_fetch_and_sub_8: + case INTRN_C___sync_bool_compare_and_swap_1: + case INTRN_C___sync_bool_compare_and_swap_2: + case INTRN_C___sync_bool_compare_and_swap_4: + case INTRN_C___sync_bool_compare_and_swap_8: + case INTRN_C___sync_val_compare_and_swap_1: + case INTRN_C___sync_val_compare_and_swap_2: + case INTRN_C___sync_val_compare_and_swap_4: + case 
INTRN_C___sync_val_compare_and_swap_8: + case INTRN_C___sync_lock_test_and_set_1: + case INTRN_C___sync_lock_test_and_set_2: + case INTRN_C___sync_lock_test_and_set_4: + case INTRN_C___sync_lock_test_and_set_8: + case INTRN_C___sync_lock_release_8: + case INTRN_C___sync_lock_release_4: + case INTRN_C___sync_lock_release_2: + case INTRN_C___sync_lock_release_1: + case INTRN_C___sync_fetch_and_and_1: + case INTRN_C___sync_fetch_and_and_2: + case INTRN_C___sync_fetch_and_and_4: + case INTRN_C___sync_fetch_and_and_8: + case INTRN_C___sync_fetch_and_or_1: + case INTRN_C___sync_fetch_and_or_2: + case INTRN_C___sync_fetch_and_or_4: + case INTRN_C___sync_fetch_and_or_8: + case INTRN_C___sync_fetch_and_xor_1: + case INTRN_C___sync_fetch_and_xor_2: + case INTRN_C___sync_fetch_and_xor_4: + case INTRN_C___sync_fetch_and_xor_8: + case INTRN_C___sync_fetch_and_nand_1: + case INTRN_C___sync_fetch_and_nand_2: + case INTRN_C___sync_fetch_and_nand_4: + case INTRN_C___sync_fetch_and_nand_8: + case INTRN_C___sync_and_and_fetch_1: + case INTRN_C___sync_and_and_fetch_2: + case INTRN_C___sync_and_and_fetch_4: + case INTRN_C___sync_and_and_fetch_8: + case INTRN_C___sync_or_and_fetch_1: + case INTRN_C___sync_or_and_fetch_2: + case INTRN_C___sync_or_and_fetch_4: + case INTRN_C___sync_or_and_fetch_8: + case INTRN_C___sync_xor_and_fetch_1: + case INTRN_C___sync_xor_and_fetch_2: + case INTRN_C___sync_xor_and_fetch_4: + case INTRN_C___sync_xor_and_fetch_8: + case INTRN_C___sync_nand_and_fetch_1: + case INTRN_C___sync_nand_and_fetch_2: + case INTRN_C___sync_nand_and_fetch_4: + case INTRN_C___sync_nand_and_fetch_8: + case INTRN_C___sync_synchronize: + case INTRN_C__builtin_return_address: + case INTRN_C__builtin_extract_return_addr: + case INTRN_C_memcmp: + case INTRN_C_strlen: + case INTRN_C_strcmp: + case INTRN_C_strncmp: + case INTRN_C_strchr: + case INTRN_C_strrchr: + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + return true; +#endif + default: + return false; + } +} + +void CGLowerer::InitArrayClassCacheTableIndex() { + MIRSymbol *reflectStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionStrtabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *reflectStartHotStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionStartHotStrtabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *reflectBothHotStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionBothHotStrTabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *reflectRunHotStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionRunHotStrtabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *arrayCacheNameTableSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kArrayClassCacheNameTable + mirModule.GetFileNameAsPostfix())); + if (arrayCacheNameTableSym == nullptr) { + return; + } + MIRAggConst &aggConst = static_cast(*(arrayCacheNameTableSym->GetKonst())); + MIRSymbol *strTab = nullptr; + for (size_t i = 0; i < aggConst.GetConstVec().size(); ++i) { + MIRConst *elemConst = aggConst.GetConstVecItem(i); + uint32 intValue = static_cast(((safe_cast(elemConst))->GetExtValue()) & 
0xFFFFFFFF);
+    bool isHotReflectStr = (intValue & 0x00000003) != 0;  /* use the last two bits of intValue in this expression */
+    if (isHotReflectStr) {
+      uint32 tag = (intValue & 0x00000003) - kCStringShift;  /* use the last two bits of intValue in this expression */
+      if (tag == kLayoutBootHot) {
+        strTab = reflectStartHotStrtabSym;
+      } else if (tag == kLayoutBothHot) {
+        strTab = reflectBothHotStrtabSym;
+      } else {
+        strTab = reflectRunHotStrtabSym;
+      }
+    } else {
+      strTab = reflectStrtabSym;
+    }
+    DEBUG_ASSERT(strTab != nullptr, "strTab is nullptr");
+    std::string arrayClassName;
+    MIRAggConst *strAgg = static_cast<MIRAggConst*>(strTab->GetKonst());
+    for (auto start = (intValue >> 2); start < strAgg->GetConstVec().size(); ++start) {  /* the last two bits are flags */
+      MIRIntConst *oneChar = static_cast<MIRIntConst*>(strAgg->GetConstVecItem(start));
+      if ((oneChar != nullptr) && !oneChar->IsZero()) {
+        arrayClassName += static_cast<char>(oneChar->GetExtValue());
+      } else {
+        break;
+      }
+    }
+    arrayClassCacheIndex[arrayClassName] = i;
+  }
+}
+
+void CGLowerer::LowerFunc(MIRFunction &func) {
+  labelIdx = 0;
+  SetCurrentFunc(&func);
+  hasTry = false;
+  LowerEntry(func);
+  LowerPseudoRegs(func);
+  BlockNode *origBody = func.GetBody();
+  CHECK_FATAL(origBody != nullptr, "origBody should not be nullptr");
+
+  BlockNode *newBody = LowerBlock(*origBody);
+  func.SetBody(newBody);
+  if (needBranchCleanup) {
+    CleanupBranches(func);
+  }
+
+  if (mirModule.IsJavaModule() && func.GetBody()->GetFirst() && GenerateExceptionHandlingCode()) {
+    LowerTryCatchBlocks(*func.GetBody());
+  }
+  uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize();
+  // We do the simplification work here because, by now, all the intrinsic calls and the potential
+  // expansion of memcpy and other functions have been handled, so we can concentrate on the
+  // replacement work.
+  SimplifyBlock(*newBody);
+  uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize();
+  if (newTypeTableSize != oldTypeTableSize) {
+    beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize);
+  }
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/be/rt.cpp b/ecmascript/mapleall/maple_be/src/be/rt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5a14d9cf52f0f503d90320fb332e62b9bd62bc8a
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/be/rt.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "rt.h"
+
+namespace maplebe {
+const std::string RTSupport::kObjectMapSectionName = ".maple.objectmap";
+const std::string RTSupport::kGctibLabelArrayOfObject = "MCC_GCTIB___ArrayOfObject";
+const std::string RTSupport::kGctibLabelJavaObject = "MCC_GCTIB__Ljava_2Flang_2FObject_3B";
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/be/switch_lowerer.cpp b/ecmascript/mapleall/maple_be/src/be/switch_lowerer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6343ec2a6dc9e845330bdd7df6841ea525bcae53
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/be/switch_lowerer.cpp
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+/*
+ * This module analyzes the tag distribution in a switch statement and decides
+ * the best strategy in terms of runtime performance to generate code for it.
+ * The generated code makes use of 3 code generation techniques:
+ *
+ * 1. cascade of if-then-else based on equality test
+ * 2. rangegoto
+ * 3. binary search
+ *
+ * 1 is applied only if the number of possibilities is <= 6.
+ * 2 corresponds to indexed jump, but it requires allocating an array
+ * initialized with the jump targets. Since it causes memory usage overhead,
+ * rangegoto is used only if the density is higher than 0.7.
+ * If neither 1 nor 2 is applicable, 3 is applied in the form of a decision
+ * tree. In this case, each test would split the tags into 2 halves. For
+ * each half, the above algorithm is then applied recursively until the
+ * algorithm terminates.
+ *
+ * But we don't want to apply 3 right from the beginning if both 1 and 2 do not
+ * apply, because there may be regions that have density > 0.7. Thus, the
+ * switch lowerer begins by finding clusters. A cluster is defined to be a
+ * maximal range of tags whose density is > 0.7.
+ *
+ * In finding clusters, the original switch table is sorted and then each dense
+ * region is condensed into 1 switch item; in the switch_items table, each item
+ * either corresponds to an original entry in the original switch table (pair's
+ * second is 0), or to a dense region (pair's second gives the upper limit of
+ * the dense range). The output code is generated based on the switch_items.
+ * See BuildCodeForSwitchItems(), which is recursive.
+ */
+#include "switch_lowerer.h"
+#include "mir_nodes.h"
+#include "mir_builder.h"
+#include "mir_lower.h" /* "../../../maple_ir/include/mir_lower.h" */
+
+namespace maplebe {
+using namespace maple;
+
+static bool CasePairKeyLessThan(const CasePair &left, const CasePair &right) {
+  return left.first < right.first;
+}
+
+void SwitchLowerer::FindClusters(MapleVector<Cluster> &clusters) const {
+  int32 length = static_cast<int32>(stmt->GetSwitchTable().size());
+  int32 i = 0;
+  while (i < length - kClusterSwitchCutoff) {
+    for (int32 j = length - 1; j > i; --j) {
+      float tmp1 = static_cast<float>(j - i);
+      float tmp2 = static_cast<float>(stmt->GetCasePair(static_cast<size_t>(static_cast<uint32>(j))).first) -
+                   static_cast<float>(stmt->GetCasePair(static_cast<size_t>(static_cast<uint32>(i))).first);
+      float currDensity = tmp1 / tmp2;
+      if (((j - i) >= kClusterSwitchCutoff) &&
+          ((currDensity >= kClusterSwitchDensityHigh) ||
+           ((currDensity >= kClusterSwitchDensityLow) && (tmp2 < kMaxRangeGotoTableSize)))) {
+        clusters.emplace_back(Cluster(i, j));
+        i = j;
+        break;
+      }
+    }
+    ++i;
+  }
+}
+
+void SwitchLowerer::InitSwitchItems(MapleVector<Cluster> &clusters) {
+  if (clusters.empty()) {
+    for (int32 i = 0; i < static_cast<int32>(stmt->GetSwitchTable().size()); ++i) {
+      switchItems.emplace_back(SwitchItem(i, 0));
+    }
+  } else {
+    int32 j = 0;
+    Cluster front = clusters[j];
+    for (int32 i = 0; i < static_cast<int32>(stmt->GetSwitchTable().size()); ++i) {
+      if (i == front.first) {
+        switchItems.emplace_back(SwitchItem(i, front.second));
+        i = front.second;
+        ++j;
+        if (static_cast<int32>(clusters.size()) > j) {
+          front = clusters[j];
+        }
+      } else {
+        switchItems.emplace_back(SwitchItem(i, 0));
+      }
+    }
+  }
+}
+
+RangeGotoNode *SwitchLowerer::BuildRangeGotoNode(int32 startIdx, int32 endIdx) {
+  RangeGotoNode *node = mirModule.CurFuncCodeMemPool()->New<RangeGotoNode>(mirModule);
+  node->SetOpnd(stmt->GetSwitchOpnd(), 0);
+
+  node->SetRangeGotoTable(SmallCaseVector(mirModule.CurFuncCodeMemPoolAllocator()->Adapter()));
+  node->SetTagOffset(static_cast<int32>(stmt->GetCasePair(static_cast<size_t>(startIdx)).first));
+  uint32 curTag = 0;
+  node->AddRangeGoto(curTag, stmt->GetCasePair(startIdx).second);
+  int64 lastCaseTag = stmt->GetSwitchTable().at(startIdx).first;
+  for (int32 i = startIdx + 1; i <= endIdx; ++i) {
+    /*
+     * The second condition in the while test below guards against an infinite loop: if the
+     * sorted case table contains duplicate tags, gap filling alone would never advance past them.
+     */
+    while ((stmt->GetCasePair(i).first != (lastCaseTag + 1)) && (stmt->GetCasePair(i).first != lastCaseTag)) {
+      /* fill in a gap in the case tags */
+      curTag = (++lastCaseTag) - node->GetTagOffset();
+      if (stmt->GetDefaultLabel() != 0) {
+        node->AddRangeGoto(curTag, stmt->GetDefaultLabel());
+      }
+    }
+    curTag = static_cast<uint32>(stmt->GetCasePair(static_cast<size_t>(i)).first - node->GetTagOffset());
+    node->AddRangeGoto(curTag, stmt->GetCasePair(i).second);
+    lastCaseTag = stmt->GetCasePair(i).first;
+  }
+  /* If the density is high enough, the range is allowed to be large */
+  // DEBUG_ASSERT(static_cast<uint32>(node->GetRangeGotoTable().size()) <= kMaxRangeGotoTableSize,
+  //              "rangegoto table exceeds allowed number of entries");
+  DEBUG_ASSERT(node->GetNumOpnds() == 1, "RangeGotoNode is a UnaryOpnd and numOpnds must be 1");
+  return node;
+}
+
+CompareNode *SwitchLowerer::BuildCmpNode(Opcode opCode, uint32 idx) {
+  CompareNode *binaryExpr = mirModule.CurFuncCodeMemPool()->New<CompareNode>(opCode);
+  binaryExpr->SetPrimType(PTY_u32);
+  binaryExpr->SetOpndType(stmt->GetSwitchOpnd()->GetPrimType());
+
+  MIRType &type = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(stmt->GetSwitchOpnd()->GetPrimType()));
+  MIRConst *constVal =
+      GlobalTables::GetIntConstTable().GetOrCreateIntConst(stmt->GetCasePair(idx).first, type);
+  ConstvalNode *exprConst = mirModule.CurFuncCodeMemPool()->New<ConstvalNode>();
+  exprConst->SetPrimType(stmt->GetSwitchOpnd()->GetPrimType());
+  exprConst->SetConstVal(constVal);
+
+  binaryExpr->SetBOpnd(stmt->GetSwitchOpnd(), 0);
+  binaryExpr->SetBOpnd(exprConst, 1);
+  return binaryExpr;
+}
+
+GotoNode *SwitchLowerer::BuildGotoNode(int32 idx) {
+  if (idx == -1 && stmt->GetDefaultLabel() == 0) {
+    return nullptr;
+  }
+  GotoNode *gotoStmt = mirModule.CurFuncCodeMemPool()->New<GotoNode>(OP_goto);
+  if (idx == -1) {
+    gotoStmt->SetOffset(stmt->GetDefaultLabel());
+  } else {
+    gotoStmt->SetOffset(stmt->GetCasePair(idx).second);
+  }
+  return gotoStmt;
+}
+
+CondGotoNode *SwitchLowerer::BuildCondGotoNode(int32 idx, Opcode opCode, BaseNode &cond) {
+  if (idx == -1 && stmt->GetDefaultLabel() == 0) {
+    return nullptr;
+  }
+  CondGotoNode *cGotoStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(opCode);
+  cGotoStmt->SetOpnd(&cond, 0);
+  if (idx == -1) {
+    cGotoStmt->SetOffset(stmt->GetDefaultLabel());
+  } else {
+    cGotoStmt->SetOffset(stmt->GetCasePair(idx).second);
+  }
+  return cGotoStmt;
+}
+
+/* start and end are with respect to switchItems */
+BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool lowBlockNodeChecked,
+                                                  bool highBlockNodeChecked) {
+  DEBUG_ASSERT(start >= 0, "invalid args start");
+  DEBUG_ASSERT(end >= 0, "invalid args end");
+  BlockNode *localBlk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  if (start > end) {
+    return localBlk;
+  }
+  CondGotoNode *cGoto = nullptr;
+  RangeGotoNode *rangeGoto = nullptr;
+  IfStmtNode *ifStmt = nullptr;
+  CompareNode *cmpNode = nullptr;
+  MIRLower mirLowerer(mirModule, mirModule.CurFunction());
+  mirLowerer.Init();
+  /* if the low side starts with a dense item, handle it first */
+  while ((start <= end) && (switchItems[start].second != 0)) {
+    if (!lowBlockNodeChecked) {
+      lowBlockNodeChecked = true;
+      if (!(IsUnsignedInteger(stmt->GetSwitchOpnd()->GetPrimType()) &&
+            (stmt->GetCasePair(static_cast<size_t>(switchItems[static_cast<size_t>(start)].first)).first == 0))) {
+        cGoto = BuildCondGotoNode(-1, OP_brtrue, *BuildCmpNode(OP_lt, switchItems[start].first));
+        if (cGoto != nullptr) {
+          localBlk->AddStatement(cGoto);
+        }
+      }
+    }
+    rangeGoto = BuildRangeGotoNode(switchItems[start].first, switchItems[start].second);
+    if (stmt->GetDefaultLabel() == 0) {
+      localBlk->AddStatement(rangeGoto);
+    } else {
+      cmpNode = BuildCmpNode(OP_le, switchItems[start].second);
+      ifStmt = static_cast<IfStmtNode*>(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode));
+      ifStmt->GetThenPart()->AddStatement(rangeGoto);
+      localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false));
+    }
+    if (start < end) {
+      lowBlockNodeChecked = (stmt->GetCasePair(switchItems[start].second).first + 1 ==
+                             stmt->GetCasePair(switchItems[start + 1].first).first);
+    }
+    ++start;
+  }
+  /* if the high side starts with a dense item, handle it as well */
+  while ((start <= end) && (switchItems[end].second != 0)) {
+    if (!highBlockNodeChecked) {
+      cGoto = BuildCondGotoNode(-1, OP_brtrue, *BuildCmpNode(OP_gt, switchItems[end].second));
+      if (cGoto != nullptr) {
+        localBlk->AddStatement(cGoto);
+      }
+      highBlockNodeChecked = true;
+    }
+    rangeGoto = BuildRangeGotoNode(switchItems[end].first, switchItems[end].second);
+    if (stmt->GetDefaultLabel() == 0) {
+      localBlk->AddStatement(rangeGoto);
+    } else {
+      cmpNode = BuildCmpNode(OP_ge, switchItems[end].first);
+      ifStmt = static_cast<IfStmtNode*>(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode));
+      ifStmt->GetThenPart()->AddStatement(rangeGoto);
+      localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false));
+    }
+    if (start < end) {
+      highBlockNodeChecked =
+          (stmt->GetCasePair(switchItems[end].first).first - 1 ==
+           stmt->GetCasePair(switchItems[end - 1].first).first) ||
+          (stmt->GetCasePair(switchItems[end].first).first - 1 ==
+           stmt->GetCasePair(switchItems[end - 1].second).first);
+    }
+    --end;
+  }
+  if (start > end) {
+    if (!lowBlockNodeChecked || !highBlockNodeChecked) {
+      GotoNode *gotoDft = BuildGotoNode(-1);
+      if (gotoDft != nullptr) {
+        localBlk->AddStatement(gotoDft);
+        jumpToDefaultBlockGenerated = true;
+      }
+    }
+    return localBlk;
+  }
+  if ((start == end) && lowBlockNodeChecked && highBlockNodeChecked) {
+    /* only 1 case with 1 tag remains */
+    auto *gotoStmt = BuildGotoNode(switchItems[static_cast<size_t>(start)].first);
+    if (gotoStmt != nullptr) {
+      localBlk->AddStatement(gotoStmt);
+    }
+    return localBlk;
+  }
+  if (end < (start + kClusterSwitchCutoff)) {
+    /* generate equality checks for what remains */
+    while ((start <= end) && (switchItems[start].second == 0)) {
+      if ((start == end) && lowBlockNodeChecked && highBlockNodeChecked) {
+        cGoto = reinterpret_cast<CondGotoNode*>(BuildGotoNode(switchItems[start].first)); /* can omit the condition */
+      } else {
+        cGoto = BuildCondGotoNode(switchItems[start].first, OP_brtrue, *BuildCmpNode(OP_eq, switchItems[start].first));
+      }
+      if (cGoto != nullptr) {
+        localBlk->AddStatement(cGoto);
+      }
+      if (lowBlockNodeChecked && (start < end)) {
+        lowBlockNodeChecked = (stmt->GetCasePair(switchItems[start].first).first + 1 ==
+                               stmt->GetCasePair(switchItems[start + 1].first).first);
+      }
+      ++start;
+    }
+    if (start <= end) { /* recursive call */
+      BlockNode *tmp = BuildCodeForSwitchItems(start, end, lowBlockNodeChecked, highBlockNodeChecked);
+      CHECK_FATAL(tmp != nullptr, "tmp should not be nullptr");
+      localBlk->AppendStatementsFromBlock(*tmp);
+    } else if (!lowBlockNodeChecked || !highBlockNodeChecked) {
+      GotoNode *gotoDft = BuildGotoNode(-1);
+      if (gotoDft != nullptr) {
+        localBlk->AddStatement(gotoDft);
+        jumpToDefaultBlockGenerated = true;
+      }
+    }
+    return localBlk;
+  }
+
+  int64 lowestTag = stmt->GetCasePair(switchItems[start].first).first;
+  int64 highestTag = stmt->GetCasePair(switchItems[end].first).first;
+
+  /*
+   * If lowestTag and highestTag have the same sign, the midpoint is lowestTag plus half of
+   * their difference; if their signs differ, it is half of their sum. The expression
+   * (lowestTag ^ highestTag) & (1ULL << 63) tests whether the sign bits agree: the XOR's
+   * highest bit is 0 when both tags have the same sign and 1 when they differ. Choosing the
+   * formula this way avoids signed overflow in either case.
+   */
+  int64 middleTag = ((((static_cast<uint64>(lowestTag)) ^ (static_cast<uint64>(highestTag))) & (1ULL << 63)) == 0)
+                        ? (highestTag - lowestTag) / 2 + lowestTag
+                        : (highestTag + lowestTag) / 2;
+  /* find the mid-point in switch_items between start and end */
+  int32 mid = start;
+  while (stmt->GetCasePair(switchItems[mid].first).first < middleTag) {
+    ++mid;
+  }
+  DEBUG_ASSERT(mid >= start, "switch lowering logic: mid should be greater than or equal to start");
+  DEBUG_ASSERT(mid <= end, "switch lowering logic: mid should be less than or equal to end");
+  /* generate the test for the binary search */
+  if (stmt->GetDefaultLabel() != 0) {
+    cmpNode = BuildCmpNode(OP_lt, static_cast<uint32>(switchItems[static_cast<size_t>(mid)].first));
+    ifStmt = static_cast<IfStmtNode*>(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode));
+    bool leftHighBNdChecked = (stmt->GetCasePair(switchItems.at(mid - 1).first).first + 1 ==
+                               stmt->GetCasePair(switchItems.at(mid).first).first) ||
+                              (stmt->GetCasePair(switchItems.at(mid - 1).second).first + 1 ==
+                               stmt->GetCasePair(switchItems.at(mid).first).first);
+    ifStmt->SetThenPart(BuildCodeForSwitchItems(start, mid - 1, lowBlockNodeChecked, leftHighBNdChecked));
+    ifStmt->SetElsePart(BuildCodeForSwitchItems(mid, end, true, highBlockNodeChecked));
+    if (ifStmt->GetElsePart()) {
+      ifStmt->SetNumOpnds(kOperandNumTernary);
+    }
+    localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false));
+  }
+  return localBlk;
+}
+
+BlockNode *SwitchLowerer::LowerSwitch() {
+  if (stmt->GetSwitchTable().empty()) { /* change to goto */
+    BlockNode *localBlk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+    GotoNode *gotoDft = BuildGotoNode(-1);
+    if (gotoDft != nullptr) {
+      localBlk->AddStatement(gotoDft);
+    }
+    return localBlk;
+  }
+
+  // add case labels to the label table's caseLabelSet
+  MIRLabelTable *labelTab = mirModule.CurFunction()->GetLabelTab();
+  for (CasePair &casePair : stmt->GetSwitchTable()) {
+    labelTab->caseLabelSet.insert(casePair.second);
+  }
+
+  MapleVector<Cluster> clusters(ownAllocator->Adapter());
+  stmt->SortCasePair(CasePairKeyLessThan);
+  FindClusters(clusters);
+  InitSwitchItems(clusters);
+  BlockNode *blkNode = BuildCodeForSwitchItems(0, static_cast<int32>(switchItems.size()) - 1, false, false);
+  if (!jumpToDefaultBlockGenerated) {
+    GotoNode *gotoDft = BuildGotoNode(-1);
+    if (gotoDft != nullptr) {
+      blkNode->AddStatement(gotoDft);
+    }
+  }
+  return blkNode;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/be/trycatchblockslower.cpp b/ecmascript/mapleall/maple_be/src/be/trycatchblockslower.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6342d2bf79da3451fabee25c08af78dac77afe91
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/be/trycatchblockslower.cpp
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
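The overflow-safe midpoint selection above can be checked in isolation. A minimal standalone sketch (names invented; not part of the patch) that mirrors the same-sign/different-sign split:

```cpp
#include <cassert>
#include <cstdint>

// Pick the midpoint of two signed tags without risking signed overflow:
// same sign -> bias the half-distance by lowestTag; different signs -> plain average.
int64_t MiddleTag(int64_t lowestTag, int64_t highestTag) {
  if (((static_cast<uint64_t>(lowestTag) ^ static_cast<uint64_t>(highestTag)) & (1ULL << 63)) == 0) {
    return (highestTag - lowestTag) / 2 + lowestTag;  // sign bits agree: difference is safe
  }
  return (highestTag + lowestTag) / 2;                // signs differ: sum is safe
}

int main() {
  assert(MiddleTag(10, 20) == 15);
  assert(MiddleTag(INT64_MAX - 4, INT64_MAX) == INT64_MAX - 2);  // difference form avoids overflow
  assert(MiddleTag(-8, 6) == -1);                                // sum form, signs differ
  return 0;
}
```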
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "try_catch.h" +namespace maplebe { +BBT *TryCatchBlocksLower::CreateNewBB(StmtNode *first, StmtNode *last) { + BBT *newBB = memPool.New(first, last, &memPool); + bbList.emplace_back(newBB); + return newBB; +} + +BBT *TryCatchBlocksLower::FindTargetBBlock(LabelIdx idx, const std::vector &bbs) { + for (auto &target : bbs) { + if (target->GetLabelIdx() == idx) { + return target; + } + } + return nullptr; +} + +/* returns the first statement that is moved in into the try block. If none is moved in, nullptr is returned */ +StmtNode *TryCatchBlocksLower::MoveCondGotoIntoTry(BBT &jtBB, BBT &condbrBB, const MapleVector &labeledBBsInTry) { + StmtNode *firstStmtMovedIn = nullptr; + const MapleVector &bbs = labeledBBsInTry; + StmtNode *jtStmt = jtBB.GetKeyStmt(); +#if DEBUG + StmtNode *js = jtBB.GetFirstStmt(); + while (js->GetOpCode() != OP_try) { + js = js->GetNext(); + } + CHECK_FATAL(js == jtStmt, "make sure js equal jtStmt"); +#endif + StmtNode *ts = jtBB.GetFirstStmt()->GetPrev(); + while ((ts != nullptr) && (ts->GetOpCode() == OP_comment)) { + ts = ts->GetPrev(); + } + + if (ts != nullptr && ts->IsCondBr()) { + CHECK_FATAL(ts->GetNext() == jtBB.GetFirstStmt(), "make sure ts's next equal jtBB's firstStmt"); + StmtNode *firstStmtNode = jtBB.GetFirstStmt(); + /* [ jtbb_b..jtstmt ]; either jtbb_b is a comment or jtbb_b == jtstmt */ + LabelIdx id = static_cast(ts)->GetOffset(); + for (auto &lbb : bbs) { + if (lbb->GetLabelIdx() == id) { + /* + * this cond goto jumps into the try block; let the try block enclose it + * first find the preceding comment statements if any + */ + StmtNode *brS = ts; + while ((ts->GetPrev() != nullptr) && (ts->GetPrev()->GetOpCode() == OP_comment)) { + ts = ts->GetPrev(); + } + StmtNode *secondStmtNode = ts; /* beginning statement of branch block */ + /* [ brbb_b..br_s ]; either brbb_b is a comment or brbb_b == br_s */ + firstStmtNode->SetPrev(secondStmtNode->GetPrev()); + if (secondStmtNode->GetPrev()) { + secondStmtNode->GetPrev()->SetNext(firstStmtNode); + } + jtStmt->GetNext()->SetPrev(brS); + brS->SetNext(jtStmt->GetNext()); + secondStmtNode->SetPrev(jtStmt); + jtStmt->SetNext(secondStmtNode); + condbrBB.SetLastStmt(*firstStmtNode->GetPrev()); + CHECK_FATAL(condbrBB.GetFallthruBranch() == &jtBB, "make sure condbrBB's fallthruBranch equal &jtBB"); + condbrBB.SetFallthruBranch(&jtBB); + condbrBB.SetCondJumpBranch(nullptr); + firstStmtMovedIn = secondStmtNode; + break; + } + } + } + return firstStmtMovedIn; +} + +void TryCatchBlocksLower::RecoverBasicBlock() { + std::vector condbrBBs; + std::vector switchBBs; + std::vector labeledBBs; + using BBTPair = std::pair; + std::vector tryBBs; + std::vector catchBBs; + + CHECK_FATAL(body.GetFirst() != nullptr, "body should not be NULL"); + bodyFirst = body.GetFirst(); + StmtNode *next = bodyFirst; + /* + * comment block [ begin, end ], We treat comment statements as if they are parts + * of the immediately following non-comment statement + */ + StmtNode *commentB = nullptr; + StmtNode *commentE = nullptr; + + BBT *curBB = nullptr; + BBT *lastBB = nullptr; + BBT *openTry = nullptr; + + /* recover basic blocks */ + for (StmtNode *stmt = next; stmt != nullptr; stmt = 
next) { + next = stmt->GetNext(); + + if (stmt->GetOpCode() == OP_comment) { + if (commentB == nullptr) { + commentB = stmt; + commentE = stmt; + } else { + CHECK_FATAL(commentE != nullptr, "nullptr is not expected"); + CHECK_FATAL(commentE->GetNext() == stmt, "make sure commentE's next is stmt"); + commentE = stmt; + } + continue; + } + + CHECK_FATAL(stmt->GetOpCode() != OP_comment, "make sure stmt's opcde not equal OP_comment"); + CHECK_FATAL(commentB == nullptr || (commentE != nullptr && commentE->GetNext() == stmt), + "make sure commentB is nullptr or commentE's next is stmt"); + + if (curBB != nullptr) { + if (stmt->GetOpCode() != OP_label && stmt->GetOpCode() != OP_try && stmt->GetOpCode() != OP_endtry) { + curBB->Extend(commentB, stmt); + } else { + /* java catch blockes always start with a label (i.e., OP_catch) */ + CHECK_FATAL(curBB->GetCondJumpBranch() == nullptr, "expect curBB's condJumpBranch is nullptr"); + CHECK_FATAL(curBB->GetFallthruBranch() == nullptr, "expect curBB's fallthruBranch is nullptr"); + /* a 'label' statement starts a new basic block */ + BBT *newBB = CreateNewBB(commentB, stmt); + /* + * if the immediately preceding statement (discounting comments) was throw, goto or return, + * curBB is to be reset to nullptr, so the control won't come here. + */ + curBB->SetFallthruBranch(newBB); + curBB = newBB; + } + } else { + /* start a new basic block with 'comment_b -- stmt' */ + curBB = CreateNewBB(commentB, stmt); + if (lastBB != nullptr) { + Opcode lastBBLastStmtOp = lastBB->GetLastStmt()->GetOpCode(); + if (lastBB->GetLastStmt()->IsCondBr() || lastBBLastStmtOp == OP_endtry) { + lastBB->SetFallthruBranch(curBB); + } + /* else don't connect curBB to last_bb */ + } + } + commentB = nullptr; + commentE = nullptr; + + switch (stmt->GetOpCode()) { + case OP_throw: + case OP_return: + case OP_goto: + /* start a new bb at the next stmt */ + lastBB = curBB; + curBB = nullptr; + break; + case OP_label: { + LabelNode *labelStmt = static_cast(stmt); + labeledBBs.emplace_back(curBB); + curBB->SetLabelIdx(static_cast(labelStmt->GetLabelIdx())); + } break; + case OP_brtrue: + case OP_brfalse: + condbrBBs.emplace_back(curBB); + lastBB = curBB; + curBB = nullptr; + break; + case OP_switch: + switchBBs.emplace_back(curBB); + lastBB = curBB; + curBB = nullptr; + break; + /* + * We deal try and endtry slightly differently. + * 1. try begins a basic block which includes the try statement and the subsequent statements up to one that + * results in non-sequential control transfer such as unconditional/conditional branches. + * 2. endtry will create its own basic block which contains the endtry statement and nothing else. 
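The block-boundary rules described above can be summarized in a compilable mini-sketch (the opcode names echo the MIR ones, but the structure here is invented for illustration and is not part of the patch): labels, try, and endtry open a new block, and a non-sequential control transfer, including endtry itself, closes the current one.

```cpp
#include <vector>

enum class Op { kComment, kLabel, kTry, kEndTry, kGoto, kReturn, kThrow, kCondBr, kOther };

std::vector<std::vector<Op>> SplitIntoBlocks(const std::vector<Op> &stmts) {
  std::vector<std::vector<Op>> blocks;
  std::vector<Op> cur;
  for (Op op : stmts) {
    // A label, try, or endtry starts a fresh basic block.
    if ((op == Op::kLabel || op == Op::kTry || op == Op::kEndTry) && !cur.empty()) {
      blocks.push_back(cur);
      cur.clear();
    }
    cur.push_back(op);
    // Non-sequential control transfer terminates the current block;
    // endtry forms a block that contains only the endtry statement.
    if (op == Op::kGoto || op == Op::kReturn || op == Op::kThrow || op == Op::kCondBr ||
        op == Op::kEndTry) {
      blocks.push_back(cur);
      cur.clear();
    }
  }
  if (!cur.empty()) {
    blocks.push_back(cur);
  }
  return blocks;
}
```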
+ */ + case OP_try: + case OP_endtry: { + /* because a label statement is inserted at the function entry */ + CHECK_FATAL(curBB != nullptr, "expect curBB is not nullptr"); + CHECK_FATAL(curBB->GetCondJumpBranch() == nullptr, "expect curBB's condJumpBranch is nullptr"); + CHECK_FATAL(curBB->GetFallthruBranch() == nullptr, "expect curBB's fallthruBranch is nullptr"); + CHECK_FATAL(curBB->GetLastStmt()->GetOpCode() == stmt->GetOpCode(), + "the opcode of curBB's lastStmt should equal stmt's opcocde"); + if (stmt->GetOpCode() == OP_try) { + CHECK_FATAL(openTry == nullptr, "trys are not expected to be nested"); + curBB->SetType(BBT::kBBTry, *stmt); + openTry = curBB; + prevBBOfTry[openTry] = lastBB; + } else { + tryBBs.emplace_back(BBTPair(openTry, curBB)); + openTry = nullptr; + curBB->SetType(BBT::kBBEndTry, *stmt); + lastBB = curBB; + curBB = nullptr; + } + break; + } + case OP_catch: { +#if DEBUG + StmtNode *ss = stmt->GetPrev(); + while ((ss != nullptr) && (ss->GetOpCode() == OP_comment)) { + ss = ss->GetPrev(); + } + CHECK_FATAL(ss != nullptr, "expect ss is not nullptr"); + CHECK_FATAL(ss->GetOpCode() == OP_label, "expect op equal OP_label"); + for (auto &tb : catchBBs) { + CHECK_FATAL(tb != curBB, "tb should not equal curBB"); + } +#endif + catchBBs.emplace_back(curBB); + curBB->SetType(BBT::kBBCatch, *stmt); + break; + } + case OP_block: + CHECK_FATAL(0, "should not run here"); + default: + break; + } + } + + for (auto &cbBB : condbrBBs) { + CHECK_FATAL(cbBB->GetLastStmt()->IsCondBr(), "cbBB's lastStmt is not condBr"); + CondGotoNode *cbBBLastStmt = static_cast(cbBB->GetLastStmt()); + cbBB->SetCondJumpBranch(FindTargetBBlock(static_cast(cbBBLastStmt->GetOffset()), labeledBBs)); + } + + for (auto &swBB : switchBBs) { + CHECK_FATAL(swBB->GetLastStmt()->GetOpCode() == OP_switch, "the opcode of sw's lastStmt should equal OP_switch"); + SwitchNode *ss = static_cast(swBB->GetLastStmt()); + + swBB->AddSuccs(FindTargetBBlock(ss->GetDefaultLabel(), labeledBBs)); + for (auto &cp : ss->GetSwitchTable()) { + swBB->AddSuccs(FindTargetBBlock(cp.second, labeledBBs)); + } + } + + for (auto &bb : bbList) { + firstStmtToBBMap[bb->GetFirstStmt()] = bb; + } + CHECK_FATAL(openTry == nullptr, "trys are not expected to be nested"); +} + +/* if catchBB is in try-endtry block and catch is own to current try-endtry, process it and return true */ +bool TryCatchBlocksLower::CheckAndProcessCatchNodeInCurrTryBlock(BBT &origLowerBB, LabelIdx ebbLabel, + uint32 index) { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + MapleVector &bbsToRelocate = tryEndTryBlock.GetBBsToRelocate(); + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + StmtNode *tryStmt = tryEndTryBlock.GetTryStmtNode(); + bool found = false; + for (size_t tempIndex = 0; tempIndex < static_cast(tryStmt)->GetOffsetsCount(); ++tempIndex) { + auto id = static_cast(tryStmt)->GetOffset(tempIndex); + /* + * if this labeled bb is a catch block, + * remove it from the list of blocks enclosed in this try-block' + */ + if (ebbLabel == id) { + found = true; + enclosedBBs[index] = nullptr; + std::vector currBBThread; + BBT *lowerBB = &origLowerBB; + /* append it to the list of blocks placed after the end try block */ + currBBThread.emplace_back(lowerBB); + while (lowerBB->GetFallthruBranch() != nullptr) { + lowerBB = lowerBB->GetFallthruBranch(); + CHECK_FATAL(!lowerBB->IsTry(), "ebb must not be tryBB"); + if (lowerBB->IsEndTry()) { + CHECK_FATAL(lowerBB == endTryBB, "lowerBB should equal endTryBB"); + break; + } + for (uint32 j = 0; j < 
enclosedBBs.size(); ++j) { + if (enclosedBBs[j] == lowerBB) { + enclosedBBs[j] = nullptr; + break; + } + } + currBBThread.emplace_back(lowerBB); + } + + if (!lowerBB->IsEndTry()) { + for (auto &e : currBBThread) { + bbsToRelocate.emplace_back(e); + } + } else { + /* + * We have the following case. + * bb_head -> bb_1 -> .. bb_n -> endtry_bb -> succ + * For this particular case, we swap endtry bb and curr_bb_thread because the bblock that + * contains the endtry statement does not contain any other statements!! + */ + CHECK_FATAL(endTryBB->GetFirstStmt()->GetOpCode() == OP_comment || + endTryBB->GetFirstStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's firstStmt should be OP_comment or OP_endtry"); + CHECK_FATAL(endTryBB->GetLastStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's lastStmt should be OP_endtry"); + + /* we move endtry_bb before thread_head */ + BBT *threadHead = currBBThread.front(); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev() != nullptr, + "the prev node of threadHead's firstStmt should be not nullptr"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetOpCode() == OP_comment || + threadHead->GetFirstStmt()->GetOpCode() == OP_label, + "the opcode of threadHead's firstStmt should be OP_comment or OP_label"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev()->GetNext() == threadHead->GetFirstStmt(), + "the next of the prev of threadHead's firstStmt should equal threadHead's firstStmt"); + threadHead->GetFirstStmt()->GetPrev()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(threadHead->GetFirstStmt()->GetPrev()); + BBT *threadTail = currBBThread.back(); + threadTail->GetLastStmt()->SetNext(endTryBB->GetLastStmt()->GetNext()); + if (endTryBB->GetLastStmt()->GetNext() != nullptr) { + endTryBB->GetLastStmt()->GetNext()->SetPrev(threadTail->GetLastStmt()); + } + endTryBB->GetLastStmt()->SetNext(threadHead->GetFirstStmt()); + + CHECK_FATAL(endTryBB->GetCondJumpBranch() == nullptr, "endTryBB's condJumpBranch must be nullptr"); + if (threadTail->GetFallthruBranch() != nullptr) { + threadTail->SetFallthruBranch(firstStmtToBBMap[threadTail->GetLastStmt()->GetNext()]); + } + endTryBB->SetFallthruBranch(nullptr); + if (bodyEndWithEndTry) { + body.SetLast(threadTail->GetLastStmt()); + } + } + break; + } + } + return found; +} + +/* collect catchbb->fallthru(0-n) into currBBThread, when encounter a new catch, return it, else return nullptr */ +BBT *TryCatchBlocksLower::CollectCatchAndFallthruUntilNextCatchBB(BBT *&lowerBB, uint32 &nextEnclosedIdx, + std::vector &currBBThread) { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + + BBT *nextBBThreadHead = nullptr; + while (lowerBB->GetFallthruBranch() != nullptr) { + lowerBB = lowerBB->GetFallthruBranch(); + ++nextEnclosedIdx; + if (lowerBB->IsEndTry()) { + CHECK_FATAL(lowerBB == endTryBB, "lowerBB should equal endTryBB"); + break; + } + + for (uint32 j = 0; j < enclosedBBs.size(); ++j) { + if (enclosedBBs[j] == lowerBB) { + enclosedBBs[j] = nullptr; + break; + } + } + if (lowerBB->IsCatch()) { + nextBBThreadHead = lowerBB; + break; + } + currBBThread.emplace_back(lowerBB); + } + + if (nextBBThreadHead == nullptr && lowerBB->GetFallthruBranch() == nullptr && lowerBB != endTryBB && + nextEnclosedIdx < enclosedBBs.size() && enclosedBBs[nextEnclosedIdx]) { + /* + * Using a loop to find the next_bb_thread_head when it's a catch_BB or a normal_BB which + * is after a catch_BB. Other condition, push_back into the curr_bb_thread. 
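As a hypothetical mini-model of the thread collection just described (invented types, not part of the patch): follow fallthru links from a catch head, detach each visited block from the enclosed list, and stop when the next catch head or the endtry block is reached.

```cpp
#include <vector>

struct MiniBB {
  MiniBB *fallthru = nullptr;
  bool isCatch = false;
  bool isEndTry = false;
};

// Returns the head of the next thread (a catch block), or nullptr if none was found.
MiniBB *CollectThread(MiniBB *head, std::vector<MiniBB*> &enclosed, std::vector<MiniBB*> &thread) {
  thread.push_back(head);
  for (MiniBB *bb = head->fallthru; bb != nullptr; bb = bb->fallthru) {
    if (bb->isEndTry) {
      break;  // ran into the endtry block; the caller handles the swap
    }
    for (auto &slot : enclosed) {
      if (slot == bb) {
        slot = nullptr;  // consumed: this block no longer awaits relocation
        break;
      }
    }
    if (bb->isCatch) {
      return bb;  // a new catch block starts the next thread
    }
    thread.push_back(bb);
  }
  return nullptr;
}
```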
+ */ + do { + lowerBB = enclosedBBs[nextEnclosedIdx]; + enclosedBBs[nextEnclosedIdx++] = nullptr; + BBT *head = currBBThread.front(); + if (head->IsCatch() || lowerBB->IsCatch()) { + nextBBThreadHead = lowerBB; + break; + } + currBBThread.emplace_back(lowerBB); + } while (nextEnclosedIdx < enclosedBBs.size()); + } + + return nextBBThreadHead; +} + +void TryCatchBlocksLower::ProcessThreadTail(BBT &threadTail, BBT * const &nextBBThreadHead, bool hasMoveEndTry) { + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + StmtNode *newEndTry = endTryBB->GetKeyStmt()->CloneTree(mirModule.GetCurFuncCodeMPAllocator()); + newEndTry->SetPrev(threadTail.GetLastStmt()); + newEndTry->SetNext(threadTail.GetLastStmt()->GetNext()); + if (bodyEndWithEndTry && hasMoveEndTry) { + if (threadTail.GetLastStmt()->GetNext()) { + threadTail.GetLastStmt()->GetNext()->SetPrev(newEndTry); + } + } else { + CHECK_FATAL(threadTail.GetLastStmt()->GetNext() != nullptr, + "the next of threadTail's lastStmt should not be nullptr"); + threadTail.GetLastStmt()->GetNext()->SetPrev(newEndTry); + } + threadTail.GetLastStmt()->SetNext(newEndTry); + + threadTail.SetLastStmt(*newEndTry); + if (hasMoveEndTry && nextBBThreadHead == nullptr) { + body.SetLast(threadTail.GetLastStmt()); + } +} + +/* Wrap this catch block with try-endtry block */ +void TryCatchBlocksLower::WrapCatchWithTryEndTryBlock(std::vector &currBBThread, BBT *&nextBBThreadHead, + uint32 &nextEnclosedIdx, bool hasMoveEndTry) { + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + StmtNode *tryStmt = tryEndTryBlock.GetTryStmtNode(); + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + for (auto &e : currBBThread) { + CHECK_FATAL(!e->IsTry(), "expect e is not try"); + } + BBT *threadHead = currBBThread.front(); + if (threadHead->IsCatch()) { + StmtNode *jcStmt = threadHead->GetKeyStmt(); + CHECK_FATAL(jcStmt->GetNext() != nullptr, "jcStmt's next should not be nullptr"); + TryNode *jtCopy = static_cast(tryStmt)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()); + jtCopy->SetNext(jcStmt->GetNext()); + jtCopy->SetPrev(jcStmt); + jcStmt->GetNext()->SetPrev(jtCopy); + jcStmt->SetNext(jtCopy); + + BBT *threadTail = currBBThread.back(); + + /* for this endtry stmt, we don't need to create a basic block */ + ProcessThreadTail(*threadTail, static_cast(nextBBThreadHead), hasMoveEndTry); + } else { + /* For cases try->catch->normal_bb->normal_bb->endtry, Combine normal bb first. */ + while (nextEnclosedIdx < enclosedBBs.size()) { + if (nextBBThreadHead != nullptr) { + if (nextBBThreadHead->IsCatch()) { + break; + } + } + BBT *ebbSecond = enclosedBBs[nextEnclosedIdx]; + enclosedBBs[nextEnclosedIdx++] = nullptr; + CHECK_FATAL(ebbSecond != endTryBB, "ebbSecond should not equal endTryBB"); + if (ebbSecond->IsCatch()) { + nextBBThreadHead = ebbSecond; + break; + } + currBBThread.emplace_back(ebbSecond); + } + /* normal bb. */ + StmtNode *stmt = threadHead->GetFirstStmt(); + + TryNode *jtCopy = static_cast(tryStmt)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()); + jtCopy->SetNext(stmt); + jtCopy->SetPrev(stmt->GetPrev()); + stmt->GetPrev()->SetNext(jtCopy); + stmt->SetPrev(jtCopy); + threadHead->SetFirstStmt(*jtCopy); + + BBT *threadTail = currBBThread.back(); + + /* for this endtry stmt, we don't need to create a basic block */ + ProcessThreadTail(*threadTail, static_cast(nextBBThreadHead), hasMoveEndTry); + } +} + +/* + * We have the following case. + * bb_head -> bb_1 -> .. 
bb_n -> endtry_bb -> succ + * For this particular case, we swap EndTry bb and curr_bb_thread, because the bblock that contains the endtry + * statement does not contain any other statements!! + */ +void TryCatchBlocksLower::SwapEndTryBBAndCurrBBThread(const std::vector &currBBThread, bool &hasMoveEndTry, + const BBT *nextBBThreadHead) { + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + CHECK_FATAL(endTryBB->GetFirstStmt()->GetOpCode() == OP_comment || + endTryBB->GetFirstStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's firstStmt should be OP_comment or OP_endtry"); + CHECK_FATAL(endTryBB->GetLastStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's lastStmt should be OP_endtry"); + + /* we move endtry_bb before bb_head */ + BBT *threadHead = currBBThread.front(); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev() != nullptr, + "the prev of threadHead's firstStmt should not nullptr"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetOpCode() == OP_comment || + threadHead->GetFirstStmt()->GetOpCode() == OP_label, + "the opcode of threadHead's firstStmt should be OP_comment or OP_label"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev()->GetNext() == threadHead->GetFirstStmt(), + "the next of the prev of threadHead's firstStmt should equal threadHead's firstStmt"); + + endTryBB->GetFirstStmt()->GetPrev()->SetNext(endTryBB->GetLastStmt()->GetNext()); + if (endTryBB->GetLastStmt()->GetNext() != nullptr) { + endTryBB->GetLastStmt()->GetNext()->SetPrev(endTryBB->GetFirstStmt()->GetPrev()); + } + + threadHead->GetFirstStmt()->GetPrev()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(threadHead->GetFirstStmt()->GetPrev()); + + endTryBB->GetLastStmt()->SetNext(threadHead->GetFirstStmt()); + threadHead->GetFirstStmt()->SetPrev(endTryBB->GetLastStmt()); + + CHECK_FATAL(endTryBB->GetCondJumpBranch() == nullptr, "endTryBB's condJumpBranch must be nullptr"); + endTryBB->SetFallthruBranch(nullptr); + if (bodyEndWithEndTry) { + hasMoveEndTry = true; + if (nextBBThreadHead == nullptr) { + body.SetLast(currBBThread.back()->GetLastStmt()); + } + } +} + +void TryCatchBlocksLower::ProcessEnclosedBBBetweenTryEndTry() { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + MapleVector &labeledBBsInTry = tryEndTryBlock.GetLabeledBBsInTry(); + + for (uint32 i = 0; i < enclosedBBs.size(); ++i) { + BBT *lowerBB = enclosedBBs[i]; + uint32 nextEnclosedIdx = i + 1; + if (lowerBB == nullptr) { + continue; /* we may have removed the element */ + } + if (!lowerBB->IsLabeled()) { + continue; + } + labeledBBsInTry.emplace_back(lowerBB); + + /* + * It seems the way a finally is associated with its try is to put the catch block inside + * the java-try-end-try block. So, keep the 'catch(void*)' in it. 
+ */ + LabelIdx ebbLabel = lowerBB->GetLabelIdx(); + bool found = CheckAndProcessCatchNodeInCurrTryBlock(*lowerBB, ebbLabel, i); + /* fill cur_bb_thread until meet the next catch */ + if (!found && lowerBB->IsCatch()) { + enclosedBBs[i] = nullptr; + std::vector currBBThread; + BBT *nextBBThreadHead = nullptr; + bool isFirstTime = true; + bool hasMoveEndTry = false; + do { + if (nextBBThreadHead != nullptr) { + isFirstTime = false; + } + nextBBThreadHead = nullptr; + currBBThread.clear(); + currBBThread.emplace_back(lowerBB); + nextBBThreadHead = CollectCatchAndFallthruUntilNextCatchBB(lowerBB, nextEnclosedIdx, currBBThread); + WrapCatchWithTryEndTryBlock(currBBThread, nextBBThreadHead, nextEnclosedIdx, hasMoveEndTry); + if (isFirstTime) { + SwapEndTryBBAndCurrBBThread(currBBThread, hasMoveEndTry, nextBBThreadHead); + } + } while (nextBBThreadHead != nullptr); + } + } +} + +void TryCatchBlocksLower::ConnectRemainBB() { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + BBT *startTryBB = tryEndTryBlock.GetStartTryBB(); + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + size_t nEnclosedBBs = enclosedBBs.size(); + size_t k = 0; + while ((k < nEnclosedBBs) && (enclosedBBs[k] == nullptr)) { + ++k; + } + + if (k < nEnclosedBBs) { + BBT *prevBB = enclosedBBs[k]; + + startTryBB->GetLastStmt()->SetNext(prevBB->GetFirstStmt()); + prevBB->GetFirstStmt()->SetPrev(startTryBB->GetLastStmt()); + + for (++k; k < nEnclosedBBs; ++k) { + BBT *lowerBB = enclosedBBs[k]; + if (lowerBB == nullptr) { + continue; + } + prevBB->GetLastStmt()->SetNext(lowerBB->GetFirstStmt()); + lowerBB->GetFirstStmt()->SetPrev(prevBB->GetLastStmt()); + prevBB = lowerBB; + } + + prevBB->GetLastStmt()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(prevBB->GetLastStmt()); + } else { + startTryBB->GetLastStmt()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(startTryBB->GetLastStmt()); + } +} + +BBT *TryCatchBlocksLower::FindInsertAfterBB() { + BBT *insertAfter = tryEndTryBlock.GetEndTryBB(); + CHECK_FATAL(tryEndTryBlock.GetEndTryBB()->GetLastStmt()->GetOpCode() == OP_endtry, "LowerBB type check"); + BBT *iaOpenTry = nullptr; + while (insertAfter->GetFallthruBranch() != nullptr || iaOpenTry != nullptr) { + if (insertAfter->GetFallthruBranch() != nullptr) { + insertAfter = insertAfter->GetFallthruBranch(); + } else { + CHECK_FATAL(iaOpenTry != nullptr, "iaOpenTry should not be nullptr"); + insertAfter = firstStmtToBBMap[insertAfter->GetLastStmt()->GetNext()]; + CHECK_FATAL(!insertAfter->IsTry(), "insertAfter should not be try"); + } + + if (insertAfter->IsTry()) { + iaOpenTry = insertAfter; + } else if (insertAfter->IsEndTry()) { + iaOpenTry = nullptr; + } + } + return insertAfter; +} + +void TryCatchBlocksLower::PlaceRelocatedBB(BBT &insertAfter) { + StmtNode *iaLast = insertAfter.GetLastStmt(); + CHECK_FATAL(iaLast != nullptr, "iaLast should not nullptr"); + + StmtNode *iaNext = iaLast->GetNext(); + if (iaNext == nullptr) { + CHECK_FATAL(body.GetLast() == iaLast, "body's last should equal iaLast"); + } + BBT *prevBB = &insertAfter; + MapleVector &bbsToRelocate = tryEndTryBlock.GetBBsToRelocate(); + for (auto &rbb : bbsToRelocate) { + prevBB->GetLastStmt()->SetNext(rbb->GetFirstStmt()); + rbb->GetFirstStmt()->SetPrev(prevBB->GetLastStmt()); + prevBB = rbb; + } + prevBB->GetLastStmt()->SetNext(iaNext); + if (iaNext != nullptr) { + iaNext->SetPrev(prevBB->GetLastStmt()); + } else { + /* !ia_next means we started with insert_after that was the last bblock Refer to the above 
CHECK_FATAL. */ + body.SetLast(prevBB->GetLastStmt()); + body.GetLast()->SetNext(nullptr); + } +} + +void TryCatchBlocksLower::PalceCatchSeenSofar(BBT &insertAfter) { + TryNode *tryNode = static_cast(tryEndTryBlock.GetTryStmtNode()); + DEBUG_ASSERT(tryNode != nullptr, "tryNode should not be nullptr"); + MapleVector &bbsToRelocate = tryEndTryBlock.GetBBsToRelocate(); + + for (size_t offsetIndex = 0; offsetIndex < tryNode->GetOffsetsCount(); ++offsetIndex) { + auto id = tryNode->GetOffset(offsetIndex); + bool myCatchBlock = false; + for (auto &jcb : bbsToRelocate) { + if (!jcb->IsLabeled()) { + continue; + } + myCatchBlock = (id == jcb->GetLabelIdx()); + if (myCatchBlock) { + break; + } + } + /* + * If the catch block is the one enclosed in this try-endtry block, + * we just relocated it above, so we don't need to consider it again + */ + if (myCatchBlock) { + continue; + } + + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + for (auto &jcb : catchesSeenSoFar) { + CHECK_FATAL(jcb->IsLabeled(), "jcb should be labeled"); + if (id == jcb->GetLabelIdx()) { + /* + * Remove jcb and all of the blocks that are reachable by following fallthruBranch. + * If we hit a try block, cut there, append an unconditional jump to it to the preceding bblock, + * and relocate them. We may need to insert a label in the try block + */ + BBT *lastBB = jcb; + while (lastBB->GetFallthruBranch() != nullptr && !lastBB->GetFallthruBranch()->IsTry()) { + lastBB = lastBB->GetFallthruBranch(); + } + +#if DEBUG + BBT::ValidateStmtList(bodyFirst); +#endif + if (lastBB->GetFallthruBranch() != nullptr) { + BBT *jtBB = lastBB->GetFallthruBranch(); + CHECK_FATAL(jtBB->IsTry(), "jtBB should be try"); + if (!jtBB->IsLabeled()) { + LabelIdx jtLabIdx = mirModule.GetMIRBuilder()->CreateLabIdx(*mirModule.CurFunction()); + jtBB->SetLabelIdx(jtLabIdx); + StmtNode *labelStmt = mirModule.GetMIRBuilder()->CreateStmtLabel(jtLabIdx); + bool adjustBBFirstStmt = (jtBB->GetKeyStmt() == jtBB->GetFirstStmt()); + labelStmt->SetNext(jtBB->GetKeyStmt()); + labelStmt->SetPrev(jtBB->GetKeyStmt()->GetPrev()); + CHECK_FATAL(jtBB->GetKeyStmt()->GetPrev() != nullptr, "the prev of jtBB's ketStmt shpould not be nullptr"); + jtBB->GetKeyStmt()->GetPrev()->SetNext(labelStmt); + CHECK_FATAL(jtBB->GetKeyStmt()->GetNext() != nullptr, "the next of jtBB's ketStmt shpould not be nullptr"); + jtBB->GetKeyStmt()->SetPrev(labelStmt); + if (adjustBBFirstStmt) { + firstStmtToBBMap.erase(jtBB->GetFirstStmt()); + jtBB->SetFirstStmt(*labelStmt); + firstStmtToBBMap[jtBB->GetFirstStmt()] = jtBB; + } + } + CHECK_FATAL(jtBB->IsLabeled(), "jtBB should be labeled"); + CHECK_FATAL(lastBB->GetLastStmt()->GetOpCode() != OP_goto, + "the opcode of lastBB's lastStmt should not be OP_goto"); + StmtNode *gotoStmt = mirModule.GetMIRBuilder()->CreateStmtGoto(OP_goto, jtBB->GetLabelIdx()); + + StmtNode *lastBBLastStmt = lastBB->GetLastStmt(); + gotoStmt->SetNext(lastBBLastStmt->GetNext()); + gotoStmt->SetPrev(lastBBLastStmt); + if (lastBBLastStmt->GetNext()) { + lastBBLastStmt->GetNext()->SetPrev(gotoStmt); + } + lastBBLastStmt->SetNext(gotoStmt); + + lastBB->SetLastStmt(*gotoStmt); + lastBB->SetFallthruBranch(nullptr); + +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(bodyFirst); +#endif + } + + /* we want to remove [jcb .. last_bb], inclusively. 
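The removal-and-reinsertion done here is, at its core, a doubly-linked-list range splice over the statement list. A minimal standalone model (illustrative names, not part of the patch): detach the inclusive range [first, last] and reinsert it after an anchor statement.

```cpp
#include <cassert>

struct Stmt {
  Stmt *prev = nullptr;
  Stmt *next = nullptr;
};

void SpliceAfter(Stmt &first, Stmt &last, Stmt &anchor) {
  // Detach [first, last] from its current position.
  if (first.prev != nullptr) {
    first.prev->next = last.next;
  }
  if (last.next != nullptr) {
    last.next->prev = first.prev;
  }
  // Reinsert the range after the anchor statement.
  first.prev = &anchor;
  last.next = anchor.next;
  if (anchor.next != nullptr) {
    anchor.next->prev = &last;
  }
  anchor.next = &first;
}

int main() {
  Stmt a, b, c, d;                       // list: a <-> b <-> c <-> d
  a.next = &b; b.prev = &a; b.next = &c;
  c.prev = &b; c.next = &d; d.prev = &c;
  SpliceAfter(b, c, d);                  // move [b, c] after d: a <-> d <-> b <-> c
  assert(a.next == &d && d.prev == &a);
  assert(d.next == &b && c.next == nullptr);
  return 0;
}
```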
*/ + if (jcb->GetFirstStmt() == body.GetFirst()) { + body.SetFirst(lastBB->GetLastStmt()->GetNext()); + body.GetFirst()->SetPrev(nullptr); + lastBB->GetLastStmt()->GetNext()->SetPrev(nullptr); + bodyFirst = body.GetFirst(); + } else { + CHECK_FATAL(jcb->GetFirstStmt()->GetPrev() != nullptr, "the prev of jcb's firstStmt should not be nullptr"); + CHECK_FATAL(jcb->GetFirstStmt()->GetPrev()->GetNext() == jcb->GetFirstStmt(), + "the next of the prev of jcb's firstStmt should equal jcb's firstStmt"); + if (lastBB->GetLastStmt()->GetNext() != nullptr) { + jcb->GetFirstStmt()->GetPrev()->SetNext(lastBB->GetLastStmt()->GetNext()); + lastBB->GetLastStmt()->GetNext()->SetPrev(jcb->GetFirstStmt()->GetPrev()); + } else { + CHECK_FATAL(lastBB->GetLastStmt() == body.GetLast(), "lastBB's lastStmt should equal body's last"); + body.SetLast(jcb->GetFirstStmt()->GetPrev()); + body.GetLast()->SetNext(nullptr); + jcb->GetFirstStmt()->GetPrev()->SetNext(nullptr); + } + } + jcb->GetFirstStmt()->SetPrev(nullptr); + lastBB->GetLastStmt()->SetNext(nullptr); + +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(body.GetFirst(), jcb->GetFirstStmt()); +#endif + + /* append it (i.e., [jcb->firstStmt .. last_bb->lastStmt]) after insert_after */ + CHECK_FATAL(insertAfter.GetFallthruBranch() == nullptr, "insertAfter's fallthruBranch should be nullptr"); + if (insertAfter.GetLastStmt() == body.GetLast()) { + CHECK_FATAL(insertAfter.GetLastStmt()->GetNext() == nullptr, + "the next of insertAfter's lastStmt should not be nullptr"); + } + + jcb->GetFirstStmt()->SetPrev(insertAfter.GetLastStmt()); + lastBB->GetLastStmt()->SetNext(insertAfter.GetLastStmt()->GetNext()); + + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + + if (insertAfter.GetLastStmt()->GetNext() != nullptr) { + insertAfter.GetLastStmt()->GetNext()->SetPrev(lastBB->GetLastStmt()); + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + } else { + /* + * note that we have a single BlockNode that contains all the instructions of a method. + * What that means is each instruction's next is not nullptr except for the very last instruction. + * insert_after->lastStmt->next == nullptr, means insert_after->lastStmt is indeed the last instruction, + * and we are moving instructions of 'last_bb' after it. Thus, we need to fix the BlockNode's last field. + */ + body.SetLast(lastBB->GetLastStmt()); + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + } + insertAfter.GetLastStmt()->SetNext(jcb->GetFirstStmt()); + if (jcb->GetFirstStmt()->GetPrev() != nullptr) { + CHECK_FATAL(jcb->GetFirstStmt()->GetPrev()->GetNext() == jcb->GetFirstStmt(), + "the next of the prev of jcb's firstStmt should equal jcb's firstStmt"); + } + if (lastBB->GetLastStmt()->GetNext() != nullptr) { + CHECK_FATAL(lastBB->GetLastStmt()->GetNext()->GetPrev() == lastBB->GetLastStmt(), + "thr prev of the next of lastBB's lastStmt should equal lastBB's lastStmt"); + } + + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + } + } + } +} + +void TryCatchBlocksLower::TraverseBBList() { + tryEndTryBlock.Init(); + for (auto &bb : bbList) { + if (bb->IsCatch() && tryEndTryBlock.GetStartTryBB() == nullptr) { + /* Add to the list of catch blocks seen so far. 
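TraverseBBList relies throughout on the invariant that try regions never nest; a toy validator for that invariant (a sketch with invented names, not part of the patch) makes the assumption explicit:

```cpp
#include <cassert>
#include <vector>

enum class Mark { kTry, kEndTry, kOther };

// Returns true iff at most one try is ever open and every try is closed by an endtry.
bool ValidateTryRegions(const std::vector<Mark> &stmts) {
  bool open = false;
  for (Mark m : stmts) {
    if (m == Mark::kTry) {
      if (open) return false;   // nested try: not expected
      open = true;
    } else if (m == Mark::kEndTry) {
      if (!open) return false;  // endtry without an open try
      open = false;
    }
  }
  return !open;                 // every try must be closed
}

int main() {
  assert(ValidateTryRegions({Mark::kTry, Mark::kOther, Mark::kEndTry}));
  assert(!ValidateTryRegions({Mark::kTry, Mark::kTry, Mark::kEndTry, Mark::kEndTry}));
  return 0;
}
```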
*/ + catchesSeenSoFar.emplace_back(bb); + } + bodyEndWithEndTry = false; + + if (tryEndTryBlock.GetStartTryBB() == nullptr) { + if (bb->IsTry()) { + StmtNode *firstNonCommentStmt = bb->GetFirstStmt(); + while (firstNonCommentStmt != nullptr && firstNonCommentStmt->GetOpCode() == OP_comment) { + firstNonCommentStmt = firstNonCommentStmt->GetNext(); + } + CHECK_FATAL(bb->GetLastStmt()->GetOpCode() != OP_try || bb->GetLastStmt() == firstNonCommentStmt || + !generateEHCode, "make sure the opcode of bb's lastStmt is not OP_try" + "or the opcode of bb's lastStmt is OP_try but bb's lastStmt equals firstNonCommentStmt" + "or not generate EHCode"); + /* prepare for processing a java try block */ + tryEndTryBlock.Reset(*bb); + } + continue; + } + + /* We should have not a try block enclosed in another java try block!! */ + CHECK_FATAL(!bb->IsTry(), "bb should not be try"); + if (!bb->IsEndTry()) { + tryEndTryBlock.PushToEnclosedBBs(*bb); + } else { + tryEndTryBlock.SetEndTryBB(bb); + if (tryEndTryBlock.GetEndTryBB()->GetLastStmt() == body.GetLast()) { + bodyEndWithEndTry = true; + } +#if DEBUG + for (size_t i = 0; i < tryEndTryBlock.GetEnclosedBBsSize(); ++i) { + CHECK_FATAL(tryEndTryBlock.GetEnclosedBBsElem(i), "there should not be nullptr in enclosedBBs"); + } +#endif + ProcessEnclosedBBBetweenTryEndTry(); + /* Now, connect the remaining ones again n_enclosed_bbs includes 'nullptr's (i.e., deleted entries) */ + ConnectRemainBB(); + BBT *insertAfter = FindInsertAfterBB(); + PlaceRelocatedBB(*insertAfter); + +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(bodyFirst); +#endif + if (prevBBOfTry[tryEndTryBlock.GetStartTryBB()]) { + StmtNode *firstStmtMovedIn = MoveCondGotoIntoTry(*tryEndTryBlock.GetStartTryBB(), + *prevBBOfTry[tryEndTryBlock.GetStartTryBB()], + tryEndTryBlock.GetLabeledBBsInTry()); + if (firstStmtMovedIn == bodyFirst) { + bodyFirst = tryEndTryBlock.GetStartTryBB()->GetFirstStmt(); + prevBBOfTry[tryEndTryBlock.GetStartTryBB()] = nullptr; + } + } + /* + * Now, examine each offset attached to this try and move any catch block + * that is not in 'bbs_to_relocate' but in 'catches_seen_so_far' + */ + PalceCatchSeenSofar(*insertAfter); + + /* close the try that is open */ + tryEndTryBlock.SetStartTryBB(nullptr); + } +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(bodyFirst); +#endif + } + + body.SetFirst(bodyFirst); +} + +void TryCatchBlocksLower::CheckTryCatchPattern() const { + StmtNode *openJt = nullptr; + for (StmtNode *stmt = body.GetFirst(); stmt; stmt = stmt->GetNext()) { + switch (stmt->GetOpCode()) { + case OP_try: + openJt = stmt; + break; + case OP_endtry: + openJt = nullptr; + break; + case OP_catch: + if (openJt != nullptr) { + CatchNode *jcn = static_cast(stmt); + for (uint32 i = 0; i < jcn->Size(); ++i) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jcn->GetExceptionTyIdxVecElement(i)); + MIRPtrType *ptr = static_cast(type); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptr->GetPointedTyIdx()); + CHECK_FATAL(type->GetPrimType() == PTY_void, "type's primType should be PTY_void"); + } + } + break; + default: + break; + } + } +} +} /* namespace maplebe */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..72ba534f1b21bbd023375a8f6b493d9338cbcc20 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -0,0 +1,14 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_abi.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_abi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..636c2427d42a1e4b0a04784f700c1fa5e0755e80 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_abi.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cgfunc.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +namespace AArch64Abi { +bool IsAvailableReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return canBeAssigned; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return canBeAssigned; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsCallerSaveReg(AArch64reg regNO) { + return (R0 <= regNO && regNO <= R18) || (V0 <= regNO && regNO <= V7) || + (V16 <= regNO && regNO <= V31) || (regNO == kRFLAG); +} + +bool IsCalleeSavedReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isCalleeSave; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isCalleeSave; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsParamReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case 
R##ID: \ + return isParam; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isParam; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsSpillReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isSpill; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isSpill; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsExtraSpillReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isExtraSpill; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isExtraSpill; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsSpillRegInRA(AArch64reg regNO, bool has3RegOpnd) { + /* if has 3 RegOpnd, previous reg used to spill. */ + if (has3RegOpnd) { + return AArch64Abi::IsSpillReg(regNO) || AArch64Abi::IsExtraSpillReg(regNO); + } + return AArch64Abi::IsSpillReg(regNO); +} + +PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize) { + if (ty->GetKind() == kTypeStruct) { + MIRStructType *structTy = static_cast(ty); + if (structTy->GetFields().size() == 1) { + auto fieldPair = structTy->GetFields()[0]; + MIRType *fieldTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + if (fieldTy->GetKind() == kTypeArray) { + MIRArrayType *arrayTy = static_cast(fieldTy); + MIRType *arrayElemTy = arrayTy->GetElemType(); + arraySize = arrayTy->GetSizeArrayItem(0); + if (arrayTy->GetDim() == k1BitSize && arraySize <= static_cast(k4BitSize) && + IsPrimitiveVector(arrayElemTy->GetPrimType())) { + return arrayElemTy->GetPrimType(); + } + } + } + } + return PTY_void; +} +} /* namespace AArch64Abi */ +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp new file mode 100644 index 0000000000000000000000000000000000000000..713ace44ae7ed4574634bd7697baaa57fbf5872b --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp @@ -0,0 +1,360 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
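The register predicates above all follow the same X-macro pattern: each query re-expands the `.def` register tables with a different field selected. A self-contained sketch of the pattern (the toy register list and its fields are invented for illustration; the real tables live in aarch64_int_regs.def and aarch64_fp_simd_regs.def):

```cpp
#include <cstdio>

// Toy register table: each entry carries an ID and one per-register property.
#define TOY_REG_LIST(REG)       \
  REG(0, /*calleeSave=*/false)  \
  REG(19, /*calleeSave=*/true)  \
  REG(20, /*calleeSave=*/true)

enum ToyReg {
#define REG(ID, CALLEE_SAVE) kR##ID,
  TOY_REG_LIST(REG)
#undef REG
};

// Re-expand the same list, this time selecting the calleeSave field per case.
bool IsCalleeSaved(ToyReg reg) {
  switch (reg) {
#define REG(ID, CALLEE_SAVE) \
    case kR##ID:             \
      return CALLEE_SAVE;
    TOY_REG_LIST(REG)
#undef REG
    default:
      return false;
  }
}

int main() {
  std::printf("r0 callee-saved: %d\n", IsCalleeSaved(kR0));    // 0
  std::printf("r19 callee-saved: %d\n", IsCalleeSaved(kR19));  // 1
  return 0;
}
```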
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_alignment.h" +#include "insn.h" +#include "loop.h" +#include "aarch64_cg.h" +#include "cg_option.h" +#include + +namespace maplebe { +void AArch64AlignAnalysis::FindLoopHeader() { + MapleVector loops = aarFunc->GetLoops(); + if (loops.empty()) { + return; + } + for (const auto *loop : loops) { + const BB *header = loop->GetHeader(); + if (header != nullptr) { + InsertLoopHeaderBBs(const_cast(*header)); + } + } +} + +void AArch64AlignAnalysis::FindJumpTarget() { + MapleUnorderedMap label2BBMap = aarFunc->GetLab2BBMap(); + if (label2BBMap.empty()) { + return; + } + for (auto &iter : label2BBMap) { + BB *jumpBB = iter.second; + if (jumpBB != nullptr) { + InsertJumpTargetBBs(*jumpBB); + } + } +} + +bool AArch64AlignAnalysis::IsIncludeCall(BB &bb) { + return bb.HasCall(); +} + +bool AArch64AlignAnalysis::IsInSizeRange(BB &bb) { + uint64 size = 0; + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction() || insn->GetMachineOpcode() == MOP_pseudo_ret_int || + insn->GetMachineOpcode() == MOP_pseudo_ret_float) { + continue; + } + size += kAlignInsnLength; + } + BB *curBB = &bb; + while (curBB->GetNext() != nullptr && curBB->GetNext()->GetLabIdx() == 0) { + FOR_BB_INSNS_CONST(insn, curBB->GetNext()) { + if (!insn->IsMachineInstruction() || insn->GetMachineOpcode() == MOP_pseudo_ret_int || + insn->GetMachineOpcode() == MOP_pseudo_ret_float) { + continue; + } + size += kAlignInsnLength; + } + curBB = curBB->GetNext(); + } + AArch64AlignInfo targetInfo; + if (CGOptions::GetAlignMinBBSize() == 0 || CGOptions::GetAlignMaxBBSize() == 0) { + return false; + } + targetInfo.alignMinBBSize = (CGOptions::OptimizeForSize()) ? 16 : CGOptions::GetAlignMinBBSize(); + targetInfo.alignMaxBBSize = (CGOptions::OptimizeForSize()) ? 44 : CGOptions::GetAlignMaxBBSize(); + if (size <= targetInfo.alignMinBBSize || size >= targetInfo.alignMaxBBSize) { + return false; + } + return true; +} + +bool AArch64AlignAnalysis::HasFallthruEdge(BB &bb) { + for (auto *iter : bb.GetPreds()) { + if (iter == bb.GetPrev()) { + return true; + } + } + return false; +} + +void AArch64AlignAnalysis::ComputeLoopAlign() { + if (loopHeaderBBs.empty()) { + return; + } + for (BB *bb : loopHeaderBBs) { + if (bb == cgFunc->GetFirstBB() || IsIncludeCall(*bb) || !IsInSizeRange(*bb)) { + continue; + } + bb->SetNeedAlign(true); + if (CGOptions::GetLoopAlignPow() == 0) { + return; + } + AArch64AlignInfo targetInfo; + targetInfo.loopAlign = CGOptions::GetLoopAlignPow(); + if (alignInfos.find(bb) == alignInfos.end()) { + alignInfos[bb] = targetInfo.loopAlign; + } else { + uint32 curPower = alignInfos[bb]; + alignInfos[bb] = (targetInfo.loopAlign < curPower) ? targetInfo.loopAlign : curPower; + } + bb->SetAlignPower(alignInfos[bb]); + } +} + +void AArch64AlignAnalysis::ComputeJumpAlign() { + if (jumpTargetBBs.empty()) { + return; + } + for (BB *bb : jumpTargetBBs) { + if (bb == cgFunc->GetFirstBB() || !IsInSizeRange(*bb) || HasFallthruEdge(*bb)) { + continue; + } + bb->SetNeedAlign(true); + if (CGOptions::GetJumpAlignPow() == 0) { + return; + } + AArch64AlignInfo targetInfo; + targetInfo.jumpAlign = (CGOptions::OptimizeForSize()) ? 
3 : CGOptions::GetJumpAlignPow(); + if (alignInfos.find(bb) == alignInfos.end()) { + alignInfos[bb] = targetInfo.jumpAlign; + } else { + uint32 curPower = alignInfos[bb]; + alignInfos[bb] = (targetInfo.jumpAlign < curPower) ? targetInfo.jumpAlign : curPower; + } + bb->SetAlignPower(alignInfos[bb]); + } +} + +uint32 AArch64AlignAnalysis::GetAlignRange(uint32 alignedVal, uint32 addr) const { + if (addr == 0) { + return addr; + } + uint32 range = (alignedVal - (((addr - 1) * kInsnSize) & (alignedVal - 1))) / kInsnSize - 1; + return range; +} + +bool AArch64AlignAnalysis::IsInSameAlignedRegion(uint32 addr1, uint32 addr2, uint32 alignedRegionSize) const { + return (((addr1 - 1) * kInsnSize) / alignedRegionSize) == (((addr2 - 1) * kInsnSize) / alignedRegionSize); +} + +bool AArch64AlignAnalysis::MarkCondBranchAlign() { + sameTargetBranches.clear(); + uint32 addr = 0; + bool change = false; + FOR_ALL_BB(bb, aarFunc) { + if (bb != nullptr && bb->IsBBNeedAlign()) { + uint32 alignedVal = (1U << bb->GetAlignPower()); + uint32 alignNopNum = GetAlignRange(alignedVal, addr); + addr += alignNopNum; + bb->SetAlignNopNum(alignNopNum); + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + addr += insn->GetAtomicNum(); + MOperator mOp = insn->GetMachineOpcode(); + if ((mOp == MOP_wtbz || mOp == MOP_wtbnz || mOp == MOP_xtbz || mOp == MOP_xtbnz) && insn->IsNeedSplit()) { + ++addr; + } + if (!insn->IsCondBranch() || insn->GetOperandSize() == 0) { + insn->SetAddress(addr); + continue; + } + Operand &opnd = insn->GetOperand(insn->GetOperandSize() - 1); + if (!opnd.IsLabelOpnd()) { + insn->SetAddress(addr); + continue; + } + LabelIdx targetIdx = static_cast(opnd).GetLabelIndex(); + if (sameTargetBranches.find(targetIdx) == sameTargetBranches.end()) { + sameTargetBranches[targetIdx] = addr; + insn->SetAddress(addr); + continue; + } + uint32 sameTargetAddr = sameTargetBranches[targetIdx]; + uint32 alignedRegionSize = 1 << kAlignRegionPower; + /** + * if two branches jump to the same target and their addresses are within an 16byte aligned region, + * add a certain number of [nop] to move them out of the region. + */ + if (IsInSameAlignedRegion(sameTargetAddr, addr, alignedRegionSize)) { + uint32 nopNum = GetAlignRange(alignedRegionSize, addr) + 1; + nopNum = nopNum > kAlignMaxNopNum ? 0 : nopNum; + if (nopNum == 0) { + break; + } + change = true; + insn->SetNopNum(nopNum); + for (uint32 i = 0; i < nopNum; i++) { + addr += insn->GetAtomicNum(); + } + } else { + insn->SetNopNum(0); + } + sameTargetBranches[targetIdx] = addr; + insn->SetAddress(addr); + } + } + return change; +} + +void AArch64AlignAnalysis::UpdateInsnId() { + uint32 id = 0; + FOR_ALL_BB(bb, aarFunc) { + if (bb != nullptr && bb->IsBBNeedAlign()) { + uint32 alignedVal = 1U << (bb->GetAlignPower()); + uint32 range = GetAlignRange(alignedVal, id); + id = id + (range > kAlignPseudoSize ? 
range : kAlignPseudoSize); + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + id += insn->GetAtomicNum(); + if (insn->IsCondBranch() && insn->GetNopNum() != 0) { + id += insn->GetNopNum(); + } + MOperator mOp = insn->GetMachineOpcode(); + if ((mOp == MOP_wtbz || mOp == MOP_wtbnz || mOp == MOP_xtbz || mOp == MOP_xtbnz) && insn->IsNeedSplit()) { + ++id; + } + insn->SetId(id); + if (insn->GetMachineOpcode() == MOP_adrp_ldr && CGOptions::IsLazyBinding() && !aarFunc->GetCG()->IsLibcore()) { + ++id; + } + } + } +} + +bool AArch64AlignAnalysis::MarkShortBranchSplit() { + bool change = false; + bool split; + do { + split = false; + UpdateInsnId(); + for (auto *bb = aarFunc->GetFirstBB(); bb != nullptr && !split; bb = bb->GetNext()) { + for (auto *insn = bb->GetLastInsn(); insn != nullptr && !split; insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + MOperator mOp = insn->GetMachineOpcode(); + if (mOp != MOP_wtbz && mOp != MOP_wtbnz && mOp != MOP_xtbz && mOp != MOP_xtbnz) { + continue; + } + if (insn->IsNeedSplit()) { + continue; + } + auto &labelOpnd = static_cast(insn->GetOperand(kInsnThirdOpnd)); + if (aarFunc->DistanceCheck(*bb, labelOpnd.GetLabelIndex(), insn->GetId())) { + continue; + } + split = true; + change = true; + insn->SetNeedSplit(split); + } + } + } while (split); + return change; +} + +void AArch64AlignAnalysis::AddNopAfterMark() { + FOR_ALL_BB(bb, aarFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() || !insn->IsCondBranch() || insn->GetNopNum() == 0) { + continue; + } + /** + * To minimize the performance loss of nop, we decided to place nop on an island before the current addr. + * The island here is after [b, ret, br, blr]. + * To ensure correct insertion of the nop, the nop is inserted in the original position in the following cases: + * 1. A branch with the same target exists before it. + * 2. A branch whose nopNum value is not 0 exists before it. + * 3. no BBs need to be aligned between the original location and the island. 
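 * A schematic example of the island case (illustrative, not emitted verbatim):
 *     b   .L_exit        <- the search stops at a [b, ret, br]; the pending nops
 *     nop                   are inserted after it, in a slot that never executes
 * If no island is found, the nops are placed directly before the conditional
 * branch itself, as the insertion loop below does.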
+ */ + std::unordered_map targetCondBrs; + bool findIsland = false; + Insn *detect = insn->GetPrev(); + BB *region = bb; + while (detect != nullptr || region != aarFunc->GetFirstBB()) { + while (detect == nullptr) { + DEBUG_ASSERT(region->GetPrev() != nullptr, "get region prev failed"); + region = region->GetPrev(); + detect = region->GetLastInsn(); + } + if (detect->GetMachineOpcode() == MOP_xuncond || detect->GetMachineOpcode() == MOP_xret || + detect->GetMachineOpcode() == MOP_xbr) { + findIsland = true; + break; + } + if (region->IsBBNeedAlign()) { + break; + } + if (!detect->IsMachineInstruction() || !detect->IsCondBranch() || detect->GetOperandSize() == 0) { + detect = detect->GetPrev(); + continue; + } + if (detect->GetNopNum() != 0) { + break; + } + Operand &opnd = detect->GetOperand(detect->GetOperandSize() - 1); + if (!opnd.IsLabelOpnd()) { + detect = detect->GetPrev(); + continue; + } + LabelIdx targetIdx = static_cast(opnd).GetLabelIndex(); + if (targetCondBrs.find(targetIdx) != targetCondBrs.end()) { + break; + } + targetCondBrs[targetIdx] = detect; + detect = detect->GetPrev(); + } + uint32 nopNum = insn->GetNopNum(); + if (findIsland) { + for (uint32 i = 0; i < nopNum; i++) { + (void)bb->InsertInsnAfter(*detect, aarFunc->GetInsnBuilder()->BuildInsn(MOP_nop)); + } + } else { + for (uint32 i = 0; i < nopNum; i++) { + (void)bb->InsertInsnBefore(*insn, aarFunc->GetInsnBuilder()->BuildInsn(MOP_nop)); + } + } + } + } +} + +/** + * The insertion of nop affects the judgement of the addressing range of short branches, + * and the splitting of short branches affects the calculation of the location and number of nop insertions. + * In the iteration process of both, we only make some marks, wait for the fixed points, and fill in nop finally. + */ +void AArch64AlignAnalysis::ComputeCondBranchAlign() { + bool condBrChange = false; + bool shortBrChange = false; + while (true) { + condBrChange = MarkCondBranchAlign(); + if (!condBrChange) { + break; + } + shortBrChange = MarkShortBranchSplit(); + if (!shortBrChange) { + break; + } + } + AddNopAfterMark(); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..25b9354489a273af80823901f9abd98caa9d2be1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -0,0 +1,464 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_args.h" +#include +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +namespace maplebe { +using namespace maple; + +void AArch64MoveRegArgs::Run() { + MoveVRegisterArgs(); + MoveRegisterArgs(); +} + +void AArch64MoveRegArgs::CollectRegisterArgs(std::map &argsList, + std::vector &indexList, + std::map &pairReg, + std::vector &numFpRegs, + std::vector &fpSize) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + uint32 numFormal = static_cast(aarchCGFunc->GetFunction().GetFormalCount()); + numFpRegs.resize(numFormal); + fpSize.resize(numFormal); + AArch64CallConvImpl parmlocator(aarchCGFunc->GetBecommon()); + CCLocInfo ploc; + uint32 start = 0; + if (numFormal) { + MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (func->IsReturnStruct() && func->IsFirstArgReturn()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { + start = 1; + } + } + } + for (uint32 i = start; i < numFormal; ++i) { + MIRType *ty = aarchCGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &aarchCGFunc->GetFunction()); + if (ploc.reg0 == kRinvalid) { + continue; + } + AArch64reg reg0 = static_cast(ploc.reg0); + MIRSymbol *sym = aarchCGFunc->GetFunction().GetFormal(i); + if (sym->IsPreg()) { + continue; + } + argsList[i] = reg0; + indexList.emplace_back(i); + if (ploc.reg1 == kRinvalid) { + continue; + } + if (ploc.numFpPureRegs) { + uint32 index = i; + numFpRegs[index] = ploc.numFpPureRegs; + fpSize[index] = ploc.fpSize; + continue; + } + pairReg[i] = static_cast(ploc.reg1); + } +} + +ArgInfo AArch64MoveRegArgs::GetArgInfo(std::map &argsList, std::vector &numFpRegs, + std::vector &fpSize, uint32 argIndex) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + ArgInfo argInfo; + argInfo.reg = argsList[argIndex]; + argInfo.mirTy = aarchCGFunc->GetFunction().GetNthParamType(argIndex); + argInfo.symSize = aarchCGFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex()); + argInfo.memPairSecondRegSize = 0; + argInfo.doMemPairOpt = false; + argInfo.createTwoStores = false; + argInfo.isTwoRegParm = false; + + if (GetVecLanes(argInfo.mirTy->GetPrimType()) > 0) { + /* vector type */ + argInfo.stkSize = argInfo.symSize; + } else if ((argInfo.symSize > k8ByteSize) && (argInfo.symSize <= k16ByteSize)) { + argInfo.isTwoRegParm = true; + if (numFpRegs[argIndex] > kOneRegister) { + argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; + } else { + if (argInfo.symSize > k12ByteSize) { + argInfo.memPairSecondRegSize = k8ByteSize; + } else { + /* Round to 4 the stack space required for storing the struct */ + argInfo.memPairSecondRegSize = k4ByteSize; + } + argInfo.doMemPairOpt = true; + if (CGOptions::IsArm64ilp32()) { + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else { + argInfo.symSize = argInfo.stkSize = GetPointerSize(); + } + } + } else if (argInfo.symSize > k16ByteSize) { + /* For large struct passing, a pointer to the copy is used. */ + if (CGOptions::IsArm64ilp32()) { + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else { + argInfo.symSize = argInfo.stkSize = GetPointerSize(); + } + } else if ((argInfo.mirTy->GetPrimType() == PTY_agg) && (argInfo.symSize < k8ByteSize)) { + /* + * For small aggregate parameter, set to minimum of 8 bytes. + * B.5:If the argument type is a Composite Type then the size of the argument is rounded up to the + * nearest multiple of 8 bytes. 
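 * For example, a 5-byte aggregate is widened to a single 8-byte slot here.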
+ */ + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else if (numFpRegs[argIndex] > kOneRegister) { + argInfo.isTwoRegParm = true; + argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; + } else { + argInfo.stkSize = (argInfo.symSize < k4ByteSize) ? k4ByteSize : argInfo.symSize; + if (argInfo.symSize > k4ByteSize) { + argInfo.symSize = k8ByteSize; + } + } + argInfo.regType = (argInfo.reg < V0) ? kRegTyInt : kRegTyFloat; + argInfo.sym = aarchCGFunc->GetFunction().GetFormal(argIndex); + CHECK_NULL_FATAL(argInfo.sym); + argInfo.symLoc = + static_cast(aarchCGFunc->GetMemlayout()->GetSymAllocInfo(argInfo.sym->GetStIndex())); + CHECK_NULL_FATAL(argInfo.symLoc); + if (argInfo.doMemPairOpt && (aarchCGFunc->GetBaseOffset(*(argInfo.symLoc)) & 0x7)) { + /* Do not optimize for struct reg pair for unaligned access. + * However, this symbol requires two parameter registers, separate stores must be generated. + */ + argInfo.symSize = GetPointerSize(); + argInfo.doMemPairOpt = false; + argInfo.createTwoStores = true; + } + return argInfo; +} + +bool AArch64MoveRegArgs::IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const { + if (firstArgInfo.symLoc->GetMemSegment() != secondArgInfo.symLoc->GetMemSegment()) { + return false; + } + if (firstArgInfo.symSize != secondArgInfo.symSize) { + return false; + } + if (firstArgInfo.symSize != k4ByteSize && firstArgInfo.symSize != k8ByteSize) { + return false; + } + if (firstArgInfo.regType != secondArgInfo.regType) { + return false; + } + return firstArgInfo.symLoc->GetOffset() + firstArgInfo.stkSize == secondArgInfo.symLoc->GetOffset(); +} + +void AArch64MoveRegArgs::GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*firstArgInfo.symLoc)); + RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(firstArgInfo.reg, + firstArgInfo.stkSize * kBitsPerByte, + firstArgInfo.regType); + MOperator mOp = firstArgInfo.regType == kRegTyInt ? ((firstArgInfo.stkSize > k4ByteSize) ? MOP_xstp : MOP_wstp) + : ((firstArgInfo.stkSize > k4ByteSize) ? MOP_dstp : MOP_sstp); + RegOperand *regOpnd2 = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand(secondArgInfo.reg, + firstArgInfo.stkSize * kBitsPerByte, + firstArgInfo.regType); + if (firstArgInfo.doMemPairOpt && firstArgInfo.isTwoRegParm) { + AArch64reg regFp2 = static_cast(firstArgInfo.reg + kOneRegister); + regOpnd2 = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand(regFp2, + firstArgInfo.stkSize * kBitsPerByte, + firstArgInfo.regType); + } + + int32 limit = (secondArgInfo.stkSize > k4ByteSize) ? 
kStpLdpImm64UpperBound : kStpLdpImm32UpperBound; + int32 stOffset = aarchCGFunc->GetBaseOffset(*firstArgInfo.symLoc); + MemOperand *memOpnd = nullptr; + if (stOffset > limit || baseReg != nullptr) { + if (baseReg == nullptr || lastSegment != firstArgInfo.symLoc->GetMemSegment()) { + ImmOperand &immOpnd = + aarchCGFunc->CreateImmOperand(stOffset - firstArgInfo.symLoc->GetOffset(), k64BitSize, false); + baseReg = &aarchCGFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); + lastSegment = firstArgInfo.symLoc->GetMemSegment(); + aarchCGFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, GetLoweredPtrType()); + } + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(firstArgInfo.symLoc->GetOffset()), + k32BitSize); + if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + firstArgInfo.stkSize * kBitsPerByte, + *baseReg, nullptr, &offsetOpnd, firstArgInfo.sym); + } else { + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(static_cast(stOffset)), + k32BitSize); + if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + firstArgInfo.stkSize * kBitsPerByte, + *baseOpnd, nullptr, &offsetOpnd, firstArgInfo.sym); + } + Insn &pushInsn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *regOpnd2, *memOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + std::string argName = firstArgInfo.sym->GetName() + " " + secondArgInfo.sym->GetName(); + pushInsn.SetComment(std::string("store param: ").append(argName)); + } + aarchCGFunc->GetCurBB()->AppendInsn(pushInsn); +} + +void AArch64MoveRegArgs::GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, + int32 offset) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MOperator mOp = aarchCGFunc->PickStInsn(stBitSize, argInfo.mirTy->GetPrimType()); + RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(dest, stBitSize, argInfo.regType); + + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize); + if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + MemOperand *memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + stBitSize, baseOpnd, nullptr, &offsetOpnd, argInfo.sym); + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); + } + aarchCGFunc->GetCurBB()->AppendInsn(insn); +} + +void AArch64MoveRegArgs::GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + int32 stOffset = aarchCGFunc->GetBaseOffset(*argInfo.symLoc); + RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*argInfo.symLoc)); + RegOperand ®Opnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(argInfo.reg, argInfo.stkSize * kBitsPerByte, argInfo.regType); + MemOperand *memOpnd = nullptr; + if (MemOperand::IsPIMMOffsetOutOfRange(stOffset, argInfo.symSize * kBitsPerByte) || + (baseReg != nullptr && (lastSegment == argInfo.symLoc->GetMemSegment()))) { + if (baseReg == nullptr || lastSegment != argInfo.symLoc->GetMemSegment()) { + ImmOperand 
&immOpnd = aarchCGFunc->CreateImmOperand(stOffset - argInfo.symLoc->GetOffset(), k64BitSize, + false); + baseReg = &aarchCGFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); + lastSegment = argInfo.symLoc->GetMemSegment(); + aarchCGFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, PTY_a64); + } + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(argInfo.symLoc->GetOffset()), k32BitSize); + if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + argInfo.symSize * kBitsPerByte, *baseReg, + nullptr, &offsetOpnd, argInfo.sym); + } else { + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(static_cast(stOffset)), + k32BitSize); + if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + argInfo.symSize * kBitsPerByte, *baseOpnd, + nullptr, &offsetOpnd, argInfo.sym); + } + + MOperator mOp = aarchCGFunc->PickStInsn(argInfo.symSize * kBitsPerByte, argInfo.mirTy->GetPrimType()); + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); + } + aarchCGFunc->GetCurBB()->AppendInsn(insn); + + if (argInfo.createTwoStores || argInfo.doMemPairOpt) { + /* second half of the struct passing by registers. */ + uint32 part2BitSize = argInfo.memPairSecondRegSize * kBitsPerByte; + GenOneInsn(argInfo, *baseOpnd, part2BitSize, reg2, (stOffset + GetPointerSize())); + } else if (numFpRegs > kOneRegister) { + uint32 fpSizeBits = fpSize * kBitsPerByte; + AArch64reg regFp2 = static_cast(argInfo.reg + kOneRegister); + GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp2, (stOffset + static_cast(fpSize))); + if (numFpRegs > kTwoRegister) { + AArch64reg regFp3 = static_cast(argInfo.reg + kTwoRegister); + GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp3, (stOffset + static_cast(fpSize * k4BitShift))); + } + if (numFpRegs > kThreeRegister) { + AArch64reg regFp3 = static_cast(argInfo.reg + kThreeRegister); + GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp3, (stOffset + static_cast(fpSize * k8BitShift))); + } + } +} + +void AArch64MoveRegArgs::MoveRegisterArgs() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + BB *formerCurBB = aarchCGFunc->GetCurBB(); + aarchCGFunc->GetDummyBB()->ClearInsns(); + aarchCGFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + + std::map movePara; + std::vector moveParaIndex; + std::map pairReg; + std::vector numFpRegs; + std::vector fpSize; + CollectRegisterArgs(movePara, moveParaIndex, pairReg, numFpRegs, fpSize); + + std::vector::iterator it; + std::vector::iterator next; + for (it = moveParaIndex.begin(); it != moveParaIndex.end(); ++it) { + uint32 firstIndex = *it; + ArgInfo firstArgInfo = GetArgInfo(movePara, numFpRegs, fpSize, firstIndex); + next = it; + ++next; + if ((next != moveParaIndex.end()) || (firstArgInfo.doMemPairOpt)) { + uint32 secondIndex = (firstArgInfo.doMemPairOpt) ? firstIndex : *next; + ArgInfo secondArgInfo = GetArgInfo(movePara, numFpRegs, fpSize, secondIndex); + secondArgInfo.reg = (firstArgInfo.doMemPairOpt) ? pairReg[firstIndex] : movePara[secondIndex]; + secondArgInfo.symSize = (firstArgInfo.doMemPairOpt) ? firstArgInfo.memPairSecondRegSize : secondArgInfo.symSize; + secondArgInfo.symLoc = (firstArgInfo.doMemPairOpt) ? 
secondArgInfo.symLoc : + static_cast(aarchCGFunc->GetMemlayout()->GetSymAllocInfo( + secondArgInfo.sym->GetStIndex())); + /* Make sure they are in same segment if want to use stp */ + if (((firstArgInfo.isTwoRegParm && secondArgInfo.isTwoRegParm) || + (!firstArgInfo.isTwoRegParm && !secondArgInfo.isTwoRegParm)) && + (firstArgInfo.doMemPairOpt || IsInSameSegment(firstArgInfo, secondArgInfo))) { + GenerateStpInsn(firstArgInfo, secondArgInfo); + if (!firstArgInfo.doMemPairOpt) { + it = next; + } + continue; + } + } + GenerateStrInsn(firstArgInfo, pairReg[firstIndex], numFpRegs[firstIndex], fpSize[firstIndex]); + } + + aarchCGFunc->GetFirstBB()->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + aarchCGFunc->SetCurBB(*formerCurBB); +} + +void AArch64MoveRegArgs::MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 bitSize = byteSize * kBitsPerByte; + MemOperand &memOpnd = aarchCGFunc->GetOrCreateMemOpnd(mirSym, 0, bitSize, true); + RegOperand *regOpnd = nullptr; + if (mirSym.IsPreg()) { + PregIdx pregIdx = aarchCGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + regOpnd = &aarchCGFunc->GetOrCreateVirtualRegisterOperand(aarchCGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } else { + regOpnd = &aarchCGFunc->GetOrCreateVirtualRegisterOperand(aarchCGFunc->NewVReg(kRegTyInt, k8ByteSize)); + } + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn( + aarchCGFunc->PickLdInsn(GetPrimTypeBitSize(stype), stype), *regOpnd, memOpnd); + MemOperand &memOpnd1 = aarchCGFunc->GetOrCreateMemOpnd(mirSym, 0, bitSize, false); + Insn &insn1 = aarchCGFunc->GetInsnBuilder()->BuildInsn( + aarchCGFunc->PickStInsn(GetPrimTypeBitSize(stype), stype), *regOpnd, memOpnd1); + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn1); + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void AArch64MoveRegArgs::LoadStackArgsToVReg(MIRSymbol &mirSym) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 bitSize = byteSize * kBitsPerByte; + MemOperand &memOpnd = aarchCGFunc->GetOrCreateMemOpnd(mirSym, 0, bitSize); + PregIdx pregIdx = aarchCGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = aarchCGFunc->GetOrCreateVirtualRegisterOperand( + aarchCGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn( + aarchCGFunc->PickLdInsn(GetPrimTypeBitSize(stype), stype), dstRegOpnd, memOpnd); + + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + std::string key = "param: %%"; + key += std::to_string(mirSym.GetPreg()->GetPregNo()); + DEBUG_ASSERT(mirSym.GetStorageClass() == kScFormal, "vreg parameters should be kScFormal type."); + insn.SetComment(key); + } + + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void AArch64MoveRegArgs::MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const { + auto *aarchCGFunc = static_cast(cgFunc); + RegType regType = (ploc.reg0 < V0) ? kRegTyInt : kRegTyFloat; + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 srcBitSize = ((byteSize < k4ByteSize) ? 
k4ByteSize : byteSize) * kBitsPerByte; + PregIdx pregIdx = aarchCGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = + aarchCGFunc->GetOrCreateVirtualRegisterOperand(aarchCGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + dstRegOpnd.SetSize(srcBitSize); + RegOperand &srcRegOpnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), srcBitSize, regType); + DEBUG_ASSERT(mirSym.GetStorageClass() == kScFormal, "should be args"); + MOperator mOp = aarchCGFunc->PickMovBetweenRegs(stype, stype); + if (mOp == MOP_vmovvv || mOp == MOP_vmovuu) { + VectorInsn &vInsn = aarchCGFunc->GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(dstRegOpnd).AddOpndChain(srcRegOpnd); + auto *vecSpec1 = aarchCGFunc->GetMemoryPool()->New(srcBitSize >> k3ByteSize, k8BitSize); + auto *vecSpec2 = aarchCGFunc->GetMemoryPool()->New(srcBitSize >> k3ByteSize, k8BitSize); + vInsn.PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + aarchCGFunc->GetCurBB()->InsertInsnBegin(vInsn); + return; + } + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, dstRegOpnd, srcRegOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + std::string key = "param: %%"; + key += std::to_string(mirSym.GetPreg()->GetPregNo()); + insn.SetComment(key); + } + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void AArch64MoveRegArgs::MoveVRegisterArgs() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + BB *formerCurBB = aarchCGFunc->GetCurBB(); + aarchCGFunc->GetDummyBB()->ClearInsns(); + aarchCGFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + AArch64CallConvImpl parmlocator(aarchCGFunc->GetBecommon()); + CCLocInfo ploc; + + uint32 formalCount = static_cast(aarchCGFunc->GetFunction().GetFormalCount()); + uint32 start = 0; + if (formalCount) { + MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (func->IsReturnStruct() && func->IsFirstArgReturn()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16BitSize) { + start = 1; + } + } + } + for (uint32 i = start; i < formalCount; ++i) { + MIRType *ty = aarchCGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &aarchCGFunc->GetFunction()); + MIRSymbol *sym = aarchCGFunc->GetFunction().GetFormal(i); + + /* load locarefvar formals to store in the reflocals. */ + if (aarchCGFunc->GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) { + MoveLocalRefVarToRefLocals(*sym); + } + + if (!sym->IsPreg()) { + continue; + } + + if (ploc.reg0 == kRinvalid) { + /* load stack parameters to the vreg. */ + LoadStackArgsToVReg(*sym); + } else { + MoveArgsToVReg(ploc, *sym); + } + } + + aarchCGFunc->GetFirstBB()->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + aarchCGFunc->SetCurBB(*formerCurBB); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fee7b22da3da061e62dfe775c43922b909815348 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp @@ -0,0 +1,738 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cgfunc.h" +#include "becommon.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +using namespace maple; + +namespace { +constexpr int kMaxRegCount = 4; + +/* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * ARM 64-bit Architecture. Table 1. + */ +enum AArch64ArgumentClass : uint8 { + kAArch64NoClass, + kAArch64IntegerClass, + kAArch64FloatClass, + kAArch64MemoryClass +}; + +int32 ProcessNonStructAndNonArrayWhenClassifyAggregate(const MIRType &mirType, + AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength) { + CHECK_FATAL(classesLength > 0, "classLength must > 0"); + /* scalar type */ + switch (mirType.GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_ptr: + case PTY_ref: + case PTY_u64: + case PTY_i64: + classes[0] = kAArch64IntegerClass; + return 1; + case PTY_f32: + case PTY_f64: + case PTY_c64: + case PTY_c128: + classes[0] = kAArch64FloatClass; + return 1; + default: + CHECK_FATAL(false, "NYI"); + } + + /* should not reach to this point */ + return 0; +} + +PrimType TraverseStructFieldsForFp(MIRType *ty, uint32 &numRegs) { + if (ty->GetKind() == kTypeArray) { + MIRArrayType *arrtype = static_cast(ty); + MIRType *pty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrtype->GetElemTyIdx()); + if (pty->GetKind() == kTypeArray || pty->GetKind() == kTypeStruct) { + return TraverseStructFieldsForFp(pty, numRegs); + } + for (uint32 i = 0; i < arrtype->GetDim(); ++i) { + numRegs += arrtype->GetSizeArrayItem(i); + } + return pty->GetPrimType(); + } else if (ty->GetKind() == kTypeStruct) { + MIRStructType *sttype = static_cast(ty); + FieldVector fields = sttype->GetFields(); + PrimType oldtype = PTY_void; + for (uint32 fcnt = 0; fcnt < fields.size(); ++fcnt) { + TyIdx fieldtyidx = fields[fcnt].second.first; + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); + PrimType ptype = TraverseStructFieldsForFp(fieldty, numRegs); + if (oldtype != PTY_void && oldtype != ptype) { + return PTY_void; + } else { + oldtype = ptype; + } + } + return oldtype; + } else { + numRegs++; + return ty->GetPrimType(); + } +} + +int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength, uint32 &fpSize); + +uint32 ProcessStructWhenClassifyAggregate(const BECommon &be, MIRStructType &structType, + AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength, uint32 &fpSize) { + CHECK_FATAL(classesLength > 0, "classLength must > 0"); + uint32 sizeOfTyInDwords = static_cast( + RoundUp(be.GetTypeSize(structType.GetTypeIndex()), k8ByteSize) >> k8BitShift); + bool isF32 = false; + bool isF64 = false; + uint32 numRegs = 0; + for (uint32 f = 0; f < structType.GetFieldsSize(); ++f) { + TyIdx fieldTyIdx = structType.GetFieldsElemt(f).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + PrimType pType = TraverseStructFieldsForFp(fieldType, numRegs); + if (pType == PTY_f32) { + if (isF64) { + isF64 = false; + break; + } + isF32 = true; + } else if (pType == 
PTY_f64) { + if (isF32) { + isF32 = false; + break; + } + isF64 = true; + } else if (IsPrimitiveVector(pType)) { + isF64 = true; + break; + } else { + isF32 = isF64 = false; + break; + } + } + if (isF32 || isF64) { + CHECK_FATAL(numRegs <= classesLength, "ClassifyAggregate: num regs exceed limit"); + for (uint32 i = 0; i < numRegs; ++i) { + classes[i] = kAArch64FloatClass; + } + fpSize = isF32 ? k4ByteSize : k8ByteSize; + return numRegs; + } + + classes[0] = kAArch64IntegerClass; + if (sizeOfTyInDwords == kDwordSizeTwo) { + classes[1] = kAArch64IntegerClass; + } + DEBUG_ASSERT(sizeOfTyInDwords <= classesLength, "sizeOfTyInDwords exceed limit"); + return sizeOfTyInDwords; +} + +/* + * Analyze the given aggregate using the rules given by the ARM 64-bit ABI and + * return the number of doublewords to be passed in registers; the classes of + * the doublewords are returned in parameter "classes"; if 0 is returned, it + * means the whole aggregate is passed in memory. + */ +int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength, uint32 &fpSize) { + CHECK_FATAL(classesLength > 0, "invalid index"); + uint64 sizeOfTy = be.GetTypeSize(mirType.GetTypeIndex()); + /* Rule B.3. + * If the argument type is a Composite Type that is larger than 16 bytes + * then the argument is copied to memory allocated by the caller and + * the argument is replaced by a pointer to the copy. + */ + if ((sizeOfTy > k16ByteSize) || (sizeOfTy == 0)) { + return 0; + } + + /* + * An argument of any Integer class takes up an integer register + * which is a single double-word. + * Rule B.4. The size of an argument of composite type is rounded up to the nearest + * multiple of 8 bytes. + */ + int64 sizeOfTyInDwords = static_cast(RoundUp(sizeOfTy, k8ByteSize) >> k8BitShift); + DEBUG_ASSERT(sizeOfTyInDwords > 0, "sizeOfTyInDwords should be sizeOfTyInDwords > 0"); + DEBUG_ASSERT(sizeOfTyInDwords <= kTwoRegister, "sizeOfTyInDwords should be <= 2"); + int64 i; + for (i = 0; i < sizeOfTyInDwords; ++i) { + classes[i] = kAArch64NoClass; + } + if ((mirType.GetKind() != kTypeStruct) && (mirType.GetKind() != kTypeArray) && (mirType.GetKind() != kTypeUnion)) { + return ProcessNonStructAndNonArrayWhenClassifyAggregate(mirType, classes, classesLength); + } + if (mirType.GetKind() == kTypeStruct) { + MIRStructType &structType = static_cast(mirType); + return static_cast(ProcessStructWhenClassifyAggregate(be, structType, classes, classesLength, fpSize)); + } + /* post merger clean-up */ + for (i = 0; i < sizeOfTyInDwords; ++i) { + if (classes[i] == kAArch64MemoryClass) { + return 0; + } + } + return static_cast(sizeOfTyInDwords); +} +} + +/* external interface to look for pure float struct */ +uint32 AArch64CallConvImpl::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) { + if (structType.GetSize() > k32ByteSize) { + return 0; + } + AArch64ArgumentClass classes[kMaxRegCount]; + uint32 numRegs = ProcessStructWhenClassifyAggregate(beCommon, structType, classes, kMaxRegCount, fpSize); + if (numRegs == 0) { + return 0; + } + + bool isPure = true; + for (uint i = 0; i < numRegs; ++i) { + DEBUG_ASSERT(i < kMaxRegCount, "i should be lower than kMaxRegCount"); + if (classes[i] != kAArch64FloatClass) { + isPure = false; + break; + } + } + if (isPure) { + return numRegs; + } + return 0; +} + +void AArch64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const { + pLoc.reg0 = kRinvalid; + pLoc.reg1 = kRinvalid; + pLoc.reg2 = kRinvalid; + pLoc.reg3 = kRinvalid; + 
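 /* reg0..reg3: a homogeneous floating-point aggregate may occupy up to four registers */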
pLoc.memOffset = nextStackArgAdress; + pLoc.fpSize = 0; + pLoc.numFpPureRegs = 0; +} + +int32 AArch64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc) { + InitCCLocInfo(pLoc); + uint32 retSize = beCommon.GetTypeSize(retType.GetTypeIndex().GetIdx()); + if (retSize == 0) { + return 0; /* size 0 ret val */ + } + if (retSize <= k16ByteSize) { + /* For return struct size less or equal to 16 bytes, the values */ + /* are returned in register pairs. */ + AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; /* Max of four floats. */ + uint32 fpSize; + uint32 numRegs = static_cast(ClassifyAggregate(beCommon, retType, classes, sizeof(classes), fpSize)); + if (classes[0] == kAArch64FloatClass) { + CHECK_FATAL(numRegs <= kMaxRegCount, "LocateNextParm: illegal number of regs"); + AllocateNSIMDFPRegisters(pLoc, numRegs); + pLoc.numFpPureRegs = numRegs; + pLoc.fpSize = fpSize; + return 0; + } else { + CHECK_FATAL(numRegs <= kTwoRegister, "LocateNextParm: illegal number of regs"); + if (numRegs == kOneRegister) { + pLoc.reg0 = AllocateGPRegister(); + } else { + AllocateTwoGPRegisters(pLoc); + } + return 0; + } + } else { + /* For return struct size > 16 bytes the pointer returns in x8. */ + pLoc.reg0 = R8; + return GetPointerSize(); + } +} + +/* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * the ARM 64-bit Architecture. $5.4.2 + * + * For internal only functions, we may want to implement + * our own rules as Apple IOS has done. Maybe we want to + * generate two versions for each of externally visible functions, + * one conforming to the ARM standard ABI, and the other for + * internal only use. + * + * LocateNextParm should be called with each parameter in the parameter list + * starting from the beginning, one call per parameter in sequence; it returns + * the information on how each parameter is passed in pLoc + * + * *** CAUTION OF USE: *** + * If LocateNextParm is called for function formals, third argument isFirst is true. + * LocateNextParm is then checked against a function parameter list. All other calls + * of LocateNextParm are against caller's argument list must not have isFirst set, + * or it will be checking the caller's enclosing function. + */ +int32 AArch64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc) { + InitCCLocInfo(pLoc); + + bool is64x1vec = false; + if (tFunc != nullptr && tFunc->GetParamSize() > 0) { + is64x1vec = tFunc->GetNthParamAttr(paramNum).GetAttr(ATTR_oneelem_simd) != 0; + } + + if (isFirst) { + MIRFunction *func = tFunc != nullptr ? tFunc : const_cast(beCommon.GetMIRModule().CurFunction()); + if (func->IsFirstArgReturn()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + size_t size = beCommon.GetTypeSize(tyIdx); + if (size == 0) { + /* For return struct size 0 there is no return value. */ + return 0; + } + /* For return struct size > 16 bytes the pointer returns in x8. */ + pLoc.reg0 = R8; + return GetPointerSize(); + } + } + uint64 typeSize = beCommon.GetTypeSize(mirType.GetTypeIndex()); + if (typeSize == 0) { + return 0; + } + int32 typeAlign = beCommon.GetTypeAlign(mirType.GetTypeIndex()); + /* + * Rule C.12 states that we do round nextStackArgAdress up before we use its value + * according to the alignment requirement of the argument being processed. + * We do the rounding up at the end of LocateNextParm(), + * so we want to make sure our rounding up is correct. 
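 * For example, if a 16-byte-aligned argument arrived while nextStackArgAdress
 * were still 8, the assertion below would fire; the round-up must already have
 * advanced it to 16.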
+ */ + DEBUG_ASSERT((nextStackArgAdress & (std::max(typeAlign, static_cast(k8ByteSize)) - 1)) == 0, + "C.12 alignment requirement is violated"); + pLoc.memSize = static_cast(typeSize); + ++paramNum; + + int32 aggCopySize = 0; + switch (mirType.GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_ptr: + case PTY_ref: + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_i128: + case PTY_u128: + /* Rule C.7 */ + typeSize = k8ByteSize; + pLoc.reg0 = is64x1vec ? AllocateSIMDFPRegister() : AllocateGPRegister(); + DEBUG_ASSERT(nextGeneralRegNO <= AArch64Abi::kNumIntParmRegs, "RegNo should be pramRegNO"); + break; + /* + * for c64 complex numbers, we assume + * - callers marshall the two f32 numbers into one f64 register + * - callees de-marshall one f64 value into the real and the imaginery part + */ + case PTY_f32: + case PTY_f64: + case PTY_c64: + case PTY_v2i32: + case PTY_v4i16: + case PTY_v8i8: + case PTY_v2u32: + case PTY_v4u16: + case PTY_v8u8: + case PTY_v2f32: + /* Rule C.1 */ + DEBUG_ASSERT(GetPrimTypeSize(PTY_f64) == k8ByteSize, "unexpected type size"); + typeSize = k8ByteSize; + pLoc.reg0 = AllocateSIMDFPRegister(); + break; + /* + * for c128 complex numbers, we assume + * - callers marshall the two f64 numbers into one f128 register + * - callees de-marshall one f128 value into the real and the imaginery part + */ + case PTY_c128: + case PTY_v2i64: + case PTY_v4i32: + case PTY_v8i16: + case PTY_v16i8: + case PTY_v2u64: + case PTY_v4u32: + case PTY_v8u16: + case PTY_v16u8: + case PTY_v2f64: + case PTY_v4f32: + /* SIMD-FP registers have 128-bits. */ + pLoc.reg0 = AllocateSIMDFPRegister(); + DEBUG_ASSERT(nextFloatRegNO <= AArch64Abi::kNumFloatParmRegs, "regNO should not be greater than kNumFloatParmRegs"); + DEBUG_ASSERT(typeSize == k16ByteSize, "unexpected type size"); + break; + /* + * case of quad-word integer: + * we don't support java yet. + * if (has-16-byte-alignment-requirement) + * nextGeneralRegNO = (nextGeneralRegNO+1) & ~1; // C.8 round it up to the next even number + * try allocate two consecutive registers at once. + */ + /* case PTY_agg */ + case PTY_agg: { + aggCopySize = ProcessPtyAggWhenLocateNextParm(mirType, pLoc, typeSize, typeAlign); + break; + } + default: + CHECK_FATAL(false, "NYI"); + } + + /* Rule C.12 */ + if (pLoc.reg0 == kRinvalid) { + /* being passed in memory */ + nextStackArgAdress = pLoc.memOffset + static_cast(static_cast(typeSize)); + } + return aggCopySize; +} + +int32 AArch64CallConvImpl::ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, + int32 typeAlign) { + /* + * In AArch64, integer-float or float-integer + * argument passing is not allowed. All should go through + * integer-integer. + * In the case where a struct is homogeneous composed of one of the fp types, + * either all single fp or all double fp, then it can be passed by float-float. + */ + AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; + typeSize = beCommon.GetTypeSize(mirType.GetTypeIndex().GetIdx()); + int32 aggCopySize = 0; + if (typeSize > k16ByteSize) { + aggCopySize = static_cast(RoundUp(typeSize, GetPointerSize())); + } + /* + * alignment requirement + * Note. This is one of a few things iOS diverges from + * the ARM 64-bit standard. They don't observe the round-up requirement. 
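 * For example, with NGRN == 1 (x0 already taken), a 16-byte-aligned aggregate
 * rounds NGRN up to 2 and is passed in the x2/x3 pair (AAPCS rule C.8).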
+ */ + if (typeAlign == k16ByteSize) { + RoundNGRNUpToNextEven(); + } + + uint32 fpSize; + uint32 numRegs = static_cast( + ClassifyAggregate(beCommon, mirType, classes, sizeof(classes) / sizeof(AArch64ArgumentClass), fpSize)); + if (classes[0] == kAArch64FloatClass) { + CHECK_FATAL(numRegs <= kMaxRegCount, "LocateNextParm: illegal number of regs"); + typeSize = k8ByteSize; + AllocateNSIMDFPRegisters(pLoc, numRegs); + pLoc.numFpPureRegs = numRegs; + pLoc.fpSize = fpSize; + } else if (numRegs == 1) { + /* passing in registers */ + typeSize = k8ByteSize; + if (classes[0] == kAArch64FloatClass) { + CHECK_FATAL(false, "param passing in FP reg not allowed here"); + } else { + pLoc.reg0 = AllocateGPRegister(); + /* Rule C.11 */ + DEBUG_ASSERT((pLoc.reg0 != kRinvalid) || (nextGeneralRegNO == AArch64Abi::kNumIntParmRegs), + "reg0 should not be kRinvalid or nextGeneralRegNO should equal kNumIntParmRegs"); + } + } else if (numRegs == kTwoRegister) { + /* Other aggregates with 8 < size <= 16 bytes can be allocated in reg pair */ + DEBUG_ASSERT(classes[0] == kAArch64IntegerClass || classes[0] == kAArch64NoClass, + "classes[0] must be either integer class or no class"); + DEBUG_ASSERT(classes[1] == kAArch64IntegerClass || classes[1] == kAArch64NoClass, + "classes[1] must be either integer class or no class"); + AllocateTwoGPRegisters(pLoc); + /* Rule C.11 */ + if (pLoc.reg0 == kRinvalid) { + nextGeneralRegNO = AArch64Abi::kNumIntParmRegs; + } + } else { + /* + * 0 returned from ClassifyAggregate(). This means the whole data + * is passed thru memory. + * Rule B.3. + * If the argument type is a Composite Type that is larger than 16 + * bytes then the argument is copied to memory allocated by the + * caller and the argument is replaced by a pointer to the copy. + * + * Try to allocate an integer register + */ + typeSize = k8ByteSize; + pLoc.reg0 = AllocateGPRegister(); + pLoc.memSize = k8ByteSizeInt; /* byte size of a pointer in AArch64 */ + if (pLoc.reg0 != kRinvalid) { + numRegs = 1; + } + } + /* compute rightpad */ + if ((numRegs == 0) || (pLoc.reg0 == kRinvalid)) { + /* passed in memory */ + typeSize = RoundUp(static_cast(static_cast(pLoc.memSize)), k8ByteSize); + } + return aggCopySize; +} + +/* + * instantiated with the type of the function return value, it describes how + * the return value is to be passed back to the caller + * + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * the ARM 64-bit Architecture. $5.5 + * "If the type, T, of the result of a function is such that + * void func(T arg) + * would require that 'arg' be passed as a value in a register + * (or set of registers) according to the rules in $5.4 Parameter + * Passing, then the result is returned in the same registers + * as would be used for such an argument. + */ +void AArch64CallConvImpl::InitReturnInfo(MIRType &retTy, CCLocInfo &ccLocInfo) { + PrimType pType = retTy.GetPrimType(); + switch (pType) { + case PTY_void: + break; + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + ccLocInfo.regCount = 1; + ccLocInfo.reg0 = AArch64Abi::intReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = IsSignedInteger(pType) ? 
PTY_i32 : PTY_u32; /* promote the type */ + return; + + case PTY_ptr: + case PTY_ref: + CHECK_FATAL(false, "PTY_ptr should have been lowered"); + return; + + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_i128: + case PTY_u128: + ccLocInfo.regCount = 1; + ccLocInfo.reg0 = AArch64Abi::intReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */ + return; + + /* + * for c64 complex numbers, we assume + * - callers marshall the two f32 numbers into one f64 register + * - callees de-marshall one f64 value into the real and the imaginery part + */ + case PTY_f32: + case PTY_f64: + case PTY_c64: + case PTY_v2i32: + case PTY_v4i16: + case PTY_v8i8: + case PTY_v2u32: + case PTY_v4u16: + case PTY_v8u8: + case PTY_v2f32: + + /* + * for c128 complex numbers, we assume + * - callers marshall the two f64 numbers into one f128 register + * - callees de-marshall one f128 value into the real and the imaginery part + */ + case PTY_c128: + case PTY_v2i64: + case PTY_v4i32: + case PTY_v8i16: + case PTY_v16i8: + case PTY_v2u64: + case PTY_v4u32: + case PTY_v8u16: + case PTY_v16u8: + case PTY_v2f64: + case PTY_v4f32: + ccLocInfo.regCount = 1; + ccLocInfo.reg0 = AArch64Abi::floatReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = pType; + return; + + /* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * the ARM 64-bit Architecture. $5.5 + * "Otherwise, the caller shall reserve a block of memory of + * sufficient size and alignment to hold the result. The + * address of the memory block shall be passed as an additional + * argument to the function in x8. The callee may modify the + * result memory block at any point during the execution of the + * subroutine (there is no requirement for the callee to preserve + * the value stored in x8)." + */ + case PTY_agg: { + uint64 size = beCommon.GetTypeSize(retTy.GetTypeIndex()); + if ((size > k16ByteSize) || (size == 0)) { + /* + * The return value is returned via memory. + * The address is in X8 and passed by the caller. 
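 * For example, a 24-byte struct result is written by the callee through the
 * buffer address received in x8 rather than being returned in registers.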
+ */ + SetupToReturnThroughMemory(ccLocInfo); + return; + } + uint32 fpSize; + AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; + ccLocInfo.regCount = static_cast(ClassifyAggregate(beCommon, retTy, classes, + sizeof(classes) / sizeof(AArch64ArgumentClass), fpSize)); + if (classes[0] == kAArch64FloatClass) { + switch (ccLocInfo.regCount) { + case kFourRegister: + ccLocInfo.reg3 = AArch64Abi::floatReturnRegs[3]; + break; + case kThreeRegister: + ccLocInfo.reg2 = AArch64Abi::floatReturnRegs[2]; + break; + case kTwoRegister: + ccLocInfo.reg1 = AArch64Abi::floatReturnRegs[1]; + break; + case kOneRegister: + ccLocInfo.reg0 = AArch64Abi::floatReturnRegs[0]; + break; + default: + CHECK_FATAL(0, "AArch64CallConvImpl: unsupported"); + } + if (fpSize == k4ByteSize) { + ccLocInfo.primTypeOfReg0 = ccLocInfo.primTypeOfReg1 = PTY_f32; + } else { + ccLocInfo.primTypeOfReg0 = ccLocInfo.primTypeOfReg1 = PTY_f64; + } + return; + } else if (ccLocInfo.regCount == 0) { + SetupToReturnThroughMemory(ccLocInfo); + return; + } else { + if (ccLocInfo.regCount == 1) { + /* passing in registers */ + if (classes[0] == kAArch64FloatClass) { + ccLocInfo.reg0 = AArch64Abi::floatReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = PTY_f64; + } else { + ccLocInfo.reg0 = AArch64Abi::intReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = PTY_i64; + } + } else { + DEBUG_ASSERT(ccLocInfo.regCount <= k2ByteSize, "reg count from ClassifyAggregate() should be 0, 1, or 2"); + DEBUG_ASSERT(classes[0] == kAArch64IntegerClass, "error val :classes[0]"); + DEBUG_ASSERT(classes[1] == kAArch64IntegerClass, "error val :classes[1]"); + ccLocInfo.reg0 = AArch64Abi::intReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = PTY_i64; + ccLocInfo.reg1 = AArch64Abi::intReturnRegs[1]; + ccLocInfo.primTypeOfReg1 = PTY_i64; + } + return; + } + } + default: + CHECK_FATAL(false, "NYI"); + } +} + +void AArch64CallConvImpl::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const { + DEBUG_ASSERT(pLoc.reg1 == kRinvalid, "make sure reg1 equal kRinvalid"); + PrimType pType = retTy2.GetPrimType(); + switch (pType) { + case PTY_void: + break; + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_ptr: + case PTY_ref: + case PTY_a64: + case PTY_u64: + case PTY_i64: + pLoc.reg1 = AArch64Abi::intReturnRegs[1]; + pLoc.primTypeOfReg1 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */ + break; + default: + CHECK_FATAL(false, "NYI"); + } +} + +/* + * From "ARM Procedure Call Standard for ARM 64-bit Architecture" + * ARM IHI 0055C_beta, 6th November 2013 + * $ 5.1 machine Registers + * $ 5.1.1 General-Purpose Registers + *

(docs/README.md: this hunk replaces the former ts2abc option table; the short-form flags -m, -l, -a, -d, -s, -o, -t, -h, -v and the --modules, --debug-log, --debug, --show-statistics, --timeout, --bc-version, and --bc-min-version options are dropped. The es2abc options are:)

| Option | Description | Value Range | Default Value |
| ------ | ----------- | ----------- | ------------- |
| --debug-info | Provides debug information. | - | - |
| --debugger-evaluate-expression | Evaluates base64 style expressions in the debugger. | - | - |
| --dump-assembly | Outputs an assembly file. | - | - |
| --dump-ast | Prints the parsed AST (Abstract Syntax Tree). | - | - |
| --dump-debug-info | Prints debug information. | - | - |
| --dump-literal-buffer | Prints the content of the literal buffer. | - | - |
| --dump-size-stat | Displays statistics about bytecodes. | - | - |
| --extension | Specifies the input file type. | ['js', 'ts', 'as'] | - |
| --help | Displays help information. | - | - |
| --module | Compiles the code based on the ECMAScript standard module. | - | - |
| --opt-level | Specifies the level for compilation optimization. | ['0', '1', '2'] | 0 |
| --output | Specifies the path of the output file. | - | - |
| --parse-only | Parses the input file only. | - | - |
| --thread | Specifies the number of threads used to generate bytecode. | 0 to the number of threads supported by the machine | 0 |
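A representative invocation combining several of the options above; the file names and chosen values are illustrative, not taken from the patch:

```
$ ./es2abc --module --opt-level 2 --dump-assembly --output hello.abc hello.ts
```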

Note + * SP Stack Pointer + * R30/LR Link register Stores the return address. + * We push it into stack along with FP on function + * entry using STP and restore it on function exit + * using LDP even if the function is a leaf (i.e., + * it does not call any other function) because it + * is free (we have to store FP anyway). So, if a + * function is a leaf, we may use it as a temporary + * register. + * R29/FP Frame Pointer + * R19-R28 Callee-saved + * registers + * R18 Platform reg Can we use it as a temporary register? + * R16,R17 IP0,IP1 Maybe used as temporary registers. Should be + * given lower priorities. (i.e., we push them + * into the free register stack before the others) + * R9-R15 Temporary registers, caller-saved + * Note: + * R16 and R17 may be used by a linker as a scratch register between + * a routine and any subroutine it calls. They can also be used within a + * routine to hold intermediate values between subroutine calls. + * + * The role of R18 is platform specific. If a platform ABI has need of + * a dedicated general purpose register to carry inter-procedural state + * (for example, the thread context) then it should use this register for + * that purpose. If the platform ABI has no such requirements, then it should + * use R18 as an additional temporary register. The platform ABI specification + * must document the usage for this register. + * + * A subroutine invocation must preserve the contents of the registers R19-R29 + * and SP. All 64 bits of each value stored in R19-R29 must be preserved, even + * when using the ILP32 data model. + * + * $ 5.1.2 SIMD and Floating-Point Registers + * + * The first eight registers, V0-V7, are used to pass argument values into + * a subroutine and to return result values from a function. They may also + * be used to hold intermediate values within a routine. + * + * V8-V15 must be preserved by a callee across subroutine calls; the + * remaining registers do not need to be preserved( or caller-saved). + * Additionally, only the bottom 64 bits of each value stored in V8- + * V15 need to be preserved. + */ +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b316fe0625a7ab2c213c1b39f6d0ad6ff30877a --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "aarch64_cfgo.h" +#include "aarch64_isa.h" + +namespace maplebe { +/* Initialize cfg optimization patterns */ +void AArch64CFGOptimizer::InitOptimizePatterns() { + /* disable the pass that conflicts with cfi */ + if (!cgFunc->GenCfi()) { + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + } + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); +} + +uint32 AArch64FlipBRPattern::GetJumpTargetIdx(const Insn &insn) { + return AArch64isa::GetJumpTargetIdx(insn); +} +MOperator AArch64FlipBRPattern::FlipConditionOp(MOperator flippedOp) { + return AArch64isa::FlipConditionOp(flippedOp); +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b19fc885d04a4b1b28d6120ef03865f91d82a3dc --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp @@ -0,0 +1,371 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cg.h" +#include "mir_builder.h" +#include "becommon.h" +#include "label_creation.h" +#include "alignment.h" + +namespace maplebe { +#include "immvalid.def" +#define DEFINE_MOP(...) 
{__VA_ARGS__}, +const InsnDesc AArch64CG::kMd[kMopLast] = { +#include "aarch64_md.def" +}; +#undef DEFINE_MOP + +std::array, kIntRegTypeNum> AArch64CG::intRegNames = { + std::array { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", + "err11", "err12", "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + "err23", "err24", "err25", "err26", "err27", "err28", "err", "err", "err", "errsp", "errzr", /* x29 is fp */ + "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "b10", "b11", + "b12", "b13", "b14", "b15", "b16", "b17", "b18", "b19", "b20", "b21", "b22", "b23", + "b24", "b25", "b26", "b27", "b28", "b29", "b30", "b31", "errMaxRegNum", "rflag" }, + std::array { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", + "err11", "err12", "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + "err23", "err24", "err25", "err26", "err27", "err28", "err29", "err30", "err31", "errsp", "errzr", /* x29 is fp */ + "h0", "h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9", "h10", "h11", + "h12", "h13", "h14", "h15", "h16", "h17", "h18", "h19", "h20", "h21", "h22", "h23", + "h24", "h25", "h26", "h27", "h28", "h29", "h30", "h31", "errMaxRegNum", "rflag" }, + std::array { + "err", "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11", "w12", "w13", "w14", + "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26", "w27", "w28", + "w29", "err", "err", "wsp", "wzr", /* x29 is fp */ + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", + "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", + "errMaxRegNum", "rflag" }, + std::array { + "err", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", + "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", + "x29" /* use X40 when debug */, "sp", "xzr", /* x29 is fp */ + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", + "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", + "errMaxRegNum", "rflag" }, + std::array { + "err", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", + "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", + "x29" /* use X40 when debug */, "sp", "xzr", /* x29 is fp */ + "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", + "q16", "q17", "q18", "q19", "q20", "q21", "q22", "q23", "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31", + "errMaxRegNum", "rflag" } +}; + +std::array AArch64CG::vectorRegNames = { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", + "err11", "err12", "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + /* x29 is fp, err40 is fp before RA */ + "err23", "err24", "err25", "err26", "err27", "err28", "err29", "err30", "errsp", "errzr", "err40", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", 
"v31", + "errMaxRegNum", "rflag" }; + +bool AArch64CG::IsExclusiveFunc(MIRFunction &mirFunc) { + const std::string &funcName = mirFunc.GetName(); + for (const auto &it : ehExclusiveNameVec) { + if (it.compare(funcName) == 0) { + return true; + } + } + return false; +} +namespace wordsMap { + /* + * Generate object maps. + * + * 1. each class record its GCTIB in method meta (not read only meta) + * 2. GCTIB include: header protoType; n bitmap word; bitmap word + * 3. each reference word(4 or 8 bytes) is represented by 2 bits + * 00: not ref + * 01: normal ref + * 10: weak ref + * 11: unowned ref + * + * For example, if a scalar object has five ptr fields at offsets 24, 40(weak), + * 64(unowned), the generated code will be like: + * + * MCC_GCTIB__xxx: + * .long 0x40 // object has child reference + * .long 1 // one word in the bitmap + * .quad 0b110000100001000000 + * ... + */ + const uint32 kRefWordsPerMapWord = 32; /* contains bitmap for 32 ref words in 64 bits */ + const uint32 kLogRefWordsPerMapWord = 5; +#ifdef USE_32BIT_REF + const uint32 kReferenceWordSize = 4; + const uint32 kLog2ReferenceWordSize = 2; +#else + const uint32 kReferenceWordSize = 8; + const uint32 kLog2ReferenceWordSize = 3; +#endif + const uint32 kInMapWordOffsetMask = ((kReferenceWordSize * kRefWordsPerMapWord) - 1); + const uint32 kInMapWordIndexShift = (kLog2ReferenceWordSize - 1); + const uint32 kMapWordIndexShift = (kLog2ReferenceWordSize + kLogRefWordsPerMapWord); + + const uint64 kRefBits = 1; + const uint64 kWeakRefBits = 2; + const uint64 kUnownedRefBits = 3; + + /* + * Give a structrue type, calculate its bitmap_vector + */ + static void GetGCTIBBitMapWords(const BECommon &beCommon, MIRStructType &stType, std::vector &bitmapWords) { + bitmapWords.clear(); + if (stType.GetKind() == kTypeClass) { + uint64 curBitmap = 0; + uint32 curBitmapIndex = 0; + uint32 prevOffset = 0; + for (const auto &fieldInfo : beCommon.GetJClassLayout(static_cast(stType))) { + if (fieldInfo.IsRef()) { + uint32 curOffset = fieldInfo.GetOffset(); + /* skip meta field */ + if (curOffset == 0) { + continue; + } + CHECK_FATAL((curOffset > prevOffset) || (prevOffset == 0), "not ascending offset"); + uint32 wordIndex = curOffset >> kMapWordIndexShift; + if (wordIndex > curBitmapIndex) { + bitmapWords.emplace_back(curBitmap); + for (uint32 i = curBitmapIndex + 1; i < wordIndex; i++) { + bitmapWords.emplace_back(0); + } + curBitmap = 0; + curBitmapIndex = wordIndex; + } + uint32 bitOffset = (curOffset & kInMapWordOffsetMask) >> kInMapWordIndexShift; + if (CGOptions::IsGCOnly()) { + /* ignore unowned/weak when GCONLY is enabled. 
+            curBitmap |= (kRefBits << bitOffset);
+          } else if (fieldInfo.IsUnowned()) {
+            curBitmap |= (kUnownedRefBits << bitOffset);
+          } else if (fieldInfo.IsWeak()) {
+            curBitmap |= (kWeakRefBits << bitOffset);
+          } else {
+            /* ref */
+            curBitmap |= (kRefBits << bitOffset);
+          }
+          prevOffset = curOffset;
+        }
+      }
+      if (curBitmap != 0) {
+        bitmapWords.emplace_back(curBitmap);
+      }
+    } else if (stType.GetKind() != kTypeInterface) {
+      /* interface doesn't have reference fields */
+      CHECK_FATAL(false, "GetGCTIBBitMapWords unexpected type");
+    }
+  }
+}
+
+bool AArch64CG::IsTargetInsn(MOperator mOp) const {
+  return (mOp > MOP_undef && mOp <= MOP_nop);
+}
+bool AArch64CG::IsClinitInsn(MOperator mOp) const {
+  return (mOp == MOP_clinit || mOp == MOP_clinit_tail || mOp == MOP_adrp_ldr);
+}
+bool AArch64CG::IsPseudoInsn(MOperator mOp) const {
+  return (mOp >= MOP_pseudo_param_def_x && mOp < MOP_nop);
+}
+
+bool AArch64CG::IsEffectiveCopy(Insn &insn) const {
+  MOperator mOp = insn.GetMachineOpcode();
+  if (mOp >= MOP_xmovrr && mOp <= MOP_xvmovrv) {
+    return true;
+  }
+  if (mOp == MOP_vmovuu || mOp == MOP_vmovvv) {
+    return true;
+  }
+  if ((mOp >= MOP_xaddrrr && mOp <= MOP_ssub) || (mOp >= MOP_xlslrri6 && mOp <= MOP_wlsrrrr)) {
+    Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd);
+    if (opnd2.IsIntImmediate()) {
+      auto &immOpnd = static_cast<ImmOperand&>(opnd2);
+      if (immOpnd.IsZero()) {
+        return true;
+      }
+    }
+  }
+  if (mOp > MOP_xmulrrr && mOp <= MOP_xvmuld) {
+    Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd);
+    if (opnd2.IsIntImmediate()) {
+      auto &immOpnd = static_cast<ImmOperand&>(opnd2);
+      if (immOpnd.GetValue() == 1) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+void AArch64CG::DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const {
+  A64OpndDumpVisitor visitor(opndDesc);
+  opnd.Accept(visitor);
+}
+
+/*
+ * Find out whether the same GCTIB (with identical rcheader and bitmap) already
+ * exists for a different class. If so, reuse it; if not, emit and record a new GCTIB.
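+ * For example, two classes whose reference fields sit at the same offsets with the
+ * same ref kinds produce equal keys (same rcheader and bitmap words), so the second
+ * class is simply mapped in symbolPatternMap to the pattern already emitted for the first.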
+ */
+void AArch64CG::FindOrCreateRepresentiveSym(std::vector<uint64> &bitmapWords, uint32 rcHeader,
+                                            const std::string &name) {
+  GCTIBKey *key = memPool->New<GCTIBKey>(allocator, rcHeader, bitmapWords);
+  const std::string &gcTIBName = GCTIB_PREFIX_STR + name;
+  MapleUnorderedMap<GCTIBKey*, GCTIBPattern*>::const_iterator iter = keyPatternMap.find(key);
+  if (iter == keyPatternMap.end() || gcTIBName.compare("MCC_GCTIB__Ljava_2Flang_2FObject_3B") == 0) {
+    /* Emit the GCTIB label for the class */
+    GCTIBPattern *ptn = memPool->New<GCTIBPattern>(*key, *memPool);
+
+    if (gcTIBName.compare("MCC_GCTIB__Ljava_2Flang_2FObject_3B") == 0) {
+      ptn->SetName("MCC_GCTIB__Ljava_2Flang_2FObject_3B");
+    }
+    (void)keyPatternMap.insert(std::make_pair(key, ptn));
+    (void)symbolPatternMap.insert(std::make_pair(gcTIBName, ptn));
+
+    /* Emit GCTIB pattern */
+    std::string ptnString = "\t.type " + ptn->GetName() + ", %object\n" + "\t.data\n" + "\t.align 3\n";
+
+    MIRSymbol *gcTIBSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(
+        GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::GetInternalNameLiteral(gcTIBName)));
+    if (gcTIBSymbol != nullptr && gcTIBSymbol->GetStorageClass() == kScFstatic) {
+      ptnString += "\t.local ";
+    } else {
+      ptnString += "\t.global ";
+    }
+
+    Emitter *emitter = GetEmitter();
+    emitter->Emit(ptnString);
+    emitter->Emit(ptn->GetName());
+    emitter->Emit("\n");
+
+    /* Emit the GCTIB pattern label for the class */
+    emitter->Emit(ptn->GetName());
+    emitter->Emit(":\n");
+
+    emitter->Emit("\t.long ");
+    emitter->EmitHexUnsigned(rcHeader);
+    emitter->Emit("\n");
+
+    /* generate n_bitmap word */
+    emitter->Emit("\t.long "); /* AArch64-specific. Generate a 64-bit value. */
+    emitter->EmitDecUnsigned(bitmapWords.size());
+    emitter->Emit("\n");
+
+    /* Emit each bitmap word */
+    for (const auto &bitmapWord : bitmapWords) {
+      if (!CGOptions::IsQuiet()) {
+        LogInfo::MapleLogger() << " bitmap_word: 0x" << bitmapWord << " " << PRIx64 << "\n";
+      }
+      emitter->Emit("\t.quad "); /* AArch64-specific. Generate a 64-bit value. */
+      emitter->EmitHexUnsigned(bitmapWord);
+      emitter->Emit("\n");
+    }
+    if (gcTIBSymbol != nullptr && gcTIBSymbol->GetStorageClass() != kScFstatic) {
+      /* add local symbol REF_XXX to every global GCTIB symbol */
+      CreateRefSymForGlobalPtn(*ptn);
+      keyPatternMap[key] = ptn;
+    }
+  } else {
+    (void)symbolPatternMap.insert(make_pair(gcTIBName, iter->second));
+  }
+}
+
+/*
+ * Add a local symbol REF_XXX to each global GCTIB symbol,
+ * and replace the global GCTIBPattern in keyPatternMap.
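+ * For example (assuming REF_PREFIX_STR expands to "REF_"), a global pattern
+ * MCC_GCTIB__X gets a local alias emitted as: REF_MCC_GCTIB__X: .quad MCC_GCTIB__X,
+ * and the pattern is then renamed to that REF_ symbol.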
+ */ +void AArch64CG::CreateRefSymForGlobalPtn(GCTIBPattern &ptn) const { + const std::string &refPtnString = REF_PREFIX_STR + ptn.GetName(); + const std::string &ptnString = "\t.type " + refPtnString + ", %object\n" + + "\t.data\n" + + "\t.align 3\n" + + "\t.local " + refPtnString + "\n" + + refPtnString + ":\n" + + "\t.quad " + ptn.GetName() + "\n"; + Emitter *emitter = GetEmitter(); + emitter->Emit(ptnString); + ptn.SetName(refPtnString); +} + +std::string AArch64CG::FindGCTIBPatternName(const std::string &name) const { + auto iter = symbolPatternMap.find(name); + if (iter == symbolPatternMap.end()) { + CHECK_FATAL(false, "No GCTIB pattern found for symbol: %s", name.c_str()); + } + return iter->second->GetName(); +} + +void AArch64CG::GenerateObjectMaps(BECommon &beCommon) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << "DEBUG: Generating object maps...\n"; + } + + for (auto &tyId : GetMIRModule()->GetClassList()) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << "Class tyIdx: " << tyId << "\n"; + } + TyIdx tyIdx(tyId); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + DEBUG_ASSERT(ty != nullptr, "ty nullptr check"); + /* Only emit GCTIB for classes owned by this module */ + DEBUG_ASSERT(ty->IsStructType(), "ty isn't MIRStructType* in AArch64CG::GenerateObjectMaps"); + MIRStructType *strTy = static_cast(ty); + if (!strTy->IsLocal()) { + continue; + } + + GStrIdx nameIdx = ty->GetNameStrIdx(); + + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); + + /* Emit for a class */ + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " name: " << name << "\n"; + } + + std::vector bitmapWords; + wordsMap::GetGCTIBBitMapWords(beCommon, *strTy, bitmapWords); + /* fill specific header according to the size of bitmapWords */ + uint32 rcHeader = (!bitmapWords.empty()) ? 0x40 : 0; + FindOrCreateRepresentiveSym(bitmapWords, rcHeader, name); + } +} + +void AArch64CG::EnrollTargetPhases(MaplePhaseManager *pm) const { + if (!GetMIRModule()->IsCModule()) { + CGOptions::DisableCGSSA(); + } +#include "aarch64_phases.def" +} + +Insn &AArch64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) { + DEBUG_ASSERT(defOpnd.IsRegister(), "build SSA on register operand"); + CHECK_FATAL(defOpnd.IsOfIntClass() || defOpnd.IsOfFloatOrSIMDClass(), " unknown operand type "); + bool is64bit = defOpnd.GetSize() == k64BitSize; + MOperator mop = MOP_nop; + if (defOpnd.GetSize() == k128BitSize) { + DEBUG_ASSERT(defOpnd.IsOfFloatOrSIMDClass(), "unexpect 128bit int operand in aarch64"); + mop = MOP_xvphivd; + } else { + mop = defOpnd.IsOfIntClass() ? is64bit ? MOP_xphirr : MOP_wphirr : is64bit ? MOP_xvphid : MOP_xvphis; + } + DEBUG_ASSERT(mop != MOP_nop, "unexpect 128bit int operand in aarch64"); + return GetCurCGFuncNoConst()->GetInsnBuilder()->BuildInsn(mop, defOpnd, listParam); +} + +PhiOperand &AArch64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) { + return *mp.New(mAllocator); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0e905a906943f0bdbca26a6488dcb6b9607dff28 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -0,0 +1,12087 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cg.h" +#include "aarch64_cgfunc.h" +#include +#include +#include +#include +#include "cfi.h" +#include "mpl_logging.h" +#include "rt.h" +#include "opcode_info.h" +#include "mir_builder.h" +#include "mir_symbol_builder.h" +#include "mpl_atomic.h" +#include "metadata_layout.h" +#include "emit.h" +#include "simplify.h" +#include + +namespace maplebe { +using namespace maple; +CondOperand AArch64CGFunc::ccOperands[kCcLast] = { + CondOperand(CC_EQ), + CondOperand(CC_NE), + CondOperand(CC_CS), + CondOperand(CC_HS), + CondOperand(CC_CC), + CondOperand(CC_LO), + CondOperand(CC_MI), + CondOperand(CC_PL), + CondOperand(CC_VS), + CondOperand(CC_VC), + CondOperand(CC_HI), + CondOperand(CC_LS), + CondOperand(CC_GE), + CondOperand(CC_LT), + CondOperand(CC_GT), + CondOperand(CC_LE), + CondOperand(CC_AL), +}; + +namespace { +constexpr int32 kSignedDimension = 2; /* signed and unsigned */ +constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ +constexpr int32 kFloatByteSizeDimension = 3; /* 4 bytes, 8 bytes, 16 bytes(vector) */ +constexpr int32 kShiftAmount12 = 12; /* for instruction that can use shift, shift amount must be 0 or 12 */ + +MOperator ldIs[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wldrb, MOP_wldrh, MOP_wldr, MOP_xldr }, + /* signed == 1 */ + { MOP_wldrsb, MOP_wldrsh, MOP_wldr, MOP_xldr } +}; + +MOperator stIs[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr }, + /* signed == 1 */ + { MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr } +}; + +MOperator ldIsAcq[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wldarb, MOP_wldarh, MOP_wldar, MOP_xldar }, + /* signed == 1 */ + { MOP_undef, MOP_undef, MOP_wldar, MOP_xldar } +}; + +MOperator stIsRel[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wstlrb, MOP_wstlrh, MOP_wstlr, MOP_xstlr }, + /* signed == 1 */ + { MOP_wstlrb, MOP_wstlrh, MOP_wstlr, MOP_xstlr } +}; + +MOperator ldFs[kFloatByteSizeDimension] = { MOP_sldr, MOP_dldr, MOP_qldr }; +MOperator stFs[kFloatByteSizeDimension] = { MOP_sstr, MOP_dstr, MOP_qstr }; + +MOperator ldFsAcq[kFloatByteSizeDimension] = { MOP_undef, MOP_undef, MOP_undef }; +MOperator stFsRel[kFloatByteSizeDimension] = { MOP_undef, MOP_undef, MOP_undef }; + +/* extended to unsigned ints */ +MOperator uextIs[kIntByteSizeDimension][kIntByteSizeDimension] = { + /* u8 u16 u32 u64 */ + { MOP_undef, MOP_xuxtb32, MOP_xuxtb32, MOP_xuxtb32}, /* u8/i8 */ + { MOP_undef, MOP_undef, MOP_xuxth32, MOP_xuxth32}, /* u16/i16 */ + { MOP_undef, MOP_undef, MOP_xuxtw64, MOP_xuxtw64}, /* u32/i32 */ + { MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */ +}; + +/* extended to signed ints */ +MOperator extIs[kIntByteSizeDimension][kIntByteSizeDimension] = { + /* i8 i16 i32 i64 */ + { MOP_undef, MOP_xsxtb32, MOP_xsxtb32, MOP_xsxtb64}, /* u8/i8 */ + { MOP_undef, MOP_undef, MOP_xsxth32, MOP_xsxth64}, /* u16/i16 */ + { MOP_undef, MOP_undef, MOP_undef, MOP_xsxtw64}, /* u32/i32 */ + { MOP_undef, MOP_undef, MOP_undef, MOP_undef} 
/* u64/u64 */ +}; + +MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType, AArch64isa::MemoryOrdering memOrd) { + DEBUG_ASSERT(__builtin_popcount(static_cast(memOrd)) <= 1, "must be kMoNone or kMoAcquire"); + DEBUG_ASSERT(primType != PTY_ptr, "should have been lowered"); + DEBUG_ASSERT(primType != PTY_ref, "should have been lowered"); + DEBUG_ASSERT(bitSize >= k8BitSize, "PTY_u1 should have been lowered?"); + DEBUG_ASSERT(__builtin_popcount(bitSize) == 1, "PTY_u1 should have been lowered?"); + if (isLoad) { + DEBUG_ASSERT((memOrd == AArch64isa::kMoNone) || (memOrd == AArch64isa::kMoAcquire) || + (memOrd == AArch64isa::kMoAcquireRcpc) || (memOrd == AArch64isa::kMoLoacquire), "unknown Memory Order"); + } else { + DEBUG_ASSERT((memOrd == AArch64isa::kMoNone) || (memOrd == AArch64isa::kMoRelease) || + (memOrd == AArch64isa::kMoLorelease), "unknown Memory Order"); + } + + /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */ + if ((IsPrimitiveInteger(primType) || primType == PTY_agg) && !IsPrimitiveVector(primType)) { + MOperator(*table)[kIntByteSizeDimension]; + if (isLoad) { + table = (memOrd == AArch64isa::kMoAcquire) ? ldIsAcq : ldIs; + } else { + table = (memOrd == AArch64isa::kMoRelease) ? stIsRel : stIs; + } + + int32 signedUnsigned = IsUnsignedInteger(primType) ? 0 : 1; + if (primType == PTY_agg) { + CHECK_FATAL(bitSize >= k8BitSize, " unexpect agg size"); + bitSize = static_cast(RoundUp(bitSize, k8BitSize)); + DEBUG_ASSERT((bitSize & (bitSize - 1)) == 0, "bitlen error"); + } + + /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */ + if (primType == PTY_i128 || primType == PTY_u128) { + bitSize = k64BitSize; + } + uint32 size = static_cast(__builtin_ffs(static_cast(bitSize))) - 4; + DEBUG_ASSERT(size <= 3, "wrong bitSize"); + return table[signedUnsigned][size]; + } else { + MOperator *table = nullptr; + if (isLoad) { + table = (memOrd == AArch64isa::kMoAcquire) ? ldFsAcq : ldFs; + } else { + table = (memOrd == AArch64isa::kMoRelease) ? 
stFsRel : stFs; + } + + /* __builtin_ffs(x) returns: 32 -> 6, 64 -> 7, 128 -> 8 */ + uint32 size = static_cast(__builtin_ffs(static_cast(bitSize))) - 6; + DEBUG_ASSERT(size <= 2, "size must be 0 to 2"); + return table[size]; + } +} +} + +bool IsBlkassignForPush(const BlkassignoffNode &bNode) { + BaseNode *dest = bNode.Opnd(0); + bool spBased = false; + if (dest->GetOpCode() == OP_regread) { + RegreadNode &node = static_cast(*dest); + if (-node.GetRegIdx() == kSregSp) { + spBased = true; + } + } + return spBased; +} + +RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType) { + RegOperand *resOpnd = nullptr; + if (parent.GetOpCode() == OP_regassign) { + auto ®AssignNode = static_cast(parent); + PregIdx pregIdx = regAssignNode.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + /* if it is one of special registers */ + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + } else { + resOpnd = &CreateRegisterOperandOfType(primType); + } + return *resOpnd; +} + +MOperator AArch64CGFunc::PickLdInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd) const { + return PickLdStInsn(true, bitSize, primType, memOrd); +} + +MOperator AArch64CGFunc::PickStInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd) const { + return PickLdStInsn(false, bitSize, primType, memOrd); +} + +MOperator AArch64CGFunc::PickExtInsn(PrimType dtype, PrimType stype) const { + int32 sBitSize = static_cast(GetPrimTypeBitSize(stype)); + int32 dBitSize = static_cast(GetPrimTypeBitSize(dtype)); + /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */ + if (IsPrimitiveInteger(stype) && IsPrimitiveInteger(dtype)) { + MOperator(*table)[kIntByteSizeDimension]; + table = IsUnsignedInteger(stype) ? uextIs : extIs; + if (stype == PTY_i128 || stype == PTY_u128) { + sBitSize = static_cast(k64BitSize); + } + /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */ + uint32 row = static_cast(__builtin_ffs(sBitSize)) - k4BitSize; + DEBUG_ASSERT(row <= 3, "wrong bitSize"); + if (dtype == PTY_i128 || dtype == PTY_u128) { + dBitSize = static_cast(k64BitSize); + } + uint32 col = static_cast(__builtin_ffs(dBitSize)) - k4BitSize; + DEBUG_ASSERT(col <= 3, "wrong bitSize"); + return table[row][col]; + } + CHECK_FATAL(0, "extend not primitive integer"); + return MOP_undef; +} + +MOperator AArch64CGFunc::PickMovBetweenRegs(PrimType destType, PrimType srcType) const { + if (IsPrimitiveVector(destType) && IsPrimitiveVector(srcType)) { + return GetPrimTypeSize(srcType) == k8ByteSize ? MOP_vmovuu : MOP_vmovvv; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveInteger(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_wmovrr : MOP_xmovrr; + } + if (IsPrimitiveFloat(destType) && IsPrimitiveFloat(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovs : MOP_xvmovd; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveFloat(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovrs : MOP_xvmovrd; + } + if (IsPrimitiveFloat(destType) && IsPrimitiveInteger(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovsr : MOP_xvmovdr; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveVector(srcType)) { + return GetPrimTypeSize(srcType) == k8ByteSize ? MOP_vwmovru : + GetPrimTypeSize(destType) <= k4ByteSize ? 
MOP_vwmovrv : MOP_vxmovrv; + } + CHECK_FATAL(false, "unexpected operand primtype for mov"); + return MOP_undef; +} + +MOperator AArch64CGFunc::PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const { + CHECK_FATAL(lhs.GetRegisterType() == rhs.GetRegisterType(), "PickMovInsn: unequal kind NYI"); + CHECK_FATAL(lhs.GetSize() == rhs.GetSize(), "PickMovInsn: unequal size NYI"); + DEBUG_ASSERT(((lhs.GetSize() < k64BitSize) || (lhs.GetRegisterType() == kRegTyFloat)), + "should split the 64 bits or more mov"); + if (lhs.GetRegisterType() == kRegTyInt) { + return MOP_wmovrr; + } + if (lhs.GetRegisterType() == kRegTyFloat) { + return (lhs.GetSize() <= k32BitSize) ? MOP_xvmovs : MOP_xvmovd; + } + DEBUG_ASSERT(false, "PickMovInsn: kind NYI"); + return MOP_undef; +} + +void AArch64CGFunc::SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect) { + DEBUG_ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); + DEBUG_ASSERT(memOrd != AArch64isa::kMoNone, "Just checking"); + + uint32 ssize = isDirect ? src.GetSize() : GetPrimTypeBitSize(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + MOperator mOp = PickLdInsn(ssize, stype, memOrd); + + Operand *newSrc = &src; + auto &memOpnd = static_cast(src); + OfstOperand *immOpnd = memOpnd.GetOffsetImmediate(); + int32 offset = static_cast(immOpnd->GetOffsetValue()); + RegOperand *origBaseReg = memOpnd.GetBaseRegister(); + if (offset != 0) { + RegOperand &resOpnd = CreateRegisterOperandOfType(PTY_i64); + DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check"); + SelectAdd(resOpnd, *origBaseReg, *immOpnd, PTY_i64); + newSrc = &CreateReplacementMemOperand(ssize, resOpnd, 0); + } + + std::string key; + if (isDirect && GetCG()->GenerateVerboseCG()) { + key = GenerateMemOpndVerbose(src); + } + + /* Check if the right load-acquire instruction is available. */ + if (mOp != MOP_undef) { + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, dest, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } else { + if (IsPrimitiveFloat(stype)) { + /* Uses signed integer version ldar followed by a floating-point move(fmov). */ + DEBUG_ASSERT(stype == dtype, "Just checking"); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = CreateRegisterOperandOfType(itype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, itype, memOrd), regOpnd, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + mOp = (stype == PTY_f32) ? MOP_xvmovsr : MOP_xvmovdr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, regOpnd)); + } else { + /* Use unsigned version ldarb/ldarh followed by a sign-extension instruction(sxtb/sxth). */ + DEBUG_ASSERT((ssize == k8BitSize) || (ssize == k16BitSize), "Just checking"); + PrimType utype = (ssize == k8BitSize) ? PTY_u8 : PTY_u16; + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, utype, memOrd), dest, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + mOp = ((dsize == k32BitSize) ? ((ssize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32) + : ((ssize == k8BitSize) ? 
MOP_xsxtb64 : MOP_xsxth64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, dest)); + } + } +} + +void AArch64CGFunc::SelectStoreRelease(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect) { + DEBUG_ASSERT(dest.GetKind() == Operand::kOpdMem, "Just checking"); + + uint32 dsize = isDirect ? dest.GetSize() : GetPrimTypeBitSize(stype); + MOperator mOp = PickStInsn(dsize, stype, memOrd); + + Operand *newDest = &dest; + MemOperand *memOpnd = static_cast(&dest); + OfstOperand *immOpnd = memOpnd->GetOffsetImmediate(); + int32 offset = static_cast(immOpnd->GetOffsetValue()); + RegOperand *origBaseReg = memOpnd->GetBaseRegister(); + if (offset != 0) { + RegOperand &resOpnd = CreateRegisterOperandOfType(PTY_i64); + DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check"); + SelectAdd(resOpnd, *origBaseReg, *immOpnd, PTY_i64); + newDest = &CreateReplacementMemOperand(dsize, resOpnd, 0); + } + + std::string key; + if (isDirect && GetCG()->GenerateVerboseCG()) { + key = GenerateMemOpndVerbose(dest); + } + + /* Check if the right store-release instruction is available. */ + if (mOp != MOP_undef) { + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, src, *newDest); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } else { + /* Use a floating-point move(fmov) followed by a stlr. */ + DEBUG_ASSERT(IsPrimitiveFloat(stype), "must be float type"); + CHECK_FATAL(stype == dtype, "Just checking"); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = CreateRegisterOperandOfType(itype); + mOp = (stype == PTY_f32) ? MOP_xvmovrs : MOP_xvmovrd; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, regOpnd, src)); + Insn &insn = GetInsnBuilder()->BuildInsn(PickStInsn(dsize, itype, memOrd), regOpnd, *newDest); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } +} + +void AArch64CGFunc::SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType) { + if (IsPrimitiveInteger(dType) != IsPrimitiveInteger(sType)) { + RegOperand &tempReg = CreateRegisterOperandOfType(sType); + SelectCopyImm(tempReg, src, sType); + SelectCopy(dest, dType, tempReg, sType); + } else { + SelectCopyImm(dest, src, sType); + } +} + +void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype) { + uint32 dsize = GetPrimTypeBitSize(dtype); + DEBUG_ASSERT(IsPrimitiveInteger(dtype), "The type of destination operand must be Integer"); + DEBUG_ASSERT(((dsize == k8BitSize) || (dsize == k16BitSize) || (dsize == k32BitSize) || (dsize == k64BitSize)), + "The destination operand must be >= 8-bit"); + if (src.IsSingleInstructionMovable()) { + MOperator mOp = (dsize == k32BitSize) ? 
MOP_wmovri32 : MOP_xmovri64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, src)); + return; + } + uint64 srcVal = static_cast(src.GetValue()); + /* using mov/movk to load the immediate value */ + if (dsize == k8BitSize) { + /* compute lower 8 bits value */ + if (dtype == PTY_u8) { + /* zero extend */ + srcVal = (srcVal << 56) >> 56; + dtype = PTY_u16; + } else { + /* sign extend */ + srcVal = ((static_cast(srcVal)) << 56) >> 56; + dtype = PTY_i16; + } + dsize = k16BitSize; + } + if (dsize == k16BitSize) { + if (dtype == PTY_u16) { + /* check lower 16 bits and higher 16 bits respectively */ + DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected value"); + DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) == 0, "unexpected value"); + DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected value"); + /* create an imm opereand which represents lower 16 bits of the immediate */ + ImmOperand &srcLower = CreateImmOperand(static_cast(srcVal & 0x0000FFFFULL), k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower)); + return; + } else { + /* sign extend and let `dsize == 32` case take care of it */ + srcVal = ((static_cast(srcVal)) << 48) >> 48; + dsize = k32BitSize; + } + } + if (dsize == k32BitSize) { + /* check lower 16 bits and higher 16 bits respectively */ + DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected val"); + DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0, "unexpected val"); + DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val"); + DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val"); + /* create an imm opereand which represents lower 16 bits of the immediate */ + ImmOperand &srcLower = CreateImmOperand(static_cast(srcVal & 0x0000FFFFULL), k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower)); + /* create an imm opereand which represents upper 16 bits of the immediate */ + ImmOperand &srcUpper = CreateImmOperand(static_cast((srcVal >> k16BitSize) & 0x0000FFFFULL), k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovkri16, dest, srcUpper, *lslOpnd)); + } else { + /* + * partition it into 4 16-bit chunks + * if more 0's than 0xFFFF's, use movz as the initial instruction. + * otherwise, movn. + */ + bool useMovz = BetterUseMOVZ(srcVal); + bool useMovk = false; + /* get lower 32 bits of the immediate */ + uint64 chunkLval = srcVal & 0xFFFFFFFFULL; + /* get upper 32 bits of the immediate */ + uint64 chunkHval = (srcVal >> k32BitSize) & 0xFFFFFFFFULL; + int32 maxLoopTime = 4; + + if (chunkLval == chunkHval) { + /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */ + maxLoopTime = 2; + } + + uint64 sa = 0; + + for (int64 i = 0; i < maxLoopTime; ++i, sa += k16BitSize) { + /* create an imm opereand which represents the i-th 16-bit chunk of the immediate */ + uint64 chunkVal = (srcVal >> (static_cast(sa))) & 0x0000FFFFULL; + if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) { + continue; + } + ImmOperand &src16 = CreateImmOperand(static_cast(chunkVal), k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true); + if (!useMovk) { + /* use movz or movn */ + if (!useMovz) { + src16.BitwiseNegate(); + } + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(useMovz ? 
MOP_xmovzri16 : MOP_xmovnri16, dest, src16, *lslOpnd)); + useMovk = true; + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovkri16, dest, src16, *lslOpnd)); + } + } + + if (maxLoopTime == 2) { + /* copy lower 32 bits to higher 32 bits */ + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, dest, dest, immOpnd, immOpnd)); + } + } +} + +std::string AArch64CGFunc::GenerateMemOpndVerbose(const Operand &src) const { + DEBUG_ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); + const MIRSymbol *symSecond = static_cast(&src)->GetSymbol(); + if (symSecond != nullptr) { + std::string key; + MIRStorageClass sc = symSecond->GetStorageClass(); + if (sc == kScFormal) { + key = "param: "; + } else if (sc == kScAuto) { + key = "local var: "; + } else { + key = "global: "; + } + return key.append(symSecond->GetName()); + } + return ""; +} + +void AArch64CGFunc::SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, + Operand &src, PrimType stype) { + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + const MIRSymbol *sym = static_cast(&src)->GetSymbol(); + if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_acquire)) { + memOrd = AArch64isa::kMoAcquire; + } + + if (memOrd != AArch64isa::kMoNone) { + AArch64CGFunc::SelectLoadAcquire(dest, dtype, src, stype, memOrd, true); + return; + } + Insn *insn = nullptr; + uint32 ssize = src.GetSize(); + PrimType regTy = PTY_void; + RegOperand *loadReg = nullptr; + MOperator mop = MOP_undef; + if (IsPrimitiveFloat(stype) || IsPrimitiveVector(stype)) { + CHECK_FATAL(dsize == ssize, "dsize %u expect equals ssize %u", dtype, ssize); + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src); + } else { + if (stype == PTY_agg && dtype == PTY_agg) { + mop = MOP_undef; + } else { + mop = PickExtInsn(dtype, stype); + } + if (ssize == (GetPrimTypeSize(dtype) * kBitsPerByte) || mop == MOP_undef) { + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src); + } else { + regTy = dsize == k64BitSize ? 
dtype : PTY_i32; + loadReg = &CreateRegisterOperandOfType(regTy); + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), *loadReg, src); + } + } + + if (GetCG()->GenerateVerboseCG()) { + insn->SetComment(GenerateMemOpndVerbose(src)); + } + + GetCurBB()->AppendInsn(*insn); + if (regTy != PTY_void && mop != MOP_undef) { + DEBUG_ASSERT(loadReg != nullptr, "loadReg should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dest, *loadReg)); + } +} + +bool AArch64CGFunc::IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, + bool isIntactIndexed, bool isPostIndexed, bool isPreIndexed) const { + bool isInRange = false; + switch (mOp) { + case MOP_xstr: + case MOP_wstr: + isInRange = + (isIntactIndexed && + ((!is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm32UpperBound)) || + (is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm64UpperBound)))) || + ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) && + (immVal <= kStrLdrPerPostUpperBound)); + break; + case MOP_wstrb: + isInRange = + (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrbLdrbImmUpperBound)) || + ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) && + (immVal <= kStrLdrPerPostUpperBound)); + break; + case MOP_wstrh: + isInRange = + (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrhLdrhImmUpperBound)) || + ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) && + (immVal <= kStrLdrPerPostUpperBound)); + break; + default: + break; + } + return isInRange; +} + +bool AArch64CGFunc::IsStoreMop(MOperator mOp) const { + switch (mOp) { + case MOP_sstr: + case MOP_dstr: + case MOP_qstr: + case MOP_xstr: + case MOP_wstr: + case MOP_wstrb: + case MOP_wstrh: + return true; + default: + return false; + } +} + +void AArch64CGFunc::SplitMovImmOpndInstruction(int64 immVal, RegOperand &destReg, Insn *curInsn) { + bool useMovz = BetterUseMOVZ(immVal); + bool useMovk = false; + /* get lower 32 bits of the immediate */ + uint64 chunkLval = static_cast(immVal) & 0xFFFFFFFFULL; + /* get upper 32 bits of the immediate */ + uint64 chunkHval = (static_cast(immVal) >> k32BitSize) & 0xFFFFFFFFULL; + int32 maxLoopTime = 4; + + if (chunkLval == chunkHval) { + /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */ + maxLoopTime = 2; + } + + uint64 sa = 0; + auto *bb = (curInsn != nullptr) ? curInsn->GetBB() : GetCurBB(); + for (int64 i = 0 ; i < maxLoopTime; ++i, sa += k16BitSize) { + /* create an imm opereand which represents the i-th 16-bit chunk of the immediate */ + uint64 chunkVal = (static_cast(immVal) >> sa) & 0x0000FFFFULL; + if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) { + continue; + } + ImmOperand &src16 = CreateImmOperand(static_cast(chunkVal), k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true); + Insn *newInsn = nullptr; + if (!useMovk) { + /* use movz or movn */ + if (!useMovz) { + src16.BitwiseNegate(); + } + MOperator mOpCode = useMovz ? 
MOP_xmovzri16 : MOP_xmovnri16; + newInsn = &GetInsnBuilder()->BuildInsn(mOpCode, destReg, src16, *lslOpnd); + useMovk = true; + } else { + newInsn = &GetInsnBuilder()->BuildInsn(MOP_xmovkri16, destReg, src16, *lslOpnd); + } + if (curInsn != nullptr) { + bb->InsertInsnBefore(*curInsn, *newInsn); + } else { + bb->AppendInsn(*newInsn); + } + } + + if (maxLoopTime == 2) { + /* copy lower 32 bits to higher 32 bits */ + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, destReg, destReg, immOpnd, immOpnd); + if (curInsn != nullptr) { + bb->InsertInsnBefore(*curInsn, insn); + } else { + bb->AppendInsn(insn); + } + } +} + +void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, + uint32 dsize, Operand &src, PrimType stype) { + if (opndType != Operand::kOpdMem) { + if (!CGOptions::IsArm64ilp32()) { + DEBUG_ASSERT(stype != PTY_a32, ""); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(dtype, stype), dest, src)); + return; + } + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + const MIRSymbol *sym = static_cast(&dest)->GetSymbol(); + if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_release)) { + memOrd = AArch64isa::kMoRelease; + } + + if (memOrd != AArch64isa::kMoNone) { + AArch64CGFunc::SelectStoreRelease(dest, dtype, src, stype, memOrd, true); + return; + } + + bool is64Bits = (dest.GetSize() == k64BitSize) ? true : false; + MOperator strMop = PickStInsn(dsize, stype); + if (!dest.IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + + MemOperand *memOpnd = static_cast(&dest); + DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + if (memOpnd->GetOffsetOperand() == nullptr) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + ImmOperand *immOpnd = static_cast(memOpnd->GetOffsetOperand()); + DEBUG_ASSERT(immOpnd != nullptr, "immOpnd should not be nullptr"); + int64 immVal = immOpnd->GetValue(); + bool isIntactIndexed = memOpnd->IsIntactIndexed(); + bool isPostIndexed = memOpnd->IsPostIndexed(); + bool isPreIndexed = memOpnd->IsPreIndexed(); + DEBUG_ASSERT(!isPostIndexed, "memOpnd should not be post-index type"); + DEBUG_ASSERT(!isPreIndexed, "memOpnd should not be pre-index type"); + bool isInRange = false; + if (!GetMirModule().IsCModule()) { + isInRange = IsImmediateValueInRange(strMop, immVal, is64Bits, isIntactIndexed, isPostIndexed, isPreIndexed); + } else { + isInRange = IsOperandImmValid(strMop, memOpnd, kInsnSecondOpnd); + } + bool isMopStr = IsStoreMop(strMop); + if (isInRange || !isMopStr) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + DEBUG_ASSERT(memOpnd->GetBaseRegister() != nullptr, "nullptr check"); + if (isIntactIndexed) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dsize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, *memOpnd)); + } else if (isPostIndexed || isPreIndexed) { + RegOperand ® = CreateRegisterOperandOfType(PTY_i64); + MOperator mopMov = MOP_xmovri64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMov, reg, *immOpnd)); + MOperator mopAdd = MOP_xaddrrr; + MemOperand &newDest = + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, 
GetPrimTypeBitSize(dtype), memOpnd->GetBaseRegister(), + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); + Insn &insn1 = GetInsnBuilder()->BuildInsn(strMop, src, newDest); + Insn &insn2 = GetInsnBuilder()->BuildInsn(mopAdd, *newDest.GetBaseRegister(), *newDest.GetBaseRegister(), reg); + if (isPostIndexed) { + GetCurBB()->AppendInsn(insn1); + GetCurBB()->AppendInsn(insn2); + } else { + /* isPreIndexed */ + GetCurBB()->AppendInsn(insn2); + GetCurBB()->AppendInsn(insn1); + } + } +} + +void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype) { + DEBUG_ASSERT(dest.IsRegister() || dest.IsMemoryAccessOperand(), ""); + uint32 dsize = GetPrimTypeBitSize(dtype); + if (dest.IsRegister()) { + dsize = dest.GetSize(); + } + Operand::OperandType opnd0Type = dest.GetKind(); + Operand::OperandType opnd1Type = src.GetKind(); + DEBUG_ASSERT(((dsize >= src.GetSize()) || (opnd0Type == Operand::kOpdRegister) || (opnd0Type == Operand::kOpdMem)), "NYI"); + DEBUG_ASSERT(((opnd0Type == Operand::kOpdRegister) || (src.GetKind() == Operand::kOpdRegister)), + "either src or dest should be register"); + + switch (opnd1Type) { + case Operand::kOpdMem: + SelectCopyMemOpnd(dest, dtype, dsize, src, stype); + break; + case Operand::kOpdOffset: + case Operand::kOpdImmediate: + SelectCopyImm(dest, dtype, static_cast(src), stype); + break; + case Operand::kOpdFPImmediate: + CHECK_FATAL(static_cast(src).GetValue() == 0, "NIY"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((dsize == k32BitSize) ? MOP_xvmovsr : MOP_xvmovdr, + dest, GetZeroOpnd(dsize))); + break; + case Operand::kOpdRegister: { + if (opnd0Type == Operand::kOpdRegister && IsPrimitiveVector(stype)) { + /* check vector reg to vector reg move */ + CHECK_FATAL(IsPrimitiveVector(dtype), "invalid vectreg to vectreg move"); + MOperator mop = (dsize <= k64BitSize) ? MOP_vmovuu : MOP_vmovvv; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(dest).AddOpndChain(src); + auto *vecSpecSrc = GetMemoryPool()->New(dsize >> k3ByteSize, k8BitSize); + auto *vecSpecDest = GetMemoryPool()->New(dsize >> k3ByteSize, k8BitSize); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + break; + } + RegOperand &desReg = static_cast(dest); + RegOperand &srcReg = static_cast(src); + if (desReg.GetRegisterNumber() == srcReg.GetRegisterNumber()) { + break; + } + SelectCopyRegOpnd(dest, dtype, opnd0Type, dsize, src, stype); + break; + } + default: + CHECK_FATAL(false, "NYI"); + } +} + +/* This function copies src to a register, the src can be an imm, mem or a label */ +RegOperand &AArch64CGFunc::SelectCopy(Operand &src, PrimType stype, PrimType dtype) { + RegOperand &dest = CreateRegisterOperandOfType(dtype); + SelectCopy(dest, dtype, src, stype); + return dest; +} + +/* + * We need to adjust the offset of a stack allocated local variable + * if we store FP/SP before any other local variables to save an instruction. + * See AArch64CGFunc::OffsetAdjustmentForFPLR() in aarch64_cgfunc.cpp + * + * That is when we !UsedStpSubPairForCallFrameAllocation(). + * + * Because we need to use the STP/SUB instruction pair to store FP/SP 'after' + * local variables when the call frame size is greater that the max offset + * value allowed for the STP instruction (we cannot use STP w/ prefix, LDP w/ + * postfix), if UsedStpSubPairForCallFrameAllocation(), we don't need to + * adjust the offsets. 
+ */ +bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen) { + DEBUG_ASSERT(bitLen >= k8BitSize, "bitlen error"); + DEBUG_ASSERT(bitLen <= k128BitSize, "bitlen error"); + + if (bitLen >= k8BitSize) { + bitLen = static_cast(RoundUp(bitLen, k8BitSize)); + } + DEBUG_ASSERT((bitLen & (bitLen - 1)) == 0, "bitlen error"); + + MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); + if ((mode == MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) { + int32 offsetValue = static_cast(memOpnd.GetOffsetImmediate()->GetOffsetValue()); + if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + offsetValue += static_cast(static_cast(GetMemlayout())->RealStackFrameSize() + 0xff); + } + offsetValue += 2 * kIntregBytelen; /* Refer to the above comment */ + return MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen); + } else { + return false; + } +} + +bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) { + const InsnDesc *md = &AArch64CG::kMd[mOp]; + auto *opndProp = md->opndMD[opndIdx]; + + Operand::OperandType opndTy = opndProp->GetOperandType(); + if (opndTy == Operand::kOpdMem) { + auto *memOpnd = static_cast(o); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX) { + return true; + } + if (md->IsLoadStorePair() || + (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && memOpnd->IsIntactIndexed())) { + int64 offsetValue = memOpnd->GetOffsetImmediate()->GetOffsetValue(); + if (memOpnd->GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + offsetValue += static_cast(GetMemlayout())->RealStackFrameSize() + 0xffL; + } + return md->IsValidImmOpnd(offsetValue); + } else if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { + int32 offsetValue = static_cast(memOpnd->GetOffsetImmediate()->GetOffsetValue()); + return offsetValue == 0; + } else { + CHECK_FATAL(!memOpnd->IsIntactIndexed(), "CHECK WHAT?"); + int32 offsetValue = static_cast(memOpnd->GetOffsetImmediate()->GetOffsetValue()); + return (offsetValue <= static_cast(k256BitSize) && offsetValue >= kNegative256BitSize); + } + } else if (opndTy == Operand::kOpdImmediate) { + return md->IsValidImmOpnd(static_cast(o)->GetValue()); + } + return true; +} + +MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, + RegOperand &baseReg, int64 offset) { + return CreateMemOpnd(baseReg, offset, bitLen); +} + +bool AArch64CGFunc::CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const { + if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) { + return false; + } + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int32 opndVal = static_cast(ofstOpnd->GetOffsetValue()); + int32 maxPimm = memOpnd.GetMaxPIMM(bitLen); + int32 q0 = opndVal / maxPimm; + int32 addend = q0 * maxPimm; + int32 r0 = opndVal - addend; + int32 alignment = memOpnd.GetImmediateOffsetAlignment(bitLen); + int32 r1 = static_cast(r0) & ((1u << static_cast(alignment)) - 1); + addend = addend + r1; + return (addend > 0); +} + +RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum) { + RegOperand *resOpnd = nullptr; + if (baseRegNum == AArch64reg::kRinvalid) { + resOpnd = &CreateRegisterOperandOfType(PTY_i64); + } else if (AArch64isa::IsPhysicalRegister(baseRegNum)) { + resOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(baseRegNum), + GetPointerSize() * kBitsPerByte, kRegTyInt); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(baseRegNum); + } + return resOpnd; +} + +/* + * When immediate of str/ldr is over 
256 bits, it should be aligned according to the reg byte size.
+ * Here we split the offset into (512 * n) and +/-(new offset) when misaligned, to make sure that
+ * the new offset is always under 256 bits.
+ */
+MemOperand &AArch64CGFunc::ConstraintOffsetToSafeRegion(uint32 bitLen, const MemOperand &memOpnd) {
+  auto it = hashMemOpndTable.find(memOpnd);
+  if (it != hashMemOpndTable.end()) {
+    hashMemOpndTable.erase(memOpnd);
+  }
+  int32 offsetValue = static_cast<int32>(memOpnd.GetOffsetImmediate()->GetOffsetValue());
+  int32 multiplier = (offsetValue / k512BitSize) + static_cast<int32>(offsetValue % k512BitSize > k256BitSize);
+  int32 addMount = multiplier * k512BitSizeInt;
+  int32 newOffset = offsetValue - addMount;
+  RegOperand *baseReg = memOpnd.GetBaseRegister();
+  ImmOperand &immAddMount = CreateImmOperand(addMount, k64BitSize, true);
+  if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) {
+    immAddMount.SetVary(kUnAdjustVary);
+  }
+
+  RegOperand *resOpnd = GetBaseRegForSplit(kRinvalid);
+  SelectAdd(*resOpnd, *baseReg, immAddMount, PTY_i64);
+  MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, newOffset);
+  newMemOpnd.SetStackMem(memOpnd.IsStackMem());
+  return newMemOpnd;
+}
+
+ImmOperand &AArch64CGFunc::SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, RegOperand *resOpnd,
+                                               int64 ofstVal, bool isDest, Insn *insn, bool forPair) {
+  auto it = hashMemOpndTable.find(memOpnd);
+  if (it != hashMemOpndTable.end()) {
+    hashMemOpndTable.erase(memOpnd);
+  }
+  /*
+   * opndVal == Q0 * 32760(16380) + R0
+   * R0 == Q1 * 8(4) + R1
+   * ADDEND == Q0 * 32760(16380) + R1
+   * NEW_OFFSET = Q1 * 8(4)
+   * we want to generate two instructions:
+   * ADD TEMP_REG, X29, ADDEND
+   * LDR/STR TEMP_REG, [ TEMP_REG, #NEW_OFFSET ]
+   */
+  int32 maxPimm = 0;
+  if (!forPair) {
+    maxPimm = MemOperand::GetMaxPIMM(bitLen);
+  } else {
+    maxPimm = MemOperand::GetMaxPairPIMM(bitLen);
+  }
+  DEBUG_ASSERT(maxPimm != 0, "get max pimm failed");
+
+  int64 q0 = ofstVal / maxPimm + (ofstVal < 0 ? -1 : 0);
+  int64 addend = q0 * maxPimm;
+  int64 r0 = ofstVal - addend;
+  int64 alignment = MemOperand::GetImmediateOffsetAlignment(bitLen);
+  auto q1 = static_cast(static_cast(r0) >> static_cast(alignment));
+  auto r1 = static_cast(static_cast(r0) & ((1u << static_cast(alignment)) - 1));
+  auto remained = static_cast(static_cast(q1) << static_cast(alignment));
+  addend = addend + r1;
+  if (addend > 0) {
+    int64 suffixClear = 0xfff;
+    if (forPair) {
+      suffixClear = 0xff;
+    }
+    int64 remainedTmp = remained + (addend & suffixClear);
+    if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast(remainedTmp), bitLen) &&
+        ((static_cast(remainedTmp) & ((1u << static_cast(alignment)) - 1)) == 0)) {
+      remained = remainedTmp;
+      addend = (addend & ~suffixClear);
+    }
+  }
+  ImmOperand &immAddend = CreateImmOperand(addend, k64BitSize, true);
+  if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) {
+    immAddend.SetVary(kUnAdjustVary);
+  }
+  return immAddend;
+}
+
+MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen,
+                                                         uint32 baseRegNum, bool isDest,
+                                                         Insn *insn, bool forPair) {
+  DEBUG_ASSERT((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd");
+  DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd");
+  OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate();
+  int64 ofstVal = ofstOpnd->GetOffsetValue();
+  RegOperand *resOpnd = GetBaseRegForSplit(baseRegNum);
+  ImmOperand &immAddend = SplitAndGetRemained(memOpnd, bitLen, resOpnd, ofstVal, isDest, insn, forPair);
+  int64 remained = (ofstVal - immAddend.GetValue());
+  RegOperand *origBaseReg = memOpnd.GetBaseRegister();
+  DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check");
+  if (insn == nullptr) {
+    SelectAdd(*resOpnd, *origBaseReg, immAddend, PTY_i64);
+  } else {
+    SelectAddAfterInsn(*resOpnd, *origBaseReg, immAddend, PTY_i64, isDest, *insn);
+  }
+  MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, remained);
+  newMemOpnd.SetStackMem(memOpnd.IsStackMem());
+  return newMemOpnd;
+}
+
+void AArch64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) {
+  SelectDassign(stmt.GetStIdx(), stmt.GetFieldID(), stmt.GetRHS()->GetPrimType(), opnd0);
+}
+
+/*
+ * Used by SelectDassign when optimizing a volatile store, because the stlr instruction
+ * only allows a store to a memory address formed from a register base with offset 0:
+ *   STLR <Wt>, [<Xn|SP>{,#0}], 32-bit variant (size = 10)
+ *   STLR <Xt>, [<Xn|SP>{,#0}], 64-bit variant (size = 11)
+ * So this function pre-processes the memory operand to satisfy the store-release.
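+ * For example, a release store to [x29, #24] cannot be encoded directly: the address is
+ * first materialized with an add (x29 + 24) into a fresh base register, and the stlr
+ * then uses that register with the mandatory #0 offset.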
+ */
+RegOperand *AArch64CGFunc::ExtractNewMemBase(const MemOperand &memOpnd) {
+  const MIRSymbol *sym = memOpnd.GetSymbol();
+  MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode();
+  if (mode == MemOperand::kAddrModeLiteral) {
+    return nullptr;
+  }
+  RegOperand *baseOpnd = memOpnd.GetBaseRegister();
+  DEBUG_ASSERT(baseOpnd != nullptr, "nullptr check");
+  RegOperand &resultOpnd = CreateRegisterOperandOfType(baseOpnd->GetRegisterType(), baseOpnd->GetSize() / kBitsPerByte);
+  bool is64Bits = (baseOpnd->GetSize() == k64BitSize);
+  if (mode == MemOperand::kAddrModeLo12Li) {
+    StImmOperand &stImm = CreateStImmOperand(*sym, 0, 0);
+    Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, resultOpnd, *baseOpnd, stImm);
+    addInsn.SetComment("new add insn");
+    GetCurBB()->AppendInsn(addInsn);
+  } else if (mode == MemOperand::kAddrModeBOi) {
+    OfstOperand *offsetOpnd = memOpnd.GetOffsetImmediate();
+    if (offsetOpnd->GetOffsetValue() != 0) {
+      MOperator mOp = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
+      GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resultOpnd, *baseOpnd, *offsetOpnd));
+    } else {
+      return baseOpnd;
+    }
+  } else {
+    CHECK_FATAL(mode == MemOperand::kAddrModeBOrX, "unexpected addressing mode.");
+    RegOperand *regOpnd = static_cast(&memOpnd)->GetIndexRegister();
+    MOperator mOp = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resultOpnd, *baseOpnd, *regOpnd));
+  }
+  return &resultOpnd;
+}
+
+/*
+ * NOTE: I divided SelectDassign so that we can create "virtual" assignments
+ * when selecting other complex Maple IR instructions. For example, the atomic
+ * exchange and other intrinsics need to assign their results to local
+ * variables. Such Maple IR instructions are platform-specific (e.g.
+ * atomic_exchange can be implemented as one single machine instruction on x86_64
+ * and ARMv8.1, but ARMv8.0 needs an LL/SC loop), therefore they cannot (in
+ * principle) be lowered at BELowerer or CGLowerer.
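+ * Splitting it this way lets such an intrinsic call the SelectDassign(stIdx, fieldId, ...)
+ * overload below directly to store its result into a local, without synthesizing a DassignNode.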
+ */ +void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stIdx); + int32 offset = 0; + bool parmCopy = false; + if (fieldId != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectDassign: non-zero fieldID for non-structure"); + offset = GetBecommon().GetFieldOffset(*structType, fieldId).first; + parmCopy = IsParamStructCopy(*symbol); + } + uint32 regSize = GetPrimTypeBitSize(rhsPType); + MIRType *type = symbol->GetType(); + Operand &stOpnd = LoadIntoRegister(opnd0, IsPrimitiveInteger(rhsPType) || + IsPrimitiveVectorInteger(rhsPType), regSize, + IsSignedInteger(type->GetPrimType())); + MOperator mOp = MOP_undef; + if ((type->GetKind() == kTypeStruct) || (type->GetKind() == kTypeUnion)) { + MIRStructType *structType = static_cast(type); + type = structType->GetFieldType(fieldId); + } else if (type->GetKind() == kTypeClass) { + MIRClassType *classType = static_cast(type); + type = classType->GetFieldType(fieldId); + } + + uint32 dataSize = GetPrimTypeBitSize(type->GetPrimType()); + if (type->GetPrimType() == PTY_agg) { + dataSize = GetPrimTypeBitSize(PTY_a64); + } + MemOperand *memOpnd = nullptr; + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast(dataSize)); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); + } + + /* In bpl mode, a func symbol's type is represented as a MIRFuncType instead of a MIRPtrType (pointing to + * MIRFuncType), so we allow `kTypeFunction` to appear here */ + DEBUG_ASSERT(((type->GetKind() == kTypeScalar) || (type->GetKind() == kTypePointer) || (type->GetKind() == kTypeFunction) || + (type->GetKind() == kTypeStruct) || (type->GetKind() == kTypeUnion)|| (type->GetKind() == kTypeArray)), + "NYI dassign type"); + PrimType ptyp = type->GetPrimType(); + if (ptyp == PTY_agg) { + ptyp = PTY_a64; + } + + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + if (isVolStore) { + RegOperand *baseOpnd = ExtractNewMemBase(*memOpnd); + if (baseOpnd != nullptr) { + memOpnd = &CreateMemOpnd(*baseOpnd, 0, dataSize); + memOrd = AArch64isa::kMoRelease; + isVolStore = false; + } + } + + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? 
&ConstraintOffsetToSafeRegion(dataSize, *memOpnd) : memOpnd; + if (symbol->GetAsmAttr() != UStrIdx(0) && + symbol->GetStorageClass() != kScPstatic && symbol->GetStorageClass() != kScFstatic) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + SelectCopy(specifiedOpnd, type->GetPrimType(), opnd0, rhsPType); + } else if (memOrd == AArch64isa::kMoNone) { + mOp = PickStInsn(GetPrimTypeBitSize(ptyp), ptyp); + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd); + if (GetCG()->GenerateVerboseCG()) { + insn.SetComment(GenerateMemOpndVerbose(*memOpnd)); + } + GetCurBB()->AppendInsn(insn); + } else { + AArch64CGFunc::SelectStoreRelease(*memOpnd, ptyp, stOpnd, ptyp, memOrd, true); + } +} + +void AArch64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); + int64 offset = stmt.offset; + uint32 size = GetPrimTypeSize(stmt.GetPrimType()) * k8ByteSize; + MOperator mOp = (size == k16BitSize) ? MOP_wstrh : + ((size == k32BitSize) ? MOP_wstr : + ((size == k64BitSize) ? MOP_xstr : MOP_undef)); + CHECK_FATAL(mOp != MOP_undef, "illegal size for dassignoff"); + MemOperand *memOpnd = &GetOrCreateMemOpnd(*symbol, offset, size); + if ((memOpnd->GetMemVaryType() == kNotVary) && + (IsImmediateOffsetOutOfRange(*memOpnd, size) || (offset % 8 != 0))) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, size); + } + Operand &stOpnd = LoadIntoRegister(opnd0, true, size, false); + memOpnd = memOpnd->IsOffsetMisaligned(size) ? &ConstraintOffsetToSafeRegion(size, *memOpnd) : memOpnd; + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd); + GetCurBB()->AppendInsn(insn); +} + +void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { + Operand *opnd0 = HandleExpr(stmt, *stmt.Opnd(0)); + RegOperand &baseReg = LoadIntoRegister(*opnd0, PTY_a64); + auto &zwr = GetZeroOpnd(k32BitSize); + auto &mem = CreateMemOpnd(baseReg, 0, k32BitSize); + Insn &loadRef = GetInsnBuilder()->BuildInsn(MOP_wldr, zwr, mem); + loadRef.SetDoNotRemove(true); + if (GetCG()->GenerateVerboseCG()) { + loadRef.SetComment("null pointer check"); + } + GetCurBB()->AppendInsn(loadRef); +} + +void AArch64CGFunc::SelectAbort() { + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + auto &mem = CreateMemOpnd(inOpnd, 0, k64BitSize); + Insn &movXzr = GetInsnBuilder()->BuildInsn(MOP_xmovri64, inOpnd, CreateImmOperand(0, k64BitSize, false)); + Insn &loadRef = GetInsnBuilder()->BuildInsn(MOP_wldr, GetZeroOpnd(k64BitSize), mem); + loadRef.SetDoNotRemove(true); + movXzr.SetDoNotRemove(true); + GetCurBB()->AppendInsn(movXzr); + GetCurBB()->AppendInsn(loadRef); +} + +static std::string GetRegPrefixFromPrimType(PrimType pType, uint32 size, const std::string &constraint) { + std::string regPrefix = ""; + /* memory access check */ + if (constraint.find("m") != std::string::npos || constraint.find("Q") != std::string::npos) { + regPrefix += "["; + } + if (IsPrimitiveVector(pType)) { + regPrefix += "v"; + } else if (IsPrimitiveInteger(pType)) { + if (size == k32BitSize) { + regPrefix += "w"; + } else { + regPrefix += "x"; + } + } else { + if (size == k32BitSize) { + regPrefix += "s"; + } else { + regPrefix += "d"; + } + } + return regPrefix; +} + +void AArch64CGFunc::SelectAsm(AsmNode &node) { + SetHasAsm(); + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + if 
(GetCG()->GetCGOptions().DoLinearScanRegisterAllocation()) { + LogInfo::MapleLogger() << "Using coloring RA\n"; + const_cast(GetCG()->GetCGOptions()).SetOption(CGOptions::kDoColorRegAlloc); + const_cast(GetCG()->GetCGOptions()).ClearOption(CGOptions::kDoLinearScanRegAlloc); + } + } + Operand *asmString = &CreateStringOperand(node.asmString); + ListOperand *listInputOpnd = CreateListOpnd(*GetFuncScopeAllocator()); + ListOperand *listOutputOpnd = CreateListOpnd(*GetFuncScopeAllocator()); + ListOperand *listClobber = CreateListOpnd(*GetFuncScopeAllocator()); + ListConstraintOperand *listInConstraint = memPool->New(*GetFuncScopeAllocator()); + ListConstraintOperand *listOutConstraint = memPool->New(*GetFuncScopeAllocator()); + ListConstraintOperand *listInRegPrefix = memPool->New(*GetFuncScopeAllocator()); + ListConstraintOperand *listOutRegPrefix = memPool->New(*GetFuncScopeAllocator()); + std::list> rPlusOpnd; + bool noReplacement = false; + if (node.asmString.find('$') == std::string::npos) { + /* no replacements */ + noReplacement = true; + } + /* input constraints should be processed before OP_asm instruction */ + for (size_t i = 0; i < node.numOpnds; ++i) { + /* process input constraint */ + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.inputConstraints[i]); + bool isOutputTempNode = false; + if (str[0] == '+') { + isOutputTempNode = true; + } + listInConstraint->stringList.push_back(static_cast(&CreateStringOperand(str))); + /* process input operands */ + switch (node.Opnd(i)->op) { + case OP_dread: { + DreadNode &dread = static_cast(*node.Opnd(i)); + Operand *inOpnd = SelectDread(node, dread); + PrimType pType = dread.GetPrimType(); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_addrof: { + auto &addrofNode = static_cast(*node.Opnd(i)); + Operand *inOpnd = SelectAddrof(addrofNode, node); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = addrofNode.GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_constval: { + CHECK_FATAL(!isOutputTempNode, "Unexpect"); + auto &constNode = static_cast(*node.Opnd(i)); + CHECK_FATAL(constNode.GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst does not support float yet"); + MIRIntConst *mirIntConst = safe_cast(constNode.GetConstVal()); + CHECK_FATAL(mirIntConst != nullptr, "just checking"); + int64 scale = mirIntConst->GetExtValue(); + if (str.find("r") != std::string::npos) { + bool isSigned = scale < 0; + ImmOperand &immOpnd = CreateImmOperand(scale, k64BitSize, isSigned); + /* set default type as a 64 bit reg */ + PrimType pty = isSigned ? PTY_i64 : PTY_u64; + auto &tempReg = static_cast(CreateRegisterOperandOfType(pty)); + SelectCopy(tempReg, pty, immOpnd, isSigned ? 
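+    /* An OP_constval input is dispatched on its constraint: "r" forces the literal into a
+     * scratch register first (the SelectCopy at this point), while other constraints keep
+     * the value as an immediate and record an "i<value>" prefix so the literal is pasted
+     * into the asm string. Roughly, in source terms (illustrative; the exact template
+     * substitution rules are the frontend's):
+     *
+     *   asm("add %w0, %w1, %w2" : "=r"(out) : "r"(in), "r"(4));  // 4 materialized: mov wN, #4
+     *   asm("add %w0, %w1, #4"  : "=r"(out) : "r"(in));          // 4 kept as an immediate */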
PTY_i64 : PTY_u64); + listInputOpnd->PushOpnd(static_cast(tempReg)); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pty, tempReg.GetSize(), str)))); + } else { + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); + listInputOpnd->PushOpnd(static_cast(inOpnd)); + + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand("i" + std::to_string(scale)))); + } + break; + } + case OP_regread: { + auto ®readNode = static_cast(*node.Opnd(i)); + PregIdx pregIdx = regreadNode.GetRegIdx(); + RegOperand &inOpnd = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + listInputOpnd->PushOpnd(static_cast(inOpnd)); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + PrimType pType = preg->GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd.GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(&static_cast(inOpnd), pType)); + } + break; + } + default: + CHECK_FATAL(0, "Inline asm input expression not handled"); + } + } + std::vector intrnOpnds; + intrnOpnds.emplace_back(asmString); + intrnOpnds.emplace_back(listOutputOpnd); + intrnOpnds.emplace_back(listClobber); + intrnOpnds.emplace_back(listInputOpnd); + intrnOpnds.emplace_back(listOutConstraint); + intrnOpnds.emplace_back(listInConstraint); + intrnOpnds.emplace_back(listOutRegPrefix); + intrnOpnds.emplace_back(listInRegPrefix); + Insn *asmInsn = &GetInsnBuilder()->BuildInsn(MOP_asm, intrnOpnds); + GetCurBB()->AppendInsn(*asmInsn); + + /* process listOutputOpnd */ + for (size_t i = 0; i < node.asmOutputs.size(); ++i) { + bool isOutputTempNode = false; + RegOperand *rPOpnd = nullptr; + /* process output constraint */ + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.outputConstraints[i]); + + listOutConstraint->stringList.push_back(static_cast(&CreateStringOperand(str))); + if (str[0] == '+') { + CHECK_FATAL(!rPlusOpnd.empty(), "Need r+ operand"); + rPOpnd = static_cast((rPlusOpnd.begin()->first)); + listOutputOpnd->PushOpnd(*rPOpnd); + listOutRegPrefix->stringList.push_back(static_cast( + &CreateStringOperand(GetRegPrefixFromPrimType(rPlusOpnd.begin()->second, rPOpnd->GetSize(), str)))); + if (!rPlusOpnd.empty()) { + rPlusOpnd.pop_front(); + } + isOutputTempNode = true; + } + if (str.find("Q") != std::string::npos || str.find("m") != std::string::npos) { + continue; + } + /* process output operands */ + StIdx stIdx = node.asmOutputs[i].first; + RegFieldPair regFieldPair = node.asmOutputs[i].second; + if (regFieldPair.IsReg()) { + PregIdx pregIdx = static_cast(regFieldPair.GetPregIdx()); + MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(pregIdx); + RegOperand *outOpnd = + isOutputTempNode ? rPOpnd : &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + PrimType srcType = mirPreg->GetPrimType(); + PrimType destType = srcType; + if (GetPrimTypeBitSize(destType) < k32BitSize) { + destType = IsSignedInteger(destType) ? PTY_i32 : PTY_u32; + } + RegType rtype = GetRegTyFromPrimTy(srcType); + RegOperand &opnd0 = isOutputTempNode ? 
+ GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)) : + CreateVirtualRegisterOperand(NewVReg(rtype, GetPrimTypeSize(srcType))); + SelectCopy(opnd0, destType, *outOpnd, srcType); + if (!isOutputTempNode) { + listOutputOpnd->PushOpnd(static_cast(*outOpnd)); + listOutRegPrefix->stringList.push_back(static_cast( + &CreateStringOperand(GetRegPrefixFromPrimType(srcType, outOpnd->GetSize(), str)))); + } + } else { + MIRSymbol *var; + if (stIdx.IsGlobal()) { + var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } else { + var = mirModule.CurFunction()->GetSymbolTabItem(stIdx.Idx()); + } + CHECK_FATAL(var != nullptr, "var should not be nullptr"); + if (!noReplacement || var->GetAsmAttr() != UStrIdx(0)) { + RegOperand *outOpnd = nullptr; + PrimType pty = GlobalTables::GetTypeTable().GetTypeTable().at(var->GetTyIdx())->GetPrimType(); + if (var->GetAsmAttr() != UStrIdx(0)) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(var->GetAsmAttr()); + outOpnd = &GetOrCreatePhysicalRegisterOperand(regDesp); + } else { + RegType rtype = GetRegTyFromPrimTy(pty); + outOpnd = isOutputTempNode ? rPOpnd : &CreateVirtualRegisterOperand(NewVReg(rtype, GetPrimTypeSize(pty))); + } + SaveReturnValueInLocal(node.asmOutputs, i, PTY_a64, *outOpnd, node); + if (!isOutputTempNode) { + listOutputOpnd->PushOpnd(static_cast(*outOpnd)); + listOutRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand( + GetRegPrefixFromPrimType(pty, outOpnd->GetSize(), str)))); + } + } + } + } + if (noReplacement) { + return; + } + + /* process listClobber */ + for (size_t i = 0; i < node.clobberList.size(); ++i) { + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.clobberList[i]); + auto regno = static_cast(str[1] - '0'); + if (str[2] >= '0' && str[2] <= '9') { + regno = regno * kDecimalMax + static_cast((str[2] - '0')); + } + RegOperand *reg; + switch (str[0]) { + case 'w': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + R0), k32BitSize, kRegTyInt); + listClobber->PushOpnd(*reg); + break; + } + case 'x': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + R0), k64BitSize, kRegTyInt); + listClobber->PushOpnd(*reg); + break; + } + case 's': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + V0), k32BitSize, kRegTyFloat); + listClobber->PushOpnd(*reg); + break; + } + case 'd': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + V0), k64BitSize, kRegTyFloat); + listClobber->PushOpnd(*reg); + break; + } + case 'v': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + V0), k64BitSize, kRegTyFloat); + listClobber->PushOpnd(*reg); + break; + } + case 'c': { + asmInsn->SetAsmDefCondCode(); + break; + } + case 'm': { + asmInsn->SetAsmModMem(); + break; + } + default: + CHECK_FATAL(0, "Inline asm clobber list not handled"); + } + } +} + +void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + if (GetCG()->IsLmbc()) { + PrimType lhsSize = stmt.GetPrimType(); + PrimType rhsSize = stmt.Opnd(0)->GetPrimType(); + if (lhsSize != rhsSize && stmt.Opnd(0)->GetOpCode() == OP_ireadoff) { + Insn *prev = GetCurBB()->GetLastInsn(); + if (prev->GetMachineOpcode() == MOP_wldrsb || prev->GetMachineOpcode() == MOP_wldrsh) { + opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType())); + prev->SetMOP(AArch64CG::kMd[prev->GetMachineOpcode() == MOP_wldrsb ? 
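+    /* When the previous lmbc load sign-extended into a 32-bit register but this regassign
+     * targets a 64-bit preg, the load itself is widened in place instead of emitting a
+     * separate extend (illustrative AArch64):
+     *
+     *   ldrsb w0, [x1]   ->   ldrsb x0, [x1]   // byte, sign-extended to 64 bits
+     *   ldrsh w0, [x1]   ->   ldrsh x0, [x1]   // halfword
+     *   ldr   w0, [x1]   ->   ldrsw x0, [x1]   // word feeding an i64 target */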
MOP_xldrsb : MOP_xldrsh]); + } else if (prev->GetMachineOpcode() == MOP_wldr && stmt.GetPrimType() == PTY_i64) { + opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType())); + prev->SetMOP(AArch64CG::kMd[MOP_xldrsw]); + } + } + } + RegOperand *regOpnd = nullptr; + PregIdx pregIdx = stmt.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + if (GetCG()->IsLmbc() && stmt.GetPrimType() == PTY_agg) { + if (static_cast(opnd0).IsOfIntClass()) { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_i64); + } else if (opnd0.GetSize() <= k4ByteSize) { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f32); + } else { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f64); + } + } else { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType()); + } + } else { + regOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + /* look at rhs */ + PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + if (GetCG()->IsLmbc() && rhsType == PTY_agg) { + /* This occurs when a call returns a small struct */ + /* The subtree should already taken care of the agg type that is in excess of 8 bytes */ + rhsType = PTY_i64; + } + PrimType dtype = rhsType; + if (GetPrimTypeBitSize(dtype) < k32BitSize) { + DEBUG_ASSERT(IsPrimitiveInteger(dtype), ""); + dtype = IsSignedInteger(dtype) ? PTY_i32 : PTY_u32; + } + DEBUG_ASSERT(regOpnd != nullptr, "null ptr check!"); + SelectCopy(*regOpnd, dtype, opnd0, rhsType); + if (GetCG()->GenerateVerboseCG()) { + if (GetCurBB()->GetLastInsn()) { + GetCurBB()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; "); + } else if (GetCurBB()->GetPrev()->GetLastInsn()) { + GetCurBB()->GetPrev()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; "); + } + } + + if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) { + MemOperand *dest = GetPseudoRegisterSpillMemoryOperand(pregIdx); + PrimType stype = GetTypeFromPseudoRegIdx(pregIdx); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 srcBitLength = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(srcBitLength, stype), *regOpnd, *dest)); + } else if (regOpnd->GetRegisterNumber() == R0 || regOpnd->GetRegisterNumber() == R1) { + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *regOpnd); + GetCurBB()->AppendInsn(pseudo); + } else if (regOpnd->GetRegisterNumber() >= V0 && regOpnd->GetRegisterNumber() <= V3) { + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *regOpnd); + GetCurBB()->AppendInsn(pseudo); + } +} + +MemOperand *AArch64CGFunc::FixLargeMemOpnd(MemOperand &memOpnd, uint32 align) { + MemOperand *lhsMemOpnd = &memOpnd; + if ((lhsMemOpnd->GetMemVaryType() == kNotVary) && + IsImmediateOffsetOutOfRange(*lhsMemOpnd, align * kBitsPerByte)) { + RegOperand *addReg = &CreateRegisterOperandOfType(PTY_i64); + lhsMemOpnd = &SplitOffsetWithAddInstruction(*lhsMemOpnd, align * k8BitSize, addReg->GetRegisterNumber()); + } + return lhsMemOpnd; +} + +MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx) { + auto *a64MemOpnd = &memOpnd; + if ((a64MemOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, &memOpnd, opndIdx)) { + if (opndIdx == kInsnSecondOpnd) { + a64MemOpnd = &SplitOffsetWithAddInstruction(*a64MemOpnd, dSize); + } else if (opndIdx == kInsnThirdOpnd) { + a64MemOpnd = &SplitOffsetWithAddInstruction( + 
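+        /* Splitting trades one add for encodability. For an 8-byte store at offset 32776,
+         * which exceeds the scaled 12-bit limit of 32760 (illustrative AArch64):
+         *
+         *   add x16, x29, #32768   // hoist the high part of the offset
+         *   str x1, [x16, #8]      // the remainder fits the immediate field
+         *
+         * rather than the unencodable str x1, [x29, #32776]. */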
*a64MemOpnd, dSize, AArch64reg::kRinvalid, false, nullptr, true); + } else { + CHECK_FATAL(false, "NYI"); + } + } + return a64MemOpnd; +} + +MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, + bool needLow12) { + MemOperand *memOpnd; + if (sym.GetStorageClass() == kScFormal && GetBecommon().GetTypeSize(sym.GetTyIdx()) > k16ByteSize) { + /* formal of size of greater than 16 is copied by the caller and the pointer to it is passed. */ + /* otherwise it is passed in register and is accessed directly. */ + memOpnd = &GetOrCreateMemOpnd(sym, 0, align * kBitsPerByte); + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *memOpnd); + GetCurBB()->AppendInsn(ldInsn); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, vreg, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), nullptr); + } else { + memOpnd = &GetOrCreateMemOpnd(sym, offset, align * kBitsPerByte, false, needLow12); + } + return FixLargeMemOpnd(*memOpnd, align); +} + +RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(bool isLo12, const MIRSymbol &symbol, int64 offsetVal, + RegOperand &BaseReg) { + RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + if (isLo12) { + StImmOperand &stImm = CreateStImmOperand(symbol, 0, 0); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, *tgtAddr, BaseReg, stImm)); + } else { + ImmOperand &imm = CreateImmOperand(offsetVal, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *tgtAddr, BaseReg, imm)); + } + return tgtAddr; +} + +RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(int64 offset, Operand &exprOpnd) { + RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + OfstOperand *ofstOpnd = &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *tgtAddr, exprOpnd, *ofstOpnd)); + return tgtAddr; +} + +RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(uint64 copySize) { + RegOperand *vregMemcpySize = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + ImmOperand *sizeOpnd = &CreateImmOperand(static_cast(copySize), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, *vregMemcpySize, *sizeOpnd)); + return vregMemcpySize; +} + +Insn *AArch64CGFunc::AggtStrLdrInsert(bool bothUnion, Insn *lastStrLdr, Insn &newStrLdr) { + if (bothUnion) { + if (lastStrLdr == nullptr) { + GetCurBB()->AppendInsn(newStrLdr); + } else { + GetCurBB()->InsertInsnAfter(*lastStrLdr, newStrLdr); + } + } else { + GetCurBB()->AppendInsn(newStrLdr); + } + return &newStrLdr; +} + +void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { + MIRSymbol *lhsSymbol = GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); + uint32 lhsOffset = 0; + MIRType *lhsType = lhsSymbol->GetType(); + bool bothUnion = false; + if (stmt.GetFieldID() != 0) { + MIRStructType *structType = static_cast(lhsSymbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); + lhsType = structType->GetFieldType(stmt.GetFieldID()); + lhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first); + bothUnion |= (structType->GetKind() == kTypeUnion); + } + uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex()); + uint64 lhsSize = 
GetBecommon().GetTypeSize(lhsType->GetTypeIndex()); + + uint32 rhsAlign; + uint32 alignUsed; + uint32 rhsOffset = 0; + if (stmt.GetRHS()->GetOpCode() == OP_dread) { + AddrofNode *rhsDread = static_cast(stmt.GetRHS()); + MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx()); + MIRType *rhsType = rhsSymbol->GetType(); + if (rhsDread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(rhsSymbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = structType->GetFieldType(rhsDread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); + bothUnion &= (structType->GetKind() == kTypeUnion); + } + bothUnion &= (rhsSymbol == lhsSymbol); + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + DEBUG_ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(lhsOffset, rhsOffset, alignUsed); + MemOperand *rhsBaseMemOpnd; + if (IsParamStructCopy(*rhsSymbol)) { + rhsBaseMemOpnd = &LoadStructCopyBase(*rhsSymbol, + rhsOffset, static_cast(copySize * k8BitSize)); + } else { + rhsBaseMemOpnd = &GetOrCreateMemOpnd(*rhsSymbol, + rhsOffset, copySize * k8BitSize, false, true); + rhsBaseMemOpnd = FixLargeMemOpnd(*rhsBaseMemOpnd, copySize); + } + RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister(); + int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); + RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister(); + int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsIsLo12, *lhsSymbol, lhsOffsetVal, *lhsBaseReg)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(rhsIsLo12, *rhsSymbol, rhsOffsetVal, *rhsBaseReg)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + Insn *lastLdr = nullptr; + Insn *lastStr = nullptr; + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + uint64 rhsBaseOffset = i * copySize + static_cast(rhsOffsetVal); + uint64 lhsBaseOffset = i * copySize + static_cast(lhsOffsetVal); + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + /* generate the load */ + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + /* generate the load */ + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + bool doPair = (!rhsIsLo12 && !lhsIsLo12 && (copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + RegOperand *result1 = nullptr; + Insn *newLoadInsn = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? 
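+        /* With plain base+offset addressing on both sides and at least two whole units left,
+         * the loop copies with load/store pairs, halving the instruction count. A 32-byte
+         * struct copied in 8-byte units becomes (illustrative AArch64):
+         *
+         *   ldp x10, x11, [x0]        // units 0 and 1
+         *   stp x10, x11, [x1]
+         *   ldp x10, x11, [x0, #16]   // units 2 and 3
+         *   stp x10, x11, [x1, #16] */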
MOP_wldp : MOP_xldp; + result1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *rhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + newLoadInsn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + newLoadInsn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + DEBUG_ASSERT(newLoadInsn != nullptr, "build load instruction failed in SelectAggDassign"); + lastLdr = AggtStrLdrInsert(bothUnion, lastLdr, *newLoadInsn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + sym = lhsIsLo12 ? lhsSymbol : nullptr; + Insn *newStoreInsn = nullptr; + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + DEBUG_ASSERT(result1 != nullptr, "result1 should not be nullptr"); + newStoreInsn = &GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + newStoreInsn = &GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd); + } + DEBUG_ASSERT(newStoreInsn != nullptr, "build store instruction failed in SelectAggDassign"); + lastStr = AggtStrLdrInsert(bothUnion, lastStr, *newStoreInsn); + } + /* take care of extra content at the end less than the unit */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + MemOperand *rhsMemOpnd; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); + rhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + /* generate the store */ + addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + sym = lhsIsLo12 ? 
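+        /* The tail loop below mops up the bytes left after whole-unit copies by halving the
+         * chunk size until the struct is fully covered. A standalone sketch of the chunk
+         * sequence it walks (hypothetical helper implementing the same policy):
+         *
+         *   #include <cstdint>
+         *   #include <vector>
+         *   // For size = 13 and unit = 8 this yields {8, 4, 1}.
+         *   std::vector<uint32_t> CopyChunks(uint64_t size, uint32_t unit) {
+         *     std::vector<uint32_t> chunks(size / unit, unit);
+         *     uint64_t covered = (size / unit) * unit;
+         *     for (uint32_t c = unit; covered < size;) {
+         *       c >>= 1;                     // halve the chunk, as the loop below does
+         *       if (covered + c <= size) {
+         *         chunks.push_back(c);
+         *         covered += c;
+         *       }
+         *     }
+         *     return chunks;
+         *   } */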
lhsSymbol : nullptr; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), k32BitSize); + MemOperand *lhsMemOpnd; + lhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + lhsMemOpnd = FixLargeMemOpnd(*lhsMemOpnd, newAlignUsed); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else if (stmt.GetRHS()->GetOpCode() == OP_iread) { + IreadNode *rhsIread = static_cast(stmt.GetRHS()); + RegOperand *addrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType()); + MIRPtrType *rhsPointerType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); + MIRType *rhsType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsPointerType->GetPointedTyIdx())); + bool isRefField = false; + if (rhsIread->GetFieldID() != 0) { + MIRStructType *rhsStructType = static_cast(rhsType); + DEBUG_ASSERT(rhsStructType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + DEBUG_ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); + RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister(); + int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsIsLo12, *lhsSymbol, lhsOffsetVal, *lhsBaseReg)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(rhsOffset, *addrOpnd)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + uint64 rhsBaseOffset = rhsOffset + i * copySize; + uint64 lhsBaseOffset = static_cast(lhsOffsetVal) + i * copySize; + /* generate the load */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, + addrOpnd, nullptr, &ofstOpnd, nullptr); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, copySize)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + bool doPair = (!lhsIsLo12 && copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize)); + Insn *insn = nullptr; + RegOperand *result1 = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? 
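+        /* Aggregates larger than kParmMemcpySize are not expanded inline; the assignment is
+         * lowered to a libc call instead, conceptually:
+         *
+         *   memcpy(lhsBase + lhsOffset, rhsBase + rhsOffset, lhsSize);
+         *
+         * The PrepareMemcpyParamOpnd calls above materialize exactly those three arguments
+         * into registers before SelectLibCall emits the call. */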
MOP_wldp : MOP_xldp; + regno_t vRegNO1 = NewVReg(kRegTyInt, std::max(4u, copySize)); + result1 = &CreateVirtualRegisterOperand(vRegNO1); + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *rhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + insn->MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(*insn); + /* generate the store */ + MemOperand::AArch64AddressingMode addrMode = + lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = lhsIsLo12 ? lhsSymbol : nullptr; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + DEBUG_ASSERT(result1 != nullptr, "result1 should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit of alignUsed */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + addrOpnd, nullptr, &ofstOpnd, nullptr); + rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed); + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed))); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + insn.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(insn); + /* generate the store */ + MemOperand::AArch64AddressingMode addrMode = + lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = lhsIsLo12 ? 
lhsSymbol : nullptr; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), k32BitSize); + MemOperand *lhsMemOpnd; + lhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + lhsMemOpnd = FixLargeMemOpnd(*lhsMemOpnd, newAlignUsed); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else { + DEBUG_ASSERT(stmt.GetRHS()->op == OP_regread, "SelectAggDassign: NYI"); + bool isRet = false; + if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) { + RegreadNode *rhsregread = static_cast(stmt.GetRHS()); + PregIdx pregIdx = rhsregread->GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + if ((-pregIdx) == kSregRetval0) { + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + PrimType retPtype; + RegType regType; + uint32 memSize; + uint32 regSize; + parmlocator.LocateRetVal(*lhsType, pLoc); + AArch64reg r[kFourRegister]; + r[0] = static_cast(pLoc.reg0); + r[1] = static_cast(pLoc.reg1); + r[2] = static_cast(pLoc.reg2); + r[3] = static_cast(pLoc.reg3); + if (pLoc.numFpPureRegs) { + regSize = (pLoc.fpSize == k4ByteSize) ? k32BitSize : k64BitSize; + memSize = pLoc.fpSize; + retPtype = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + regType = kRegTyFloat; + } else { + regSize = k64BitSize; + memSize = k8BitSize; + retPtype = PTY_u64; + regType = kRegTyInt; + } + for (uint32 i = 0; i < kFourRegister; ++i) { + if (r[i] == kRinvalid) { + break; + } + RegOperand &parm = GetOrCreatePhysicalRegisterOperand(r[i], regSize, regType); + Operand &mOpnd = GetOrCreateMemOpnd(*lhsSymbol, memSize * i, regSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(regSize, retPtype), parm, mOpnd)); + } + isRet = true; + } + } + } + CHECK_FATAL(isRet, "SelectAggDassign: NYI"); + } +} + +static MIRType *GetPointedToType(const MIRPtrType &pointerType) { + MIRType *aType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType.GetPointedTyIdx()); + if (aType->GetKind() == kTypeArray) { + MIRArrayType *arrayType = static_cast(aType); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + } + if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) { + MIRFarrayType *farrayType = static_cast(aType); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx()); + } + return aType; +} + +void AArch64CGFunc::SelectIassign(IassignNode &stmt) { + int32 offset = 0; + MIRPtrType *pointerType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx())); + DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iassign node"); + MIRType *pointedType = nullptr; + bool isRefField = false; + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + + if (stmt.GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + MIRStructType *structType = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structType = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structType = static_cast(pointedTy)->GetParentType(); + } + DEBUG_ASSERT(structType != nullptr, "SelectIassign: non-zero fieldID for non-structure"); + pointedType = structType->GetFieldType(stmt.GetFieldID()); + offset = GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first; + isRefField = GetBecommon().IsRefField(*structType, stmt.GetFieldID()); + } else { + pointedType = GetPointedToType(*pointerType); + if (GetFunction().IsJava() && (pointedType->GetKind() == kTypePointer)) { + MIRType *nextPointedType = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pointedType)->GetPointedTyIdx()); + if (nextPointedType->GetKind() != kTypeScalar) { + isRefField = true; /* write into an object array or a high-dimensional array */ + } + } + } + + PrimType styp = stmt.GetRHS()->GetPrimType(); + Operand *valOpnd = HandleExpr(stmt, *stmt.GetRHS()); + Operand &srcOpnd = + LoadIntoRegister(*valOpnd, + (IsPrimitiveInteger(styp) || IsPrimitiveVectorInteger(styp)), GetPrimTypeBitSize(styp)); + + PrimType destType = pointedType->GetPrimType(); + if (destType == PTY_agg) { + destType = PTY_a64; + } + if (IsPrimitiveVector(styp)) { /* a vector type */ + destType = styp; + } + DEBUG_ASSERT(stmt.Opnd(0) != nullptr, "null ptr check"); + MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.Opnd(0), offset); + auto dataSize = GetPrimTypeBitSize(destType); + memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? ConstraintOffsetToSafeRegion(dataSize, memOpnd) : memOpnd; + if (isVolStore && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { + memOrd = AArch64isa::kMoRelease; + isVolStore = false; + } + + if (memOrd == AArch64isa::kMoNone) { + SelectCopy(memOpnd, destType, srcOpnd, destType); + } else { + AArch64CGFunc::SelectStoreRelease(memOpnd, destType, srcOpnd, destType, memOrd, false); + } + GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField); +} + +void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt) { + int32 offset = stmt.GetOffset(); + PrimType destType = stmt.GetPrimType(); + + MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.GetBOpnd(0), offset); + auto dataSize = GetPrimTypeBitSize(destType); + memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? 
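+    /* For a volatile iassign whose address is a simple base+offset, the plain store is
+     * replaced by a store-release, which provides the required ordering without a separate
+     * barrier (illustrative AArch64):
+     *
+     *   str  w1, [x0]    // ordinary store
+     *   stlr w1, [x0]    // store-release emitted when memOrd == kMoRelease
+     *
+     * stlr accepts no immediate offset, so the address must first be reduced to a bare base. */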
ConstraintOffsetToSafeRegion(dataSize, memOpnd) : memOpnd;
+    Operand *valOpnd = HandleExpr(stmt, *stmt.GetBOpnd(1));
+    Operand &srcOpnd = LoadIntoRegister(*valOpnd, true, GetPrimTypeBitSize(destType));
+    SelectCopy(memOpnd, destType, srcOpnd, destType);
+}
+
+MemOperand *AArch64CGFunc::GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg baseRegno) {
+    MemOperand *memOpnd;
+    RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(baseRegno, k64BitSize, kRegTyInt);
+    uint32 bitlen = byteSize * kBitsPerByte;
+    if (offset < -256) {
+        RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64);
+        ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true);
+        Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd);
+        GetCurBB()->AppendInsn(addInsn);
+        OfstOperand *offsetOpnd = &CreateOfstOpnd(0, k32BitSize);
+        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, baseOpnd, nullptr, offsetOpnd, nullptr);
+    } else {
+        OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize);
+        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr);
+    }
+    memOpnd->SetStackMem(true);
+    return memOpnd;
+}
+
+void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) {
+    int32 offset = stmt.GetOffset();
+    PrimType primType = stmt.GetPrimType();
+    MIRType *rType = GetLmbcCallReturnType();
+    bool isPureFpStruct = false;
+    uint32 numRegs = 0;
+    if (rType && rType->GetPrimType() == PTY_agg && opnd.IsRegister() &&
+        static_cast<RegOperand&>(opnd).IsPhysicalRegister()) {
+        CHECK_FATAL(rType->GetSize() <= k16ByteSize, "SelectIassignfpoff invalid agg size");
+        uint32 fpSize;
+        numRegs = FloatParamRegRequired(static_cast<MIRStructType*>(rType), fpSize);
+        if (numRegs) {
+            primType = (fpSize == k4ByteSize) ? PTY_f32 : PTY_f64;
+            isPureFpStruct = true;
+        }
+    }
+    uint32 byteSize = GetPrimTypeSize(primType);
+    uint32 bitlen = byteSize * kBitsPerByte;
+    if (isPureFpStruct) {
+        for (uint32 i = 0; i < numRegs; ++i) {
+            MemOperand *memOpnd = GenLmbcFpMemOperand(offset + static_cast<int32>(i * byteSize), byteSize);
+            RegOperand &srcOpnd = GetOrCreatePhysicalRegisterOperand(AArch64reg(V0 + i), bitlen, kRegTyFloat);
+            MOperator mOp = PickStInsn(bitlen, primType);
+            Insn &store = GetInsnBuilder()->BuildInsn(mOp, srcOpnd, *memOpnd);
+            GetCurBB()->AppendInsn(store);
+        }
+    } else {
+        Operand &srcOpnd = LoadIntoRegister(opnd, primType);
+        MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize);
+        MOperator mOp = PickStInsn(bitlen, primType);
+        Insn &store = GetInsnBuilder()->BuildInsn(mOp, srcOpnd, *memOpnd);
+        GetCurBB()->AppendInsn(store);
+    }
+}
+
+/* Load and assign to a new register. To be moved to the correct call register OR stack
+ * location in LmbcSelectParmList. */
+void AArch64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) {
+    if (GetLmbcArgInfo() == nullptr) {
+        LmbcArgInfo *p = memPool->New<LmbcArgInfo>(*GetFuncScopeAllocator());
+        SetLmbcArgInfo(p);
+    }
+    uint32 byteLen = GetPrimTypeSize(pTy);
+    uint32 bitLen = byteLen * kBitsPerByte;
+    RegType regTy = GetRegTyFromPrimTy(pTy);
+    int32 curRegArgs = GetLmbcArgsInRegs(regTy);
+    if (curRegArgs < static_cast<int32>(k8ByteSize)) {
+        RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen));
+        SelectCopy(*res, pTy, opnd, pTy);
+        SetLmbcArgInfo(res, pTy, offset, 1);
+    } else {
+        /* Move into allocated space */
+        Operand &memOpd = CreateMemOpnd(RSP, offset, byteLen);
+        Operand &reg = LoadIntoRegister(opnd, pTy);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(bitLen, pTy), reg, memOpd));
+    }
+    IncLmbcArgsInRegs(regTy); /* num of args in registers */
+    IncLmbcTotalArgs();       /* num of args */
+}
+
+/* Search for the enclosing call (OP_call/OP_icallproto); must be invoked from a blkassignoff node. */
+MIRType *AArch64CGFunc::LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector<TyIdx> **parmList) const {
+    for (; stmt != nullptr; stmt = stmt->GetNext()) {
+        if (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto) {
+            break;
+        }
+    }
+    CHECK_FATAL(stmt && (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto),
+                "blkassign sp not followed by call");
+    uint32 nargs = GetLmbcTotalArgs();
+    MIRType *ty = nullptr;
+    if (stmt->GetOpCode() == OP_call) {
+        CallNode *callNode = static_cast<CallNode*>(stmt);
+        MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx());
+        if (fn->GetFormalCount() > 0) {
+            ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetFormalDefVec()[nargs].formalTyIdx);
+        }
+        *parmList = &fn->GetParamTypes();
+        // would return null if the actual parameter is bogus
+    } else if (stmt->GetOpCode() == OP_icallproto) {
+        IcallNode *icallproto = static_cast<IcallNode*>(stmt);
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx());
+        MIRFuncType *fType = static_cast<MIRFuncType*>(type);
+        ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(nargs));
+        *parmList = &fType->GetParamTypeList();
+    } else {
+        CHECK_FATAL(stmt->GetOpCode() == OP_icallproto,
+                    "LmbcGetAggTyFromCallSite: unexpected call operator");
+    }
+    return ty;
+}
+
+/* return true if blkassignoff for return, false otherwise */
+bool AArch64CGFunc::LmbcSmallAggForRet(const BlkassignoffNode &bNode, const Operand *src) {
+    PrimType pTy;
+    uint32 size = 0;
+    AArch64reg regno = static_cast<AArch64reg>(static_cast<const RegOperand*>(src)->GetRegisterNumber());
+    MIRFunction *func = &GetFunction();
+
+    if (func->IsReturnStruct()) {
+        /* This blkassignoff is for struct return? */
+        uint32 loadSize;
+        uint32 numRegs = 0;
+        if (bNode.GetNext()->GetOpCode() == OP_return) {
+            MIRStructType *ty = static_cast<MIRStructType*>(
+                GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFuncRetStructTyIdx()));
+            uint32 fpregs = FloatParamRegRequired(ty, size);
+            if (fpregs > 0) {
+                /* pure floating point in agg */
+                numRegs = fpregs;
+                pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64;
+                loadSize = GetPrimTypeSize(pTy) * kBitsPerByte;
+                for (uint32 i = 0; i < fpregs; i++) {
+                    int32 s = (i == 0) ?
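+                /* A small struct whose members are all the same floating-point type (an HFA in
+                 * AAPCS64 terms) is returned in consecutive SIMD&FP registers, one per member:
+                 *
+                 *   struct PointF { float x; float y; };    // returned in s0, s1
+                 *   struct PointD { double x; double y; };  // returned in d0, d1
+                 *
+                 * The loop here reloads each member from the source block into v0..vN. */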
0 : static_cast(i * size); + MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); + AArch64reg reg = static_cast(V0 + i); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, loadSize, kRegTyFloat); + SelectCopy(*res, pTy, mem, pTy); + } + } else { + /* int/float mixed */ + numRegs = 2; + pTy = PTY_i64; + size = k4ByteSize; + switch (bNode.blockSize) { + case 1: + pTy = PTY_i8; + break; + case 2: + pTy = PTY_i16; + break; + case 4: + pTy = PTY_i32; + break; + default: + size = k8ByteSize; /* pTy remains i64 */ + break; + } + loadSize = GetPrimTypeSize(pTy) * kBitsPerByte; + MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(R0, loadSize, kRegTyInt); + SelectCopy(*res, pTy, mem, pTy); + if (bNode.blockSize > static_cast(k8ByteSize)) { + MemOperand &newMem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte); + res = &GetOrCreatePhysicalRegisterOperand(R1, loadSize, kRegTyInt); + SelectCopy(*res, pTy, newMem, pTy); + } + } + bool intReg = fpregs == 0; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = static_cast((intReg ? R0 : V0) + i); + MOperator mop = intReg ? MOP_pseudo_ret_int : MOP_pseudo_ret_float; + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize, intReg ? kRegTyInt : kRegTyFloat); + Insn &pseudo = GetInsnBuilder()->BuildInsn(mop, dest); + GetCurBB()->AppendInsn(pseudo); + } + return true; + } + } + return false; +} + +/* return true if blkassignoff for return, false otherwise */ +bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList) { + AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); + if (IsBlkassignForPush(bNode)) { + PrimType pTy = PTY_i64; + MIRStructType *ty = static_cast(LmbcGetAggTyFromCallSite(&bNode, parmList)); + uint32 size = 0; + uint32 fpregs = ty ? FloatParamRegRequired(ty, size) : 0; /* fp size determined */ + if (fpregs > 0) { + /* pure floating point in agg */ + pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64; + for (uint32 i = 0; i < fpregs; i++) { + int32 s = (i == 0) ? 0 : static_cast(i * size); + MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyFloat, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, 0, static_cast(fpregs)); + IncLmbcArgsInRegs(kRegTyFloat); + } + IncLmbcTotalArgs(); + return true; + } else if (bNode.blockSize <= static_cast(k16ByteSize)) { + /* integer/mixed types in register/s */ + size = k4ByteSize; + switch (bNode.blockSize) { + case 1: + pTy = PTY_i8; + break; + case 2: + pTy = PTY_i16; + break; + case 4: + pTy = PTY_i32; + break; + default: + size = k8ByteSize; /* pTy remains i64 */ + break; + } + MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset, bNode.blockSize > static_cast(k8ByteSize) ? 
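+        /* Small aggregates that are not pure floating point travel in integer registers:
+         * up to 8 bytes in one register, 9..16 bytes in two (AAPCS64), e.g.:
+         *
+         *   struct S1 { int32_t a; int32_t b; };   // 8 bytes  -> one register
+         *   struct S2 { int64_t a; int32_t b; };   // 16 bytes -> two registers
+         *
+         * hence the extra register copy below when blockSize exceeds 8 bytes. */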
2 : 1);
+            IncLmbcArgsInRegs(kRegTyInt);
+            if (bNode.blockSize > static_cast<int32>(k8ByteSize)) {
+                MemOperand &newMem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte);
+                RegOperand *newRes = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size));
+                SelectCopy(*newRes, pTy, newMem, pTy);
+                SetLmbcArgInfo(newRes, pTy, bNode.offset + k8ByteSizeInt, 2);
+                IncLmbcArgsInRegs(kRegTyInt);
+            }
+            IncLmbcTotalArgs();
+            return true;
+        }
+    }
+    return false;
+}
+
+/* This function is incomplete and may be removed when the Lmbc IR is changed
+ * to have the lowerer figure out where the large agg resides. */
+uint32 AArch64CGFunc::LmbcFindTotalStkUsed(std::vector<TyIdx> *paramList) {
+    AArch64CallConvImpl parmlocator(GetBecommon());
+    CCLocInfo pLoc;
+    for (TyIdx tyIdx : *paramList) {
+        MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+        (void)parmlocator.LocateNextParm(*ty, pLoc);
+    }
+    return 0;
+}
+
+/* Total number of integer and floating-point registers consumed by the arguments passed in registers. */
+uint32 AArch64CGFunc::LmbcTotalRegsUsed() {
+    if (GetLmbcArgInfo() == nullptr) {
+        return 0; /* no arg */
+    }
+    MapleVector<int32> &regs = GetLmbcCallArgNumOfRegs();
+    MapleVector<PrimType> &types = GetLmbcCallArgTypes();
+    uint32 iCnt = 0;
+    uint32 fCnt = 0;
+    for (uint32 i = 0; i < regs.size(); i++) {
+        if (IsPrimitiveInteger(types[i])) {
+            if ((iCnt + static_cast<uint32>(regs[i])) <= k8ByteSize) {
+                iCnt += static_cast<uint32>(regs[i]);
+            }
+        } else {
+            if ((fCnt + static_cast<uint32>(regs[i])) <= k8ByteSize) {
+                fCnt += static_cast<uint32>(regs[i]);
+            }
+        }
+    }
+    return iCnt + fCnt;
+}
+
+/* If the blkassignoff is for an argument, load the agg argument into virtual registers,
+ * regardless of whether enough physical call registers remain. Arguments larger than
+ * 16 bytes are copied to a preset stack area and a pointer to that copy is loaded into
+ * a virtual register instead. If the blkassignoff is not for an argument, simply emit
+ * a memcpy. */
+void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) {
+    CHECK_FATAL(src->GetKind() == Operand::kOpdRegister, "blkassign src type not in register");
+    std::vector<TyIdx> *parmList = nullptr;
+    if (GetLmbcArgInfo() == nullptr) {
+        LmbcArgInfo *p = memPool->New<LmbcArgInfo>(*GetFuncScopeAllocator());
+        SetLmbcArgInfo(p);
+    }
+    if (LmbcSmallAggForRet(bNode, src)) {
+        return;
+    } else if (LmbcSmallAggForCall(bNode, src, &parmList)) {
+        return;
+    }
+    Operand *dest = HandleExpr(bNode, *bNode.Opnd(0));
+    RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+    /* memcpy for agg assign OR large agg for arg/ret */
+    int32 offset = bNode.offset;
+    if (IsBlkassignForPush(bNode)) {
+        /* large agg for call, addr to be pushed in SelectCall */
+        offset = GetLmbcTotalStkUsed();
+        if (offset < 0) {
+            /* length of ALL stack based args for this call; this location is where the
+             * next large agg resides, and its addr will then be passed */
+            offset = LmbcFindTotalStkUsed(parmList) + LmbcTotalRegsUsed();
+        }
+        SetLmbcTotalStkUsed(offset + bNode.blockSize); /* next use */
+        SetLmbcArgInfo(regResult, PTY_i64, 0, 1);      /* 1 reg for ptr */
+        IncLmbcArgsInRegs(kRegTyInt);
+        IncLmbcTotalArgs();
+        /* copy large agg arg to offset below */
+    }
+    std::vector<Operand*> opndVec;
+    opndVec.push_back(regResult);                             /* result */
+    opndVec.push_back(PrepareMemcpyParamOpnd(offset, *dest)); /* param 0 */
+    opndVec.push_back(src);                                   /* param 1 */
+    opndVec.push_back(PrepareMemcpyParamOpnd(static_cast<uint64>(static_cast<int64>(bNode.blockSize)))); /* param 2 */
+    SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64);
+}
+
+void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) {
+    DEBUG_ASSERT(stmt.Opnd(0) != nullptr, "null
ptr check"); + Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType()); + uint32 lhsOffset = 0; + MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()); + MIRPtrType *lhsPointerType = static_cast(stmtType); + bool loadToRegs4StructReturn = false; + if (mirModule.CurFunction()->StructReturnedInRegs()) { + MIRSymbol *retSt = mirModule.CurFunction()->GetFormal(0); + if (stmt.Opnd(0)->GetOpCode() == OP_dread) { + DreadNode *dread = static_cast(stmt.Opnd(0)); + MIRSymbol *addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + loadToRegs4StructReturn = (retSt == addrSym); + } + } + MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx()); + if (stmt.GetFieldID() != 0) { + MIRStructType *structType = static_cast(lhsType); + DEBUG_ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); + lhsType = structType->GetFieldType(stmt.GetFieldID()); + lhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first); + } else if (lhsType->GetKind() == kTypeArray) { +#if DEBUG + MIRArrayType *arrayLhsType = static_cast(lhsType); + /* access an array element */ + MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayLhsType->GetElemTyIdx()); + MIRTypeKind typeKind = lhsType->GetKind(); + DEBUG_ASSERT(((typeKind == kTypeScalar) || (typeKind == kTypeStruct) || (typeKind == kTypeClass) || + (typeKind == kTypePointer)), + "unexpected array element type in iassign"); +#endif + } else if (lhsType->GetKind() == kTypeFArray) { +#if DEBUG + MIRFarrayType *farrayLhsType = static_cast(lhsType); + /* access an array element */ + MIRType *lhsElemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayLhsType->GetElemTyIdx()); + MIRTypeKind typeKind = lhsElemType->GetKind(); + DEBUG_ASSERT(((typeKind == kTypeScalar) || (typeKind == kTypeStruct) || (typeKind == kTypeClass) || + (typeKind == kTypePointer)), + "unexpected array element type in iassign"); +#endif + } + uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex()); + uint64 lhsSize = GetBecommon().GetTypeSize(lhsType->GetTypeIndex()); + + uint32 rhsAlign; + uint32 alignUsed; + uint32 rhsOffset = 0; + if (stmt.GetRHS()->GetOpCode() == OP_dread) { + AddrofNode *rhsDread = static_cast(stmt.GetRHS()); + MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx()); + MIRType *rhsType = rhsSymbol->GetType(); + if (rhsDread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(rhsSymbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); + rhsType = structType->GetFieldType(rhsDread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); + } + if (loadToRegs4StructReturn) { + /* generate move to regs for agg return */ + CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size"); + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + parmlocator.LocateNextParm(*lhsType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); + /* aggregates are 8 byte aligned. 
*/ + Operand *rhsmemopnd = nullptr; + RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ + uint32 loadSize; + uint32 numRegs; + RegType regType; + PrimType retPty; + bool fpParm = false; + if (pLoc.numFpPureRegs) { + loadSize = pLoc.fpSize; + numRegs = pLoc.numFpPureRegs; + fpParm = true; + regType = kRegTyFloat; + retPty = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + } else { + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u64; + } else { + loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u32; + } + } + bool parmCopy = IsParamStructCopy(*rhsSymbol); + for (uint32 i = 0; i < numRegs; i++) { + if (parmCopy) { + rhsmemopnd = &LoadStructCopyBase(*rhsSymbol, + (rhsOffset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + static_cast(loadSize * kBitsPerByte)); + } else { + rhsmemopnd = &GetOrCreateMemOpnd(*rhsSymbol, + (rhsOffset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + (loadSize * kBitsPerByte)); + } + result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd); + GetCurBB()->AppendInsn(ld); + } + AArch64reg regs[kFourRegister]; + regs[kFirstReg] = static_cast(pLoc.reg0); + regs[kSecondReg] = static_cast(pLoc.reg1); + regs[kThirdReg] = static_cast(pLoc.reg2); + regs[kFourthReg] = static_cast(pLoc.reg3); + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop2; + if (fpParm) { + preg = regs[i]; + mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd; + } else { + preg = (i == 0 ? R0 : R1); + mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr; + } + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType); + Insn &mov = GetInsnBuilder()->BuildInsn(mop2, dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop3; + if (fpParm) { + preg = regs[i]; + mop3 = MOP_pseudo_ret_float; + } else { + preg = (i == 0 ? 
R0 : R1); + mop3 = MOP_pseudo_ret_int; + } + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType); + Insn &pseudo = GetInsnBuilder()->BuildInsn(mop3, dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + DEBUG_ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + MemOperand *rhsBaseMemOpnd; + if (IsParamStructCopy(*rhsSymbol)) { + rhsBaseMemOpnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset, + static_cast(copySize * k8BitSize)); + } else { + rhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*rhsSymbol, copySize, rhsOffset, true); + } + RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister(); + int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(lhsOffset), lhsAddrOpnd)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(rhsOffsetVal, *rhsBaseReg)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + for (uint32 i = 0; i < (lhsSize / copySize); ++i) { + uint32 rhsBaseOffset = static_cast(rhsOffsetVal + i * copySize); + uint32 lhsBaseOffset = lhsOffset + i * copySize; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, copySize); + /* generate the load */ + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + MOperator mOpLDP = (copySize == k4BitSize) ? MOP_wldp : MOP_xldp; + bool doPair = (!rhsIsLo12 && (copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + RegOperand *result1 = nullptr; + if (doPair) { + regno_t vRegNO1 = NewVReg(kRegTyInt, std::max(4u, copySize)); + result1 = &CreateVirtualRegisterOperand(vRegNO1); + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *static_cast(rhsMemOpnd), result.GetSize(), kInsnThirdOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd)); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(rhsMemOpnd), result.GetSize(), kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + } + /* generate the store */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, nullptr); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? 
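+        /* Pair instructions have a much narrower reach than single loads/stores: the ldp/stp
+         * immediate is a signed 7-bit value scaled by the access size (-512..504 for 8-byte
+         * accesses), versus the unsigned scaled 12-bit field of ldr/str. Hence the operand is
+         * re-validated against the pair opcode before emission (illustrative AArch64):
+         *
+         *   stp x10, x11, [x1, #504]   // largest direct 8-byte pair offset
+         *   add x16, x1, #1024         // beyond that, go through a scratch register
+         *   stp x10, x11, [x16] */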
MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, result.GetSize(), kInsnThirdOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit of alignUsed */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + /* generate the load */ + Operand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed))); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + /* generate the store */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand &lhsMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, static_cast(nullptr)); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + lhsMemOpnd = *FixLargeMemOpnd(mOp, lhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else { /* rhs is iread */ + DEBUG_ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI"); + IreadNode *rhsIread = static_cast(stmt.GetRHS()); + RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, rhsIread->Opnd(0)->GetPrimType()); + MIRPtrType *rhsPointerType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); + MIRType *rhsType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsPointerType->GetPointedTyIdx())); + bool isRefField = false; + if (rhsIread->GetFieldID() != 0) { + MIRStructType *rhsStructType = static_cast(rhsType); + DEBUG_ASSERT(rhsStructType, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); + } + if (loadToRegs4StructReturn) { + /* generate move to regs. */ + CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size"); + RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ + uint32 loadSize; + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + } else { + loadSize = (lhsSize <= k4ByteSize) ? 
k4ByteSize : k8ByteSize; + } + uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + for (uint32 i = 0; i < numRegs; i++) { + OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte); + Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, + rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr); + result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), rhsmemopnd); + ld.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(ld); + } + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &mov = GetInsnBuilder()->BuildInsn(MOP_xmovrr, dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + DEBUG_ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(lhsOffset), lhsAddrOpnd)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(rhsOffset), *rhsAddrOpnd)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + DEBUG_ASSERT(copySize != 0, "expect non-zero"); + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + /* generate the load */ + uint32 operandSize = copySize * k8BitSize; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + i * copySize, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, + static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + bool doPair = ((copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + Insn *insn = nullptr; + RegOperand *result1 = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? 
MOP_wldp : MOP_xldp; + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *static_cast(rhsMemOpnd), operandSize, kInsnThirdOpnd); + result1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + insn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(operandSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(rhsMemOpnd), operandSize, kInsnSecondOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + insn->MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(*insn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + i * copySize, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, + static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *static_cast(lhsMemOpnd), operandSize, kInsnThirdOpnd); + DEBUG_ASSERT(result1 != nullptr, "result1 should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(operandSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(lhsMemOpnd), operandSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + uint32 memOpndSize = newAlignUsed * k8BitSize; + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, + static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + MOperator mOpLD = PickLdInsn(memOpndSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOpLD, *rhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + Insn &insn = GetInsnBuilder()->BuildInsn(mOpLD, result, *rhsMemOpnd); + insn.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(insn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, + static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); + MOperator mOpST = PickStInsn(memOpndSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOpST, *lhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpST, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } +} + +void AArch64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) { + uint32 offset = 0; + if (x->GetOpCode() == OP_dread) { + DreadNode *dread = static_cast(x); + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = sym->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(dread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, 
dread->GetFieldID()).first); + } + uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + /* generate move to regs for agg return */ + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + (void)parmlocator.LocateNextParm(*mirType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); + /* aggregates are 8 byte aligned. */ + Operand *rhsmemopnd = nullptr; + RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ + uint32 loadSize; + uint32 numRegs; + RegType regType; + PrimType retPty; + bool fpParm = false; + if (pLoc.numFpPureRegs) { + loadSize = pLoc.fpSize; + numRegs = pLoc.numFpPureRegs; + fpParm = true; + regType = kRegTyFloat; + retPty = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + } else { + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u64; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u32; + } + } + bool parmCopy = IsParamStructCopy(*sym); + for (uint32 i = 0; i < numRegs; i++) { + if (parmCopy) { + rhsmemopnd = &LoadStructCopyBase(*sym, + (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + static_cast(loadSize * kBitsPerByte)); + } else { + rhsmemopnd = &GetOrCreateMemOpnd(*sym, + (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + (loadSize * kBitsPerByte)); + } + result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd); + GetCurBB()->AppendInsn(ld); + } + AArch64reg regs[kFourRegister]; + regs[0] = static_cast(pLoc.reg0); + regs[1] = static_cast(pLoc.reg1); + regs[2] = static_cast(pLoc.reg2); + regs[3] = static_cast(pLoc.reg3); + RegOperand *dest; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop2; + if (fpParm) { + preg = regs[i]; + mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd; + } else { + preg = (i == 0 ? R0 : R1); + mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType); + Insn &mov = GetInsnBuilder()->BuildInsn(mop2, *dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop3; + if (fpParm) { + preg = regs[i]; + mop3 = MOP_pseudo_ret_float; + } else { + preg = (i == 0 ? 
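+                // integer halves of the aggregate return are pinned in R0 (first) and R1 (second)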
R0 : R1);
+                mop3 = MOP_pseudo_ret_int;
+            }
+            dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType);
+            Insn &pseudo = GetInsnBuilder()->BuildInsn(mop3, *dest);
+            GetCurBB()->AppendInsn(pseudo);
+        }
+        return;
+    } else if (x->GetOpCode() == OP_iread) {
+        IreadNode *iread = static_cast<IreadNode*>(x);
+        RegOperand *rhsAddrOpnd = static_cast<RegOperand*>(HandleExpr(*iread, *iread->Opnd(0)));
+        rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, iread->Opnd(0)->GetPrimType());
+        MIRPtrType *ptrType = static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()));
+        MIRType *mirType = static_cast<MIRType*>(ptrType->GetPointedType());
+        bool isRefField = false;
+        if (iread->GetFieldID() != 0) {
+            MIRStructType *structType = static_cast<MIRStructType*>(mirType);
+            mirType = structType->GetFieldType(iread->GetFieldID());
+            offset = static_cast<uint32>(GetBecommon().GetFieldOffset(*structType, iread->GetFieldID()).first);
+            isRefField = GetBecommon().IsRefField(*structType, iread->GetFieldID());
+        }
+        uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex());
+        /* generate move to regs. */
+        RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */
+        uint32 loadSize;
+        if (CGOptions::IsBigEndian()) {
+            loadSize = k8ByteSize;
+        } else {
+            loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+        }
+        uint32 numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+        for (uint32 i = 0; i < numRegs; i++) {
+            OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(offset + i * loadSize, loadSize * kBitsPerByte);
+            Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte,
+                rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr);
+            result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+            MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+            Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), rhsmemopnd);
+            ld.MarkAsAccessRefField(isRefField);
+            GetCurBB()->AppendInsn(ld);
+        }
+        RegOperand *dest;
+        for (uint32 i = 0; i < numRegs; i++) {
+            AArch64reg preg = (i == 0 ? R0 : R1);
+            dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+            Insn &mov = GetInsnBuilder()->BuildInsn(MOP_xmovrr, *dest, *(result[i]));
+            GetCurBB()->AppendInsn(mov);
+        }
+        /* Create artificial dependency to extend the live range */
+        for (uint32 i = 0; i < numRegs; i++) {
+            AArch64reg preg = (i == 0 ? R0 : R1);
+            dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+            Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *dest);
+            GetCurBB()->AppendInsn(pseudo);
+        }
+        return;
+    } else { // dummy return of 0 inserted by the front-end in the absence of a return
+        DEBUG_ASSERT(x->GetOpCode() == OP_constval, "SelectReturnSendOfStructInRegs: unexpected return operand");
+        uint32 typeSize = GetPrimTypeSize(x->GetPrimType());
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(R0, typeSize * kBitsPerByte, kRegTyInt);
+        ImmOperand &src = CreateImmOperand(0, k16BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, src));
+        return;
+    }
+}
+
+Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) {
+    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());
+    if (symbol->IsEhIndex()) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i32));
+        /* use the second register returned by __builtin_eh_return().
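+         * retMech.GetReg1() below is that second register, as populated by SetupSecondRetReg().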
*/ + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*type, retMech); + retLocator.SetupSecondRetReg(*type, retMech); + return &GetOrCreatePhysicalRegisterOperand(static_cast(retMech.GetReg1()), k64BitSize, kRegTyInt); + } + + PrimType symType = symbol->GetType()->GetPrimType(); + uint32 offset = 0; + bool parmCopy = false; + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectDread: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(expr.GetFieldID())->GetPrimType(); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first); + parmCopy = IsParamStructCopy(*symbol); + } + + uint32 dataSize = GetPrimTypeBitSize(symType); + uint32 aggSize = 0; + if (symType == PTY_agg) { + if (expr.GetPrimType() == PTY_agg) { + aggSize = static_cast(GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx())); + dataSize = ((expr.GetFieldID() == 0) ? GetPointerSize() : aggSize) << 3; + } else { + dataSize = GetPrimTypeBitSize(expr.GetPrimType()); + } + } + MemOperand *memOpnd = nullptr; + if (aggSize > k8ByteSize) { + if (parent.op == OP_eval) { + if (symbol->GetAttr(ATTR_volatile)) { + /* Need to generate loads for the upper parts of the struct. */ + Operand &dest = GetZeroOpnd(k64BitSize); + uint32 numLoads = static_cast(RoundUp(aggSize, k64BitSize) / k64BitSize); + for (uint32 o = 0; o < numLoads; ++o) { + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset + o * GetPointerSize(), GetPointerSize()); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * GetPointerSize(), GetPointerSize()); + } + if (IsImmediateOffsetOutOfRange(*memOpnd, GetPointerSize())) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, GetPointerSize()); + } + SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64); + } + } else { + /* No side-effects. No need to generate anything for eval. */ + } + } else { + if (expr.GetFieldID() != 0) { + CHECK_FATAL(false, "SelectDread: Illegal agg size"); + } + } + } + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast(dataSize)); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && + IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); + } + + PrimType resultType = expr.GetPrimType(); + RegOperand &resOpnd = GetOrCreateResOperand(parent, symType); + /* a local register variable defined with a specified register */ + if (symbol->GetAsmAttr() != UStrIdx(0) && + symbol->GetStorageClass() != kScPstatic && symbol->GetStorageClass() != kScFstatic) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + return &specifiedOpnd; + } + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? 
&ConstraintOffsetToSafeRegion(dataSize, *memOpnd) : memOpnd;
+    SelectCopy(resOpnd, resultType, *memOpnd, symType);
+    return &resOpnd;
+}
+
+RegOperand *AArch64CGFunc::SelectRegread(RegreadNode &expr) {
+    PregIdx pregIdx = expr.GetRegIdx();
+    if (IsSpecialPseudoRegister(pregIdx)) {
+        /* if it is one of the special registers */
+        return &GetOrCreateSpecialRegisterOperand(-pregIdx, expr.GetPrimType());
+    }
+    RegOperand &reg = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
+    if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) {
+        MemOperand *src = GetPseudoRegisterSpillMemoryOperand(pregIdx);
+        MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
+        PrimType stype = preg->GetPrimType();
+        uint32 srcBitLength = GetPrimTypeSize(stype) * kBitsPerByte;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(srcBitLength, stype), reg, *src));
+    }
+    return &reg;
+}
+
+void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field) {
+    const MIRSymbol *symbol = stImm.GetSymbol();
+    if (symbol->GetStorageClass() == kScAuto) {
+        SetStackProtectInfo(kAddrofStack);
+    }
+    if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) {
+        if (!CGOptions::IsQuiet()) {
+            maple::LogInfo::MapleLogger(kLlErr) <<
+                "Warning: AddrOf with a StImmOperand is not expected to be used for local variables";
+        }
+        AArch64SymbolAlloc *symLoc =
+            static_cast<AArch64SymbolAlloc*>(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex()));
+        ImmOperand *offset = nullptr;
+        if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) {
+            offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false, kUnAdjustVary);
+        } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) {
+            auto it = immOpndsRequiringOffsetAdjustmentForRefloc.find(symLoc);
+            if (it != immOpndsRequiringOffsetAdjustmentForRefloc.end()) {
+                offset = (*it).second;
+            } else {
+                offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false);
+                immOpndsRequiringOffsetAdjustmentForRefloc[symLoc] = offset;
+            }
+        } else if (mirModule.IsJavaModule()) {
+            auto it = immOpndsRequiringOffsetAdjustment.find(symLoc);
+            if ((it != immOpndsRequiringOffsetAdjustment.end()) && (symbol->GetType()->GetPrimType() != PTY_agg)) {
+                offset = (*it).second;
+            } else {
+                offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false);
+                if (symbol->GetType()->GetKind() != kTypeClass) {
+                    immOpndsRequiringOffsetAdjustment[symLoc] = offset;
+                }
+            }
+        } else {
+            /* Do not cache a modified symbol location */
+            offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false);
+        }
+
+        SelectAdd(result, *GetBaseReg(*symLoc), *offset, PTY_u64);
+        if (GetCG()->GenerateVerboseCG()) {
+            /* Add a comment */
+            Insn *insn = GetCurBB()->GetLastInsn();
+            std::string comm = "local/formal var: ";
+            comm.append(symbol->GetName());
+            insn->SetComment(comm);
+        }
+    } else if (symbol->IsThreadLocal()) {
+        SelectAddrofThreadLocal(result, stImm);
+        return;
+    } else {
+        Operand *srcOpnd = &result;
+        if (!IsAfterRegAlloc()) {
+            // Create a new vreg/preg for the upper bits of the address
+            PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64);
+            MIRPreg *tmpPreg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
+            regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64));
+            RegOperand &tmpreg = GetOrCreateVirtualRegisterOperand(vRegNO);
+
+            // Register this vreg mapping
RegisterVregMapping(vRegNO, pregIdx); + + // Store rematerialization info in the preg + tmpPreg->SetOp(OP_addrof); + tmpPreg->rematInfo.sym = symbol; + tmpPreg->fieldID = field; + tmpPreg->addrUpper = true; + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpreg, stImm)); + srcOpnd = &tmpreg; + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, result, stImm)); + } + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + + auto size = GetPointerSize() * kBitsPerByte; + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, size, static_cast(srcOpnd), + nullptr, &offset, nullptr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(size == k64BitSize ? MOP_xldr : MOP_wldr, result, memOpnd)); + + if (stImm.GetOffset() > 0) { + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, *srcOpnd, stImm)); + } + } +} + +void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field) { + const MIRSymbol *symbol = memOpnd.GetSymbol(); + if (symbol->GetStorageClass() == kScAuto) { + auto *offsetOpnd = static_cast(memOpnd.GetOffsetImmediate()); + Operand &immOpnd = CreateImmOperand(offsetOpnd->GetOffsetValue(), PTY_u32, false); + DEBUG_ASSERT(memOpnd.GetBaseRegister() != nullptr, "nullptr check"); + SelectAdd(result, *memOpnd.GetBaseRegister(), immOpnd, PTY_u32); + SetStackProtectInfo(kAddrofStack); + } else if (!IsAfterRegAlloc()) { + // Create a new vreg/preg for the upper bits of the address + PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); + MIRPreg *tmpPreg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64)); + RegOperand &tmpreg = GetOrCreateVirtualRegisterOperand(vRegNO); + + // Register this vreg mapping + RegisterVregMapping(vRegNO, pregIdx); + + // Store rematerialization info in the preg + tmpPreg->SetOp(OP_addrof); + tmpPreg->rematInfo.sym = symbol; + tmpPreg->fieldID = field; + tmpPreg->addrUpper = true; + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpreg, memOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, tmpreg, memOpnd)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, result, memOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, result, memOpnd)); + } +} + +Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + int32 offset = 0; + AddrofoffNode &addrofoffExpr = static_cast(static_cast(expr)); + if (isAddrofoff) { + offset = addrofoffExpr.offset; + } else { + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + /* with array of structs, it is possible to have nullptr */ + if (structType != nullptr) { + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + } + } + } + if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && + ((!isAddrofoff && expr.GetFieldID() != 0) || + (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { + /* + * Struct param is copied 
on the stack by the caller if the struct size > 16.
+         * Else if the size <= 16, the struct param is copied into one or two registers.
+         */
+        RegOperand *stackAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+        /* load the base address of the struct copy from the stack. */
+        SelectAddrof(*stackAddr, CreateStImmOperand(*symbol, 0, 0));
+        Operand *structAddr;
+        if (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) <= k16ByteSize) {
+            isAggParamInReg = true;
+            structAddr = stackAddr;
+        } else {
+            OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize);
+            MemOperand *mo = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte,
+                stackAddr, nullptr, offopnd, nullptr);
+            structAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *structAddr, *mo));
+        }
+        if (offset == 0) {
+            return structAddr;
+        } else {
+            /* add the struct offset to the base address */
+            Operand *result = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+            ImmOperand *imm = &CreateImmOperand(PTY_a64, offset);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *structAddr, *imm));
+            return result;
+        }
+    }
+    PrimType ptype = expr.GetPrimType();
+    Operand &result = GetOrCreateResOperand(parent, ptype);
+    if (symbol->IsReflectionClassInfo() && !symbol->IsReflectionArrayClassInfo() && !GetCG()->IsLibcore()) {
+        /*
+         * Turn addrof __cinf_X into a load of _PTR__cinf_X
+         *   adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B
+         *   ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B]
+         */
+        std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName();
+        MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr();
+        symbol = GetMirModule().GetMIRBuilder()->GetOrCreateGlobalDecl(ptrName, *ptrType);
+        symbol->SetStorageClass(kScFstatic);
+
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, result, CreateStImmOperand(*symbol, 0, 0)));
+        /* make it non-rematerializable. */
+        MIRPreg *preg = GetPseudoRegFromVirtualRegNO(static_cast<RegOperand&>(result).GetRegisterNumber());
+        if (preg) {
+            preg->SetOp(OP_undef);
+        }
+        return &result;
+    }
+
+    SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), isAddrofoff ? 0 : expr.GetFieldID());
+    return &result;
+}
+
+Operand *AArch64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) {
+    return SelectAddrof(static_cast<AddrofNode&>(static_cast<BaseNode&>(expr)), parent, true);
+}
+
+Operand &AArch64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) {
+    uint32 instrSize = static_cast<uint32>(expr.SizeOfInstr());
+    PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 :
+                        (instrSize == k4ByteSize) ? PTY_u32 :
+                        (instrSize == k2ByteSize) ? PTY_u16 : PTY_u8;
+    Operand &operand = GetOrCreateResOperand(parent, primType);
+    MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx());
+    SelectAddrof(operand, CreateStImmOperand(*mirFunction->GetFuncSymbol(), 0, 0));
+    return operand;
+}
+
+/* For an entire aggregate that can fit inside a single 8 byte register.
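+ * Maps the aggregate's size in bits (8/16/32/64) to the matching unsigned integer PrimType.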
*/ +PrimType AArch64CGFunc::GetDestTypeFromAggSize(uint32 bitSize) const { + PrimType primType; + switch (bitSize) { + case k8BitSize: { + primType = PTY_u8; + break; + } + case k16BitSize: { + primType = PTY_u16; + break; + } + case k32BitSize: { + primType = PTY_u32; + break; + } + case k64BitSize: { + primType = PTY_u64; + break; + } + default: + CHECK_FATAL(false, "aggregate of unhandled size"); + } + return primType; +} + +Operand &AArch64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + /* adrp reg, label-id */ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? PTY_u16 : PTY_u8; + Operand &dst = GetOrCreateResOperand(parent, primType); + Operand &immOpnd = CreateImmOperand(expr.GetOffset(), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); + return dst; +} + +Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) { + auto offset = ireadoff.GetOffset(); + auto primType = ireadoff.GetPrimType(); + auto bitSize = GetPrimTypeBitSize(primType); + auto *baseAddr = ireadoff.Opnd(0); + auto *result = &CreateRegisterOperandOfType(primType); + auto *addrOpnd = HandleExpr(ireadoff, *baseAddr); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize); + auto mop = PickLdInsn(bitSize, primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *result, memOpnd)); + return result; +} + +RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType, + AArch64reg baseRegno) { + MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize, baseRegno); + RegOperand *result = &GetOrCreateVirtualRegisterOperand(NewVReg(regType, byteSize)); + MOperator mOp = PickLdInsn(byteSize * kBitsPerByte, primType); + Insn &load = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd); + GetCurBB()->AppendInsn(load); + return result; +} + +RegOperand *AArch64CGFunc::LmbcStructReturnLoad(int32 offset) { + RegOperand *result = nullptr; + MIRFunction &func = GetFunction(); + CHECK_FATAL(func.IsReturnStruct(), "LmbcStructReturnLoad: not struct return"); + MIRType *ty = func.GetReturnType(); + uint32 sz = GetBecommon().GetTypeSize(ty->GetTypeIndex()); + uint32 fpSize; + uint32 numFpRegs = FloatParamRegRequired(static_cast(ty), fpSize); + if (numFpRegs > 0) { + PrimType pType = (fpSize <= k4ByteSize) ? 
PTY_f32 : PTY_f64; + for (int32 i = (numFpRegs - kOneRegister); i > 0; --i) { + result = GenLmbcParamLoad(offset + (i * static_cast(fpSize)), fpSize, kRegTyFloat, pType); + AArch64reg regNo = static_cast(V0 + static_cast(i)); + RegOperand *reg = &GetOrCreatePhysicalRegisterOperand(regNo, fpSize * kBitsPerByte, kRegTyFloat); + SelectCopy(*reg, pType, *result, pType); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *reg); + GetCurBB()->AppendInsn(pseudo); + } + result = GenLmbcParamLoad(offset, fpSize, kRegTyFloat, pType); + } else if (sz <= k4ByteSize) { + result = GenLmbcParamLoad(offset, k4ByteSize, kRegTyInt, PTY_u32); + } else if (sz <= k8ByteSize) { + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } else if (sz <= k16ByteSize) { + result = GenLmbcParamLoad(offset + k8ByteSizeInt, k8ByteSize, kRegTyInt, PTY_i64); + RegOperand *r1 = &GetOrCreatePhysicalRegisterOperand(R1, k8ByteSize * kBitsPerByte, kRegTyInt); + SelectCopy(*r1, PTY_i64, *result, PTY_i64); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *r1); + GetCurBB()->AppendInsn(pseudo); + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } + return result; +} + +Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + RegType regty = GetRegTyFromPrimTy(primType); + RegOperand *result = nullptr; + if (offset >= 0) { + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(static_cast(offset)); + if (info->GetPrimType() == PTY_agg) { + if (info->IsOnStack()) { + result = GenLmbcParamLoad(info->GetOnStackOffset(), GetPrimTypeSize(PTY_a64), kRegTyInt, PTY_a64); + regno_t baseRegno = result->GetRegisterNumber(); + result = GenLmbcParamLoad(offset - static_cast(info->GetOffset()), + bytelen, regty, primType, (AArch64reg)baseRegno); + } else if (primType == PTY_agg) { + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + } else { + CHECK_FATAL(primType == info->GetPrimType(), "Incorrect primtype"); + CHECK_FATAL(offset == info->GetOffset(), "Incorrect offset"); + if (info->GetRegNO() == 0 || !info->HasRegassign()) { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else { + result = &GetOrCreatePhysicalRegisterOperand(static_cast(info->GetRegNO()), bitlen, regty); + } + } + } else { + if (primType == PTY_agg) { + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + } + return result; +} + +Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, + int extraOffset, PrimType finalBitFieldDestType) { + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx()); + MIRPtrType *pointerType = static_cast(type); + DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iread node"); + MIRType *pointedType = nullptr; + bool isRefField = false; + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + + if (expr.GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + MIRStructType *structType = nullptr; + if (pointedTy->GetKind() != 
kTypeJArray) {
+        structType = static_cast<MIRStructType*>(pointedTy);
+    } else {
+        /* it's a Jarray type; use its parent's field info: java.lang.Object */
+        structType = static_cast<MIRJarrayType*>(pointedTy)->GetParentType();
+    }
+
+    DEBUG_ASSERT(structType != nullptr, "SelectIread: non-zero fieldID for non-structure");
+    pointedType = structType->GetFieldType(expr.GetFieldID());
+    offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first;
+    isRefField = GetBecommon().IsRefField(*structType, expr.GetFieldID());
+} else {
+    pointedType = GetPointedToType(*pointerType);
+    if (GetFunction().IsJava() && (pointedType->GetKind() == kTypePointer)) {
+        MIRType *nextPointedType =
+            GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<MIRPtrType*>(pointedType)->GetPointedTyIdx());
+        if (nextPointedType->GetKind() != kTypeScalar) {
+            isRefField = true; /* read from an object array, or a high-dimensional array */
+        }
+    }
+}
+
+RegType regType = GetRegTyFromPrimTy(expr.GetPrimType());
+uint32 regSize = GetPrimTypeSize(expr.GetPrimType());
+if (expr.GetFieldID() == 0 && pointedType->GetPrimType() == PTY_agg) {
+    /* Maple IR allows a small struct to be passed and loaded into a single register. */
+    if (regType == kRegTyFloat) {
+        /* regSize is already correct */
+    } else {
+        uint32 sz = GetBecommon().GetTypeSize(pointedType->GetTypeIndex().GetIdx());
+        regSize = (sz <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+    }
+} else if (regSize < k4ByteSize) {
+    regSize = k4ByteSize; /* 32-bit */
+}
+Operand *result = nullptr;
+if (parent.GetOpCode() == OP_eval) {
+    /* regSize << 3, that is regSize * 8: convert bytes to bits */
+    result = &GetZeroOpnd(regSize << 3);
+} else {
+    result = &GetOrCreateResOperand(parent, expr.GetPrimType());
+}
+
+PrimType destType = pointedType->GetPrimType();
+
+uint32 bitSize = 0;
+if ((pointedType->GetKind() == kTypeStructIncomplete) || (pointedType->GetKind() == kTypeClassIncomplete) ||
+    (pointedType->GetKind() == kTypeInterfaceIncomplete)) {
+    bitSize = GetPrimTypeBitSize(expr.GetPrimType());
+    maple::LogInfo::MapleLogger(kLlErr) << "Warning: objsize is zero! \n";
+} else {
+    if (pointedType->IsStructType()) {
+        MIRStructType *structType = static_cast<MIRStructType*>(pointedType);
+        /* size << 3, that is size * 8: convert bytes to bits */
+        bitSize = std::min(structType->GetSize(), static_cast<size_t>(GetPointerSize())) << 3;
+    } else {
+        bitSize = GetPrimTypeBitSize(destType);
+    }
+    if (regType == kRegTyFloat) {
+        destType = expr.GetPrimType();
+        bitSize = GetPrimTypeBitSize(destType);
+    } else if (destType == PTY_agg) {
+        switch (bitSize) {
+            case k8BitSize:
+                destType = PTY_u8;
+                break;
+            case k16BitSize:
+                destType = PTY_u16;
+                break;
+            case k32BitSize:
+                destType = PTY_u32;
+                break;
+            case k64BitSize:
+                destType = PTY_u64;
+                break;
+            default:
+                destType = PTY_u64; // when evaluating an agg: a way to round up
+                break;
+        }
+    }
+}
+
+MemOperand *memOpnd =
+    CreateMemOpndOrNull(destType, expr, *expr.Opnd(0), static_cast<int64>(offset) + extraOffset, memOrd);
+if (aggParamReg != nullptr) {
+    isAggParamInReg = false;
+    return aggParamReg;
+}
+DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr");
+if (isVolLoad && (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi)) {
+    memOrd = AArch64isa::kMoAcquire;
+    isVolLoad = false;
+}
+
+memOpnd = memOpnd->IsOffsetMisaligned(bitSize) ?
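+    // a misaligned offset is first rebased into a region the access can address safely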
&ConstraintOffsetToSafeRegion(bitSize, *memOpnd) : memOpnd; + if (memOrd == AArch64isa::kMoNone) { + MOperator mOp = 0; + if (finalBitFieldDestType == kPtyInvalid) { + mOp = PickLdInsn(bitSize, destType); + } else { + mOp = PickLdInsn(GetPrimTypeBitSize(finalBitFieldDestType), finalBitFieldDestType); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, memOpnd, 1)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd); + if (parent.GetOpCode() == OP_eval && result->IsRegister() && + static_cast(result)->GetRegisterNumber() == RZR) { + insn.SetComment("null-check"); + } + GetCurBB()->AppendInsn(insn); + + if (parent.op != OP_eval) { + const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + auto *prop = md->GetOpndDes(0); + if ((prop->GetSize()) < insn.GetOperand(0).GetSize()) { + switch (destType) { + case PTY_i8: + mOp = MOP_xsxtb64; + break; + case PTY_i16: + mOp = MOP_xsxth64; + break; + case PTY_i32: + mOp = MOP_xsxtw64; + break; + case PTY_u8: + mOp = MOP_xuxtb32; + break; + case PTY_u16: + mOp = MOP_xuxth32; + break; + case PTY_u32: + mOp = MOP_xuxtw64; + break; + default: + break; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + mOp, insn.GetOperand(0), insn.GetOperand(0))); + } + } + } else { + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, bitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } + AArch64CGFunc::SelectLoadAcquire(*result, destType, *memOpnd, destType, memOrd, false); + } + GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField); + return result; +} + +Operand *AArch64CGFunc::SelectIntConst(MIRIntConst &intConst) { + return &CreateImmOperand(intConst.GetExtValue(), GetPrimTypeSize(intConst.GetType().GetPrimType()) * kBitsPerByte, + false); +} + +template +Operand *SelectLiteral(T *c, MIRFunction *func, uint32 labelIdx, AArch64CGFunc *cgFunc) { + MIRSymbol *st = func->GetSymTab()->CreateSymbol(kScopeLocal); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + std::string funcName = funcSt->GetName(); + lblStr.append(funcName).append(std::to_string(labelIdx)); + st->SetNameStrIdx(lblStr); + st->SetStorageClass(kScPstatic); + st->SetSKind(kStConst); + st->SetKonst(c); + // todo: to avoid conflict between local symbol index and label index. + cgFunc->SetLocalSymLabelIndex(*st, (func->GetStIdx().Idx() << k8BitSize) + labelIdx); + PrimType primType = c->GetType().GetPrimType(); + st->SetTyIdx(TyIdx(primType)); + uint32 typeBitSize = GetPrimTypeBitSize(primType); + + if (cgFunc->GetMirModule().IsCModule() && (T::GetPrimType() == PTY_f32 || T::GetPrimType() == PTY_f64)) { + return static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } + if (T::GetPrimType() == PTY_f32) { + return (fabs(c->GetValue()) < std::numeric_limits::denorm_min()) ? + static_cast(&cgFunc->CreateImmOperand( + Operand::kOpdFPImmediate, 0, static_cast(typeBitSize), false)) : + static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } else if (T::GetPrimType() == PTY_f64) { + return (fabs(c->GetValue()) < std::numeric_limits::denorm_min()) ? 
+ static_cast(&cgFunc->CreateImmOperand( + Operand::kOpdFPImmediate, 0, static_cast(typeBitSize), false)) : + static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } else { + CHECK_FATAL(false, "Unsupported const type"); + } + return nullptr; +} + +Operand *AArch64CGFunc::HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent) { + Operand *result; + bool is64Bits = (GetPrimTypeBitSize(stype) == k64BitSize); + uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff); + uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3; + uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f; + bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f)); + canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame; + if (canRepreset) { + uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7; + uint64 temp2 = is64Bits ? val >> 48 : val >> 19; + int64 imm8 = (temp2 & 0x7f) | temp1; + Operand *newOpnd0 = &CreateImmOperand(imm8, k8BitSize, true, kNotVary, true); + result = &GetOrCreateResOperand(parent, stype); + MOperator mopFmov = (is64Bits ? MOP_xdfmovri : MOP_wsfmovri); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, *newOpnd0)); + } else { + if (is64Bits) { // For DoubleConst, use ldr .literal + uint32 labelIdxTmp = GetLabelIdx(); + result = SelectLiteral(static_cast(&mirConst), &GetFunction(), labelIdxTmp++, this); + SetLabelIdx(labelIdxTmp); + return result; + } + Operand *newOpnd0 = &CreateImmOperand(val, GetPrimTypeSize(stype) * kBitsPerByte, false); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = LoadIntoRegister(*newOpnd0, itype); + + result = &GetOrCreateResOperand(parent, stype); + MOperator mopFmov = (is64Bits ? 
MOP_xvmovdr : MOP_xvmovsr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, regOpnd)); + } + return result; +} + +Operand *AArch64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) { + PrimType stype = floatConst.GetType().GetPrimType(); + int32 val = floatConst.GetIntValue(); + /* according to aarch64 encoding format, convert int to float expression */ + Operand *result; + result = HandleFmovImm(stype, val, floatConst, parent); + return result; +} + +Operand *AArch64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) { + PrimType stype = doubleConst.GetType().GetPrimType(); + int64 val = doubleConst.GetIntValue(); + /* according to aarch64 encoding format, convert int to float expression */ + Operand *result; + result = HandleFmovImm(stype, val, doubleConst, parent); + return result; +} + +template +Operand *SelectStrLiteral(T &c, AArch64CGFunc &cgFunc) { + std::string labelStr; + if (c.GetKind() == kConstStrConst) { + labelStr.append(".LUstr_"); + } else if (c.GetKind() == kConstStr16Const) { + labelStr.append(".LUstr16_"); + } else { + CHECK_FATAL(false, "Unsupported literal type"); + } + labelStr.append(std::to_string(c.GetValue())); + + MIRSymbol *labelSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(labelStr)); + if (labelSym == nullptr) { + labelSym = cgFunc.GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c.GetType()); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + /* c may be local, we need a global node here */ + labelSym->SetKonst(cgFunc.NewMirConst(c)); + } + + if (c.GetPrimType() == PTY_ptr) { + StImmOperand &stOpnd = cgFunc.CreateStImmOperand(*labelSym, 0, 0); + RegOperand &addrOpnd = cgFunc.CreateRegisterOperandOfType(PTY_a64); + cgFunc.SelectAddrof(addrOpnd, stOpnd); + return &addrOpnd; + } + CHECK_FATAL(false, "Unsupported const string type"); + return nullptr; +} + +Operand *AArch64CGFunc::SelectStrConst(MIRStrConst &strConst) { + return SelectStrLiteral(strConst, *this); +} + +Operand *AArch64CGFunc::SelectStr16Const(MIRStr16Const &str16Const) { + return SelectStrLiteral(str16Const, *this); +} + +static inline void AppendInstructionTo(Insn &i, CGFunc &f) { + f.GetCurBB()->AppendInsn(i); +} + +/* + * Returns the number of leading 0-bits in x, starting at the most significant bit position. + * If x is 0, the result is -1. + */ +static int32 GetHead0BitNum(int64 val) { + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((0x8000000000000000ULL >> static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +/* + * Returns the number of trailing 0-bits in x, starting at the least significant bit position. + * If x is 0, the result is -1. + */ +static int32 GetTail0BitNum(int64 val) { + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((static_cast(1) << static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +/* + * If the input integer is power of 2, return log2(input) + * else return -1 + */ +static inline int32 GetLog2(uint64 val) { + if (__builtin_popcountll(val) == 1) { + return __builtin_ffsll(static_cast(val)) - 1; + } + return -1; +} + +MOperator AArch64CGFunc::PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const { + switch (cmpOp) { + case OP_ne: + return (brOp == OP_brtrue) ? 
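+            // brtrue branches when the compare holds; brfalse picks the inverted branch opcode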
MOP_bne : MOP_beq; + case OP_eq: + return (brOp == OP_brtrue) ? MOP_beq : MOP_bne; + case OP_lt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_blt : MOP_blo) + : (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs)); + case OP_le: + return (brOp == OP_brtrue) ? (isSigned ? MOP_ble : MOP_bls) + : (isFloat ? MOP_bhi : (isSigned ? MOP_bgt : MOP_bhi)); + case OP_gt: + return (brOp == OP_brtrue) ? (isFloat ? MOP_bgt : (isSigned ? MOP_bgt : MOP_bhi)) + : (isSigned ? MOP_ble : MOP_bls); + case OP_ge: + return (brOp == OP_brtrue) ? (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs)) + : (isSigned ? MOP_blt : MOP_blo); + default: + CHECK_FATAL(false, "PickJmpInsn error"); + } +} + +bool AArch64CGFunc::GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, + PrimType primType, + LabelOperand &targetOpnd, Operand &opnd0) { + bool finish = true; + MOperator mOpCode = MOP_undef; + switch (cmpOp) { + case OP_ne: { + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz; + } else { + mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd)); + break; + } + case OP_eq: { + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz; + } else { + mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd)); + break; + } + /* + * TBZ/TBNZ instruction have a range of +/-32KB, need to check if the jump target is reachable in a later + * phase. If the branch target is not reachable, then we change tbz/tbnz into combination of ubfx and + * cbz/cbnz, which will clobber one extra register. With LSRA under O2, we can use of the reserved registers + * for that purpose. + */ + case OP_lt: { + if (primType == PTY_u64 || primType == PTY_u32) { + return false; + } + ImmOperand &signBit = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false); + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz; + } else { + mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd)); + break; + } + case OP_ge: { + if (primType == PTY_u64 || primType == PTY_u32) { + return false; + } + ImmOperand &signBit = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false); + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz; + } else { + mOpCode = is64Bits ? 
MOP_xtbnz : MOP_wtbnz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd)); + break; + } + default: + finish = false; + break; + } + return finish; +} + +void AArch64CGFunc::SelectIgoto(Operand *opnd0) { + Operand *srcOpnd = opnd0; + if (opnd0->GetKind() == Operand::kOpdMem) { + Operand *dst = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *dst, *opnd0)); + srcOpnd = dst; + } + GetCurBB()->SetKind(BB::kBBIgoto); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, *srcOpnd)); +} + +void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, + Operand &origOpnd1, PrimType primType, bool signedCond) { + Operand *opnd0 = &origOpnd0; + Operand *opnd1 = &origOpnd1; + opnd0 = &LoadIntoRegister(origOpnd0, primType); + + bool is64Bits = GetPrimTypeBitSize(primType) == k64BitSize; + bool isFloat = IsPrimitiveFloat(primType); + Operand &rflag = GetOrCreateRflag(); + if (isFloat) { + opnd1 = &LoadIntoRegister(origOpnd1, primType); + MOperator mOp = is64Bits ? MOP_dcmperr : ((GetPrimTypeBitSize(primType) == k32BitSize) ? MOP_scmperr : MOP_hcmperr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1)); + } else { + bool isImm = ((origOpnd1.GetKind() == Operand::kOpdImmediate) || (origOpnd1.GetKind() == Operand::kOpdOffset)); + if ((origOpnd1.GetKind() != Operand::kOpdRegister) && !isImm) { + opnd1 = &SelectCopy(origOpnd1, primType, primType); + } + MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr; + + if (isImm) { + /* Special cases, i.e., comparing with zero + * Do not perform optimization for C, unlike Java which has no unsigned int. + */ + if (static_cast(opnd1)->IsZero() && + (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0)) { + bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, primType, targetOpnd, *opnd0); + if (finish) { + return; + } + } + + /* + * aarch64 assembly takes up to 24-bits immediate, generating + * either cmp or cmp with shift 12 encoding + */ + ImmOperand *immOpnd = static_cast(opnd1); + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + mOp = is64Bits ? MOP_xcmpri : MOP_wcmpri; + } else { + opnd1 = &SelectCopy(*opnd1, primType, primType); + } + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1)); + } + + bool isSigned = IsPrimitiveInteger(primType) ? IsSignedInteger(primType) : (signedCond ? 
true : false); + MOperator jmpOperator = PickJmpInsn(jmpOp, cmpOp, isFloat, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOperator, rflag, targetOpnd)); +} + +/* + * brtrue @label0 (ge u8 i32 ( + * cmp i32 i64 (dread i64 %Reg2_J, dread i64 %Reg4_J), + * constval i32 0)) + * ===> + * cmp r1, r2 + * bge Cond, label0 + */ +void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) { + DEBUG_ASSERT(expr.GetOpCode() == OP_cmp, "unexpect opcode"); + Operand *opnd0 = HandleExpr(expr, *expr.Opnd(0)); + Operand *opnd1 = HandleExpr(expr, *expr.Opnd(1)); + CompareNode *node = static_cast(&expr); + bool isFloat = IsPrimitiveFloat(node->GetOpndType()); + opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType()); + /* + * most of FP constants are passed as MemOperand + * except 0.0 which is passed as kOpdFPImmediate + */ + Operand::OperandType opnd1Type = opnd1->GetKind(); + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = &LoadIntoRegister(*opnd1, node->GetOpndType()); + } + SelectAArch64Cmp(*opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(node->GetOpndType())); + /* handle condgoto now. */ + LabelIdx labelIdx = stmt.GetOffset(); + BaseNode *condNode = stmt.Opnd(0); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx); + Opcode cmpOp = condNode->GetOpCode(); + PrimType pType = static_cast(condNode)->GetOpndType(); + isFloat = IsPrimitiveFloat(pType); + Operand &rflag = GetOrCreateRflag(); + bool isSigned = IsPrimitiveInteger(pType) ? IsSignedInteger(pType) : + (IsSignedInteger(condNode->GetPrimType()) ? true : false); + MOperator jmpOp = PickJmpInsn(stmt.GetOpCode(), cmpOp, isFloat, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOp, rflag, targetOpnd)); +} + +/* + * Special case: + * brfalse(ge (cmpg (op0, op1), 0) ==> + * fcmp op1, op2 + * blo + */ +void AArch64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr) { + auto &cmpNode = static_cast(expr); + Operand *opnd0 = HandleExpr(cmpNode, *cmpNode.Opnd(0)); + Operand *opnd1 = HandleExpr(cmpNode, *cmpNode.Opnd(1)); + PrimType operandType = cmpNode.GetOpndType(); + opnd0 = opnd0->IsRegister() ? static_cast(opnd0) + : &SelectCopy(*opnd0, operandType, operandType); + Operand::OperandType opnd1Type = opnd1->GetKind(); + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = opnd1->IsRegister() ? 
static_cast(opnd1) + : &SelectCopy(*opnd1, operandType, operandType); + } +#ifdef DEBUG + bool isFloat = IsPrimitiveFloat(operandType); + if (!isFloat) { + DEBUG_ASSERT(false, "incorrect operand types"); + } +#endif + SelectTargetFPCmpQuiet(*opnd0, *opnd1, GetPrimTypeBitSize(operandType)); + Operand &rFlag = GetOrCreateRflag(); + LabelIdx tempLabelIdx = stmt.GetOffset(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(tempLabelIdx); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blo, rFlag, targetOpnd)); +} + +void AArch64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) { + /* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ + LabelIdx labelIdx = stmt.GetOffset(); + BaseNode *condNode = stmt.Opnd(0); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx); + Opcode cmpOp; + + if (opnd0.IsRegister() && (static_cast(&opnd0)->GetValidBitsNum() == 1) && + (condNode->GetOpCode() == OP_lior)) { + ImmOperand &condBit = CreateImmOperand(0, k8BitSize, false); + if (stmt.GetOpCode() == OP_brtrue) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_wtbnz, static_cast(opnd0), condBit, targetOpnd)); + } else { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_wtbz, static_cast(opnd0), condBit, targetOpnd)); + } + return; + } + + PrimType pType; + if (kOpcodeInfo.IsCompare(condNode->GetOpCode())) { + cmpOp = condNode->GetOpCode(); + pType = static_cast(condNode)->GetOpndType(); + } else { + /* not a compare node; dread for example, take its pType */ + cmpOp = OP_ne; + pType = condNode->GetPrimType(); + } + bool signedCond = IsSignedInteger(pType) || IsPrimitiveFloat(pType); + SelectCondGoto(targetOpnd, stmt.GetOpCode(), cmpOp, opnd0, opnd1, pType, signedCond); +} + +void AArch64CGFunc::SelectGoto(GotoNode &stmt) { + Operand &targetOpnd = GetOrCreateLabelOperand(stmt.GetOffset()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + GetCurBB()->SetKind(BB::kBBGoto); +} + +Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? 
PTY_i32 : PTY_u32)));
+        if (parent.GetOpCode() == OP_regassign) {
+            auto &regAssignNode = static_cast<const RegassignNode&>(parent);
+            PregIdx pregIdx = regAssignNode.GetRegIdx();
+            if (IsSpecialPseudoRegister(pregIdx)) {
+                resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype);
+            } else {
+                resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
+            }
+        } else {
+            resOpnd = &CreateRegisterOperandOfType(primType);
+        }
+        SelectAdd(*resOpnd, opnd0, opnd1, primType);
+    } else {
+        /* vector operands */
+        resOpnd = SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1,
+                                    node.Opnd(1)->GetPrimType(), OP_add);
+    }
+    return resOpnd;
+}
+
+void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    if (opnd0Type != Operand::kOpdRegister) {
+        /* add #imm, #imm */
+        if (opnd1Type != Operand::kOpdRegister) {
+            SelectAdd(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
+            return;
+        }
+        /* add #imm, reg */
+        SelectAdd(resOpnd, opnd1, opnd0, primType); /* commutative */
+        return;
+    }
+    /* add reg, reg */
+    if (opnd1Type == Operand::kOpdRegister) {
+        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI add");
+        MOperator mOp = IsPrimitiveFloat(primType) ?
+            (is64Bits ? MOP_dadd : MOP_sadd) : (is64Bits ? MOP_xaddrrr : MOP_waddrrr);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+        return;
+    } else if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
+        /* add reg, other operand type */
+        SelectAdd(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
+        return;
+    } else {
+        /* add reg, #imm */
+        ImmOperand *immOpnd = static_cast<ImmOperand*>(&opnd1);
+        if (immOpnd->IsNegative()) {
+            immOpnd->Negate();
+            SelectSub(resOpnd, opnd0, *immOpnd, primType);
+            return;
+        }
+        if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
+            /*
+             * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
+             * ADD Xd|SP, Xn|SP, #imm{, shift}   ; 64-bit general registers
+             * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
+             * aarch64 assembly takes up to 24 bits: a 12-bit immediate, optionally
+             * shifted left by 12 when the lower 12 bits are all 0
+             */
+            MOperator mOpCode = MOP_undef;
+            Operand *newOpnd0 = &opnd0;
+            if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
+                  immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
+                /* process the higher 12 bits */
+                ImmOperand &immOpnd2 =
+                    CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
+                                     immOpnd->GetSize(), immOpnd->IsSignedValue());
+                mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
+                Operand *tmpRes = IsAfterRegAlloc() ? &resOpnd : &CreateRegisterOperandOfType(primType);
+                BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
+                Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, *tmpRes, opnd0, immOpnd2, shiftopnd);
+                GetCurBB()->AppendInsn(newInsn);
+                immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
+                newOpnd0 = tmpRes;
+            }
+            /* process the lower 12 bits */
+            mOpCode = is64Bits ?
+
+Operand *AArch64CGFunc::SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1,
+                                   const BaseNode &parent) {
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    /* promoted type */
+    PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectMadd(resOpnd, opndM0, opndM1, opnd1, primType);
+    return &resOpnd;
+}
+
+void AArch64CGFunc::SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) {
+    Operand::OperandType opndM0Type = opndM0.GetKind();
+    Operand::OperandType opndM1Type = opndM1.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+
+    if (opndM0Type != Operand::kOpdRegister) {
+        SelectMadd(resOpnd, SelectCopy(opndM0, primType, primType), opndM1, opnd1, primType);
+        return;
+    } else if (opndM1Type != Operand::kOpdRegister) {
+        SelectMadd(resOpnd, opndM0, SelectCopy(opndM1, primType, primType), opnd1, primType);
+        return;
+    } else if (opnd1Type != Operand::kOpdRegister) {
+        SelectMadd(resOpnd, opndM0, opndM1, SelectCopy(opnd1, primType, primType), primType);
+        return;
+    }
+
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "NYI MAdd");
+    MOperator mOp = is64Bits ? MOP_xmaddrrrr : MOP_wmaddrrrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opndM0, opndM1, opnd1));
+}
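+
+/*
+ * Illustration (not in the original source): with all three operands in
+ * registers, SelectMadd(res, a, b, c, PTY_i32) emits a single fused
+ *   madd wres, wa, wb, wc   // res = a * b + c
+ * so a mul followed by an add collapses into one instruction.
+ */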
+
+Operand &AArch64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) {
+    BaseNode *opnd0 = node.Opnd(0);
+    BaseNode *opnd1 = node.Opnd(1);
+    DEBUG_ASSERT(opnd1->GetOpCode() == OP_constval, "Internal error, opnd1->op should be OP_constval.");
+
+    switch (opnd0->GetOpCode()) {
+        case OP_regread: {
+            RegreadNode *regreadNode = static_cast<RegreadNode*>(opnd0);
+            return *SelectRegread(*regreadNode);
+        }
+        case OP_addrof: {
+            AddrofNode *addrofNode = static_cast<AddrofNode*>(opnd0);
+            MIRSymbol &symbol = *mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofNode->GetStIdx());
+            DEBUG_ASSERT(addrofNode->GetFieldID() == 0, "For debug SelectCGArrayElemAdd.");
+
+            Operand &result = GetOrCreateResOperand(parent, PTY_a64);
+
+            /* OP_constval */
+            ConstvalNode *constvalNode = static_cast<ConstvalNode*>(opnd1);
+            MIRConst *mirConst = constvalNode->GetConstVal();
+            MIRIntConst *mirIntConst = static_cast<MIRIntConst*>(mirConst);
+            SelectAddrof(result, CreateStImmOperand(symbol, mirIntConst->GetExtValue(), 0));
+
+            return result;
+        }
+        default:
+            CHECK_FATAL(0, "Internal error, cannot handle opnd0.");
+    }
+}
+
+void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(primType);
+    Operand *opnd0Bak = &LoadIntoRegister(opnd0, primType);
+    if (opnd1Type == Operand::kOpdRegister) {
+        MOperator mOp = isFloat ? (is64Bits ? MOP_dsub : MOP_ssub) : (is64Bits ? MOP_xsubrrr : MOP_wsubrrr);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *opnd0Bak, opnd1));
+        return;
+    }
+
+    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdOffset)) {
+        SelectSub(resOpnd, *opnd0Bak, SelectCopy(opnd1, primType, primType), primType);
+        return;
+    }
+
+    ImmOperand *immOpnd = static_cast<ImmOperand*>(&opnd1);
+    if (immOpnd->IsNegative()) {
+        immOpnd->Negate();
+        SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType);
+        return;
+    }
+
+    int64 higher12BitVal = static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits);
+    if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) {
+        /*
+         * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
+         * SUB Xd|SP,  Xn|SP,  #imm{, shift} ; 64-bit general registers
+         * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
+         * aarch64 assembly takes up to 24 bits if the lower 12 bits are all 0
+         * a large offset is treated as sub (higher 12 bits + 4096) + add,
+         * which gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store
+         */
+        MOperator mOpCode = MOP_undef;
+        bool isSplitSub = false;
+        if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
+              immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
+            isSplitSub = true;
+            /* process higher 12 bits */
+            ImmOperand &immOpnd2 =
+                CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue());
+
+            mOpCode = is64Bits ?
MOP_xsubrri24 : MOP_wsubrri24; + BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, immOpnd2, shiftopnd); + GetCurBB()->AppendInsn(newInsn); + immOpnd->ModuloByPow2(static_cast(kMaxImmVal12Bits)); + immOpnd->SetValue(static_cast(kMax12UnsignedImm) - immOpnd->GetValue()); + opnd0Bak = &resOpnd; + } + /* process lower 12 bits */ + mOpCode = isSplitSub ? (is64Bits ? MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, *immOpnd); + GetCurBB()->AppendInsn(newInsn); + return; + } + + /* load into register */ + int64 immVal = immOpnd->GetValue(); + int32 tail0bitNum = GetTail0BitNum(immVal); + int32 head0bitNum = GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; + RegOperand ®Opnd = CreateRegisterOperandOfType(primType); + if (isAfterRegAlloc) { + RegType regty = GetRegTyFromPrimTy(primType); + uint32 bytelen = GetPrimTypeSize(primType); + regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast(R16), bytelen, regty); + } + + if (bitNum <= k16ValidBit) { + int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + SelectCopyImm(regOpnd, immOpnd1, primType); + uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs; + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &bitShiftOpnd = + CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0bitNum), bitLen); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(mopBsub, resOpnd, *opnd0Bak, regOpnd, bitShiftOpnd)); + return; + } + + SelectCopyImm(regOpnd, *immOpnd, primType); + MOperator mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, regOpnd); + GetCurBB()->AppendInsn(newInsn); +} + +Operand *AArch64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32))); + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectSub(*resOpnd, opnd0, opnd1, primType); + } else { + /* vector operands */ + resOpnd = SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), OP_sub); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? 
PTY_i32 : PTY_u32))); + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectMpy(*resOpnd, opnd0, opnd1, primType); + } else { + resOpnd = SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), OP_mul); + } + return resOpnd; +} + +void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + + if (((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset) || + (opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) && + IsPrimitiveInteger(primType)) { + ImmOperand *imm = + ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? static_cast(&opnd0) + : static_cast(&opnd1); + Operand *otherOp = ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? &opnd1 : &opnd0; + int64 immValue = llabs(imm->GetValue()); + if (immValue != 0 && (static_cast(immValue) & (static_cast(immValue) - 1)) == 0) { + /* immValue is 1 << n */ + if (otherOp->GetKind() != Operand::kOpdRegister) { + otherOp = &SelectCopy(*otherOp, primType, primType); + } + int64 shiftVal = __builtin_ffsll(immValue); + ImmOperand &shiftNum = CreateImmOperand(shiftVal - 1, dsize, false); + SelectShift(resOpnd, *otherOp, shiftNum, kShiftLeft, primType); + bool reachSignBit = (is64Bits && (shiftVal == k64BitSize)) || (!is64Bits && (shiftVal == k32BitSize)); + if (imm->GetValue() < 0 && !reachSignBit) { + SelectNeg(resOpnd, resOpnd, primType); + } + + return; + } else if (immValue > 2) { + uint32 zeroNum = __builtin_ffsll(immValue) - 1; + int64 headVal = static_cast(immValue) >> zeroNum; + /* + * if (headVal - 1) & (headVal - 2) == 0, that is (immVal >> zeroNum) - 1 == 1 << n + * otherOp * immVal = (otherOp * (immVal >> zeroNum) * (1 << zeroNum) + * = (otherOp * ((immVal >> zeroNum) - 1) + otherOp) * (1 << zeroNum) + */ + if (((static_cast(headVal) - 1) & (static_cast(headVal) - 2)) == 0) { + if (otherOp->GetKind() != Operand::kOpdRegister) { + otherOp = &SelectCopy(*otherOp, primType, primType); + } + ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false); + RegOperand &tmpOpnd = CreateRegisterOperandOfType(primType); + SelectShift(tmpOpnd, *otherOp, shiftNum1, kShiftLeft, primType); + SelectAdd(resOpnd, *otherOp, tmpOpnd, primType); + ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false); + SelectShift(resOpnd, resOpnd, shiftNum2, kShiftLeft, primType); + if (imm->GetValue() < 0) { + SelectNeg(resOpnd, resOpnd, primType); + } + + return; + } + } + } + + if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + SelectMpy(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + } else if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + SelectMpy(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + } else if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) { + SelectMpy(resOpnd, opnd1, opnd0, primType); + } else { + DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Mpy"); + MOperator mOp = IsPrimitiveFloat(primType) ? (is64Bits ? MOP_xvmuld : MOP_xvmuls) + : (is64Bits ? 
MOP_xmulrrr : MOP_wmulrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } +} + +void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType) { + Operand &opnd0 = LoadIntoRegister(origOpnd0, primType); + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) && IsSignedInteger(primType)) { + ImmOperand *imm = static_cast(&opnd1); + int64 immValue = llabs(imm->GetValue()); + if ((immValue != 0) && (static_cast(immValue) & (static_cast(immValue) - 1)) == 0) { + if (immValue == 1) { + if (imm->GetValue() > 0) { + uint32 mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); + } else { + SelectNeg(resOpnd, opnd0, primType); + } + + return; + } + int32 shiftNumber = __builtin_ffsll(immValue) - 1; + ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false); + Operand &tmpOpnd = CreateRegisterOperandOfType(primType); + SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType); + uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSR, dsize - shiftNumber, bitLen); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBadd, tmpOpnd, opnd0, tmpOpnd, shiftOpnd)); + SelectShift(resOpnd, tmpOpnd, shiftNum, kShiftAright, primType); + if (imm->GetValue() < 0) { + SelectNeg(resOpnd, resOpnd, primType); + } + + return; + } + } else if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) && + IsUnsignedInteger(primType)) { + ImmOperand *imm = static_cast(&opnd1); + if (imm->GetValue() != 0) { + if ((imm->GetValue() > 0) && + ((static_cast(imm->GetValue()) & (static_cast(imm->GetValue()) - 1)) == 0)) { + ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false); + SelectShift(resOpnd, opnd0, shiftNum, kShiftLright, primType); + + return; + } else if (imm->GetValue() < 0) { + SelectAArch64Cmp(opnd0, *imm, true, dsize); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_CS), is64Bits); + + return; + } + } + } + } + + if (opnd0Type != Operand::kOpdRegister) { + SelectDiv(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + } else if (opnd1Type != Operand::kOpdRegister) { + SelectDiv(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + } else { + DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Div"); + MOperator mOp = IsPrimitiveFloat(primType) ? (is64Bits ? MOP_ddivrrr : MOP_sdivrrr) + : (IsSignedInteger(primType) ? (is64Bits ? MOP_xsdivrrr : MOP_wsdivrrr) + : (is64Bits ? 
MOP_xudivrrr : MOP_wudivrrr));
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+    }
+}
+
+Operand *AArch64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    CHECK_FATAL(!IsPrimitiveVector(dtype), "NYI DIV vector operands");
+    /* promoted type */
+    PrimType primType =
+        isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectDiv(resOpnd, opnd0, opnd1, primType);
+    return &resOpnd;
+}
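+
+/*
+ * Illustration (not in the original source): at -O1 and above a signed
+ * division by a power of two avoids sdiv. For i32 x / 4 the path above
+ * produces roughly:
+ *   asr w16, w0, #31          // sign mask: all ones if x < 0
+ *   add w16, w0, w16, LSR #30 // bias x by (4 - 1) only when x is negative
+ *   asr w2, w16, #2           // arithmetic shift yields the quotient
+ * with an extra neg when the divisor itself is negative.
+ */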
+
+void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned,
+                              bool is64Bits) {
+    Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
+    Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType);
+
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "Wrong type for REM");
+    /*
+     * printf("%d \n", 29 % 7 );
+     * -> 1
+     * printf("%u %d \n", (unsigned)-7, (unsigned)(-7) % 7 );
+     * -> 4294967289 4
+     * printf("%d \n", (-7) % 7 );
+     * -> 0
+     * printf("%d \n", 237 % -7 );
+     * -> 6
+     * printf("implicit i->u conversion %d \n", ((unsigned)237) % -7 );
+     * -> implicit conversion 237 (the divisor converts to a huge unsigned value)
+     *
+     * http://stackoverflow.com/questions/35351470/obtaining-remainder-using-single-aarch64-instruction
+     * input: x0=dividend, x1=divisor
+     * udiv|sdiv x2, x0, x1
+     * msub x3, x2, x1, x0  -- multiply-sub: x3 <- x0 - x2*x1
+     * result: x2=quotient, x3=remainder
+     *
+     * allocate temporary register
+     */
+    RegOperand &temp = CreateRegisterOperandOfType(primType);
+    /*
+     * mov  w1, #2
+     * sdiv wTemp, w0, w1
+     * msub wRespond, wTemp, w1, w0
+     * ========>
+     * asr wTemp, w0, #31
+     * lsr wTemp, wTemp, #31  (#30 for 4, #29 for 8, ...)
+     * add wRespond, w0, wTemp
+     * and wRespond, wRespond, #1  (#3 for 4, #7 for 8, ...)
+     * sub wRespond, wRespond, wTemp
+     *
+     * if divided by 2
+     * ========>
+     * lsr wTemp, w0, #31
+     * add wRespond, w0, wTemp
+     * and wRespond, wRespond, #1
+     * sub wRespond, wRespond, wTemp
+     *
+     * for the unsigned rem op, just use and
+     */
+    if ((Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2)) {
+        ImmOperand *imm = nullptr;
+        Insn *movImmInsn = GetCurBB()->GetLastInsn();
+        if (movImmInsn &&
+            ((movImmInsn->GetMachineOpcode() == MOP_wmovri32) || (movImmInsn->GetMachineOpcode() == MOP_xmovri64)) &&
+            movImmInsn->GetOperand(0).Equals(opnd1)) {
+            /*
+             * mov w1, #2
+             * rem res, w0, w1
+             */
+            imm = static_cast<ImmOperand*>(&movImmInsn->GetOperand(kInsnSecondOpnd));
+        } else if (opnd1.IsImmediate()) {
+            /*
+             * rem res, w0, #2
+             */
+            imm = static_cast<ImmOperand*>(&opnd1);
+        }
+        /* the sign of the divisor does not affect the result */
+        int64 divisor = 0;
+        if (imm && (imm->GetValue() != LONG_MIN)) {
+            divisor = llabs(imm->GetValue());
+        }
+        const int64 log2OfDivisor = GetLog2(static_cast<uint64>(divisor));
+        if ((divisor != 0) && (log2OfDivisor > 0)) {
+            if (is64Bits) {
+                CHECK_FATAL(log2OfDivisor < k64BitSize, "imm out of bound");
+                if (isSigned) {
+                    ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - log2OfDivisor, k64BitSize, isSigned);
+                    if (log2OfDivisor != 1) {
+                        /* 63: smear the sign bit across the whole 64-bit register */
+                        ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned);
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xasrrri6, temp, opnd0, rightShiftAll));
+
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, temp, rightShiftValue));
+                    } else {
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, opnd0, rightShiftValue));
+                    }
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrrr, resOpnd, opnd0, temp));
+                    ImmOperand &remBits = CreateImmOperand(divisor - 1, k64BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, resOpnd, remBits));
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsubrrr, resOpnd, resOpnd, temp));
+                    return;
+                } else if (imm && imm->GetValue() > 0) {
+                    ImmOperand &remBits = CreateImmOperand(divisor - 1, k64BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, opnd0, remBits));
+                    return;
+                }
+            } else {
+                CHECK_FATAL(log2OfDivisor < k32BitSize, "imm out of bound");
+                if (isSigned) {
+                    ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - log2OfDivisor, k32BitSize, isSigned);
+                    if (log2OfDivisor != 1) {
+                        /* 31: smear the sign bit across the whole 32-bit register */
+                        ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned);
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wasrrri5, temp, opnd0, rightShiftAll));
+
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, temp, rightShiftValue));
+                    } else {
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, opnd0, rightShiftValue));
+                    }
+
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddrrr, resOpnd, opnd0, temp));
+                    ImmOperand &remBits = CreateImmOperand(divisor - 1, k32BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, resOpnd, remBits));
+
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubrrr, resOpnd, resOpnd, temp));
+                    return;
+                } else if (imm && imm->GetValue() > 0) {
+                    ImmOperand &remBits = CreateImmOperand(divisor - 1, k32BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, opnd0, remBits));
+                    return;
+                }
+            }
+        }
+    }
+
+    uint32 mopDiv = is64Bits ? (isSigned ? MOP_xsdivrrr : MOP_xudivrrr) : (isSigned ? MOP_wsdivrrr : MOP_wudivrrr);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopDiv, temp, opnd0, opnd1));
+
+    uint32 mopSub = is64Bits ? MOP_xmsubrrrr : MOP_wmsubrrrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopSub, resOpnd, temp, opnd1, opnd0));
+}
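+
+/*
+ * Illustration (not in the original source): for an unsigned remainder by a
+ * power of two the division disappears entirely, e.g. u32 x % 8 becomes
+ *   and w0, w1, #7
+ * while the generic path keeps the udiv/sdiv + msub pair described above.
+ */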
+
+Operand *AArch64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType dtype = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(dtype), "wrong type for rem");
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    CHECK_FATAL(!IsPrimitiveVector(dtype), "NYI REM vector operands");
+
+    /* promoted type */
+    PrimType primType = ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)));
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectRem(resOpnd, opnd0, opnd1, primType, isSigned, is64Bits);
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectLand(BinaryNode &node, Operand &lhsOpnd, Operand &rhsOpnd, const BaseNode &parent) {
+    PrimType primType = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "Land should be integer type");
+    bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, is64Bits ? PTY_u64 : PTY_u32);
+    /*
+     * OP0 land OP1
+     * cmp  OP0, 0       # compare OP0 with 0, sets the Z bit
+     * ccmp OP1, 0, 4 //==0100b, ne  # if (OP0 != 0) compare OP1 with 0, else NZCV <- 0100, which acts as OP0 == 0
+     * cset RES, ne      # if Z==1 (i.e., OP0==0 || OP1==0) RES <- 0, else RES <- 1
+     */
+    Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
+    SelectAArch64Cmp(opnd0, CreateImmOperand(0, primType, false), true, GetPrimTypeBitSize(primType));
+    Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType);
+    SelectAArch64CCmp(opnd1, CreateImmOperand(0, primType, false), CreateImmOperand(4, PTY_u8, false),
+                      GetCondOperand(CC_NE), is64Bits);
+    SelectAArch64CSet(resOpnd, GetCondOperand(CC_NE), is64Bits);
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent,
+                                  bool parentIsBr) {
+    PrimType primType = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "Lior should be integer type");
+    bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, is64Bits ?
PTY_u64 : PTY_u32); + /* + * OP0 band Op1 + * cmp OP0, 0 # compare X0 with 0, sets Z bit + * ccmp OP1, 0, 0 //==0100b, eq # if(OP0==0,eq) cmp Op1 and 0, else NZCV <- 0000 makes OP0!=0 + * cset RES, ne # if Z==1(i.e., OP0==0&&OP1==0) RES<-0, RES<-1 + */ + if (parentIsBr && !is64Bits && opnd0.IsRegister() && (static_cast(&opnd0)->GetValidBitsNum() == 1) && + opnd1.IsRegister() && (static_cast(&opnd1)->GetValidBitsNum() == 1)) { + uint32 mOp = MOP_wiorrrr; + static_cast(resOpnd).SetValidBitsNum(1); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } else { + SelectBior(resOpnd, opnd0, opnd1, primType); + SelectAArch64Cmp(resOpnd, CreateImmOperand(0, primType, false), true, GetPrimTypeBitSize(primType)); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_NE), is64Bits); + } + return &resOpnd; +} + +void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, + Opcode opcode, PrimType primType, const BaseNode &parent) { + uint32 dsize = resOpnd.GetSize(); + bool isFloat = IsPrimitiveFloat(primType); + Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType); + + /* + * most of FP constants are passed as MemOperand + * except 0.0 which is passed as kOpdFPImmediate + */ + Operand::OperandType opnd1Type = rhsOpnd.GetKind(); + Operand *opnd1 = &rhsOpnd; + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = &LoadIntoRegister(rhsOpnd, primType); + } + + bool unsignedIntegerComparison = !isFloat && !IsSignedInteger(primType); + /* + * OP_cmp, OP_cmpl, OP_cmpg + * OP0, OP1 ; fcmp for OP_cmpl/OP_cmpg, cmp/fcmpe for OP_cmp + * CSINV RES, WZR, WZR, GE + * CSINC RES, RES, WZR, LE + * if OP_cmpl, CSINV RES, RES, WZR, VC (no overflow) + * if OP_cmpg, CSINC RES, RES, WZR, VC (no overflow) + */ + RegOperand &xzr = GetZeroOpnd(dsize); + if ((opcode == OP_cmpl) || (opcode == OP_cmpg)) { + DEBUG_ASSERT(isFloat, "incorrect operand types"); + SelectTargetFPCmpQuiet(opnd0, *opnd1, GetPrimTypeBitSize(primType)); + SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize)); + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize)); + if (opcode == OP_cmpl) { + SelectAArch64CSINV(resOpnd, resOpnd, xzr, GetCondOperand(CC_VC), (dsize == k64BitSize)); + } else { + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_VC), (dsize == k64BitSize)); + } + return; + } + + if (opcode == OP_cmp) { + SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType)); + if (unsignedIntegerComparison) { + SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_HS), (dsize == k64BitSize)); + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LS), (dsize == k64BitSize)); + } else { + SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize)); + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize)); + } + return; + } + + // lt u8 i32 ( xxx, 0 ) => get sign bit + if ((opcode == OP_lt) && opnd0.IsRegister() && opnd1->IsImmediate() && + (static_cast(opnd1)->GetValue() == 0) && parent.GetOpCode() != OP_select) { + bool is64Bits = (opnd0.GetSize() == k64BitSize); + if (!unsignedIntegerComparison) { + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + ImmOperand &shiftNum = + CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, static_cast(bitLen), false); + MOperator mOpCode = is64Bits ? 
MOP_xlsrrri6 : MOP_wlsrrri5; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, shiftNum)); + return; + } + ImmOperand &constNum = CreateImmOperand(0, is64Bits ? k64BitSize : k32BitSize, false); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(is64Bits ? MOP_xmovri64 : MOP_wmovri32, resOpnd, constNum)); + return; + } + SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType)); + + ConditionCode cc = CC_EQ; + switch (opcode) { + case OP_eq: + cc = CC_EQ; + break; + case OP_ne: + cc = CC_NE; + break; + case OP_le: + cc = unsignedIntegerComparison ? CC_LS : CC_LE; + break; + case OP_ge: + cc = unsignedIntegerComparison ? CC_HS : CC_GE; + break; + case OP_gt: + cc = unsignedIntegerComparison ? CC_HI : CC_GT; + break; + case OP_lt: + cc = unsignedIntegerComparison ? CC_LO : CC_LT; + break; + default: + CHECK_FATAL(false, "illegal logical operator"); + } + SelectAArch64CSet(resOpnd, GetCondOperand(cc), (dsize == k64BitSize)); +} + +Operand *AArch64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(node.GetPrimType())) { + resOpnd = &GetOrCreateResOperand(parent, node.GetPrimType()); + SelectCmpOp(*resOpnd, opnd0, opnd1, node.GetOpCode(), node.GetOpndType(), parent); + } else { + resOpnd = SelectVectorCompare(&opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), node.GetOpCode()); + } + return resOpnd; +} + +void AArch64CGFunc::SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize) { + MOperator mOpCode = 0; + if (o1.GetKind() == Operand::kOpdFPImmediate) { + CHECK_FATAL(static_cast(o0).GetValue() == 0, "NIY"); + mOpCode = (dsize == k64BitSize) ? MOP_dcmpqri : (dsize == k32BitSize) ? MOP_scmpqri : MOP_hcmpqri; + } else if (o1.GetKind() == Operand::kOpdRegister) { + mOpCode = (dsize == k64BitSize) ? MOP_dcmpqrr : (dsize == k32BitSize) ? MOP_scmpqrr : MOP_hcmpqrr; + } else { + CHECK_FATAL(false, "unsupported operand type"); + } + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, o1)); +} + +void AArch64CGFunc::SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, uint32 dsize) { + MOperator mOpCode = 0; + Operand *newO1 = &o1; + if (isIntType) { + if ((o1.GetKind() == Operand::kOpdImmediate) || (o1.GetKind() == Operand::kOpdOffset)) { + ImmOperand *immOpnd = static_cast(&o1); + /* + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + */ + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + mOpCode = (dsize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri; + } else { + /* load into register */ + PrimType ptype = (dsize == k64BitSize) ? PTY_i64 : PTY_i32; + newO1 = &SelectCopy(o1, ptype, ptype); + mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + } + } else if (o1.GetKind() == Operand::kOpdRegister) { + mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + } else { + CHECK_FATAL(false, "unsupported operand type"); + } + } else { /* float */ + if (o1.GetKind() == Operand::kOpdFPImmediate) { + CHECK_FATAL(static_cast(o1).GetValue() == 0, "NIY"); + mOpCode = (dsize == k64BitSize) ? MOP_dcmperi : ((dsize == k32BitSize) ? MOP_scmperi : MOP_hcmperi); + } else if (o1.GetKind() == Operand::kOpdRegister) { + mOpCode = (dsize == k64BitSize) ? MOP_dcmperr : ((dsize == k32BitSize) ? 
MOP_scmperr : MOP_hcmperr); + } else { + CHECK_FATAL(false, "unsupported operand type"); + } + } + DEBUG_ASSERT(mOpCode != 0, "mOpCode undefined"); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, *newO1)); +} + +void AArch64CGFunc::SelectAArch64CCmp(Operand &o, Operand &i, Operand &nzcv, CondOperand &cond, bool is64Bits) { + uint32 mOpCode = is64Bits ? MOP_xccmpriic : MOP_wccmpriic; + Operand &rflag = GetOrCreateRflag(); + std::vector opndVec; + opndVec.push_back(&rflag); + opndVec.push_back(&o); + opndVec.push_back(&i); + opndVec.push_back(&nzcv); + opndVec.push_back(&cond); + opndVec.push_back(&rflag); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opndVec)); +} + +void AArch64CGFunc::SelectAArch64CSet(Operand &r, CondOperand &cond, bool is64Bits) { + MOperator mOpCode = is64Bits ? MOP_xcsetrc : MOP_wcsetrc; + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, r, cond, rflag)); +} + +void AArch64CGFunc::SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits) { + MOperator mOpCode = is64Bits ? MOP_xcsinvrrrc : MOP_wcsinvrrrc; + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag)); +} + +void AArch64CGFunc::SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits) { + MOperator mOpCode = is64Bits ? MOP_xcsincrrrc : MOP_wcsincrrrc; + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag)); +} + +Operand *AArch64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectRelationOperator(kAND, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectRelationOperator(kAND, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); /* promoted type */ + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectRelationOperator(operatorCode, *resOpnd, opnd0, opnd1, primType); + } else { + /* vector operations */ + resOpnd = SelectVectorBitwiseOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(), + (operatorCode == kAND) ? OP_band : (operatorCode == kIOR ? OP_bior : OP_bxor)); + } + return resOpnd; +} + +MOperator AArch64CGFunc::SelectRelationMop(RelationOperator operatorCode, + RelationOperatorOpndPattern opndPattern, bool is64Bits, + bool isBitmaskImmediate, bool isBitNumLessThan16) const { + MOperator mOp = MOP_undef; + if (opndPattern == kRegReg) { + switch (operatorCode) { + case kAND: + mOp = is64Bits ? MOP_xandrrr : MOP_wandrrr; + break; + case kIOR: + mOp = is64Bits ? MOP_xiorrrr : MOP_wiorrrr; + break; + case kEOR: + mOp = is64Bits ? 
MOP_xeorrrr : MOP_weorrrr; + break; + default: + break; + } + return mOp; + } + /* opndPattern == KRegImm */ + if (isBitmaskImmediate) { + switch (operatorCode) { + case kAND: + mOp = is64Bits ? MOP_xandrri13 : MOP_wandrri12; + break; + case kIOR: + mOp = is64Bits ? MOP_xiorrri13 : MOP_wiorrri12; + break; + case kEOR: + mOp = is64Bits ? MOP_xeorrri13 : MOP_weorrri12; + break; + default: + break; + } + return mOp; + } + /* normal imm value */ + if (isBitNumLessThan16) { + switch (operatorCode) { + case kAND: + mOp = is64Bits ? MOP_xandrrrs : MOP_wandrrrs; + break; + case kIOR: + mOp = is64Bits ? MOP_xiorrrrs : MOP_wiorrrrs; + break; + case kEOR: + mOp = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs; + break; + default: + break; + } + return mOp; + } + return mOp; +} + +void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, + Operand &opnd1, PrimType primType) { + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + /* op #imm. #imm */ + if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + SelectRelationOperator(operatorCode, resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + return; + } + /* op #imm, reg -> op reg, #imm */ + if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) { + SelectRelationOperator(operatorCode, resOpnd, opnd1, opnd0, primType); + return; + } + /* op reg, reg */ + if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) { + DEBUG_ASSERT(IsPrimitiveInteger(primType), "NYI band"); + MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + return; + } + /* op reg, #imm */ + if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) { + SelectRelationOperator(operatorCode, resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + return; + } + + ImmOperand *immOpnd = static_cast(&opnd1); + if (immOpnd->IsZero()) { + if (operatorCode == kAND) { + uint32 mopMv = is64Bits ? MOP_xmovrr : MOP_wmovrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMv, resOpnd, + GetZeroOpnd(dsize))); + } else if ((operatorCode == kIOR) || (operatorCode == kEOR)) { + SelectCopy(resOpnd, primType, opnd0, primType); + } + } else if ((immOpnd->IsAllOnes()) || (!is64Bits && immOpnd->IsAllOnes32bit())) { + if (operatorCode == kAND) { + SelectCopy(resOpnd, primType, opnd0, primType); + } else if (operatorCode == kIOR) { + uint32 mopMovn = is64Bits ? 
MOP_xmovnri16 : MOP_wmovnri16; + ImmOperand &src16 = CreateImmOperand(0, k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(0, is64Bits); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(mopMovn, resOpnd, src16, *lslOpnd)); + } else if (operatorCode == kEOR) { + SelectMvn(resOpnd, opnd0, primType); + } + } else if (immOpnd->IsBitmaskImmediate()) { + MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, true, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } else { + int64 immVal = immOpnd->GetValue(); + int32 tail0BitNum = GetTail0BitNum(immVal); + int32 head0BitNum = GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0BitNum) - tail0BitNum; + RegOperand ®Opnd = CreateRegisterOperandOfType(primType); + + if (bitNum <= k16ValidBit) { + int64 newImm = (static_cast(immVal) >> static_cast(tail0BitNum)) & 0xFFFF; + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + SelectCopyImm(regOpnd, immOpnd1, primType); + MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true); + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &shiftOpnd = + CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0BitNum), bitLen); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd, shiftOpnd)); + } else { + SelectCopyImm(regOpnd, *immOpnd, primType); + MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd)); + } + } + } +} + +Operand *AArch64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectRelationOperator(kIOR, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectRelationOperator(kIOR, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + /* promoted type */ + PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)); + RegOperand &resOpnd = GetOrCreateResOperand(parent, primType); + SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void AArch64CGFunc::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + if (IsPrimitiveInteger(primType)) { + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType); + Operand ®Opnd1 = LoadIntoRegister(opnd1, primType); + SelectAArch64Cmp(regOpnd0, regOpnd1, true, dsize); + Operand &newResOpnd = LoadIntoRegister(resOpnd, primType); + if (isMin) { + CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_LT) : GetCondOperand(CC_LO); + SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize); + } else { + CondOperand &cc = IsSignedInteger(primType) ? 
GetCondOperand(CC_GT) : GetCondOperand(CC_HI); + SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize); + } + } else if (IsPrimitiveFloat(primType)) { + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType); + RegOperand ®Opnd1 = LoadIntoRegister(opnd1, primType); + SelectFMinFMax(resOpnd, regOpnd0, regOpnd1, is64Bits, isMin); + } else { + CHECK_FATAL(false, "NIY type max or min"); + } +} + +Operand *AArch64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectMinOrMax(true, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectMinOrMax(false, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType); +} + +void AArch64CGFunc::SelectFMinFMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, bool is64Bits, bool isMin) { + uint32 mOpCode = isMin ? (is64Bits ? MOP_xfminrrr : MOP_wfminrrr) : (is64Bits ? MOP_xfmaxrrr : MOP_wfmaxrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, opnd1)); +} + +Operand *AArch64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectRelationOperator(kEOR, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectRelationOperator(kEOR, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + Opcode opcode = node.GetOpCode(); + + bool isOneElemVector = false; + BaseNode *expr = node.Opnd(0); + if (expr->GetOpCode() == OP_dread) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(static_cast(expr)->GetStIdx()); + isOneElemVector = symbol->GetAttr(ATTR_oneelem_simd); + } + + Operand *opd0 = &opnd0; + PrimType otyp0 = expr->GetPrimType(); + if (IsPrimitiveVector(dtype) && opnd0.IsConstImmediate()) { + opd0 = SelectVectorFromScalar(dtype, opd0, node.Opnd(0)->GetPrimType()); + otyp0 = dtype; + } + + if (IsPrimitiveVector(dtype) && opnd1.IsConstImmediate()) { + int64 sConst = static_cast(opnd1).GetValue(); + resOpnd = SelectVectorShiftImm(dtype, opd0, &opnd1, static_cast(sConst), opcode); + } else if ((IsPrimitiveVector(dtype) || isOneElemVector) && !opnd1.IsConstImmediate()) { + resOpnd = SelectVectorShift(dtype, opd0, otyp0, &opnd1, node.Opnd(1)->GetPrimType(), opcode); + } else { + PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)); + resOpnd = &GetOrCreateResOperand(parent, primType); + ShiftDirection direct = (opcode == OP_lshr) ? kShiftLright : ((opcode == OP_ashr) ? kShiftAright : kShiftLeft); + SelectShift(*resOpnd, opnd0, opnd1, direct, primType); + } + + if (dtype == PTY_i16) { + MOperator exOp = is64Bits ? 
MOP_xsxth64 : MOP_xsxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd)); + } else if (dtype == PTY_i8) { + MOperator exOp = is64Bits ? MOP_xsxtb64 : MOP_xsxtb32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd)); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + uint32 dsize = GetPrimTypeBitSize(dtype); + PrimType primType = (dsize == k64BitSize) ? PTY_u64 : PTY_u32; + RegOperand *resOpnd = &GetOrCreateResOperand(parent, primType); + Operand *firstOpnd = &LoadIntoRegister(opnd0, primType); + MOperator mopRor = (dsize == k64BitSize) ? MOP_xrorrrr : MOP_wrorrrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopRor, *resOpnd, *firstOpnd, opnd1)); + return resOpnd; +} + +void AArch64CGFunc::SelectBxorShift(Operand &resOpnd, Operand *opnd0, Operand *opnd1, Operand &opnd2, + PrimType primType) { + opnd0 = &LoadIntoRegister(*opnd0, primType); + opnd1 = &LoadIntoRegister(*opnd1, primType); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + MOperator mopBxor = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBxor, resOpnd, *opnd0, *opnd1, opnd2)); +} + +void AArch64CGFunc::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, ShiftDirection direct, + PrimType primType) { + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + Operand *firstOpnd = &LoadIntoRegister(opnd0, primType); + + MOperator mopShift; + if ((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) { + ImmOperand *immOpnd1 = static_cast(&opnd1); + const int64 kVal = immOpnd1->GetValue(); + const uint32 kShiftamt = is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits; + if (kVal == 0) { + SelectCopy(resOpnd, primType, *firstOpnd, primType); + return; + } + /* e.g. a >> -1 */ + if ((kVal < 0) || (kVal > kShiftamt)) { + SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType); + return; + } + switch (direct) { + case kShiftLeft: + mopShift = is64Bits ? MOP_xlslrri6 : MOP_wlslrri5; + break; + case kShiftAright: + mopShift = is64Bits ? MOP_xasrrri6 : MOP_wasrrri5; + break; + case kShiftLright: + mopShift = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5; + break; + } + } else if (opnd1Type != Operand::kOpdRegister) { + SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType); + return; + } else { + switch (direct) { + case kShiftLeft: + mopShift = is64Bits ? MOP_xlslrrr : MOP_wlslrrr; + break; + case kShiftAright: + mopShift = is64Bits ? MOP_xasrrrr : MOP_wasrrrr; + break; + case kShiftLright: + mopShift = is64Bits ? MOP_xlsrrrr : MOP_wlsrrrr; + break; + } + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopShift, resOpnd, *firstOpnd, opnd1)); +} + +Operand *AArch64CGFunc::SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0) { + PrimType dtyp = node.GetPrimType(); + bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize); + /* promoted type */ + PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + uint32 mopCsneg = is64Bits ? 
MOP_xcnegrrrc : MOP_wcnegrrrc; + /* ABS requires the operand be interpreted as a signed integer */ + CondOperand &condOpnd = GetCondOperand(CC_MI); + MOperator newMop = lastInsn.GetMachineOpcode() + 1; + Operand &rflag = GetOrCreateRflag(); + std::vector opndVec; + opndVec.push_back(&rflag); + for (uint32 i = 0; i < lastInsn.GetOperandSize(); i++) { + opndVec.push_back(&lastInsn.GetOperand(i)); + } + Insn *subsInsn = &GetInsnBuilder()->BuildInsn(newMop, opndVec); + GetCurBB()->ReplaceInsn(lastInsn, *subsInsn); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, condOpnd, rflag)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0) { + PrimType dtyp = node.GetPrimType(); + if (IsPrimitiveVector(dtyp)) { + return SelectVectorAbs(dtyp, &opnd0); + } else if (IsPrimitiveFloat(dtyp)) { + CHECK_FATAL(GetPrimTypeBitSize(dtyp) >= k32BitSize, "We don't support hanf-word FP operands yet"); + bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize); + Operand &newOpnd0 = LoadIntoRegister(opnd0, dtyp); + RegOperand &resOpnd = CreateRegisterOperandOfType(dtyp); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_dabsrr : MOP_sabsrr, + resOpnd, newOpnd0)); + return &resOpnd; + } else { + bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize); + /* promoted type */ + PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32); + Operand &newOpnd0 = LoadIntoRegister(opnd0, primType); + Insn *lastInsn = GetCurBB()->GetLastInsn(); + if (lastInsn != nullptr && lastInsn->GetMachineOpcode() >= MOP_xsubrrr && + lastInsn->GetMachineOpcode() <= MOP_wsubrri12) { + return SelectAbsSub(*lastInsn, node, newOpnd0); + } + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + SelectAArch64Cmp(newOpnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), + true, GetPrimTypeBitSize(dtyp)); + uint32 mopCsneg = is64Bits ? MOP_xcsnegrrrc : MOP_wcsnegrrrc; + /* ABS requires the operand be interpreted as a signed integer */ + CondOperand &condOpnd = GetCondOperand(CC_GE); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, newOpnd0, + condOpnd, rflag)); + return &resOpnd; + } +} + +Operand *AArch64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + DEBUG_ASSERT(IsPrimitiveInteger(dtype) || IsPrimitiveVectorInteger(dtype), "bnot expect integer or NYI"); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + bool isSigned = IsSignedInteger(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); + resOpnd = &GetOrCreateResOperand(parent, primType); + + Operand &newOpnd0 = LoadIntoRegister(opnd0, primType); + + uint32 mopBnot = is64Bits ? MOP_xnotrr : MOP_wnotrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBnot, *resOpnd, newOpnd0)); + } else { + /* vector operand */ + resOpnd = SelectVectorNot(dtype, &opnd0); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + auto bitWidth = (GetPrimTypeBitSize(dtype)); + RegOperand *resOpnd = nullptr; + resOpnd = &GetOrCreateResOperand(parent, dtype); + Operand &newOpnd0 = LoadIntoRegister(opnd0, dtype); + uint32 mopBswap = bitWidth == 64 ? MOP_xrevrr : (bitWidth == 32 ? 
MOP_wrevrr : MOP_wrevrr16); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBswap, *resOpnd, newOpnd0)); + return resOpnd; +} + +Operand *AArch64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + CHECK_FATAL(!is64Bits, "dest opnd should not be 64bit"); + PrimType destType = GetIntegerPrimTypeBySizeAndSign(bitSize, isSigned); + Operand *result = SelectIread(parent, *static_cast(node.Opnd(0)), + static_cast(bitOffset / k8BitSize), destType); + return result; +} + +Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpnd, const BaseNode &parent) { + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + RegOperand *srcVecRegOperand = static_cast(&srcOpnd); + if (srcVecRegOperand && srcVecRegOperand->IsRegister() && (srcVecRegOperand->GetSize() == k128BitSize)) { + if ((bitSize == k8BitSize || bitSize == k16BitSize || bitSize == k32BitSize || bitSize == k64BitSize) && + (bitOffset % bitSize) == k0BitSize) { + uint32 lane = bitOffset / bitSize; + PrimType srcVecPtype; + if (bitSize == k64BitSize) { + srcVecPtype = PTY_v2u64; + } else if (bitSize == k32BitSize) { + srcVecPtype = PTY_v4u32; + } else if (bitSize == k16BitSize) { + srcVecPtype = PTY_v8u16; + } else { + srcVecPtype = PTY_v16u8; + } + RegOperand *resRegOperand = SelectVectorGetElement(node.GetPrimType(), + &srcOpnd, srcVecPtype, static_cast(lane)); + return resRegOperand; + } else { + CHECK_FATAL(false, "NYI"); + } + } + PrimType dtype = node.GetPrimType(); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + bool isSigned = (node.GetOpCode() == OP_sext) ? true : (node.GetOpCode() == OP_zext) ? false : IsSignedInteger(dtype); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + uint32 immWidth = is64Bits ? kMaxImmVal13Bits : kMaxImmVal12Bits; + Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype); + if (bitOffset == 0) { + if (!isSigned && (bitSize < immWidth)) { + SelectBand(resOpnd, opnd0, CreateImmOperand(static_cast((static_cast(1) << bitSize) - 1), + immWidth, false), dtype); + return &resOpnd; + } else { + MOperator mOp = MOP_undef; + if (bitSize == k8BitSize) { + mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef) : + (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef)); + } else if (bitSize == k16BitSize) { + mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef) : + (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef)); + } else if (bitSize == k32BitSize) { + mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr; + } + if (mOp != MOP_undef) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); + return &resOpnd; + } + } + } + uint32 mopBfx = + is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? 
MOP_wsbfxrri5i5 : MOP_wubfxrri5i5); + ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); + ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2)); + return &resOpnd; +} + +/* + * operand fits in MOVK if + * is64Bits && boffst == 0, 16, 32, 48 && bSize == 16, so boffset / 16 == 0, 1, 2, 3; (boffset / 16 ) & (~3) == 0 + * or is32Bits && boffset == 0, 16 && bSize == 16, so boffset / 16 == 0, 1; (boffset / 16) & (~1) == 0 + * imm range of aarch64-movk [0 - 65536] imm16 + */ +inline bool IsMoveWideKeepable(int64 offsetVal, uint32 bitOffset, uint32 bitSize, bool is64Bits) { + DEBUG_ASSERT(is64Bits || (bitOffset < k32BitSize), ""); + bool isOutOfRange = offsetVal < 0; + if (!isOutOfRange) { + isOutOfRange = (static_cast(offsetVal) >> k16BitSize) > 0; + } + return (!isOutOfRange) && + bitSize == k16BitSize && + ((bitOffset >> k16BitShift) & ~static_cast(is64Bits ? 0x3 : 0x1)) == 0; +} + +/* we use the fact that A ^ B ^ A == B, A ^ 0 = A */ +Operand *AArch64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent) { + uint32 bitOffset = node.GetBitsOffset(); + uint32 bitSize = node.GetBitsSize(); + PrimType regType = node.GetPrimType(); + bool is64Bits = GetPrimTypeBitSize(regType) == k64BitSize; + /* + * if operand 1 is immediate and fits in MOVK, use it + * MOVK Wd, #imm{, LSL #shift} ; 32-bit general registers + * MOVK Xd, #imm{, LSL #shift} ; 64-bit general registers + */ + if (opnd1.IsIntImmediate() && + IsMoveWideKeepable(static_cast(opnd1).GetValue(), bitOffset, bitSize, is64Bits)) { + RegOperand &resOpnd = GetOrCreateResOperand(parent, regType); + SelectCopy(resOpnd, regType, opnd0, regType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((is64Bits ? MOP_xmovkri16 : MOP_wmovkri16), + resOpnd, opnd1, + *GetLogicalShiftLeftOperand(bitOffset, is64Bits))); + return &resOpnd; + } else { + Operand &movOpnd = LoadIntoRegister(opnd1, regType); + uint32 mopBfi = is64Bits ? MOP_xbfirri6i6 : MOP_wbfirri5i5; + ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); + ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfi, opnd0, movOpnd, immOpnd1, immOpnd2)); + return &opnd0; + } +} + +Operand *AArch64CGFunc::SelectLnot(UnaryNode &node, Operand &srcOpnd, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype); + SelectAArch64Cmp(opnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true, GetPrimTypeBitSize(dtype)); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_EQ), is64Bits); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + PrimType primType; + if (IsPrimitiveFloat(dtype)) { + primType = dtype; + } else { + primType = is64Bits ? 
(PTY_i64) : (PTY_i32); /* promoted type */ + } + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectNeg(*resOpnd, opnd0, primType); + } else { + /* vector operand */ + resOpnd = SelectVectorNeg(dtype, &opnd0); + } + return resOpnd; +} + +void AArch64CGFunc::SelectNeg(Operand &dest, Operand &srcOpnd, PrimType primType) { + Operand &opnd0 = LoadIntoRegister(srcOpnd, primType); + bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize); + MOperator mOp; + if (IsPrimitiveFloat(primType)) { + mOp = is64Bits ? MOP_xfnegrr : MOP_wfnegrr; + } else { + mOp = is64Bits ? MOP_xinegrr : MOP_winegrr; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0)); +} + +void AArch64CGFunc::SelectMvn(Operand &dest, Operand &src, PrimType primType) { + Operand &opnd0 = LoadIntoRegister(src, primType); + bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize); + MOperator mOp; + DEBUG_ASSERT(!IsPrimitiveFloat(primType), "Instruction 'mvn' do not have float version."); + mOp = is64Bits ? MOP_xnotrr : MOP_wnotrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0)); +} + +Operand *AArch64CGFunc::SelectRecip(UnaryNode &node, Operand &src, const BaseNode &parent) { + /* + * fconsts s15, #112 + * fdivs s0, s15, s0 + */ + PrimType dtype = node.GetPrimType(); + if (!IsPrimitiveFloat(dtype)) { + DEBUG_ASSERT(false, "should be float type"); + return nullptr; + } + Operand &opnd0 = LoadIntoRegister(src, dtype); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + Operand *one = nullptr; + if (GetPrimTypeBitSize(dtype) == k64BitSize) { + MIRDoubleConst *c = memPool->New(1.0, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); + one = SelectDoubleConst(*c, node); + } else if (GetPrimTypeBitSize(dtype) == k32BitSize) { + MIRFloatConst *c = memPool->New(1.0f, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f32)); + one = SelectFloatConst(*c, node); + } else { + CHECK_FATAL(false, "we don't support half-precision fp operations yet"); + } + SelectDiv(resOpnd, *one, opnd0, dtype); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent) { + /* + * gcc generates code like below for better accurate + * fsqrts s15, s0 + * fcmps s15, s15 + * fmstat + * beq .L4 + * push {r3, lr} + * bl sqrtf + * pop {r3, pc} + * .L4: + * fcpys s0, s15 + * bx lr + */ + PrimType dtype = node.GetPrimType(); + if (!IsPrimitiveFloat(dtype)) { + DEBUG_ASSERT(false, "should be float type"); + return nullptr; + } + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + Operand &opnd0 = LoadIntoRegister(src, dtype); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_vsqrtd : MOP_vsqrts, resOpnd, opnd0)); + return &resOpnd; +} + +void AArch64CGFunc::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + bool is64BitsFloat = (ftype == PTY_f64); + MOperator mOp = 0; + + DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong from type"); + Operand &opnd0 = LoadIntoRegister(srcOpnd, ftype); + switch (itype) { + case PTY_i32: + mOp = !is64BitsFloat ? MOP_vcvtrf : MOP_vcvtrd; + break; + case PTY_u32: + case PTY_a32: + mOp = !is64BitsFloat ? MOP_vcvturf : MOP_vcvturd; + break; + case PTY_i64: + mOp = !is64BitsFloat ? MOP_xvcvtrf : MOP_xvcvtrd; + break; + case PTY_u64: + case PTY_a64: + mOp = !is64BitsFloat ? 
MOP_xvcvturf : MOP_xvcvturd; + break; + default: + CHECK_FATAL(false, "unexpected type"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); +} + +void AArch64CGFunc::SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType) { + DEBUG_ASSERT((toType == PTY_f32) || (toType == PTY_f64), "unexpected type"); + bool is64BitsFloat = (toType == PTY_f64); + MOperator mOp = 0; + uint32 fsize = GetPrimTypeBitSize(fromType); + + PrimType itype = (GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32); + + Operand *opnd0 = &LoadIntoRegister(origOpnd0, itype); + + /* need extension before cvt */ + DEBUG_ASSERT(opnd0->IsRegister(), "opnd should be a register operand"); + Operand *srcOpnd = opnd0; + if (IsSignedInteger(fromType) && (fsize < k32BitSize)) { + srcOpnd = &CreateRegisterOperandOfType(itype); + mOp = (fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *srcOpnd, *opnd0)); + } + + switch (itype) { + case PTY_i32: + mOp = !is64BitsFloat ? MOP_vcvtfr : MOP_vcvtdr; + break; + case PTY_u32: + mOp = !is64BitsFloat ? MOP_vcvtufr : MOP_vcvtudr; + break; + case PTY_i64: + mOp = !is64BitsFloat ? MOP_xvcvtfr : MOP_xvcvtdr; + break; + case PTY_u64: + mOp = !is64BitsFloat ? MOP_xvcvtufr : MOP_xvcvtudr; + break; + default: + CHECK_FATAL(false, "unexpected type"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *srcOpnd)); +} + +Operand *AArch64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + if (intrnNode.GetIntrinsic() == INTRN_C_ffs) { + DEBUG_ASSERT(intrnNode.GetPrimType() == PTY_i32, "Unexpect Size"); + return SelectAArch64ffs(*opnd, ptype); + } + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + std::vector opndVec; + RegOperand *dst = &CreateRegisterOperandOfType(ptype); + opndVec.push_back(dst); /* result */ + opndVec.push_back(opnd); /* param 0 */ + SelectLibCall(name, opndVec, ptype, ptype); + + return dst; +} + +Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, + const std::string &name) { + MapleVector argNodes = intrnNode.GetNopnd(); + std::vector opndVec; + std::vector opndTypes; + RegOperand *retOpnd = &CreateRegisterOperandOfType(retType); + opndVec.push_back(retOpnd); + opndTypes.push_back(retType); + + for (BaseNode *argexpr : argNodes) { + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + opndVec.push_back(opnd); + opndTypes.push_back(ptype); + } + SelectLibCallNArg(name, opndVec, opndTypes, retType, false); + + return retOpnd; +} + +/* According to gcc.target/aarch64/ffs.c */ +Operand *AArch64CGFunc::SelectAArch64ffs(Operand &argOpnd, PrimType argType) { + RegOperand &destOpnd = LoadIntoRegister(argOpnd, argType); + uint32 argSize = 
GetPrimTypeBitSize(argType); + DEBUG_ASSERT((argSize == k64BitSize || argSize == k32BitSize), "Unexpect arg type"); + /* cmp */ + ImmOperand &zeroOpnd = CreateImmOperand(0, argSize, false); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xcmpri : MOP_wcmpri, rflag, destOpnd, zeroOpnd)); + /* rbit */ + RegOperand *tempResReg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(argType))); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xrbit : MOP_wrbit, *tempResReg, destOpnd)); + /* clz */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xclz : MOP_wclz, *tempResReg, *tempResReg)); + /* csincc */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xcsincrrrc : MOP_wcsincrrrc, + *tempResReg, GetZeroOpnd(k32BitSize), *tempResReg, GetCondOperand(CC_EQ), rflag)); + return tempResReg; +} + +Operand *AArch64CGFunc::SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0) { + PrimType ftype = node.FromType(); + PrimType rtype = node.GetPrimType(); + bool is64Bits = (ftype == PTY_f64); + std::vector opndVec; + RegOperand *resOpnd; + if (is64Bits) { + resOpnd = &GetOrCreatePhysicalRegisterOperand(D0, k64BitSize, kRegTyFloat); + } else { + resOpnd = &GetOrCreatePhysicalRegisterOperand(S0, k32BitSize, kRegTyFloat); + } + opndVec.push_back(resOpnd); + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, ftype); + opndVec.push_back(®Opnd0); + std::string libName; + if (roundType == kCeil) { + libName.assign(is64Bits ? "ceil" : "ceilf"); + } else if (roundType == kFloor) { + libName.assign(is64Bits ? "floor" : "floorf"); + } else { + libName.assign(is64Bits ? "round" : "roundf"); + } + SelectLibCall(libName, opndVec, ftype, rtype); + + return resOpnd; +} + +Operand *AArch64CGFunc::SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, + const BaseNode &parent) { + PrimType itype = node.GetPrimType(); + if ((mirModule.GetSrcLang() == kSrcLangC) && ((itype == PTY_f64) || (itype == PTY_f32))) { + SelectRoundLibCall(roundType, node, opnd0); + } + PrimType ftype = node.FromType(); + DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong float type"); + bool is64Bits = (ftype == PTY_f64); + RegOperand &resOpnd = GetOrCreateResOperand(parent, itype); + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, ftype); + MOperator mop = MOP_undef; + if (roundType == kCeil) { + mop = is64Bits ? MOP_xvcvtps : MOP_vcvtps; + } else if (roundType == kFloor) { + mop = is64Bits ? MOP_xvcvtms : MOP_vcvtms; + } else { + mop = is64Bits ? 
MOP_xvcvtas : MOP_vcvtas;
+    }
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, resOpnd, regOpnd0));
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) {
+    return SelectRoundOperator(kCeil, node, opnd0, parent);
+}
+
+/* float to int floor */
+Operand *AArch64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) {
+    return SelectRoundOperator(kFloor, node, opnd0, parent);
+}
+
+Operand *AArch64CGFunc::SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) {
+    return SelectRoundOperator(kRound, node, opnd0, parent);
+}
+
+static bool LIsPrimitivePointer(PrimType ptype) {
+    return ((ptype >= PTY_ptr) && (ptype <= PTY_a64));
+}
+
+Operand *AArch64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0) {
+    PrimType fromType = node.Opnd(0)->GetPrimType();
+    PrimType toType = node.GetPrimType();
+    DEBUG_ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit width doesn't match");
+    if (LIsPrimitivePointer(fromType) && LIsPrimitivePointer(toType)) {
+        return &LoadIntoRegister(opnd0, toType);
+    }
+    if (IsPrimitiveVector(fromType) || IsPrimitiveVector(toType)) {
+        return &LoadIntoRegister(opnd0, toType);
+    }
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    RegOperand *resOpnd = &CreateRegisterOperandOfType(toType);
+    if (IsPrimitiveInteger(fromType) || IsPrimitiveFloat(fromType)) {
+        bool isFromInt = IsPrimitiveInteger(fromType);
+        bool is64Bits = GetPrimTypeBitSize(fromType) == k64BitSize;
+        PrimType itype =
+            isFromInt ? ((GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
+                                                                      : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32))
+                      : (is64Bits ? PTY_f64 : PTY_f32);
+
+        /*
+         * if the source operand is in memory,
+         * simply read it as a value of 'toType' into the dest operand
+         * and return
+         */
+        if (opnd0Type == Operand::kOpdMem) {
+            resOpnd = &SelectCopy(opnd0, toType, toType);
+            return resOpnd;
+        }
+        /* according to the aarch64 encoding format, convert the int to a float expression */
+        bool isImm = false;
+        ImmOperand *imm = static_cast<ImmOperand*>(&opnd0);
+        uint64 val = static_cast<uint64>(imm->GetValue());
+        uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff);
+        uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3;
+        uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f;
+        bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f));
+        canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame;
+        Operand *newOpnd0 = &opnd0;
+        if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType) && canRepreset) {
+            uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7;
+            uint64 temp2 = is64Bits ? val >> 48 : val >> 19;
+            int64 imm8 = (temp2 & 0x7f) | temp1;
+            newOpnd0 = &CreateImmOperand(imm8, k8BitSize, false, kNotVary, true);
+            isImm = true;
+        } else {
+            newOpnd0 = &LoadIntoRegister(opnd0, itype);
+        }
+        if ((IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) ||
+            (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType))) {
+            MOperator mopFmov = (isImm ? (is64Bits ? MOP_xdfmovri : MOP_wsfmovri) : isFromInt) ?
+                (is64Bits ? MOP_xvmovdr : MOP_xvmovsr) : (is64Bits ?
MOP_xvmovrd : MOP_xvmovrs); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *resOpnd, *newOpnd0)); + return resOpnd; + } else { + return newOpnd0; + } + } else { + CHECK_FATAL(false, "NYI retype"); + } + return nullptr; +} + +void AArch64CGFunc::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + Operand &opnd0 = LoadIntoRegister(srcOpnd, fromType); + MOperator mOp = 0; + switch (toType) { + case PTY_f32: { + CHECK_FATAL(fromType == PTY_f64, "unexpected cvt from type"); + mOp = MOP_xvcvtfd; + break; + } + case PTY_f64: { + CHECK_FATAL(fromType == PTY_f32, "unexpected cvt from type"); + mOp = MOP_xvcvtdf; + break; + } + default: + CHECK_FATAL(false, "unexpected cvt to type"); + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); +} + +/* + * This should be regarded only as a reference. + * + * C11 specification. + * 6.3.1.3 Signed and unsigned integers + * 1 When a value with integer type is converted to another integer + * type other than _Bool, if the value can be represented by the + * new type, it is unchanged. + * 2 Otherwise, if the new type is unsigned, the value is converted + * by repeatedly adding or subtracting one more than the maximum + * value that can be represented in the new type until the value + * is in the range of the new type.60) + * 3 Otherwise, the new type is signed and the value cannot be + * represented in it; either the result is implementation-defined + * or an implementation-defined signal is raised. + */ +void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, + PrimType toType) { + uint32 fsize = GetPrimTypeBitSize(fromType); + if (fromType == PTY_i128 || fromType == PTY_u128) { + fsize = k64BitSize; + } + uint32 tsize = GetPrimTypeBitSize(toType); + if (toType == PTY_i128 || toType == PTY_u128) { + tsize = k64BitSize; + } + bool isExpand = tsize > fsize; + bool is64Bit = (tsize == k64BitSize); + if ((parent != nullptr) && opnd0->IsIntImmediate() && + ((parent->GetOpCode() == OP_band) || (parent->GetOpCode() == OP_bior) || (parent->GetOpCode() == OP_bxor) || + (parent->GetOpCode() == OP_ashr) || (parent->GetOpCode() == OP_lshr) || (parent->GetOpCode() == OP_shl))) { + ImmOperand *simm = static_cast(opnd0); + DEBUG_ASSERT(simm != nullptr, "simm is nullptr in AArch64CGFunc::SelectCvtInt2Int"); + bool isSign = false; + int64 origValue = simm->GetValue(); + int64 newValue = origValue; + int64 signValue = 0; + if (!isExpand) { + /* 64--->32 */ + if (fsize > tsize) { + if (IsSignedInteger(toType)) { + if (origValue < 0) { + signValue = static_cast(0xFFFFFFFFFFFFFFFFLL & (1ULL << static_cast(tsize))); + } + newValue = static_cast((static_cast(origValue) & ((1ULL << static_cast(tsize)) - 1u)) | + static_cast(signValue)); + } else { + newValue = static_cast(origValue) & ((1ULL << static_cast(tsize)) - 1u); + } + } + } + if (IsSignedInteger(toType)) { + isSign = true; + } + resOpnd = &static_cast(CreateImmOperand(newValue, GetPrimTypeSize(toType) * kBitsPerByte, isSign)); + return; + } + if (isExpand) { /* Expansion */ + /* if cvt expr's parent is add,and,xor and some other,we can use the imm version */ + PrimType primType = + ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) : (IsSignedInteger(fromType) ? 
+ PTY_i32 : PTY_u32)); + opnd0 = &LoadIntoRegister(*opnd0, primType); + + if (IsSignedInteger(fromType)) { + DEBUG_ASSERT((is64Bit || (fsize == k8BitSize || fsize == k16BitSize)), "incorrect from size"); + + MOperator mOp = + (is64Bit ? ((fsize == k8BitSize) ? MOP_xsxtb64 : ((fsize == k16BitSize) ? MOP_xsxth64 : MOP_xsxtw64)) + : ((fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else { + /* Unsigned */ + if (is64Bit) { + if (fsize == k8BitSize) { + ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, *resOpnd, *opnd0, immOpnd)); + } else if (fsize == k16BitSize) { + ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, *resOpnd, *opnd0, immOpnd)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuxtw64, *resOpnd, *opnd0)); + } + } else { + DEBUG_ASSERT(((fsize == k8BitSize) || (fsize == k16BitSize)), "incorrect from size"); + if (fsize == k8BitSize) { + static_cast(opnd0)->SetValidBitsNum(k8BitSize); + static_cast(resOpnd)->SetValidBitsNum(k8BitSize); + } + if (fromType == PTY_u1) { + static_cast(opnd0)->SetValidBitsNum(1); + static_cast(resOpnd)->SetValidBitsNum(1); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + (fsize == k8BitSize) ? MOP_xuxtb32 : MOP_xuxth32, *resOpnd, *opnd0)); + } + } + } else { /* Same size or truncate */ +#ifdef CNV_OPTIMIZE + /* + * No code needed for aarch64 with same reg. + * Just update regno. + */ + RegOperand *reg = static_cast(resOpnd); + reg->regNo = static_cast(opnd0)->regNo; +#else + /* + * This is not really needed if opnd0 is result from a load. + * Hopefully the FE will get rid of the redundant conversions for loads. + */ + PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32)); + opnd0 = &LoadIntoRegister(*opnd0, primType); + + if (fsize > tsize) { + if (tsize == k8BitSize) { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsxtb32 : MOP_xuxtb32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else if (tsize == k16BitSize) { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsxth32 : MOP_xuxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0, + CreateImmOperand(0, k8BitSize, false), + CreateImmOperand(tsize, k8BitSize, false))); + } + } else { + /* same size, so resOpnd can be set */ + if ((mirModule.IsJavaModule()) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) || + (GetPrimTypeSize(toType) >= k4BitSize)) { + resOpnd = opnd0; + } else if (IsUnsignedInteger(toType)) { + MOperator mop; + switch (toType) { + case PTY_u8: + mop = MOP_xuxtb32; + break; + case PTY_u16: + mop = MOP_xuxth32; + break; + default: + CHECK_FATAL(0, "Unhandled unsigned convert"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0)); + } else { + /* signed target */ + uint32 size = GetPrimTypeSize(toType); + MOperator mop; + switch (toType) { + case PTY_i8: + mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32; + break; + case PTY_i16: + mop = (size > k4BitSize) ? 
MOP_xsxth64 : MOP_xsxth32; + break; + default: + CHECK_FATAL(0, "Unhandled unsigned convert"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0)); + } + } +#endif + } +} + +Operand *AArch64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) { + PrimType fromType = node.FromType(); + PrimType toType = node.GetPrimType(); + if (fromType == toType) { + return &opnd0; /* noop */ + } + Operand *resOpnd = &GetOrCreateResOperand(parent, toType); + if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { + SelectCvtInt2Int(&parent, resOpnd, &opnd0, fromType, toType); + } else if (IsPrimitiveVector(toType) || IsPrimitiveVector(fromType)) { + CHECK_FATAL(IsPrimitiveVector(toType) && IsPrimitiveVector(fromType), "Invalid vector cvt operands"); + SelectVectorCvt(resOpnd, toType, &opnd0, fromType); + } else { /* both are float type */ + SelectCvtFloat2Float(*resOpnd, opnd0, fromType, toType); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType ftype = node.FromType(); + bool is64Bits = (GetPrimTypeBitSize(node.GetPrimType()) == k64BitSize); + PrimType itype = (is64Bits) ? (IsSignedInteger(node.GetPrimType()) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(node.GetPrimType()) ? PTY_i32 : PTY_u32); /* promoted type */ + RegOperand &resOpnd = GetOrCreateResOperand(parent, itype); + SelectCvtFloat2Int(resOpnd, opnd0, itype, ftype); + return &resOpnd; +} + +void AArch64CGFunc::SelectSelect(Operand &resOpnd, Operand &condOpnd, Operand &trueOpnd, Operand &falseOpnd, + PrimType dtype, PrimType ctype, bool hasCompare, ConditionCode cc) { + DEBUG_ASSERT(&resOpnd != &condOpnd, "resOpnd cannot be the same as condOpnd"); + bool isIntType = IsPrimitiveInteger(dtype); + DEBUG_ASSERT((IsPrimitiveInteger(dtype) || IsPrimitiveFloat(dtype)), "unknown type for select"); + // making condOpnd and cmpInsn closer will provide more opportunity for opt + Operand &newTrueOpnd = LoadIntoRegister(trueOpnd, dtype); + Operand &newFalseOpnd = LoadIntoRegister(falseOpnd, dtype); + Operand &newCondOpnd = LoadIntoRegister(condOpnd, ctype); + if (hasCompare) { + SelectAArch64Cmp(newCondOpnd, CreateImmOperand(0, ctype, false), true, GetPrimTypeBitSize(ctype)); + cc = CC_NE; + } + Operand &newResOpnd = LoadIntoRegister(resOpnd, dtype); + SelectAArch64Select(newResOpnd, newTrueOpnd, newFalseOpnd, + GetCondOperand(cc), isIntType, GetPrimTypeBitSize(dtype)); +} + +Operand *AArch64CGFunc::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent, bool hasCompare) { + PrimType dtype = expr.GetPrimType(); + PrimType ctype = expr.Opnd(0)->GetPrimType(); + + ConditionCode cc = CC_NE; + Opcode opcode = expr.Opnd(0)->GetOpCode(); + PrimType cmpType = static_cast(expr.Opnd(0))->GetOpndType(); + bool isFloat = false; + bool unsignedIntegerComparison = false; + if (!IsPrimitiveVector(cmpType)) { + isFloat = IsPrimitiveFloat(cmpType); + unsignedIntegerComparison = !isFloat && !IsSignedInteger(cmpType); + } else { + isFloat = IsPrimitiveVectorFloat(cmpType); + unsignedIntegerComparison = !isFloat && IsPrimitiveUnSignedVector(cmpType); + } + switch (opcode) { + case OP_eq: + cc = CC_EQ; + break; + case OP_ne: 
+            cc = CC_NE;
+            break;
+        case OP_le:
+            cc = unsignedIntegerComparison ? CC_LS : CC_LE;
+            break;
+        case OP_ge:
+            cc = unsignedIntegerComparison ? CC_HS : CC_GE;
+            break;
+        case OP_gt:
+            cc = unsignedIntegerComparison ? CC_HI : CC_GT;
+            break;
+        case OP_lt:
+            cc = unsignedIntegerComparison ? CC_LO : CC_LT;
+            break;
+        default:
+            hasCompare = true;
+            break;
+    }
+    if (!IsPrimitiveVector(dtype)) {
+        RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
+        SelectSelect(resOpnd, cond, trueOpnd, falseOpnd, dtype, ctype, hasCompare, cc);
+        return &resOpnd;
+    } else {
+        return SelectVectorSelect(cond, dtype, trueOpnd, falseOpnd);
+    }
+}
+
+/*
+ * syntax: select <prim-type> (<opnd0>, <opnd1>, <opnd2>)
+ * <opnd0> must be of integer type.
+ * <opnd1> and <opnd2> must be of the type given by <prim-type>.
+ * If <opnd0> is not 0, return <opnd1>. Otherwise, return <opnd2>.
+ */
+void AArch64CGFunc::SelectAArch64Select(Operand &dest, Operand &o0, Operand &o1, CondOperand &cond, bool isIntType,
+                                        uint32 dsize) {
+    uint32 mOpCode = isIntType ? ((dsize == k64BitSize) ? MOP_xcselrrrc : MOP_wcselrrrc)
+                               : ((dsize == k64BitSize) ? MOP_dcselrrrc
+                                                        : ((dsize == k32BitSize) ? MOP_scselrrrc : MOP_hcselrrrc));
+    Operand &rflag = GetOrCreateRflag();
+    if (o1.IsImmediate()) {
+        uint32 movOp = (dsize == k64BitSize ? MOP_xmovri64 : MOP_wmovri32);
+        RegOperand &movDest = CreateVirtualRegisterOperand(
+            NewVReg(kRegTyInt, (dsize == k64BitSize) ? k8ByteSize : k4ByteSize));
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(movOp, movDest, o1));
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, movDest, cond, rflag));
+        return;
+    }
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, o1, cond, rflag));
+}
+
+void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) {
+    const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable();
+    MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_a64));
+    /*
+     * we store an 8-byte displacement ( jump_label - offset_table_address )
+     * in the table.
Refer to AArch64Emit::Emit() in aarch64emit.cpp + */ + std::vector sizeArray; + sizeArray.emplace_back(switchTable.size()); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(mirModule, *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + + MIRSymbol *lblSt = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(GetFunction().GetStIdx().Idx()); + uint32 labelIdxTmp = GetLabelIdx(); + lblStr.append(funcSt->GetName()).append(std::to_string(labelIdxTmp++)); + SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + AddEmitSt(GetCurBB()->GetId(), *lblSt); + + PrimType itype = rangeGotoNode.Opnd(0)->GetPrimType(); + Operand &opnd0 = LoadIntoRegister(srcOpnd, itype); + + regno_t vRegNO = NewVReg(kRegTyInt, 8u); + RegOperand *addOpnd = &CreateVirtualRegisterOperand(vRegNO); + + int32 minIdx = switchTable[0].first; + SelectAdd(*addOpnd, opnd0, + CreateImmOperand(-static_cast(minIdx) - static_cast(rangeGotoNode.GetTagOffset()), + GetPrimTypeBitSize(itype), true), itype); + + /* contains the index */ + if (addOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + addOpnd = static_cast(&SelectCopy(*addOpnd, PTY_u64, PTY_u64)); + } + + RegOperand &baseOpnd = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(*lblSt, 0, 0); + + /* load the address of the switch table */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + Operand *disp = + CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift); + RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, tgt)); +} + +Operand *AArch64CGFunc::SelectLazyLoad(Operand &opnd0, PrimType primType) { + DEBUG_ASSERT(opnd0.IsRegister(), "wrong type."); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_lazy_ldr, resOpnd, opnd0)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) { + StImmOperand &srcOpnd = CreateStImmOperand(st, offset, 0); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_lazy_ldr_static, resOpnd, srcOpnd)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) { + StImmOperand &srcOpnd = CreateStImmOperand(st, offset, 0); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_arrayclass_cache_ldr, resOpnd, srcOpnd)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectAlloca(UnaryNode &node, Operand &opnd0) { + if (!CGOptions::IsArm64ilp32()) { + DEBUG_ASSERT((node.GetPrimType() == PTY_a64), "wrong type"); + } + if 
(GetCG()->IsLmbc()) { + SetHasVLAOrAlloca(true); + } + PrimType stype = node.Opnd(0)->GetPrimType(); + Operand *resOpnd = &opnd0; + if (GetPrimTypeBitSize(stype) < GetPrimTypeBitSize(PTY_u64)) { + resOpnd = &CreateRegisterOperandOfType(PTY_u64); + SelectCvtInt2Int(nullptr, resOpnd, &opnd0, stype, PTY_u64); + } + + RegOperand &aliOp = CreateRegisterOperandOfType(PTY_u64); + + SelectAdd(aliOp, *resOpnd, CreateImmOperand(kAarch64StackPtrAlignment - 1, k64BitSize, true), PTY_u64); + Operand &shifOpnd = CreateImmOperand(__builtin_ctz(kAarch64StackPtrAlignment), k64BitSize, true); + SelectShift(aliOp, aliOp, shifOpnd, kShiftLright, PTY_u64); + SelectShift(aliOp, aliOp, shifOpnd, kShiftLeft, PTY_u64); + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); + int64 allocaOffset = GetMemlayout()->SizeOfArgsToStackPass(); + if (GetCG()->IsLmbc()) { + allocaOffset -= kDivide2 * k8ByteSize; + } + if (allocaOffset > 0) { + RegOperand &resallo = CreateRegisterOperandOfType(PTY_u64); + SelectAdd(resallo, spOpnd, CreateImmOperand(allocaOffset, k64BitSize, true), PTY_u64); + return &resallo; + } else { + return &SelectCopy(spOpnd, PTY_u64, PTY_u64); + } +} + +Operand *AArch64CGFunc::SelectMalloc(UnaryNode &node, Operand &opnd0) { + PrimType retType = node.GetPrimType(); + DEBUG_ASSERT((retType == PTY_a64), "wrong type"); + + std::vector opndVec; + RegOperand &resOpnd = CreateRegisterOperandOfType(retType); + opndVec.emplace_back(&resOpnd); + opndVec.emplace_back(&opnd0); + /* Use calloc to make sure allocated memory is zero-initialized */ + const std::string &funcName = "calloc"; + PrimType srcPty = PTY_u64; + if (opnd0.GetSize() <= k32BitSize) { + srcPty = PTY_u32; + } + Operand &opnd1 = CreateImmOperand(1, srcPty, false); + opndVec.emplace_back(&opnd1); + SelectLibCall(funcName, opndVec, srcPty, retType); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectGCMalloc(GCMallocNode &node) { + PrimType retType = node.GetPrimType(); + DEBUG_ASSERT((retType == PTY_a64), "wrong type"); + + /* Get the size and alignment of the type. 
*/
+    TyIdx tyIdx = node.GetTyIdx();
+    uint64 size = GetBecommon().GetTypeSize(tyIdx);
+    uint8 align = RTSupport::GetRTSupportInstance().GetObjectAlignment();
+
+    /* Generate the call to MCC_NewObj */
+    Operand &opndSize = CreateImmOperand(static_cast<int64>(size), k64BitSize, false);
+    Operand &opndAlign = CreateImmOperand(align, k64BitSize, false);
+
+    RegOperand &resOpnd = CreateRegisterOperandOfType(retType);
+
+    std::vector<Operand*> opndVec{ &resOpnd, &opndSize, &opndAlign };
+
+    const std::string &funcName = "MCC_NewObj";
+    SelectLibCall(funcName, opndVec, PTY_u64, retType);
+
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectJarrayMalloc(JarrayMallocNode &node, Operand &opnd0) {
+    PrimType retType = node.GetPrimType();
+    DEBUG_ASSERT((retType == PTY_a64), "wrong type");
+
+    /* Extract the jarray type */
+    TyIdx tyIdx = node.GetTyIdx();
+    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    DEBUG_ASSERT(type != nullptr, "nullptr check");
+    CHECK_FATAL(type->GetKind() == kTypeJArray, "expect MIRJarrayType");
+    auto jaryType = static_cast<MIRJarrayType*>(type);
+    uint64 fixedSize = RTSupport::GetRTSupportInstance().GetArrayContentOffset();
+    uint8 align = RTSupport::GetRTSupportInstance().GetObjectAlignment();
+
+    MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jaryType->GetElemTyIdx());
+    PrimType elemPrimType = elemType->GetPrimType();
+    uint64 elemSize = GetPrimTypeSize(elemPrimType);
+
+    /* Generate the call to MCC_NewObj_flexible */
+    Operand &opndFixedSize = CreateImmOperand(PTY_u64, static_cast<int64>(fixedSize));
+    Operand &opndElemSize = CreateImmOperand(PTY_u64, static_cast<int64>(elemSize));
+
+    Operand *opndNElems = &opnd0;
+
+    Operand *opndNElems64 = &static_cast<Operand&>(CreateRegisterOperandOfType(PTY_u64));
+    SelectCvtInt2Int(nullptr, opndNElems64, opndNElems, PTY_u32, PTY_u64);
+
+    Operand &opndAlign = CreateImmOperand(PTY_u64, align);
+
+    RegOperand &resOpnd = CreateRegisterOperandOfType(retType);
+
+    std::vector<Operand*> opndVec{ &resOpnd, &opndFixedSize, &opndElemSize, opndNElems64, &opndAlign };
+
+    const std::string &funcName = "MCC_NewObj_flexible";
+    SelectLibCall(funcName, opndVec, PTY_u64, retType);
+
+    /* Generate the store of the object length field */
+    MemOperand &opndArrayLengthField = CreateMemOpnd(resOpnd,
+        static_cast<int64>(RTSupport::GetRTSupportInstance().GetArrayLengthOffset()), k4BitSize);
+    RegOperand *regOpndNElems = &SelectCopy(*opndNElems, PTY_u32, PTY_u32);
+    DEBUG_ASSERT(regOpndNElems != nullptr, "null ptr check!");
+    SelectCopy(opndArrayLengthField, PTY_u32, *regOpndNElems, PTY_u32);
+
+    return &resOpnd;
+}
+
+bool AArch64CGFunc::IsRegRematCand(const RegOperand &reg) const {
+    MIRPreg *preg = GetPseudoRegFromVirtualRegNO(reg.GetRegisterNumber(), CGOptions::DoCGSSA());
+    if (preg != nullptr && preg->GetOp() != OP_undef) {
+        if (preg->GetOp() == OP_constval && cg->GetRematLevel() >= 1) {
+            return true;
+        } else if (preg->GetOp() == OP_addrof && cg->GetRematLevel() >= 2) {
+            return true;
+        } else if (preg->GetOp() == OP_iread && cg->GetRematLevel() >= 4) {
+            return true;
+        } else {
+            return false;
+        }
+    } else {
+        return false;
+    }
+}
+
+void AArch64CGFunc::ClearRegRematInfo(const RegOperand &reg) const {
+    MIRPreg *preg = GetPseudoRegFromVirtualRegNO(reg.GetRegisterNumber(), CGOptions::DoCGSSA());
+    if (preg != nullptr && preg->GetOp() != OP_undef) {
+        preg->SetOp(OP_undef);
+    }
+}
+
+bool AArch64CGFunc::IsRegSameRematInfo(const RegOperand &regDest, const RegOperand &regSrc) const {
+    MIRPreg *pregDest = GetPseudoRegFromVirtualRegNO(regDest.GetRegisterNumber(), CGOptions::DoCGSSA());
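+    /*
+     * Illustrative sketch, not part of the upstream logic: rematerialization
+     * re-emits a cheap defining op at the use site instead of spilling and
+     * reloading the register. For a pseudo-reg defined by OP_constval 42:
+     *   spill/reload:   str x1, [x29,#-16]  ...  ldr x1, [x29,#-16]
+     *   rematerialize:                      ...  mov x1, #42
+     * (the register and offset are made up for the example). The checks
+     * below mirror IsRegRematCand: constval needs remat level >= 1,
+     * addrof >= 2, iread >= 4.
+     */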
MIRPreg *pregSrc = GetPseudoRegFromVirtualRegNO(regSrc.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (pregDest != nullptr && pregDest == pregSrc) { + if (pregDest->GetOp() == OP_constval && cg->GetRematLevel() >= 1) { + return true; + } else if (pregDest->GetOp() == OP_addrof && cg->GetRematLevel() >= 2) { + return true; + } else if (pregDest->GetOp() == OP_iread && cg->GetRematLevel() >= 4) { + return true; + } else { + return false; + } + } else { + return false; + } +} + +void AArch64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) { + auto opndNum = static_cast(insn.GetOperandSize()); + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + if (opnd.IsList()) { + std::list tempRegStore; + auto& opndList = static_cast(opnd).GetOperands(); + bool needReplace = false; + for (auto it = opndList.begin(), end = opndList.end(); it != end; ++it) { + auto *regOpnd = *it; + if (regOpnd->GetRegisterNumber() == destNO) { + needReplace = true; + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + tempRegStore.push_back(®Dest); + } else { + tempRegStore.push_back(®Src); + } + } else { + tempRegStore.push_back(regOpnd); + } + } + if (needReplace) { + opndList.clear(); + for (auto newOpnd : tempRegStore) { + static_cast(opnd).PushOpnd(*newOpnd); + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *baseRegOpnd = memOpnd.GetBaseRegister(); + RegOperand *indexRegOpnd = memOpnd.GetIndexRegister(); + MemOperand *newMem = static_cast(memOpnd.Clone(*GetMemoryPool())); + if ((baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) || + (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO)) { + if (baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetBaseRegister(regDest); + } else { + newMem->SetBaseRegister(regSrc); + } + } + if (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetIndexRegister(regDest); + } else { + newMem->SetIndexRegister(regSrc); + } + } + insn.SetMemOpnd(&GetOrCreateMemOpnd(*newMem)); + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.GetRegisterNumber() == destNO) { + DEBUG_ASSERT(regOpnd.GetRegisterNumber() != kRFLAG, "both condi and reg"); + if (regDest.GetSize() != regSrc.GetSize()) { + regOpnd.SetRegisterNumber(regSrc.GetRegisterNumber()); + } else { + insn.SetOperand(static_cast(i), regSrc); + } + } + } + } +} + +void AArch64CGFunc::CleanupDeadMov(bool dumpInfo) { + /* clean dead mov. 
*/ + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_xmovrr || insn->GetMachineOpcode() == MOP_wmovrr || + insn->GetMachineOpcode() == MOP_xvmovs || insn->GetMachineOpcode() == MOP_xvmovd) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { + continue; + } + + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + bb->RemoveInsn(*insn); + } else if (insn->IsPhiMovInsn() && dumpInfo) { + LogInfo::MapleLogger() << "fail to remove mov: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + } + } + } +} + +void AArch64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realSaveRegs) { + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in AArch64Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCallerSaveReg(static_cast(preg))) { + realSaveRegs.insert(preg); + } + } + return; + } + } + for (uint32 i = R0; i <= kMaxRegNum; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + realSaveRegs.insert(i); + } + } +} + +RegOperand &AArch64CGFunc::GetZeroOpnd(uint32 bitLen) { + /* + * It is possible to have a bitLen < 32, eg stb. + * Set it to 32 if it is less than 32. + */ + if (bitLen < k32BitSize) { + bitLen = k32BitSize; + } + DEBUG_ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen); + return (bitLen == k32BitSize) ? GetOrCreatePhysicalRegisterOperand(RZR, k32BitSize, kRegTyInt) : + GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); +} + +bool AArch64CGFunc::IsFrameReg(const RegOperand &opnd) const { + if (opnd.GetRegisterNumber() == RFP) { + return true; + } else { + return false; + } +} + +bool AArch64CGFunc::IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const { + AArch64CallConvImpl retLocator(cgBeCommon); + CCLocInfo retMechanism; + retLocator.InitReturnInfo(mirType, retMechanism); + if (retMechanism.GetRegCount() > 0) { + return reg.GetRegisterNumber() == retMechanism.GetReg0() || reg.GetRegisterNumber() == retMechanism.GetReg1() || + reg.GetRegisterNumber() == retMechanism.GetReg2() || reg.GetRegisterNumber() == retMechanism.GetReg3(); + } + return false; +} + +bool AArch64CGFunc::IsSPOrFP(const RegOperand &opnd) const { + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = opnd.GetRegisterNumber(); + return (regOpnd.IsPhysicalRegister() && + (regNO == RSP || regNO == RFP || (regNO == R29 && CGOptions::UseFramePointer()))); +} + +bool AArch64CGFunc::IsReturnReg(const RegOperand &opnd) const { + regno_t regNO = opnd.GetRegisterNumber(); + return (regNO == R0) || (regNO == V0); +} + +/* + * This function returns true to indicate that the clean up code needs to be generated, + * otherwise it does not need. In GCOnly mode, it always returns false. 
+ */ +bool AArch64CGFunc::NeedCleanup() { + if (CGOptions::IsGCOnly()) { + return false; + } + AArch64MemLayout *layout = static_cast(GetMemlayout()); + if (layout->GetSizeOfRefLocals() > 0) { + return true; + } + for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) { + TypeAttrs ta = GetFunction().GetNthParamAttr(i); + if (ta.GetAttr(ATTR_localrefvar)) { + return true; + } + } + + return false; +} + +/* + * bb must be the cleanup bb. + * this function must be invoked before register allocation. + * extended epilogue is specific for fast exception handling and is made up of + * clean up code and epilogue. + * clean up code is generated here while epilogue is generated in GeneratePrologEpilog() + */ +void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { + DEBUG_ASSERT(GetLastBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be"); + + if (NeedCleanup()) { + /* this is necessary for code insertion. */ + SetCurBB(bb); + + RegOperand ®Opnd0 = + GetOrCreatePhysicalRegisterOperand(R0, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + RegOperand ®Opnd1 = + GetOrCreatePhysicalRegisterOperand(R1, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + /* allocate 16 bytes to store reg0 and reg1 (each reg has 8 bytes) */ + MemOperand &frameAlloc = CreateCallFrameOperand(-16, GetPointerSize() * kBitsPerByte); + Insn &allocInsn = GetInsnBuilder()->BuildInsn(MOP_xstp, regOpnd0, regOpnd1, frameAlloc); + allocInsn.SetDoNotRemove(true); + AppendInstructionTo(allocInsn, *this); + + /* invoke MCC_CleanupLocalStackRef(). */ + HandleRCCall(false); + /* deallocate 16 bytes which used to store reg0 and reg1 */ + MemOperand &frameDealloc = CreateCallFrameOperand(16, GetPointerSize() * kBitsPerByte); + GenRetCleanup(cleanEANode, true); + Insn &deallocInsn = GetInsnBuilder()->BuildInsn(MOP_xldp, regOpnd0, regOpnd1, frameDealloc); + deallocInsn.SetDoNotRemove(true); + AppendInstructionTo(deallocInsn, *this); + /* Update cleanupbb since bb may have been splitted */ + SetCleanupBB(*GetCurBB()); + } +} + +/* + * bb must be the cleanup bb. + * this function must be invoked before register allocation. + */ +void AArch64CGFunc::GenerateCleanupCode(BB &bb) { + DEBUG_ASSERT(GetLastBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be"); + if (!NeedCleanup()) { + return; + } + + /* this is necessary for code insertion. */ + SetCurBB(bb); + + /* R0 is lived-in for clean-up code, save R0 before invocation */ + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + + if (!GetCG()->GenLocalRC()) { + /* by pass local RC operations. */ + } else if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + regno_t vreg = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &backupRegOp = CreateVirtualRegisterOperand(vreg); + backupRegOp.SetRegNotBBLocal(); + SelectCopy(backupRegOp, PTY_a64, livein, PTY_a64); + + /* invoke MCC_CleanupLocalStackRef(). */ + HandleRCCall(false); + SelectCopy(livein, PTY_a64, backupRegOp, PTY_a64); + } else { + /* + * Register Allocation for O0 can not handle this case, so use a callee saved register directly. + * If yieldpoint is enabled, we use R20 instead R19. + */ + AArch64reg backupRegNO = GetCG()->GenYieldPoint() ? R20 : R19; + RegOperand &backupRegOp = GetOrCreatePhysicalRegisterOperand(backupRegNO, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + SelectCopy(backupRegOp, PTY_a64, livein, PTY_a64); + /* invoke MCC_CleanupLocalStackRef(). 
*/ + HandleRCCall(false); + SelectCopy(livein, PTY_a64, backupRegOp, PTY_a64); + } + + /* invoke _Unwind_Resume */ + std::string funcName("_Unwind_Resume"); + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + sym->SetNameStrIdx(funcName); + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(livein); + AppendCall(*sym, *srcOpnds); + /* + * this instruction is unreachable, but we need it as the return address of previous + * "bl _Unwind_Resume" for stack unwinding. + */ + Insn &nop = GetInsnBuilder()->BuildInsn(MOP_xblr, livein, *srcOpnds); + GetCurBB()->AppendInsn(nop); + GetCurBB()->SetHasCall(); + + /* Update cleanupbb since bb may have been splitted */ + SetCleanupBB(*GetCurBB()); +} + +uint32 AArch64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) { + AArch64CallConvImpl parmlocator(GetBecommon()); + return parmlocator.FloatParamRegRequired(*structType, fpSize); +} + +/* + * Map param registers to formals. For small structs passed in param registers, + * create a move to vreg since lmbc IR does not create a regassign for them. + */ +void AArch64CGFunc::AssignLmbcFormalParams() { + PrimType primType; + uint32 offset; + regno_t intReg = R0; + regno_t fpReg = V0; + for (auto param : GetLmbcParamVec()) { + primType = param->GetPrimType(); + offset = param->GetOffset(); + if (param->IsReturn()) { + param->SetRegNO(R8); + } else if (IsPrimitiveInteger(primType)) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + if (!param->HasRegassign()) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(offset), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } + intReg++; + } + } else if (IsPrimitiveFloat(primType)) { + if (fpReg > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + if (!param->HasRegassign()) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(offset), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(fpReg), bitlen, kRegTyFloat); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } + fpReg++; + } + } else if (primType == PTY_agg) { + if (param->IsPureFloat()) { + uint32 numFpRegs = param->GetNumRegs(); + if ((fpReg + numFpRegs - kOneRegister) > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + param->SetNumRegs(numFpRegs); + fpReg += numFpRegs; + } + } else if (param->GetSize() > k16ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetIsOnStack(); + param->SetOnStackOffset(((intReg - R0 + fpReg) - V0) * k8ByteSize); + uint32 bytelen = GetPrimTypeSize(PTY_a64); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(param->GetOnStackOffset()), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, PTY_a64); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + 
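+                    /*
+                     * Illustrative example, assuming the AAPCS64-style convention this
+                     * branch appears to implement: an aggregate larger than 16 bytes is
+                     * passed indirectly, e.g. for
+                     *   struct Big { char b[24]; };
+                     *   void f(struct Big s);
+                     * the caller copies s to memory and passes only its address in the
+                     * next integer register; the store above parks that address in the
+                     * formal's stack home so later loads of the formal go through it.
+                     */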
intReg++; + } + } else if (param->GetSize() <= k8ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kOneRegister); + intReg++; + } + } else { + /* size > 8 && size <= 16 */ + if ((intReg + kOneRegister) > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kTwoRegister); + intReg += kTwoRegister; + } + } + if (param->GetRegNO() != 0) { + for (uint32 i = 0; i < param->GetNumRegs(); ++i) { + PrimType pType = PTY_i64; + RegType rType = kRegTyInt; + uint32 rSize = k8ByteSize; + if (param->IsPureFloat()) { + rType = kRegTyFloat; + if (param->GetFpSize() <= k4ByteSize) { + pType = PTY_f32; + rSize = k4ByteSize; + } else { + pType = PTY_f64; + } + } + regno_t vreg = NewVReg(rType, rSize); + RegOperand &dest = GetOrCreateVirtualRegisterOperand(vreg); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(static_cast(param->GetRegNO() + i), + rSize * kBitsPerByte, rType); + SelectCopy(dest, pType, src, pType); + if (param->GetVregNO() == 0) { + param->SetVregNO(vreg); + } + Operand *memOpd = &CreateMemOpnd(RFP, offset + (i * rSize), rSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + PickStInsn(rSize * kBitsPerByte, pType), dest, *memOpd)); + } + } + } else { + CHECK_FATAL(false, "lmbc formal primtype not handled"); + } + } +} + +void AArch64CGFunc::LmbcGenSaveSpForAlloca() { + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc || !HasVLAOrAlloca()) { + return; + } + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + RegOperand &spSaveOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPointerSize())); + Insn &save = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spSaveOpnd, spOpnd); + GetFirstBB()->AppendInsn(save); + for (auto *retBB : GetExitBBsVec()) { + Insn &restore = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spOpnd, spSaveOpnd); + retBB->AppendInsn(restore); + restore.SetFrameDef(true); + } +} + +/* if offset < 0, allocation; otherwise, deallocation */ +MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, uint32 size) { + MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size); + memOpnd->SetIndexOpt((offset < 0) ? MemOperand::kPreIndex : MemOperand::kPostIndex); + return *memOpnd; +} + +BitShiftOperand *AArch64CGFunc::GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const { + /* num(0, 16, 32, 48) >> 4 is num1(0, 1, 2, 3), num1 & (~3) == 0 */ + DEBUG_ASSERT((!shiftAmount || ((shiftAmount >> 4) & ~static_cast(3)) == 0), + "shift amount should be one of 0, 16, 32, 48"); + /* movkLslOperands[4]~movkLslOperands[7] is for 64 bits */ + return &movkLslOperands[(shiftAmount >> 4) + (is64bits ? 
4 : 0)];
+}
+
+AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = {
+    BitShiftOperand(BitShiftOperand::kLSL, 0, 4), BitShiftOperand(BitShiftOperand::kLSL, 16, 4),
+    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
+    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
+    BitShiftOperand(BitShiftOperand::kLSL, 0, 6), BitShiftOperand(BitShiftOperand::kLSL, 16, 6),
+    BitShiftOperand(BitShiftOperand::kLSL, 32, 6), BitShiftOperand(BitShiftOperand::kLSL, 48, 6),
+};
+
+MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size) {
+    AArch64reg reg;
+    if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) {
+        reg = RSP;
+    } else {
+        reg = RFP;
+    }
+    MemOperand *memOp = CreateStackMemOpnd(reg, static_cast<int32>(offset), size);
+    return *memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size) {
+    auto *memOp = memPool->New<MemOperand>(
+        memPool->New<RegOperand>(preg, k64BitSize, kRegTyInt),
+        &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize),
+        size);
+    if (preg == RFP || preg == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size,
+                                            RegOperand &base, RegOperand *index,
+                                            ImmOperand *offset, const MIRSymbol *symbol) const {
+    auto *memOp = memPool->New<MemOperand>(
+        mode, size, base, index, offset, symbol);
+    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size,
+                                            RegOperand &base, RegOperand &index,
+                                            ImmOperand *offset, const MIRSymbol &symbol, bool noExtend) {
+    auto *memOp = memPool->New<MemOperand>(
+        mode, size, base, index, offset, symbol, noExtend);
+    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize,
+                                            RegOperand &base, RegOperand &indexOpnd,
+                                            uint32 shift, bool isSigned) const {
+    auto *memOp = memPool->New<MemOperand>(
+        mode, dSize, base, indexOpnd, shift, isSigned);
+    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize,
+                                            const MIRSymbol &sym) {
+    auto *memOp = memPool->New<MemOperand>(mode, dSize, sym);
+    return memOp;
+}
+
+void AArch64CGFunc::GenSaveMethodInfoCode(BB &bb) {
+    if (GetCG()->UseFastUnwind()) {
+        BB *formerCurBB = GetCurBB();
+        GetDummyBB()->ClearInsns();
+        SetCurBB(*GetDummyBB());
+        /*
+         * FUNCATTR_bridge for function: Ljava_2Flang_2FString_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I, to
+         * exclude this function; this function is a bridge function generated for Java generics
+         */
+        if ((GetFunction().GetAttr(FUNCATTR_native) || GetFunction().GetAttr(FUNCATTR_fast_native)) &&
+            !GetFunction().GetAttr(FUNCATTR_critical_native) && !GetFunction().GetAttr(FUNCATTR_bridge)) {
+            RegOperand &fpReg = GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt);
+
+            ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
+            RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
+            srcOpnds->PushOpnd(parmRegOpnd1);
+            Operand &immOpnd = CreateImmOperand(0, k64BitSize, false);
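+            /*
+             * The instructions materialized below amount to (illustrative,
+             * exact mnemonics depend on the MOP encodings):
+             *   mov x0, #0      // first argument
+             *   mov x1, x29     // second argument: the frame pointer
+             *   bl  MCC_SetRiskyUnwindContext
+             * presumably so the runtime can record this frame for fast
+             * unwinding across the native boundary.
+             */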
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadri64, parmRegOpnd1, immOpnd)); + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopy(parmRegOpnd2, PTY_a64, fpReg, PTY_a64); + + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_SetRiskyUnwindContext"); + sym->SetNameStrIdx(funcName); + + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + AppendCall(*sym, *srcOpnds); + bb.SetHasCall(); + } + + bb.InsertAtBeginning(*GetDummyBB()); + SetCurBB(*formerCurBB); + } +} + +bool AArch64CGFunc::HasStackLoadStore() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS(insn, bb) { + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + + if ((base != nullptr) && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + RegType regType = regOpnd->GetRegisterType(); + uint32 regNO = regOpnd->GetRegisterNumber(); + if (((regType != kRegTyCc) && ((regNO == RFP) || (regNO == RSP))) || (regType == kRegTyVary)) { + return true; + } + } + } + } + } + } + return false; +} + +void AArch64CGFunc::GenerateYieldpoint(BB &bb) { + /* ldr wzr, [RYP] # RYP hold address of the polling page. */ + auto &wzr = GetZeroOpnd(k32BitSize); + auto &pollingPage = CreateMemOpnd(RYP, 0, k32BitSize); + auto &yieldPoint = GetInsnBuilder()->BuildInsn(MOP_wldr, wzr, pollingPage); + if (GetCG()->GenerateVerboseCG()) { + yieldPoint.SetComment("yieldpoint"); + } + bb.AppendInsn(yieldPoint); +} + +Operand &AArch64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + +Operand &AArch64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) { + uint32 bitSize = GetPrimTypeBitSize(primType) < k32BitSize ? k32BitSize : GetPrimTypeBitSize(primType); + AArch64reg pReg; + if (sReg < 0) { + return GetOrCreatePhysicalRegisterOperand( + IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0, + bitSize, GetRegTyFromPrimTy(primType)); + } else { + switch (sReg) { + case kSregRetval0: + pReg = IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0; + break; + case kSregRetval1: + pReg = R1; + break; + default: + pReg = RLAST_INT_REG; + DEBUG_ASSERT(0, "GetTargetRetOperand: NYI"); + } + return GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType)); + } +} + +RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(PrimType primType) { + RegType regType = GetRegTyFromPrimTy(primType); + uint32 byteLength = GetPrimTypeSize(primType); + return CreateRegisterOperandOfType(regType, byteLength); +} + +RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(RegType regty, uint32 byteLen) { + /* BUG: if half-precision floating point operations are supported? */ + /* AArch64 has 32-bit and 64-bit registers only */ + if (byteLen < k4ByteSize) { + byteLen = k4ByteSize; + } + regno_t vRegNO = NewVReg(regty, byteLen); + return CreateVirtualRegisterOperand(vRegNO); +} + +RegOperand &AArch64CGFunc::CreateRflagOperand() { + /* AArch64 has Status register that is 32-bit wide. 
*/ + regno_t vRegNO = NewVRflag(); + return CreateVirtualRegisterOperand(vRegNO); +} + +void AArch64CGFunc::MergeReturn() { + DEBUG_ASSERT(GetCurBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be"); + + uint32 exitBBSize = GetExitBBsVec().size(); + if (exitBBSize == 0) { + return; + } + if ((exitBBSize == 1) && GetExitBB(0) == GetCurBB()) { + return; + } + if (exitBBSize == 1) { + BB *onlyExitBB = GetExitBB(0); + BB *onlyExitBBNext = onlyExitBB->GetNext(); + StmtNode *stmt = onlyExitBBNext->GetFirstStmt(); + /* only deal with the return_BB in the middle */ + if (stmt != GetCleanupLabel()) { + LabelIdx labidx = CreateLabel(); + BB *retBB = CreateNewBB(labidx, onlyExitBB->IsUnreachable(), BB::kBBReturn, onlyExitBB->GetFrequency()); + onlyExitBB->AppendBB(*retBB); + /* modify the original return BB. */ + DEBUG_ASSERT(onlyExitBB->GetKind() == BB::kBBReturn, "Error: suppose to merge multi return bb"); + onlyExitBB->SetKind(BB::kBBFallthru); + + GetExitBBsVec().pop_back(); + GetExitBBsVec().emplace_back(retBB); + return; + } + } + + LabelIdx labidx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labidx); + uint32 freq = 0; + for (auto *tmpBB : GetExitBBsVec()) { + DEBUG_ASSERT(tmpBB->GetKind() == BB::kBBReturn, "Error: suppose to merge multi return bb"); + tmpBB->SetKind(BB::kBBGoto); + tmpBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + freq += tmpBB->GetFrequency(); + } + BB *retBB = CreateNewBB(labidx, false, BB::kBBReturn, freq); + GetCleanupBB()->PrependBB(*retBB); + + GetExitBBsVec().clear(); + GetExitBBsVec().emplace_back(retBB); +} + +void AArch64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) { + if (!GetCG()->GenLocalRC()) { + /* handle local rc is disabled. */ + return; + } + + Opcode ops[11] = { OP_label, OP_goto, OP_brfalse, OP_brtrue, OP_return, OP_call, + OP_icall, OP_rangegoto, OP_catch, OP_try, OP_endtry }; + std::set branchOp(ops, ops + 11); + + /* get cleanup intrinsic */ + bool found = false; + StmtNode *cleanupNode = retNode.GetPrev(); + cleanEANode = nullptr; + while (cleanupNode != nullptr) { + if (branchOp.find(cleanupNode->GetOpCode()) != branchOp.end()) { + if (cleanupNode->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(cleanupNode); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + if ((fsym->GetName() == "MCC_DecRef_NaiveRCFast") || (fsym->GetName() == "MCC_IncRef_NaiveRCFast") || + (fsym->GetName() == "MCC_IncDecRef_NaiveRCFast") || (fsym->GetName() == "MCC_LoadRefStatic") || + (fsym->GetName() == "MCC_LoadRefField") || (fsym->GetName() == "MCC_LoadReferentField") || + (fsym->GetName() == "MCC_LoadRefField_NaiveRCFast") || (fsym->GetName() == "MCC_LoadVolatileField") || + (fsym->GetName() == "MCC_LoadVolatileStaticField") || (fsym->GetName() == "MCC_LoadWeakField") || + (fsym->GetName() == "MCC_CheckObjMem")) { + cleanupNode = cleanupNode->GetPrev(); + continue; + } else { + break; + } + } else { + break; + } + } + + if (cleanupNode->GetOpCode() == OP_intrinsiccall) { + IntrinsiccallNode *tempNode = static_cast(cleanupNode); + if ((tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS) || + (tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP)) { + GenRetCleanup(tempNode); + if (cleanEANode != nullptr) { + GenRetCleanup(cleanEANode, true); + } + found = true; + break; + } + if (tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_NORETESCOBJS) { + cleanEANode 
= tempNode; + } + } + cleanupNode = cleanupNode->GetPrev(); + } + + if (!found) { + MIRSymbol *retRef = nullptr; + if (retNode.NumOpnds() != 0) { + retRef = GetRetRefSymbol(*static_cast(retNode).Opnd(0)); + } + HandleRCCall(false, retRef); + } +} + +bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool forEA) { +#undef CC_DEBUG_INFO + +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "==============" << GetFunction().GetName() << "==============" << '\n'; +#endif + + if (cleanupNode == nullptr) { + return false; + } + + int32 minByteOffset = INT_MAX; + int32 maxByteOffset = 0; + + int32 skipIndex = -1; + MIRSymbol *skipSym = nullptr; + size_t refSymNum = 0; + if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS) { + refSymNum = cleanupNode->GetNopndSize(); + if (refSymNum < 1) { + return true; + } + } else if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP) { + refSymNum = cleanupNode->GetNopndSize(); + /* refSymNum == 0, no local refvars; refSymNum == 1 and cleanup skip, so nothing to do */ + if (refSymNum < 2) { + return true; + } + BaseNode *skipExpr = cleanupNode->Opnd(refSymNum - 1); + + CHECK_FATAL(skipExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(skipExpr); + skipSym = GetFunction().GetLocalOrGlobalSymbol(refNode->GetStIdx()); + + refSymNum -= 1; + } else if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_NORETESCOBJS) { + refSymNum = cleanupNode->GetNopndSize(); + /* the number of operands of intrinsic call INTRN_MPL_CLEANUP_NORETESCOBJS must be more than 1 */ + if (refSymNum < 2) { + return true; + } + BaseNode *skipexpr = cleanupNode->Opnd(0); + CHECK_FATAL(skipexpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refnode = static_cast(skipexpr); + skipSym = GetFunction().GetLocalOrGlobalSymbol(refnode->GetStIdx()); + } + + /* now compute the offset range */ + std::vector offsets; + AArch64MemLayout *memLayout = static_cast(this->GetMemlayout()); + for (size_t i = 0; i < refSymNum; ++i) { + BaseNode *argExpr = cleanupNode->Opnd(i); + CHECK_FATAL(argExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(argExpr); + MIRSymbol *refSymbol = GetFunction().GetLocalOrGlobalSymbol(refNode->GetStIdx()); + if (memLayout->GetSymAllocTable().size() <= refSymbol->GetStIndex()) { + ERR(kLncErr, "access memLayout->GetSymAllocTable() failed"); + return false; + } + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(refSymbol->GetStIndex())); + int32 tempOffset = GetBaseOffset(*symLoc); + offsets.emplace_back(tempOffset); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "refsym " << refSymbol->GetName() << " offset " << tempOffset << '\n'; +#endif + minByteOffset = (minByteOffset > tempOffset) ? tempOffset : minByteOffset; + maxByteOffset = (maxByteOffset < tempOffset) ? 
tempOffset : maxByteOffset; + } + + /* get the skip offset */ + int32 skipOffset = -1; + if (skipSym != nullptr) { + AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(skipSym->GetStIndex())); + CHECK_FATAL(GetBaseOffset(*symLoc) < std::numeric_limits::max(), "out of range"); + skipOffset = GetBaseOffset(*symLoc); + offsets.emplace_back(skipOffset); + +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "skip " << skipSym->GetName() << " offset " << skipOffset << '\n'; +#endif + + skipIndex = symLoc->GetOffset() / kOffsetAlign; + } + + /* call runtime cleanup */ + if (minByteOffset < INT_MAX) { + int32 refLocBase = memLayout->GetRefLocBaseLoc(); + uint32 refNum = memLayout->GetSizeOfRefLocals() / kOffsetAlign; + CHECK_FATAL((refLocBase + (refNum - 1) * kIntregBytelen) < std::numeric_limits::max(), "out of range"); + int32 refLocEnd = refLocBase + (refNum - 1) * kIntregBytelen; + int32 realMin = minByteOffset < refLocBase ? refLocBase : minByteOffset; + int32 realMax = maxByteOffset > refLocEnd ? refLocEnd : maxByteOffset; + if (forEA) { + std::sort(offsets.begin(), offsets.end()); + int32 prev = offsets[0]; + for (size_t i = 1; i < offsets.size(); i++) { + CHECK_FATAL((offsets[i] == prev) || ((offsets[i] - prev) == kIntregBytelen), "must be"); + prev = offsets[i]; + } + CHECK_FATAL((refLocBase - prev) == kIntregBytelen, "must be"); + realMin = minByteOffset; + realMax = maxByteOffset; + } +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << " realMin " << realMin << " realMax " << realMax << '\n'; +#endif + if (realMax < realMin) { + /* maybe there is a cleanup intrinsic bug, use CHECK_FATAL instead? */ + CHECK_FATAL(false, "must be"); + } + + /* optimization for little slot cleanup */ + if (realMax == realMin && !forEA) { + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand &stackLoc = CreateStkTopOpnd(static_cast(realMin), GetPointerSize() * kBitsPerByte); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*callSym, *srcOpnds); + callInsn.SetRefSkipIdx(skipIndex); + GetCurBB()->SetHasCall(); + /* because of return stmt is often the last stmt */ + GetCurBB()->SetFrequency(frequency); + + return true; + } + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + + ImmOperand &beginOpnd = CreateImmOperand(realMin, k64BitSize, true); + regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); + RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); + SelectAdd(vReg0, fpOpnd, beginOpnd, PTY_i64); + + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd1); + SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); + + uint32 realRefNum = (realMax - realMin) / kOffsetAlign + 1; + + ImmOperand &countOpnd = CreateImmOperand(realRefNum, k64BitSize, true); + + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd2); + 
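+
+      /*
+       * The runtime call being assembled here is, roughly (illustrative only):
+       *   add x0, x29, #realMin            // start of the ref-slot range
+       *   mov x1, #realRefNum              // (realMax - realMin) / kOffsetAlign + 1
+       *   bl  MCC_CleanupLocalStackRef_NaiveRCFast
+       * or one of the skip/EA variants chosen below.
+       */
+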
SelectCopyImm(parmRegOpnd2, countOpnd, PTY_i64); + + MIRSymbol *funcSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + if ((skipSym != nullptr) && (skipOffset >= realMin) && (skipOffset <= realMax)) { + /* call cleanupskip */ + uint32 stOffset = (skipOffset - realMin) / kOffsetAlign; + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd3); + SelectCopyImm(parmRegOpnd3, retLoc, PTY_i64); + + std::string funcName; + if (forEA) { + funcName = "MCC_CleanupNonRetEscObj"; + } else { + funcName = "MCC_CleanupLocalStackRefSkip_NaiveRCFast"; + } + funcSym->SetNameStrIdx(funcName); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "num " << real_ref_num << " skip loc " << stOffset << '\n'; +#endif + } else { + /* call cleanup */ + CHECK_FATAL(!forEA, "must be"); + std::string funcName("MCC_CleanupLocalStackRef_NaiveRCFast"); + funcSym->SetNameStrIdx(funcName); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "num " << real_ref_num << '\n'; +#endif + } + + funcSym->SetStorageClass(kScText); + funcSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*funcSym, *srcOpnds); + callInsn.SetRefSkipIdx(skipIndex); + GetCurBB()->SetHasCall(); + GetCurBB()->SetFrequency(frequency); + } + return true; +} + +RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) const { + RegOperand *res = memPool->New(vRegNO, size, kind, flg); + return res; +} + +RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { + DEBUG_ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); + DEBUG_ASSERT(vRegNO < vRegTable.size(), "index out of range"); + uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); + vRegOperandTable[vRegNO] = res; + return *res; +} + +RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { + auto it = vRegOperandTable.find(vRegNO); + return (it != vRegOperandTable.end()) ? 
*(it->second) : CreateVirtualRegisterOperand(vRegNO);
+}
+
+RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand &regOpnd) {
+  regno_t regNO = regOpnd.GetRegisterNumber();
+  auto it = vRegOperandTable.find(regNO);
+  if (it != vRegOperandTable.end()) {
+    it->second->SetSize(regOpnd.GetSize());
+    it->second->SetRegisterNumber(regNO);
+    it->second->SetRegisterType(regOpnd.GetRegisterType());
+    it->second->SetValidBitsNum(regOpnd.GetValidBitsNum());
+    return *it->second;
+  } else {
+    auto *newRegOpnd = static_cast<RegOperand*>(regOpnd.Clone(*memPool));
+    regno_t newRegNO = newRegOpnd->GetRegisterNumber();
+    if (newRegNO >= maxRegCount) {
+      maxRegCount = newRegNO + kRegIncrStepLen;
+      vRegTable.resize(maxRegCount);
+    }
+    vRegOperandTable[newRegNO] = newRegOpnd;
+    VirtualRegNode *vregNode = memPool->New<VirtualRegNode>(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize());
+    vRegTable[newRegNO] = *vregNode;
+    vRegCount = maxRegCount;
+    return *newRegOpnd;
+  }
+}
+
+/*
+ * Traverse all call insns to determine the return type of each one:
+ * if the following insn is mov/str/blr and uses R0/V0, the call insn has a return value
+ */
+void AArch64CGFunc::DetermineReturnTypeofCall() {
+  FOR_ALL_BB(bb, this) {
+    if (bb->IsUnreachable() || !bb->HasCall()) {
+      continue;
+    }
+    FOR_BB_INSNS(insn, bb) {
+      if (!insn->IsTargetInsn()) {
+        continue;
+      }
+      if (!insn->IsCall() || insn->GetMachineOpcode() == MOP_asm) {
+        continue;
+      }
+      Insn *nextInsn = insn->GetNextMachineInsn();
+      if (nextInsn == nullptr) {
+        continue;
+      }
+      if ((nextInsn->GetMachineOpcode() != MOP_asm) &&
+          ((nextInsn->IsMove() && nextInsn->GetOperand(kInsnSecondOpnd).IsRegister()) ||
+           nextInsn->IsStore() ||
+           (nextInsn->IsCall() && nextInsn->GetOperand(kInsnFirstOpnd).IsRegister()))) {
+        auto *srcOpnd = static_cast<RegOperand*>(&nextInsn->GetOperand(kInsnFirstOpnd));
+        CHECK_FATAL(srcOpnd != nullptr, "nullptr");
+        if (!srcOpnd->IsPhysicalRegister()) {
+          continue;
+        }
+        if (srcOpnd->GetRegisterNumber() == R0) {
+          insn->SetRetType(Insn::kRegInt);
+          continue;
+        }
+        if (srcOpnd->GetRegisterNumber() == V0) {
+          insn->SetRetType(Insn::kRegFloat);
+        }
+      }
+    }
+  }
+}
+
+void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) {
+  if (!GetCG()->GenLocalRC() && !begin) {
+    /* handling of local rc is disabled. */
+    return;
+  }
+
+  AArch64MemLayout *memLayout = static_cast<AArch64MemLayout*>(this->GetMemlayout());
+  int32 refNum = static_cast<int32>(memLayout->GetSizeOfRefLocals() / kOffsetAlign);
+  if (!refNum) {
+    if (begin) {
+      GenerateYieldpoint(*GetCurBB());
+      yieldPointInsn = GetCurBB()->GetLastInsn();
+    }
+    return;
+  }
+
+  /* no MCC_CleanupLocalStackRefSkip when ret_ref is the only ref symbol */
+  if ((refNum == 1) && (retRef != nullptr)) {
+    if (begin) {
+      GenerateYieldpoint(*GetCurBB());
+      yieldPointInsn = GetCurBB()->GetLastInsn();
+    }
+    return;
+  }
+  CHECK_FATAL(refNum < 0xFFFF, "not enough room for size.");
+  int32 refLocBase = memLayout->GetRefLocBaseLoc();
+  CHECK_FATAL((refLocBase >= 0) && (refLocBase < 0xFFFF), "not enough room for offset.");
+  int32 formalRef = 0;
+  /* avoid storing zero to formal localrefvars. */
+  if (begin) {
+    for (uint32 i = 0; i < GetFunction().GetFormalCount(); ++i) {
+      if (GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) {
+        refNum--;
+        formalRef++;
+      }
+    }
+  }
+  /*
+   * if the number of local refvars is less than 12, use stp or str to init them;
+   * otherwise call MCC_InitializeLocalStackRef to init.
+ */ + if (begin && (refNum <= kRefNum12) && ((refLocBase + kIntregBytelen * (refNum - 1)) < kStpLdpImm64UpperBound)) { + int32 pairNum = refNum / kDivide2; + int32 singleNum = refNum % kDivide2; + const int32 pairRefBytes = 16; /* the size of each pair of ref is 16 bytes */ + int32 ind = 0; + while (ind < pairNum) { + int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + pairRefBytes * ind; + Operand &zeroOp = GetZeroOpnd(k64BitSize); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstp, zeroOp, zeroOp, stackLoc); + GetCurBB()->AppendInsn(setInc); + ind++; + } + if (singleNum > 0) { + int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + kIntregBytelen * (refNum - 1); + Operand &zeroOp = GetZeroOpnd(k64BitSize); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstr, zeroOp, stackLoc); + GetCurBB()->AppendInsn(setInc); + } + /* Insert Yield Point just after localrefvar are initialized. */ + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + return; + } + + /* refNum is 1 and refvar is not returned, this refvar need to call MCC_DecRef_NaiveRCFast. */ + if ((refNum == 1) && !begin && (retRef == nullptr)) { + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand &stackLoc = CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), + GetPointerSize() * kBitsPerByte); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + + AppendCall(*callSym, *srcOpnds); + GetCurBB()->SetHasCall(); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + return; + } + + /* refNum is 2 and one of refvar is returned, only another one is needed to call MCC_DecRef_NaiveRCFast. */ + if ((refNum == 2) && !begin && retRef != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand *stackLoc = nullptr; + if (stOffset == 0) { + /* just have to Dec the next one. */ + stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()) + kIntregBytelen, + GetPointerSize() * kBitsPerByte); + } else { + /* just have to Dec the current one. 
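+       * (the returned ref lives in a later slot, so the slot at GetRefLocBaseLoc() is the one released)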
*/ + stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), GetPointerSize() * kBitsPerByte); + } + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, *stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*callSym, *srcOpnds); + callInsn.SetRefSkipIdx(stOffset); + GetCurBB()->SetHasCall(); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + return; + } + + bool needSkip = false; + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + + ImmOperand *beginOpnd = + &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef, k64BitSize, true); + ImmOperand *countOpnd = &CreateImmOperand(refNum, k64BitSize, true); + int32 refSkipIndex = -1; + if (!begin && retRef != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + refSkipIndex = stOffset; + if (stOffset == 0) { + /* ret_ref at begin. */ + beginOpnd = &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen, k64BitSize, true); + countOpnd = &CreateImmOperand(refNum - 1, k64BitSize, true); + } else if (stOffset == (refNum - 1)) { + /* ret_ref at end. */ + countOpnd = &CreateImmOperand(refNum - 1, k64BitSize, true); + } else { + needSkip = true; + } + } + + regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); + RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); + SelectAdd(vReg0, fpOpnd, *beginOpnd, PTY_i64); + + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd1); + SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); + + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + SelectCopyImm(vReg1, *countOpnd, PTY_i64); + + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopy(parmRegOpnd2, PTY_a64, vReg1, PTY_a64); + + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + if (begin) { + std::string funcName("MCC_InitializeLocalStackRef"); + sym->SetNameStrIdx(funcName); + CHECK_FATAL(countOpnd->GetValue() > 0, "refCount should be greater than 0."); + refCount = static_cast(countOpnd->GetValue()); + beginOffset = beginOpnd->GetValue(); + } else if (!needSkip) { + std::string funcName("MCC_CleanupLocalStackRef_NaiveRCFast"); + sym->SetNameStrIdx(funcName); + } else { + CHECK_NULL_FATAL(retRef); + if (retRef->GetStIndex() >= memLayout->GetSymAllocTable().size()) { + CHECK_FATAL(false, "index out of range in AArch64CGFunc::HandleRCCall"); + } + AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + + regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg2 = 
CreateVirtualRegisterOperand(vRegNO2); + SelectCopyImm(vReg2, retLoc, PTY_i64); + + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd3); + SelectCopy(parmRegOpnd3, PTY_a64, vReg2, PTY_a64); + + std::string funcName("MCC_CleanupLocalStackRefSkip_NaiveRCFast"); + sym->SetNameStrIdx(funcName); + } + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + + Insn &callInsn = AppendCall(*sym, *srcOpnds); + callInsn.SetRefSkipIdx(refSkipIndex); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + GetCurBB()->SetHasCall(); + if (begin) { + /* Insert Yield Point just after localrefvar are initialized. */ + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + } +} + +void AArch64CGFunc::SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID) { + /* + * in two param regs if possible + * If struct is <= 8 bytes, then it fits into one param reg. + * If struct is <= 16 bytes, then it fits into two param regs. + * Otherwise, it goes onto the stack. + * If the number of available param reg is less than what is + * needed to fit the entire struct into them, then the param + * reg is skipped and the struct goes onto the stack. + * Example 1. + * struct size == 8 bytes. + * param regs x0 to x6 are used. + * struct is passed in x7. + * Example 2. + * struct is 16 bytes. + * param regs x0 to x5 are used. + * struct is passed in x6 and x7. + * Example 3. + * struct is 16 bytes. + * param regs x0 to x6 are used. x7 alone is not enough to pass the struct. + * struct is passed on the stack. + * x7 is not used, as the following param will go onto the stack also. + */ + int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 == 0) { + /* No param regs available, pass on stack. */ + /* If symSize is <= 8 bytes then use 1 reg, else 2 */ + CreateCallStructParamPassByStack(symSize, &sym, nullptr, ploc.memOffset); + } else { + /* pass by param regs. */ + RegOperand *parmOpnd0 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 0); + srcOpnds.PushOpnd(*parmOpnd0); + if (ploc.reg1) { + RegOperand *parmOpnd1 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 1); + srcOpnds.PushOpnd(*parmOpnd1); + } + if (ploc.reg2) { + RegOperand *parmOpnd2 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 2); + srcOpnds.PushOpnd(*parmOpnd2); + } + if (ploc.reg3) { + RegOperand *parmOpnd3 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 3); + srcOpnds.PushOpnd(*parmOpnd3); + } + } +} + +void AArch64CGFunc::SelectParmListIreadSmallAggregate(const IreadNode &iread, MIRType &structType, + ListOperand &srcOpnds, int32 offset, + AArch64CallConvImpl &parmLocator) { + int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 == 0) { + /* No param regs available, pass on stack. */ + CreateCallStructParamPassByStack(symSize, nullptr, addrOpnd1, ploc.memOffset); + } else { + /* pass by param regs. 
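+     * ploc.fpSize picks the slot width below: 0 keeps 64-bit integer slots,
+     * while 4/8 mean a homogeneous FP aggregate whose members go to FP regs;
+     * e.g. struct { float x, y; } is loaded as two 32-bit values for s-registers.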
*/
+    fpParamState state = kStateUnknown;
+    uint32 memSize = 0;
+    switch (ploc.fpSize) {
+      case k0BitSize:
+        state = kNotFp;
+        memSize = k64BitSize;
+        break;
+      case k4BitSize:
+        state = kFp32Bit;
+        memSize = k32BitSize;
+        break;
+      case k8BitSize:
+        state = kFp64Bit;
+        memSize = k64BitSize;
+        break;
+      default:
+        break;
+    }
+    OfstOperand *offOpnd0 = &GetOrCreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize);
+    MemOperand *mopnd =
+        &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd0, nullptr);
+    CreateCallStructParamPassByReg(ploc.reg0, *mopnd, srcOpnds, state);
+    if (ploc.reg1) {
+      OfstOperand *offOpnd1 =
+          &GetOrCreateOfstOpnd(((ploc.fpSize ? ploc.fpSize : GetPointerSize()) + static_cast<uint32>(offset)),
+                               k32BitSize);
+      mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd1, nullptr);
+      CreateCallStructParamPassByReg(ploc.reg1, *mopnd, srcOpnds, state);
+    }
+    if (ploc.reg2) {
+      OfstOperand *offOpnd2 =
+          &GetOrCreateOfstOpnd(((ploc.fpSize ? (ploc.fpSize * k4BitShift) : GetPointerSize()) +
+                                static_cast<uint32>(offset)),
+                               k32BitSize);
+      mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd2, nullptr);
+      CreateCallStructParamPassByReg(ploc.reg2, *mopnd, srcOpnds, state);
+    }
+    if (ploc.reg3) {
+      OfstOperand *offOpnd3 =
+          &GetOrCreateOfstOpnd(((ploc.fpSize ? (ploc.fpSize * k8BitShift) : GetPointerSize()) +
+                                static_cast<uint32>(offset)),
+                               k32BitSize);
+      mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd3, nullptr);
+      CreateCallStructParamPassByReg(ploc.reg3, *mopnd, srcOpnds, state);
+    }
+  }
+}
+
+void AArch64CGFunc::SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType,
+                                                      ListOperand &srcOpnds,
+                                                      AArch64CallConvImpl &parmLocator, int32 &structCopyOffset,
+                                                      int32 fromOffset) {
+  /*
+   * Pass a larger sized struct on the stack.
+   * Need to copy the entire structure onto the stack.
+   * The pointer to the starting address of the copied struct is then
+   * used as the parameter for the struct.
+   * This pointer is passed as the next parameter.
+   * Example 1:
+   * struct is 23 bytes.
+   * param regs x0 to x5 are used.
+   * First round up 23 to 24, so 3 of 8-byte slots.
+   * Copy struct to a created space on the stack.
+   * Pointer of copied struct is passed in x6.
+   * Example 2:
+   * struct is 25 bytes.
+   * param regs x0 to x7 are used.
+   * First round up 25 to 32, so 4 of 8-byte slots.
+   * Copy struct to a created space on the stack.
+   * Pointer of copied struct is passed on stack as the 9th parameter.
+   */
+  uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx());
+  CCLocInfo ploc;
+  parmLocator.LocateNextParm(structType, ploc);
+  uint32 numMemOp = static_cast<uint32>(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */
+  /* Create the struct copies.
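+   * e.g. a 23-byte struct gives numMemOp = RoundUp(23, 8) / 8 = 3, and the callee
+   * receives the copy's address instead of the data itself.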
*/ + RegOperand *parmOpnd = CreateCallStructParamCopyToStack(numMemOp, &sym, nullptr, structCopyOffset, + fromOffset, ploc); + if (parmOpnd) { + srcOpnds.PushOpnd(*parmOpnd); + } + structCopyOffset += static_cast(numMemOp * GetPointerSize()); +} + +void AArch64CGFunc::SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, + ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + int32 &structCopyOffset, int32 fromOffset) { + uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */ + RegOperand *parmOpnd = + CreateCallStructParamCopyToStack(numMemOp, nullptr, addrOpnd1, structCopyOffset, fromOffset, ploc); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + if (parmOpnd) { + srcOpnds.PushOpnd(*parmOpnd); + } +} + +void AArch64CGFunc::CreateCallStructParamPassByStack(int32 symSize, const MIRSymbol *sym, + RegOperand *addrOpnd, int32 baseOffset) { + MemOperand *ldMopnd = nullptr; + MemOperand *stMopnd = nullptr; + uint32 numRegNeeded = (static_cast(symSize) <= k8ByteSize) ? kOneRegister : kTwoRegister; + for (int j = 0; j < static_cast(numRegNeeded); j++) { + if (sym) { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(k8ByteSize)), k64BitSize); + } else { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(GetPointerSize())), k64BitSize); + } + } else { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(j) * k8ByteSize, k32BitSize), nullptr); + } else { + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(j) * GetPointerSize(), k32BitSize), nullptr); + } + } + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + if (CGOptions::IsArm64ilp32()) { + stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * static_cast(k8ByteSize))), k64BitSize); + } else { + stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * GetPointerSize())), k64BitSize); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); + } +} + +RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, + const CCLocInfo &ploc, int32 offset, uint32 parmNum) { + uint32 memSize; + PrimType primType; + RegOperand *parmOpnd; + uint32 dataSizeBits; + AArch64reg reg; + switch (parmNum) { + case 0: + reg = static_cast(ploc.reg0); + break; + case 1: + reg = static_cast(ploc.reg1); + break; + case 2: + reg = static_cast(ploc.reg2); + break; + case 3: + reg = static_cast(ploc.reg3); + break; + default: + CHECK_FATAL(false, "Exceeded maximum allowed fp parameter registers for struct passing"); + } + if (ploc.fpSize == 0) { + memSize = k64BitSize; + primType = PTY_i64; + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); + } else if (ploc.fpSize == k4ByteSize) { + memSize = k32BitSize; + primType = PTY_f32; + dataSizeBits 
= GetPrimTypeSize(PTY_f32) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); + } else if (ploc.fpSize == k8ByteSize) { + memSize = k64BitSize; + primType = PTY_f64; + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); + } else { + CHECK_FATAL(false, "Unknown call parameter state"); + } + MemOperand *memOpnd; + if (sym.GetStorageClass() == kScFormal && fieldID > 0) { + MemOperand &baseOpnd = GetOrCreateMemOpnd(sym, 0, memSize); + RegOperand &base = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), base, baseOpnd)); + memOpnd = &CreateMemOpnd(base, (static_cast(offset) + parmNum * GetPointerSize()), memSize); + } else if (ploc.fpSize) { + memOpnd = &GetOrCreateMemOpnd(sym, (ploc.fpSize * parmNum + static_cast(offset)), memSize); + } else { + if (CGOptions::IsArm64ilp32()) { + memOpnd = &GetOrCreateMemOpnd(sym, (k8ByteSize * parmNum + static_cast(offset)), memSize); + } else { + memOpnd = &GetOrCreateMemOpnd(sym, (GetPointerSize() * parmNum + static_cast(offset)), memSize); + } + } + MOperator selectedMop = PickLdInsn(dataSizeBits, primType); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && + !IsOperandImmValid(selectedMop, memOpnd, kInsnSecondOpnd)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSizeBits); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, *memOpnd)); + + return parmOpnd; +} + +void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, + fpParamState state) { + RegOperand *parmOpnd; + uint32 dataSizeBits = 0; + PrimType pType = PTY_void; + parmOpnd = nullptr; + AArch64reg reg = static_cast(regno); + if (state == kNotFp) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + pType = PTY_i64; + } else if (state == kFp32Bit) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); + dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; + pType = PTY_f32; + } else if (state == kFp64Bit) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); + dataSizeBits = GetPrimTypeSize(PTY_f64) * kBitsPerByte; + pType = PTY_f64; + } else { + DEBUG_ASSERT(0, "CreateCallStructParamPassByReg: Unknown state"); + } + + MOperator selectedMop = PickLdInsn(dataSizeBits, pType); + if (!IsOperandImmValid(selectedMop, &memOpnd, kInsnSecondOpnd)) { + memOpnd = SplitOffsetWithAddInstruction(memOpnd, dataSizeBits); + } + DEBUG_ASSERT(parmOpnd != nullptr, "parmOpnd should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, memOpnd)); + srcOpnds.PushOpnd(*parmOpnd); +} + +void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand *addropnd, + uint32 structSize, int32 copyOffset, int32 fromOffset) { + std::vector opndVec; + + RegOperand *vreg1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + opndVec.push_back(vreg1); /* result */ + + RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); + SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); + opndVec.push_back(parmOpnd); 
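+  /*
+   * The operand vector built here becomes a libcall, roughly
+   *   memcpy(sp + copyOffset, <source address> + fromOffset, structSize)
+   * (illustrative; see the SelectLibCall("memcpy", ...) at the end of this function).
+   */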
/* param 0 */ + + if (sym != nullptr) { + if (sym->GetStorageClass() == kScGlobal || sym->GetStorageClass() == kScExtern) { + StImmOperand &stopnd = CreateStImmOperand(*sym, fromOffset, 0); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + SelectAddrof(staddropnd, stopnd); + opndVec.push_back(&staddropnd); /* param 1 */ + } else if (sym->GetStorageClass() == kScAuto || sym->GetStorageClass() == kScFormal) { + RegOperand *parm1Reg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + AArch64SymbolAlloc *symloc = static_cast(GetMemlayout()->GetSymAllocInfo(sym->GetStIndex())); + RegOperand *baseOpnd = static_cast(GetBaseReg(*symloc)); + int32 stoffset = GetBaseOffset(*symloc); + ImmOperand *offsetOpnd1 = &CreateImmOperand(static_cast(stoffset), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *parm1Reg, *baseOpnd, *offsetOpnd1)); + if (sym->GetStorageClass() == kScFormal) { + MemOperand *ldmopnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, parm1Reg, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), static_cast(nullptr)); + RegOperand *tmpreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + RegOperand *vreg2 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), + *tmpreg, *ldmopnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *vreg2, *tmpreg, + CreateImmOperand(fromOffset, k64BitSize, false))); + parm1Reg = vreg2; + } + opndVec.push_back(parm1Reg); /* param 1 */ + } else if (sym->GetStorageClass() == kScPstatic || sym->GetStorageClass() == kScFstatic) { + CHECK_FATAL(sym->GetSKind() != kStConst, "Unsupported sym const for struct param"); + StImmOperand *stopnd = &CreateStImmOperand(*sym, 0, 0); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, staddropnd, *stopnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, staddropnd, staddropnd, *stopnd)); + opndVec.push_back(&staddropnd); /* param 1 */ + } else { + CHECK_FATAL(0, "Unsupported sym for struct param"); + } + } else { + opndVec.push_back(addropnd); /* param 1 */ + } + + RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, vreg3, sizeOpnd)); + opndVec.push_back(&vreg3); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); +} + +RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, + RegOperand *addrOpd, int32 copyOffset, + int32 fromOffset, const CCLocInfo &ploc) { + /* Create the struct copies. 
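+   * Each iteration moves one pointer-sized slot, e.g. for numMemOp = 3 roughly:
+   *   ldr xN, [src, #0]  / str xN, [sp, #copyOffset]
+   *   ldr xN, [src, #8]  / str xN, [sp, #copyOffset + 8]
+   *   ldr xN, [src, #16] / str xN, [sp, #copyOffset + 16]
+   * (arm64ilp32 moves 32-bit slots with w-register ldr/str instead).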
*/ + MemOperand *ldMopnd = nullptr; + MemOperand *stMopnd = nullptr; + for (uint32 j = 0; j < numMemOp; j++) { + if (sym != nullptr) { + if (sym->GetStorageClass() == kScFormal) { + MemOperand &base = GetOrCreateMemOpnd(*sym, 0, k64BitSize); + RegOperand &vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), vreg, base); + GetCurBB()->AppendInsn(ldInsn); + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &vreg, nullptr, + &GetOrCreateOfstOpnd((j * GetPointerSize() + static_cast(fromOffset)), k32BitSize), nullptr); + } else { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k32BitSize); + } else { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k64BitSize); + } + } + } else { + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpd, nullptr, + &GetOrCreateOfstOpnd((j * GetPointerSize() + static_cast(fromOffset)), k32BitSize), nullptr); + } + if (CGOptions::IsArm64ilp32()) { + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k4ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k32BitSize, PTY_i32), *vreg, *ldMopnd)); + + stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k32BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k32BitSize, PTY_i32), *vreg, *stMopnd)); + } else { + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + + stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); + } + } + /* Create the copy address parameter for the struct */ + RegOperand *fpopnd = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offset = &CreateImmOperand(copyOffset, k64BitSize, false); + if (ploc.reg0 == kRinvalid) { + RegOperand &res = CreateRegisterOperandOfType(PTY_u64); + SelectAdd(res, *fpopnd, *offset, PTY_u64); + MemOperand &stMopnd2 = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), res, stMopnd2)); + return nullptr; + } else { + RegOperand *parmOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), + k64BitSize, kRegTyInt); + SelectAdd(*parmOpnd, *fpopnd, *offset, PTY_a64); + return parmOpnd; + } +} + +void AArch64CGFunc::CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, + AArch64CallConvImpl &parmLocator, ListOperand &srcOpnds) { + RegOperand &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); + + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 != 0) { + RegOperand &res = GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), k64BitSize, kRegTyInt); + SelectAdd(res, spReg, offsetOpnd, PTY_a64); + srcOpnds.PushOpnd(res); + } else { + RegOperand &parmOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAdd(parmOpnd, spReg, offsetOpnd, PTY_a64); + MemOperand &stmopnd = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); + 
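+
+    /*
+     * No parameter register is left: the computed pointer (sp + structCopyOffset)
+     * is itself stored to the outgoing argument slot at [sp, #ploc.memOffset] below.
+     */
+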
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), parmOpnd, stmopnd)); + } +} + +void AArch64CGFunc::SelectParmListForAggregate(BaseNode &argExpr, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset) { + uint64 symSize; + int32 rhsOffset = 0; + if (argExpr.GetOpCode() == OP_dread) { + DreadNode &dread = static_cast(argExpr); + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + MIRType *ty = sym->GetType(); + if (dread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListDreadSmallAggregate(*sym, *ty, srcOpnds, rhsOffset, parmLocator, dread.GetFieldID()); + } else if (symSize > kParmMemcpySize) { + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else { + SelectParmListDreadLargeAggregate(*sym, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); + } + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + MIRPtrType *pointerty = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (iread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); + } else if (symSize > kParmMemcpySize) { + RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + if (rhsOffset > 0) { + RegOperand *addrOpnd = &LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); + regno_t vRegNO = NewVReg(kRegTyInt, k8ByteSize); + RegOperand *result = &CreateVirtualRegisterOperand(vRegNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *addrOpnd, + CreateImmOperand(rhsOffset, k64BitSize, false))); + } + + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else { + SelectParmListIreadLargeAggregate(iread, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); + } + } else { + CHECK_FATAL(0, "NYI"); + } +} + +size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) { + if (naryNode.GetOpCode() == OP_call) { + CallNode &callNode = static_cast(naryNode); + MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + TyIdx retIdx = callFunc->GetReturnTyIdx(); + if (callFunc->IsFirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callFunc->GetFormalDefVec()[0].formalTyIdx); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + size_t retSize = GetBecommon().GetTypeSize(retIdx.GetIdx()); + if ((retSize == 0) && callFunc->IsReturnStruct()) { + TyIdx tyIdx = callFunc->GetFuncRetStructTyIdx(); + return 
GetBecommon().GetTypeSize(tyIdx); + } + return retSize; + } else if (naryNode.GetOpCode() == OP_icall) { + IcallNode &icallNode = static_cast(naryNode); + CallReturnVector *p2nrets = &icallNode.GetReturnVec(); + if (p2nrets->size() == k1ByteSize) { + StIdx stIdx = (*p2nrets)[0].first; + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (sym != nullptr) { + return GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()); + } + } + } else if (naryNode.GetOpCode() == OP_icallproto) { + IcallNode &icallProto = static_cast(naryNode); + MIRFuncType *funcTy = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallProto.GetRetTyIdx())); + if (funcTy->FirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTy->GetNthParamType(0)); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + return GetBecommon().GetTypeSize(funcTy->GetRetTyIdx()); + } + return 0; +} + +void AArch64CGFunc::SelectParmListPreprocessLargeStruct(BaseNode &argExpr, int32 &structCopyOffset) { + uint64 symSize; + int32 rhsOffset = 0; + if (argExpr.GetOpCode() == OP_dread) { + DreadNode &dread = static_cast(argExpr); + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + MIRType *ty = sym->GetType(); + if (dread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize > kParmMemcpySize) { + CreateCallStructParamMemcpy(sym, nullptr, static_cast(symSize), structCopyOffset, rhsOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else if (symSize > k16ByteSize) { + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + } + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + MIRPtrType *pointerty = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (iread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize > kParmMemcpySize) { + RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd = &LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); + if (rhsOffset > 0) { + regno_t vRegNO = NewVReg(kRegTyInt, k8ByteSize); + RegOperand *result = &CreateVirtualRegisterOperand(vRegNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *addrOpnd, + CreateImmOperand(rhsOffset, k64BitSize, false))); + addrOpnd = result; + } + + CreateCallStructParamMemcpy(nullptr, addrOpnd, static_cast(symSize), structCopyOffset, rhsOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else if (symSize > k16ByteSize) { + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); + structCopyOffset += static_cast(numMemOp * 
GetPointerSize());
+    }
+  }
+}
+
+/* preprocess call in parmlist */
+bool AArch64CGFunc::MarkParmListCall(BaseNode &expr) {
+  if (!CGOptions::IsPIC()) {
+    return false;
+  }
+  switch (expr.GetOpCode()) {
+    case OP_addrof: {
+      auto &addrNode = static_cast<AddrofNode&>(expr);
+      MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrNode.GetStIdx());
+      if (symbol->IsThreadLocal()) {
+        return true;
+      }
+      break;
+    }
+    default: {
+      for (auto i = 0; i < expr.GetNumOpnds(); i++) {
+        if (expr.Opnd(i)) {
+          if (MarkParmListCall(*expr.Opnd(i))) {
+            return true;
+          }
+        }
+      }
+      break;
+    }
+  }
+  return false;
+}
+
+void AArch64CGFunc::SelectParmListPreprocess(const StmtNode &naryNode, size_t start,
+                                             std::set<size_t> &specialArgs) {
+  size_t i = start;
+  int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize();
+  for (; i < naryNode.NumOpnds(); ++i) {
+    BaseNode *argExpr = naryNode.Opnd(i);
+    PrimType primType = argExpr->GetPrimType();
+    if (MarkParmListCall(*argExpr)) {
+      (void)specialArgs.emplace(i);
+    }
+    DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
+    if (primType != PTY_agg) {
+      continue;
+    }
+    SelectParmListPreprocessLargeStruct(*argExpr, structCopyOffset);
+  }
+}
+
+/*
+   SelectParmList generates an instruction for each of the parameters
+   to load the parameter value into the corresponding register.
+   We return a list of registers to the call instruction because
+   they may be needed in the register allocation phase.
+ */
+void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative) {
+  size_t i = 0;
+  if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative) {
+    i++;
+  }
+  std::set<size_t> specialArgs;
+  SelectParmListPreprocess(naryNode, i, specialArgs);
+  bool specialArg = false;
+  bool firstArgReturn = false;
+  MIRFunction *callee = nullptr;
+  if (dynamic_cast<CallNode*>(&naryNode) != nullptr) {
+    auto calleePuIdx = static_cast<CallNode&>(naryNode).GetPUIdx();
+    callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
+    firstArgReturn = callee->IsFirstArgReturn();
+  } else if (naryNode.GetOpCode() == OP_icallproto) {
+    IcallNode *icallnode = &static_cast<IcallNode&>(naryNode);
+    MIRFuncType *funcType = static_cast<MIRFuncType*>(
+        GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallnode->GetRetTyIdx()));
+    firstArgReturn = funcType->FirstArgReturn();
+  }
+  BB *curBBrecord = GetCurBB();
+  BB *tmpBB = nullptr;
+  if (!specialArgs.empty()) {
+    tmpBB = CreateNewBB();
+    specialArg = true;
+  }
+  AArch64CallConvImpl parmLocator(GetBecommon());
+  CCLocInfo ploc;
+  int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize();
+  std::vector<Insn*> insnForStackArgs;
+  uint32 stackArgsCount = 0;
+  for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) {
+    if (specialArg) {
+      DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
+      SetCurBB(specialArgs.count(i) ?
*curBBrecord : *tmpBB); + } + bool is64x1vec = false; + MIRType *ty = nullptr; + BaseNode *argExpr = naryNode.Opnd(i); + PrimType primType = argExpr->GetPrimType(); + DEBUG_ASSERT(primType != PTY_void, "primType should not be void"); + if (callee != nullptr && pnum < callee->GetFormalCount() && callee->GetFormal(pnum) != nullptr) { + is64x1vec = callee->GetFormal(pnum)->GetAttr(ATTR_oneelem_simd); + } + switch (argExpr->op) { + case OP_dread: { + DreadNode *dNode = static_cast(argExpr); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(dNode->GetStIdx()); + if (dNode->GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectParmList: non-zero fieldID for non-structure"); + FieldAttrs fa = structType->GetFieldAttrs(dNode->GetFieldID()); + is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); + } else { + is64x1vec = symbol->GetAttr(ATTR_oneelem_simd); + } + break; + } + case OP_iread: { + IreadNode *iNode = static_cast(argExpr); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iNode->GetTyIdx()); + MIRPtrType *ptrTyp = static_cast(type); + DEBUG_ASSERT(ptrTyp != nullptr, "expect a pointer type at iread node"); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyp->GetPointedTyIdx()); + if (iNode->GetFieldID() != 0) { + MIRStructType *structType = static_cast(pointedTy); + FieldAttrs fa = structType->GetFieldAttrs(iNode->GetFieldID()); + is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); + } else { + TypeAttrs ta = static_cast(ptrTyp)->GetTypeAttrs(); + is64x1vec = ta.GetAttr(ATTR_oneelem_simd); + } + break; + } + case OP_constval: { + CallNode *call = safe_cast(&naryNode); + if (call == nullptr) { + break; + } + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx()); + if (fn == nullptr || fn->GetFormalCount() == 0 || fn->GetFormalCount() <= pnum) { + break; + } + is64x1vec = fn->GetFormalDefAt(pnum).formalAttrs.GetAttr(ATTR_oneelem_simd); + break; + } + default: + break; + } + /* use alloca */ + if (primType == PTY_agg) { + SelectParmListForAggregate(*argExpr, srcOpnds, parmLocator, structCopyOffset); + continue; + } + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + RegOperand *expRegOpnd = nullptr; + Operand *opnd = HandleExpr(naryNode, *argExpr); + if (opnd->GetKind() == Operand::kOpdRegister && static_cast(opnd)->GetIF64Vec()) { + is64x1vec = true; + } + if (!opnd->IsRegister()) { + opnd = &LoadIntoRegister(*opnd, primType); + } + expRegOpnd = static_cast(opnd); + + if ((pnum == 0) && firstArgReturn) { + parmLocator.InitCCLocInfo(ploc); + ploc.reg0 = R8; + } else { + parmLocator.LocateNextParm(*ty, ploc); + } + /* is64x1vec should be an int64 value in an FP/simd reg for ABI compliance, + convert R-reg to equivalent V-reg */ + PrimType destPrimType = primType; + if (is64x1vec && ploc.reg0 != kRinvalid && ploc.reg0 < R7) { + ploc.reg0 = AArch64Abi::floatParmRegs[static_cast(ploc.reg0) - 1]; + destPrimType = PTY_f64; + } + + /* skip unused args */ + if (callee != nullptr && callee->GetFuncDesc().IsArgUnused(pnum)) continue; + + if (ploc.reg0 != kRinvalid) { /* load to the register. 
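+       * e.g. a scalar i32 argument with ploc.reg0 == R2 is copied into w2 here;
+       * one-element 64-bit vector arguments were redirected above to FP registers
+       * via destPrimType = PTY_f64.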
*/
+      CHECK_FATAL(expRegOpnd != nullptr, "null ptr check");
+      RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand(
+          static_cast<AArch64reg>(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType));
+      SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType);
+      srcOpnds.PushOpnd(parmRegOpnd);
+    } else { /* store to the memory segment for stack-passed arguments. */
+      if (CGOptions::IsBigEndian()) {
+        if (GetPrimTypeBitSize(primType) < k64BitSize) {
+          ploc.memOffset = ploc.memOffset + static_cast<int32>(k4BitSize);
+        }
+      }
+      MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType));
+      Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType), *expRegOpnd,
+                                                  actMemOpnd);
+      actMemOpnd.SetStackArgMem(true);
+      if (Globals::GetInstance()->GetOptimLevel() == 2 && stackArgsCount < kShiftAmount12) {
+        (void)insnForStackArgs.emplace_back(&strInsn);
+        stackArgsCount++;
+      } else {
+        GetCurBB()->AppendInsn(strInsn);
+      }
+    }
+    DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
+  }
+  if (specialArg) {
+    DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
+    curBBrecord->InsertAtEnd(*tmpBB);
+    SetCurBB(*curBBrecord);
+  }
+  for (auto &strInsn : insnForStackArgs) {
+    GetCurBB()->AppendInsn(*strInsn);
+  }
+}
+
+/*
+ * for MCC_DecRefResetPair(addrof ptr %Reg17_R5592, addrof ptr %Reg16_R6202) or
+ * MCC_ClearLocalStackRef(addrof ptr %Reg17_R5592), the parameter (addrof ptr xxx) is converted to asm as follows:
+ *   add vreg, x29, #imm
+ *   mov R0/R1, vreg
+ * this function is used to prepare the parameters; the generated vreg is returned, and #imm is saved in offsetValue.
+ */
+Operand *AArch64CGFunc::SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue) {
+  MIRSymbol *symbol = GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(expr.GetStIdx());
+  PrimType ptype = expr.GetPrimType();
+  regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(ptype));
+  Operand &result = CreateVirtualRegisterOperand(vRegNO);
+  CHECK_FATAL(expr.GetFieldID() == 0, "the fieldID of parameter in clear stack reference call must be 0");
+  if (!CGOptions::IsQuiet()) {
+    maple::LogInfo::MapleLogger(kLlErr) <<
+        "Warning: we expect AddrOf with StImmOperand is not used for local variables";
+  }
+  auto *symLoc = static_cast<AArch64SymbolAlloc*>(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex()));
+  ImmOperand *offset = nullptr;
+  if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) {
+    offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false, kUnAdjustVary);
+  } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) {
+    auto it = immOpndsRequiringOffsetAdjustmentForRefloc.find(symLoc);
+    if (it != immOpndsRequiringOffsetAdjustmentForRefloc.end()) {
+      offset = (*it).second;
+    } else {
+      offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false);
+      immOpndsRequiringOffsetAdjustmentForRefloc[symLoc] = offset;
+    }
+  } else {
+    CHECK_FATAL(false, "the symLoc of parameter in clear stack reference call is unreasonable");
+  }
+  DEBUG_ASSERT(offset != nullptr, "offset should not be nullptr");
+  offsetValue = offset->GetValue();
+  SelectAdd(result, *GetBaseReg(*symLoc), *offset, PTY_u64);
+  if (GetCG()->GenerateVerboseCG()) {
+    /* Add a comment */
+    Insn *insn = GetCurBB()->GetLastInsn();
+    std::string comm = "local/formal var: ";
+    comm.append(symbol->GetName());
+    insn->SetComment(comm);
+  }
+  return &result;
+}
+
+/* select parameters for the MCC_DecRefResetPair and MCC_ClearLocalStackRef functions */
+void
AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds,
+                                            std::vector<int64> &stackPosition) {
+  AArch64CallConvImpl parmLocator(GetBecommon());
+  CCLocInfo ploc;
+  for (size_t i = 0; i < naryNode.NumOpnds(); ++i) {
+    MIRType *ty = nullptr;
+    BaseNode *argExpr = naryNode.Opnd(i);
+    PrimType primType = argExpr->GetPrimType();
+    DEBUG_ASSERT(primType != PTY_void, "primType check");
+    /* use alloca */
+    CHECK_FATAL(primType != PTY_agg, "the type of argument is unreasonable");
+    ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
+    CHECK_FATAL(argExpr->GetOpCode() == OP_addrof, "the argument of clear stack call is unreasonable");
+    auto *expr = static_cast<AddrofNode*>(argExpr);
+    int64 offsetValue = 0;
+    Operand *opnd = SelectClearStackCallParam(*expr, offsetValue);
+    stackPosition.emplace_back(offsetValue);
+    auto *expRegOpnd = static_cast<RegOperand*>(opnd);
+    parmLocator.LocateNextParm(*ty, ploc);
+    CHECK_FATAL(ploc.reg0 != 0, "the parameter of ClearStackCall must be passed by register");
+    CHECK_FATAL(expRegOpnd != nullptr, "null ptr check");
+    RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand(
+        static_cast<AArch64reg>(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(primType));
+    SelectCopy(parmRegOpnd, primType, *expRegOpnd, primType);
+    srcOpnds.PushOpnd(parmRegOpnd);
+    DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
+  }
+}
+
+/*
+ * intrinsify Unsafe.getAndAddInt and Unsafe.getAndAddLong
+ * generate an intrinsic instruction instead of a function call
+ * intrinsic_get_add_int w0, xt, ws, ws, x1, x2, w3, label
+ */
+void AArch64CGFunc::IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty) {
+  MapleList<RegOperand*> &opnds = srcOpnds.GetOperands();
+  /* Unsafe.getAndAddInt has more than 4 parameters */
+  DEBUG_ASSERT(opnds.size() >= 4, "ensure the operands number");
+  auto iter = opnds.begin();
+  RegOperand *objOpnd = *(++iter);
+  RegOperand *offOpnd = *(++iter);
+  RegOperand *deltaOpnd = *(++iter);
+  auto &retVal = static_cast<RegOperand&>(GetTargetRetOperand(pty, -1));
+  LabelIdx labIdx = CreateLabel();
+  LabelOperand &targetOpnd = GetOrCreateLabelOperand(labIdx);
+  RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64);
+  RegOperand &tempOpnd1 = CreateRegisterOperandOfType(pty);
+  RegOperand &tempOpnd2 = CreateRegisterOperandOfType(PTY_i32);
+  MOperator mOp = (pty == PTY_i64) ?
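/* Sketch of the retry loop this pseudo-insn later expands to (assuming an
   exclusive-monitor expansion; 32-bit case shown):
     add   x_addr, x_obj, x_off
   retry:
     ldaxr w_old, [x_addr]
     add   w_new, w_old, w_delta
     stlxr w_st, w_new, [x_addr]
     cbnz  w_st, retry
   tempOpnd0 (i64) holds the address, tempOpnd1 the updated value, and
   tempOpnd2 (i32) the store-exclusive status. */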
MOP_get_and_addL : MOP_get_and_addI; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(&tempOpnd2); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(deltaOpnd); + intrnOpnds.emplace_back(&targetOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * intrinsify Unsafe.getAndSetInt and Unsafe.getAndSetLong + * generate an intrinsic instruction instead of a function call + */ +void AArch64CGFunc::IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty) { + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.getAndSetInt has 4 parameters */ + DEBUG_ASSERT(opnds.size() == 4, "ensure the operands number"); + auto iter = opnds.begin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *newValueOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(pty, -1)); + LabelIdx labIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labIdx); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(PTY_i32); + + MOperator mOp = (pty == PTY_i64) ? MOP_get_and_setL : MOP_get_and_setI; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&targetOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * intrinsify Unsafe.compareAndSwapInt and Unsafe.compareAndSwapLong + * generate an intrinsic instruction instead of a function call + */ +void AArch64CGFunc::IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty) { + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.compareAndSwapInt has more than 5 parameters */ + DEBUG_ASSERT(opnds.size() >= 5, "ensure the operands number"); + auto iter = opnds.begin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *expectedValueOpnd = *(++iter); + RegOperand *newValueOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(PTY_i64, -1)); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(pty); + LabelIdx labIdx1 = CreateLabel(); + LabelOperand &label1Opnd = GetOrCreateLabelOperand(labIdx1); + LabelIdx labIdx2 = CreateLabel(); + LabelOperand &label2Opnd = GetOrCreateLabelOperand(labIdx2); + MOperator mOp = (pty == PTY_i32) ? 
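/* Sketch of the eventual compare-and-swap expansion (assuming an
   exclusive-monitor expansion; 32-bit case shown):
     add   x_addr, x_obj, x_off
   label1:
     ldaxr w_old, [x_addr]
     cmp   w_old, w_expected
     b.ne  label2
     stlxr w_st, w_new, [x_addr]
     cbnz  w_st, label1
   label2:
     cset  w_ret, eq
   which is why two label operands are attached below. */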
MOP_compare_and_swapI : MOP_compare_and_swapL; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(expectedValueOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&label1Opnd); + intrnOpnds.emplace_back(&label2Opnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * the lowest bit of count field is used to indicate whether or not the string is compressed + * if the string is not compressed, jump to jumpLabIdx + */ +RegOperand *AArch64CGFunc::CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, + LabelIdx jumpLabIdx) { + MemOperand &memOpnd = CreateMemOpnd(str, countOffset, str.GetSize()); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator loadOp = PickLdInsn(bitSize, countPty); + RegOperand &countOpnd = CreateRegisterOperandOfType(countPty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(loadOp, countOpnd, memOpnd)); + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + RegOperand &countLowestBitOpnd = CreateRegisterOperandOfType(countPty); + MOperator andOp = bitSize == k64BitSize ? MOP_xandrri13 : MOP_wandrri12; + bb.AppendInsn(GetInsnBuilder()->BuildInsn(andOp, countLowestBitOpnd, countOpnd, immValueOne)); + RegOperand &wzr = GetZeroOpnd(bitSize); + MOperator cmpOp = (bitSize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + Operand &rflag = GetOrCreateRflag(); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(cmpOp, rflag, wzr, countLowestBitOpnd)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_beq, rflag, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBIf); + return &countOpnd; +} + +/* + * count field stores the length shifted one bit to the left + * if the length is less than eight, jump to jumpLabIdx + */ +RegOperand *AArch64CGFunc::CheckStringLengthLessThanEight(BB &bb, RegOperand &countOpnd, PrimType countPty, + LabelIdx jumpLabIdx) { + RegOperand &lengthOpnd = CreateRegisterOperandOfType(countPty); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator lsrOp = (bitSize == k64BitSize) ? MOP_xlsrrri6 : MOP_wlsrrri5; + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(lsrOp, lengthOpnd, countOpnd, immValueOne)); + constexpr int kConstIntEight = 8; + ImmOperand &immValueEight = CreateImmOperand(countPty, kConstIntEight); + MOperator cmpImmOp = (bitSize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri; + Operand &rflag = GetOrCreateRflag(); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(cmpImmOp, rflag, lengthOpnd, immValueEight)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blt, rflag, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBIf); + return &lengthOpnd; +} + +void AArch64CGFunc::GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString, RegOperand &patternString, + RegOperand &srcCountOpnd, RegOperand &patternLengthOpnd, + PrimType countPty, LabelIdx jumpLabIdx) { + RegOperand &srcLengthOpnd = CreateRegisterOperandOfType(countPty); + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator lsrOp = (bitSize == k64BitSize) ? 
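/* count encodes (length << 1) | isCompressed, so e.g. count == 17
   (binary 10001) denotes a compressed string of length 8; the lsr below
   recovers the length and the cmp/blt sends lengths < 8 to jumpLabIdx. */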
MOP_xlsrrri6 : MOP_wlsrrri5; + bb.AppendInsn(GetInsnBuilder()->BuildInsn(lsrOp, srcLengthOpnd, srcCountOpnd, immValueOne)); +#ifdef USE_32BIT_REF + const int64 stringBaseObjSize = 16; /* shadow(4)+monitor(4)+count(4)+hash(4) */ +#else + const int64 stringBaseObjSize = 20; /* shadow(8)+monitor(4)+count(4)+hash(4) */ +#endif /* USE_32BIT_REF */ + PrimType pty = (srcString.GetSize() == k64BitSize) ? PTY_i64 : PTY_i32; + ImmOperand &immStringBaseOffset = CreateImmOperand(pty, stringBaseObjSize); + MOperator addOp = (pty == PTY_i64) ? MOP_xaddrri12 : MOP_waddrri12; + RegOperand &srcStringBaseOpnd = CreateRegisterOperandOfType(pty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(addOp, srcStringBaseOpnd, srcString, immStringBaseOffset)); + RegOperand &patternStringBaseOpnd = CreateRegisterOperandOfType(pty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(addOp, patternStringBaseOpnd, patternString, immStringBaseOffset)); + auto &retVal = static_cast(GetTargetRetOperand(PTY_i32, -1)); + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&srcStringBaseOpnd); + intrnOpnds.emplace_back(&srcLengthOpnd); + intrnOpnds.emplace_back(&patternStringBaseOpnd); + intrnOpnds.emplace_back(&patternLengthOpnd); + const uint32 tmpRegOperandNum = 6; + for (uint32 i = 0; i < tmpRegOperandNum - 1; ++i) { + RegOperand &tmpOpnd = CreateRegisterOperandOfType(PTY_i64); + intrnOpnds.emplace_back(&tmpOpnd); + } + intrnOpnds.emplace_back(&CreateRegisterOperandOfType(PTY_i32)); + const uint32 labelNum = 7; + for (uint32 i = 0; i < labelNum; ++i) { + LabelIdx labIdx = CreateLabel(); + LabelOperand &labelOpnd = GetOrCreateLabelOperand(labIdx); + intrnOpnds.emplace_back(&labelOpnd); + } + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_string_indexof, intrnOpnds)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBGoto); +} + +/* + * intrinsify String.indexOf + * generate an intrinsic instruction instead of a function call if both the source string and the specified substring + * are compressed and the length of the substring is not less than 8, i.e. 
+ * bl String.indexOf, srcString, patternString ===>> + * + * ldr srcCountOpnd, [srcString, offset] + * and srcCountLowestBitOpnd, srcCountOpnd, #1 + * cmp wzr, srcCountLowestBitOpnd + * beq Label.call + * ldr patternCountOpnd, [patternString, offset] + * and patternCountLowestBitOpnd, patternCountOpnd, #1 + * cmp wzr, patternCountLowestBitOpnd + * beq Label.call + * lsr patternLengthOpnd, patternCountOpnd, #1 + * cmp patternLengthOpnd, #8 + * blt Label.call + * lsr srcLengthOpnd, srcCountOpnd, #1 + * add srcStringBaseOpnd, srcString, immStringBaseOffset + * add patternStringBaseOpnd, patternString, immStringBaseOffset + * intrinsic_string_indexof retVal, srcStringBaseOpnd, srcLengthOpnd, patternStringBaseOpnd, patternLengthOpnd, + * tmpOpnd1, tmpOpnd2, tmpOpnd3, tmpOpnd4, tmpOpnd5, tmpOpnd6, + * label1, label2, label3, lable3, label4, label5, label6, label7 + * b Label.joint + * Label.call: + * bl String.indexOf, srcString, patternString + * Label.joint: + */ +void AArch64CGFunc::IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym) { + MapleList &opnds = srcOpnds.GetOperands(); + /* String.indexOf opnd size must be more than 2 */ + DEBUG_ASSERT(opnds.size() >= 2, "ensure the operands number"); + auto iter = opnds.begin(); + RegOperand *srcString = *iter; + RegOperand *patternString = *(++iter); + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kJavaLangStringStr); + MIRType *type = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(gStrIdx)); + auto stringType = static_cast(type); + CHECK_FATAL(stringType != nullptr, "Ljava_2Flang_2FString_3B type can not be null"); + FieldID fieldID = GetMirModule().GetMIRBuilder()->GetStructFieldIDFromFieldNameParentFirst(stringType, "count"); + MIRType *fieldType = stringType->GetFieldType(fieldID); + PrimType countPty = fieldType->GetPrimType(); + int32 offset = GetBecommon().GetFieldOffset(*stringType, fieldID).first; + LabelIdx callBBLabIdx = CreateLabel(); + RegOperand *srcCountOpnd = CheckStringIsCompressed(*GetCurBB(), *srcString, offset, countPty, callBBLabIdx); + + BB *srcCompressedBB = CreateNewBB(); + GetCurBB()->AppendBB(*srcCompressedBB); + RegOperand *patternCountOpnd = CheckStringIsCompressed(*srcCompressedBB, *patternString, offset, countPty, + callBBLabIdx); + + BB *patternCompressedBB = CreateNewBB(); + RegOperand *patternLengthOpnd = CheckStringLengthLessThanEight(*patternCompressedBB, *patternCountOpnd, countPty, + callBBLabIdx); + + BB *intrinsicBB = CreateNewBB(); + LabelIdx jointLabIdx = CreateLabel(); + GenerateIntrnInsnForStrIndexOf(*intrinsicBB, *srcString, *patternString, *srcCountOpnd, *patternLengthOpnd, + countPty, jointLabIdx); + + BB *callBB = CreateNewBB(); + callBB->AddLabel(callBBLabIdx); + SetLab2BBMap(callBBLabIdx, *callBB); + SetCurBB(*callBB); + Insn &callInsn = AppendCall(funcSym, srcOpnds); + MIRType *retType = funcSym.GetFunction()->GetReturnType(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + } + GetFunction().SetHasCall(); + + BB *jointBB = CreateNewBB(); + jointBB->AddLabel(jointLabIdx); + SetLab2BBMap(jointLabIdx, *jointBB); + srcCompressedBB->AppendBB(*patternCompressedBB); + patternCompressedBB->AppendBB(*intrinsicBB); + intrinsicBB->AppendBB(*callBB); + callBB->AppendBB(*jointBB); + SetCurBB(*jointBB); +} + +/* Lmbc calls have no argument, they are all explicit iassignspoff or + blkassign. 
Info collected and to be emitted here */ +void AArch64CGFunc::LmbcSelectParmList(ListOperand *srcOpnds, bool isArgReturn) { + if (GetLmbcArgInfo() == nullptr) { + return; /* no arg */ + } + CHECK_FATAL(GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc, "To be called for Lmbc model only"); + MapleVector &args = GetLmbcCallArgs(); + MapleVector &types = GetLmbcCallArgTypes(); + MapleVector &offsets = GetLmbcCallArgOffsets(); + MapleVector ®s = GetLmbcCallArgNumOfRegs(); + int iCnt = 0; + int fCnt = 0; + for (size_t i = isArgReturn ? 1 : 0; i < args.size(); i++) { + RegType ty = args[i]->GetRegisterType(); + PrimType pTy = types[i]; + AArch64reg reg; + if (args[i]->IsOfIntClass() && (iCnt + regs[i]) <= static_cast(k8ByteSize)) { + reg = static_cast(R0 + iCnt++); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand( + reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else if (!args[i]->IsOfIntClass() && (fCnt + regs[i]) <= static_cast(k8ByteSize)) { + reg = static_cast(V0 + fCnt++); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand( + reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else { + uint32 pSize = GetPrimTypeSize(pTy); + Operand &memOpd = CreateMemOpnd(RSP, offsets[i], pSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(pSize * kBitsPerByte, pTy), + *args[i], memOpd)); + } + } + /* Load x8 if 1st arg is for agg return */ + if (isArgReturn) { + AArch64reg reg = static_cast(R8); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, + GetPrimTypeSize(PTY_a64) * kBitsPerByte, + kRegTyInt); + SelectCopy(*res, PTY_a64, *args[0], PTY_a64); + srcOpnds->PushOpnd(*res); + } + ResetLmbcArgInfo(); /* reset */ + ResetLmbcArgsInRegs(); + ResetLmbcTotalArgs(); +} + +void AArch64CGFunc::SelectCall(CallNode &callNode) { + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + MIRType *retType = fn->GetReturnType(); + + if (GetCG()->GenerateVerboseCG()) { + const std::string &comment = fsym->GetName(); + GetCurBB()->AppendInsn(CreateCommentInsn(comment)); + } + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + SetLmbcCallReturnType(nullptr); + bool largeStructRet = false; + if (fn->IsFirstArgReturn()) { + MIRPtrType *ptrTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx( + fn->GetFormalDefVec()[0].formalTyIdx)); + MIRType *sTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTy->GetPointedTyIdx()); + largeStructRet = sTy->GetSize() > k16ByteSize; + SetLmbcCallReturnType(sTy); + } else { + MIRType *ty = fn->GetReturnType(); + SetLmbcCallReturnType(ty); + } + LmbcSelectParmList(srcOpnds, largeStructRet); + } + bool callNative = false; + if ((fsym->GetName() == "MCC_CallFastNative") || (fsym->GetName() == "MCC_CallFastNativeExt") || + (fsym->GetName() == "MCC_CallSlowNative0") || (fsym->GetName() == "MCC_CallSlowNative1") || + (fsym->GetName() == "MCC_CallSlowNative2") || (fsym->GetName() == "MCC_CallSlowNative3") || + (fsym->GetName() == "MCC_CallSlowNative4") || (fsym->GetName() == "MCC_CallSlowNative5") || + (fsym->GetName() == "MCC_CallSlowNative6") || (fsym->GetName() == "MCC_CallSlowNative7") || + (fsym->GetName() == "MCC_CallSlowNative8") || (fsym->GetName() == "MCC_CallSlowNativeExt")) { + callNative = true; 
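+    /* For these runtime stubs the native entry point is not fsym itself:
+       it is the call's first operand, copied into x9 below, roughly
+         mov x9, <native_func>
+         bl  MCC_CallFastNative */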
+ } + + std::vector stackPosition; + if ((fsym->GetName() == "MCC_DecRefResetPair") || (fsym->GetName() == "MCC_ClearLocalStackRef")) { + SelectClearStackCallParmList(callNode, *srcOpnds, stackPosition); + } else { + SelectParmList(callNode, *srcOpnds, callNative); + } + if (callNative) { + GetCurBB()->AppendInsn(CreateCommentInsn("call native func")); + + BaseNode *funcArgExpr = callNode.Opnd(0); + PrimType ptype = funcArgExpr->GetPrimType(); + Operand *funcOpnd = HandleExpr(callNode, *funcArgExpr); + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, + GetRegTyFromPrimTy(PTY_a64)); + SelectCopy(livein, ptype, *funcOpnd, ptype); + + RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + srcOpnds->PushOpnd(extraOpnd); + } + const std::string &funcName = fsym->GetName(); + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && + funcName == "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I") { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx, true); + IntrinsifyStringIndexOf(*srcOpnds, *st); + return; + } + Insn &callInsn = AppendCall(*fsym, *srcOpnds); + GetCurBB()->SetHasCall(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + + /* check if this call use stack slot to return */ + if (fn->IsFirstArgReturn()) { + SetStackProtectInfo(kRetureStackSlot); + } + + GetFunction().SetHasCall(); + if (GetMirModule().IsCModule()) { /* do not mark abort BB in C at present */ + if (fsym->GetName() == "__builtin_unreachable") { + GetCurBB()->ClearInsns(); + GetCurBB()->SetUnreachable(true); + } + return; + } + if ((fsym->GetName() == "MCC_ThrowException") || (fsym->GetName() == "MCC_RethrowException") || + (fsym->GetName() == "MCC_ThrowArithmeticException") || + (fsym->GetName() == "MCC_ThrowArrayIndexOutOfBoundsException") || + (fsym->GetName() == "MCC_ThrowNullPointerException") || + (fsym->GetName() == "MCC_ThrowStringIndexOutOfBoundsException") || (fsym->GetName() == "abort") || + (fsym->GetName() == "exit") || (fsym->GetName() == "MCC_Array_Boundary_Check")) { + callInsn.SetIsThrow(true); + GetCurBB()->SetKind(BB::kBBThrow); + } else if ((fsym->GetName() == "MCC_DecRefResetPair") || (fsym->GetName() == "MCC_ClearLocalStackRef")) { + for (size_t i = 0; i < stackPosition.size(); ++i) { + callInsn.SetClearStackOffset(i, stackPosition[i]); + } + } +} + +void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + SelectParmList(icallNode, *srcOpnds); + + Operand *fptrOpnd = &srcOpnd; + if (fptrOpnd->GetKind() != Operand::kOpdRegister) { + PrimType ty = icallNode.Opnd(0)->GetPrimType(); + fptrOpnd = &SelectCopy(srcOpnd, ty, ty); + } + DEBUG_ASSERT(fptrOpnd->IsRegister(), "SelectIcall: function pointer not RegOperand"); + RegOperand *regOpnd = static_cast(fptrOpnd); + Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *regOpnd, *srcOpnds); + + MIRType *retType = icallNode.GetCallReturnType(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + + /* check if this icall use stack slot to return */ + CallReturnVector *p2nrets = &icallNode.GetReturnVec(); + if 
(p2nrets->size() == k1ByteSize) { + StIdx stIdx = (*p2nrets)[0].first; + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (sym != nullptr && (GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()) > k16ByteSize)) { + SetStackProtectInfo(kRetureStackSlot); + } + } + + GetCurBB()->AppendInsn(callInsn); + GetCurBB()->SetHasCall(); + DEBUG_ASSERT(GetCurBB()->GetLastInsn()->IsCall(), "lastInsn should be a call"); + GetFunction().SetHasCall(); +} + +void AArch64CGFunc::HandleCatch() { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + regno_t regNO = uCatch.regNOCatch; + RegOperand &vregOpnd = GetOrCreateVirtualRegisterOperand(regNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovrr, vregOpnd, + GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt))); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(uCatch.opndCatch->GetSize(), PTY_a64), + GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt), *uCatch.opndCatch)); + } +} + +void AArch64CGFunc::SelectMembar(StmtNode &membar) { + switch (membar.GetOpCode()) { + case OP_membaracquire: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ishld, AArch64CG::kMd[MOP_dmb_ishld])); + break; + case OP_membarrelease: + case OP_membarstoreload: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + break; + case OP_membarstorestore: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ishst, AArch64CG::kMd[MOP_dmb_ishst])); + break; + default: + DEBUG_ASSERT(false, "NYI"); + break; + } +} + +void AArch64CGFunc::SelectComment(CommentNode &comment) { + GetCurBB()->AppendInsn(CreateCommentInsn(comment.GetComment())); +} + +void AArch64CGFunc::SelectReturn(Operand *opnd0) { + bool is64x1vec = GetFunction().GetAttr(FUNCATTR_oneelem_simd) ? true : false; + MIRType *floatType = GlobalTables::GetTypeTable().GetDouble(); + MIRType *retTyp = is64x1vec ? floatType : GetFunction().GetReturnType(); + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*retTyp, retMech); + if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) { + RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()); + PrimType oriPrimType = is64x1vec ? 
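/* AAPCS64 returns integer results in x0/w0 and FP/SIMD results in v0.
   A oneelem_simd function keeps its value in an integer vreg but must
   return it in d0, hence the f64 return type chosen above and the
   MOP_xvmovdr copy in the immediate case below. */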
GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0(); + AArch64reg retReg = static_cast(retMech.GetReg0()); + if (opnd0->IsRegister()) { + RegOperand *regOpnd = static_cast(opnd0); + if (regOpnd->GetRegisterNumber() != retMech.GetReg0()) { + RegOperand &retOpnd = + GetOrCreatePhysicalRegisterOperand(retReg, regOpnd->GetSize(), regTyp); + SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *regOpnd, oriPrimType); + } + } else if (opnd0->IsMemoryAccessOperand()) { + auto *memopnd = static_cast(opnd0); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), regTyp); + MOperator mOp = PickLdInsn(memopnd->GetSize(), retMech.GetPrimTypeOfReg0()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, retOpnd, *memopnd)); + } else if (opnd0->IsConstImmediate()) { + ImmOperand *immOpnd = static_cast(opnd0); + if (!is64x1vec) { + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0())); + SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *immOpnd, retMech.GetPrimTypeOfReg0()); + } else { + PrimType rType = GetFunction().GetReturnType()->GetPrimType(); + RegOperand *reg = &CreateRegisterOperandOfType(rType); + SelectCopy(*reg, rType, *immOpnd, rType); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + GetPrimTypeBitSize(PTY_f64), GetRegTyFromPrimTy(PTY_f64)); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xvmovdr, retOpnd, *reg); + GetCurBB()->AppendInsn(insn); + } + } else { + CHECK_FATAL(false, "nyi"); + } + } + GetExitBBsVec().emplace_back(GetCurBB()); +} + +RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType) { + AArch64reg reg = R0; + switch (sregIdx) { + case kSregSp: + reg = RSP; + break; + case kSregFp: + reg = RFP; + break; + case kSregGp: { + MIRSymbol *sym = GetCG()->GetGP(); + if (sym == nullptr) { + sym = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + std::string strBuf("__file__local__GP"); + sym->SetNameStrIdx(GetMirModule().GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + GetCG()->SetGP(sym); + } + RegOperand &result = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAddrof(result, CreateStImmOperand(*sym, 0, 0)); + return result; + } + case kSregThrownval: { /* uses x0 == R0 */ + DEBUG_ASSERT(uCatch.regNOCatch > 0, "regNOCatch should greater than 0."); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + RegOperand ®Opnd = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + PickLdInsn(uCatch.opndCatch->GetSize(), PTY_a64), regOpnd, *uCatch.opndCatch)); + return regOpnd; + } else { + return GetOrCreateVirtualRegisterOperand(uCatch.regNOCatch); + } + } + case kSregRetval0: + if (!IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType)) { + reg = V0; + } + break; + case kSregMethodhdl: + if (methodHandleVreg == regno_t(-1)) { + methodHandleVreg = NewVReg(kRegTyInt, k8BitSize); + } + return GetOrCreateVirtualRegisterOperand(methodHandleVreg); + default: + DEBUG_ASSERT(false, "Special pseudo registers NYI"); + break; + } + return GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); +} + +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string &asmAttr) { + DEBUG_ASSERT(!asmAttr.empty(), "Get inline asm string failed in GetOrCreatePhysicalRegisterOperand"); + RegType rKind = 
kRegTyUndef;
+  uint32 rSize = 0;
+  /* Get Register Type and Size */
+  switch (asmAttr[0]) {
+    case 'x': {
+      rKind = kRegTyInt;
+      rSize = k64BitSize;
+      break;
+    }
+    case 'w': {
+      rKind = kRegTyInt;
+      rSize = k32BitSize;
+      break;
+    }
+    default: {
+      LogInfo::MapleLogger() << "Unsupported asm string : " << asmAttr << "\n";
+      CHECK_FATAL(false, "this kind of register is not supported yet");
+    }
+  }
+  AArch64reg rNO = kRinvalid;
+  /* Get Register Number */
+  uint32 regNumPos = 1;
+  char numberChar = asmAttr[regNumPos++];
+  if (numberChar >= '0' && numberChar <= '9') {
+    uint32 val = static_cast<uint32>(numberChar - '0');
+    if (regNumPos < asmAttr.length()) {
+      char numberCharSecond = asmAttr[regNumPos++];
+      DEBUG_ASSERT(regNumPos == asmAttr.length(), "Invalid asm attribute");
+      if (numberCharSecond >= '0' && numberCharSecond <= '9') {
+        val = val * kDecimalMax + static_cast<uint32>(numberCharSecond - '0');
+      }
+    }
+    rNO = static_cast<AArch64reg>(static_cast<uint32>(R0) + val);
+    if (val > (kAsmInputRegPrefixOpnd + 1)) {
+      LogInfo::MapleLogger() << "Unsupported asm string : " << asmAttr << "\n";
+      CHECK_FATAL(false, "this kind of register is not supported yet");
+    }
+  } else if (numberChar == 0) {
+    return CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+  } else {
+    CHECK_FATAL(false, "Unexpected input in GetOrCreatePhysicalRegisterOperand");
+  }
+  return GetOrCreatePhysicalRegisterOperand(rNO, rSize, rKind);
+}
+
+RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size,
+                                                              RegType kind, uint32 flag) {
+  uint64 aarch64PhyRegIdx = regNO;
+  DEBUG_ASSERT(flag == 0, "Do not expect flag here");
+  if (size <= k32BitSize) {
+    size = k32BitSize;
+    aarch64PhyRegIdx = aarch64PhyRegIdx << 1;
+  } else if (size <= k64BitSize) {
+    size = k64BitSize;
+    aarch64PhyRegIdx = (aarch64PhyRegIdx << 1) + 1;
+  } else {
+    size = (size == k128BitSize) ? k128BitSize : k64BitSize;
+    aarch64PhyRegIdx = aarch64PhyRegIdx << 2;
+  }
+  RegOperand *phyRegOpnd = nullptr;
+  auto phyRegIt = phyRegOperandTable.find(aarch64PhyRegIdx);
+  if (phyRegIt != phyRegOperandTable.end()) {
+    phyRegOpnd = phyRegOperandTable[aarch64PhyRegIdx];
+  } else {
+    phyRegOpnd = memPool->New<RegOperand>(regNO, size, kind, flag);
+    phyRegOperandTable.emplace(aarch64PhyRegIdx, phyRegOpnd);
+  }
+  return *phyRegOpnd;
+}
+
+const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const {
+  const MapleUnorderedMap<LabelIdx, LabelOperand*>::const_iterator it = hashLabelOpndTable.find(labIdx);
+  if (it != hashLabelOpndTable.end()) {
+    return it->second;
+  }
+  return nullptr;
+}
+
+LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) {
+  MapleUnorderedMap<LabelIdx, LabelOperand*>::iterator it = hashLabelOpndTable.find(labIdx);
+  if (it != hashLabelOpndTable.end()) {
+    return *(it->second);
+  }
+  const char *funcName = GetShortFuncName().c_str();
+  LabelOperand *res = memPool->New<LabelOperand>(funcName, labIdx);
+  hashLabelOpndTable[labIdx] = res;
+  return *res;
+}
+
+LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(BB &bb) {
+  LabelIdx labelIdx = bb.GetLabIdx();
+  if (labelIdx == MIRLabelTable::GetDummyLabel()) {
+    labelIdx = CreateLabel();
+    bb.AddLabel(labelIdx);
+  }
+  return GetOrCreateLabelOperand(labelIdx);
+}
+
+uint32 AArch64CGFunc::GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const {
+  /* Generating a larger sized mem op than alignment if allowed by aggregate starting address */
+  uint32 offsetAlign1 = (offset1 == 0) ? k8ByteSize : offset1;
+  uint32 offsetAlign2 = (offset2 == 0) ?
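/* Worked example: offset1 == 12 and offset2 == 20 both have their lowest
   set bit at position 3 (__builtin_ffs is 1-based), so alignOffset is
   1 << 2 == 4 and the aggregate is copied in 4-byte chunks; offsets of 0
   or multiples of 8 allow full 8-byte copies. */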
k8ByteSize : offset2; + uint32 alignOffset = 1U << (std::min(__builtin_ffs(static_cast(offsetAlign1)), + __builtin_ffs(static_cast(offsetAlign2))) - 1); + if (alignOffset == k8ByteSize || alignOffset == k4ByteSize || alignOffset == k2ByteSize) { + return alignOffset; + } else if (alignOffset > k8ByteSize) { + return k8ByteSize; + } else { + return alignment; + } +} + +OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size) { + uint64 aarch64OfstRegIdx = offset; + aarch64OfstRegIdx = (aarch64OfstRegIdx << 1); + if (size == k64BitSize) { + ++aarch64OfstRegIdx; + } + DEBUG_ASSERT(size == k32BitSize || size == k64BitSize, "ofStOpnd size check"); + auto it = hashOfstOpndTable.find(aarch64OfstRegIdx); + if (it != hashOfstOpndTable.end()) { + return *it->second; + } + OfstOperand *res = &CreateOfstOpnd(offset, size); + hashOfstOpndTable[aarch64OfstRegIdx] = res; + return *res; +} + +void AArch64CGFunc::SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, std::vector& rematInsns) { + const MIRSymbol *symbol = stImm.GetSymbol(); + DEBUG_ASSERT ((symbol->GetStorageClass() != kScAuto) || (symbol->GetStorageClass() != kScFormal), ""); + Operand *srcOpnd = &result; + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xadrp, result, stImm)); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + static_cast(srcOpnd), nullptr, &offset, nullptr); + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn( + memOpnd.GetSize() == k64BitSize ? MOP_xldr : MOP_wldr, result, memOpnd)); + + if (stImm.GetOffset() > 0) { + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xaddrri12, result, result, immOpnd)); + return; + } + } else { + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, *srcOpnd, stImm)); + } +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, + bool needLow12, RegOperand *regOp, + std::vector& rematInsns) { + MIRStorageClass storageClass = symbol.GetStorageClass(); + if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { + if (symbol.GetSKind() == kStConst) { + DEBUG_ASSERT(offset == 0, "offset should be 0 for constant literals"); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); + } else { + if (needLow12) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xadrp, stAddrOpnd, stOpnd); + 
rematInsns.emplace_back(&insn); + /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + } + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef, + bool needLow12, RegOperand *regOp) { + MIRStorageClass storageClass = symbol.GetStorageClass(); + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + AArch64SymbolAlloc *symLoc = + static_cast(GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex())); + if (forLocalRef) { + auto p = GetMemlayout()->GetLocalRefLocMap().find(symbol.GetStIdx()); + CHECK_FATAL(p != GetMemlayout()->GetLocalRefLocMap().end(), "sym loc should have been defined"); + symLoc = static_cast(p->second); + } + DEBUG_ASSERT(symLoc != nullptr, "sym loc should have been defined"); + /* At this point, we don't know which registers the callee needs to save. */ + DEBUG_ASSERT((IsFPLRAddedToCalleeSavedList() || (SizeOfCalleeSaved() == 0)), + "CalleeSaved won't be known until after Register Allocation"); + StIdx idx = symbol.GetStIdx(); + auto it = memOpndsRequiringOffsetAdjustment.find(idx); + DEBUG_ASSERT((!IsFPLRAddedToCalleeSavedList() || + ((it != memOpndsRequiringOffsetAdjustment.end()) || (storageClass == kScFormal))), + "Memory operand of this symbol should have been added to the hash table"); + int32 stOffset = GetBaseOffset(*symLoc); + if (it != memOpndsRequiringOffsetAdjustment.end()) { + if (GetMemlayout()->IsLocalRefLoc(symbol)) { + if (!forLocalRef) { + return *(it->second); + } + } else if (mirModule.IsJavaModule()) { + return *(it->second); + } else { + Operand* offOpnd = (it->second)->GetOffset(); + if (((static_cast(offOpnd))->GetOffsetValue() == (stOffset + offset)) && + (it->second->GetSize() == size)) { + return *(it->second); + } + } + } + it = memOpndsForStkPassedArguments.find(idx); + if (it != memOpndsForStkPassedArguments.end()) { + if (GetMemlayout()->IsLocalRefLoc(symbol)) { + if (!forLocalRef) { + return *(it->second); + } + } else { + return *(it->second); + } + } + + RegOperand *baseOpnd = static_cast(GetBaseReg(*symLoc)); + int32 totalOffset = stOffset + static_cast(offset); + /* needs a fresh copy of ImmOperand as we may adjust its offset at a later stage. 
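     (Offsets relative to the stack-passed-argument segment are tagged
     kUnAdjustVary below; the true distance from SP is not known until
     the final frame size, including callee-saved spills, is fixed, so
     the prologue/epilogue pass rewrites these immediates afterwards.)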
*/ + OfstOperand *offsetOpnd = nullptr; + if (CGOptions::IsBigEndian()) { + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && size < k64BitSize) { + offsetOpnd = &CreateOfstOpnd(k4BitSize + static_cast(totalOffset), k64BitSize); + } else { + offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(totalOffset)), k64BitSize); + } + } else { + offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(totalOffset)), k64BitSize); + } + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && + MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) { + ImmOperand *offsetOprand; + offsetOprand = &CreateImmOperand(totalOffset, k64BitSize, true, kUnAdjustVary); + Operand *resImmOpnd = &SelectCopy(*offsetOprand, PTY_i64, PTY_i64); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, *baseOpnd, + static_cast(*resImmOpnd), nullptr, symbol, true); + } else { + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd->SetVary(kUnAdjustVary); + } + MemOperand *res = CreateMemOperand(MemOperand::kAddrModeBOi, size, *baseOpnd, + nullptr, offsetOpnd, &symbol); + if ((symbol.GetType()->GetKind() != kTypeClass) && !forLocalRef) { + memOpndsRequiringOffsetAdjustment[idx] = res; + } + return *res; + } + } else if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + SelectAddrof(stAddrOpnd, stOpnd); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { + if (symbol.GetSKind() == kStConst) { + DEBUG_ASSERT(offset == 0, "offset should be 0 for constant literals"); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); + } else { + /* not guaranteed align for uninitialized symbol */ + if (needLow12 || (!symbol.IsConst() && CGOptions::IsPIC())) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + SelectAddrof(stAddrOpnd, stOpnd); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xadrp, stAddrOpnd, stOpnd); + GetCurBB()->AppendInsn(insn); + /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + } + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +MemOperand &AArch64CGFunc::HashMemOpnd(MemOperand &tMemOpnd) { + auto it = hashMemOpndTable.find(tMemOpnd); + if (it != hashMemOpndTable.end()) { + return *(it->second); + } + auto *res = memPool->New(tMemOpnd); + hashMemOpndTable[tMemOpnd] = res; + return *res; +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand *base, RegOperand *index, ImmOperand *offset, + const MIRSymbol *st) { + DEBUG_ASSERT(base != 
nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, index, offset, st); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); + } + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand *base, RegOperand *index, int32 shift, + bool isSigned) { + DEBUG_ASSERT(base != nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); + } + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand &oldMem) { + return HashMemOpnd(oldMem); +} + +/* offset: base offset from FP or SP */ +MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size) { + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + /* do not need to check bit size rotate of sign immediate */ + bool checkSimm = (offset > kMinSimm64 && offset < kMaxSimm64Pair); + if (!checkSimm && !ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) { + Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, baseOpnd, + static_cast(resImmOpnd), nullptr, nullptr); + } else { + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, + nullptr, &offsetOpnd, nullptr); + } +} + +/* offset: base offset + #:lo12:Label+immediate */ +MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym) { + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + DEBUG_ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), ""); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym); +} + +RegOperand &AArch64CGFunc::GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, + PrimType baseType, PrimType targetType) { + RegOperand *index = &LoadIntoRegister(*HandleExpr(indexExpr, *(indexExpr.Opnd(0))), PTY_a64); + RegOperand *srcOpnd = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand *imm = &CreateImmOperand(PTY_a64, shift); + SelectShift(*srcOpnd, *index, *imm, kShiftLeft, PTY_a64); + RegOperand *result = &CreateRegisterOperandOfType(PTY_a64); + SelectAdd(*result, base, *srcOpnd, PTY_a64); + + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand &mo = + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, result, nullptr, offopnd, nullptr); + RegOperand &structAddr = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(baseType), targetType), structAddr, mo)); + return structAddr; +} + +/* + * case 1: iread a64 <* <* void>> 0 (add a64 ( + * addrof a64 $__reg_jni_func_tab$$libcore_all_dex, + * mul a64 ( + * cvt a64 i32 (constval i32 21), + * constval a64 8))) + * + * case 2 : iread u32 <* u8> 0 (add a64 (regread a64 %61, constval a64 3)) + * case 3 : iread u32 <* u8> 0 (add a64 (regread a64 %61, regread a64 %65)) + * case 4 : iread u32 <* u8> 0 (add a64 (cvt a64 i32(regread %n))) + */ +MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) { + aggParamReg = nullptr; + if (memOrd != AArch64isa::kMoNone || addrExpr.GetOpCode() != 
OP_add || offset != 0) {
+    return nullptr;
+  }
+  BaseNode *baseExpr = addrExpr.Opnd(0);
+  BaseNode *addendExpr = addrExpr.Opnd(1);
+
+  if (baseExpr->GetOpCode() == OP_regread) {
+    /* case 2 */
+    if (addendExpr->GetOpCode() == OP_constval) {
+      DEBUG_ASSERT(addrExpr.GetNumOpnds() == 2, "Unexpected expr operand in CheckAndCreateExtendMemOpnd");
+      ConstvalNode *constOfstNode = static_cast<ConstvalNode*>(addendExpr);
+      DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
+      MIRIntConst *intOfst = safe_cast<MIRIntConst>(constOfstNode->GetConstVal());
+      CHECK_FATAL(intOfst != nullptr, "just checking");
+      /* discard large offset and negative offset */
+      if (intOfst->GetExtValue() > INT32_MAX || intOfst->IsNegative()) {
+        return nullptr;
+      }
+      uint32 scale = static_cast<uint32>(intOfst->GetExtValue());
+      OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize);
+      uint32 dsize = GetPrimTypeBitSize(ptype);
+      MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype),
+          SelectRegread(*static_cast<RegreadNode*>(baseExpr)), nullptr, &ofstOpnd, nullptr);
+      return IsOperandImmValid(PickLdInsn(dsize, ptype), memOpnd, kInsnSecondOpnd) ? memOpnd : nullptr;
+      /* case 3 */
+    } else if (addendExpr->GetOpCode() == OP_regread) {
+      CHECK_FATAL(addrExpr.GetNumOpnds() == 2, "Unexpected expr operand in CheckAndCreateExtendMemOpnd");
+      if (GetPrimTypeSize(baseExpr->GetPrimType()) != GetPrimTypeSize(addendExpr->GetPrimType())) {
+        return nullptr;
+      }
+
+      auto *baseReg = SelectRegread(*static_cast<RegreadNode*>(baseExpr));
+      auto *indexReg = SelectRegread(*static_cast<RegreadNode*>(addendExpr));
+      MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), baseReg, indexReg,
+                                                nullptr, nullptr);
+      return memOpnd;
+      /* case 4 */
+    } else if (addendExpr->GetOpCode() == OP_cvt && addendExpr->GetNumOpnds() == 1) {
+      int shiftAmount = 0;
+      BaseNode *cvtRegreadNode = addendExpr->Opnd(kInsnFirstOpnd);
+      if (cvtRegreadNode->GetOpCode() == OP_regread && cvtRegreadNode->IsLeaf()) {
+        uint32 fromSize = GetPrimTypeBitSize(cvtRegreadNode->GetPrimType());
+        uint32 toSize = GetPrimTypeBitSize(addendExpr->GetPrimType());
+
+        if (toSize < fromSize) {
+          return nullptr;
+        }
+
+        MemOperand *memOpnd = &GetOrCreateMemOpnd(
+            MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype),
+            SelectRegread(*static_cast<RegreadNode*>(baseExpr)),
+            SelectRegread(*static_cast<RegreadNode*>(cvtRegreadNode)), shiftAmount, toSize != fromSize);
+        return memOpnd;
+      }
+    }
+  }
+  if (addendExpr->GetOpCode() != OP_mul || !IsPrimitiveInteger(ptype)) {
+    return nullptr;
+  }
+  BaseNode *indexExpr, *scaleExpr;
+  indexExpr = addendExpr->Opnd(0);
+  scaleExpr = addendExpr->Opnd(1);
+  if (scaleExpr->GetOpCode() != OP_constval) {
+    return nullptr;
+  }
+  ConstvalNode *constValNode = static_cast<ConstvalNode*>(scaleExpr);
+  CHECK_FATAL(constValNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst");
+  MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(constValNode->GetConstVal());
+  CHECK_FATAL(mirIntConst != nullptr, "just checking");
+  int32 scale = static_cast<int32>(mirIntConst->GetExtValue());
+  if (scale < 0) {
+    return nullptr;
+  }
+  uint32 unsignedScale = static_cast<uint32>(scale);
+  if (unsignedScale != GetPrimTypeSize(ptype) || indexExpr->GetOpCode() != OP_cvt) {
+    return nullptr;
+  }
+  /* 8 is 1 << 3; 4 is 1 << 2; 2 is 1 << 1; 1 is 1 << 0 */
+  int32 shift = (unsignedScale == 8) ? 3 : ((unsignedScale == 4) ? 2 : ((unsignedScale == 2) ?
1 : 0)); + RegOperand &base = static_cast(LoadIntoRegister(*HandleExpr(addrExpr, *baseExpr), PTY_a64)); + TypeCvtNode *typeCvtNode = static_cast(indexExpr); + PrimType fromType = typeCvtNode->FromType(); + PrimType toType = typeCvtNode->GetPrimType(); + if (isAggParamInReg) { + aggParamReg = &GenStructParamIndex(base, *indexExpr, shift, ptype, fromType); + return nullptr; + } + MemOperand *memOpnd = nullptr; + if ((fromType == PTY_i32) && (toType == PTY_a64)) { + RegOperand &index = + static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32)); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, + shift, true); + } else if ((fromType == PTY_u32) && (toType == PTY_a64)) { + RegOperand &index = + static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32)); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, + shift, false); + } + return memOpnd; +} + +MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, + int64 offset) { + Operand *addrOpnd = nullptr; + if ((addrExpr.GetOpCode() == OP_add || addrExpr.GetOpCode() == OP_sub) && + addrExpr.Opnd(1)->GetOpCode() == OP_constval) { + addrOpnd = HandleExpr(addrExpr, *addrExpr.Opnd(0)); + ConstvalNode *constOfstNode = static_cast(addrExpr.Opnd(1)); + DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *intOfst = safe_cast(constOfstNode->GetConstVal()); + CHECK_FATAL(intOfst != nullptr, "just checking"); + offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue(); + } else { + addrOpnd = HandleExpr(parent, addrExpr); + } + addrOpnd = static_cast(&LoadIntoRegister(*addrOpnd, PTY_a64)); + Insn *lastInsn = GetCurBB() == nullptr ? nullptr : GetCurBB()->GetLastInsn(); + if ((addrExpr.GetOpCode() == OP_CG_array_elem_add) && (offset == 0) && lastInsn && + (lastInsn->GetMachineOpcode() == MOP_xadrpl12) && + (&lastInsn->GetOperand(kInsnFirstOpnd) == &lastInsn->GetOperand(kInsnSecondOpnd))) { + Operand &opnd = lastInsn->GetOperand(kInsnThirdOpnd); + StImmOperand &stOpnd = static_cast(opnd); + + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(stOpnd.GetOffset()), k32BitSize); + MemOperand &tmpMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, stOpnd.GetSymbol()); + GetCurBB()->RemoveInsn(*GetCurBB()->GetLastInsn()); + return tmpMemOpnd; + } else { + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k64BitSize); + return GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, nullptr); + } +} + +/* + * Create a memory operand with specified data type and memory ordering, making + * use of aarch64 extend register addressing mode when possible. 
+ */ +MemOperand &AArch64CGFunc::CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) { + MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset, memOrd); + if (memOpnd != nullptr) { + return *memOpnd; + } + return CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset); +} + +MemOperand *AArch64CGFunc::CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) { + MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset, memOrd); + if (memOpnd != nullptr) { + return memOpnd; + } else if (aggParamReg != nullptr) { + return nullptr; + } + return &CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset); +} + +Operand &AArch64CGFunc::GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const { + return *memPool->New(symbol); +} + +Operand &AArch64CGFunc::GetOrCreateRflag() { + if (rcc == nullptr) { + rcc = &CreateRflagOperand(); + } + return *rcc; +} + +const Operand *AArch64CGFunc::GetRflag() const { + return rcc; +} + +Operand &AArch64CGFunc::GetOrCreatevaryreg() { + if (vary == nullptr) { + regno_t vRegNO = NewVReg(kRegTyVary, k8ByteSize); + vary = &CreateVirtualRegisterOperand(vRegNO); + } + return *vary; +} + +/* the first operand in opndvec is return opnd */ +void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector &opndVec, PrimType primType, + PrimType retPrimType, bool is2ndRet) { + std::vector pt; + pt.push_back(retPrimType); + for (size_t i = 0; i < opndVec.size(); ++i) { + pt.push_back(primType); + } + SelectLibCallNArg(funcName, opndVec, pt, retPrimType, is2ndRet); + return; +} + +void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt, PrimType retPrimType, bool is2ndRet) { + std::string newName = funcName; + // Check whether we have a maple version of libcall and we want to use it instead. 
+ if (!CGOptions::IsDuplicateAsmFileEmpty() && asmMap.find(funcName) != asmMap.end()) { + newName = asmMap.at(funcName); + } + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + (void)vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + st->SetTyIdx(GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + if (GetCG()->GenerateVerboseCG()) { + const std::string &comment = "lib call : " + newName; + GetCurBB()->AppendInsn(CreateCommentInsn(comment)); + } + + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + /* setup actual parameters */ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + for (size_t i = 1; i < opndVec.size(); ++i) { + DEBUG_ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty; + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + if (stOpnd->GetKind() != Operand::kOpdRegister) { + stOpnd = &SelectCopy(*stOpnd, pt[i], pt[i]); + } + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, pt[i], *expRegOpnd, pt[i]); + srcOpnds->PushOpnd(parmRegOpnd); + } + DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Insn &callInsn = AppendCall(*sym, *srcOpnds); + MIRType *callRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + if (callRetType != nullptr) { + callInsn.SetRetSize(static_cast(callRetType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(callRetType->GetPrimType())); + } + GetFunction().SetHasCall(); + /* get return value */ + Operand *opnd0 = opndVec[0]; + CCLocInfo retMech; + parmLocator.InitReturnInfo(*(GlobalTables::GetTypeTable().GetTypeTable().at(retPrimType)), retMech); + if (retMech.GetRegCount() <= 0) { + CHECK_FATAL(false, "should return from register"); + } + if (!opnd0->IsRegister()) { + CHECK_FATAL(false, "nyi"); + } + RegOperand *regOpnd = static_cast(opnd0); + AArch64reg regNum = static_cast(is2ndRet ? 
retMech.GetReg1() : retMech.GetReg0()); + if (regOpnd->GetRegisterNumber() != regNum) { + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(regNum, regOpnd->GetSize(), + GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*opnd0, retPrimType, retOpnd, retPrimType); + } +} + +Operand *AArch64CGFunc::GetBaseReg(const AArch64SymbolAlloc &symAlloc) { + MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind(); + DEBUG_ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) || + (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)), "NYI"); + + if (sgKind == kMsArgsStkPassed) { + return &GetOrCreatevaryreg(); + } + + if (fsp == nullptr) { + fsp = &GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt); + } + return fsp; +} + +int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &sa) { + const AArch64SymbolAlloc *symAlloc = static_cast(&sa); + /* Call Frame layout of AArch64 + * Refer to V2 in aarch64_memlayout.h. + * Do Not change this unless you know what you do + */ + const int32 sizeofFplr = 2 * kIntregBytelen; + MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind(); + AArch64MemLayout *memLayout = static_cast(this->GetMemlayout()); + if (sgKind == kMsArgsStkPassed) { /* for callees */ + int32 offset = static_cast(symAlloc->GetOffset()); + return offset; + } else if (sgKind == kMsArgsRegPassed) { + int32 baseOffset = memLayout->GetSizeOfLocals() + symAlloc->GetOffset() + memLayout->GetSizeOfRefLocals(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsRefLocals) { + int32 baseOffset = symAlloc->GetOffset() + memLayout->GetSizeOfLocals(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsLocals) { + int32 baseOffset = symAlloc->GetOffset(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsSpillReg) { + if (GetCG()->IsLmbc()) { + return symAlloc->GetOffset() + memLayout->SizeOfArgsToStackPass(); + } + int32 baseOffset = symAlloc->GetOffset() + memLayout->SizeOfArgsRegisterPassed() + memLayout->GetSizeOfLocals() + + memLayout->GetSizeOfRefLocals(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsArgsToStkPass) { /* this is for callers */ + return static_cast(symAlloc->GetOffset()); + } else { + CHECK_FATAL(false, "sgKind check"); + } + return 0; +} + +void AArch64CGFunc::AppendCall(const MIRSymbol &funcSymbol) { + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + AppendCall(funcSymbol, *srcOpnds); +} + +void AArch64CGFunc::DBGFixCallFrameLocationOffsets() { + for (DBGExprLoc *el : GetDbgCallFrameLocations()) { + if (el->GetSimpLoc()->GetDwOp() == DW_OP_fbreg) { + SymbolAlloc *symloc = static_cast(el->GetSymLoc()); + int32 offset = GetBaseOffset(*symloc) - GetDbgCallFrameOffset(); + el->SetFboffset(offset); + } + } +} + +void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, + bool isDest, Insn &insn) { + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + DEBUG_ASSERT(opnd0.GetKind() == Operand::kOpdRegister, "Spill memory operand should based on register"); + DEBUG_ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset), + "Spill memory operand should be with a immediate offset."); + + ImmOperand *immOpnd = static_cast(&opnd1); + + MOperator mOpCode = MOP_undef; + Insn *curInsn = &insn; + /* lower 24 bits has 1, higher bits are all 0 */ + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + /* lower 12 bits and 
+void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType,
+                                       bool isDest, Insn &insn) {
+  uint32 dsize = GetPrimTypeBitSize(primType);
+  bool is64Bits = (dsize == k64BitSize);
+  DEBUG_ASSERT(opnd0.GetKind() == Operand::kOpdRegister, "Spill memory operand should be based on a register");
+  DEBUG_ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset),
+               "Spill memory operand should be with an immediate offset.");
+
+  ImmOperand *immOpnd = static_cast<ImmOperand*>(&opnd1);
+
+  MOperator mOpCode = MOP_undef;
+  Insn *curInsn = &insn;
+  /* lower 24 bits have 1s, higher bits are all 0 */
+  if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
+    /* lower 12 bits and higher 12 bits both have 1s */
+    Operand *newOpnd0 = &opnd0;
+    if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
+          immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
+      /* process higher 12 bits */
+      ImmOperand &immOpnd2 =
+          CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
+                           immOpnd->GetSize(), immOpnd->IsSignedValue());
+      mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
+      BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
+      Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, immOpnd2, shiftopnd);
+      DEBUG_ASSERT(IsOperandImmValid(mOpCode, &immOpnd2, kInsnThirdOpnd), "immOpnd2 appears invalid");
+      if (isDest) {
+        insn.GetBB()->InsertInsnAfter(insn, newInsn);
+      } else {
+        insn.GetBB()->InsertInsnBefore(insn, newInsn);
+      }
+      /* get lower 12 bits value */
+      immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
+      newOpnd0 = &resOpnd;
+      curInsn = &newInsn;
+    }
+    /* process lower 12 bits value */
+    mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
+    Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd);
+    DEBUG_ASSERT(IsOperandImmValid(mOpCode, immOpnd, kInsnThirdOpnd), "immOpnd appears invalid");
+    if (isDest) {
+      insn.GetBB()->InsertInsnAfter(*curInsn, newInsn);
+    } else {
+      insn.GetBB()->InsertInsnBefore(insn, newInsn);
+    }
+  } else {
+    /* load into register */
+    RegOperand &movOpnd = GetOrCreatePhysicalRegisterOperand(R16, dsize, kRegTyInt);
+    mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32;
+    Insn &movInsn = GetInsnBuilder()->BuildInsn(mOpCode, movOpnd, *immOpnd);
+    mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
+    Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, movOpnd);
+    if (isDest) {
+      (void)insn.GetBB()->InsertInsnAfter(insn, newInsn);
+      (void)insn.GetBB()->InsertInsnAfter(insn, movInsn);
+    } else {
+      (void)insn.GetBB()->InsertInsnBefore(insn, movInsn);
+      (void)insn.GetBB()->InsertInsnBefore(insn, newInsn);
+    }
+  }
+}
+
+MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange(
+    MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange) {
+  if (vrNum >= vRegTable.size()) {
+    CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange");
+  }
+  uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize();
+  if (IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) {
+    if (CheckIfSplitOffsetWithAdd(*memOpnd, dataSize)) {
+      isOutOfRange = true;
+    }
+    memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize, regNum, isDest, &insn);
+  } else {
+    isOutOfRange = false;
+  }
+  return memOpnd;
+}
+
+void AArch64CGFunc::FreeSpillRegMem(regno_t vrNum) {
+  MemOperand *memOpnd = nullptr;
+
+  auto p = spillRegMemOperands.find(vrNum);
+  if (p != spillRegMemOperands.end()) {
+    memOpnd = p->second;
+  }
+
+  if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) {
+    auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
+    if (pSecond != pRegSpillMemOperands.end()) {
+      memOpnd = pSecond->second;
+    }
+  }
+
+  if (memOpnd == nullptr) {
+    DEBUG_ASSERT(false, "the freed spill reg has no memory operand");
+    return;
+  }
+
+  uint32 size = memOpnd->GetSize();
+  MapleUnorderedMap<uint32, SpillMemOperandSet*>::iterator iter;
+  if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) {
+    iter->second->Add(*memOpnd);
+  } else {
+    reuseSpillLocMem[size] = memPool->New<SpillMemOperandSet>(*GetFuncScopeAllocator());
+    reuseSpillLocMem[size]->Add(*memOpnd);
+  }
+}
+
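+/* Freed slots are pooled by operand size in reuseSpillLocMem, so two vregs of
+ * the same width with disjoint live ranges can share one stack slot. A sketch
+ * (the vreg numbers and slot are illustrative only):
+ *   FreeSpillRegMem(v100);       // its 64-bit slot, say [fp, #-16], joins the pool
+ *   GetOrCreatSpillMem(v200);    // reuses [fp, #-16] instead of growing the frame
+ */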
+MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) {
+  /* NOTE: must be used in RA, not in other places. */
+  if (IsVRegNOForPseudoRegister(vrNum)) {
+    auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
+    if (p != pRegSpillMemOperands.end()) {
+      return p->second;
+    }
+  }
+
+  auto p = spillRegMemOperands.find(vrNum);
+  if (p == spillRegMemOperands.end()) {
+    if (vrNum >= vRegTable.size()) {
+      CHECK_FATAL(false, "index out of range in AArch64CGFunc::GetOrCreatSpillMem");
+    }
+    uint32 memBitSize = k64BitSize;
+    auto it = reuseSpillLocMem.find(memBitSize);
+    if (it != reuseSpillLocMem.end()) {
+      MemOperand *memOpnd = it->second->GetOne();
+      if (memOpnd != nullptr) {
+        (void)spillRegMemOperands.emplace(std::pair<regno_t, MemOperand*>(vrNum, memOpnd));
+        return memOpnd;
+      }
+    }
+
+    RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand();
+    int64 offset = GetOrCreatSpillRegLocation(vrNum);
+    OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(offset), k64BitSize);
+    MemOperand *memOpnd = CreateMemOperand(MemOperand::kAddrModeBOi, memBitSize, baseOpnd,
+                                           nullptr, offsetOpnd, nullptr);
+    (void)spillRegMemOperands.emplace(std::pair<regno_t, MemOperand*>(vrNum, memOpnd));
+    return memOpnd;
+  } else {
+    return p->second;
+  }
+}
+
+MemOperand *AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i) {
+  MapleUnorderedMap<PregIdx, MemOperand*>::iterator p;
+  if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
+    p = pRegSpillMemOperands.end();
+  } else {
+    p = pRegSpillMemOperands.find(i);
+  }
+  if (p != pRegSpillMemOperands.end()) {
+    return p->second;
+  }
+  int64 offset = GetPseudoRegisterSpillLocation(i);
+  MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(i);
+  uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte;
+  RegOperand &base = GetOrCreateFramePointerRegOperand();
+
+  OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
+  MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr);
+  if (IsImmediateOffsetOutOfRange(memOpnd, bitLen)) {
+    MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(memOpnd, bitLen);
+    (void)pRegSpillMemOperands.emplace(std::pair<PregIdx, MemOperand*>(i, &newMemOpnd));
+    return &newMemOpnd;
+  }
+  (void)pRegSpillMemOperands.emplace(std::pair<PregIdx, MemOperand*>(i, &memOpnd));
+  return &memOpnd;
+}
+
+MIRPreg *AArch64CGFunc::GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA) const {
+  PregIdx pri = afterSSA ? VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO);
+  if (pri == -1) {
+    return nullptr;
+  }
+  return GetFunction().GetPregTab()->PregFromPregIdx(pri);
+}
+
+/* Get the number of the return register of the current function.
*/ +AArch64reg AArch64CGFunc::GetReturnRegisterNumber() { + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*(GetFunction().GetReturnType()), retMech); + if (retMech.GetRegCount() > 0) { + return static_cast(retMech.GetReg0()); + } + return kRinvalid; +} + +bool AArch64CGFunc::CanLazyBinding(const Insn &ldrInsn) const { + Operand &memOpnd = ldrInsn.GetOperand(1); + auto &aarchMemOpnd = static_cast(memOpnd); + if (aarchMemOpnd.GetAddrMode() != MemOperand::kAddrModeLo12Li) { + return false; + } + + const MIRSymbol *sym = aarchMemOpnd.GetSymbol(); + CHECK_FATAL(sym != nullptr, "sym can't be nullptr"); + if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || + sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab() || + (sym->IsReflectionClassInfo() && !sym->IsReflectionArrayClassInfo())) { + return true; + } + + return false; +} + +/* + * add reg, reg, __PTR_C_STR_... + * ldr reg1, [reg] + * => + * ldr reg1, [reg, #:lo12:__Ptr_C_STR_...] + */ +void AArch64CGFunc::ConvertAdrpl12LdrToLdr() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + /* check first insn */ + MOperator thisMop = insn->GetMachineOpcode(); + if (thisMop != MOP_xadrpl12) { + continue; + } + /* check second insn */ + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (!(((nextMop >= MOP_wldrsb) && (nextMop <= MOP_dldp)) || ((nextMop >= MOP_wstrb) && (nextMop <= MOP_dstp)))) { + continue; + } + + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + CHECK_FATAL(memOpnd != nullptr, "memOpnd can't be nullptr"); + + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + continue; + } + + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + continue; + } + + auto ®Opnd = static_cast(insn->GetOperand(0)); + + /* Check if dest operand of insn is idential with base register of nextInsn. */ + RegOperand *baseReg = memOpnd->GetBaseRegister(); + CHECK_FATAL(baseReg != nullptr, "baseReg can't be nullptr"); + if (baseReg->GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + + StImmOperand &stImmOpnd = static_cast(insn->GetOperand(kInsnThirdOpnd)); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd( + static_cast(stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue()), k32BitSize); + RegOperand &newBaseOpnd = static_cast(insn->GetOperand(kInsnSecondOpnd)); + MemOperand &newMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), + &newBaseOpnd, nullptr, &ofstOpnd, stImmOpnd.GetSymbol()); + nextInsn->SetOperand(1, newMemOpnd); + bb->RemoveInsn(*insn); + } + } +} + +/* + * adrp reg1, __muid_func_undef_tab.. + * ldr reg2, [reg1, #:lo12:__muid_func_undef_tab..] + * => + * intrinsic_adrp_ldr reg2, __muid_func_undef_tab... 
+ */ +void AArch64CGFunc::ConvertAdrpLdrToIntrisic() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + + MOperator firstMop = insn->GetMachineOpcode(); + MOperator secondMop = nextInsn->GetMachineOpcode(); + if (!((firstMop == MOP_xadrp) && ((secondMop == MOP_wldr) || (secondMop == MOP_xldr)))) { + continue; + } + + if (CanLazyBinding(*nextInsn)) { + bb->ReplaceInsn(*insn, GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, nextInsn->GetOperand(0), insn->GetOperand(1))); + bb->RemoveInsn(*nextInsn); + } + } + } +} + +void AArch64CGFunc::ProcessLazyBinding() { + ConvertAdrpl12LdrToLdr(); + ConvertAdrpLdrToIntrisic(); +} + +/* + * Generate global long call + * adrp VRx, symbol + * ldr VRx, [VRx, #:lo12:symbol] + * blr VRx + * + * Input: + * insn : insert new instruction after the 'insn' + * func : the symbol of the function need to be called + * srcOpnds : list operand of the function need to be called + * isCleanCall: when generate clean call insn, set isCleanCall as true + * Return: the 'blr' instruction + */ +Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(func.GetStIdx()); + symbol->SetStorageClass(kScGlobal); + RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(*symbol, 0, 0); + OfstOperand &offsetOpnd = CreateOfstOpnd(*symbol, 0); + Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd); + GetCurBB()->AppendInsn(adrpInsn); + MemOperand &memOrd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPointerSize() * kBitsPerByte, + static_cast(&tmpReg), + nullptr, &offsetOpnd, symbol); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(memOrd.GetSize() == k64BitSize ? 
MOP_xldr : MOP_wldr, tmpReg, memOrd);
+  GetCurBB()->AppendInsn(ldrInsn);
+
+  Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds);
+  GetCurBB()->AppendInsn(callInsn);
+  GetCurBB()->SetHasCall();
+  return callInsn;
+}
+
+/*
+ * Generate local long call
+ *   adrp  VRx, symbol
+ *   add   VRx, VRx, #:lo12:symbol
+ *   blr   VRx
+ *
+ * Input:
+ *   func     : the symbol of the function to be called
+ *   srcOpnds : list operand of the function to be called
+ * Return: the 'blr' instruction
+ */
+Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds) {
+  RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64);
+  StImmOperand &stOpnd = CreateStImmOperand(func, 0, 0);
+  Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd);
+  GetCurBB()->AppendInsn(adrpInsn);
+  Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, tmpReg, tmpReg, stOpnd);
+  GetCurBB()->AppendInsn(addInsn);
+  Insn *callInsn = &GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds);
+  GetCurBB()->AppendInsn(*callInsn);
+  GetCurBB()->SetHasCall();
+  return *callInsn;
+}
+
+Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds) {
+  Insn *callInsn = nullptr;
+  if (CGOptions::IsLongCalls()) {
+    MIRFunction *mirFunc = sym.GetFunction();
+    if (IsDuplicateAsmList(sym) || (mirFunc && mirFunc->GetAttr(FUNCATTR_local))) {
+      callInsn = &GenerateLocalLongCallAfterInsn(sym, srcOpnds);
+    } else {
+      callInsn = &GenerateGlobalLongCallAfterInsn(sym, srcOpnds);
+    }
+  } else {
+    Operand &targetOpnd = GetOrCreateFuncNameOpnd(sym);
+    callInsn = &GetInsnBuilder()->BuildInsn(MOP_xbl, targetOpnd, srcOpnds);
+    GetCurBB()->AppendInsn(*callInsn);
+    GetCurBB()->SetHasCall();
+  }
+  return *callInsn;
+}
+
+bool AArch64CGFunc::IsDuplicateAsmList(const MIRSymbol &sym) const {
+  if (CGOptions::IsDuplicateAsmFileEmpty()) {
+    return false;
+  }
+
+  const std::string &name = sym.GetName();
+  if ((name == "strlen") ||
+      (name == "strncmp") ||
+      (name == "memcpy") ||
+      (name == "memmove") ||
+      (name == "strcmp") ||
+      (name == "memcmp") ||
+      (name == "memcmpMpl")) {
+    return true;
+  }
+  return false;
+}
+
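+/* Sketch of the code emitted for a counter bump in the profileGen path of
+ * SelectMPLProfCounterInc below (register and table names are illustrative):
+ *   ldr  x1, [ctr_tbl, #offset]
+ *   add  x1, x1, #1
+ *   str  x1, [ctr_tbl, #offset]
+ * Without profileGen, a single MOP_counter pseudo-insn is emitted instead.
+ */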
+void AArch64CGFunc::SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode) {
+  if (Options::profileGen) {
+    DEBUG_ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand");
+    BaseNode *arg1 = intrnNode.Opnd(0);
+    DEBUG_ASSERT(arg1 != nullptr, "nullptr check");
+    regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64));
+    RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1);
+    vReg1.SetRegNotBBLocal();
+    static const MIRSymbol *bbProfileTab = nullptr;
+
+    // Ref: MeProfGen::InstrumentFunc on ctrTbl naming
+    std::string ctrTblName = namemangler::kprefixProfCtrTbl +
+                             GetMirModule().GetFileName() + "_" + GetName();
+    std::replace(ctrTblName.begin(), ctrTblName.end(), '.', '_');
+    std::replace(ctrTblName.begin(), ctrTblName.end(), '-', '_');
+    std::replace(ctrTblName.begin(), ctrTblName.end(), '/', '_');
+
+    if (!bbProfileTab || bbProfileTab->GetName() != ctrTblName) {
+      bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(ctrTblName);
+      CHECK_FATAL(bbProfileTab != nullptr, "expect counter table");
+    }
+
+    ConstvalNode *constvalNode = static_cast<ConstvalNode*>(arg1);
+    MIRConst *mirConst = constvalNode->GetConstVal();
+    DEBUG_ASSERT(mirConst != nullptr, "nullptr check");
+    CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type");
+    MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(mirConst);
+    int64 offset = GetPrimTypeSize(PTY_u64) * mirIntConst->GetExtValue();
+
+    if (!CGOptions::IsQuiet()) {
+      maple::LogInfo::MapleLogger(kLlInfo) << "At counter table offset: " << offset << std::endl;
+    }
+    MemOperand *memOpnd = &GetOrCreateMemOpnd(*bbProfileTab, offset, k64BitSize);
+    if (IsImmediateOffsetOutOfRange(*memOpnd, k64BitSize)) {
+      memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, k64BitSize);
+    }
+    Operand *reg = &SelectCopy(*memOpnd, PTY_u64, PTY_u64);
+    ImmOperand &one = CreateImmOperand(1, k64BitSize, false);
+    SelectAdd(*reg, *reg, one, PTY_u64);
+    SelectCopy(*memOpnd, PTY_u64, *reg, PTY_u64);
+    return;
+  }
+
+  DEBUG_ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand");
+  BaseNode *arg1 = intrnNode.Opnd(0);
+  DEBUG_ASSERT(arg1 != nullptr, "nullptr check");
+  regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64));
+  RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1);
+  vReg1.SetRegNotBBLocal();
+  static const MIRSymbol *bbProfileTab = nullptr;
+  if (!bbProfileTab) {
+    std::string bbProfileName = namemangler::kBBProfileTabPrefixStr + GetMirModule().GetFileNameAsPostfix();
+    bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(bbProfileName);
+    CHECK_FATAL(bbProfileTab != nullptr, "expect bb profile tab");
+  }
+  ConstvalNode *constvalNode = static_cast<ConstvalNode*>(arg1);
+  MIRConst *mirConst = constvalNode->GetConstVal();
+  DEBUG_ASSERT(mirConst != nullptr, "nullptr check");
+  CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type");
+  MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(mirConst);
+  int64 idx = GetPrimTypeSize(PTY_u32) * mirIntConst->GetExtValue();
+  if (!CGOptions::IsQuiet()) {
+    maple::LogInfo::MapleLogger(kLlErr) << "Id index " << idx << std::endl;
+  }
+  StImmOperand &stOpnd = CreateStImmOperand(*bbProfileTab, idx, 0);
+  Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_counter, vReg1, stOpnd);
+  newInsn.SetDoNotRemove(true);
+  GetCurBB()->AppendInsn(newInsn);
+}
+
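+/* MPL_CLINIT_CHECK below lowers to one of two shapes, roughly (symbol and
+ * register names are illustrative only):
+ *   MOP_adrp_ldr    x2, _PTR__classinfo__Foo   // load through the indirection cell
+ *   MOP_clinit_tail x2                         // then the initialization check
+ * or, when no indirection is needed, a single fused pseudo-insn:
+ *   MOP_clinit      x2, __classinfo__Foo
+ */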
+void AArch64CGFunc::SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode) {
+  DEBUG_ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand");
+  BaseNode *arg = intrnNode.Opnd(0);
+  Operand *stOpnd = nullptr;
+  bool bClinitSeparate = false;
+  DEBUG_ASSERT(CGOptions::IsPIC(), "must be doPIC");
+  if (arg->GetOpCode() == OP_addrof) {
+    AddrofNode *addrof = static_cast<AddrofNode*>(arg);
+    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx());
+    DEBUG_ASSERT(symbol->GetName().find(CLASSINFO_PREFIX_STR) == 0, "must be a symbol with __classinfo__");
+
+    if (!symbol->IsMuidDataUndefTab()) {
+      std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName();
+      MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr();
+      symbol = GetMirModule().GetMIRBuilder()->GetOrCreateGlobalDecl(ptrName, *ptrType);
+      bClinitSeparate = true;
+      symbol->SetStorageClass(kScFstatic);
+    }
+    stOpnd = &CreateStImmOperand(*symbol, 0, 0);
+  } else {
+    arg = arg->Opnd(0);
+    BaseNode *arg0 = arg->Opnd(0);
+    BaseNode *arg1 = arg->Opnd(1);
+    DEBUG_ASSERT(arg0 != nullptr, "nullptr check");
+    DEBUG_ASSERT(arg1 != nullptr, "nullptr check");
+    DEBUG_ASSERT(arg0->GetOpCode() == OP_addrof, "expect the operand to be addrof");
+    AddrofNode *addrof = static_cast<AddrofNode*>(arg0);
+    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx());
+    DEBUG_ASSERT(addrof->GetFieldID() == 0, "For debug SelectMPLClinitCheck.");
+    ConstvalNode *constvalNode = static_cast<ConstvalNode*>(arg1);
+    MIRConst *mirConst = constvalNode->GetConstVal();
+    DEBUG_ASSERT(mirConst != nullptr, "nullptr check");
+    CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type");
+    MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(mirConst);
+    stOpnd = &CreateStImmOperand(*symbol, mirIntConst->GetExtValue(), 0);
+  }
+
+  regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64));
+  RegOperand &vReg2 = CreateVirtualRegisterOperand(vRegNO2);
+  vReg2.SetRegNotBBLocal();
+  if (bClinitSeparate) {
+    /* Separate MOP_clinit into MOP_adrp_ldr + MOP_clinit_tail. */
+    Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, vReg2, *stOpnd);
+    GetCurBB()->AppendInsn(newInsn);
+    newInsn.SetDoNotRemove(true);
+    Insn &insn = GetInsnBuilder()->BuildInsn(MOP_clinit_tail, vReg2);
+    insn.SetDoNotRemove(true);
+    GetCurBB()->AppendInsn(insn);
+  } else {
+    Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_clinit, vReg2, *stOpnd);
+    GetCurBB()->AppendInsn(newInsn);
+  }
+}
+
+void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) {
+  /* FP and LR are only pushed in regalloc(), after this intrinsic is expanded */
+  Operand &stkOpnd = GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt);
+
+  /* __stack */
+  ImmOperand *offsOpnd;
+  if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) {
+    offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */
+  } else {
+    offsOpnd = &CreateImmOperand(0, k64BitSize, true);
+  }
+  ImmOperand *offsOpnd2 = &CreateImmOperand(stkSize, k64BitSize, false);
+  RegOperand &vReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(GetLoweredPtrType())));
+  if (stkSize) {
+    SelectAdd(vReg, *offsOpnd, *offsOpnd2, GetLoweredPtrType());
+    SelectAdd(vReg, stkOpnd, vReg, GetLoweredPtrType());
+  } else {
+    SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType()); /* stack pointer */
+  }
+  OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); /* va_list ptr */
+  /* mem operand in va_list struct (lhs) */
+  MemOperand *strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr,
+                                            offOpnd, static_cast<MIRSymbol*>(nullptr));
+  GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(
+      vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd));
+
+  /* __gr_top ; it's the same as __stack before the 1st va_arg */
+  if (CGOptions::IsArm64ilp32()) {
+    offOpnd = &GetOrCreateOfstOpnd(GetPointerSize(), k64BitSize);
+  } else {
+    offOpnd = &GetOrCreateOfstOpnd(k8BitSize, k64BitSize);
+  }
+  strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr,
+                                offOpnd, static_cast<MIRSymbol*>(nullptr));
+  SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType());
+  GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(
+      vReg.GetSize() == k64BitSize ?
MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __vr_top */ + int32 grAreaSize = static_cast(static_cast(GetMemlayout())->GetSizeOfGRSaveArea()); + if (CGOptions::IsArm64ilp32()) { + offsOpnd2 = &CreateImmOperand(static_cast(RoundUp(static_cast(grAreaSize), k8ByteSize * 2)), + k64BitSize, false); + } else { + offsOpnd2 = &CreateImmOperand(static_cast(RoundUp(static_cast(grAreaSize), GetPointerSize() * 2)), + k64BitSize, false); + } + SelectSub(vReg, *offsOpnd, *offsOpnd2, GetLoweredPtrType()); /* if 1st opnd is register => sub */ + SelectAdd(vReg, stkOpnd, vReg, GetLoweredPtrType()); + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize() * 2, k64BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __gr_offs */ + int32 offs = 0 - grAreaSize; + offsOpnd = &CreateImmOperand(offs, k32BitSize, false); + RegOperand *tmpReg = &CreateRegisterOperandOfType(PTY_i32); /* offs value to be assigned (rhs) */ + SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize() * 3, k32BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wstr, *tmpReg, *strOpnd)); + + /* __vr_offs */ + offs = static_cast(UINT32_MAX - (static_cast(GetMemlayout())->GetSizeOfVRSaveArea() - 1UL)); + offsOpnd = &CreateImmOperand(offs, k32BitSize, false); + tmpReg = &CreateRegisterOperandOfType(PTY_i32); + SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); + offOpnd = &GetOrCreateOfstOpnd((GetPointerSize() * 3 + sizeof(int32)), k32BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wstr, *tmpReg, *strOpnd)); +} + +void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); + /* 2 operands, but only 1 needed. Don't need to emit code for second operand + * + * va_list is a passed struct with an address, load its address + */ + isIntrnCallForC = true; + BaseNode *argExpr = intrnNode.Opnd(0); + Operand *opnd = HandleExpr(intrnNode, *argExpr); + RegOperand &opnd0 = LoadIntoRegister(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ + + /* Find beginning of unnamed arg on stack. + * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...) + * where struct S has size 32, address of r and s are on stack but they are named. 
+ */ + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo pLoc; + uint32 stkSize = 0; + uint32 inReg = 0; + for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetFunction().GetNthParamTyIdx(i)); + parmLocator.LocateNextParm(*ty, pLoc); + if (pLoc.reg0 == kRinvalid) { /* on stack */ + stkSize = static_cast(pLoc.memOffset + pLoc.memSize); + } else { + inReg++; + } + } + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stkSize += (inReg * k8ByteSize); + } + if (CGOptions::IsArm64ilp32()) { + stkSize = static_cast(RoundUp(stkSize, k8ByteSize)); + } else { + stkSize = static_cast(RoundUp(stkSize, GetPointerSize())); + } + + GenCVaStartIntrin(opnd0, stkSize); + + return; +} + +/* + * intrinsiccall C___Atomic_store_N(ptr, val, memorder)) + * ====> *ptr = val + * let ptr -> x0 + * let val -> x1 + * implement to asm: str/stlr x1, [x0] + * a store-release would replace str if memorder is not 0 + */ +void AArch64CGFunc::SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode) { + auto primType = intrinsiccallNode.Opnd(1)->GetPrimType(); + auto *addr = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0)); + auto *value = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd); + auto *memOrderConst = static_cast(static_cast(memOrderOpnd)->GetConstVal()); + auto memOrder = static_cast(memOrderConst->GetExtValue()); + SelectAtomicStore(*value, *addr, primType, PickMemOrder(memOrder, false)); +} + +void AArch64CGFunc::SelectAtomicStore( + Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder) { + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize); + auto mOp = PickStInsn(GetPrimTypeBitSize(primType), primType, memOrder); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, LoadIntoRegister(srcOpnd, primType), memOpnd)); +} + +void AArch64CGFunc::SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm) { + if (CGOptions::IsPIC()) { + SelectCTlsGlobalDesc(result, stImm); + } else { + SelectCTlsLocalDesc(result, stImm); + } + if (stImm.GetOffset() > 0) { + auto &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } +} + +void AArch64CGFunc::SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm) { + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, result, *tpidr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_rel, result, result, stImm)); +} + +void AArch64CGFunc::SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm) { + /* according to AArch64 Machine Directives */ + auto &r0opnd = GetOrCreatePhysicalRegisterOperand (R0, k64BitSize, GetRegTyFromPrimTy(PTY_u64)); + RegOperand *tlsAddr = &CreateRegisterOperandOfType(PTY_u64); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_call, r0opnd, *tlsAddr, stImm)); + /* release tls address */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_pseduo_tls_release, *tlsAddr)); + // mrs xn, tpidr_el0 + // add x0, x0, xn + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, *specialFunc, *tpidr)); + SelectAdd(result, r0opnd, *specialFunc, PTY_u64); +} + +void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode 
&intrinsiccallNode) {
+  MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic();
+
+  if (GetCG()->GenerateVerboseCG()) {
+    std::string comment = GetIntrinsicName(intrinsic);
+    GetCurBB()->AppendInsn(CreateCommentInsn(comment));
+  }
+
+  /*
+   * At this moment, we eagerly evaluate all argument expressions. In theory,
+   * there could be intrinsics that extract meta-information of variables, such as
+   * their locations, rather than computing their values. Applications
+   * include building stack maps that help runtime libraries to find the values
+   * of local variables (See @stackmap in LLVM), in which case knowing their
+   * locations will suffice.
+   */
+  if (intrinsic == INTRN_MPL_CLINIT_CHECK) { /* special case */
+    SelectMPLClinitCheck(intrinsiccallNode);
+    return;
+  }
+  if (intrinsic == INTRN_MPL_PROF_COUNTER_INC) { /* special case */
+    SelectMPLProfCounterInc(intrinsiccallNode);
+    return;
+  }
+  if ((intrinsic == INTRN_MPL_CLEANUP_LOCALREFVARS) || (intrinsic == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP) ||
+      (intrinsic == INTRN_MPL_CLEANUP_NORETESCOBJS)) {
+    return;
+  }
+  switch (intrinsic) {
+    case INTRN_C_va_start:
+      SelectCVaStart(intrinsiccallNode);
+      return;
+    case INTRN_C___sync_lock_release_1:
+      SelectCSyncLockRelease(intrinsiccallNode, PTY_u8);
+      return;
+    case INTRN_C___sync_lock_release_2:
+      SelectCSyncLockRelease(intrinsiccallNode, PTY_u16);
+      return;
+    case INTRN_C___sync_lock_release_4:
+      SelectCSyncLockRelease(intrinsiccallNode, PTY_u32);
+      return;
+    case INTRN_C___sync_lock_release_8:
+      SelectCSyncLockRelease(intrinsiccallNode, PTY_u64);
+      return;
+    case INTRN_C___atomic_store_n:
+      SelectCAtomicStoreN(intrinsiccallNode);
+      return;
+    case INTRN_vector_zip_v8u8: case INTRN_vector_zip_v8i8:
+    case INTRN_vector_zip_v4u16: case INTRN_vector_zip_v4i16:
+    case INTRN_vector_zip_v2u32: case INTRN_vector_zip_v2i32:
+      SelectVectorZip(intrinsiccallNode.Opnd(0)->GetPrimType(),
+                      HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0)),
+                      HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1)));
+      return;
+    case INTRN_C_stack_save:
+      return;
+    case INTRN_C_stack_restore:
+      return;
+    default:
+      break;
+  }
+  std::vector<Operand*> operands; /* Temporary. Deallocated on return.
*/ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + for (size_t i = 0; i < intrinsiccallNode.NumOpnds(); i++) { + BaseNode *argExpr = intrinsiccallNode.Opnd(i); + Operand *opnd = HandleExpr(intrinsiccallNode, *argExpr); + operands.emplace_back(opnd); + if (!opnd->IsRegister()) { + opnd = &LoadIntoRegister(*opnd, argExpr->GetPrimType()); + } + RegOperand *expRegOpnd = static_cast(opnd); + srcOpnds->PushOpnd(*expRegOpnd); + } + CallReturnVector *retVals = &intrinsiccallNode.GetReturnVec(); + + switch (intrinsic) { + case INTRN_MPL_ATOMIC_EXCHANGE_PTR: { + BB *origFtBB = GetCurBB()->GetNext(); + Operand *loc = operands[kInsnFirstOpnd]; + Operand *newVal = operands[kInsnSecondOpnd]; + Operand *memOrd = operands[kInsnThirdOpnd]; + + MemOrd ord = OperandToMemOrd(*memOrd); + bool isAcquire = MemOrdIsAcquire(ord); + bool isRelease = MemOrdIsRelease(ord); + + const PrimType kValPrimType = PTY_a64; + + RegOperand &locReg = LoadIntoRegister(*loc, PTY_a64); + /* Because there is no live analysis when -O1 */ + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + locReg.SetRegNotBBLocal(); + } + MemOperand &locMem = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, + k64BitSize, &locReg, nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); + RegOperand &newValReg = LoadIntoRegister(*newVal, PTY_a64); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + newValReg.SetRegNotBBLocal(); + } + GetCurBB()->SetKind(BB::kBBFallthru); + + LabelIdx retryLabIdx = CreateLabeledBB(intrinsiccallNode); + + RegOperand *oldVal = SelectLoadExcl(kValPrimType, locMem, isAcquire); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + oldVal->SetRegNotBBLocal(); + } + RegOperand *succ = SelectStoreExcl(kValPrimType, locMem, newValReg, isRelease); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + succ->SetRegNotBBLocal(); + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *succ, GetOrCreateLabelOperand(retryLabIdx))); + GetCurBB()->SetKind(BB::kBBIntrinsic); + GetCurBB()->SetNext(origFtBB); + + SaveReturnValueInLocal(*retVals, 0, kValPrimType, *oldVal, intrinsiccallNode); + break; + } + case INTRN_GET_AND_ADDI: { + IntrinsifyGetAndAddInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_GET_AND_ADDL: { + IntrinsifyGetAndAddInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_GET_AND_SETI: { + IntrinsifyGetAndSetInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_GET_AND_SETL: { + IntrinsifyGetAndSetInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_COMP_AND_SWAPI: { + IntrinsifyCompareAndSwapInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_COMP_AND_SWAPL: { + IntrinsifyCompareAndSwapInt(*srcOpnds, PTY_i64); + break; + } + default: { + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the AArch64 CG.", intrinsic, GetIntrinsicName(intrinsic)); + break; + } + } +} + +Operand *AArch64CGFunc::SelectCclz(IntrinsicopNode &intrnNode) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + MOperator mop; + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + if (GetPrimTypeSize(ptype) == k4ByteSize) { + mop = MOP_wclz; + } else 
{ + mop = MOP_xclz; + } + RegOperand &dst = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dst, *opnd)); + return &dst; +} + +Operand *AArch64CGFunc::SelectCctz(IntrinsicopNode &intrnNode) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + MOperator clzmop; + MOperator rbitmop; + if (GetPrimTypeSize(ptype) == k4ByteSize) { + clzmop = MOP_wclz; + rbitmop = MOP_wrbit; + } else { + clzmop = MOP_xclz; + rbitmop = MOP_xrbit; + } + RegOperand &dst1 = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(rbitmop, dst1, *opnd)); + RegOperand &dst2 = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(clzmop, dst2, dst1)); + return &dst2; +} + +Operand *AArch64CGFunc::SelectCpopcount(IntrinsicopNode &intrnNode) { + CHECK_FATAL(false, "%s NIY", intrnNode.GetIntrinDesc().name); + return nullptr; +} + +Operand *AArch64CGFunc::SelectCparity(IntrinsicopNode &intrnNode) { + CHECK_FATAL(false, "%s NIY", intrnNode.GetIntrinDesc().name); + return nullptr; +} + +Operand *AArch64CGFunc::SelectCclrsb(IntrinsicopNode &intrnNode) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + bool is32Bit = (GetPrimTypeSize(ptype) == k4ByteSize); + RegOperand &res = CreateRegisterOperandOfType(ptype); + SelectMvn(res, *opnd, ptype); + SelectAArch64Cmp(*opnd, GetZeroOpnd(is32Bit ? k32BitSize : k64BitSize), true, is32Bit ? k32BitSize : k64BitSize); + SelectAArch64Select(*opnd, res, *opnd, GetCondOperand(CC_LT), true, is32Bit ? k32BitSize : k64BitSize); + MOperator clzmop = (is32Bit ? MOP_wclz : MOP_xclz); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(clzmop, *opnd, *opnd)); + SelectSub(*opnd, *opnd, CreateImmOperand(1, is32Bit ? 
k32BitSize : k64BitSize, true), ptype); + return opnd; +} + +Operand *AArch64CGFunc::SelectCisaligned(IntrinsicopNode &intrnNode) { + BaseNode *argexpr0 = intrnNode.Opnd(0); + PrimType ptype0 = argexpr0->GetPrimType(); + Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + + RegOperand &ldDest0 = CreateRegisterOperandOfType(ptype0); + if (opnd0->IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype0), ptype0), ldDest0, *opnd0)); + opnd0 = &ldDest0; + } else if (opnd0->IsImmediate()) { + SelectCopyImm(ldDest0, *static_cast(opnd0), ptype0); + opnd0 = &ldDest0; + } + + BaseNode *argexpr1 = intrnNode.Opnd(1); + PrimType ptype1 = argexpr1->GetPrimType(); + Operand *opnd1 = HandleExpr(intrnNode, *argexpr1); + + RegOperand &ldDest1 = CreateRegisterOperandOfType(ptype1); + if (opnd1->IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype1), ptype1), ldDest1, *opnd1)); + opnd1 = &ldDest1; + } else if (opnd1->IsImmediate()) { + SelectCopyImm(ldDest1, *static_cast(opnd1), ptype1); + opnd1 = &ldDest1; + } + // mov w4, #1 + RegOperand ®0 = CreateRegisterOperandOfType(PTY_i32); + SelectCopyImm(reg0, CreateImmOperand(1, k32BitSize, true), PTY_i32); + // sxtw x4, w4 + MOperator mOp = MOP_xsxtw64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, reg0, reg0)); + // sub x3, x3, x4 + SelectSub(*opnd1, *opnd1, reg0, ptype1); + // and x2, x2, x3 + SelectBand(*opnd0, *opnd0, *opnd1, ptype1); + // mov w3, #0 + // sxtw x3, w3 + // cmp x2, x3 + SelectAArch64Cmp(*opnd0, GetZeroOpnd(k64BitSize), true, k64BitSize); + // cset w2, EQ + SelectAArch64CSet(*opnd0, GetCondOperand(CC_EQ), false); + return opnd0; +} + +void AArch64CGFunc::SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, + PrimType primType, Opcode op) { + switch (op) { + case OP_add: + SelectAdd(resOpnd, opnd0, opnd1, primType); + break; + case OP_sub: + SelectSub(resOpnd, opnd0, opnd1, primType); + break; + case OP_band: + SelectBand(resOpnd, opnd0, opnd1, primType); + break; + case OP_bior: + SelectBior(resOpnd, opnd0, opnd1, primType); + break; + case OP_bxor: + SelectBxor(resOpnd, opnd0, opnd1, primType); + break; + default: + CHECK_FATAL(false, "unconcerned opcode for arithmetical and logical insns"); + break; + } +} + +Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + auto primType = intrinopNode.GetPrimType(); + /* Create BB which includes atomic built_in function */ + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + /* keep variables inside same BB */ + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* handle built_in args */ + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* load from pointed address */ + auto primTypeP2Size = 
GetPrimTypeP2Size(primType); + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + /* update loaded value */ + auto *regOperated = &CreateRegisterOperandOfType(primType); + SelectArithmeticAndLogical(*regOperated, *regLoaded, *valueOpnd, primType, op); + /* store to pointed address */ + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *regOperated, memOpnd)); + /* check the exclusive accsess status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + atomicBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + return fetchBefore ? regLoaded : regOperated; +} + +Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, bool retBool) { + PrimType primType = intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType(); + DEBUG_ASSERT(primType == intrinopNode.GetNopndAt(kInsnThirdOpnd)->GetPrimType(), "gcc built_in rule"); + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* handle built_in args */ + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *oldVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *newVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnThirdOpnd)); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + + uint32 primTypeP2Size = GetPrimTypeP2Size(primType); + /* ldxr */ + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + Operand *regExtend = &CreateRegisterOperandOfType(primType); + PrimType targetType = (oldVal->GetSize() <= k32BitSize) ? + (IsSignedInteger(primType) ? PTY_i32 : PTY_u32) : (IsSignedInteger(primType) ? 
PTY_i64 : PTY_u64); + SelectCvtInt2Int(nullptr, regExtend, regLoaded, primType, targetType); + /* cmp */ + SelectAArch64Cmp(*regExtend, *oldVal, true, oldVal->GetSize()); + /* bne */ + Operand &rflag = GetOrCreateRflag(); + LabelIdx nextBBLableIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(nextBBLableIdx); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, rflag, targetOpnd)); + /* stlxr */ + BB *stlxrBB = CreateNewBB(); + stlxrBB->SetKind(BB::kBBIf); + atomicBB->AppendBB(*stlxrBB); + SetCurBB(*stlxrBB); + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto &newRegVal = LoadIntoRegister(*newVal, primType); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + stlxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, newRegVal, memOpnd)); + /* cbnz ==> check the exclusive accsess status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + stlxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + nextBB->AddLabel(nextBBLableIdx); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + SetLab2BBMap(static_cast(nextBBLableIdx), *nextBB); + stlxrBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + /* bool version return true if the comparison is successful and newval is written */ + if (retBool) { + auto *retOpnd = &CreateRegisterOperandOfType(PTY_u32); + SelectAArch64CSet(*retOpnd, GetCondOperand(CC_EQ), false); + return retOpnd; + } + /* type version return the contents of *addrOpnd before the operation */ + return regLoaded; +} + +Operand *AArch64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + return SelectAArch64CSyncFetch(intrinopNode, op, fetchBefore); +} + +Operand *AArch64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) { + return SelectCSyncCmpSwap(intrinopNode, true); +} + +Operand *AArch64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) { + return SelectCSyncCmpSwap(intrinopNode); +} + +Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) { + auto primType = intrinopNode.GetPrimType(); + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + + /* Create BB which includes atomic built_in function */ + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + SetCurBB(*atomicBB); + /* load from pointed address */ + auto primTypeP2Size = GetPrimTypeP2Size(primType); + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + /* store to pointed address */ + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, false); + 
atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *valueOpnd, memOpnd)); + /* check the exclusive accsess status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + atomicBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + return regLoaded; +} + +void AArch64CGFunc::SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType) { + auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.GetNopndAt(kInsnFirstOpnd)); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + auto mOp = PickStInsn(primTypeBitSize, primType, AArch64isa::kMoRelease); + auto &zero = GetZeroOpnd(primTypeBitSize); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, primTypeBitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, zero, memOpnd)); +} + +Operand *AArch64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinopNode) { + (void)intrinopNode; + CHECK_FATAL(false, "have not implement SelectCSyncSynchronize yet"); + return nullptr; +} + +AArch64isa::MemoryOrdering AArch64CGFunc::PickMemOrder(std::memory_order memOrder, bool isLdr) const { + switch (memOrder) { + case std::memory_order_relaxed: + return AArch64isa::kMoNone; + case std::memory_order_consume: + case std::memory_order_acquire: + return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoNone; + case std::memory_order_release: + return isLdr ? AArch64isa::kMoNone : AArch64isa::kMoRelease; + case std::memory_order_acq_rel: + case std::memory_order_seq_cst: + return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoRelease; + default: + CHECK_FATAL(false, "unexpected memorder"); + return AArch64isa::kMoNone; + } +} + +/* + * regassign %1 (intrinsicop C___Atomic_Load_N(ptr, memorder)) + * ====> %1 = *ptr + * let %1 -> x0 + * let ptr -> x1 + * implement to asm: ldr/ldar x0, [x1] + * a load-acquire would replace ldr if memorder is not 0 + */ +Operand *AArch64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) { + auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); + auto *memOrderOpnd = intrinsicopNode.Opnd(1); + auto primType = intrinsicopNode.GetPrimType(); + auto *memOrderConst = static_cast(static_cast(memOrderOpnd)->GetConstVal()); + auto memOrder = static_cast(memOrderConst->GetExtValue()); + return SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true)); +} + +/* + * regassign %1 (intrinsicop C___Atomic_exchange_n(ptr, val, memorder)) + * ====> %1 = *ptr; *ptr = val; + * let %1 -> x0 + * let ptr -> x1 + * let val -> x2 + * implement to asm: + * ldr/ldar x0, [x1] + * str/stlr x2, [x1] + * a load-acquire would replace ldr if acquire needed + * a store-relase would replace str if release needed + */ +Operand *AArch64CGFunc::SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) { + auto primType = intrinsicopNode.GetPrimType(); + auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); + auto *valueOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(1)); + auto *memOrderOpnd = intrinsicopNode.Opnd(kInsnThirdOpnd); + auto *memOrderConst = static_cast(static_cast(memOrderOpnd)->GetConstVal()); + auto memOrder = static_cast(memOrderConst->GetExtValue()); + auto *result = SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true)); + SelectAtomicStore(*valueOpnd, 
*addrOpnd, primType, PickMemOrder(memOrder, false));
+  return result;
+}
+
+Operand *AArch64CGFunc::SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder) {
+  auto mOp = PickLdInsn(GetPrimTypeBitSize(primType), primType, memOrder);
+  auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize);
+  auto *resultOpnd = &CreateRegisterOperandOfType(primType);
+  GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resultOpnd, memOpnd));
+  return resultOpnd;
+}
+
+Operand *AArch64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) {
+  if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_extract_return_addr) {
+    DEBUG_ASSERT(intrinopNode.GetNumOpnds() == 1, "expect one parameter");
+    Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd));
+    return &LoadIntoRegister(*addrOpnd, PTY_a64);
+  } else if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_return_address) {
+    BaseNode *argexpr0 = intrinopNode.Opnd(0);
+    while (!argexpr0->IsLeaf()) {
+      argexpr0 = argexpr0->Opnd(0);
+    }
+    CHECK_FATAL(argexpr0->IsConstval(), "Invalid argument of __builtin_return_address");
+    auto &constNode = static_cast<ConstvalNode&>(*argexpr0);
+    DEBUG_ASSERT(constNode.GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst; floats are not supported yet");
+    MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(constNode.GetConstVal());
+    DEBUG_ASSERT(mirIntConst != nullptr, "nullptr checking");
+    int64 scale = mirIntConst->GetExtValue();
+    /*
+     * Getting the return address with a nonzero argument is not supported;
+     * inline / tail call opt would destroy this behavior.
+     */
+    CHECK_FATAL(scale == 0, "Do not support recursion");
+    Operand *resReg = &static_cast<Operand&>(CreateRegisterOperandOfType(PTY_i64));
+    SelectCopy(*resReg, PTY_i64, GetOrCreatePhysicalRegisterOperand(RLR, k64BitSize, kRegTyInt), PTY_i64);
+    return resReg;
+  }
+  return nullptr;
+}
+
+Operand *AArch64CGFunc::SelectCalignup(IntrinsicopNode &intrnNode) {
+  return SelectAArch64align(intrnNode, true);
+}
+
+Operand *AArch64CGFunc::SelectCaligndown(IntrinsicopNode &intrnNode) {
+  return SelectAArch64align(intrnNode, false);
+}
+
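+/* SelectAArch64align below computes, for a power-of-two alignment a:
+ *   align_up(x, a)   = (x + a - 1) & ~(a - 1)
+ *   align_down(x, a) =  x          & ~(a - 1)
+ * e.g. align_up(13, 8) = (13 + 7) & ~7 = 16 and align_down(13, 8) = 8.
+ */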
+Operand *AArch64CGFunc::SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp) {
+  /* Handle two args */
+  BaseNode *argexpr0 = intrnNode.Opnd(0);
+  PrimType ptype0 = argexpr0->GetPrimType();
+  Operand *opnd0 = HandleExpr(intrnNode, *argexpr0);
+  PrimType resultPtype = intrnNode.GetPrimType();
+  RegOperand &ldDest0 = LoadIntoRegister(*opnd0, ptype0);
+
+  BaseNode *argexpr1 = intrnNode.Opnd(1);
+  PrimType ptype1 = argexpr1->GetPrimType();
+  Operand *opnd1 = HandleExpr(intrnNode, *argexpr1);
+  RegOperand &arg1 = LoadIntoRegister(*opnd1, ptype1);
+  DEBUG_ASSERT(IsPrimitiveInteger(ptype0) && IsPrimitiveInteger(ptype1), "align integer type only");
+  Operand *ldDest1 = &static_cast<Operand&>(CreateRegisterOperandOfType(ptype0));
+  SelectCvtInt2Int(nullptr, ldDest1, &arg1, ptype1, ptype0);
+
+  Operand *resultReg = &static_cast<Operand&>(CreateRegisterOperandOfType(ptype0));
+  Operand &immReg = CreateImmOperand(1, GetPrimTypeBitSize(ptype0), true);
+  /* Do alignment: x0 -- value to be aligned, x1 -- alignment */
+  if (isUp) {
+    /* add res, x0, x1 */
+    SelectAdd(*resultReg, ldDest0, *ldDest1, ptype0);
+    /* sub res, res, 1 */
+    SelectSub(*resultReg, *resultReg, immReg, ptype0);
+  }
+  Operand *tempReg = &static_cast<Operand&>(CreateRegisterOperandOfType(ptype0));
+  /* sub temp, x1, 1 */
+  SelectSub(*tempReg, *ldDest1, immReg, ptype0);
+  /* mvn temp, temp */
+  SelectMvn(*tempReg, *tempReg, ptype0);
+  /* and res, res, temp */
+  if (isUp) {
+    SelectBand(*resultReg, *resultReg, *tempReg, ptype0);
+  } else {
+    SelectBand(*resultReg, ldDest0, *tempReg, ptype0);
+  }
+  if (resultPtype != ptype0) {
+    SelectCvtInt2Int(&intrnNode, resultReg, resultReg, ptype0, resultPtype);
+  }
+  return resultReg;
+}
+
+/*
+ * NOTE: consider moving the following things into aarch64_cg.cpp. They may
+ * serve not only intrinsics, but other MapleIR instructions as well.
+ * Do it as if we are adding a label in straight-line assembly code.
+ */
+LabelIdx AArch64CGFunc::CreateLabeledBB(StmtNode &stmt) {
+  LabelIdx labIdx = CreateLabel();
+  BB *newBB = StartNewBBImpl(false, stmt);
+  newBB->AddLabel(labIdx);
+  SetLab2BBMap(labIdx, *newBB);
+  SetCurBB(*newBB);
+  return labIdx;
+}
+
+/* Save value into the local variable for the index-th return value. */
+void AArch64CGFunc::SaveReturnValueInLocal(CallReturnVector &retVals, size_t index, PrimType primType, Operand &value,
+                                           StmtNode &parentStmt) {
+  CallReturnPair &pair = retVals.at(index);
+  BB tempBB(static_cast<uint32>(-1), *GetFuncScopeAllocator());
+  BB *realCurBB = GetCurBB();
+  CHECK_FATAL(!pair.second.IsReg(), "NYI");
+  Operand *destOpnd = &value;
+  /* for O0, cross-BB vars are not supported, so do an extra store/load,
+   * though why a new BB is needed here is unclear */
+  if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
+    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(pair.first);
+    MIRType *sPty = symbol->GetType();
+    PrimType ty = symbol->GetType()->GetPrimType();
+    if (sPty->GetKind() == kTypeStruct || sPty->GetKind() == kTypeUnion) {
+      MIRStructType *structType = static_cast<MIRStructType*>(sPty);
+      ty = structType->GetFieldType(pair.second.GetFieldID())->GetPrimType();
+    } else if (sPty->GetKind() == kTypeClass) {
+      CHECK_FATAL(false, "unsupported type for inline asm / intrinsic");
+    }
+    RegOperand &tempReg = CreateVirtualRegisterOperand(NewVReg(GetRegTyFromPrimTy(ty), GetPrimTypeSize(ty)));
+    SelectCopy(tempReg, ty, value, ty);
+    destOpnd = &tempReg;
+  }
+  SetCurBB(tempBB);
+  SelectDassign(pair.first, pair.second.GetFieldID(), primType, *destOpnd);
+
+  CHECK_FATAL(realCurBB->GetNext() == nullptr, "the current BB must not have a next BB");
+  realCurBB->SetLastStmt(parentStmt);
+  realCurBB->SetNext(StartNewBBImpl(true, parentStmt));
+  realCurBB->GetNext()->SetKind(BB::kBBFallthru);
+  realCurBB->GetNext()->SetPrev(realCurBB);
+
+  realCurBB->GetNext()->InsertAtBeginning(*GetCurBB());
+  /* restore it */
+  SetCurBB(*realCurBB->GetNext());
+}
+
+/* The following are translations of LL/SC and atomic RMW operations */
+MemOrd AArch64CGFunc::OperandToMemOrd(Operand &opnd) const {
+  CHECK_FATAL(opnd.IsImmediate(), "Memory order must be an int constant.");
+  auto immOpnd = static_cast<ImmOperand*>(&opnd);
+  int32 val = immOpnd->GetValue();
+  CHECK_FATAL(val >= 0, "val must be non-negative");
+  return MemOrdFromU32(static_cast<uint32>(val));
+}
+
+/*
+ * Generate an ldxr/ldaxr or stxr/stlxr instruction.
+ * byte_p2x: power-of-2 size of operand in bytes (0: 1B, 1: 2B, 2: 4B, 3: 8B).
+ */ +MOperator AArch64CGFunc::PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bool acqRel) const { + CHECK_FATAL(byteP2Size < kIntByteSizeDimension, "Illegal argument p2size: %d", byteP2Size); + + static MOperator operators[4][2][2] = { { { MOP_wldxrb, MOP_wldaxrb }, { MOP_wstxrb, MOP_wstlxrb } }, + { { MOP_wldxrh, MOP_wldaxrh }, { MOP_wstxrh, MOP_wstlxrh } }, + { { MOP_wldxr, MOP_wldaxr }, { MOP_wstxr, MOP_wstlxr } }, + { { MOP_xldxr, MOP_xldaxr }, { MOP_xstxr, MOP_xstlxr } } }; + + MOperator optr = operators[byteP2Size][store][acqRel]; + CHECK_FATAL(optr != MOP_undef, "Unsupported type p2size: %d", byteP2Size); + + return optr; +} + +RegOperand *AArch64CGFunc::SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire) { + uint32 p2size = GetPrimTypeP2Size(valPrimType); + + RegOperand &result = CreateRegisterOperandOfType(valPrimType); + MOperator mOp = PickLoadStoreExclInsn(p2size, false, acquire); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, loc)); + + return &result; +} + +RegOperand *AArch64CGFunc::SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release) { + uint32 p2size = GetPrimTypeP2Size(valPty); + + /* the result (success/fail) is to be stored in a 32-bit register */ + RegOperand &result = CreateRegisterOperandOfType(PTY_u32); + + MOperator mOp = PickLoadStoreExclInsn(p2size, true, release); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, newVal, loc)); + + return &result; +} + +RegType AArch64CGFunc::GetRegisterType(regno_t reg) const { + if (AArch64isa::IsPhysicalRegister(reg)) { + return AArch64isa::GetRegType(static_cast(reg)); + } else if (reg == kRFLAG) { + return kRegTyCc; + } else { + return CGFunc::GetRegisterType(reg); + } +} + +MemOperand &AArch64CGFunc::LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int dataSize) { + /* For struct formals > 16 bytes, this is the pointer to the struct copy. */ + /* Load the base pointer first. */ + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + MemOperand *baseMemOpnd = &GetOrCreateMemOpnd(symbol, 0, k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *baseMemOpnd)); + /* Create the indirect load mem opnd from the base pointer. */ + return CreateMemOpnd(*vreg, offset, static_cast(dataSize)); +} + + /* For long branch, insert an unconditional branch. 
+ *      From                        To
+ *   cond_br target_label        reverse_cond_br fallthru_label
+ *   fallthruBB                  unconditional br target_label
+ *                               fallthru_label:
+ *                               fallthruBB
+ */
+void AArch64CGFunc::InsertJumpPad(Insn *insn) {
+  BB *bb = insn->GetBB();
+  DEBUG_ASSERT(bb, "instruction has no bb");
+  DEBUG_ASSERT(bb->GetKind() == BB::kBBIf || bb->GetKind() == BB::kBBGoto,
+               "instruction is in neither if bb nor goto bb");
+  if (bb->GetKind() == BB::kBBGoto) {
+    return;
+  }
+  DEBUG_ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors");
+
+  BB *longBrBB = CreateNewBB();
+
+  BB *fallthruBB = bb->GetNext();
+  LabelIdx fallthruLBL = fallthruBB->GetLabIdx();
+  if (fallthruLBL == 0) {
+    fallthruLBL = CreateLabel();
+    SetLab2BBMap(static_cast<int32>(fallthruLBL), *fallthruBB);
+    fallthruBB->AddLabel(fallthruLBL);
+  }
+
+  BB *targetBB;
+  if (bb->GetSuccs().front() == fallthruBB) {
+    targetBB = bb->GetSuccs().back();
+  } else {
+    targetBB = bb->GetSuccs().front();
+  }
+  LabelIdx targetLBL = targetBB->GetLabIdx();
+  if (targetLBL == 0) {
+    targetLBL = CreateLabel();
+    SetLab2BBMap(static_cast<int32>(targetLBL), *targetBB);
+    targetBB->AddLabel(targetLBL);
+  }
+
+  // Adjust the branch and the CFG
+  bb->RemoveSuccs(*targetBB);
+  bb->PushBackSuccs(*longBrBB);
+  bb->SetNext(longBrBB);
+  // reverse cond br targeting fallthruBB
+  uint32 targetIdx = AArch64isa::GetJumpTargetIdx(*insn);
+  MOperator mOp = AArch64isa::FlipConditionOp(insn->GetMachineOpcode());
+  insn->SetMOP(AArch64CG::kMd[mOp]);
+  LabelOperand &fallthruBBLBLOpnd = GetOrCreateLabelOperand(fallthruLBL);
+  insn->SetOperand(targetIdx, fallthruBBLBLOpnd);
+
+  longBrBB->PushBackPreds(*bb);
+  longBrBB->PushBackSuccs(*targetBB);
+  LabelOperand &targetLBLOpnd = GetOrCreateLabelOperand(targetLBL);
+  longBrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetLBLOpnd));
+  longBrBB->SetPrev(bb);
+  longBrBB->SetNext(fallthruBB);
+  longBrBB->SetKind(BB::kBBGoto);
+
+  fallthruBB->SetPrev(longBrBB);
+
+  targetBB->RemovePreds(*bb);
+  targetBB->PushBackPreds(*longBrBB);
+}
+
+RegOperand *AArch64CGFunc::AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd) {
+  RegOperand *resCvt = &CreateRegisterOperandOfType(oType);
+  Insn *insnCvt = &GetInsnBuilder()->BuildInsn(MOP_xvmovrd, *resCvt, *opnd);
+  GetCurBB()->AppendInsn(*insnCvt);
+  return resCvt;
+}
+
+RegOperand *AArch64CGFunc::SelectOneElementVectorCopy(Operand *src, PrimType sType) {
+  RegOperand *res = &CreateRegisterOperandOfType(PTY_f64);
+  SelectCopy(*res, PTY_f64, *src, sType);
+  static_cast<RegOperand*>(res)->SetIF64Vec();
+  return res;
+}
+
+RegOperand *AArch64CGFunc::SelectVectorAbs(PrimType rType, Operand *o1) {
+  RegOperand *res = &CreateRegisterOperandOfType(rType);  /* result operand */
+  VectorRegSpec *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(rType);
+  VectorRegSpec *vecSpec1 = GetMemoryPool()->New<VectorRegSpec>(rType);  /* vector operand 1 */
+
+  MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ?
MOP_vabsvv : MOP_vabsuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, + PrimType otyp, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result type */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp); /* vector operand 2 */ + MOperator mOp; + if (isLow) { + mOp = IsUnsignedInteger(rType) ? MOP_vuaddlvuu : MOP_vsaddlvuu; + } else { + mOp = IsUnsignedInteger(rType) ? MOP_vuaddl2vvv : MOP_vsaddl2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(otyp1); /* restype is same as o1 */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(otyp1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp2); /* vector operand 2 */ + + MOperator mOp; + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vuaddwvvu : MOP_vsaddwvvu; + } else { + mOp = IsUnsignedInteger(otyp1) ? MOP_vuaddw2vvv : MOP_vsaddw2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorImmMov(PrimType rType, Operand *src, PrimType sType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + int64 val = static_cast(src)->GetValue(); + /* copy the src imm operand to a reg if out of range */ + if ((GetVecEleSize(rType) >= k64BitSize) || + (GetPrimTypeSize(sType) > k4ByteSize && val != 0) || + (val < kMinImmVal || val > kMaxImmVal)) { + Operand *reg = &CreateRegisterOperandOfType(sType); + SelectCopy(*reg, sType, *src, sType); + return SelectVectorRegMov(rType, reg, sType); + } + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; + if (GetVecEleSize(rType) == k8BitSize && val < 0) { + src = &CreateImmOperand(static_cast(val), k8BitSize, true); + } else if (val < 0) { + src = &CreateImmOperand(-(val + 1), k8BitSize, true); + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vnotvi : MOP_vnotui; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorRegMov(PrimType rType, Operand *src, PrimType sType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + + MOperator mOp; + if (GetPrimTypeSize(sType) > k4ByteSize) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxdupvr : MOP_vxdupur; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vwdupvr : MOP_vwdupur; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorFromScalar(PrimType rType, Operand *src, PrimType sType) { + if (!IsPrimitiveVector(rType)) { + return SelectOneElementVectorCopy(src, sType); + } else if (src->IsConstImmediate()) { + return SelectVectorImmMov(rType, src, sType); + } else { + return SelectVectorRegMov(rType, src, sType); + } +} + +RegOperand *AArch64CGFunc::SelectVectorDup(PrimType rType, Operand *src, bool getLow) { + PrimType oType = rType; + rType = FilterOneElementVectorType(oType); + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(k2ByteSize, k64BitSize, getLow ? 0 : 1); + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vduprv, AArch64CG::kMd[MOP_vduprv]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (oType != rType) { + res = AdjustOneElementVectorOperand(oType, res); + static_cast(res)->SetIF64Vec(); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType, lane); /* vector operand */ + + MOperator mop; + if (!IsPrimitiveVector(sType)) { + mop = MOP_xmovrr; + } else if (GetPrimTypeBitSize(rType) >= k64BitSize) { + mop = MOP_vxmovrv; + } else { + mop = (GetPrimTypeBitSize(sType) > k64BitSize) ? MOP_vwmovrv : MOP_vwmovru; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +/* adalp o1, o2 instruction accumulates into o1, overwriting the original operand. + Hence we perform c = vadalp(a,b) as + T tmp = a; + return tmp+b; + The return value of vadalp is then assigned to c, leaving value of a intact. 
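+ * e.g. with hypothetical values a = {1, 1, 1, 1} (4 x int16) and
+ * b = {1, 2, 3, 4, 5, 6, 7, 8} (8 x int8), c = vpadal_s8(a, b) yields
+ * {1 + (1+2), 1 + (3+4), 1 + (5+6), 1 + (7+8)} = {4, 8, 12, 16}, with a intact.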
+ */ +RegOperand *AArch64CGFunc::SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, + Operand *src2, PrimType sty2) { + VectorRegSpec *vecSpecDest; + RegOperand *res; + + if (!IsPrimitiveVector(sty1)) { + RegOperand *resF = SelectOneElementVectorCopy(src1, sty1); + res = &CreateRegisterOperandOfType(PTY_f64); + SelectCopy(*res, PTY_f64, *resF, PTY_f64); + vecSpecDest = GetMemoryPool()->New(k1ByteSize, k64BitSize); + } else { + res = &CreateRegisterOperandOfType(sty1); /* result type same as sty1 */ + SelectCopy(*res, sty1, *src1, sty1); + vecSpecDest = GetMemoryPool()->New(sty1); + } + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sty2); + + MOperator mop; + if (IsUnsignedInteger(sty1)) { + mop = GetPrimTypeSize(sty1) > k8ByteSize ? MOP_vupadalvv : MOP_vupadaluu; + } else { + mop = GetPrimTypeSize(sty1) > k8ByteSize ? MOP_vspadalvv : MOP_vspadaluu; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*src2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (!IsPrimitiveVector(sty1)) { + res = AdjustOneElementVectorOperand(sty1, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) { + PrimType oType = rType; + rType = FilterOneElementVectorType(oType); + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType); /* source operand */ + + if (rType == PTY_f64) { + vecSpecDest->vecLaneMax = 1; + } + + MOperator mop; + if (IsUnsignedInteger(sType)) { + mop = GetPrimTypeSize(sType) > k8ByteSize ? MOP_vupaddvv : MOP_vupadduu; + } else { + mop = GetPrimTypeSize(sType) > k8ByteSize ? MOP_vspaddvv : MOP_vspadduu; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + /* dest pushed first, popped first */ + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (oType != rType) { + res = AdjustOneElementVectorOperand(oType, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSetElement(Operand *eOpnd, PrimType eType, Operand *vOpnd, + PrimType vType, int32 lane) { + if (!IsPrimitiveVector(vType)) { + return SelectOneElementVectorCopy(eOpnd, eType); + } + RegOperand *reg = &CreateRegisterOperandOfType(eType); /* vector element type */ + SelectCopy(*reg, eType, *eOpnd, eType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(vType, lane); /* vector operand == result */ + + MOperator mOp; + if (GetPrimTypeSize(eType) > k4ByteSize) { + mOp = GetPrimTypeSize(vType) > k8ByteSize ? MOP_vxinsvr : MOP_vxinsur; + } else { + mOp = GetPrimTypeSize(vType) > k8ByteSize ? 
MOP_vwinsvr : MOP_vwinsur; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*vOpnd).AddOpndChain(*reg); + vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return static_cast(vOpnd); +} + +RegOperand *AArch64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, + PrimType oTy, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd1 = GetMemoryPool()->New(oTy); + VectorRegSpec *vecSpecOpd2 = GetMemoryPool()->New(oTy); /* same opnd types */ + + MOperator mop; + if (isLow) { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vuabdlvuu : MOP_vsabdlvuu; + } else { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vuabdl2vvv : MOP_vsabdl2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecOpd1).PushRegSpecEntry(vecSpecOpd2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMerge(PrimType rType, Operand *o1, Operand *o2, int32 index) { + if (!IsPrimitiveVector(rType)) { + static_cast(o1)->SetIF64Vec(); + return static_cast(o1); /* 64x1_t, index equals 0 */ + } + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd1 = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd2 = GetMemoryPool()->New(rType); + + ImmOperand *imm = &CreateImmOperand(index, k8BitSize, true); + + MOperator mOp = (GetPrimTypeSize(rType) > k8ByteSize) ? MOP_vextvvvi : MOP_vextuuui; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecOpd1).PushRegSpecEntry(vecSpecOpd2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorReverse(PrimType rType, Operand *src, PrimType sType, uint32 size) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType); /* vector operand */ + + MOperator mOp; + if (GetPrimTypeBitSize(rType) == k128BitSize) { + mOp = size >= k64BitSize ? MOP_vrev64qq : (size >= k32BitSize ? MOP_vrev32qq : MOP_vrev16qq); + } else if (GetPrimTypeBitSize(rType) == k64BitSize) { + mOp = size >= k64BitSize ? MOP_vrev64dd : (size >= k32BitSize ? MOP_vrev32dd : MOP_vrev16dd); + } else { + CHECK_FATAL(false, "should not be here"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSum(PrimType rType, Operand *o1, PrimType oType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* uint32_t result */ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); + RegOperand *iOpnd = &CreateRegisterOperandOfType(oType); /* float intermediate result */ + uint32 eSize = GetVecEleSize(oType); /* vector opd in bits */ + bool is16ByteVec = GetPrimTypeSize(oType) >= k16ByteSize; + MOperator mOp; + if (is16ByteVec) { + mOp = eSize <= k8BitSize ? 
MOP_vbaddvrv : (eSize <= k16BitSize ? MOP_vhaddvrv : + (eSize <= k32BitSize ? MOP_vsaddvrv : MOP_vdaddvrv)); + } else { + mOp = eSize <= k8BitSize ? MOP_vbaddvru : (eSize <= k16BitSize ? MOP_vhaddvru : MOP_vsaddvru); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*iOpnd).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + + mOp = eSize > k32BitSize ? MOP_vxmovrv : MOP_vwmovrv; + VectorInsn &vInsn2 = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + auto *vecSpec2 = GetMemoryPool()->New(oType); + vInsn2.AddOpndChain(*res).AddOpndChain(*iOpnd); + vecSpec2->vecLane = 0; + vInsn2.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn2); + return res; +} + +void AArch64CGFunc::PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2) { + /* Only 1 operand can be non vector, otherwise it's a scalar operation, wouldn't come here */ + if (IsPrimitiveVector(oty1) == IsPrimitiveVector(oty2)) { + return; + } + PrimType origTyp = !IsPrimitiveVector(oty2) ? oty2 : oty1; + Operand *opd = !IsPrimitiveVector(oty2) ? *o2 : *o1; + PrimType rType = !IsPrimitiveVector(oty2) ? oty1 : oty2; /* Type to dup into */ + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + + bool immOpnd = false; + if (opd->IsConstImmediate()) { + int64 val = static_cast(opd)->GetValue(); + if (val >= kMinImmVal && val <= kMaxImmVal && GetVecEleSize(rType) < k64BitSize) { + immOpnd = true; + } else { + RegOperand *regOpd = &CreateRegisterOperandOfType(origTyp); + SelectCopyImm(*regOpd, origTyp, static_cast(*opd), origTyp); + opd = static_cast(regOpd); + } + } + + /* need dup to vector operand */ + MOperator mOp; + if (immOpnd) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; /* a const */ + } else { + if (GetPrimTypeSize(origTyp) > k4ByteSize) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxdupvr : MOP_vxdupur; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vwdupvr : MOP_vwdupur; /* a scalar var */ + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*opd); + vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + if (!IsPrimitiveVector(oty2)) { + *o2 = static_cast(res); + oty2 = rType; + } else { + *o1 = static_cast(res); + oty1 = rType; + } +} + +void AArch64CGFunc::SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType) { + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand 1 */ + + MOperator mOp; + VectorInsn *insn; + if (GetPrimTypeSize(rType) > GetPrimTypeSize(oType)) { + /* expand, similar to vmov_XX() intrinsics */ + mOp = IsUnsignedInteger(rType) ? 
MOP_vushllvvi : MOP_vshllvvi; + ImmOperand *imm = &CreateImmOperand(0, k8BitSize, true); + insn = &GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + insn->AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + } else if (GetPrimTypeSize(rType) < GetPrimTypeSize(oType)) { + /* extract, similar to vqmovn_XX() intrinsics */ + insn = &GetInsnBuilder()->BuildVectorInsn(MOP_vxtnuv, AArch64CG::kMd[MOP_vxtnuv]); + insn->AddOpndChain(*res).AddOpndChain(*o1); + } else { + CHECK_FATAL(0, "Invalid cvt between 2 operands of the same size"); + } + insn->PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(*insn); +} + +RegOperand *AArch64CGFunc::SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) { + if (IsUnsignedInteger(oty1) && (opc != OP_eq && opc != OP_ne)) { + return nullptr; /* no unsigned instr for zero */ + } + RegOperand *res = &CreateRegisterOperandOfType(oty1); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(oty1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* vector operand 1 */ + + MOperator mOp; + switch (opc) { + case OP_eq: + case OP_ne: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmeqvv : MOP_vzcmequu; + break; + case OP_gt: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmgtvv : MOP_vzcmgtuu; + break; + case OP_ge: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmgevv : MOP_vzcmgeuu; + break; + case OP_lt: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmltvv : MOP_vzcmltuu; + break; + case OP_le: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmlevv : MOP_vzcmleuu; + break; + default: + CHECK_FATAL(0, "Invalid cc in vector compare"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + if (opc == OP_ne) { + res = SelectVectorNot(oty1, res); + } + return res; +} + +/* Neon compare intrinsics always return unsigned vector, MapleIR for comparison always return + signed. Using type of 1st operand for operation here */ +RegOperand *AArch64CGFunc::SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) { + if (o2->IsConstImmediate() && static_cast(o2)->GetValue() == 0) { + RegOperand *zeroCmp = SelectVectorCompareZero(o1, oty1, o2, opc); + if (zeroCmp != nullptr) { + return zeroCmp; + } + } + PrepareVectorOperands(&o1, oty1, &o2, oty2); + DEBUG_ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(oty1); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(oty1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* vector operand 2 */ + + MOperator mOp; + switch (opc) { + case OP_eq: + case OP_ne: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmeqvvv : MOP_vcmequuu; + break; + case OP_lt: + case OP_gt: + if (IsUnsignedInteger(oty1)) { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmhivvv : MOP_vcmhiuuu; + } else { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmgtvvv : MOP_vcmgtuuu; + } + break; + case OP_le: + case OP_ge: + if (IsUnsignedInteger(oty1)) { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmhsvvv : MOP_vcmhsuuu; + } else { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? 
MOP_vcmgevvv : MOP_vcmgeuuu; + } + break; + default: + CHECK_FATAL(0, "Invalid cc in vector compare"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + if (opc == OP_lt || opc == OP_le) { + vInsn.AddOpndChain(*res).AddOpndChain(*o2).AddOpndChain(*o1); + } else { + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + } + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + if (opc == OP_ne) { + res = SelectVectorNot(oty1, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, + Operand *o2, PrimType oty2, Opcode opc) { + PrepareVectorOperands(&o1, oty1, &o2, oty2); + PrimType resultType = rType; + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + + if (!IsPrimitiveVector(rType)) { + o1 = &SelectCopy(*o1, rType, PTY_f64); + o2 = &SelectCopy(*o2, rType, PTY_f64); + resultType = PTY_f64; + } + RegOperand *res = &CreateRegisterOperandOfType(resultType); /* result operand */ + + /* signed and unsigned shl(v,v) both use sshl or ushl, they are the same */ + MOperator mOp; + if (IsPrimitiveUnsigned(rType)) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vushlvvv : MOP_vushluuu; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vshlvvv : MOP_vshluuu; + } + + if (opc != OP_shl) { + o2 = SelectVectorNeg(rType, o2); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +uint32 ValidShiftConst(PrimType rType) { + switch (rType) { + case PTY_v8u8: + case PTY_v8i8: + case PTY_v16u8: + case PTY_v16i8: + return k8BitSize; + case PTY_v4u16: + case PTY_v4i16: + case PTY_v8u16: + case PTY_v8i16: + return k16BitSize; + case PTY_v2u32: + case PTY_v2i32: + case PTY_v4u32: + case PTY_v4i32: + return k32BitSize; + case PTY_v2u64: + case PTY_v2i64: + return k64BitSize; + default: + CHECK_FATAL(0, "Invalid Shift operand type"); + } + return 0; +} + +RegOperand *AArch64CGFunc::SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + if (!imm->IsConstImmediate()) { + CHECK_FATAL(0, "VectorUShiftImm has invalid shift const"); + } + uint32 shift = static_cast(ValidShiftConst(rType)); + bool needDup = false; + if (opc == OP_shl) { + if ((shift == k8BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k16BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k32BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k64BitSize && (sVal < 0 || static_cast(sVal) >= shift))) { + needDup = true; + } + } else { + if ((shift == k8BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k16BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k32BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k64BitSize && (sVal < 1 || static_cast(sVal) > shift))) { + needDup = true; + } + } + if (needDup) { + /* 
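shift amount is outside the immediate-encodable range for this element size, so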
Dup constant to vector reg */ + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest); + GetCurBB()->AppendInsn(vInsn); + res = SelectVectorShift(rType, o1, rType, res, rType, opc); + return res; + } + MOperator mOp; + if (GetPrimTypeSize(rType) > k8ByteSize) { + if (IsUnsignedInteger(rType)) { + mOp = opc == OP_shl ? MOP_vushlvvi : MOP_vushrvvi; + } else { + mOp = opc == OP_shl ? MOP_vushlvvi : MOP_vshrvvi; + } + } else { + if (IsUnsignedInteger(rType)) { + mOp = opc == OP_shl ? MOP_vushluui : MOP_vushruui; + } else { + mOp = opc == OP_shl ? MOP_vushluui : MOP_vshruui; + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); /* 8B or 16B */ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + vecSpec1->compositeOpnds = 1; /* composite operand */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vtbl1vvv, AArch64CG::kMd[MOP_vtbl1vvv]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Operand *o3, PrimType oTyp3) { + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oTyp1); /* operand 1 and result */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oTyp2); /* vector operand 2 */ + VectorRegSpec *vecSpec3 = GetMemoryPool()->New(oTyp3); /* vector operand 2 */ + + MOperator mop = IsPrimitiveUnSignedVector(oTyp1) ? MOP_vumaddvvv : MOP_vsmaddvvv; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*o1).AddOpndChain(*o2).AddOpndChain(*o3); + vInsn.PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2).PushRegSpecEntry(vecSpec3); + GetCurBB()->AppendInsn(vInsn); + return static_cast(o1); +} + +RegOperand *AArch64CGFunc::SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, + Operand *o2, PrimType oTyp2, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oTyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oTyp2); /* vector operand 1 */ + + MOperator mop; + if (isLow) { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vumullvvv : MOP_vsmullvvv; + } else { + mop = IsPrimitiveUnSignedVector(rType) ? 
MOP_vumull2vvv : MOP_vsmull2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) { + PrepareVectorOperands(&o1, oty1, &o2, oty2); + DEBUG_ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* source operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* source operand 2 */ + + MOperator mOp; + if (opc == OP_add) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vaddvvv : MOP_vadduuu; + } else if (opc == OP_sub) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vsubvvv : MOP_vsubuuu; + } else if (opc == OP_mul) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmulvvv : MOP_vmuluuu; + } else { + CHECK_FATAL(0, "Invalid opcode for SelectVectorBinOp"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + /* dest pushed first, popped first */ + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) { + PrepareVectorOperands(&o1, oty1, &o2, oty2); + DEBUG_ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp; + if (opc == OP_band) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vandvvv : MOP_vanduuu; + } else if (opc == OP_bior) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vorvvv : MOP_voruuu; + } else if (opc == OP_bxor) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vxorvvv : MOP_vxoruuu; + } else { + CHECK_FATAL(0, "Invalid opcode for SelectVectorBitwiseOp"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vxtnuv, AArch64CG::kMd[MOP_vxtnuv]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) { + (void)oty1; /* 1st opnd was loaded already, type no longer needed */ + RegOperand *res = static_cast(o1); /* o1 is also the result */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* vector opnd2 */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vxtn2uv, AArch64CG::kMd[MOP_vxtn2uv]); + vInsn.AddOpndChain(*res).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNot(PrimType rType, Operand *o1) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vnotvv : MOP_vnotuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNeg(PrimType rType, Operand *o1) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vnegvv : MOP_vneguu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest); + vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +/* + * Called internally for auto-vec, no intrinsics for now + */ +RegOperand *AArch64CGFunc::SelectVectorSelect(Operand &cond, PrimType rType, Operand &o0, Operand &o1) { + rType = GetPrimTypeSize(rType) > k8ByteSize ? PTY_v16u8 : PTY_v8u8; + RegOperand *res = &CreateRegisterOperandOfType(rType); + SelectCopy(*res, rType, cond, rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); + + uint32 mOp = GetPrimTypeBitSize(rType) > k64BitSize ? 
MOP_vbslvvv : MOP_vbsluuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(o0).AddOpndChain(o1); + vInsn.PushRegSpecEntry(vecSpecDest); + vInsn.PushRegSpecEntry(vecSpec1); + vInsn.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, + Operand *o2, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand 1 */ + + ImmOperand *imm = static_cast(o2); + MOperator mOp; + if (isLow) { + mOp = MOP_vshrnuvi; + } else { + CHECK_FATAL(0, "NYI: vshrn_high_"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest); + vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, + Operand *o2, PrimType otyp2, bool isLow, bool isWide) { + RegOperand *res = &CreateRegisterOperandOfType(resType); /* result reg */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(resType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp2); /* vector operand 2 */ + + MOperator mOp; + if (!isWide) { + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusublvuu : MOP_vssublvuu; + } else { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusubl2vvv : MOP_vssubl2vvv; + } + } else { + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusubwvvu : MOP_vssubwvvu; + } else { + mOp = IsUnsignedInteger(otyp1) ? 
MOP_vusubw2vvv : MOP_vssubw2vvv;
+    }
+  }
+  VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]);
+  vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2);
+  vInsn.PushRegSpecEntry(vecSpecDest);
+  vInsn.PushRegSpecEntry(vecSpec1);
+  vInsn.PushRegSpecEntry(vecSpec2);
+  GetCurBB()->AppendInsn(vInsn);
+  return res;
+}
+
+void AArch64CGFunc::SelectVectorZip(PrimType rType, Operand *o1, Operand *o2) {
+  RegOperand *res1 = &CreateRegisterOperandOfType(rType);  /* result operand 1 */
+  RegOperand *res2 = &CreateRegisterOperandOfType(rType);  /* result operand 2 */
+  VectorRegSpec *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(rType);
+  VectorRegSpec *vecSpec1 = GetMemoryPool()->New<VectorRegSpec>(rType);  /* vector operand 1 */
+  VectorRegSpec *vecSpec2 = GetMemoryPool()->New<VectorRegSpec>(rType);  /* vector operand 2 */
+
+  VectorInsn &vInsn1 = GetInsnBuilder()->BuildVectorInsn(MOP_vzip1vvv, AArch64CG::kMd[MOP_vzip1vvv]);
+  vInsn1.AddOpndChain(*res1).AddOpndChain(*o1).AddOpndChain(*o2);
+  vInsn1.PushRegSpecEntry(vecSpecDest);
+  vInsn1.PushRegSpecEntry(vecSpec1);
+  vInsn1.PushRegSpecEntry(vecSpec2);
+  GetCurBB()->AppendInsn(vInsn1);
+
+  VectorInsn &vInsn2 = GetInsnBuilder()->BuildVectorInsn(MOP_vzip2vvv, AArch64CG::kMd[MOP_vzip2vvv]);
+  vInsn2.AddOpndChain(*res2).AddOpndChain(*o1).AddOpndChain(*o2);
+  vInsn2.PushRegSpecEntry(vecSpecDest);
+  vInsn2.PushRegSpecEntry(vecSpec1);
+  vInsn2.PushRegSpecEntry(vecSpec2);
+  GetCurBB()->AppendInsn(vInsn2);
+
+  if (GetPrimTypeSize(rType) <= k16ByteSize) {
+    Operand *preg1 = &GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xvmovd, *preg1, *res1));
+    Operand *preg2 = &GetOrCreatePhysicalRegisterOperand(V1, k64BitSize, kRegTyFloat);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xvmovd, *preg2, *res2));
+  }
+}
+
+RegOperand *AArch64CGFunc::SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) {
+  RegOperand *res = &CreateRegisterOperandOfType(rType);  /* result operand */
+  VectorRegSpec *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(rType);
+  VectorRegSpec *vecSpec1 = GetMemoryPool()->New<VectorRegSpec>(otyp);  /* vector operand */
+
+  MOperator mOp;
+  if (isLow) {
+    mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuxtlvu : MOP_vsxtlvu;
+  } else {
+    mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuxtl2vv : MOP_vsxtl2vv;
+  }
+  VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]);
+  vInsn.AddOpndChain(*res).AddOpndChain(*o1);
+  vInsn.PushRegSpecEntry(vecSpecDest);
+  vInsn.PushRegSpecEntry(vecSpec1);
+  GetCurBB()->AppendInsn(vInsn);
+  return res;
+}
+
+/*
+ * Check the distance between the first insn of the BB with label targLabIdx
+ * and the insn with id targId. If the distance is greater than
+ * kShortBRDistance, return false.
+ */
+bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId) const {
+  for (auto *tBB : bb.GetSuccs()) {
+    if (tBB->GetLabIdx() != targLabIdx) {
+      continue;
+    }
+    Insn *tInsn = tBB->GetFirstInsn();
+    while (tInsn == nullptr || !tInsn->IsMachineInstruction()) {
+      if (tInsn == nullptr) {
+        tBB = tBB->GetNext();
+        if (tBB == nullptr) {  /* tailcallopt may make the target block empty */
+          return true;
+        }
+        tInsn = tBB->GetFirstInsn();
+      } else {
+        tInsn = tInsn->GetNext();
+      }
+    }
+    uint32 tmp = (tInsn->GetId() > targId) ?
(tInsn->GetId() - targId) : (targId - tInsn->GetId());
+    return (tmp < kShortBRDistance);
+  }
+  CHECK_FATAL(false, "CFG error");
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b10ef618ff45e20601612c26245f80ab41f8b9aa
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
@@ -0,0 +1,4997 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_color_ra.h"
+#include <iostream>
+#include <fstream>
+#include "aarch64_cg.h"
+#include "mir_lower.h"
+#include "securec.h"
+/*
+ * Based on concepts from Chow and Hennessy.
+ * Phases are as follows:
+ *   Prepass to collect local BB information.
+ *     Compute local register allocation demands for global RA.
+ *   Compute live ranges.
+ *     Live ranges (LRs) are represented by a vector of size #BBs;
+ *     for each cross-BB vreg, a bit is set in the vector.
+ *   Build interference graph with basic block as granularity.
+ *     When the intersection of two LRs is not null, they interfere.
+ *   Separate unconstrained and constrained LRs.
+ *     unconstrained - LR with fewer connected edges than available colors.
+ *       These LRs can always be colored.
+ *     constrained - not unconstrained.
+ *   Split LRs based on priority cost.
+ *     Repetitively add BBs from the original LR to the new LR until constrained.
+ *     Update all LRs the new LR interferes with.
+ *   Color the new LR.
+ *     Each LR has a forbidden list, the registers that cannot be assigned.
+ *     Coalesce moves using the preferred color first.
+ *   Mark the remaining uncolorable LRs after split as spill.
+ *   Local register allocate.
+ *   Emit and insert spills.
+ */
+namespace maplebe {
+#define JAVALANG (cgFunc->GetMirModule().IsJavaModule())
+#define CLANG (cgFunc->GetMirModule().IsCModule())
+
+/*
+ * for physical regOpnd phyOpnd,
+ * R0->GetRegisterNumber() == 1
+ * V0->GetRegisterNumber() == 33
+ */
+constexpr uint32 kLoopWeight = 20;
+constexpr uint32 kAdjustWeight = 2;
+constexpr uint32 kInsnStep = 2;
+constexpr uint32 kMaxSplitCount = 3;
+constexpr uint32 kRematWeight = 3;
+constexpr uint32 kPriorityDefThreashold = 1;
+constexpr uint32 kPriorityUseThreashold = 5;
+constexpr uint32 kPriorityBBThreashold = 1000;
+constexpr float kPriorityRatioThreashold = 0.9;
+
+#define GCRA_DUMP CG_DEBUG_FUNC(*cgFunc)
+
+void LiveUnit::PrintLiveUnit() const {
+  LogInfo::MapleLogger() << "[" << begin << "," << end << "]"
+                         << "<D" << defNum << "U" << useNum << ">";
+  if (!hasCall) {
+    /* Too many calls, so only print when there is no call.
*/ + LogInfo::MapleLogger() << " nc"; + } + if (needReload) { + LogInfo::MapleLogger() << " rlod"; + } + if (needRestore) { + LogInfo::MapleLogger() << " rstr"; + } +} + +bool LiveRange::IsRematerializable(AArch64CGFunc &cgFunc, uint8 rematLev) const { + if (rematLev == rematOff) + return false; + + switch (op) { + case OP_undef: + return false; + case OP_constval: { + const MIRConst *mirConst = rematInfo.mirConst; + if (mirConst->GetKind() != kConstInt) { + return false; + } + const MIRIntConst *intConst = static_cast(rematInfo.mirConst); + int64 val = intConst->GetExtValue(); + if (val >= -kMax16UnsignedImm && val <= kMax16UnsignedImm) { + return true; + } + auto uval = static_cast(val); + if (IsMoveWidableImmediate(uval, GetSpillSize())) { + return true; + } + return IsBitmaskImmediate(uval, GetSpillSize()); + } + case OP_addrof: { + if (rematLev < rematAddr) { + return false; + } + const MIRSymbol *symbol = rematInfo.sym; + if (symbol->IsDeleted()) { + return false; + } + /* cost too much to remat */ + if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && ((fieldID != 0) || + (cgFunc.GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { + return false; + } + if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || + (symbol->GetStorageClass() == kScExtern))) { + /* check if in loop */ + bool useInLoop = false; + bool defOutLoop = false; + for (auto luIt: luMap) { + BB *bb = cgFunc.GetBBFromID(luIt.first); + LiveUnit *curLu = luIt.second; + if (bb->GetLoop() != nullptr && curLu->GetUseNum() != 0) { + useInLoop = true; + } + if (bb->GetLoop() == nullptr && curLu->GetDefNum() != 0) { + defOutLoop = true; + } + } + return !(useInLoop && defOutLoop); + } + return true; + } + case OP_dread: { + if (rematLev < rematDreadLocal) { + return false; + } + const MIRSymbol *symbol = rematInfo.sym; + if (symbol->IsDeleted()) { + return false; + } + MIRStorageClass storageClass = symbol->GetStorageClass(); + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + /* cost too much to remat. */ + return false; + } + PrimType symType = symbol->GetType()->GetPrimType(); + int32 offset = 0; + if (fieldID != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "Rematerialize: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(fieldID)->GetPrimType(); + offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + /* check stImm.GetOffset() is in addri12 */ + StImmOperand &stOpnd = cgFunc.CreateStImmOperand(*symbol, offset, 0); + uint32 dataSize = GetPrimTypeBitSize(symType); + ImmOperand &immOpnd = cgFunc.CreateImmOperand(stOpnd.GetOffset(), dataSize, false); + if (!immOpnd.IsInBitSize(kMaxImmVal12Bits, 0)) { + return false; + } + if (rematLev < rematDreadGlobal && !symbol->IsLocal()) { + return false; + } + return true; + } + default: + return false; + } +} + +std::vector LiveRange::Rematerialize(AArch64CGFunc *cgFunc, + RegOperand ®Op) { + std::vector insns; + switch (op) { + case OP_constval: + switch (rematInfo.mirConst->GetKind()) { + case kConstInt: { + MIRIntConst *intConst = const_cast(static_cast(rematInfo.mirConst)); + + Operand *immOp = cgFunc->SelectIntConst(*intConst); + MOperator movOp = (GetSpillSize() == k32BitSize) ? 
MOP_wmovri32 : MOP_xmovri64; + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(movOp, regOp, *immOp)); + } + break; + default: + DEBUG_ASSERT(false, "Unsupported constant for rematerialization"); + } + break; + case OP_dread: { + const MIRSymbol *symbol = rematInfo.sym; + PrimType symType = symbol->GetType()->GetPrimType(); + RegOperand *regOp64 = &cgFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(regOp.GetRegisterNumber()), k64BitSize, regOp.GetRegisterType()); + int32 offset = 0; + if (fieldID != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "Rematerialize: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(fieldID)->GetPrimType(); + offset = cgFunc->GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + + uint32 dataSize = GetPrimTypeBitSize(symType); + MemOperand *spillMemOp = &cgFunc->GetOrCreateMemOpndAfterRa(*symbol, offset, dataSize, false, regOp64, insns); + MOperator mOp = cgFunc->PickLdInsn(spillMemOp->GetSize(), symType); + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOp, *spillMemOp)); + } + break; + case OP_addrof: { + const MIRSymbol *symbol = rematInfo.sym; + int32 offset = 0; + if (fieldID != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "Rematerialize: non-zero fieldID for non-structure"); + offset = cgFunc->GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + StImmOperand &stImm = cgFunc->CreateStImmOperand(*symbol, offset, 0); + if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) { + AArch64SymbolAlloc *symLoc = static_cast( + cgFunc->GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); + ImmOperand *offsetOp = nullptr; + offsetOp = &cgFunc->CreateImmOperand(cgFunc->GetBaseOffset(*symLoc) + offset, k64BitSize, false); + + Insn *insn = &cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrri12, regOp, *cgFunc->GetBaseReg(*symLoc), *offsetOp); + if (CGOptions::kVerboseCG) { + std::string comm = "local/formal var: "; + comm.append(symbol->GetName()); + insn->SetComment(comm); + } + insns.push_back(insn); + } else { + Insn *insn = &cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrp, regOp, stImm); + insns.push_back(insn); + if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || + (symbol->GetStorageClass() == kScExtern))) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offsetOp = cgFunc->CreateOfstOpnd(*symbol, offset, 0); + MemOperand &memOpnd = cgFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + static_cast(®Op), nullptr, &offsetOp, nullptr); + MOperator ldOp = (memOpnd.GetSize() == k64BitSize) ? 
MOP_xldr : MOP_wldr; + insn = &cgFunc->GetInsnBuilder()->BuildInsn(ldOp, regOp, memOpnd); + insns.push_back(insn); + if (offset > 0) { + OfstOperand &ofstOpnd = cgFunc->GetOrCreateOfstOpnd(static_cast(static_cast(offset)), + k32BitSize); + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrri12, regOp, regOp, ofstOpnd)); + } + } else if (!addrUpper) { + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrpl12, regOp, regOp, stImm)); + } + } + } + break; + default: + DEBUG_ASSERT(false, "Unexpected op in live range"); + } + + return insns; +} + +template +void GraphColorRegAllocator::ForEachBBArrElem(const uint64 *vec, Func functor) const { + for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + +template +void GraphColorRegAllocator::ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const { + for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + if (functor(iBBArrElem * kU64 + bBBArrElem)) { + return; + } + } + } + } +} + +template +void GraphColorRegAllocator::ForEachRegArrElem(const uint64 *vec, Func functor) const { + for (uint32 iBBArrElem = 0; iBBArrElem < regBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + +void GraphColorRegAllocator::PrintLiveUnitMap(const LiveRange &lr) const { + LogInfo::MapleLogger() << "\n\tlu:"; + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + if (!IsBitArrElemSet(lr.GetBBMember(), i)) { + continue; + } + auto lu = lr.GetLuMap().find(i); + if (lu != lr.GetLuMap().end() && (lu->second->GetDefNum() || lu->second->GetUseNum())) { + LogInfo::MapleLogger() << "(" << i << " "; + lu->second->PrintLiveUnit(); + LogInfo::MapleLogger() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveRangeConflicts(const LiveRange &lr) const { + LogInfo::MapleLogger() << "\n\tinterfere(" << lr.GetNumBBConflicts() << "): "; + for (uint32 i = 0; i < regBuckets; ++i) { + uint64 chunk = lr.GetBBConflictElem(i); + for (uint64 bit = 0; bit < kU64; ++bit) { + if (chunk & (1ULL << bit)) { + regno_t newNO = i * kU64 + bit; + LogInfo::MapleLogger() << newNO << ","; + } + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveBBBit(const LiveRange &lr) const { + LogInfo::MapleLogger() << "live_bb(" << lr.GetNumBBMembers() << "): "; + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + if (IsBitArrElemSet(lr.GetBBMember(), i)) { + LogInfo::MapleLogger() << i << " "; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveRange(const LiveRange &lr, const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + + LogInfo::MapleLogger() << "R" << lr.GetRegNO(); + if (lr.GetRegType() == kRegTyInt) { + LogInfo::MapleLogger() << "(I)"; + } else if (lr.GetRegType() == kRegTyFloat) { + LogInfo::MapleLogger() << "(F)"; + } else { + LogInfo::MapleLogger() << "(U)"; + } + if (lr.GetSpillSize() == k32) { + LogInfo::MapleLogger() << "S32"; + } else if (lr.GetSpillSize() == k64) { + LogInfo::MapleLogger() << "S64"; + } else { + LogInfo::MapleLogger() << "S0(nodef)"; + } + 
LogInfo::MapleLogger() << "\tnumCall " << lr.GetNumCall(); + LogInfo::MapleLogger() << "\tpriority " << lr.GetPriority(); + LogInfo::MapleLogger() << "\tforbidden: "; + for (regno_t preg = kInvalidRegNO; preg < kMaxRegNum; preg++) { + if (lr.GetForbidden(preg)) { + LogInfo::MapleLogger() << preg << ","; + } + } + LogInfo::MapleLogger() << "\tpregveto: "; + for (regno_t preg = kInvalidRegNO; preg < kMaxRegNum; preg++) { + if (lr.GetPregveto(preg)) { + LogInfo::MapleLogger() << preg << ","; + } + } + if (lr.IsSpilled()) { + LogInfo::MapleLogger() << " spilled"; + } + if (lr.GetSplitLr()) { + LogInfo::MapleLogger() << " split"; + } + LogInfo::MapleLogger() << "\top: " << kOpcodeInfo.GetName(lr.GetOp()); + LogInfo::MapleLogger() << "\n"; + PrintLiveBBBit(lr); + PrintLiveRangeConflicts(lr); + PrintLiveUnitMap(lr); + if (lr.GetSplitLr()) { + PrintLiveRange(*lr.GetSplitLr(), "===>Split LR"); + } +} + +void GraphColorRegAllocator::PrintLiveRanges() const { + LogInfo::MapleLogger() << "PrintLiveRanges: size = " << lrMap.size() << "\n"; + for (auto it : lrMap) { + PrintLiveRange(*it.second, ""); + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLocalRAInfo(const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + for (uint32 id = 0; id < cgFunc->NumBBs(); ++id) { + LocalRaInfo *lraInfo = localRegVec[id]; + if (lraInfo == nullptr) { + continue; + } + LogInfo::MapleLogger() << "bb " << id << " def "; + for (const auto &defCntPair : lraInfo->GetDefCnt()) { + LogInfo::MapleLogger() << "[" << defCntPair.first << ":" << defCntPair.second << "],"; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "use "; + for (const auto &useCntPair : lraInfo->GetUseCnt()) { + LogInfo::MapleLogger() << "[" << useCntPair.first << ":" << useCntPair.second << "],"; + } + LogInfo::MapleLogger() << "\n"; + } +} + +void GraphColorRegAllocator::PrintBBAssignInfo() const { + for (size_t id = 0; id < bfs->sortedBBs.size(); ++id) { + uint32 bbID = bfs->sortedBBs[id]->GetId(); + BBAssignInfo *bbInfo = bbRegInfo[bbID]; + if (bbInfo == nullptr) { + continue; + } + LogInfo::MapleLogger() << "BBinfo(" << id << ")"; + LogInfo::MapleLogger() << " lra-needed int " << bbInfo->GetIntLocalRegsNeeded(); + LogInfo::MapleLogger() << " fp " << bbInfo->GetFpLocalRegsNeeded(); + LogInfo::MapleLogger() << " greg-used "; + for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) { + if (bbInfo->GetGlobalsAssigned(regNO)) { + LogInfo::MapleLogger() << regNO << ","; + } + } + LogInfo::MapleLogger() << "\n"; + } +} + +void GraphColorRegAllocator::CalculatePriority(LiveRange &lr) const { +#ifdef RANDOM_PRIORITY + unsigned long seed = 0; + size_t size = sizeof(seed); + std::ifstream randomNum("/dev/random", std::ios::in | std::ios::binary); + if (randomNum) { + randomNum.read(reinterpret_cast(&seed), size); + if (randomNum) { + lr.SetPriority(1 / (seed + 1)); + } + randomNum.close(); + } else { + std::cerr << "Failed to open /dev/urandom" << '\n'; + } + return; +#endif /* RANDOM_PRIORITY */ + float pri = 0.0; + uint32 bbNum = 0; + uint32 numDefs = 0; + uint32 numUses = 0; + auto *a64CGFunc = static_cast(cgFunc); + CG *cg = a64CGFunc->GetCG(); + + if (cg->GetRematLevel() >= rematConst && lr.IsRematerializable(*a64CGFunc, rematConst)) { + lr.SetRematLevel(rematConst); + } else if (cg->GetRematLevel() >= rematAddr && lr.IsRematerializable(*a64CGFunc, rematAddr)) { + lr.SetRematLevel(rematAddr); + } else if (cg->GetRematLevel() >= rematDreadLocal && lr.IsRematerializable(*a64CGFunc, 
rematDreadLocal)) { + lr.SetRematLevel(rematDreadLocal); + } else if (cg->GetRematLevel() >= rematDreadGlobal && lr.IsRematerializable(*a64CGFunc, rematDreadGlobal)) { + lr.SetRematLevel(rematDreadGlobal); + } + + auto calculatePriorityFunc = [&lr, &bbNum, &numDefs, &numUses, &pri, this] (uint32 bbID) { + auto lu = lr.FindInLuMap(bbID); + DEBUG_ASSERT(lu != lr.EndOfLuMap(), "can not find live unit"); + BB *bb = bbVec[bbID]; + if (bb->GetFirstInsn() != nullptr && !bb->IsSoloGoto()) { + ++bbNum; + numDefs += lu->second->GetDefNum(); + numUses += lu->second->GetUseNum(); + uint32 useCnt = lu->second->GetDefNum() + lu->second->GetUseNum(); + uint32 mult; +#ifdef USE_BB_FREQUENCY + mult = bb->GetFrequency(); +#else /* USE_BB_FREQUENCY */ + if (bb->GetLoop() != nullptr) { + uint32 loopFactor; + if (lr.GetNumCall() > 0 && lr.GetRematLevel() == rematOff) { + loopFactor = bb->GetLoop()->GetLoopLevel() * kAdjustWeight; + } else { + loopFactor = bb->GetLoop()->GetLoopLevel() / kAdjustWeight; + } + mult = static_cast(pow(kLoopWeight, loopFactor)); + } else { + mult = 1; + } +#endif /* USE_BB_FREQUENCY */ + pri += useCnt * mult; + } + }; + ForEachBBArrElem(lr.GetBBMember(), calculatePriorityFunc); + + if (lr.GetRematLevel() == rematAddr || lr.GetRematLevel() == rematConst) { + if (numDefs <= 1 && numUses <= 1) { + pri = -0xFFFF; + } else { + pri /= kRematWeight; + } + } else if (lr.GetRematLevel() == rematDreadLocal) { + pri /= 4; + } else if (lr.GetRematLevel() == rematDreadGlobal) { + pri /= 2; + } + + lr.SetPriority(pri); + lr.SetNumDefs(numDefs); + lr.SetNumUses(numUses); + if (lr.GetPriority() > 0 && numDefs <= kPriorityDefThreashold && numUses <= kPriorityUseThreashold && + cgFunc->NumBBs() > kPriorityBBThreashold && + (static_cast(lr.GetNumBBMembers()) / cgFunc->NumBBs()) > kPriorityRatioThreashold) { + /* for large functions, delay allocating long LR with few defs and uses */ + lr.SetPriority(0.0); + } +} + +void GraphColorRegAllocator::PrintBBs() const { + for (auto *bb : bfs->sortedBBs) { + LogInfo::MapleLogger() << "\n< === > "; + LogInfo::MapleLogger() << bb->GetId(); + LogInfo::MapleLogger() << " succs:"; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << " " << succBB->GetId(); + } + LogInfo::MapleLogger() << " eh_succs:"; + for (auto *succBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << " " << succBB->GetId(); + } + } + LogInfo::MapleLogger() << "\n"; +} + +uint32 GraphColorRegAllocator::MaxIntPhysRegNum() const { + return (R28 - R0); +} + +uint32 GraphColorRegAllocator::MaxFloatPhysRegNum() const { + return (V31 - V0); +} + +bool GraphColorRegAllocator::IsReservedReg(AArch64reg regNO) const { + if (!doMultiPass || cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return (regNO == R16) || (regNO == R17); + } else { + return (regNO == R16); + } +} + +void GraphColorRegAllocator::InitFreeRegPool() { + /* + * ==== int regs ==== + * FP 29, LR 30, SP 31, 0 to 7 parameters + + * MapleCG defines 32 as ZR (zero register) + * use 8 if callee does not return large struct ? No + * 16 and 17 are intra-procedure call temp, can be caller saved + * 18 is platform reg, still use it + */ + uint32 intNum = 0; + uint32 fpNum = 0; + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + if (!AArch64Abi::IsAvailableReg(static_cast(regNO))) { + continue; + } + + /* + * Because of the try-catch scenario in JAVALANG, + * we should use specialized spill register to prevent register changes when exceptions occur. 
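+     * (Hence the spill registers are preset into intSpillRegSet / fpSpillRegSet
+     * below, rather than being chosen on demand at each spill point.)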
+ */ + if (JAVALANG && AArch64Abi::IsSpillRegInRA(static_cast(regNO), needExtraSpillReg)) { + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + /* Preset int spill registers */ + (void)intSpillRegSet.insert(regNO - R0); + } else { + /* Preset float spill registers */ + (void)fpSpillRegSet.insert(regNO - V0); + } + continue; + } + +#ifdef RESERVED_REGS + /* r16,r17 are used besides ra. */ + if (IsReservedReg(static_cast(regNO))) { + continue; + } +#endif /* RESERVED_REGS */ + + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + /* when yieldpoint is enabled, x19 is reserved. */ + if (IsYieldPointReg(static_cast(regNO))) { + continue; + } + if (regNO == R29) { + if (!cgFunc->UseFP()) { + (void)intCalleeRegSet.insert(regNO - R0); + ++intNum; + } + continue; + } + if (AArch64Abi::IsCalleeSavedReg(static_cast(regNO))) { + (void)intCalleeRegSet.insert(regNO - R0); + } else { + (void)intCallerRegSet.insert(regNO - R0); + } + ++intNum; + } else { + if (AArch64Abi::IsCalleeSavedReg(static_cast(regNO))) { + (void)fpCalleeRegSet.insert(regNO - V0); + } else { + (void)fpCallerRegSet.insert(regNO - V0); + } + ++fpNum; + } + } + intRegNum = intNum; + fpRegNum = fpNum; +} + +void GraphColorRegAllocator::InitCCReg() { + Operand &opnd = cgFunc->GetOrCreateRflag(); + auto &tmpRegOp = static_cast(opnd); + ccReg = tmpRegOp.GetRegisterNumber(); +} + +bool GraphColorRegAllocator::IsYieldPointReg(regno_t regNO) const { + if (cgFunc->GetCG()->GenYieldPoint()) { + return (regNO == RYP); + } + return false; +} + +bool GraphColorRegAllocator::IsUnconcernedReg(regno_t regNO) const { + /* RFP = 32, RLR = 31, RSP = 33, RZR = 34 */ + if ((regNO >= RLR && regNO <= RZR) || regNO == RFP || regNO == ccReg) { + return true; + } + + /* when yieldpoint is enabled, the RYP(x19) can not be used. */ + if (IsYieldPointReg(static_cast(regNO))) { + return true; + } + + return false; +} + +bool GraphColorRegAllocator::IsUnconcernedReg(const RegOperand ®Opnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { + return true; + } + return IsUnconcernedReg(regNO); +} + +/* + * Based on live analysis, the live-in and live-out set determines + * the bit to be set in the LR vector, which is of size #BBs. + * If a vreg is in the live-in and live-out set, it is live in the BB. + * + * Also keep track if a LR crosses a call. If a LR crosses a call, it + * interferes with all caller saved registers. Add all caller registers + * to the LR's forbidden list. + * + * Return created LiveRange object + * + * maybe need extra info: + * Add info for setjmp. + * Add info for defBB, useBB, index in BB for def and use + * Add info for startingBB and endingBB + */ +LiveRange *GraphColorRegAllocator::NewLiveRange() { + LiveRange *lr = memPool->New(alloc); + + if (bbBuckets == 0) { + bbBuckets = (cgFunc->NumBBs() / kU64) + 1; + } + lr->SetBBBuckets(bbBuckets); + lr->InitBBMember(*memPool, bbBuckets); + if (regBuckets == 0) { + regBuckets = (cgFunc->GetMaxRegNum() / kU64) + 1; + } + lr->SetRegBuckets(regBuckets); + lr->InitBBConflict(*memPool, regBuckets); + lr->InitPregveto(); + lr->InitForbidden(); + return lr; +} + +/* Create local info for LR. return true if reg is not local. 
*/ +bool GraphColorRegAllocator::CreateLiveRangeHandleLocal(regno_t regNO, const BB &bb, bool isDef) { + if (FindIn(bb.GetLiveInRegNO(), regNO) || FindIn(bb.GetLiveOutRegNO(), regNO)) { + return true; + } + /* + * register not in globals for the bb, so it is local. + * Compute local RA info. + */ + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + if (isDef) { + /* movk is handled by different id for use/def in the same insn. */ + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } else { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + /* lr info is useful for lra, so continue lr info */ + return false; +} + +LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, + uint32 currId) { + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + lr = NewLiveRange(); + lr->SetID(currId); + + LiveUnit *lu = memPool->New(); + lr->SetElemToLuMap(bb.GetId(), *lu); + lu->SetBegin(currId); + lu->SetEnd(currId); + if (isDef) { + /* means no use after def for reg, chances for ebo opt */ + for (const auto &pregNO : pregLive) { + lr->InsertElemToPregveto(pregNO); + } + } + } else { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb.GetId()); + if (lu == nullptr) { + lu = memPool->New(); + lr->SetElemToLuMap(bb.GetId(), *lu); + lu->SetBegin(currId); + lu->SetEnd(currId); + } + if (lu->GetBegin() > currId) { + lu->SetBegin(currId); + } + } + + if (CLANG) { + auto *a64CGFunc = static_cast(cgFunc); + MIRPreg *preg = a64CGFunc->GetPseudoRegFromVirtualRegNO(regNO, CGOptions::DoCGSSA()); + if (preg) { + switch (preg->GetOp()) { + case OP_constval: + lr->SetRematerializable(preg->rematInfo.mirConst); + break; + case OP_addrof: + case OP_dread: + lr->SetRematerializable(preg->GetOp(), preg->rematInfo.sym, + preg->fieldID, preg->addrUpper); + break; + case OP_undef: + break; + default: + DEBUG_ASSERT(false, "Unexpected op in Preg"); + } + } + } + + return lr; +} + +void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currId, bool updateCount) { + bool isNonLocal = CreateLiveRangeHandleLocal(regNO, bb, isDef); + + if (!isDef) { + --currId; + } + + LiveRange *lr = CreateLiveRangeAllocateAndUpdate(regNO, bb, isDef, currId); + lr->SetRegNO(regNO); + lr->SetIsNonLocal(isNonLocal); + if (isDef) { + (void)vregLive.erase(regNO); +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && updateCount) { + if (lr->GetNumDefs() == 0) { + lr->SetFrequency(lr->GetFrequency() + bb.GetFrequency()); + } + lr->IncNumDefs(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ + } else { + (void)vregLive.insert(regNO); +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && updateCount) { + if (lr->GetNumUses() == 0) { + lr->SetFrequency(lr->GetFrequency() + bb.GetFrequency()); + } + lr->IncNumUses(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ + } + for (const auto &pregNO : pregLive) { + lr->InsertElemToPregveto(pregNO); + } + + /* only handle it in live_in and def point? 
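+ * (As the code stands, the member bit is set for every bb that references
+ * the vreg and for every bb where it is live-out, via SetMemberBitArrElem
+ * below.)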
*/ + uint32 bbID = bb.GetId(); + lr->SetMemberBitArrElem(bbID); + + lrMap[regNO] = lr; +} + +bool GraphColorRegAllocator::SetupLiveRangeByOpHandlePhysicalReg(const RegOperand ®Opnd, Insn &insn, + regno_t regNO, bool isDef) { + if (!regOpnd.IsPhysicalRegister()) { + return false; + } + LocalRaInfo *lraInfo = localRegVec[insn.GetBB()->GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[insn.GetBB()->GetId()] = lraInfo; + } + + if (isDef) { + if (FindNotIn(pregLive, regNO)) { + for (const auto &vRegNO : vregLive) { + if (IsUnconcernedReg(vRegNO)) { + continue; + } + lrMap[vRegNO]->InsertElemToPregveto(regNO); + } + } + pregLive.erase(regNO); + if (lraInfo != nullptr) { + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } + } else { + (void)pregLive.insert(regNO); + for (const auto &vregNO : vregLive) { + if (IsUnconcernedReg(vregNO)) { + continue; + } + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToPregveto(regNO); + } + + if (lraInfo != nullptr) { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + } + return true; +} + +/* + * add pregs to forbidden list of lr. If preg is in + * the live list, then it is forbidden for other vreg on the list. + */ +void GraphColorRegAllocator::SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses) { + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regOpnd)) { + if (GetLiveRange(regNO) != nullptr) { + DEBUG_ASSERT(false, "Unconcerned reg"); + lrMap.erase(regNO); + } + return; + } + if (SetupLiveRangeByOpHandlePhysicalReg(regOpnd, insn, regNO, isDef)) { + return; + } + + CreateLiveRange(regNO, *insn.GetBB(), isDef, insn.GetId(), true); + + LiveRange *lr = GetLiveRange(regNO); + DEBUG_ASSERT(lr != nullptr, "lr should not be nullptr"); + if (isDef) { + lr->SetSpillSize((regOpnd.GetSize() <= k32) ? k32 : k64); + } + if (lr->GetRegType() == kRegTyUndef) { + lr->SetRegType(regOpnd.GetRegisterType()); + } + if (isDef) { + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncDefNum(); + lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsDef); + } else { + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncUseNum(); + lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsUse); + ++numUses; + } +#ifdef MOVE_COALESCE + if (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr) { + RegOperand &opnd1 = static_cast(insn.GetOperand(1)); + if (opnd1.GetRegisterNumber() < kAllRegNum && !IsUnconcernedReg(opnd1)) { + lr->InsertElemToPrefs(opnd1.GetRegisterNumber() - R0); + } + RegOperand &opnd0 = static_cast(insn.GetOperand(0)); + if (opnd0.GetRegisterNumber() < kAllRegNum) { + lr->InsertElemToPrefs(opnd0.GetRegisterNumber() - R0); + } + } +#endif /* MOVE_COALESCE */ + if (!insn.IsSpecialIntrinsic() && insn.GetBothDefUseOpnd() != kInsnMaxOpnd) { + lr->SetDefUse(); + } +} + +/* handle live range for bb->live_out */ +void GraphColorRegAllocator::SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint) { + if (IsUnconcernedReg(liveOut)) { + return; + } + if (liveOut >= kAllRegNum) { + (void)vregLive.insert(liveOut); + CreateLiveRange(liveOut, bb, false, currPoint, false); + return; + } + + (void)pregLive.insert(liveOut); + for (const auto &vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToPregveto(liveOut); + } + + /* See if phys reg is livein also. Then assume it span the entire bb. 
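+ * Setting the use count to kMaxUint16 below makes the preg look permanently
+ * busy, so local RA will never hand it out in this bb.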
*/ + if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { + return; + } + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + /* Make it a large enough so no locals can be allocated. */ + lraInfo->SetUseCntElem(liveOut, kMaxUint16); +} + +void GraphColorRegAllocator::ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, + const Operand &opnd) const { + if (!opnd.IsRegister()) { + return; + } + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regNO)) { + return; + } + if (regOpnd.IsPhysicalRegister()) { + (void)pregs.insert(regNO); + } else { + (void)vregs.insert(regNO); + } +} + +void GraphColorRegAllocator::SetOpndConflict(const Insn &insn, bool onlyDef) { + uint32 opndNum = insn.GetOperandSize(); + if (opndNum <= 1) { + return; + } + const InsnDesc *md = insn.GetDesc(); + std::unordered_set pregs; + std::unordered_set vregs; + + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (!onlyDef) { + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + ClassifyOperand(pregs, vregs, *op); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + ClassifyOperand(pregs, vregs, *base); + } + if (offset != nullptr) { + ClassifyOperand(pregs, vregs, *offset); + } + } else if (opnd.IsRegister()) { + ClassifyOperand(pregs, vregs, opnd); + } + } else { + if (md->GetOpndDes(i)->IsRegDef()) { + ClassifyOperand(pregs, vregs, opnd); + } + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr && !memOpnd.IsIntactIndexed()) { + ClassifyOperand(pregs, vregs, *base); + } + } + } + } + + if (vregs.empty()) { + return; + } + /* Set BBConflict and Pregveto */ + for (regno_t vregNO : vregs) { + for (regno_t conflictVregNO : vregs) { + if (conflictVregNO != vregNO) { + lrMap[vregNO]->SetConflictBitArrElem(conflictVregNO); + } + } + for (regno_t conflictPregNO : pregs) { + lrMap[vregNO]->InsertElemToPregveto(conflictPregNO); + } + } +} + +void GraphColorRegAllocator::UpdateOpndConflict(const Insn &insn, bool multiDef) { + /* if IsSpecialIntrinsic or IsAtomicStore, set conflicts for all opnds */ + if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) { + SetOpndConflict(insn, false); + return; + } + if (multiDef) { + SetOpndConflict(insn, true); + } +} + +void GraphColorRegAllocator::ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef) { + uint32 numDefs = 0; + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveRangeByOp(*static_cast(opnd), insn, true, numUses); + ++numDefs; + } + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (!memOpnd.IsIntactIndexed()) { + SetupLiveRangeByOp(opnd, insn, true, numUses); + ++numDefs; + } + } + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + SetupLiveRangeByOp(opnd, insn, true, numUses); + ++numDefs; + } + DEBUG_ASSERT(numUses == 0, "should only be 
def opnd"); + if (numDefs > 1) { + multiDef = true; + needExtraSpillReg = true; + } +} + +void GraphColorRegAllocator::ComputeLiveRangesForEachUseOperand(Insn &insn) { + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && i == kAsmInputListOpnd) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveRangeByOp(*static_cast(opnd), insn, false, numUses); + } + continue; + } + if (md->GetOpndDes(i)->IsRegDef() && !md->GetOpndDes(i)->IsRegUse()) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + SetupLiveRangeByOp(*op, insn, false, numUses); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveRangeByOp(*base, insn, false, numUses); + } + if (offset != nullptr) { + SetupLiveRangeByOp(*offset, insn, false, numUses); + } + } else { + SetupLiveRangeByOp(opnd, insn, false, numUses); + } + } + if (numUses >= AArch64Abi::kNormalUseOperandNum || insn.GetMachineOpcode() == MOP_lazy_ldr) { + needExtraSpillReg = true; + } +} + +void GraphColorRegAllocator::ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn) { + if (!insn.IsCall()) { + return; + } + /* def the return value */ + pregLive.erase(R0); + pregLive.erase(V0); + + /* active the parametes */ + Operand &opnd1 = insn.GetOperand(1); + if (opnd1.IsList()) { + auto &srcOpnds = static_cast(opnd1); + for (auto regOpnd : srcOpnds.GetOperands()) { + DEBUG_ASSERT(!regOpnd->IsVirtualRegister(), "not be a virtual register"); + auto physicalReg = static_cast(regOpnd->GetRegisterNumber()); + (void)pregLive.insert(physicalReg); + } + } +} + +void GraphColorRegAllocator::ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint) { + for (auto lin : bb.GetLiveInRegNO()) { + if (lin < kAllRegNum) { + continue; + } + LiveRange *lr = GetLiveRange(lin); + if (lr == nullptr) { + continue; + } + auto lu = lr->FindInLuMap(bb.GetId()); + DEBUG_ASSERT(lu != lr->EndOfLuMap(), "container empty check"); + if (bb.GetFirstInsn()) { + lu->second->SetBegin(bb.GetFirstInsn()->GetId()); + } else { + /* since bb is empty, then use pointer as is */ + lu->second->SetBegin(currPoint); + } + lu->second->SetBegin(lu->second->GetBegin() - 1); + } +} + +bool GraphColorRegAllocator::UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const { + insn.SetId(currPoint); + if (insn.IsImmaterialInsn() || !insn.IsMachineInstruction()) { + --currPoint; + return true; + } + return false; +} + +void GraphColorRegAllocator::UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn) { + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCallerSaveReg(static_cast(preg))) { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + 
lr->InsertElemToCallDef(preg); + } + } + } + } else { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->SetCrossCall(); + } + } + } else { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->SetCrossCall(); + } + } + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->IncNumCall(); + lr->AddRef(bbId, currPoint, kIsCall); + + auto lu = lr->FindInLuMap(bbId); + if (lu != lr->EndOfLuMap()) { + lu->second->SetHasCall(true); + } + } +} + +void GraphColorRegAllocator::SetLrMustAssign(const RegOperand *regOpnd) { + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr) { + lr->SetMustAssigned(); + lr->SetIsNonLocal(true); + } +} + +void GraphColorRegAllocator::SetupMustAssignedLiveRanges(const Insn &insn) { + if (!insn.IsSpecialIntrinsic()) { + return; + } + if (insn.GetMachineOpcode() == MOP_asm) { + for (auto regOpnd : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { + SetLrMustAssign(regOpnd); + } + for (auto regOpnd : static_cast(insn.GetOperand(kAsmInputListOpnd)).GetOperands()) { + SetLrMustAssign(regOpnd); + } + return; + } + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand *opnd = &insn.GetOperand(i); + if (!opnd->IsRegister()) { + continue; + } + auto regOpnd = static_cast(opnd); + SetLrMustAssign(regOpnd); + } +} + +/* + * For each succ bb->GetSuccs(), if bb->liveout - succ->livein is not empty, the vreg(s) is + * dead on this path (but alive on the other path as there is some use of it on the + * other path). This might be useful for optimization of reload placement later for + * splits (lr split into lr1 & lr2 and lr2 will need to reload.) + * Not for now though. + */ +void GraphColorRegAllocator::ComputeLiveRanges() { + bbVec.clear(); + bbVec.resize(cgFunc->NumBBs()); + + auto currPoint = + static_cast(cgFunc->GetTotalNumberOfInstructions() + bfs->sortedBBs.size()); + /* distinguish use/def */ + CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); + currPoint = currPoint << 2; + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + bbVec[bb->GetId()] = bb; + bb->SetLevel(bbIdx - 1); + + pregLive.clear(); + vregLive.clear(); + for (auto liveOut : bb->GetLiveOutRegNO()) { + SetupLiveRangeByRegNO(liveOut, *bb, currPoint); + } + --currPoint; + + if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *bb->GetLastInsn()); + } + + FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { +#ifdef MOVE_COALESCE + if ((insn->GetMachineOpcode() == MOP_xmovrr || insn->GetMachineOpcode() == MOP_wmovrr) && + (!AArch64isa::IsPhysicalRegister(static_cast( + insn->GetOperand(0)).GetRegisterNumber())) && + (static_cast(insn->GetOperand(0)).GetRegisterNumber() == + static_cast(insn->GetOperand(1)).GetRegisterNumber())) { + bb->RemoveInsn(*insn); + continue; + } +#endif + if (UpdateInsnCntAndSkipUseless(*insn, currPoint)) { + if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *ninsn); + } + continue; + } + + bool multiDef = false; + ComputeLiveRangesForEachDefOperand(*insn, multiDef); + ComputeLiveRangesForEachUseOperand(*insn); + + UpdateOpndConflict(*insn, multiDef); + SetupMustAssignedLiveRanges(*insn); + + if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint - kInsnStep, *ninsn); + } 
+ + ComputeLiveRangesUpdateIfInsnIsCall(*insn); + /* distinguish use/def */ + currPoint -= 2; + } + ComputeLiveRangesUpdateLiveUnitInsnRange(*bb, currPoint); + /* move one more step for each BB */ + --currPoint; + } + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "After ComputeLiveRanges\n"; + PrintLiveRanges(); +#ifdef USE_LRA + if (doLRA) { + PrintLocalRAInfo("After ComputeLiveRanges"); + } +#endif /* USE_LRA */ + } +} + +/* Create a common stack space for spilling with need_spill */ +MemOperand *GraphColorRegAllocator::CreateSpillMem(uint32 spillIdx, SpillMemCheck check) { + if (spillIdx >= spillMemOpnds.size()) { + return nullptr; + } + + if (operandSpilled[spillIdx]) { + /* For this insn, spill slot already used, need to find next available slot. */ + uint32 i; + for (i = spillIdx + 1; i < kSpillMemOpndNum; ++i) { + if (!operandSpilled[i]) { + break; + } + } + CHECK_FATAL(i < kSpillMemOpndNum, "no more available spill mem slot"); + spillIdx = i; + } + if (check == kSpillMemPost) { + operandSpilled[spillIdx] = true; + } + + if (spillMemOpnds[spillIdx] == nullptr) { + regno_t reg = cgFunc->NewVReg(kRegTyInt, sizeof(int64)); + auto *a64CGFunc = static_cast(cgFunc); + spillMemOpnds[spillIdx] = a64CGFunc->GetOrCreatSpillMem(reg); + } + return spillMemOpnds[spillIdx]; +} + +bool GraphColorRegAllocator::IsLocalReg(regno_t regNO) const { + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + LogInfo::MapleLogger() << "unexpected regNO" << regNO; + return true; + } + return IsLocalReg(*lr); +} + +bool GraphColorRegAllocator::IsLocalReg(const LiveRange &lr) const { + return !lr.GetSplitLr() && (lr.GetNumBBMembers() == 1) && !lr.IsNonLocal(); +} + +bool GraphColorRegAllocator::CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const { + regno_t lr1RegNO = lr1.GetRegNO(); + regno_t lr2RegNO = lr2.GetRegNO(); + for (uint32 x = 0; x < kU64; ++x) { + if ((val & (1ULL << x)) != 0) { + uint32 lastBitSet = i * kU64 + x; + /* + * begin and end should be in the bb info (LU) + * Need to rethink this if. + * Under some circumstance, lr->begin can occur after lr->end. 
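+ * (Hypothetical example: LUs [10,20] and [30,40] in the same bb are treated
+ * as disjoint by the check below, so no conflict bit is set; LUs [10,35] and
+ * [30,40] overlap, so both conflict bits get set.)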
+ */
+      auto lu1 = lr1.FindInLuMap(lastBitSet);
+      auto lu2 = lr2.FindInLuMap(lastBitSet);
+      if (lu1 != lr1.EndOfLuMap() && lu2 != lr2.EndOfLuMap() &&
+          !((lu1->second->GetBegin() < lu2->second->GetBegin() && lu1->second->GetEnd() < lu2->second->GetBegin()) ||
+            (lu2->second->GetBegin() < lu1->second->GetEnd() && lu2->second->GetEnd() < lu1->second->GetBegin()))) {
+        lr1.SetConflictBitArrElem(lr2RegNO);
+        lr2.SetConflictBitArrElem(lr1RegNO);
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+void GraphColorRegAllocator::CheckInterference(LiveRange &lr1, LiveRange &lr2) const {
+  uint64 bitArr[bbBuckets];
+  for (uint32 i = 0; i < bbBuckets; ++i) {
+    bitArr[i] = lr1.GetBBMember()[i] & lr2.GetBBMember()[i];
+  }
+
+  for (uint32 i = 0; i < bbBuckets; ++i) {
+    uint64 val = bitArr[i];
+    if (val == 0) {
+      continue;
+    }
+    if (CheckOverlap(val, i, lr1, lr2)) {
+      break;
+    }
+  }
+}
+
+void GraphColorRegAllocator::BuildInterferenceGraphSeparateIntFp(std::vector<LiveRange*> &intLrVec,
+                                                                 std::vector<LiveRange*> &fpLrVec) {
+  for (auto it : lrMap) {
+    LiveRange *lr = it.second;
+    if (lr->GetRegNO() == 0) {
+      continue;
+    }
+#ifdef USE_LRA
+    if (doLRA && IsLocalReg(*lr)) {
+      continue;
+    }
+#endif /* USE_LRA */
+    if (lr->GetRegType() == kRegTyInt) {
+      intLrVec.emplace_back(lr);
+    } else if (lr->GetRegType() == kRegTyFloat) {
+      fpLrVec.emplace_back(lr);
+    } else {
+      DEBUG_ASSERT(false, "Illegal regType in BuildInterferenceGraph");
+      LogInfo::MapleLogger() << "error: Illegal regType in BuildInterferenceGraph\n";
+    }
+  }
+}
+
+/*
+ * Based on intersection of LRs. When two LRs interfere, add to each other's
+ * interference list.
+ */
+void GraphColorRegAllocator::BuildInterferenceGraph() {
+  std::vector<LiveRange*> intLrVec;
+  std::vector<LiveRange*> fpLrVec;
+  BuildInterferenceGraphSeparateIntFp(intLrVec, fpLrVec);
+
+  /*
+   * Once the number of BBs becomes large in big functions, checking for
+   * interferences takes significantly longer.
+   * Taking advantage of LRs confined to a single unique bucket is one
+   * strategy to avoid that unnecessary computation.
+   */
+  auto lrSize = intLrVec.size();
+  std::vector<int32> uniqueBucketIdx(lrSize);
+  for (uint32 i = 0; i < lrSize; i++) {
+    uint32 count = 0;
+    uint32 uniqueIdx = 0;
+    LiveRange *lr = intLrVec[i];
+    for (uint32 j = 0; j < bbBuckets; ++j) {
+      if (lr->GetBBMember()[j]) {
+        count++;
+        uniqueIdx = j;
+      }
+    }
+    if (count == 1) {
+      uniqueBucketIdx[i] = static_cast<int32>(uniqueIdx);
+    } else {
+      /* LR spans multiple buckets */
+      DEBUG_ASSERT(count >= 1, "A live range can not be empty");
+      uniqueBucketIdx[i] = -1;
+    }
+  }
+
+  for (auto it1 = intLrVec.begin(); it1 != intLrVec.end(); ++it1) {
+    LiveRange *lr1 = *it1;
+    CalculatePriority(*lr1);
+    int32 lr1UniqueBucketIdx = uniqueBucketIdx[static_cast<uint32>(std::distance(intLrVec.begin(), it1))];
+    for (auto it2 = it1 + 1; it2 != intLrVec.end(); ++it2) {
+      LiveRange *lr2 = *it2;
+      if (lr1->GetRegNO() < lr2->GetRegNO()) {
+        int32 lr2UniqueBucketIdx = uniqueBucketIdx[static_cast<uint32>(std::distance(intLrVec.begin(), it2))];
+        if (lr1UniqueBucketIdx == -1 && lr2UniqueBucketIdx == -1) {
+          CheckInterference(*lr1, *lr2);
+        } else if (((lr1UniqueBucketIdx >= 0) && lr1->GetBBMember()[lr1UniqueBucketIdx] &
+                    lr2->GetBBMember()[lr1UniqueBucketIdx]) || ((lr2UniqueBucketIdx >= 0) &&
+                    lr1->GetBBMember()[lr2UniqueBucketIdx] & lr2->GetBBMember()[lr2UniqueBucketIdx])) {
+          CheckInterference(*lr1, *lr2);
+        }
+      }
+    }
+  }
+
+  // Might need the same unique-bucket filtering as for intLrVec
+  for (auto it1 = fpLrVec.begin(); it1 != fpLrVec.end(); ++it1) {
+    LiveRange *lr1 = *it1;
+    CalculatePriority(*lr1);
+    for (auto it2 = it1 + 1; it2 != fpLrVec.end(); ++it2) {
+      LiveRange *lr2 = *it2;
+      if (lr1->GetRegNO() < lr2->GetRegNO()) {
+        CheckInterference(*lr1, *lr2);
+      }
+    }
+  }
+
+  if (GCRA_DUMP) {
+    LogInfo::MapleLogger() << "After BuildInterferenceGraph\n";
+    PrintLiveRanges();
+  }
+}
+
+void GraphColorRegAllocator::SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO) {
+  DEBUG_ASSERT(bbID < bbRegInfo.size(), "index out of range in GraphColorRegAllocator::SetBBInfoGlobalAssigned");
+  BBAssignInfo *bbInfo = bbRegInfo[bbID];
+  if (bbInfo == nullptr) {
+    bbInfo = memPool->New<BBAssignInfo>(alloc);
+    bbRegInfo[bbID] = bbInfo;
+    bbInfo->InitGlobalAssigned();
+  }
+  bbInfo->InsertElemToGlobalsAssigned(regNO);
+}
+
+bool GraphColorRegAllocator::HaveAvailableColor(const LiveRange &lr, uint32 num) const {
+  return ((lr.GetRegType() == kRegTyInt && num < intRegNum) || (lr.GetRegType() == kRegTyFloat && num < fpRegNum));
+}
+
+/*
+ * If an LR has fewer members on its interference list than there are
+ * colors, it can be trivially assigned a register; otherwise it is
+ * constrained. Separate the LRs based on whether they are constrained.
+ *
+ * The unconstrained LRs are colored last.
+ *
+ * Compute a sorted list of constrained LRs based on priority cost.
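+ * (Hypothetical example: with 10 usable int colors, an LR whose conflict,
+ * pregveto and forbidden counts sum to 7 is unconstrained and can always be
+ * colored; at 12 it goes to the constrained list and waits for the
+ * priority-ordered coloring.)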
+ */ +void GraphColorRegAllocator::Separate() { + for (auto it : lrMap) { + LiveRange *lr = it.second; +#ifdef USE_LRA + if (doLRA && IsLocalReg(*lr)) { + continue; + } +#endif /* USE_LRA */ +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && ((lr->GetNumDefs() <= 1) && (lr->GetNumUses() <= 1) && (lr->GetNumCall() > 0)) && + (lr->GetFrequency() <= (cgFunc->GetFirstBB()->GetFrequency() << 1))) { + if (lr->GetRegType() == kRegTyInt) { + intDelayed.emplace_back(lr); + } else { + fpDelayed.emplace_back(lr); + } + continue; + } +#endif /* OPTIMIZE_FOR_PROLOG */ + if (lr->GetRematLevel() != rematOff) { + unconstrained.emplace_back(lr); + } else if (HaveAvailableColor(*lr, lr->GetNumBBConflicts() + static_cast(lr->GetPregvetoSize()) + + static_cast(lr->GetForbiddenSize()))) { + if (lr->GetPrefs().size()) { + unconstrainedPref.emplace_back(lr); + } else { + unconstrained.emplace_back(lr); + } + } else if (lr->IsMustAssigned()) { + mustAssigned.emplace_back(lr); + } else { + if (lr->GetPrefs().size() && lr->GetNumCall() == 0) { + unconstrainedPref.emplace_back(lr); + } else { + constrained.emplace_back(lr); + } + } + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "Unconstrained : "; + for (auto lr : unconstrainedPref) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + for (auto lr : unconstrained) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "Constrained : "; + for (auto lr : constrained) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "mustAssigned : "; + for (auto lr : mustAssigned) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + } +} + +MapleVector::iterator GraphColorRegAllocator::GetHighPriorityLr(MapleVector &lrSet) const { + auto it = lrSet.begin(); + auto highestIt = it; + LiveRange *startLr = *it; + float maxPrio = startLr->GetPriority(); + ++it; + for (; it != lrSet.end(); ++it) { + LiveRange *lr = *it; + if (lr->GetPriority() > maxPrio) { + maxPrio = lr->GetPriority(); + highestIt = it; + } + } + return highestIt; +} + +void GraphColorRegAllocator::UpdateForbiddenForNeighbors(const LiveRange &lr) const { + auto updateForbidden = [&lr, this] (regno_t regNO) { + LiveRange *newLr = GetLiveRange(regNO); + DEBUG_ASSERT(newLr != nullptr, "newLr should not be nullptr"); + if (!newLr->GetPregveto(lr.GetAssignedRegNO())) { + newLr->InsertElemToForbidden(lr.GetAssignedRegNO()); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateForbidden); +} + +void GraphColorRegAllocator::UpdatePregvetoForNeighbors(const LiveRange &lr) const { + auto updatePregveto = [&lr, this] (regno_t regNO) { + LiveRange *newLr = GetLiveRange(regNO); + DEBUG_ASSERT(newLr != nullptr, "newLr should not be nullptr"); + newLr->InsertElemToPregveto(lr.GetAssignedRegNO()); + newLr->EraseElemFromForbidden(lr.GetAssignedRegNO()); + }; + ForEachRegArrElem(lr.GetBBConflict(), updatePregveto); +} + +/* + * For cases with only one def/use and crosses a call. + * It might be more beneficial to spill vs save/restore in prolog/epilog. + * But if the callee register is already used, then it is ok to reuse it again. + * Or in certain cases, just use the callee. 
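+ * (The odd-count check below presumably reflects paired stp/ldp
+ * save/restore: an odd number of callee-saved regs already in use leaves a
+ * paired save slot free to fill at no extra cost.)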
+ */ +bool GraphColorRegAllocator::ShouldUseCallee(LiveRange &lr, const MapleSet &calleeUsed, + const MapleVector &delayed) const { + if (FindIn(calleeUsed, lr.GetAssignedRegNO())) { + return true; + } + if (AArch64Abi::IsCalleeSavedReg(static_cast(lr.GetAssignedRegNO())) && + (calleeUsed.size() % kDivide2) != 0) { + return true; + } + if (delayed.size() > 1 && calleeUsed.empty()) { + /* If there are more than 1 vreg that can benefit from callee, use callee */ + return true; + } + lr.SetAssignedRegNO(0); + return false; +} + +void GraphColorRegAllocator::AddCalleeUsed(regno_t regNO, RegType regType) { + DEBUG_ASSERT(AArch64isa::IsPhysicalRegister(regNO), "regNO should be physical register"); + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(regNO)); + if (isCalleeReg) { + if (regType == kRegTyInt) { + (void)intCalleeUsed.insert(regNO); + } else { + (void)fpCalleeUsed.insert(regNO); + } + } +} + +regno_t GraphColorRegAllocator::FindColorForLr(const LiveRange &lr) const { + regno_t reg = 0; + regno_t base; + RegType regType = lr.GetRegType(); + const MapleSet *currRegSet = nullptr; + const MapleSet *nextRegSet = nullptr; + if (regType == kRegTyInt) { + if (lr.GetNumCall() != 0) { + currRegSet = &intCalleeRegSet; + nextRegSet = &intCallerRegSet; + } else { + currRegSet = &intCallerRegSet; + nextRegSet = &intCalleeRegSet; + } + base = R0; + } else { + if (lr.GetNumCall() != 0) { + currRegSet = &fpCalleeRegSet; + nextRegSet = &fpCallerRegSet; + } else { + currRegSet = &fpCallerRegSet; + nextRegSet = &fpCalleeRegSet; + } + base = V0; + } + +#ifdef MOVE_COALESCE + if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { + for (const auto &it : lr.GetPrefs()) { + reg = it + base; + if ((FindIn(*currRegSet, reg) || FindIn(*nextRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + } +#endif /* MOVE_COALESCE */ + for (const auto &it : *currRegSet) { + reg = it + base; + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + /* Failed to allocate in first choice. Try 2nd choice. */ + for (const auto &it : *nextRegSet) { + reg = it + base; + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + DEBUG_ASSERT(false, "Failed to find a register"); + return 0; +} + +regno_t GraphColorRegAllocator::TryToAssignCallerSave(const LiveRange &lr) const { + regno_t base; + RegType regType = lr.GetRegType(); + const MapleSet *currRegSet = nullptr; + if (regType == kRegTyInt) { + currRegSet = &intCallerRegSet; + base = R0; + } else { + currRegSet = &fpCallerRegSet; + base = V0; + } + + regno_t reg = 0; +#ifdef MOVE_COALESCE + if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { + for (const auto &it : lr.GetPrefs()) { + reg = it + base; + if ((FindIn(*currRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + return reg; + } + } + } +#endif /* MOVE_COALESCE */ + for (const auto &it : *currRegSet) { + reg = it + base; + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + return reg; + } + } + return 0; +} + +/* + * If forbidden list has more registers than max of all BB's local reg + * requirement, then LR can be colored. + * Update LR's color if success, return true, else return false. + */ +bool GraphColorRegAllocator::AssignColorToLr(LiveRange &lr, bool isDelayed) { + if (lr.GetAssignedRegNO() > 0) { + /* Already assigned. 
*/ + return true; + } + if (!HaveAvailableColor(lr, lr.GetForbiddenSize() + lr.GetPregvetoSize())) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "assigned fail to R" << lr.GetRegNO() << "\n"; + } + return false; + } + regno_t callerSaveReg = 0; + regno_t reg = FindColorForLr(lr); + if (lr.GetNumCall() != 0 && !lr.GetCrossCall()) { + callerSaveReg = TryToAssignCallerSave(lr); + bool prefCaller = AArch64Abi::IsCalleeSavedReg(static_cast(reg)) && + intCalleeUsed.find(reg) == intCalleeUsed.end() && fpCalleeUsed.find(reg) == fpCalleeUsed.end(); + if (callerSaveReg != 0 && (prefCaller || !AArch64Abi::IsCalleeSavedReg(static_cast(reg)))) { + reg = callerSaveReg; + lr.SetNumCall(0); + } + } + lr.SetAssignedRegNO(reg); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "assigned " << lr.GetAssignedRegNO() << " to R" << lr.GetRegNO() << "\n"; + } + if (lr.GetAssignedRegNO() == 0) { + return false; + } +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && isDelayed) { + if ((lr.GetRegType() == kRegTyInt && !ShouldUseCallee(lr, intCalleeUsed, intDelayed)) || + (lr.GetRegType() == kRegTyFloat && !ShouldUseCallee(lr, fpCalleeUsed, fpDelayed))) { + return false; + } + } +#endif /* OPTIMIZE_FOR_PROLOG */ + + AddCalleeUsed(lr.GetAssignedRegNO(), lr.GetRegType()); + + UpdateForbiddenForNeighbors(lr); + ForEachBBArrElem(lr.GetBBMember(), + [&lr, this](uint32 bbID) { SetBBInfoGlobalAssigned(bbID, lr.GetAssignedRegNO()); }); + return true; +} + +void GraphColorRegAllocator::PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, + std::set &candidateInLoop, + std::set &defInLoop) { + if (bb.GetInternalFlag1()) { + /* already visited */ + return; + } + + bb.SetInternalFlag1(true); + auto lu = lr.FindInLuMap(bb.GetId()); + uint32 defNum = 0; + uint32 useNum = 0; + if (lu != lr.EndOfLuMap()) { + defNum = lu->second->GetDefNum(); + useNum = lu->second->GetUseNum(); + } + + if (remove) { + /* In removal mode, has not encountered a ref yet. */ + if (defNum == 0 && useNum == 0) { + if (bb.GetLoop() != nullptr && FindIn(candidateInLoop, bb.GetLoop())) { + /* + * Upward search has found a loop. Regardless of def/use + * The loop members must be included in the new LR. + */ + remove = false; + } else { + /* No ref in this bb. mark as potential remove. */ + bb.SetInternalFlag2(true); + return; + } + } else { + /* found a ref, no more removal of bb and preds. */ + remove = false; + } + } + + if (bb.GetLoop() != nullptr) { + /* With a def in loop, cannot prune that loop */ + if (defNum > 0) { + (void)defInLoop.insert(bb.GetLoop()); + } + /* bb in loop, need to make sure of loop carried dependency */ + (void)candidateInLoop.insert(bb.GetLoop()); + } + for (auto pred : bb.GetPreds()) { + if (FindNotIn(bb.GetLoopPreds(), pred)) { + PruneLrForSplit(lr, *pred, remove, candidateInLoop, defInLoop); + } + } + for (auto pred : bb.GetEhPreds()) { + if (FindNotIn(bb.GetLoopPreds(), pred)) { + PruneLrForSplit(lr, *pred, remove, candidateInLoop, defInLoop); + } + } +} + +void GraphColorRegAllocator::FindBBSharedInSplit(LiveRange &lr, + const std::set &candidateInLoop, + std::set &defInLoop) { + /* A loop might be split into two. Need to see over the entire LR if there is a def in the loop. 
*/ + auto FindBBSharedFunc = [&lr, &candidateInLoop, &defInLoop, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetLoop() != nullptr && FindIn(candidateInLoop, bb->GetLoop())) { + auto lu = lr.FindInLuMap(bb->GetId()); + if (lu != lr.EndOfLuMap() && lu->second->GetDefNum() > 0) { + (void)defInLoop.insert(bb->GetLoop()); + } + } + }; + ForEachBBArrElem(lr.GetBBMember(), FindBBSharedFunc); +} + +/* + * Backward traversal of the top part of the split LR. + * Prune the part of the LR that has no downward exposing references. + * Take into account of loops and loop carried dependencies. + * The candidate bb to be removed, if in a loop, store that info. + * If a LR crosses a loop, even if the loop has no def/use, it must + * be included in the new LR. + */ +void GraphColorRegAllocator::ComputeBBForNewSplit(LiveRange &newLr, LiveRange &origLr) { + /* + * The candidate bb to be removed, if in a loop, store that info. + * If a LR crosses a loop, even if the loop has no def/use, it must + * be included in the new LR. + */ + std::set candidateInLoop; + /* If a bb has a def and is in a loop, store that info. */ + std::set defInLoop; + std::set smember; + ForEachBBArrElem(newLr.GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + for (auto bbIt = smember.rbegin(); bbIt != smember.rend(); ++bbIt) { + BB *bb = *bbIt; + if (bb->GetInternalFlag1() != 0) { + continue; + } + PruneLrForSplit(newLr, *bb, true, candidateInLoop, defInLoop); + } + FindBBSharedInSplit(origLr, candidateInLoop, defInLoop); + auto pruneTopLr = [this, &newLr, &candidateInLoop, &defInLoop] (uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetInternalFlag2() != 0) { + if (bb->GetLoop() != nullptr && FindIn(candidateInLoop, bb->GetLoop())) { + return; + } + if (bb->GetLoop() != nullptr || FindNotIn(defInLoop, bb->GetLoop())) { + /* defInLoop should be a subset of candidateInLoop. remove. */ + newLr.UnsetMemberBitArrElem(bbID); + } + } + }; + ForEachBBArrElem(newLr.GetBBMember(), pruneTopLr); /* prune the top LR. 
*/ +} + +bool GraphColorRegAllocator::UseIsUncovered(const BB &bb, const BB &startBB, std::vector &visitedBB) { + CHECK_FATAL(bb.GetId() < visitedBB.size(), "index out of range"); + visitedBB[bb.GetId()] = true; + for (auto pred : bb.GetPreds()) { + if (visitedBB[pred->GetId()]) { + continue; + } + if (pred->GetLevel() <= startBB.GetLevel()) { + return true; + } + if (UseIsUncovered(*pred, startBB, visitedBB)) { + return true; + } + } + for (auto pred : bb.GetEhPreds()) { + if (visitedBB[pred->GetId()]) { + continue; + } + if (pred->GetLevel() <= startBB.GetLevel()) { + return true; + } + if (UseIsUncovered(*pred, startBB, visitedBB)) { + return true; + } + } + return false; +} + +void GraphColorRegAllocator::FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, bool &remove, + std::set &candidateInLoop, + std::set &defInLoop) { + BB *bb = bbInfo.GetCandidateBB(); + const BB *startBB = bbInfo.GetStartBB(); + if (bb->GetInternalFlag1() != 0) { + /* already visited */ + return; + } + for (auto pred : bb->GetPreds()) { + if (pred->GetInternalFlag1() == 0) { + return; + } + } + for (auto pred : bb->GetEhPreds()) { + if (pred->GetInternalFlag1() == 0) { + return; + } + } + + bb->SetInternalFlag1(true); + auto lu = lr.FindInLuMap(bb->GetId()); + uint32 defNum = 0; + uint32 useNum = 0; + if (lu != lr.EndOfLuMap()) { + defNum = lu->second->GetDefNum(); + useNum = lu->second->GetUseNum(); + } + + std::vector visitedBB(cgFunc->GetAllBBs().size(), false); + if (remove) { + /* In removal mode, has not encountered a ref yet. */ + if (defNum == 0 && useNum == 0) { + /* No ref in this bb. mark as potential remove. */ + bb->SetInternalFlag2(true); + if (bb->GetLoop() != nullptr) { + /* bb in loop, need to make sure of loop carried dependency */ + (void)candidateInLoop.insert(bb->GetLoop()); + } + } else { + /* found a ref, no more removal of bb and preds. */ + remove = false; + /* A potential point for a upward exposing use. (might be a def). */ + lu->second->SetNeedReload(true); + } + } else if ((defNum > 0 || useNum > 0) && UseIsUncovered(*bb, *startBB, visitedBB)) { + lu->second->SetNeedReload(true); + } + + /* With a def in loop, cannot prune that loop */ + if (bb->GetLoop() != nullptr && defNum > 0) { + (void)defInLoop.insert(bb->GetLoop()); + } + + for (auto succ : bb->GetSuccs()) { + if (FindNotIn(bb->GetLoopSuccs(), succ)) { + bbInfo.SetCandidateBB(*succ); + FindUseForSplit(lr, bbInfo, remove, candidateInLoop, defInLoop); + } + } + for (auto succ : bb->GetEhSuccs()) { + if (FindNotIn(bb->GetLoopSuccs(), succ)) { + bbInfo.SetCandidateBB(*succ); + FindUseForSplit(lr, bbInfo, remove, candidateInLoop, defInLoop); + } + } +} + +void GraphColorRegAllocator::ClearLrBBFlags(const std::set &member) const { + for (auto bb : member) { + bb->SetInternalFlag1(0); + bb->SetInternalFlag2(0); + for (auto pred : bb->GetPreds()) { + pred->SetInternalFlag1(0); + pred->SetInternalFlag2(0); + } + for (auto pred : bb->GetEhPreds()) { + pred->SetInternalFlag1(0); + pred->SetInternalFlag2(0); + } + } +} + +/* + * Downward traversal of the bottom part of the split LR. + * Prune the part of the LR that has no upward exposing references. + * Take into account of loops and loop carried dependencies. + */ +void GraphColorRegAllocator::ComputeBBForOldSplit(LiveRange &newLr, LiveRange &origLr) { + /* The candidate bb to be removed, if in a loop, store that info. */ + std::set candidateInLoop; + /* If a bb has a def and is in a loop, store that info. 
*/ + std::set defInLoop; + SplitBBInfo bbInfo; + bool remove = true; + + std::set smember; + ForEachBBArrElem(origLr.GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + ClearLrBBFlags(smember); + for (auto bb : smember) { + if (bb->GetInternalFlag1() != 0) { + continue; + } + for (auto pred : bb->GetPreds()) { + pred->SetInternalFlag1(true); + } + for (auto pred : bb->GetEhPreds()) { + pred->SetInternalFlag1(true); + } + bbInfo.SetCandidateBB(*bb); + bbInfo.SetStartBB(*bb); + FindUseForSplit(origLr, bbInfo, remove, candidateInLoop, defInLoop); + } + FindBBSharedInSplit(newLr, candidateInLoop, defInLoop); + auto pruneLrFunc = [&origLr, &defInLoop, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetInternalFlag2() != 0) { + if (bb->GetLoop() != nullptr && FindNotIn(defInLoop, bb->GetLoop())) { + origLr.UnsetMemberBitArrElem(bbID); + } + } + }; + ForEachBBArrElem(origLr.GetBBMember(), pruneLrFunc); +} + +/* + * There is at least one available color for this BB from the neighbors + * minus the ones reserved for local allocation. + * bbAdded : The new BB to be added into the split LR if color is available. + * conflictRegs : Reprent the LR before adding the bbAdded. These are the + * forbidden regs before adding the new BBs. + * Side effect : Adding the new forbidden regs from bbAdded into + * conflictRegs if the LR can still be colored. + */ +bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdded, + std::unordered_set &conflictRegs) { + RegType type = lr.GetRegType(); + + std::unordered_set newConflict; + auto updateConflictFunc = [&bbAdded, &conflictRegs, &newConflict, &lr, this](regno_t regNO) { + /* check the real conflict in current bb */ + LiveRange *conflictLr = lrMap[regNO]; + /* + * If the bb to be added to the new LR has an actual + * conflict with another LR, and if that LR has already + * assigned a color that is not in the conflictRegs, + * then add it as a newConflict. + */ + if (IsBitArrElemSet(conflictLr->GetBBMember(), bbAdded.GetId())) { + regno_t confReg = conflictLr->GetAssignedRegNO(); + if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { + (void)newConflict.insert(confReg); + } + } else if (conflictLr->GetSplitLr() != nullptr && + IsBitArrElemSet(conflictLr->GetSplitLr()->GetBBMember(), bbAdded.GetId())) { + /* + * The after split LR is split into pieces, and this ensures + * the after split color is taken into consideration. + */ + regno_t confReg = conflictLr->GetSplitLr()->GetAssignedRegNO(); + if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { + (void)newConflict.insert(confReg); + } + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateConflictFunc); + + size_t numRegs = newConflict.size() + lr.GetPregvetoSize() + conflictRegs.size(); + + bool canColor = false; + if (type == kRegTyInt) { + if (numRegs < intRegNum) { + canColor = true; + } + } else if (numRegs < fpRegNum) { + canColor = true; + } + + if (canColor) { + for (auto regNO : newConflict) { + (void)conflictRegs.insert(regNO); + } + } + + /* Update all the registers conflicting when adding thew new bb. */ + return canColor; +} + +/* Support function for LR split. Move one BB from LR1 to LR2. 
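+ * Steps: reset the bb's traversal flags, set the member bit and move the
+ * bb's LiveUnit into the new LR, then clear the member bit in the old LR.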
*/ +void GraphColorRegAllocator::MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const { + /* initialize backward traversal flag for the bb pruning phase */ + bb.SetInternalFlag1(false); + /* initialize bb removal marker */ + bb.SetInternalFlag2(false); + /* Insert BB into new LR */ + uint32 bbID = bb.GetId(); + newLr.SetMemberBitArrElem(bbID); + + /* Move LU from old LR to new LR */ + auto luIt = oldLr.FindInLuMap(bb.GetId()); + if (luIt != oldLr.EndOfLuMap()) { + newLr.SetElemToLuMap(luIt->first, *(luIt->second)); + oldLr.EraseLuMap(luIt); + } + + /* Remove BB from old LR */ + oldLr.UnsetMemberBitArrElem(bbID); +} + +/* Is the set of loops inside the loop? */ +bool GraphColorRegAllocator::ContainsLoop(const CGFuncLoops &loop, + const std::set &loops) const { + for (const CGFuncLoops *lp : loops) { + while (lp != nullptr) { + if (lp == &loop) { + return true; + } + lp = lp->GetOuterLoop(); + } + } + return false; +} + +void GraphColorRegAllocator::GetAllLrMemberLoops(LiveRange &lr, std::set &loops) { + auto GetLrMemberFunc = [&loops, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + CGFuncLoops *loop = bb->GetLoop(); + if (loop != nullptr) { + (void)loops.insert(loop); + } + }; + ForEachBBArrElem(lr.GetBBMember(), GetLrMemberFunc); +} + +bool GraphColorRegAllocator::SplitLrShouldSplit(LiveRange &lr) { + if (lr.GetSplitLr() != nullptr || lr.GetNumBBMembers() == 1) { + return false; + } + /* Need to split within the same hierarchy */ + uint32 loopID = 0xFFFFFFFF; /* loopID is initialized the maximum value,and then be assigned in function */ + bool needSplit = true; + auto setNeedSplit = [&needSplit, &loopID, this](uint32 bbID) -> bool { + BB *bb = bbVec[bbID]; + if (loopID == 0xFFFFFFFF) { + if (bb->GetLoop() != nullptr) { + loopID = static_cast(bb->GetLoop()->GetHeader()->GetId()); + } else { + loopID = 0; + } + } else if ((bb->GetLoop() != nullptr && bb->GetLoop()->GetHeader()->GetId() != loopID) || + (bb->GetLoop() == nullptr && loopID != 0)) { + needSplit = false; + return true; + } + return false; + }; + ForEachBBArrElemWithInterrupt(lr.GetBBMember(), setNeedSplit); + return needSplit; +} + +/* + * When a BB in the LR has no def or use in it, then potentially + * there is no conflict within these BB for the new LR, since + * the new LR will need to spill the defs which terminates the + * new LR unless there is a use later which extends the new LR. + * There is no need to compute conflicting register set unless + * there is a def or use. + * It is assumed that the new LR is extended to the def or use. + * Initially newLr is empty, then add bb if can be colored. + * Return true if there is a split. + */ +bool GraphColorRegAllocator::SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, + std::unordered_set &conflictRegs) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "start split lr for vreg " << lr.GetRegNO() << "\n"; + } + std::set smember; + ForEachBBArrElem(lr.GetBBMember(), [&smember, this](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + for (auto bb : smember) { + if (!LrCanBeColored(lr, *bb, conflictRegs)) { + break; + } + MoveLrBBInfo(lr, newLr, *bb); + } + + /* return ture if split is successful */ + return newLr.GetNumBBMembers() != 0; +} + +void GraphColorRegAllocator::SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, + const std::set &origLoops, + const std::set &newLoops) { + /* + * bb in loops might need a reload due to loop carried dependency. + * Compute this before pruning the LRs. + * if there is no re-definition, then reload is not necessary. 
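+ * (Concretely: loop bbs that remain in the new LR and whose loop also
+ * belongs to the original LR get NeedReload set on their live units below.)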
+ * Part of the new LR region after the last reference is + * no longer in the LR. Remove those bb. + */ + ComputeBBForNewSplit(newLr, lr); + + /* With new LR, recompute conflict. */ + auto recomputeConflict = [&lr, &newLr, this](uint32 bbID) { + auto lrFunc = [&newLr, &bbID, this](regno_t regNO) { + LiveRange *confLrVec = lrMap[regNO]; + if (IsBitArrElemSet(confLrVec->GetBBMember(), bbID) || + (confLrVec->GetSplitLr() != nullptr && IsBitArrElemSet(confLrVec->GetSplitLr()->GetBBMember(), bbID))) { + /* + * New LR getting the interference does not mean the + * old LR can remove the interference. + * Old LR's interference will be handled at the end of split. + */ + newLr.SetConflictBitArrElem(regNO); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), lrFunc); + }; + ForEachBBArrElem(newLr.GetBBMember(), recomputeConflict); + + /* update bb/loop same as for new LR. */ + ComputeBBForOldSplit(newLr, lr); + /* Update the conflict interference for the original LR later. */ + for (auto loop : newLoops) { + if (!ContainsLoop(*loop, origLoops)) { + continue; + } + for (auto bb : loop->GetLoopMembers()) { + if (!IsBitArrElemSet(newLr.GetBBMember(), bb->GetId())) { + continue; + } + LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bb->GetId()); + if (lu->GetUseNum() != 0) { + lu->SetNeedReload(true); + } + } + } +} + +void GraphColorRegAllocator::SplitLrFixNewLrCallsAndRlod(LiveRange &newLr, + const std::set &origLoops) { + /* If a 2nd split loop is before the bb in 1st split bb. */ + newLr.SetNumCall(0); + auto fixCallsAndRlod = [&newLr, &origLoops, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + for (auto loop : origLoops) { + if (loop->GetHeader()->GetLevel() >= bb->GetLevel()) { + continue; + } + LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bbID); + if (lu->GetUseNum() != 0) { + lu->SetNeedReload(true); + } + } + LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bbID); + if (lu->HasCall()) { + newLr.IncNumCall(); + } + }; + ForEachBBArrElem(newLr.GetBBMember(), fixCallsAndRlod); +} + +void GraphColorRegAllocator::SplitLrFixOrigLrCalls(LiveRange &lr) const { + lr.SetNumCall(0); + auto fixOrigCalls = [&lr](uint32 bbID) { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(bbID); + if (lu->HasCall()) { + lr.IncNumCall(); + } + }; + ForEachBBArrElem(lr.GetBBMember(), fixOrigCalls); +} + +void GraphColorRegAllocator::SplitLrUpdateInterference(LiveRange &lr) { + /* + * newLr is now a separate LR from the original lr. + * Update the interference info. 
+ * Also recompute the forbidden info + */ + lr.ClearForbidden(); + auto updateInterfrence = [&lr, this](regno_t regNO) { + LiveRange *confLrVec = lrMap[regNO]; + if (IsBBsetOverlap(lr.GetBBMember(), confLrVec->GetBBMember(), bbBuckets)) { + /* interfere */ + if (confLrVec->GetAssignedRegNO() && !lr.GetPregveto(confLrVec->GetAssignedRegNO())) { + lr.InsertElemToForbidden(confLrVec->GetAssignedRegNO()); + } + } else { + /* no interference */ + lr.UnsetConflictBitArrElem(regNO); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateInterfrence); +} + +void GraphColorRegAllocator::SplitLrUpdateRegInfo(const LiveRange &origLr, LiveRange &newLr, + std::unordered_set &conflictRegs) const { + for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) { + if (origLr.GetPregveto(regNO)) { + newLr.InsertElemToPregveto(regNO); + } + } + for (auto regNO : conflictRegs) { + if (!newLr.GetPregveto(regNO)) { + newLr.InsertElemToForbidden(regNO); + } + } +} + +void GraphColorRegAllocator::SplitLrErrorCheckAndDebug(const LiveRange &origLr) const { + if (origLr.GetNumBBMembers() == 0) { + DEBUG_ASSERT(origLr.GetNumBBConflicts() == 0, "Error: member and conflict not match"); + } +} + +/* + * Pick a starting BB, then expand to maximize the new LR. + * Return the new LR. + */ +void GraphColorRegAllocator::SplitLr(LiveRange &lr) { + if (!SplitLrShouldSplit(lr)) { + return; + } + LiveRange *newLr = NewLiveRange(); + /* + * For the new LR, whenever a BB with either a def or + * use is added, then add the registers that the neighbor + * is using to the conflict register set indicating that these + * registers cannot be used for the new LR's color. + */ + std::unordered_set conflictRegs; + if (!SplitLrFindCandidateLr(lr, *newLr, conflictRegs)) { + return; + } +#ifdef REUSE_SPILLMEM + /* Copy the original conflict vector for spill reuse optimization */ + lr.SetOldConflict(memPool->NewArray(regBuckets)); + for (uint32 i = 0; i < regBuckets; ++i) { + lr.SetBBConflictElem(static_cast(i), lr.GetBBConflictElem(static_cast(i))); + } +#endif /* REUSE_SPILLMEM */ + + std::set newLoops; + std::set origLoops; + GetAllLrMemberLoops(*newLr, newLoops); + GetAllLrMemberLoops(lr, origLoops); + SplitLrHandleLoops(lr, *newLr, origLoops, newLoops); + SplitLrFixNewLrCallsAndRlod(*newLr, origLoops); + SplitLrFixOrigLrCalls(lr); + + SplitLrUpdateRegInfo(lr, *newLr, conflictRegs); + + CalculatePriority(lr); + /* At this point, newLr should be unconstrained. */ + lr.SetSplitLr(*newLr); + + newLr->SetRegNO(lr.GetRegNO()); + newLr->SetRegType(lr.GetRegType()); + newLr->SetID(lr.GetID()); + newLr->CopyRematerialization(lr); + CalculatePriority(*newLr); + SplitLrUpdateInterference(lr); + newLr->SetAssignedRegNO(FindColorForLr(*newLr)); + + AddCalleeUsed(newLr->GetAssignedRegNO(), newLr->GetRegType()); + + /* For the new LR, update assignment for local RA */ + ForEachBBArrElem(newLr->GetBBMember(), + [&newLr, this](uint32 bbID) { SetBBInfoGlobalAssigned(bbID, newLr->GetAssignedRegNO()); }); + + UpdatePregvetoForNeighbors(*newLr); + + SplitLrErrorCheckAndDebug(lr); +} + +void GraphColorRegAllocator::ColorForOptPrologEpilog() { +#ifdef OPTIMIZE_FOR_PROLOG + if (!doOptProlog) { + return; + } + for (auto lr : intDelayed) { + if (!AssignColorToLr(*lr, true)) { + lr->SetSpilled(true); + } + } + for (auto lr : fpDelayed) { + if (!AssignColorToLr(*lr, true)) { + lr->SetSpilled(true); + } + } +#endif +} + +/* + * From the sorted list of constrained LRs, pick the most profitable LR. 
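+ * ("Most profitable" means the highest CalculatePriority value, as selected
+ * by GetHighPriorityLr.)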
+ * Split the LR into LRnew1 and LRnew2, where LRnew1 has the maximum number
+ * of BBs and is colorable.
+ * The starting BB for traversal must have a color available.
+ *
+ * Assign a color, update neighbor's forbidden list.
+ *
+ * Update the conflict graph by changing the interference list.
+ * When both LRnew1 and LRnew2 conflict with a BB, that BB's #neighbors
+ * increases. If this BB was unconstrained, it must be re-checked and moved
+ * to constrained if necessary.
+ *
+ * Color the unconstrained LRs.
+ */
+void GraphColorRegAllocator::SplitAndColorForEachLr(MapleVector<LiveRange*> &targetLrVec) {
+  while (!targetLrVec.empty()) {
+    auto highestIt = GetHighPriorityLr(targetLrVec);
+    LiveRange *lr = *highestIt;
+    /* check whether lrs in lr->sconflict that were unconstrained have turned constrained */
+    if (highestIt != targetLrVec.end()) {
+      targetLrVec.erase(highestIt);
+    } else {
+      DEBUG_ASSERT(false, "Error: not in targetLrVec");
+    }
+    if (AssignColorToLr(*lr)) {
+      continue;
+    }
+#ifdef USE_SPLIT
+    SplitLr(*lr);
+#endif /* USE_SPLIT */
+    /*
+     * When LR is spilled, it potentially has no conflicts as
+     * each def/use is spilled/reloaded.
+     */
+#ifdef COLOR_SPLIT
+    if (!AssignColorToLr(*lr)) {
+#endif /* COLOR_SPLIT */
+      lr->SetSpilled(true);
+      hasSpill = true;
+#ifdef COLOR_SPLIT
+    }
+#endif /* COLOR_SPLIT */
+  }
+}
+
+void GraphColorRegAllocator::SplitAndColor() {
+  /* handle mustAssigned */
+  if (GCRA_DUMP) {
+    LogInfo::MapleLogger() << " starting mustAssigned : \n";
+  }
+  SplitAndColorForEachLr(mustAssigned);
+
+  if (GCRA_DUMP) {
+    LogInfo::MapleLogger() << " starting unconstrainedPref : \n";
+  }
+  /* assign color for unconstrained prefs */
+  SplitAndColorForEachLr(unconstrainedPref);
+
+  if (GCRA_DUMP) {
+    LogInfo::MapleLogger() << " starting constrained : \n";
+  }
+  /* handle constrained */
+  SplitAndColorForEachLr(constrained);
+
+  if (GCRA_DUMP) {
+    LogInfo::MapleLogger() << " starting unconstrained : \n";
+  }
+  /* assign color for unconstrained */
+  SplitAndColorForEachLr(unconstrained);
+
+#ifdef OPTIMIZE_FOR_PROLOG
+  if (doOptProlog) {
+    ColorForOptPrologEpilog();
+  }
+#endif /* OPTIMIZE_FOR_PROLOG */
+}
+
+void GraphColorRegAllocator::HandleLocalRegAssignment(regno_t regNO, LocalRegAllocator &localRa, bool isInt) {
+  /* vreg, get a reg for it if not assigned already. */
+  if (!localRa.IsInRegAssigned(regNO, isInt) && !localRa.isInRegSpilled(regNO, isInt)) {
+    /* find an available phys reg */
+    bool founded = false;
+    LiveRange *lr = lrMap[regNO];
+    regno_t maxIntReg = R0 + MaxIntPhysRegNum();
+    regno_t maxFpReg = V0 + MaxFloatPhysRegNum();
+    regno_t startReg = isInt ? R0 : V0;
+    regno_t endReg = isInt ?
maxIntReg : maxFpReg; + for (uint32 preg = startReg; preg <= endReg; ++preg) { + if (!localRa.IsPregAvailable(preg, isInt)) { + continue; + } + if (lr->GetNumCall() != 0 && !AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + if (lr->GetPregveto(preg)) { + continue; + } + regno_t assignedReg = preg; + localRa.ClearPregs(assignedReg, isInt); + localRa.SetPregUsed(assignedReg, isInt); + localRa.SetRegAssigned(regNO, isInt); + localRa.SetRegAssignmentMap(isInt, regNO, assignedReg); + lr->SetAssignedRegNO(assignedReg); + founded = true; + break; + } + if (!founded) { + localRa.SetRegSpilled(regNO, isInt); + lr->SetSpilled(true); + } + } +} + +void GraphColorRegAllocator::UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAllocator &localRa, bool isDef, + bool isInt) const { + auto usedIt = localRa.GetUseInfo().find(regNO); + if (usedIt != localRa.GetUseInfo().end() && !isDef) { + /* reg use, decrement count */ + DEBUG_ASSERT(usedIt->second > 0, "Incorrect local ra info"); + localRa.SetUseInfoElem(regNO, usedIt->second - 1); + if (!AArch64isa::IsPhysicalRegister(static_cast(regNO)) && localRa.IsInRegAssigned(regNO, isInt)) { + localRa.IncUseInfoElem(localRa.GetRegAssignmentItem(isInt, regNO)); + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\treg " << regNO << " update #use to " << localRa.GetUseInfoElem(regNO) << "\n"; + } + } + + auto defIt = localRa.GetDefInfo().find(regNO); + if (defIt != localRa.GetDefInfo().end() && isDef) { + /* reg def, decrement count */ + DEBUG_ASSERT(defIt->second > 0, "Incorrect local ra info"); + localRa.SetDefInfoElem(regNO, defIt->second - 1); + if (!AArch64isa::IsPhysicalRegister(static_cast(regNO)) && localRa.IsInRegAssigned(regNO, isInt)) { + localRa.IncDefInfoElem(localRa.GetRegAssignmentItem(isInt, regNO)); + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\treg " << regNO << " update #def to " << localRa.GetDefInfoElem(regNO) << "\n"; + } + } +} + +void GraphColorRegAllocator::UpdateLocalRegConflict(regno_t regNO, LocalRegAllocator &localRa, bool isInt) { + LiveRange *lr = lrMap[regNO]; + if (lr->GetNumBBConflicts() == 0) { + return; + } + if (!localRa.IsInRegAssigned(regNO, isInt)) { + return; + } + regno_t preg = localRa.GetRegAssignmentItem(isInt, regNO); + ForEachRegArrElem(lr->GetBBConflict(), + [&preg, this](regno_t regNO) { lrMap[regNO]->InsertElemToPregveto(preg); }); +} + +void GraphColorRegAllocator::HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const { + LogInfo::MapleLogger() << "HandleLocalReg " << regNO << "\n"; + LogInfo::MapleLogger() << "\tregUsed:"; + uint64 regUsed = localRa.GetPregUsed(isInt); + regno_t base = isInt ? R0 : V0; + regno_t end = isInt ? (RLR - R0) : (V31 - V0); + + for (uint32 i = 0; i <= end; ++i) { + if ((regUsed & (1ULL << i)) != 0) { + LogInfo::MapleLogger() << " " << (i + base); + } + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "\tregs:"; + uint64 regs = localRa.GetPregs(isInt); + for (uint32 regnoInLoop = 0; regnoInLoop <= end; ++regnoInLoop) { + if ((regs & (1ULL << regnoInLoop)) != 0) { + LogInfo::MapleLogger() << " " << (regnoInLoop + base); + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::HandleLocalReg(Operand &op, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo, + bool isDef, bool isInt) { + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + regno_t regNO = regOpnd.GetRegisterNumber(); + + if (IsUnconcernedReg(regOpnd)) { + return; + } + + /* is this a local register ? 
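+   * i.e. one whose live range is confined to this BB and is therefore
+   * handled by local RA rather than by the global coloring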
*/ + if (regNO >= kAllRegNum && !IsLocalReg(regNO)) { + return; + } + + if (GCRA_DUMP) { + HandleLocalRaDebug(regNO, localRa, isInt); + } + + if (regOpnd.IsPhysicalRegister()) { + /* conflict with preg is record in lr->pregveto and BBAssignInfo->globalsAssigned */ + UpdateLocalRegDefUseCount(regNO, localRa, isDef, isInt); + /* See if it is needed by global RA */ + if (localRa.GetUseInfoElem(regNO) == 0 && localRa.GetDefInfoElem(regNO) == 0) { + if (bbInfo && !bbInfo->GetGlobalsAssigned(regNO)) { + /* This phys reg is now available for assignment for a vreg */ + localRa.SetPregs(regNO, isInt); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\tlast ref, phys-reg " << regNO << " now available\n"; + } + } + } + } else { + HandleLocalRegAssignment(regNO, localRa, isInt); + UpdateLocalRegDefUseCount(regNO, localRa, isDef, isInt); + UpdateLocalRegConflict(regNO, localRa, isInt); + if (localRa.GetUseInfoElem(regNO) == 0 && localRa.GetDefInfoElem(regNO) == 0 && + localRa.IsInRegAssigned(regNO, isInt)) { + /* last ref of vreg, release assignment */ + localRa.SetPregs(localRa.GetRegAssignmentItem(isInt, regNO), isInt); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\tlast ref, release reg " << localRa.GetRegAssignmentItem(isInt, regNO) + << " for " << regNO << "\n"; + } + } + } +} + +void GraphColorRegAllocator::LocalRaRegSetEraseReg(LocalRegAllocator &localRa, regno_t regNO) const { + bool isInt = AArch64isa::IsGPRegister(static_cast(regNO)); + if (localRa.IsPregAvailable(regNO, isInt)) { + localRa.ClearPregs(regNO, isInt); + } +} + +bool GraphColorRegAllocator::LocalRaInitRegSet(LocalRegAllocator &localRa, uint32 bbID) { + bool needLocalRa = false; + /* Note physical regs start from R0, V0. */ + localRa.InitPregs(MaxIntPhysRegNum(), MaxFloatPhysRegNum(), cgFunc->GetCG()->GenYieldPoint(), intSpillRegSet, + fpSpillRegSet); + + localRa.ClearUseInfo(); + localRa.ClearDefInfo(); + LocalRaInfo *lraInfo = localRegVec[bbID]; + DEBUG_ASSERT(lraInfo != nullptr, "lraInfo not be nullptr"); + for (const auto &useCntPair : lraInfo->GetUseCnt()) { + regno_t regNO = useCntPair.first; + if (regNO >= kAllRegNum) { + needLocalRa = true; + } + localRa.SetUseInfoElem(useCntPair.first, useCntPair.second); + } + for (const auto &defCntPair : lraInfo->GetDefCnt()) { + regno_t regNO = defCntPair.first; + if (regNO >= kAllRegNum) { + needLocalRa = true; + } + localRa.SetDefInfoElem(defCntPair.first, defCntPair.second); + } + return needLocalRa; +} + +void GraphColorRegAllocator::LocalRaInitAllocatableRegs(LocalRegAllocator &localRa, uint32 bbID) { + BBAssignInfo *bbInfo = bbRegInfo[bbID]; + if (bbInfo != nullptr) { + for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) { + if (bbInfo->GetGlobalsAssigned(regNO)) { + LocalRaRegSetEraseReg(localRa, regNO); + } + } + } +} + +void GraphColorRegAllocator::LocalRaForEachDefOperand(const Insn &insn, LocalRegAllocator &localRa, + const BBAssignInfo *bbInfo) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + /* handle def opnd */ + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + auto ®Opnd = static_cast(opnd); + bool isInt = (regOpnd.GetRegisterType() == kRegTyInt); + HandleLocalReg(opnd, localRa, bbInfo, true, isInt); + } +} + +void GraphColorRegAllocator::LocalRaForEachUseOperand(const Insn &insn, LocalRegAllocator &localRa, + const BBAssignInfo *bbInfo) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for 
(uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + HandleLocalReg(*base, localRa, bbInfo, false, true); + } + if (!memOpnd.IsIntactIndexed()) { + HandleLocalReg(*base, localRa, bbInfo, true, true); + } + if (offset != nullptr) { + HandleLocalReg(*offset, localRa, bbInfo, false, true); + } + } else if (md->GetOpndDes(i)->IsRegUse()) { + auto ®Opnd = static_cast(opnd); + bool isInt = (regOpnd.GetRegisterType() == kRegTyInt); + HandleLocalReg(opnd, localRa, bbInfo, false, isInt); + } + } +} + +void GraphColorRegAllocator::LocalRaPrepareBB(BB &bb, LocalRegAllocator &localRa) { + BBAssignInfo *bbInfo = bbRegInfo[bb.GetId()]; + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + /* + * Use reverse operand order, assuming use first then def for allocation. + * need to free the use resource so it can be reused for def. + */ + LocalRaForEachUseOperand(*insn, localRa, bbInfo); + LocalRaForEachDefOperand(*insn, localRa, bbInfo); + } +} + +void GraphColorRegAllocator::LocalRaFinalAssignment(const LocalRegAllocator &localRa, + BBAssignInfo &bbInfo) { + for (const auto &intRegAssignmentMapPair : localRa.GetIntRegAssignmentMap()) { + regno_t regNO = intRegAssignmentMapPair.second; + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "[" << intRegAssignmentMapPair.first << "," << regNO << "],"; + } + /* Might need to get rid of this copy. */ + bbInfo.SetRegMapElem(intRegAssignmentMapPair.first, regNO); + AddCalleeUsed(regNO, kRegTyInt); + } + for (const auto &fpRegAssignmentMapPair : localRa.GetFpRegAssignmentMap()) { + regno_t regNO = fpRegAssignmentMapPair.second; + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "[" << fpRegAssignmentMapPair.first << "," << regNO << "],"; + } + /* Might need to get rid of this copy. */ + bbInfo.SetRegMapElem(fpRegAssignmentMapPair.first, regNO); + AddCalleeUsed(regNO, kRegTyFloat); + } +} + +void GraphColorRegAllocator::LocalRaDebug(const BB &bb, const LocalRegAllocator &localRa) const { + LogInfo::MapleLogger() << "bb " << bb.GetId() << " local ra INT need " << localRa.GetNumIntPregUsed() << " regs\n"; + LogInfo::MapleLogger() << "bb " << bb.GetId() << " local ra FP need " << localRa.GetNumFpPregUsed() << " regs\n"; + LogInfo::MapleLogger() << "\tpotential assignments:"; + for (auto it : localRa.GetIntRegAssignmentMap()) { + LogInfo::MapleLogger() << "[" << it.first << "," << it.second << "],"; + } + for (auto it : localRa.GetFpRegAssignmentMap()) { + LogInfo::MapleLogger() << "[" << it.first << "," << it.second << "],"; + } + LogInfo::MapleLogger() << "\n"; +} + +/* + * When do_allocate is false, it is prepass: + * Traverse each BB, keep track of the number of registers required + * for local registers in the BB. Communicate this to global RA. + * + * When do_allocate is true: + * Allocate local registers for each BB based on unused registers + * from global RA. Spill if no register available. 
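+ *
+ * Both passes walk the same code below; the doAllocate flag selects
+ * between just counting and actually assigning.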
+ */ +void GraphColorRegAllocator::LocalRegisterAllocator(bool doAllocate) { + if (GCRA_DUMP) { + if (doAllocate) { + LogInfo::MapleLogger() << "LRA allocation start\n"; + PrintBBAssignInfo(); + } else { + LogInfo::MapleLogger() << "LRA preprocessing start\n"; + } + } + LocalRegAllocator *localRa = memPool->New(*cgFunc, alloc); + for (auto *bb : bfs->sortedBBs) { + uint32 bbID = bb->GetId(); + + LocalRaInfo *lraInfo = localRegVec[bb->GetId()]; + if (lraInfo == nullptr) { + /* No locals to allocate */ + continue; + } + + localRa->ClearLocalRaInfo(); + bool needLocalRa = LocalRaInitRegSet(*localRa, bbID); + if (!needLocalRa) { + /* Only physical regs in bb, no local ra needed. */ + continue; + } + + if (doAllocate) { + LocalRaInitAllocatableRegs(*localRa, bbID); + } + + LocalRaPrepareBB(*bb, *localRa); + + BBAssignInfo *bbInfo = bbRegInfo[bb->GetId()]; + if (bbInfo == nullptr) { + bbInfo = memPool->New(alloc); + bbRegInfo[bbID] = bbInfo; + bbInfo->InitGlobalAssigned(); + } + bbInfo->SetIntLocalRegsNeeded(localRa->GetNumIntPregUsed()); + bbInfo->SetFpLocalRegsNeeded(localRa->GetNumFpPregUsed()); + + if (doAllocate) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\tbb(" << bb->GetId() << ")final local ra assignments:"; + } + LocalRaFinalAssignment(*localRa, *bbInfo); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\n"; + } + } else if (GCRA_DUMP) { + LocalRaDebug(*bb, *localRa); + } + } +} + +MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const uint64 *conflict, + const std::set &usedMemOpnd, + uint32 size, RegType regType) { + std::set sconflict; + regno_t regNO; + for (uint32 i = 0; i < regBuckets; ++i) { + for (uint32 b = 0; b < kU64; ++b) { + if ((conflict[i] & (1ULL << b)) != 0) { + continue; + } + regNO = i * kU64 + b; + if (regNO >= numVregs) { + break; + } + if (GetLiveRange(regNO) != nullptr) { + (void)sconflict.insert(lrMap[regNO]); + } + } + } + + for (auto *noConflictLr : sconflict) { + if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || noConflictLr->GetSpillSize() != size) { + continue; + } + if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) { + return noConflictLr->GetSpillMem(); + } + } + return nullptr; +} + +MemOperand *GraphColorRegAllocator::GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, + uint32 size, RegType regType) { + regno_t regNO; + for (uint32 i = 0; i < regBuckets; ++i) { + for (uint32 b = 0; b < kU64; ++b) { + if ((conflict[i] & (1ULL << b)) != 0) { + continue; + } + regNO = i * kU64 + b; + if (regNO >= numVregs) { + break; + } + LiveRange *noConflictLr = GetLiveRange(regNO); + if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || noConflictLr->GetSpillSize() != size) { + continue; + } + if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) { + return noConflictLr->GetSpillMem(); + } + } + } + return nullptr; +} + +/* See if any of the non-conflict LR is spilled and use its memOpnd. */ +MemOperand *GraphColorRegAllocator::GetReuseMem(uint32 vregNO, uint32 size, RegType regType) { + if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return nullptr; + } + if (IsLocalReg(vregNO)) { + return nullptr; + } + + LiveRange *lr = lrMap[vregNO]; + const uint64 *conflict; + if (lr->GetSplitLr() != nullptr) { + /* + * For split LR, the vreg liveness is optimized, but for spill location + * the stack location needs to be maintained for the entire LR. 
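+   * Reusing another LR's spill slot could then clobber a live value,
+   * so no reuse is attempted for split LRs.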
+   */
+    return nullptr;
+  } else {
+    conflict = lr->GetBBConflict();
+  }
+
+  std::set<MemOperand*> usedMemOpnd;
+  auto updateMemOpnd = [&usedMemOpnd, this](regno_t regNO) {
+    if (regNO >= numVregs) {
+      return;
+    }
+    LiveRange *lrInner = GetLiveRange(regNO);
+    if (lrInner && lrInner->GetSpillMem() != nullptr) {
+      (void)usedMemOpnd.insert(lrInner->GetSpillMem());
+    }
+  };
+  ForEachRegArrElem(conflict, updateMemOpnd);
+  uint32 regSize = (size <= k32) ? k32 : k64;
+  /*
+   * This is to order the search so that the memOpnd given out is consistent.
+   * When vreg numbers do not change when going through the VtableImpl.mpl file,
+   * this can be simplified.
+   */
+#ifdef CONSISTENT_MEMOPND
+  return GetConsistentReuseMem(conflict, usedMemOpnd, regSize, regType);
+#else /* CONSISTENT_MEMOPND */
+  return GetCommonReuseMem(conflict, usedMemOpnd, regSize, regType);
+#endif /* CONSISTENT_MEMOPND */
+}
+
+MemOperand *GraphColorRegAllocator::GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, AArch64reg regNO,
+                                                bool &isOutOfRange) const {
+  auto *a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+  MemOperand *memOpnd = a64CGFunc->GetOrCreatSpillMem(vregNO);
+  return (a64CGFunc->AdjustMemOperandIfOffsetOutOfRange(memOpnd, vregNO, isDest, insn, regNO, isOutOfRange));
+}
+
+void GraphColorRegAllocator::SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd,
+                                                     uint32 spillIdx, bool needSpill) {
+  if (!needSpill) {
+    return;
+  }
+  auto &regOpnd = static_cast<const RegOperand&>(opnd);
+  uint32 regNO = regOpnd.GetRegisterNumber();
+  LiveRange *lr = lrMap[regNO];
+
+  auto *a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+
+  MemOperand *spillMem = CreateSpillMem(spillIdx, kSpillMemPre);
+  DEBUG_ASSERT(spillMem != nullptr, "spillMem nullptr check");
+
+  uint32 regSize = regOpnd.GetSize();
+  PrimType stype;
+  RegType regType = regOpnd.GetRegisterType();
+  if (regType == kRegTyInt) {
+    stype = (regSize <= k32) ? PTY_i32 : PTY_i64;
+  } else {
+    stype = (regSize <= k32) ?
PTY_f32 : PTY_f64; + } + + if (a64CGFunc->IsImmediateOffsetOutOfRange(*spillMem, k64)) { + regno_t pregNO = R16; + spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*spillMem, k64, + static_cast(pregNO), false, &insn); + } + Insn &stInsn = cgFunc->GetInsnBuilder()->BuildInsn( + a64CGFunc->PickStInsn(spillMem->GetSize(), stype), phyOpnd, *spillMem); + std::string comment = " SPILL for spill vreg: " + std::to_string(regNO) + " op:" + + kOpcodeInfo.GetName(lr->GetOp()); + stInsn.SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, stInsn); +} + +void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, + uint32 spillIdx, bool needSpill) { + if (!needSpill) { + return; + } + + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = lrMap[regNO]; + auto *a64CGFunc = static_cast(cgFunc); + bool isLastInsn = false; + if (insn.GetBB()->GetKind() == BB::kBBIf && insn.GetBB()->IsLastInsn(&insn)) { + isLastInsn = true; + } + + if (lr->GetRematLevel() != rematOff) { + std::string comment = " REMATERIALIZE for spill vreg: " + + std::to_string(regNO); + if (isLastInsn) { + for (auto tgtBB : insn.GetBB()->GetSuccs()) { + std::vector rematInsns = lr->Rematerialize(a64CGFunc, phyOpnd); + for (auto &&remat : rematInsns) { + remat->SetComment(comment); + tgtBB->InsertInsnBegin(*remat); + } + } + } else { + std::vector rematInsns = lr->Rematerialize(a64CGFunc, phyOpnd); + for (auto &&remat : rematInsns) { + remat->SetComment(comment); + insn.GetBB()->InsertInsnAfter(insn, *remat); + } + } + return; + } + + MemOperand *spillMem = CreateSpillMem(spillIdx, kSpillMemPost); + DEBUG_ASSERT(spillMem != nullptr, "spillMem nullptr check"); + + uint32 regSize = regOpnd.GetSize(); + PrimType stype; + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyInt) { + stype = (regSize <= k32) ? PTY_i32 : PTY_i64; + } else { + stype = (regSize <= k32) ? 
PTY_f32 : PTY_f64; + } + + bool isOutOfRange = false; + Insn *nextInsn = insn.GetNextMachineInsn(); + if (a64CGFunc->IsImmediateOffsetOutOfRange(*spillMem, k64)) { + regno_t pregNO = R16; + spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*spillMem, k64, + static_cast(pregNO), true, &insn); + isOutOfRange = true; + } + std::string comment = " RELOAD for spill vreg: " + std::to_string(regNO) + + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + if (isLastInsn) { + for (auto tgtBB : insn.GetBB()->GetSuccs()) { + MOperator mOp = a64CGFunc->PickLdInsn(spillMem->GetSize(), stype); + Insn *newLd = &cgFunc->GetInsnBuilder()->BuildInsn(mOp, phyOpnd, *spillMem); + newLd->SetComment(comment); + tgtBB->InsertInsnBegin(*newLd); + } + } else { + MOperator mOp = a64CGFunc->PickLdInsn(spillMem->GetSize(), stype); + Insn &ldrInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, phyOpnd, *spillMem); + ldrInsn.SetComment(comment); + if (isOutOfRange) { + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(ldrInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, ldrInsn); + } + } else { + insn.GetBB()->InsertInsnAfter(insn, ldrInsn); + } + } +} + +MemOperand *GraphColorRegAllocator::GetSpillOrReuseMem(LiveRange &lr, uint32 regSize, bool &isOutOfRange, Insn &insn, + bool isDef) { + (void)regSize; + MemOperand *memOpnd = nullptr; + if (lr.GetSpillMem() != nullptr) { + /* the saved memOpnd cannot be out-of-range */ + memOpnd = lr.GetSpillMem(); + } else { +#ifdef REUSE_SPILLMEM + memOpnd = GetReuseMem(lr.GetRegNO(), regSize, lr.GetRegType()); + if (memOpnd != nullptr) { + lr.SetSpillMem(*memOpnd); + lr.SetSpillSize((regSize <= k32) ? k32 : k64); + } else { +#endif /* REUSE_SPILLMEM */ + regno_t baseRegNO; + if (!isDef && lr.GetRegNO() == kRegTyInt) { + /* src will use its' spill reg as baseRegister when offset out-of-range + * add x16, x29, #max-offset //out-of-range + * ldr x16, [x16, #offset] //reload + * mov xd, x16 + */ + baseRegNO = lr.GetSpillReg(); + if (baseRegNO > RLAST_INT_REG) { + baseRegNO = R16; + } + } else { + /* dest will use R16 as baseRegister when offset out-of-range + * mov x16, xs + * add x17, x29, #max-offset //out-of-range + * str x16, [x17, #offset] //spill + */ + baseRegNO = R16; + } + DEBUG_ASSERT(baseRegNO != kRinvalid, "invalid base register number"); + memOpnd = GetSpillMem(lr.GetRegNO(), isDef, insn, static_cast(baseRegNO), isOutOfRange); + /* dest's spill reg can only be R15 and R16 () */ + if (isOutOfRange && isDef) { + DEBUG_ASSERT(lr.GetSpillReg() != R16, "can not find valid memopnd's base register"); + } +#ifdef REUSE_SPILLMEM + if (isOutOfRange == 0) { + lr.SetSpillMem(*memOpnd); + lr.SetSpillSize((regSize <= k32) ? k32 : k64); + } + } +#endif /* REUSE_SPILLMEM */ + } + return memOpnd; +} + +/* + * Create spill insn for the operand. + * When need_spill is true, need to spill the spill operand register first + * then use it for the current spill, then reload it again. 
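+ * That save/use/reload sequence is what SpillOperandForSpillPre and
+ * SpillOperandForSpillPost above implement.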
+ */ +Insn *GraphColorRegAllocator::SpillOperand(Insn &insn, const Operand &opnd, bool isDef, + RegOperand &phyOpnd, bool forCall) { + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + uint32 pregNO = phyOpnd.GetRegisterNumber(); + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(pregNO)); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "SpillOperand " << regNO << "\n"; + } + LiveRange *lr = lrMap[regNO]; + bool isForCallerSave = lr->GetSplitLr() == nullptr && lr->GetNumCall() && !isCalleeReg; + uint32 regSize = regOpnd.GetSize(); + bool isOutOfRange = false; + PrimType stype; + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyInt) { + stype = (regSize <= k32) ? PTY_i32 : PTY_i64; + } else { + stype = (regSize <= k32) ? PTY_f32 : PTY_f64; + } + auto *a64CGFunc = static_cast(cgFunc); + + Insn *spillDefInsn = nullptr; + if (isDef) { + if (lr->GetRematLevel() == rematOff) { + lr->SetSpillReg(pregNO); + Insn *nextInsn = insn.GetNextMachineInsn(); + MemOperand *memOpnd = GetSpillOrReuseMem(*lr, regSize, isOutOfRange, insn, forCall ? false : true); + spillDefInsn = &cgFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickStInsn(regSize, stype), phyOpnd, *memOpnd); + spillDefInsn->SetIsSpill(); + std::string comment = " SPILL vreg: " + std::to_string(regNO) + " op:" + + kOpcodeInfo.GetName(lr->GetOp()); + if (isForCallerSave) { + comment += " for caller save in BB " + std::to_string(insn.GetBB()->GetId()); + } + spillDefInsn->SetComment(comment); + if (forCall) { + insn.GetBB()->InsertInsnBefore(insn, *spillDefInsn); + } else if (isOutOfRange) { + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*spillDefInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *spillDefInsn); + } + } else if (insn.GetNext() && insn.GetNext()->GetMachineOpcode() == MOP_clinit_tail) { + insn.GetBB()->InsertInsnAfter(*insn.GetNext(), *spillDefInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *spillDefInsn); + } + } + + if ((insn.GetMachineOpcode() != MOP_xmovkri16) && (insn.GetMachineOpcode() != MOP_wmovkri16)) { + return spillDefInsn; + } + } + if (insn.GetMachineOpcode() == MOP_clinit_tail) { + return nullptr; + } + Insn *nextInsn = insn.GetNextMachineInsn(); + lr->SetSpillReg(pregNO); + + std::vector spillUseInsns; + std::string comment; + if (lr->GetRematLevel() != rematOff) { + spillUseInsns = lr->Rematerialize(a64CGFunc, phyOpnd); + comment = " REMATERIALIZE vreg: " + std::to_string(regNO); + } else { + MemOperand *memOpnd = GetSpillOrReuseMem(*lr, regSize, isOutOfRange, insn, + forCall ? true : false); + Insn &spillUseInsn = cgFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickLdInsn(regSize, stype), phyOpnd, *memOpnd); + spillUseInsn.SetIsReload(); + spillUseInsns.push_back(&spillUseInsn); + comment = " RELOAD vreg: " + std::to_string(regNO) + " op:" + + kOpcodeInfo.GetName(lr->GetOp()); + } + if (isForCallerSave) { + comment += " for caller save in BB " + std::to_string(insn.GetBB()->GetId()); + } + for (auto &&spillUseInsn : spillUseInsns) { + spillUseInsn->SetComment(comment); + if (forCall) { + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*spillUseInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *spillUseInsn); + } + } else { + insn.GetBB()->InsertInsnBefore(insn, *spillUseInsn); + } + } + if (spillDefInsn != nullptr) { + return spillDefInsn; + } + return &insn; +} + +/* Try to find available reg for spill. 
*/ +bool GraphColorRegAllocator::SetAvailableSpillReg(std::unordered_set &cannotUseReg, LiveRange &lr, + uint64 &usedRegMask) { + bool isInt = (lr.GetRegType() == kRegTyInt); + regno_t base = isInt ? R0 : V0; + uint32 pregInterval = isInt ? 0 : (V0 - R30); + MapleSet &callerRegSet = isInt ? intCallerRegSet : fpCallerRegSet; + MapleSet &calleeRegSet = isInt ? intCalleeRegSet : fpCalleeRegSet; + + for (const auto &it : callerRegSet) { + regno_t spillReg = it + base; + if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) { + lr.SetAssignedRegNO(spillReg); + usedRegMask |= 1ULL << (spillReg - pregInterval); + return true; + } + } + for (const auto &it : calleeRegSet) { + regno_t spillReg = it + base; + if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) { + lr.SetAssignedRegNO(spillReg); + usedRegMask |= 1ULL << (spillReg - pregInterval); + return true; + } + } + return false; +} + +void GraphColorRegAllocator::CollectCannotUseReg(std::unordered_set &cannotUseReg, const LiveRange &lr, + Insn &insn) { + /* Find the bb in the conflict LR that actually conflicts with the current bb. */ + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + if (lr.GetPregveto(regNO)) { + (void)cannotUseReg.insert(regNO); + } + } + auto updateCannotUse = [&insn, &cannotUseReg, this](regno_t regNO) { + LiveRange *conflictLr = lrMap[regNO]; + /* + * conflictLr->GetAssignedRegNO() might be zero + * caller save will be inserted so the assigned reg can be released actually + */ + if ((conflictLr->GetAssignedRegNO() > 0) && IsBitArrElemSet(conflictLr->GetBBMember(), insn.GetBB()->GetId())) { + if (!AArch64Abi::IsCalleeSavedReg(static_cast(conflictLr->GetAssignedRegNO())) && + conflictLr->GetNumCall() && !conflictLr->GetProcessed()) { + return; + } + (void)cannotUseReg.insert(conflictLr->GetAssignedRegNO()); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateCannotUse); +#ifdef USE_LRA + if (!doLRA) { + return; + } + BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()]; + if (bbInfo != nullptr) { + for (const auto ®MapPair : bbInfo->GetRegMap()) { + (void)cannotUseReg.insert(regMapPair.second); + } + } +#endif /* USE_LRA */ +} + +regno_t GraphColorRegAllocator::PickRegForSpill(uint64 &usedRegMask, RegType regType, uint32 spillIdx, + bool &needSpillLr) { + regno_t base; + regno_t spillReg; + uint32 pregInterval; + bool isIntReg = (regType == kRegTyInt); + if (isIntReg) { + base = R0; + pregInterval = 0; + } else { + base = V0; + pregInterval = V0 - R30; + } + + if (JAVALANG) { + /* Use predetermined spill register */ + MapleSet &spillRegSet = isIntReg ? intSpillRegSet : fpSpillRegSet; + DEBUG_ASSERT(spillIdx < spillRegSet.size(), "spillIdx large than spillRegSet.size()"); + auto regNumIt = spillRegSet.begin(); + for (; spillIdx > 0; --spillIdx) { + ++regNumIt; + } + spillReg = *regNumIt + base; + return spillReg; + } + + /* Temporary find a unused reg to spill */ + uint32 maxPhysRegNum = isIntReg ? 
MaxIntPhysRegNum() : MaxFloatPhysRegNum(); + for (spillReg = (maxPhysRegNum + base); spillReg > base; --spillReg) { + if (spillReg >= k64BitSize) { + spillReg = k64BitSize - 1; + } + if ((usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) { + usedRegMask |= (1ULL << (spillReg - pregInterval)); + needSpillLr = true; + return spillReg; + } + } + + DEBUG_ASSERT(false, "can not find spillReg"); + return 0; +} + +/* return true if need extra spill */ +bool GraphColorRegAllocator::SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, uint64 &usedRegMask, + bool isDef) { + std::unordered_set cannotUseReg; + /* SPILL COALESCE */ + if (!isDef && (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr)) { + auto &ropnd = static_cast(insn.GetOperand(0)); + if (ropnd.IsPhysicalRegister()) { + lr.SetAssignedRegNO(ropnd.GetRegisterNumber()); + return false; + } + } + + CollectCannotUseReg(cannotUseReg, lr, insn); + + if (SetAvailableSpillReg(cannotUseReg, lr, usedRegMask)) { + return false; + } + + bool needSpillLr = false; + if (!lr.GetAssignedRegNO()) { + /* + * All regs are assigned and none are free. + * Pick a reg to spill and reuse for this spill. + * Need to make sure the reg picked is not assigned to this insn, + * else there will be conflict. + */ + RegType regType = lr.GetRegType(); + regno_t spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr); + if (insn.GetMachineOpcode() == MOP_lazy_ldr && spillReg == R17) { + CHECK_FATAL(false, "register IP1(R17) may be changed when lazy_ldr"); + } + lr.SetAssignedRegNO(spillReg); + } + return needSpillLr; +} + +RegOperand *GraphColorRegAllocator::GetReplaceOpndForLRA(Insn &insn, const Operand &opnd, uint32 &spillIdx, + uint64 &usedRegMask, bool isDef) { + auto ®Opnd = static_cast(opnd); + uint32 vregNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()]; + if (bbInfo == nullptr) { + return nullptr; + } + auto regIt = bbInfo->GetRegMap().find(vregNO); + if (regIt != bbInfo->GetRegMap().end()) { + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(regIt->second), regOpnd.GetSize(), regType); + return &phyOpnd; + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "spill vreg " << vregNO << "\n"; + } + regno_t spillReg; + bool needSpillLr = false; + if (insn.IsBranch() || insn.IsCall() || (insn.GetMachineOpcode() == MOP_clinit_tail) || + (insn.GetNext() && isDef && insn.GetNext()->GetMachineOpcode() == MOP_clinit_tail)) { + spillReg = R16; + } else { + /* + * use the reg that exclude livein/liveout/bbInfo->regMap + * Need to make sure the reg picked is not assigned to this insn, + * else there will be conflict. 
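+   * PickRegForSpill records the chosen reg in usedRegMask, so later
+   * operands of the same insn cannot pick it again.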
+   */
+    spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr);
+    if (insn.GetMachineOpcode() == MOP_lazy_ldr && spillReg == R17) {
+      CHECK_FATAL(false, "register IP1(R17) may be changed when lazy_ldr");
+    }
+    AddCalleeUsed(spillReg, regType);
+    if (GCRA_DUMP) {
+      LogInfo::MapleLogger() << "\tassigning lra spill reg " << spillReg << "\n";
+    }
+  }
+  RegOperand &phyOpnd = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreatePhysicalRegisterOperand(
+      static_cast<AArch64reg>(spillReg), regOpnd.GetSize(), regType);
+  SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr);
+  Insn *spill = SpillOperand(insn, regOpnd, isDef, phyOpnd);
+  if (spill != nullptr) {
+    SpillOperandForSpillPost(*spill, regOpnd, phyOpnd, spillIdx, needSpillLr);
+  }
+  ++spillIdx;
+  return &phyOpnd;
+}
+
+/* get the spill reg and check whether an extra spill is needed */
+bool GraphColorRegAllocator::GetSpillReg(Insn &insn, LiveRange &lr, const uint32 &spillIdx,
+                                         uint64 &usedRegMask, bool isDef) {
+  bool needSpillLr = false;
+  /*
+   * Find a spill reg for the BB among the interfering LRs.
+   * Without LRA, this info is very inaccurate. It will falsely interfere
+   * with all locals, even those the spill might not interfere with.
+   * For now, every instance of the spill requires a brand new reg assignment.
+   */
+  if (GCRA_DUMP) {
+    LogInfo::MapleLogger() << "LR-regNO " << lr.GetRegNO() << " spilled, finding a spill reg\n";
+  }
+  if (insn.IsBranch() || insn.IsCall() || (insn.GetMachineOpcode() == MOP_clinit_tail) ||
+      (insn.GetNext() && isDef && insn.GetNext()->GetMachineOpcode() == MOP_clinit_tail)) {
+    /*
+     * When a cond branch reg is spilled, the value cannot be restored
+     * after the branch, since the branch target can be reached from other branches.
+     * To do it properly would require creating an intermediate BB for the reload.
+     * Use x16; it is taken out of the available set since it is used as a global in the system.
+     */
+    lr.SetAssignedRegNO(R16);
+  } else {
+    lr.SetAssignedRegNO(0);
+    needSpillLr = SetRegForSpill(lr, insn, spillIdx, usedRegMask, isDef);
+    AddCalleeUsed(lr.GetAssignedRegNO(), lr.GetRegType());
+  }
+  return needSpillLr;
+}
+
+// find the prev use/def after the prev call
+bool GraphColorRegAllocator::EncountPrevRef(const BB &pred, LiveRange &lr, bool isDef,
+                                            std::vector<bool> &visitedMap) {
+  if (!visitedMap[pred.GetId()] && lr.FindInLuMap(pred.GetId()) != lr.EndOfLuMap()) {
+    LiveUnit *lu = lr.GetLiveUnitFromLuMap(pred.GetId());
+    if (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall()) {
+      MapleMap<uint32, uint32> refs = lr.GetRefs(pred.GetId());
+      auto it = refs.rbegin();
+      bool findPrevRef = (it->second & kIsCall) == 0;
+      return findPrevRef;
+    }
+    if (lu->HasCall()) {
+      return false;
+    }
+  }
+  visitedMap[pred.GetId()] = true;
+  bool found = true;
+  for (auto predBB : pred.GetPreds()) {
+    if (!visitedMap[predBB->GetId()]) {
+      found &= EncountPrevRef(*predBB, lr, isDef, visitedMap);
+    }
+  }
+  return found;
+}
+
+bool GraphColorRegAllocator::FoundPrevBeforeCall(Insn &insn, LiveRange &lr, bool isDef) {
+  bool hasFind = true;
+  std::vector<bool> visitedMap(bbVec.size() + 1, false);
+  for (auto pred : insn.GetBB()->GetPreds()) {
+    hasFind &= EncountPrevRef(*pred, lr, isDef, visitedMap);
+    if (!hasFind) {
+      return false;
+    }
+  }
+  return !insn.GetBB()->GetPreds().empty();
+}
+
+// find next def before next call ?
and no next use +bool GraphColorRegAllocator::EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector& visitedMap) { + if (lr.FindInLuMap(succ.GetId()) != lr.EndOfLuMap()) { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(succ.GetId()); + bool findNextDef = false; + if (lu->GetDefNum() || lu->HasCall()) { + MapleMap refs = lr.GetRefs(succ.GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if ((it->second & kIsDef) != 0) { + findNextDef = true; + break; + } + if ((it->second & kIsCall) != 0) { + break; + } + if ((it->second & kIsUse) != 0) { + continue; + } + } + return findNextDef; + } + if (lu->HasCall()) { + return false; + } + } + visitedMap[succ.GetId()] = true; + bool found = true; + for (auto succBB: succ.GetSuccs()) { + if (!visitedMap[succBB->GetId()]) { + found &= EncountNextRef(*succBB, lr, isDef, visitedMap); + if (!found) { + return false; + } + } + } + return found; +} + +bool GraphColorRegAllocator::FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef) { + bool haveFind = true; + std::vector visitedMap(bbVec.size() + 1, false); + for (auto succ: insn.GetBB()->GetSuccs()) { + haveFind &= EncountNextRef(*succ, lr, isDef, visitedMap); + if (!haveFind) { + return false; + } + } + return insn.GetBB()->GetSuccs().size() > 0; +} + +bool GraphColorRegAllocator::HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); + bool findPrevRef = false; + if (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall()) { + MapleMap refs = lr.GetRefs(insn.GetBB()->GetId()); + for (auto it = refs.rbegin(); it != refs.rend(); ++it) { + if (it->first >= insn.GetId()) { + continue; + } + if ((it->second & kIsCall) != 0) { + contSearch = false; + break; + } + if (((it->second & kIsUse) != 0) || ((it->second & kIsDef) != 0)) { + findPrevRef = true; + contSearch = false; + break; + } + } + } + return findPrevRef; +} + +bool GraphColorRegAllocator::HaveNextDefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); + bool findNextDef = false; + if (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall()) { + MapleMap refs = lr.GetRefs(insn.GetBB()->GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if (it->first <= insn.GetId()) { + continue; + } + if ((it->second & kIsCall) != 0) { + contSearch = false; + break; + } + if ((it->second & kIsDef) != 0) { + findNextDef = true; + contSearch = false; + } + } + } + return findNextDef; +} + +bool GraphColorRegAllocator::NeedCallerSave(Insn &insn, LiveRange &lr, bool isDef) { + if (doLRA) { + return true; + } + if (lr.HasDefUse()) { + return true; + } + + bool contSearch = true; + bool needed = true; + if (isDef) { + needed = !HaveNextDefInCurBB(insn, lr, contSearch); + } else { + needed = !HavePrevRefInCurBB(insn, lr, contSearch); + } + if (!contSearch) { + return needed; + } + + if (isDef) { + needed = true; + } else { + needed = !FoundPrevBeforeCall(insn, lr, isDef); + } + return needed; +} + +RegOperand *GraphColorRegAllocator::GetReplaceOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, + uint64 &usedRegMask, bool isDef) { + if (!opnd.IsRegister()) { + return nullptr; + } + auto ®Opnd = static_cast(opnd); + + uint32 vregNO = regOpnd.GetRegisterNumber(); + if (vregNO == RFP) { + seenFP = true; + } + RegType regType = regOpnd.GetRegisterType(); + if (vregNO < kAllRegNum) { + return nullptr; + } + if (IsUnconcernedReg(regOpnd)) { + return nullptr; + } + +#ifdef USE_LRA + 
if (doLRA && IsLocalReg(vregNO)) { + return GetReplaceOpndForLRA(insn, opnd, spillIdx, usedRegMask, isDef); + } +#endif /* USE_LRA */ + + DEBUG_ASSERT(vregNO < numVregs, "index out of range of MapleVector in GraphColorRegAllocator::GetReplaceOpnd"); + LiveRange *lr = lrMap[vregNO]; + + bool isSplitPart = false; + bool needSpillLr = false; + if (lr->GetSplitLr() && IsBitArrElemSet(lr->GetSplitLr()->GetBBMember(), insn.GetBB()->GetId())) { + isSplitPart = true; + } + + if (lr->IsSpilled() && !isSplitPart) { + needSpillLr = GetSpillReg(insn, *lr, spillIdx, usedRegMask, isDef); + } + + regno_t regNO; + if (isSplitPart) { + regNO = lr->GetSplitLr()->GetAssignedRegNO(); + } else { + regNO = lr->GetAssignedRegNO(); + } + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(regNO)); + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(regNO), opnd.GetSize(), regType); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "replace R" << vregNO << " with R" << (regNO - R0) << "\n"; + } + + insn.AppendComment(" [R" + std::to_string(vregNO) + "] "); + + if (isSplitPart && (isCalleeReg || lr->GetSplitLr()->GetNumCall() == 0)) { + if (isDef) { + SpillOperand(insn, opnd, isDef, phyOpnd); + ++spillIdx; + } else { + if (lr->GetSplitLr()->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload()) { + SpillOperand(insn, opnd, isDef, phyOpnd); + ++spillIdx; + } + } + return &phyOpnd; + } + + bool needCallerSave = false; + if (lr->GetNumCall() && !isCalleeReg) { + if (isDef) { + needCallerSave = NeedCallerSave(insn, *lr, isDef) && lr->GetRematLevel() == rematOff; + } else { + needCallerSave = !lr->GetProcessed(); + } + } + + if (lr->IsSpilled() || (isSplitPart && (lr->GetSplitLr()->GetNumCall() != 0)) || needCallerSave || + (!isSplitPart && !(lr->IsSpilled()) && lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload())) { + SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); + Insn *spill = SpillOperand(insn, opnd, isDef, phyOpnd); + if (spill != nullptr) { + SpillOperandForSpillPost(*spill, regOpnd, phyOpnd, spillIdx, needSpillLr); + } + ++spillIdx; + } + + return &phyOpnd; +} + +void GraphColorRegAllocator::MarkUsedRegs(Operand &opnd, uint64 &usedRegMask) { + auto ®Opnd = static_cast(opnd); + uint32 pregInterval = (regOpnd.GetRegisterType() == kRegTyInt) ? 
0 : (V0 - R30); + uint32 vregNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(vregNO); + if (lr != nullptr) { + if (lr->IsSpilled()) { + lr->SetAssignedRegNO(0); + } + if (lr->GetAssignedRegNO() != 0) { + usedRegMask |= (1ULL << (lr->GetAssignedRegNO() - pregInterval)); + } + if (lr->GetSplitLr() && lr->GetSplitLr()->GetAssignedRegNO()) { + usedRegMask |= (1ULL << (lr->GetSplitLr()->GetAssignedRegNO() - pregInterval)); + } + } +} + +uint64 GraphColorRegAllocator::FinalizeRegisterPreprocess(FinalizeRegisterInfo &fInfo, + const Insn &insn, bool &needProcess) { + uint64 usedRegMask = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + bool hasVirtual = false; + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + DEBUG_ASSERT(md->GetOpndDes(i) != nullptr, "pointer is null in GraphColorRegAllocator::FinalizeRegisters"); + + if (opnd.IsList()) { + if (insn.GetMachineOpcode() != MOP_asm) { + continue; + } + hasVirtual = true; + if (i == kAsmOutputListOpnd) { + fInfo.SetDefOperand(opnd, static_cast(i)); + } + if (i == kAsmInputListOpnd) { + fInfo.SetUseOperand(opnd, static_cast(i)); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + fInfo.SetBaseOperand(opnd, static_cast(i)); + MarkUsedRegs(*base, usedRegMask); + hasVirtual |= static_cast(base)->IsVirtualRegister(); + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + fInfo.SetOffsetOperand(opnd); + MarkUsedRegs(*offset, usedRegMask); + hasVirtual |= static_cast(offset)->IsVirtualRegister(); + } + } else { + bool isDef = md->GetOpndDes(i)->IsRegDef(); + if (isDef) { + fInfo.SetDefOperand(opnd, static_cast(i)); + /* + * Need to exclude def also, since it will clobber the result when the + * original value is reloaded. 
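+       * MarkUsedRegs below adds the def's assigned physical reg to
+       * usedRegMask for exactly this reason.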
+ */ + hasVirtual |= static_cast(opnd).IsVirtualRegister(); + MarkUsedRegs(opnd, usedRegMask); + } else { + fInfo.SetUseOperand(opnd, static_cast(i)); + if (opnd.IsRegister()) { + hasVirtual |= static_cast(opnd).IsVirtualRegister(); + MarkUsedRegs(opnd, usedRegMask); + } + } + } + } /* operand */ + needProcess = hasVirtual; + return usedRegMask; +} + +void GraphColorRegAllocator::GenerateSpillFillRegs(const Insn &insn) { + static regno_t intRegs[kSpillMemOpndNum] = { R10, R11, R12, R13 }; // R9 is used for large stack offset temp + static regno_t fpRegs[kSpillMemOpndNum] = { V16, V17, V18, V19 }; + uint32 opndNum = insn.GetOperandSize(); + std::set defPregs; + std::set usePregs; + std::vector defLrs; + std::vector useLrs; + if (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr) { + RegOperand &opnd1 = static_cast(insn.GetOperand(1)); + RegOperand &opnd0 = static_cast(insn.GetOperand(0)); + if (opnd1.GetRegisterNumber() < R20 && opnd0.GetRegisterNumber() >= kAllRegNum) { + LiveRange *lr = lrMap[opnd0.GetRegisterNumber()]; + if (lr->IsSpilled()) { + lr->SetSpillReg(opnd1.GetRegisterNumber()); + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no spill reg in GenerateSpillFillRegs"); + return; + } + } + if (opnd0.GetRegisterNumber() < R20 && opnd1.GetRegisterNumber() >= kAllRegNum) { + LiveRange *lr = lrMap[opnd1.GetRegisterNumber()]; + if (lr->IsSpilled()) { + lr->SetSpillReg(opnd0.GetRegisterNumber()); + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no spill reg in GenerateSpillFillRegs"); + return; + } + } + } + const InsnDesc *md = insn.GetDesc(); + bool isIndexedMemOp = false; + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand *opnd = &insn.GetOperand(opndIdx); + if (opnd == nullptr) { + continue; + } + if (opnd->IsList()) { + // call parameters + } else if (opnd->IsMemoryAccessOperand()) { + auto *memopnd = static_cast(opnd); + if (memopnd->GetIndexOpt() == MemOperand::kPreIndex || + memopnd->GetIndexOpt() == MemOperand::kPostIndex) { + isIndexedMemOp = true; + } + auto *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && !IsUnconcernedReg(*base)) { + if (!memopnd->IsIntactIndexed()) { + if (base->IsPhysicalRegister()) { + defPregs.insert(base->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[base->GetRegisterNumber()]; + if (lr->IsSpilled()) { + defLrs.emplace_back(lr); + } + } + } + if (base->IsPhysicalRegister()) { + usePregs.insert(base->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[base->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr) { + if (offset->IsPhysicalRegister()) { + usePregs.insert(offset->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[offset->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + } else if (opnd->IsRegister()) { + bool isDef = md->GetOpndDes(static_cast(opndIdx))->IsRegDef(); + bool isUse = md->GetOpndDes(static_cast(opndIdx))->IsRegUse(); + RegOperand *ropnd = static_cast(opnd); + if (IsUnconcernedReg(*ropnd)) { + continue; + } + if (ropnd != nullptr) { + if (isUse) { + if (ropnd->IsPhysicalRegister()) { + usePregs.insert(ropnd->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[ropnd->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + if (isDef) { + if (ropnd->IsPhysicalRegister()) { + defPregs.insert(ropnd->GetRegisterNumber()); + } else { + LiveRange *lr = 
lrMap[ropnd->GetRegisterNumber()]; + if (lr->IsSpilled()) { + defLrs.emplace_back(lr); + } + } + } + } + } + } + auto comparator = [=](const LiveRange *lr1, const LiveRange *lr2) -> bool { + return lr1->GetID() > lr2->GetID(); + }; + std::sort(useLrs.begin(), useLrs.end(), comparator); + for (auto lr: useLrs) { + lr->SetID(insn.GetId()); + RegType rtype = lr->GetRegType(); + regno_t firstSpillReg = rtype == kRegTyInt ? intRegs[0] : fpRegs[0]; + if (lr->GetSpillReg() != 0 && lr->GetSpillReg() < firstSpillReg && lr->GetPregveto(lr->GetSpillReg())) { + lr->SetSpillReg(0); + } + if (lr->GetSpillReg() != 0 && lr->GetSpillReg() >= firstSpillReg && + usePregs.find(lr->GetSpillReg()) == usePregs.end()) { + usePregs.insert(lr->GetSpillReg()); + continue; + } else { + lr->SetSpillReg(0); + } + for (uint i = 0; i < kSpillMemOpndNum; i++) { + regno_t preg = rtype == kRegTyInt ? intRegs[i] : fpRegs[i]; + if (usePregs.find(preg) == usePregs.end()) { + lr->SetSpillReg(preg); + usePregs.insert(preg); + break; + } + } + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no reg"); + } + size_t spillRegIdx; + if (isIndexedMemOp) { + spillRegIdx = useLrs.size(); + } else { + spillRegIdx = 0; + } + for (auto lr: defLrs) { + lr->SetID(insn.GetId()); + RegType rtype = lr->GetRegType(); + regno_t firstSpillReg = rtype == kRegTyInt ? intRegs[0] : fpRegs[0]; + if (lr->GetSpillReg() != 0) { + if (lr->GetSpillReg() < firstSpillReg && lr->GetPregveto(lr->GetSpillReg())) { + lr->SetSpillReg(0); + } + if (lr->GetSpillReg() >= firstSpillReg && defPregs.find(lr->GetSpillReg()) != defPregs.end()) { + lr->SetSpillReg(0); + } + } + if (lr->GetSpillReg() != 0) { + continue; + } + for (; spillRegIdx < kSpillMemOpndNum; spillRegIdx++) { + regno_t preg = rtype == kRegTyInt ? intRegs[spillRegIdx] : fpRegs[spillRegIdx]; + if (defPregs.find(preg) == defPregs.end()) { + lr->SetSpillReg(preg); + defPregs.insert(preg); + break; + } + } + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no reg"); + } +} + +RegOperand *GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, Insn &insn, uint32 spillCnt, bool isdef) { + regno_t vregno = opnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(vregno); + if (lr != nullptr && lr->IsSpilled()) { + AArch64CGFunc *a64cgfunc = static_cast(cgFunc); + uint32 bits = opnd.GetSize(); + if (bits < k32BitSize) { + bits = k32BitSize; + } + if (cgFunc->IsExtendReg(vregno)) { + bits = k64BitSize; + } + regno_t spreg = 0; + RegType rtype = lr->GetRegType(); + spreg = lr->GetSpillReg(); + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no reg in CreateSpillFillCode"); + RegOperand *regopnd = + &a64cgfunc->GetOrCreatePhysicalRegisterOperand(static_cast(spreg), opnd.GetSize(), rtype); + + if (lr->GetRematLevel() != rematOff) { + if (isdef) { + return nullptr; + } else { + std::vector rematInsns = lr->Rematerialize(a64cgfunc, *static_cast(regopnd)); + for (auto &&remat : rematInsns) { + std::string comment = " REMATERIALIZE color vreg: " + + std::to_string(vregno); + remat->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *remat); + } + return regopnd; + } + } + + bool isOutOfRange = false; + Insn *nextInsn = insn.GetNextMachineInsn(); + MemOperand *loadmem = GetSpillOrReuseMem(*lr, opnd.GetSize(), isOutOfRange, + insn, isdef); + PrimType pty = + (lr->GetRegType() == kRegTyInt) ? ((bits > k32BitSize) ? PTY_i64 : PTY_i32) + : ((bits > k32BitSize) ? 
PTY_f64 : PTY_f32); + CHECK_FATAL(spillCnt < kSpillMemOpndNum, "spill count exceeded"); + Insn *memInsn; + if (isdef) { + memInsn = &cgFunc->GetInsnBuilder()->BuildInsn(a64cgfunc->PickStInsn(bits, pty), *regopnd, *loadmem); + memInsn->SetIsSpill(); + std::string comment = " SPILLcolor vreg: " + std::to_string(vregno) + + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + memInsn->SetComment(comment); + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*memInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *memInsn); + } + } else { + memInsn = &cgFunc->GetInsnBuilder()->BuildInsn(a64cgfunc->PickLdInsn(bits, pty), *regopnd, *loadmem); + memInsn->SetIsReload(); + std::string comment = " RELOADcolor vreg: " + std::to_string(vregno) + + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + memInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *memInsn); + } + return regopnd; + } + return nullptr; +} + +bool GraphColorRegAllocator::SpillLiveRangeForSpills() { + bool done = false; + for (uint32_t bbIdx = 0; bbIdx < bfs->sortedBBs.size(); bbIdx++) { + BB *bb = bfs->sortedBBs[bbIdx]; + FOR_BB_INSNS(insn, bb) { + uint32 spillCnt; + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + continue; + } + spillCnt = 0; + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + GenerateSpillFillRegs(*insn); + for (uint32 i = 0; i < opndNum; ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd == nullptr) { + continue; + } + if (opnd->IsList()) { + // call parameters + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *newmemopnd = nullptr; + auto *memopnd = static_cast(opnd); + auto *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + RegOperand *replace = CreateSpillFillCode(*base, *insn, spillCnt); + if (!memopnd->IsIntactIndexed()) { + (void)CreateSpillFillCode(*base, *insn, spillCnt, true); + } + if (replace != nullptr) { + spillCnt++; + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + newmemopnd->SetBaseRegister(*replace); + insn->SetOperand(i, *newmemopnd); + done = true; + } + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + RegOperand *replace = CreateSpillFillCode(*offset, *insn, spillCnt); + if (replace != nullptr) { + spillCnt++; + if (newmemopnd == nullptr) { + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + } + newmemopnd->SetIndexRegister(*replace); + insn->SetOperand(i, *newmemopnd); + done = true; + } + } + } else if (opnd->IsRegister()) { + bool isdef = md->opndMD[i]->IsRegDef(); + bool isuse = md->opndMD[i]->IsRegUse(); + RegOperand *replace = CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, isdef); + if (isuse && isdef) { + (void)CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, false); + } + if (replace != nullptr) { + if (!isdef) { + spillCnt++; + } + insn->SetOperand(i, *replace); + done = true; + } + } + } + } + } + return done; +} + +static bool ReloadAtCallee(CgOccur *occ) { + auto *defOcc = occ->GetDef(); + if (defOcc == nullptr || defOcc->GetOccType() != kOccStore) { + return false; + } + return static_cast(defOcc)->Reload(); +} + +void CallerSavePre::DumpWorkCandAndOcc() { + if (workCand->GetTheOperand()->IsRegister()) { + LogInfo::MapleLogger() << "Cand R"; + LogInfo::MapleLogger() << static_cast(workCand->GetTheOperand())->GetRegisterNumber() << '\n'; + } else { + LogInfo::MapleLogger() << "Cand Index" << 
workCand->GetIndex() << '\n'; + } + for (CgOccur *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } +} + +void CallerSavePre::CodeMotion() { + constexpr uint32 limitNum = UINT32_MAX; + uint32 cnt = 0; + for (auto *occ : allOccs) { + if (occ->GetOccType() == kOccUse) { + ++cnt; + beyondLimit |= (cnt == limitNum); + if (!beyondLimit && dump) { + LogInfo::MapleLogger() << "opt use occur: "; + occ->Dump(); + } + } + if (occ->GetOccType() == kOccUse && + (beyondLimit || (static_cast(occ)->Reload() && !ReloadAtCallee(occ)))) { + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd); + continue; + } + if (occ->GetOccType() == kOccPhiopnd && static_cast(occ)->Reload() && !ReloadAtCallee(occ)) { + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + Insn *insn = occ->GetBB()->GetLastInsn(); + if (insn == nullptr) { + insn = &(static_cast(func)->CreateCommentInsn("reload caller save register")); + occ->GetBB()->AppendInsn(*insn); + } + auto defOcc = occ->GetDef(); + bool forCall = (defOcc != nullptr && insn == defOcc->GetInsn()); + (void)regAllocator->SpillOperand(*insn, *occ->GetOperand(), false, phyOpnd, forCall); + continue; + } + if (occ->GetOccType() == kOccStore && static_cast(occ)->Reload()) { + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd, true); + continue; + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() << " after codemotion ===========\n"; + DumpWorkCandAndOcc(); + func->DumpCFGToDot("raCodeMotion-"); + } +} + +void CallerSavePre::UpdateLoadSite(CgOccur *occ) { + if (occ == nullptr) { + return; + } + auto *defOcc = occ->GetDef(); + if (occ->GetOccType() == kOccUse) { + defOcc = static_cast(occ)->GetPrevVersionOccur(); + } + if (defOcc == nullptr) { + return; + } + switch (defOcc->GetOccType()) { + case kOccDef: + break; + case kOccUse: + UpdateLoadSite(defOcc); + return; + case kOccStore: { + auto *storeOcc = static_cast(defOcc); + if (storeOcc->Reload()) { + break; + } + switch (occ->GetOccType()) { + case kOccUse: { + static_cast(occ)->SetReload(true); + break; + } + case kOccPhiopnd: { + static_cast(occ)->SetReload(true); + break; + } + default: { + CHECK_FATAL(false, "must not be here"); + } + } + return; + } + case kOccPhiocc: { + auto *phiOcc = static_cast(defOcc); + if (phiOcc->IsFullyAvailable()) { + break; + } + if (!phiOcc->IsDownSafe() || phiOcc->IsNotAvailable()) { + switch (occ->GetOccType()) { + case kOccUse: { + static_cast(occ)->SetReload(true); + break; + } + case kOccPhiopnd: { + static_cast(occ)->SetReload(true); + break; + } + default: { + CHECK_FATAL(false, "must not be here"); + } + } + return; + } + + if (defOcc->Processed()) { + return; + } + defOcc->SetProcessed(true); + for (auto *opndOcc : phiOcc->GetPhiOpnds()) { + UpdateLoadSite(opndOcc); + } + return; + } + default: { + CHECK_FATAL(false, "NIY"); + break; + } + } +} + +void 
CallerSavePre::CalLoadSites() { + for (auto *occ : allOccs) { + if (occ->GetOccType() == kOccUse) { + UpdateLoadSite(occ); + } + } + std::vector availableDef(classCount, nullptr); + for (auto *occ : allOccs) { + auto classID = static_cast(occ->GetClassID()); + switch (occ->GetOccType()) { + case kOccDef: + availableDef[classID] = occ; + break; + case kOccStore: { + if (static_cast(occ)->Reload()) { + availableDef[classID] = occ; + } else { + availableDef[classID] = nullptr; + } + break; + } + case kOccPhiocc: { + auto *phiOcc = static_cast(occ); + if (!phiOcc->IsNotAvailable() && phiOcc->IsDownSafe()) { + availableDef[classID] = occ; + } else { + availableDef[classID] = nullptr; + } + break; + } + case kOccUse: { + auto *useOcc = static_cast(occ); + if (useOcc->Reload()) { + auto *availDef = availableDef[classID]; + if (availDef != nullptr && dom->Dominate(*availDef->GetBB(), *useOcc->GetBB())) { + useOcc->SetReload(false); + } else { + availableDef[classID] = useOcc; + } + } + break; + } + case kOccPhiopnd: { + auto *phiOpnd = static_cast(occ); + if (phiOpnd->Reload()) { + auto *availDef = availableDef[classID]; + if (availDef != nullptr && dom->Dominate(*availDef->GetBB(), *phiOpnd->GetBB())) { + phiOpnd->SetReload(false); + } else { + availableDef[classID] = phiOpnd; + } + } + break; + } + case kOccExit: + break; + default: + CHECK_FATAL(false, "not supported occur type"); + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() + << " after CalLoadSite===================\n"; + DumpWorkCandAndOcc(); + LogInfo::MapleLogger() << "\n"; + } +} + +void CallerSavePre::ComputeAvail() { + bool changed = true; + while (changed) { + changed = false; + for (auto *phiOcc : phiOccs) { + if (phiOcc->IsNotAvailable()) { + continue; + } + size_t killedCnt = 0; + for (auto *opndOcc : phiOcc->GetPhiOpnds()) { + auto defOcc = opndOcc->GetDef(); + if (defOcc == nullptr) { + continue; + } + // for not move load too far from use site, set not-fully-available-phi killing availibity of phiOpnd + if ((defOcc->GetOccType() == kOccPhiocc && !static_cast(defOcc)->IsFullyAvailable()) + || defOcc->GetOccType() == kOccStore) { + ++killedCnt; + opndOcc->SetHasRealUse(false); + // opnd at back-edge is killed, set phi not avail + if (dom->Dominate(*phiOcc->GetBB(), *opndOcc->GetBB())) { + killedCnt = phiOcc->GetPhiOpnds().size(); + break; + } + if (opndOcc->GetBB()->IsSoloGoto() && opndOcc->GetBB()->GetLoop() != nullptr) { + killedCnt = phiOcc->GetPhiOpnds().size(); + break; + } + continue; + } + } + if (killedCnt == phiOcc->GetPhiOpnds().size()) { + changed |= !phiOcc->IsNotAvailable(); + phiOcc->SetAvailability(kNotAvailable); + } else if (killedCnt > 0) { + changed |= !phiOcc->IsPartialAvailable(); + phiOcc->SetAvailability(kPartialAvailable); + } else {} // fully available is default state + } + } +} + +void CallerSavePre::Rename1() { + std::stack occStack; + classCount = 1; + // iterate the occurrence according to its preorder dominator tree + for (CgOccur *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsDominate(*dom, *occ)) { + occStack.pop(); + } + switch (occ->GetOccType()) { + case kOccUse: { + if (occStack.empty()) { + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccStore || topOccur->GetOccType() == kOccDef || + topOccur->GetOccType() == kOccPhiocc) { + // assign new class + 
occ->SetClassID(topOccur->GetClassID()); + occ->SetPrevVersionOccur(topOccur); + occStack.push(occ); + break; + } else if (topOccur->GetOccType() == kOccUse) { + occ->SetClassID(topOccur->GetClassID()); + if (topOccur->GetDef() != nullptr) { + occ->SetDef(topOccur->GetDef()); + } else { + occ->SetDef(topOccur); + } + break; + } + CHECK_FATAL(false, "unsupported occur type"); + break; + } + case kOccPhiocc: { + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccPhiopnd: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + auto *phiOpndOcc = static_cast(occ); + phiOpndOcc->SetDef(topOccur); + phiOpndOcc->SetClassID(topOccur->GetClassID()); + if (topOccur->GetOccType() == kOccUse) { + phiOpndOcc->SetHasRealUse(true); + } + } + break; + } + case kOccDef: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + } + + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccStore: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + auto prevVersionOcc = topOccur->GetDef() ? topOccur->GetDef() : topOccur; + static_cast(occ)->SetPrevVersionOccur(prevVersionOcc); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + } + + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccExit: { + if (occStack.empty()) { + break; + } + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + break; + } + default: + DEBUG_ASSERT(false, "should not be here"); + break; + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() << " after rename1============\n"; + DumpWorkCandAndOcc(); + } +} + +void CallerSavePre::ComputeVarAndDfPhis() { + dfPhiDfns.clear(); + PreWorkCand *workCand = GetWorkCand(); + for (auto *realOcc : workCand->GetRealOccs()) { + BB *defBB = realOcc->GetBB(); + GetIterDomFrontier(defBB, &dfPhiDfns); + } +} + +void CallerSavePre::BuildWorkList() { + size_t numBBs = dom->GetDtPreOrderSize(); + std::vector callSaveLrs; + for (auto it: regAllocator->GetLrMap()) { + LiveRange *lr = it.second; + if (lr == nullptr || lr->IsSpilled()) { + continue; + } + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(lr->GetAssignedRegNO())); + if (lr->GetSplitLr() == nullptr && lr->GetNumCall() && !isCalleeReg) { + callSaveLrs.emplace_back(lr); + } + } + const MapleVector &preOrderDt = dom->GetDtPreOrder(); + for (size_t i = 0; i < numBBs; ++i) { + BB *bb = func->GetBBFromID(preOrderDt[i]); + std::map insnMap; + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + insnMap.insert(std::make_pair(insn->GetId(), insn)); + } + for (auto lr: callSaveLrs) { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb->GetId()); + RegOperand &opnd = func->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); + if (lu != nullptr && (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall())) { + MapleMap refs = lr->GetRefs(bb->GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if (it->second & kIsUse) { + (void)CreateRealOcc(*insnMap[it->first], opnd, kOccUse); + } + if (it->second & kIsDef) { + 
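// every recorded definition of the live range in this BB becomes a real kOccDef occurrence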
(void)CreateRealOcc(*insnMap[it->first], opnd, kOccDef); + } + if (it->second & kIsCall) { + Insn *callInsn = insnMap[it->first]; + auto *targetOpnd = callInsn->GetCallTargetOperand(); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *mirFunc = funcSt->GetFunction(); + if (mirFunc != nullptr && mirFunc->IsReferedRegsValid()) { + auto regSet = mirFunc->GetReferedRegs(); + if (regSet.find(lr->GetAssignedRegNO()) == regSet.end()) { + continue; + } + } + } + (void) CreateRealOcc(*callInsn, opnd, kOccStore); + } + } + } + } + if (bb->GetKind() == BB::kBBReturn) { + CreateExitOcc(*bb); + } + } +} + +void CallerSavePre::ApplySSAPRE() { + // #0 build worklist + BuildWorkList(); + uint32 cnt = 0; + constexpr uint32 preLimit = UINT32_MAX; + while (!workList.empty()) { + ++cnt; + if (cnt == preLimit) { + beyondLimit = true; + } + workCand = workList.front(); + workCand->SetIndex(static_cast(cnt)); + workLr = regAllocator->GetLiveRange(static_cast(workCand->GetTheOperand())->GetRegisterNumber()); + DEBUG_ASSERT(workLr != nullptr, "exepected non null lr"); + workList.pop_front(); + if (workCand->GetRealOccs().empty()) { + continue; + } + + allOccs.clear(); + phiOccs.clear(); + // #1 Insert PHI; results in allOccs and phiOccs + ComputeVarAndDfPhis(); + CreateSortedOccs(); + if (workCand->GetRealOccs().empty()) { + continue; + } + // #2 Rename + Rename1(); + ComputeDS(); + ComputeAvail(); + CalLoadSites(); + // #6 CodeMotion and recompute worklist based on newly occurrence + CodeMotion(); + DEBUG_ASSERT(workLr->GetProcessed() == false, "exepected unprocessed"); + workLr->SetProcessed(); + } +} + +void GraphColorRegAllocator::OptCallerSave() { + CallerSavePre callerSavePre(this, *cgFunc, domInfo, *memPool, *memPool, kLoadPre, UINT32_MAX); + callerSavePre.SetDump(GCRA_DUMP); + callerSavePre.ApplySSAPRE(); +} + +void GraphColorRegAllocator::SplitVregAroundLoop(const CGFuncLoops &loop, const std::vector &lrs, + BB &headerPred, BB &exitSucc, const std::set &cands) { + size_t maxSplitCount = lrs.size() - intCalleeRegSet.size(); + maxSplitCount = maxSplitCount > kMaxSplitCount ? kMaxSplitCount : maxSplitCount; + uint32 splitCount = 0; + auto it = cands.begin(); + size_t candsSize = cands.size(); + maxSplitCount = maxSplitCount > candsSize ? 
candsSize : maxSplitCount; + for (auto &lr: lrs) { + if (lr->IsSpilled()) { + continue; + } + if (!AArch64Abi::IsCalleeSavedReg(static_cast(lr->GetAssignedRegNO()))) { + continue; + } + bool hasRef = false; + for (auto *bb : loop.GetLoopMembers()) { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb->GetId()); + if (lu != nullptr && (lu->GetDefNum() != 0 || lu->GetUseNum() != 0)) { + hasRef = true; + break; + } + } + if (!hasRef) { + splitCount++; + RegOperand *ropnd = &cgFunc->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(lr->GetAssignedRegNO()), ropnd->GetSize(), + (lr->GetRegType())); + + Insn *headerCom = &(static_cast(cgFunc)->CreateCommentInsn("split around loop begin")); + headerPred.AppendInsn(*headerCom); + Insn *last = headerPred.GetLastInsn(); + (void)SpillOperand(*last, *ropnd, true, static_cast(phyOpnd)); + + Insn *exitCom = &(static_cast(cgFunc)->CreateCommentInsn("split around loop end")); + exitSucc.InsertInsnBegin(*exitCom); + Insn *first = exitSucc.GetFirstInsn(); + (void)SpillOperand(*first, *ropnd, false, static_cast(phyOpnd)); + + LiveRange *replacedLr = lrMap[*it]; + replacedLr->SetAssignedRegNO(lr->GetAssignedRegNO()); + replacedLr->SetSpilled(false); + ++it; + } + if (splitCount >= maxSplitCount) { + break; + } + } +} + +bool GraphColorRegAllocator::LrGetBadReg(const LiveRange &lr) const { + if (lr.IsSpilled()) { + return true; + } + if (lr.GetNumCall() != 0 && + !AArch64Abi::IsCalleeSavedReg(static_cast(lr.GetAssignedRegNO()))) { + return true; + } + return false; +} + +bool GraphColorRegAllocator::LoopNeedSplit(const CGFuncLoops &loop, std::set &cands) { + std::set regPressure; + const BB *header = loop.GetHeader(); + const MapleSet &liveIn = header->GetLiveInRegNO(); + std::set loopBBs; + for (auto *bb : loop.GetLoopMembers()) { + loopBBs.insert(bb); + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + continue; + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd->GetRegisterNumber()); + } + } + if (offset != nullptr && offset->IsRegister()) { + RegOperand *regOpnd = static_cast(offset); + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd->GetRegisterNumber()); + } + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd.GetRegisterNumber()); + } + } + } + } + } + if (regPressure.size() != 0) { + for (auto reg: regPressure) { + LiveRange *lr = lrMap[reg]; + std::vector smember; + 
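// collect every BB of this live range: a split candidate must be fully contained in the loop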
ForEachBBArrElem(lr->GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.emplace_back(bbVec[bbID]); }); + bool liveBeyondLoop = false; + for (auto bb: smember) { + if (loopBBs.find(bb) == loopBBs.end()) { + liveBeyondLoop = true; + break; + } + } + if (liveBeyondLoop) { + continue; + } + cands.insert(reg); + } + if (cands.empty()) { + return false; + } + return true; + } + return false; +} + +void GraphColorRegAllocator::AnalysisLoop(const CGFuncLoops &loop) { + const BB *header = loop.GetHeader(); + const MapleSet &liveIn = header->GetLiveInRegNO(); + std::vector lrs; + size_t intCalleeNum = intCalleeRegSet.size(); + if (loop.GetMultiEntries().size() != 0) { + return; + } + for (auto regno: liveIn) { + LiveRange *lr = GetLiveRange(regno); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && lr->GetNumCall() != 0) { + lrs.emplace_back(lr); + } + } + if (lrs.size() < intCalleeNum) { + return; + } + bool hasCall = false; + std::set loopBBs; + for (auto *bb : loop.GetLoopMembers()) { + if (bb->HasCall()) { + hasCall = true; + } + loopBBs.insert(bb); + } + if (!hasCall) { + return; + } + auto comparator = [=](const LiveRange *lr1, const LiveRange *lr2) -> bool { + return lr1->GetPriority() < lr2->GetPriority(); + }; + std::sort(lrs.begin(), lrs.end(), comparator); + const MapleVector &exits = loop.GetExits(); + std::set loopExits; + for (auto &bb: exits) { + for (auto &succ: bb->GetSuccs()) { + if (loopBBs.find(succ) != loopBBs.end()) { + continue; + } + if (succ->IsSoloGoto() || succ->IsEmpty()) { + BB *realSucc = CGCFG::GetTargetSuc(*succ); + if (realSucc != nullptr) { + loopExits.insert(realSucc); + } + } else { + loopExits.insert(succ); + } + } + } + std::set loopEntra; + for (auto &pred: header->GetPreds()) { + if (loopBBs.find(pred) != loopBBs.end()) { + continue; + } + loopEntra.insert(pred); + } + if (loopEntra.size() != 1 || loopExits.size() != 1) { + return; + } + BB *headerPred = *loopEntra.begin(); + BB *exitSucc = *loopExits.begin(); + if (headerPred->GetKind() != BB::kBBFallthru) { + return; + } + if (exitSucc->GetPreds().size() != loop.GetExits().size()) { + return; + } + std::set cands; + if (!LoopNeedSplit(loop, cands)) { + return; + } + SplitVregAroundLoop(loop, lrs, *headerPred, *exitSucc, cands); +} +void GraphColorRegAllocator::AnalysisLoopPressureAndSplit(const CGFuncLoops &loop) { + if (loop.GetInnerLoops().empty()) { + // only handle inner-most loop + AnalysisLoop(loop); + return; + } + for (const auto *lp : loop.GetInnerLoops()) { + AnalysisLoopPressureAndSplit(*lp); + } +} + +/* Iterate through all instructions and change the vreg to preg. 
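* + * Sketch of the per-instruction rewrite done below (names here are + * hypothetical, not this file's API): + * for each register operand v (index i) of insn: + * if v has a physical assignment: insn.SetOperand(i, phyRegOf(v)) + * else (v was spilled): GetReplaceOpnd() materializes the reload/store code + * Memory operands additionally get their base/index registers replaced. + 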
*/ +void GraphColorRegAllocator::FinalizeRegisters() { + if (doMultiPass && hasSpill) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "In this round, spill vregs : \n"; + for (auto it: lrMap) { + LiveRange *lr = it.second; + if (lr->IsSpilled()) { + LogInfo::MapleLogger() << "R" << lr->GetRegNO() << " "; + } + } + LogInfo::MapleLogger() << "\n"; + } + bool done = SpillLiveRangeForSpills(); + if (done) { + return; + } + } + if (CLANG) { + if (!cgFunc->GetLoops().empty()) { + cgFunc->GetTheCFG()->InitInsnVisitor(*cgFunc); + for (const auto *lp : cgFunc->GetLoops()) { + AnalysisLoopPressureAndSplit(*lp); + } + } + OptCallerSave(); + } + for (auto *bb : bfs->sortedBBs) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (insn->IsImmaterialInsn()) { + continue; + } + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + continue; + } + + for (uint32 i = 0; i < kSpillMemOpndNum; ++i) { + operandSpilled[i] = false; + } + + FinalizeRegisterInfo *fInfo = memPool->New(alloc); + bool needProcces = true; + uint64 usedRegMask = FinalizeRegisterPreprocess(*fInfo, *insn, needProcces); + if (!needProcces) { + continue; + } + uint32 defSpillIdx = 0; + uint32 useSpillIdx = 0; + MemOperand *memOpnd = nullptr; + if (fInfo->GetBaseOperand()) { + memOpnd = static_cast(fInfo->GetBaseOperand())->Clone(*cgFunc->GetMemoryPool()); + insn->SetOperand(fInfo->GetMemOperandIdx(), *memOpnd); + Operand *base = memOpnd->GetBaseRegister(); + DEBUG_ASSERT(base != nullptr, "nullptr check"); + /* if base register is both defReg and useReg, defSpillIdx should also be increased. But it doesn't exist yet */ + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + memOpnd->SetBaseRegister(*phyOpnd); + } + if (!memOpnd->IsIntactIndexed()) { + (void)GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, true); + } + } + if (fInfo->GetOffsetOperand()) { + DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + Operand *offset = memOpnd->GetIndexRegister(); + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *offset, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + memOpnd->SetIndexRegister(*phyOpnd); + } + } + for (size_t i = 0; i < fInfo->GetDefOperandsSize(); ++i) { + if (insn->GetMachineOpcode() == MOP_asm) { + const Operand *defOpnd = fInfo->GetDefOperandsElem(i); + if (defOpnd->IsList()) { + ListOperand *outList = const_cast(static_cast(defOpnd)); + auto *a64CGFunc = static_cast(cgFunc); + auto *srcOpndsNew = + a64CGFunc->CreateListOpnd(*a64CGFunc->GetFuncScopeAllocator()); + RegOperand *phyOpnd; + for (auto opnd : outList->GetOperands()) { + if (opnd->IsPhysicalRegister()) { + phyOpnd = opnd; + } else { + phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, true); + } + srcOpndsNew->PushOpnd(*phyOpnd); + } + insn->SetOperand(kAsmOutputListOpnd, *srcOpndsNew); + continue; + } + } + const Operand *opnd = fInfo->GetDefOperandsElem(i); + RegOperand *phyOpnd = nullptr; + if (insn->IsSpecialIntrinsic()) { + phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, true); + } else { + phyOpnd = GetReplaceOpnd(*insn, *opnd, defSpillIdx, usedRegMask, true); + } + if (phyOpnd != nullptr) { + insn->SetOperand(fInfo->GetDefIdxElem(i), *phyOpnd); + } + } + for (size_t i = 0; i < fInfo->GetUseOperandsSize(); ++i) { + if (insn->GetMachineOpcode() == MOP_asm) { + const Operand *useOpnd = fInfo->GetUseOperandsElem(i); + if (useOpnd->IsList()) { + ListOperand *inList = const_cast(static_cast(useOpnd)); + auto *a64CGFunc 
= static_cast(cgFunc); + auto *srcOpndsNew = a64CGFunc->CreateListOpnd(*a64CGFunc->GetFuncScopeAllocator()); + for (auto opnd : inList->GetOperands()) { + if ((static_cast(opnd))->GetRegisterNumber() < kAllRegNum) { + srcOpndsNew->PushOpnd(*opnd); + } else { + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, false); + srcOpndsNew->PushOpnd(*phyOpnd); + } + } + insn->SetOperand(kAsmInputListOpnd, *srcOpndsNew); + continue; + } + } + const Operand *opnd = fInfo->GetUseOperandsElem(i); + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + insn->SetOperand(fInfo->GetUseIdxElem(i), *phyOpnd); + } + } + if (insn->GetMachineOpcode() == MOP_wmovrr || insn->GetMachineOpcode() == MOP_xmovrr) { + auto ®1 = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn->GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + bb->RemoveInsn(*insn); + } + } + } /* insn */ + } /* BB */ +} + +void GraphColorRegAllocator::MarkCalleeSaveRegs() { + for (auto regNO : intCalleeUsed) { + static_cast(cgFunc)->AddtoCalleeSaved(static_cast(regNO)); + } + for (auto regNO : fpCalleeUsed) { + static_cast(cgFunc)->AddtoCalleeSaved(static_cast(regNO)); + } +} + +bool GraphColorRegAllocator::AllocateRegisters() { +#ifdef RANDOM_PRIORITY + /* Change this seed for different random numbers */ + srand(0); +#endif /* RANDOM_PRIORITY */ + auto *a64CGFunc = static_cast(cgFunc); + + if (GCRA_DUMP && doMultiPass) { + LogInfo::MapleLogger() << "\n round start: \n"; + cgFunc->DumpCGIR(); + } + /* + * we store both FP/LR if using FP or if not using FP, but func has a call + * Using FP, record it for saving + */ + a64CGFunc->AddtoCalleeSaved(RFP); + a64CGFunc->AddtoCalleeSaved(RLR); + a64CGFunc->NoteFPLRAddedToCalleeSavedList(); + +#if DEBUG + int32 cnt = 0; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + ++cnt; + } + } + DEBUG_ASSERT(cnt <= cgFunc->GetTotalNumberOfInstructions(), "Incorrect insn count"); +#endif + cgFunc->SetIsAfterRegAlloc(); + /* EBO propgation extent the live range and might need to be turned off. */ + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); + + InitCCReg(); + + ComputeLiveRanges(); + + InitFreeRegPool(); + + BuildInterferenceGraph(); + + Separate(); + + SplitAndColor(); + +#ifdef USE_LRA + if (doLRA) { + LocalRegisterAllocator(true); + } +#endif /* USE_LRA */ + + FinalizeRegisters(); + + MarkCalleeSaveRegs(); + + if (!seenFP) { + cgFunc->UnsetSeenFP(); + } + if (GCRA_DUMP) { + cgFunc->DumpCGIR(); + } + + bfs = nullptr; /* bfs is not utilized outside the function. */ + + if (doMultiPass && hasSpill) { + return false; + } else { + return true; + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5f1c850e2964a52cd87f60afaa8134c524f194bc --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp @@ -0,0 +1,85 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "aarch64_dce.h" +#include "aarch64_operand.h" +namespace maplebe { +bool AArch64Dce::RemoveUnuseDef(VRegVersion &defVersion) { + /* delete defs which have no uses */ + if (defVersion.GetAllUseInsns().empty()) { + DUInsnInfo *defInsnInfo = defVersion.GetDefInsnInfo(); + if (defInsnInfo == nullptr) { + return false; + } + CHECK_FATAL(defInsnInfo->GetInsn() != nullptr, "Get def insn failed"); + Insn *defInsn = defInsnInfo->GetInsn(); + /* have not support asm/neon opt yet */ + if (defInsn->GetMachineOpcode() == MOP_asm || defInsn->IsVectorOp() || defInsn->IsAtomic()) { + return false; + } + std::set defRegs = defInsn->GetDefRegs(); + if (defRegs.size() != 1) { + return false; + } + uint32 bothDUIdx = defInsn->GetBothDefUseOpnd(); + if (!(bothDUIdx != kInsnMaxOpnd && defInsnInfo->GetOperands().count(bothDUIdx))) { + defInsn->GetBB()->RemoveInsn(*defInsn); + if (defInsn->IsPhi()) { + defInsn->GetBB()->RemovePhiInsn(defVersion.GetOriginalRegNO()); + } + defVersion.MarkDeleted(); + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = opndNum; i > 0; --i) { + Operand &opnd = defInsn->GetOperand(i - 1); + A64DeleteRegUseVisitor deleteUseRegVisitor(*GetSSAInfo(), defInsn->GetId()); + opnd.Accept(deleteUseRegVisitor); + } + return true; + } + } + return false; +} + +void A64DeleteRegUseVisitor::Visit(RegOperand *v) { + if (v->IsSSAForm()) { + VRegVersion *regVersion = GetSSAInfo()->FindSSAVersion(v->GetRegisterNumber()); + MapleUnorderedMap &useInfos = regVersion->GetAllUseInsns(); + auto it = useInfos.find(deleteInsnId); + if (it != useInfos.end()) { + useInfos.erase(it); + } + } +} +void A64DeleteRegUseVisitor::Visit(ListOperand *v) { + for (auto *regOpnd : v->GetOperands()) { + Visit(regOpnd); + } +} +void A64DeleteRegUseVisitor::Visit(MemOperand *a64MemOpnd) { + RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); + RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); + if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { + Visit(baseRegOpnd); + } + if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { + Visit(indexRegOpnd); + } +} + +void A64DeleteRegUseVisitor::Visit(PhiOperand *v) { + for (auto phiOpndIt : v->GetOperands()) { + Visit(phiOpndIt.second); + } +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp new file mode 100644 index 0000000000000000000000000000000000000000..151a44dcb93e0766c57db695355bad2e106482df --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp @@ -0,0 +1,1152 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_dependence.h" +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "pressure.h" + +/* For building dependence graph, The entry is AArch64DepAnalysis::Run. */ +namespace maplebe { +/* constructor */ +AArch64DepAnalysis::AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA) + : DepAnalysis(func, mp, mad, beforeRA), stackUses(alloc.Adapter()), + stackDefs(alloc.Adapter()), heapUses(alloc.Adapter()), + heapDefs(alloc.Adapter()), mayThrows(alloc.Adapter()), + ambiInsns(alloc.Adapter()), ehInRegs(alloc.Adapter()) { + uint32 maxRegNum; + if (beforeRA) { + maxRegNum = cgFunc.GetMaxVReg(); + } else { + maxRegNum = kAllRegNum; + } + regDefs = memPool.NewArray(maxRegNum); + regUses = memPool.NewArray(maxRegNum); +} + +/* print dep node information */ +void AArch64DepAnalysis::DumpDepNode(DepNode &node) const { + node.GetInsn()->Dump(); + uint32 num = node.GetUnitNum(); + LogInfo::MapleLogger() << "unit num : " << num << ", "; + for (uint32 i = 0; i < num; ++i) { + const Unit *unit = node.GetUnitByIndex(i); + if (unit != nullptr) { + PRINT_VAL(unit->GetName()); + } else { + PRINT_VAL("none"); + } + } + LogInfo::MapleLogger() << '\n'; + node.DumpSchedInfo(); + if (beforeRA) { + node.DumpRegPressure(); + } +} + +/* print dep link information */ +void AArch64DepAnalysis::DumpDepLink(DepLink &link, const DepNode *node) const { + PRINT_VAL(GetDepTypeName(link.GetDepType())); + PRINT_STR_VAL("Latency: ", link.GetLatency()); + if (node != nullptr) { + node->GetInsn()->Dump(); + return; + } + LogInfo::MapleLogger() << "from : "; + link.GetFrom().GetInsn()->Dump(); + LogInfo::MapleLogger() << "to : "; + link.GetTo().GetInsn()->Dump(); +} + +/* Append use register to the list. */ +void AArch64DepAnalysis::AppendRegUseList(Insn &insn, regno_t regNO) { + RegList *regList = memPool.New(); + regList->insn = &insn; + regList->next = nullptr; + if (regUses[regNO] == nullptr) { + regUses[regNO] = regList; + if (beforeRA) { + Insn *defInsn = regDefs[regNO]; + if (defInsn == nullptr) { + return; + } + DepNode *defNode = defInsn->GetDepNode(); + defNode->SetRegDefs(regNO, regList); + } + return; + } + RegList *lastRegList = regUses[regNO]; + while (lastRegList->next != nullptr) { + lastRegList = lastRegList->next; + } + lastRegList->next = regList; +} + +/* + * Add dependence edge. + * Two dependence node has a unique edge. + * True dependence overwirtes other dependences. + */ +void AArch64DepAnalysis::AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) { + /* Can not build a self loop dependence. */ + if (&fromNode == &toNode) { + return; + } + /* Check if exist edge. */ + if (!fromNode.GetSuccs().empty()) { + DepLink *depLink = fromNode.GetSuccs().back(); + if (&(depLink->GetTo()) == &toNode) { + if (depLink->GetDepType() != kDependenceTypeTrue) { + if (depType == kDependenceTypeTrue) { + /* Has exist edge, replace it. 
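* A true dependence is the strongest edge kind, so it overrides an existing + * anti/output edge to the same successor and refreshes that edge's latency.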
*/ + depLink->SetDepType(kDependenceTypeTrue); + depLink->SetLatency(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn())); + } + } + return; + } + } + DepLink *depLink = memPool.New(fromNode, toNode, depType); + if (depType == kDependenceTypeTrue) { + depLink->SetLatency(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn())); + } + fromNode.AddSucc(*depLink); + toNode.AddPred(*depLink); +} + +void AArch64DepAnalysis::AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type) { + for (auto anyInsn : insns) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } +} + +void AArch64DepAnalysis::AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, + const DepType &type) { + for (auto anyInsn : insns) { + if (anyInsn != &insn) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } + } +} + +/* Remove self dependence (self loop) in dependence graph. */ +void AArch64DepAnalysis::RemoveSelfDeps(Insn &insn) { + DepNode *node = insn.GetDepNode(); + DEBUG_ASSERT(node->GetSuccs().back()->GetTo().GetInsn() == &insn, "Is not a self dependence."); + DEBUG_ASSERT(node->GetPreds().back()->GetFrom().GetInsn() == &insn, "Is not a self dependence."); + node->RemoveSucc(); + node->RemovePred(); +} + +/* Build dependences of source register operand. */ +void AArch64DepAnalysis::BuildDepsUseReg(Insn &insn, regno_t regNO) { + DepNode *node = insn.GetDepNode(); + node->AddUseReg(regNO); + if (regDefs[regNO] != nullptr) { + /* Build true dependences. */ + AddDependence(*regDefs[regNO]->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); + } +} + +/* Build dependences of destination register operand. */ +void AArch64DepAnalysis::BuildDepsDefReg(Insn &insn, regno_t regNO) { + DepNode *node = insn.GetDepNode(); + node->AddDefReg(regNO); + /* Build anti dependences. */ + RegList *regList = regUses[regNO]; + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); + regList = regList->next; + } + /* Build output depnedence. */ + if (regDefs[regNO] != nullptr) { + AddDependence(*regDefs[regNO]->GetDepNode(), *node, kDependenceTypeOutput); + } +} + +void AArch64DepAnalysis::ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, + bool isFromClinit) const { + if (isFromClinit) { + firstNode.AddClinitInsn(*firstNode.GetInsn()); + firstNode.AddClinitInsn(*secondNode.GetInsn()); + firstNode.SetCfiInsns(secondNode.GetCfiInsns()); + } else { + for (Insn *insn : secondNode.GetCfiInsns()) { + firstNode.AddCfiInsn(*insn); + } + for (Insn *insn : secondNode.GetComments()) { + firstNode.AddComments(*insn); + } + secondNode.ClearComments(); + } + firstNode.SetInsn(newInsn); + Reservation *rev = mad.FindReservation(newInsn); + CHECK_FATAL(rev != nullptr, "reservation is nullptr."); + firstNode.SetReservation(*rev); + firstNode.SetUnits(rev->GetUnit()); + firstNode.SetUnitNum(rev->GetUnitNum()); + newInsn.SetDepNode(firstNode); +} + +void AArch64DepAnalysis::ClearDepNodeInfo(DepNode &depNode) const { + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); + insn.SetDepNode(depNode); + Reservation *seRev = mad.FindReservation(insn); + depNode.SetInsn(insn); + depNode.SetType(kNodeTypeEmpty); + depNode.SetReservation(*seRev); + depNode.SetUnitNum(0); + depNode.ClearCfiInsns(); + depNode.SetUnits(nullptr); +} + +/* Combine adrpldr&clinit_tail to clinit. 
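* E.g. the pair { adrp_ldr xN, :classinfo: ; clinit_tail xN } is rewritten into + * a single clinit xN, :classinfo: node (operand shapes here are only a sketch; + * the authoritative form is the MOP_clinit builder below), and the second node + * is emptied so the scheduler sees one macro instruction.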
*/ +void AArch64DepAnalysis::CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) { + DEBUG_ASSERT(firstNode.GetInsn()->GetMachineOpcode() == MOP_adrp_ldr, "first insn should be adrpldr"); + DEBUG_ASSERT(secondNode.GetInsn()->GetMachineOpcode() == MOP_clinit_tail, "second insn should be clinit_tail"); + DEBUG_ASSERT(firstNode.GetCfiInsns().empty(), "There should not be any comment/cfi instructions between clinit."); + DEBUG_ASSERT(secondNode.GetComments().empty(), "There should not be any comment/cfi instructions between clinit."); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn( + MOP_clinit, firstNode.GetInsn()->GetOperand(0), firstNode.GetInsn()->GetOperand(1)); + newInsn.SetId(firstNode.GetInsn()->GetId()); + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, true); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, isAcrossSeparator); +} + +/* + * Combine memory access pair: + * 1.ldr to ldp. + * 2.str to stp. + */ +void AArch64DepAnalysis::CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) { + DEBUG_ASSERT(firstNode.GetInsn(), "the insn of first Node should not be nullptr"); + DEBUG_ASSERT(secondNode.GetInsn(), "the insn of second Node should not be nullptr"); + MOperator thisMop = firstNode.GetInsn()->GetMachineOpcode(); + MOperator mopPair = GetMopPair(thisMop); + DEBUG_ASSERT(mopPair != 0, "mopPair should not be zero"); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + Operand *opnd2 = nullptr; + if (useFirstOffset) { + opnd0 = &(firstNode.GetInsn()->GetOperand(0)); + opnd1 = &(secondNode.GetInsn()->GetOperand(0)); + opnd2 = &(firstNode.GetInsn()->GetOperand(1)); + } else { + opnd0 = &(secondNode.GetInsn()->GetOperand(0)); + opnd1 = &(firstNode.GetInsn()->GetOperand(0)); + opnd2 = &(secondNode.GetInsn()->GetOperand(1)); + } + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopPair, *opnd0, *opnd1, *opnd2); + newInsn.SetId(firstNode.GetInsn()->GetId()); + std::string newComment; + const MapleString &comment = firstNode.GetInsn()->GetComment(); + if (comment.c_str() != nullptr) { + newComment += comment.c_str(); + } + const MapleString &secondComment = secondNode.GetInsn()->GetComment(); + if (secondComment.c_str() != nullptr) { + newComment += " "; + newComment += secondComment.c_str(); + } + if ((newComment.c_str() != nullptr) && (strlen(newComment.c_str()) > 0)) { + newInsn.SetComment(newComment); + } + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, false); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, false, true); +} + +/* Combine two dependence nodes to one */ +void AArch64DepAnalysis::CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine) { + if (isAcrossSeparator) { + /* Clear all latency of the second node. 
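* When the combined pair straddles a separator, the nodes keep their edges but + * all latencies on the second node are zeroed: the separator already + * serializes the two scheduling windows.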
*/ + for (auto predLink : secondNode.GetPreds()) { + predLink->SetLatency(0); + } + for (auto succLink : secondNode.GetSuccs()) { + succLink->SetLatency(0); + } + return; + } + std::set uniqueNodes; + + for (auto predLink : firstNode.GetPreds()) { + if (predLink->GetDepType() == kDependenceTypeTrue) { + predLink->SetLatency(mad.GetLatency(*predLink->GetFrom().GetInsn(), *firstNode.GetInsn())); + } + (void)uniqueNodes.insert(&predLink->GetFrom()); + } + for (auto predLink : secondNode.GetPreds()) { + if (&predLink->GetFrom() != &firstNode) { + if (uniqueNodes.insert(&(predLink->GetFrom())).second) { + AddDependence(predLink->GetFrom(), firstNode, predLink->GetDepType()); + } + } + predLink->SetLatency(0); + } + uniqueNodes.clear(); + for (auto succLink : firstNode.GetSuccs()) { + if (succLink->GetDepType() == kDependenceTypeTrue) { + succLink->SetLatency(mad.GetLatency(*succLink->GetFrom().GetInsn(), *firstNode.GetInsn())); + } + (void)uniqueNodes.insert(&(succLink->GetTo())); + } + for (auto succLink : secondNode.GetSuccs()) { + if (uniqueNodes.insert(&(succLink->GetTo())).second) { + AddDependence(firstNode, succLink->GetTo(), succLink->GetDepType()); + if (isMemCombine) { + succLink->GetTo().IncreaseValidPredsSize(); + } + } + succLink->SetLatency(0); + } +} + +/* + * Build dependences of ambiguous instruction. + * ambiguous instruction : instructions that can not across may throw instructions. + */ +void AArch64DepAnalysis::BuildDepsAmbiInsn(Insn &insn) { + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); + ambiInsns.emplace_back(&insn); +} + +/* Build dependences of may throw instructions. */ +void AArch64DepAnalysis::BuildDepsMayThrowInsn(Insn &insn) { + AddDependence4InsnInVectorByType(ambiInsns, insn, kDependenceTypeThrow); +} + +bool AArch64DepAnalysis::IsFrameReg(const RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == RFP) || (opnd.GetRegisterNumber() == RSP); +} + +MemOperand *AArch64DepAnalysis::BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, + uint32 byteSize) const { + MemOperand *nextMemOpnd = aarchMemOpnd.Clone(memPool); + Operand *nextOfstOpnd = nextMemOpnd->GetOffsetImmediate()->Clone(memPool); + OfstOperand *aarchNextOfstOpnd = static_cast(nextOfstOpnd); + CHECK_NULL_FATAL(aarchNextOfstOpnd); + int32 offsetVal = static_cast(aarchNextOfstOpnd->GetOffsetValue()); + aarchNextOfstOpnd->SetOffsetValue(offsetVal + byteSize); + nextMemOpnd->SetOffsetOperand(*aarchNextOfstOpnd); + return nextMemOpnd; +} + +/* Get the second memory access operand of stp/ldp instructions. */ +MemOperand *AArch64DepAnalysis::GetNextMemOperand( + const Insn &insn, const MemOperand &aarchMemOpnd) const { + MemOperand *nextMemOpnd = nullptr; + switch (insn.GetMachineOpcode()) { + case MOP_wldp: + case MOP_sldp: + case MOP_xldpsw: + case MOP_wstp: + case MOP_sstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k4ByteSize); + break; + } + case MOP_xldp: + case MOP_dldp: + case MOP_xstp: + case MOP_dstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k8ByteSize); + break; + } + default: + break; + } + + return nextMemOpnd; +} + +/* + * Build dependences of symbol memory access. + * Memory access with symbol must be a heap memory access. + */ +void AArch64DepAnalysis::BuildDepsAccessStImmMem(Insn &insn, bool isDest) { + if (isDest) { + /* + * Heap memory + * Build anti dependences. + */ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output depnedence. 
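* (the symbol store is ordered after every earlier heap def, then recorded as + * a heap def itself)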
*/ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + heapDefs.emplace_back(&insn); + } else { + /* Heap memory */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); + heapUses.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } +} + +/* Build dependences of stack memory and heap memory uses. */ +void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) { + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); + + aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize()); + /* Stack memory address */ + for (auto defInsn : stackDefs) { + if (defInsn->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *defInsn)) { + AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); + continue; + } + } + /* Heap memory */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); + if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) { + stackUses.emplace_back(&insn); + } else { + heapUses.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } +} + +static bool NoAlias(const MemOperand &leftOpnd, const MemOperand &rightOpnd) { + if (leftOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + rightOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && leftOpnd.GetIndexOpt() == MemOperand::kIntact && + rightOpnd.GetIndexOpt() == MemOperand::kIntact) { + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() == RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() == RFP) { + Operand *ofstOpnd = leftOpnd.GetOffsetOperand(); + Operand *rofstOpnd = rightOpnd.GetOffsetOperand(); + DEBUG_ASSERT(ofstOpnd != nullptr, "offset operand should not be null."); + DEBUG_ASSERT(rofstOpnd != nullptr, "offset operand should not be null."); + ImmOperand *ofst = static_cast(ofstOpnd); + ImmOperand *rofst = static_cast(rofstOpnd); + DEBUG_ASSERT(ofst != nullptr, "CG internal error, invalid type."); + DEBUG_ASSERT(rofst != nullptr, "CG internal error, invalid type."); + return (!ofst->ValueEquals(*rofst)); + } + } + return false; +} + +static bool NoOverlap(const MemOperand &leftOpnd, const MemOperand &rightOpnd) { + if (leftOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + rightOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + leftOpnd.GetIndexOpt() != MemOperand::kIntact || + rightOpnd.GetIndexOpt() != MemOperand::kIntact) { + return false; + } + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() != RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() != RFP) { + return false; + } + int64 ofset1 = leftOpnd.GetOffsetOperand()->GetValue(); + int64 ofset2 = rightOpnd.GetOffsetOperand()->GetValue(); + if (ofset1 < ofset2) { + return ((ofset1 + leftOpnd.GetAccessSize()) <= ofset2); + } else { + return ((ofset2 + rightOpnd.GetAccessSize()) <= ofset1); + } +} + +/* Return true if memInsn's memOpnd no alias with memOpnd and nextMemOpnd */ +bool AArch64DepAnalysis::NeedBuildDepsMem(const MemOperand &memOpnd, + const MemOperand *nextMemOpnd, + const Insn &memInsn) const { + auto *memOpndOfmemInsn = static_cast(memInsn.GetMemOpnd()); + if (!NoAlias(memOpnd, *memOpndOfmemInsn) || ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *memOpndOfmemInsn))) { + return true; + } + if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && 
!memInsn.IsCall()) { + static_cast<MemOperand*>(memInsn.GetMemOpnd())->SetAccessSize(memInsn.GetMemoryByteSize()); + return (!NoOverlap(memOpnd, *memOpndOfmemInsn)); + } + MemOperand *nextMemOpndOfmemInsn = GetNextMemOperand(memInsn, *memOpndOfmemInsn); + if (nextMemOpndOfmemInsn != nullptr) { + if (!NoAlias(memOpnd, *nextMemOpndOfmemInsn) || + ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *nextMemOpndOfmemInsn))) { + return true; + } + } + return false; +} + +/* + * Build anti dependences between insn and other insns that use stack memory. + * insn : the instruction that defines stack memory. + * memOpnd : insn's memOpnd + * nextMemOpnd : a memory-pair instruction (like ldp/stp) defines two memory locations. + */ +void AArch64DepAnalysis::BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, + const MemOperand *nextMemOpnd) { + memOpnd.SetAccessSize(insn.GetMemoryByteSize()); + for (auto *useInsn : stackUses) { + if (NeedBuildDepsMem(memOpnd, nextMemOpnd, *useInsn)) { + AddDependence(*useInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); + } + } +} + +/* + * Build output dependences between insn and other insns that define stack memory. + * insn : the instruction that defines stack memory. + * memOpnd : insn's memOpnd + * nextMemOpnd : a memory-pair instruction (like ldp/stp) defines two memory locations. + */ +void AArch64DepAnalysis::BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, + const MemOperand *nextMemOpnd) { + memOpnd.SetAccessSize(insn.GetMemoryByteSize()); + for (auto defInsn : stackDefs) { + if (defInsn->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *defInsn)) { + AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + } + } +} + +/* Build dependences of stack memory and heap memory definitions. */ +void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); + + /* Build anti dependences. */ + BuildAntiDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd); + /* Build output dependence. */ + BuildOutputDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd); + if (lastCallInsn != nullptr) { + /* Build a dependence between stack passed arguments and call. */ + DEBUG_ASSERT(baseRegister != nullptr, "baseRegister shouldn't be null here"); + if (baseRegister->GetRegisterNumber() == RSP) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + } + + /* Heap memory + * Build anti dependences. + */ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output dependence. */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + + if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) { + stackDefs.emplace_back(&insn); + } else { + heapDefs.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } + /* A memory definition cannot move across may-throw insns. */ + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); +} + +/* Build dependences of memory barrier instructions. 
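* A barrier orders against every recorded memory access on both sides: all + * earlier stack/heap uses and defs get a membar edge into the barrier, and + * the barrier is remembered in memBarInsn so that later accesses add a + * membar edge out of it.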
*/ +void AArch64DepAnalysis::BuildDepsMemBar(Insn &insn) { + AddDependence4InsnInVectorByTypeAndCmp(stackUses, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(heapUses, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(stackDefs, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(heapDefs, insn, kDependenceTypeMembar); + memBarInsn = &insn; +} + +/* A pseudo separator node depends on all the other nodes. */ +void AArch64DepAnalysis::BuildDepsSeparator(DepNode &newSepNode, MapleVector<DepNode*> &nodes) { + uint32 nextSepIndex = (separatorIndex + kMaxDependenceNum) < nodes.size() ? (separatorIndex + kMaxDependenceNum) + : static_cast<uint32>(nodes.size() - 1); + newSepNode.ReservePreds(nextSepIndex - separatorIndex); + newSepNode.ReserveSuccs(nextSepIndex - separatorIndex); + for (uint32 i = separatorIndex; i < nextSepIndex; ++i) { + AddDependence(*nodes[i], newSepNode, kDependenceTypeSeparator); + } +} + + +/* Build control dependence for branch/ret instructions. */ +void AArch64DepAnalysis::BuildDepsControlAll(DepNode &depNode, const MapleVector<DepNode*> &nodes) { + for (uint32 i = separatorIndex; i < depNode.GetIndex(); ++i) { + AddDependence(*nodes[i], depNode, kDependenceTypeControl); + } +} + +/* + * Build dependences of call instructions. + * Caller-saved physical registers will be defined by a call instruction. + * Also the condition register may be modified by a call. + */ +void AArch64DepAnalysis::BuildCallerSavedDeps(Insn &insn) { + /* Build anti dependence and output dependence. */ + for (uint32 i = R0; i <= R7; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = V0; i <= V7; ++i) { + BuildDepsDefReg(insn, i); + } + if (!beforeRA) { + for (uint32 i = R8; i <= R18; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = RLR; i <= RSP; ++i) { + BuildDepsUseReg(insn, i); + } + for (uint32 i = V16; i <= V31; ++i) { + BuildDepsDefReg(insn, i); + } + } + /* For condition operand, such as NE, EQ, and so on. */ + if (cgFunc.GetRflag() != nullptr) { + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* + * Build dependence between control register and last call instruction. + * insn : the instruction with a control register operand. + * isDest : whether the control register operand is a destination operand. + */ +void AArch64DepAnalysis::BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) { + if (lastCallInsn == nullptr) { + return; + } + if (isDest) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + return; + } + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); +} + +/* + * Build dependences between the stack-defining instructions that set up a call's arguments + * and the call instruction itself. + * insn : a call instruction (call/tail-call) + */ +void AArch64DepAnalysis::BuildStackPassArgsDeps(Insn &insn) { + for (auto stackDefInsn : stackDefs) { + if (stackDefInsn->IsCall()) { + continue; + } + Operand *opnd = stackDefInsn->GetMemOpnd(); + DEBUG_ASSERT(opnd->IsMemoryAccessOperand(), "make sure opnd is memOpnd"); + MemOperand *memOpnd = static_cast<MemOperand*>(opnd); + RegOperand *baseReg = memOpnd->GetBaseRegister(); + if ((baseReg != nullptr) && (baseReg->GetRegisterNumber() == RSP)) { + AddDependence(*stackDefInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + } +} + +/* Some insns may dirty all stack memory, such as "bl MCC_InitializeLocalStackRef". */ +void AArch64DepAnalysis::BuildDepsDirtyStack(Insn &insn) { + /* Build anti dependences. 
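* (a stack-dirtying call must wait for every pending stack read; the output + * edges below order it after earlier stack writes as well)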
*/ + AddDependence4InsnInVectorByType(stackUses, insn, kDependenceTypeAnti); + /* Build output depnedence. */ + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeOutput); + stackDefs.emplace_back(&insn); +} + +/* Some call insns may use all stack memory, such as "bl MCC_CleanupLocalStackRef_NaiveRCFast". */ +void AArch64DepAnalysis::BuildDepsUseStack(Insn &insn) { + /* Build true dependences. */ + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeTrue); +} + +/* Some insns may dirty all heap memory, such as a call insn. */ +void AArch64DepAnalysis::BuildDepsDirtyHeap(Insn &insn) { + /* Build anti dependences. */ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output depnedence. */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } + heapDefs.emplace_back(&insn); +} + +/* Build a pseudo node to seperate dependence graph. */ +DepNode *AArch64DepAnalysis::BuildSeparatorNode() { + Insn &pseudoSepInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_dependence_seperator); + DepNode *separatorNode = memPool.New(pseudoSepInsn, alloc); + separatorNode->SetType(kNodeTypeSeparator); + pseudoSepInsn.SetDepNode(*separatorNode); + if (beforeRA) { + RegPressure *regPressure = memPool.New(alloc); + separatorNode->SetRegPressure(*regPressure); + separatorNode->InitPressure(); + } + return separatorNode; +} + +/* Init depAnalysis data struction */ +void AArch64DepAnalysis::Init(BB &bb, MapleVector &nodes) { + curBB = &bb; + ClearAllDepData(); + lastComments.clear(); + /* Analysis live-in registers in catch BB. */ + AnalysisAmbiInsns(bb); + /* Clear all dependence nodes and push the first separator node. */ + nodes.clear(); + DepNode *pseudoSepNode = BuildSeparatorNode(); + nodes.emplace_back(pseudoSepNode); + separatorIndex = 0; + + if (beforeRA) { + /* assump first pseudo_dependence_seperator insn of current bb define live-in's registers */ + Insn *pseudoSepInsn = pseudoSepNode->GetInsn(); + for (auto ®NO : bb.GetLiveInRegNO()) { + regDefs[regNO] = pseudoSepInsn; + pseudoSepNode->AddDefReg(regNO); + pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr); + } + } +} + +/* When a separator build, it is the same as a new basic block. */ +void AArch64DepAnalysis::ClearAllDepData() { + uint32 maxRegNum; + if (beforeRA) { + maxRegNum = cgFunc.GetMaxVReg(); + } else { + maxRegNum = kAllRegNum; + } + errno_t ret = memset_s(regDefs, sizeof(Insn*) * maxRegNum, 0, sizeof(Insn*) * maxRegNum); + CHECK_FATAL(ret == EOK, "call memset_s failed in Unit"); + ret = memset_s(regUses, sizeof(RegList*) * maxRegNum, 0, sizeof(RegList*) * maxRegNum); + CHECK_FATAL(ret == EOK, "call memset_s failed in Unit"); + memBarInsn = nullptr; + lastCallInsn = nullptr; + lastFrameDef = nullptr; + + stackUses.clear(); + stackDefs.clear(); + heapUses.clear(); + heapDefs.clear(); + mayThrows.clear(); + ambiInsns.clear(); +} + +/* Analysis live-in registers in catch bb and cleanup bb. */ +void AArch64DepAnalysis::AnalysisAmbiInsns(BB &bb) { + hasAmbiRegs = false; + if (bb.GetEhSuccs().empty()) { + return; + } + + /* Union all catch bb */ + for (auto succBB : bb.GetEhSuccs()) { + const MapleSet &liveInRegSet = succBB->GetLiveInRegNO(); + set_union(liveInRegSet.begin(), liveInRegSet.end(), + ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + } + + /* Union cleanup entry bb. 
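* The ambiguous-register set is thus the union of the live-ins of all EH + * successors and of the cleanup entry BB, minus R0/R1, which the EH runtime + * itself defines.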
*/ + const MapleSet ®NOSet = cgFunc.GetCleanupEntryBB()->GetLiveInRegNO(); + std::set_union(regNOSet.begin(), regNOSet.end(), + ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + + /* Subtract R0 and R1, that is defined by eh runtime. */ + ehInRegs.erase(R0); + ehInRegs.erase(R1); + if (ehInRegs.empty()) { + return; + } + hasAmbiRegs = true; +} + +/* Check if regNO is in ehInRegs. */ +bool AArch64DepAnalysis::IfInAmbiRegs(regno_t regNO) const { + if (!hasAmbiRegs) { + return false; + } + if (ehInRegs.find(regNO) != ehInRegs.end()) { + return true; + } + return false; +} + +static bool IsYieldPoint(Insn &insn) { + /* + * It is a yieldpoint if loading from a dedicated + * register holding polling page address: + * ldr wzr, [RYP] + */ + if (insn.IsLoad() && !insn.IsLoadLabel()) { + auto mem = static_cast(insn.GetMemOpnd()); + return (mem != nullptr && mem->GetBaseRegister() != nullptr && mem->GetBaseRegister()->GetRegisterNumber() == RYP); + } + return false; +} + +/* + * Build dependences of memory operand. + * insn : a instruction with the memory access operand. + * opnd : the memory access operand. + * regProp : operand property of the memory access operandess operand. + */ +void AArch64DepAnalysis::BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop) { + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be memory Operand"); + MemOperand *memOpnd = static_cast(&opnd); + RegOperand *baseRegister = memOpnd->GetBaseRegister(); + if (baseRegister != nullptr) { + regno_t regNO = baseRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && + (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed())) { + /* Base operand has changed. */ + BuildDepsDefReg(insn, regNO); + } + } + RegOperand *indexRegister = memOpnd->GetIndexRegister(); + if (indexRegister != nullptr) { + regno_t regNO = indexRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + if (regProp.IsUse()) { + BuildDepsUseMem(insn, *memOpnd); + } else { + BuildDepsDefMem(insn, *memOpnd); + BuildDepsAmbiInsn(insn); + } + if (IsYieldPoint(insn)) { + BuildDepsMemBar(insn); + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* Build Dependency for each Operand of insn */ +void AArch64DepAnalysis::BuildOpndDependency(Insn &insn) { + const InsnDesc* md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + const OpndDesc *regProp = md->opndMD[i]; + if (opnd.IsMemoryAccessOperand()) { + BuildMemOpndDependency(insn, opnd, *regProp); + } else if (opnd.IsStImmediate()) { + if (mOp != MOP_xadrpl12) { + BuildDepsAccessStImmMem(insn, false); + } + } else if (opnd.IsRegister()) { + RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + + if (regProp->IsUse()) { + BuildDepsUseReg(insn, regNO); + } + + if (regProp->IsDef()) { + BuildDepsDefReg(insn, regNO); + } + } else if (opnd.IsConditionCode()) { + /* For condition operand, such as NE, EQ, and so on. 
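* Condition codes live in NZCV and are modelled as the single pseudo register + * kRFLAG, so a flag use/def is also ordered against the last call, which may + * clobber the flags.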
*/ + if (regProp->IsUse()) { + BuildDepsUseReg(insn, kRFLAG); + BuildDepsBetweenControlRegAndCall(insn, false); + } + + if (regProp->IsDef()) { + BuildDepsDefReg(insn, kRFLAG); + BuildDepsBetweenControlRegAndCall(insn, true); + } + } else if (opnd.IsList()) { + ListOperand &listOpnd = static_cast(opnd); + /* Build true dependences */ + for (auto lst : listOpnd.GetOperands()) { + regno_t regNO = lst->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + } + } +} + +static bool IsLazyLoad(MOperator op) { + return (op == MOP_lazy_ldr) || (op == MOP_lazy_ldr_static) || (op == MOP_lazy_tail); +} + +/* + * Build dependences in some special issue (stack/heap/throw/clinit/lazy binding/control flow). + * insn : a instruction. + * depNode : insn's depNode. + * nodes : the dependence nodes inclue insn's depNode. + */ +void AArch64DepAnalysis::BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector &nodes) { + const InsnDesc *md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + if (insn.IsCall() || insn.IsTailCall()) { + /* Caller saved registers. */ + BuildCallerSavedDeps(insn); + BuildStackPassArgsDeps(insn); + + if (mOp == MOP_xbl) { + FuncNameOperand &target = static_cast(insn.GetOperand(0)); + if ((target.GetName() == "MCC_InitializeLocalStackRef") || + (target.GetName() == "MCC_ClearLocalStackRef") || + (target.GetName() == "MCC_DecRefResetPair")) { + /* Write stack memory. */ + BuildDepsDirtyStack(insn); + } else if ((target.GetName() == "MCC_CleanupLocalStackRef_NaiveRCFast") || + (target.GetName() == "MCC_CleanupLocalStackRefSkip_NaiveRCFast") || + (target.GetName() == "MCC_CleanupLocalStackRefSkip")) { + /* UseStackMemory. */ + BuildDepsUseStack(insn); + } else if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC) { + /* potential C aliasing. */ + BuildDepsDirtyStack(insn); + } + } + BuildDepsDirtyHeap(insn); + BuildDepsAmbiInsn(insn); + if (lastCallInsn != nullptr) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + lastCallInsn = &insn; + } else if (insn.IsClinit() || IsLazyLoad(insn.GetMachineOpcode()) || + insn.GetMachineOpcode() == MOP_arrayclass_cache_ldr) { + BuildDepsDirtyHeap(insn); + BuildDepsDefReg(insn, kRFLAG); + if (insn.GetMachineOpcode() != MOP_adrp_ldr) { + BuildDepsDefReg(insn, R16); + BuildDepsDefReg(insn, R17); + } + } else if ((mOp == MOP_xret) || md->IsBranch()) { + BuildDepsControlAll(depNode, nodes); + } else if (insn.IsMemAccessBar()) { + BuildDepsMemBar(insn); + } else if (insn.IsSpecialIntrinsic()) { + BuildDepsDirtyHeap(insn); + } +} + +/* + * If the instruction's number of current basic block more than kMaxDependenceNum, + * then insert some pseudo separator node to split baic block. + */ +void AArch64DepAnalysis::SeperateDependenceGraph(MapleVector &nodes, uint32 &nodeSum) { + if ((nodeSum > 0) && ((nodeSum % kMaxDependenceNum) == 0)) { + DEBUG_ASSERT(nodeSum == nodes.size(), "CG internal error, nodeSum should equal to nodes.size."); + /* Add a pseudo node to seperate dependence graph. 
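* Sketch of the windowing with kMaxDependenceNum == N: + * sep0 -> insn1 .. insnN -> sep1 -> insnN+1 .. insn2N -> sep2 -> ... + * Every node in a window depends on the window's leading separator, which + * bounds the number of cross-window edges the builder must create.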
*/ + DepNode *separatorNode = BuildSeparatorNode(); + separatorNode->SetIndex(nodeSum); + nodes.emplace_back(separatorNode); + BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (regDefs[regNO] != nullptr) { + AppendRegUseList(*(separatorNode->GetInsn()), regNO); + separatorNode->AddUseReg(regNO); + separatorNode->SetRegUses(*regUses[regNO]); + } + } + } + ClearAllDepData(); + separatorIndex = nodeSum++; + } +} + +/* + * Generate a depNode, + * insn : create depNode for the instruction. + * nodes : a vector to store depNode. + * nodeSum : the new depNode's index. + * comments : those comment insn between last no-comment's insn and insn. + */ +DepNode *AArch64DepAnalysis::GenerateDepNode(Insn &insn, MapleVector &nodes, + int32 nodeSum, const MapleVector &comments) { + DepNode *depNode = nullptr; + Reservation *rev = mad.FindReservation(insn); + DEBUG_ASSERT(rev != nullptr, "rev is nullptr"); + depNode = memPool.New(insn, alloc, rev->GetUnit(), rev->GetUnitNum(), *rev); + if (beforeRA) { + RegPressure *regPressure = memPool.New(alloc); + depNode->SetRegPressure(*regPressure); + depNode->InitPressure(); + } + depNode->SetIndex(nodeSum); + nodes.emplace_back(depNode); + insn.SetDepNode(*depNode); + + constexpr size_t vectorSize = 5; + depNode->ReservePreds(vectorSize); + depNode->ReserveSuccs(vectorSize); + + if (!comments.empty()) { + depNode->SetComments(comments); + } + return depNode; +} + +void AArch64DepAnalysis::BuildAmbiInsnDependency(Insn &insn) { + const auto &defRegnos = insn.GetDepNode()->GetDefRegnos(); + for (const auto ®NO : defRegnos) { + if (IfInAmbiRegs(regNO)) { + BuildDepsAmbiInsn(insn); + break; + } + } +} + +void AArch64DepAnalysis::BuildMayThrowInsnDependency(Insn &insn) { + /* build dependency for maythrow insn; */ + if (insn.MayThrow()) { + BuildDepsMayThrowInsn(insn); + if (lastFrameDef != nullptr) { + AddDependence(*lastFrameDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeThrow); + } + } +} + +void AArch64DepAnalysis::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) { + const auto &useRegnos = depNode.GetUseRegnos(); + if (beforeRA) { + depNode.InitRegUsesSize(useRegnos.size()); + } + for (auto regNO : useRegnos) { + AppendRegUseList(insn, regNO); + if (beforeRA) { + depNode.SetRegUses(*regUses[regNO]); + if (regDefs[regNO] == nullptr) { + regDefs[regNO] = nodes[separatorIndex]->GetInsn(); + nodes[separatorIndex]->AddDefReg(regNO); + nodes[separatorIndex]->SetRegDefs(nodes[separatorIndex]->GetDefRegnos().size(), regUses[regNO]); + } + } + } + + const auto &defRegnos = depNode.GetDefRegnos(); + size_t i = 0; + if (beforeRA) { + depNode.InitRegDefsSize(defRegnos.size()); + } + for (const auto regNO : defRegnos) { + regDefs[regNO] = &insn; + regUses[regNO] = nullptr; + if (beforeRA) { + depNode.SetRegDefs(i, nullptr); + if (regNO >= R0 && regNO <= R3) { + depNode.SetHasPreg(true); + } else if (regNO == R8) { + depNode.SetHasNativeCallRegister(true); + } + } + ++i; + } +} + +/* Update stack and heap dependency */ +void AArch64DepAnalysis::UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn) { + if (!insn.MayThrow()) { + return; + } + depNode.SetLocInsn(locInsn); + mayThrows.emplace_back(&insn); + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeThrow); + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeThrow); +} + +/* Add a separatorNode to the end of a nodes + * * before 
+
+/*
+ * Add a separatorNode to the end of nodes.
+ * Before RA: add all live-out registers to this separatorNode's uses.
+ */
+void AArch64DepAnalysis::AddEndSeparatorNode(MapleVector<DepNode*> &nodes) {
+    DepNode *separatorNode = BuildSeparatorNode();
+    nodes.emplace_back(separatorNode);
+    BuildDepsSeparator(*separatorNode, nodes);
+
+    if (beforeRA) {
+        /* for all live-out registers of the current bb */
+        for (auto &regNO : curBB->GetLiveOutRegNO()) {
+            if (regDefs[regNO] != nullptr) {
+                AppendRegUseList(*(separatorNode->GetInsn()), regNO);
+                separatorNode->AddUseReg(regNO);
+                separatorNode->SetRegUses(*regUses[regNO]);
+            }
+        }
+    }
+}
+
+/*
+ * Build dependence graph.
+ * 1: Build dependence nodes.
+ * 2: Build edges between dependence nodes. Edges are:
+ *    2.1) True dependences
+ *    2.2) Anti dependences
+ *    2.3) Output dependences
+ *    2.4) Barrier dependences
+ */
+void AArch64DepAnalysis::Run(BB &bb, MapleVector<DepNode*> &nodes) {
+    /* Initialize internal data. */
+    Init(bb, nodes);
+    uint32 nodeSum = 1;
+    MapleVector<Insn*> comments(alloc.Adapter());
+    const Insn *locInsn = bb.GetFirstLoc();
+    FOR_BB_INSNS(insn, (&bb)) {
+        if (!insn->IsMachineInstruction()) {
+            if (insn->IsImmaterialInsn()) {
+                if (!insn->IsComment()) {
+                    locInsn = insn;
+                } else {
+                    comments.emplace_back(insn);
+                }
+            } else if (insn->IsCfiInsn()) {
+                if (!nodes.empty()) {
+                    nodes.back()->AddCfiInsn(*insn);
+                }
+            }
+            continue;
+        }
+        /* Add a pseudo node to separate the dependence graph when appropriate */
+        SeperateDependenceGraph(nodes, nodeSum);
+        /* generate a DepNode */
+        DepNode *depNode = GenerateDepNode(*insn, nodes, nodeSum, comments);
+        ++nodeSum;
+        comments.clear();
+        /* Build dependency for a may-throw insn */
+        BuildMayThrowInsnDependency(*insn);
+        /* Build dependency for each operand of the insn */
+        BuildOpndDependency(*insn);
+        /* Build dependency for special insns */
+        BuildSpecialInsnDependency(*insn, *depNode, nodes);
+        /* Build dependency for an AmbiInsn if needed */
+        BuildAmbiInsnDependency(*insn);
+        /* Update stack and heap dependency */
+        UpdateStackAndHeapDependency(*depNode, *insn, *locInsn);
+        if (insn->IsFrameDef()) {
+            lastFrameDef = insn;
+        }
+        /* A separator exists. */
+        AddDependence(*nodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator);
+        /* Update register uses and register defs */
+        UpdateRegUseAndDef(*insn, *depNode, nodes);
+    }
+
+    AddEndSeparatorNode(nodes);
+
+    if (!comments.empty()) {
+        lastComments = comments;
+    }
+    comments.clear();
+}
+
+/* return dependence type name */
+const std::string &AArch64DepAnalysis::GetDepTypeName(DepType depType) const {
+    DEBUG_ASSERT(depType <= kDependenceTypeNone, "array boundary check failed");
+    return kDepTypeName[depType];
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bcb2796a4b07b8a71488155e85fc39e385341aa0
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp
@@ -0,0 +1,1503 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_ebo.h"
+#include "aarch64_cg.h"
+#include "mpl_logging.h"
+#include "aarch64_utils.h"
+
+namespace maplebe {
+using namespace maple;
+#define EBO_DUMP CG_DEBUG_FUNC(*cgFunc)
+
+enum AArch64Ebo::ExtOpTable : uint8 {
+    AND,
+    SXTB,
+    SXTH,
+    SXTW,
+    ZXTB,
+    ZXTH,
+    ZXTW,
+    ExtTableSize
+};
+
+namespace {
+
+using PairMOperator = MOperator[2];
+
+constexpr uint8 insPairsNum = 5;
+
+PairMOperator extInsnPairTable[ExtTableSize][insPairsNum] = {
+    /* {origMop, newMop} */
+    {{MOP_wldrb, MOP_wldrb}, {MOP_wldrsh, MOP_wldrb}, {MOP_wldrh, MOP_wldrb}, {MOP_xldrsw, MOP_wldrb},
+     {MOP_wldr, MOP_wldrb}},   /* AND */
+    {{MOP_wldrb, MOP_wldrsb}, {MOP_wldr, MOP_wldrsb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef},
+     {MOP_undef, MOP_undef}},  /* SXTB */
+    {{MOP_wldrh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldrsh, MOP_wldrsh},
+     {MOP_undef, MOP_undef}},  /* SXTH */
+    {{MOP_wldrh, MOP_wldrh}, {MOP_wldrsh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb},
+     {MOP_wldr, MOP_xldrsw}},  /* SXTW */
+    {{MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef},
+     {MOP_undef, MOP_undef}},  /* ZXTB */
+    {{MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldr, MOP_wldrh}, {MOP_undef, MOP_undef},
+     {MOP_undef, MOP_undef}},  /* ZXTH */
+    {{MOP_wldr, MOP_wldr}, {MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_undef, MOP_undef},
+     {MOP_undef, MOP_undef}}   /* ZXTW */
+};
+
+}  // anonymous namespace
+
+MOperator AArch64Ebo::ExtLoadSwitchBitSize(MOperator lowMop) const {
+    switch (lowMop) {
+        case MOP_wldrsb:
+            return MOP_xldrsb;
+        case MOP_wldrsh:
+            return MOP_xldrsh;
+        default:
+            break;
+    }
+    return lowMop;
+}
+
+bool AArch64Ebo::IsFmov(const Insn &insn) const {
+    return ((insn.GetMachineOpcode() >= MOP_xvmovsr) && (insn.GetMachineOpcode() <= MOP_xvmovrd));
+}
+
+bool AArch64Ebo::IsAdd(const Insn &insn) const {
+    return ((insn.GetMachineOpcode() >= MOP_xaddrrr) && (insn.GetMachineOpcode() <= MOP_ssub));
+}
+
+bool AArch64Ebo::IsInvalidReg(const RegOperand &opnd) const {
+    return (opnd.GetRegisterNumber() == AArch64reg::kRinvalid);
+}
+
+bool AArch64Ebo::IsZeroRegister(const Operand &opnd) const {
+    if (!opnd.IsRegister()) {
+        return false;
+    }
+    const RegOperand *regOpnd = static_cast<const RegOperand*>(&opnd);
+    return regOpnd->GetRegisterNumber() == RZR;
+}
+
+bool AArch64Ebo::IsConstantImmOrReg(const Operand &opnd) const {
+    if (opnd.IsConstImmediate()) {
+        return true;
+    }
+    return IsZeroRegister(opnd);
+}
+
+bool AArch64Ebo::IsClinitCheck(const Insn &insn) const {
+    MOperator mOp = insn.GetMachineOpcode();
+    return ((mOp == MOP_clinit) || (mOp == MOP_clinit_tail));
+}
+
+bool AArch64Ebo::IsDecoupleStaticOp(Insn &insn) const {
+    if (insn.GetMachineOpcode() == MOP_lazy_ldr_static) {
+        Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+        CHECK_FATAL(opnd1 != nullptr, "opnd1 is null!");
+        auto *stImmOpnd = static_cast<StImmOperand*>(opnd1);
+        return StringUtils::StartsWith(stImmOpnd->GetName(), namemangler::kDecoupleStaticValueStr);
+    }
+    return false;
+}
+
+static bool IsYieldPoint(Insn &insn) {
+    /*
+     * It is a yieldpoint if loading from a dedicated
+     * register holding the polling page address:
+     *     ldr wzr, [RYP]
+     */
+    if (insn.IsLoad() && !insn.IsLoadLabel()) {
+        auto mem = static_cast<MemOperand*>(insn.GetMemOpnd());
+        return (mem != nullptr && mem->GetBaseRegister() != nullptr &&
+                mem->GetBaseRegister()->GetRegisterNumber() == RYP);
+    }
+    return false;
+}
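+
+/*
+ * Illustrative sketch, not part of this patch: how extInsnPairTable is read.
+ * Each row holds {origMop, newMop} pairs for one extension kind; when the
+ * defining load of the extended register matches origMop, the load can be
+ * retyped and the extension dropped. For example, for
+ *     ldrsb w1, [...]
+ *     uxtb  w1, w1        // ZXTB row contains {MOP_wldrsb, MOP_wldrb}
+ * the load becomes ldrb and the uxtb degenerates into a plain mov:
+ *
+ *     PairMOperator *row = extInsnPairTable[ZXTB];
+ *     for (uint8 i = 0; i < insPairsNum; ++i) {
+ *         if (row[i][0] == prevMop) { newMop = row[i][1]; break; }
+ *     }
+ */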
+
+/* return true if insn is globally needed */
+bool AArch64Ebo::IsGlobalNeeded(Insn &insn) const {
+    /* Calls may have side effects. */
+    if (insn.IsCall()) {
+        return true;
+    }
+
+    /* Intrinsic calls should not be removed. */
+    if (insn.IsSpecialIntrinsic()) {
+        return true;
+    }
+
+    /* Clinit should not be removed. */
+    if (IsClinitCheck(insn)) {
+        return true;
+    }
+
+    /* Yieldpoints should not be removed by the optimizer. */
+    if (cgFunc->GetCG()->GenYieldPoint() && IsYieldPoint(insn)) {
+        return true;
+    }
+
+    std::set<regno_t> defRegs = insn.GetDefRegs();
+    for (auto defRegNo : defRegs) {
+        if (defRegNo == RZR || defRegNo == RSP || (defRegNo == RFP && CGOptions::UseFramePointer())) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/* in aarch64, resOp will not be def and use at the same time */
+bool AArch64Ebo::ResIsNotDefAndUse(Insn &insn) const {
+    (void)insn;
+    return true;
+}
+
+/* Return true if opnd lives out of bb. */
+bool AArch64Ebo::LiveOutOfBB(const Operand &opnd, const BB &bb) const {
+    CHECK_FATAL(opnd.IsRegister(), "expect register here.");
+    /* when optimize_level < 2, no live-range analysis info is available. */
+    if (live == nullptr) {
+        return false;
+    }
+    bool isLiveOut = false;
+    if (bb.GetLiveOut()->TestBit(static_cast<const RegOperand*>(&opnd)->GetRegisterNumber())) {
+        isLiveOut = true;
+    }
+    return isLiveOut;
+}
+
+bool AArch64Ebo::IsLastAndBranch(BB &bb, Insn &insn) const {
+    return (bb.GetLastInsn() == &insn) && insn.IsBranch();
+}
+
+bool AArch64Ebo::IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const {
+    MOperator mOp = insn.GetMachineOpcode();
+    if (!(mOp == MOP_wmovri32 || mOp == MOP_xmovri64 || mOp == MOP_wsfmovri || mOp == MOP_xdfmovri)) {
+        return false;
+    }
+    OpndInfo *sameInfo = opndInfo.same;
+    if (sameInfo == nullptr || sameInfo->insn == nullptr || sameInfo->bb != &bb ||
+        sameInfo->insn->GetMachineOpcode() != mOp) {
+        return false;
+    }
+    Insn *prevInsn = sameInfo->insn;
+    if (!prevInsn->GetOperand(kInsnSecondOpnd).IsImmediate()) {
+        return false;
+    }
+    auto &sameOpnd = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    auto &opnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (sameOpnd.GetValue() == opnd.GetValue()) {
+        sameInfo->refCount += opndInfo.refCount;
+        return true;
+    }
+    return false;
+}
+
+const RegOperand &AArch64Ebo::GetRegOperand(const Operand &opnd) const {
+    CHECK_FATAL(opnd.IsRegister(), "aarch64 should not have regShiftOp! opnd is not a register!");
+    const auto &res = static_cast<const RegOperand&>(opnd);
+    return res;
+}
+
+/* Create information for localOpnd from its defining insn currentInsn. */
+OpndInfo *AArch64Ebo::OperandInfoDef(BB &currentBB, Insn &currentInsn, Operand &localOpnd) {
+    int32 hashVal = localOpnd.IsRegister() ? -1 : ComputeOpndHash(localOpnd);
+    OpndInfo *opndInfoPrev = GetOpndInfo(localOpnd, hashVal);
+    OpndInfo *opndInfo = GetNewOpndInfo(currentBB, &currentInsn, localOpnd, hashVal);
+    if (localOpnd.IsMemoryAccessOperand()) {
+        MemOpndInfo *memInfo = static_cast<MemOpndInfo*>(opndInfo);
+        MemOperand *mem = static_cast<MemOperand*>(&localOpnd);
+        Operand *base = mem->GetBaseRegister();
+        Operand *offset = mem->GetOffset();
+        if (base != nullptr && base->IsRegister()) {
+            memInfo->SetBaseInfo(*OperandInfoUse(currentBB, *base));
+        }
+        if (offset != nullptr && offset->IsRegister()) {
+            memInfo->SetOffsetInfo(*OperandInfoUse(currentBB, *offset));
+        }
+    }
+    opndInfo->same = opndInfoPrev;
+    if (opndInfoPrev != nullptr) {
+        opndInfoPrev->redefined = true;
+        if (opndInfoPrev->bb == &currentBB) {
+            opndInfoPrev->redefinedInBB = true;
+            opndInfoPrev->redefinedInsn = &currentInsn;
+        }
+        UpdateOpndInfo(localOpnd, *opndInfoPrev, opndInfo, hashVal);
+    } else {
+        SetOpndInfo(localOpnd, opndInfo, hashVal);
+    }
+    return opndInfo;
+}
+
+void AArch64Ebo::DefineClinitSpecialRegisters(InsnInfo &insnInfo) {
+    Insn *insn = insnInfo.insn;
+    CHECK_FATAL(insn != nullptr, "nullptr of currInsnInfo");
+    RegOperand &phyOpnd1 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt);
+    OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, phyOpnd1);
+    opndInfo->insnInfo = &insnInfo;
+
+    RegOperand &phyOpnd2 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R17, k64BitSize, kRegTyInt);
+    opndInfo = OperandInfoDef(*insn->GetBB(), *insn, phyOpnd2);
+    opndInfo->insnInfo = &insnInfo;
+}
+
+void AArch64Ebo::BuildCallerSaveRegisters() {
+    callerSaveRegTable.clear();
+    RegOperand &phyOpndR0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
+    RegOperand &phyOpndV0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat);
+    callerSaveRegTable.emplace_back(&phyOpndR0);
+    callerSaveRegTable.emplace_back(&phyOpndV0);
+    for (uint32 i = R1; i <= R18; i++) {
+        RegOperand &phyOpnd =
+            a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(i), k64BitSize, kRegTyInt);
+        callerSaveRegTable.emplace_back(&phyOpnd);
+    }
+    for (uint32 i = V1; i <= V7; i++) {
+        RegOperand &phyOpnd =
+            a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(i), k64BitSize, kRegTyFloat);
+        callerSaveRegTable.emplace_back(&phyOpnd);
+    }
+    for (uint32 i = V16; i <= V31; i++) {
+        RegOperand &phyOpnd =
+            a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(i), k64BitSize, kRegTyFloat);
+        callerSaveRegTable.emplace_back(&phyOpnd);
+    }
+    CHECK_FATAL(callerSaveRegTable.size() < kMaxCallerSaveReg,
+                "number of elements in callerSaveRegTable must be less than 45!");
+}
+
+void AArch64Ebo::DefineAsmRegisters(InsnInfo &insnInfo) {
+    Insn *insn = insnInfo.insn;
+    DEBUG_ASSERT(insn->GetMachineOpcode() == MOP_asm, "insn should be an asm insn.");
+    ListOperand &outList = const_cast<ListOperand&>(
+        static_cast<const ListOperand&>(insn->GetOperand(kAsmOutputListOpnd)));
+    for (auto opnd : outList.GetOperands()) {
+        OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd);
+        opndInfo->insnInfo = &insnInfo;
+    }
+    ListOperand &clobberList = const_cast<ListOperand&>(
+        static_cast<const ListOperand&>(insn->GetOperand(kAsmClobberListOpnd)));
+    for (auto opnd : clobberList.GetOperands()) {
+        OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd);
+        opndInfo->insnInfo = &insnInfo;
+    }
+    ListOperand &inList = const_cast<ListOperand&>(
+        static_cast<const ListOperand&>(insn->GetOperand(kAsmInputListOpnd)));
+    for (auto opnd : inList.GetOperands()) {
+        OperandInfoUse(*(insn->GetBB()), *opnd);
+    }
+}
+
+void
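+/*
+ * Background note for the table built above (AAPCS64): x0-x18 plus v0-v7 and
+ * v16-v31 are caller-saved, so every call site must be modeled as defining
+ * all of them. A minimal membership check, illustrative only:
+ *
+ *     bool IsCallerSaved(AArch64reg r) {
+ *         return (r >= R0 && r <= R18) || (r >= V0 && r <= V7) ||
+ *                (r >= V16 && r <= V31);
+ *     }
+ */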
AArch64Ebo::DefineCallerSaveRegisters(InsnInfo &insnInfo) { + Insn *insn = insnInfo.insn; + if (insn->IsAsmInsn()) { + DefineAsmRegisters(insnInfo); + return; + } + DEBUG_ASSERT(insn->IsCall() || insn->IsTailCall(), "insn should be a call insn."); + if (CGOptions::DoIPARA()) { + auto *targetOpnd = insn->GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + RegOperand *opnd = &a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(preg), k64BitSize, + AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? kRegTyFloat : kRegTyInt); + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + return; + } + } + } + for (auto opnd : callerSaveRegTable) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } +} + +void AArch64Ebo::DefineReturnUseRegister(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xret) { + return; + } + /* Define scalar callee save register and FP, LR. */ + for (uint32 i = R19; i <= R30; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpnd); + } + + /* Define SP */ + RegOperand &phyOpndSP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RSP), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndSP); + + /* Define FP callee save registers. */ + for (uint32 i = V8; i <= V15; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + OperandInfoUse(*insn.GetBB(), phyOpnd); + } +} + +void AArch64Ebo::DefineCallUseSpecialRegister(Insn &insn) { + if (insn.GetMachineOpcode() == MOP_asm) { + return; + } + AArch64reg fpRegNO = RFP; + if (!beforeRegAlloc && cgFunc->UseFP()) { + fpRegNO = R29; + } + /* Define FP, LR. 
*/ + RegOperand &phyOpndFP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(fpRegNO, k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndFP); + RegOperand &phyOpndLR = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RLR), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndLR); + + /* Define SP */ + RegOperand &phyOpndSP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RSP), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndSP); +} + +/* return true if op1 == op2 */ +bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const { + switch (op1.GetKind()) { + case Operand::kOpdRegister: { + const RegOperand ®1 = static_cast(op1); + const RegOperand ®2 = static_cast(op2); + return reg1 == reg2; + } + case Operand::kOpdImmediate: { + const ImmOperand &imm1 = static_cast(op1); + const ImmOperand &imm2 = static_cast(op2); + return imm1 == imm2; + } + case Operand::kOpdOffset: { + const OfstOperand &ofst1 = static_cast(op1); + const OfstOperand &ofst2 = static_cast(op2); + return ofst1 == ofst2; + } + case Operand::kOpdStImmediate: { + const StImmOperand &stImm1 = static_cast(op1); + const StImmOperand &stImm2 = static_cast(op2); + return stImm1 == stImm2; + } + case Operand::kOpdMem: { + const MemOperand &mem1 = static_cast(op1); + const MemOperand &mem2 = static_cast(op2); + if (mem1.GetAddrMode() == mem2.GetAddrMode()) { + DEBUG_ASSERT(mem1.GetBaseRegister() != nullptr, "nullptr check"); + DEBUG_ASSERT(mem2.GetBaseRegister() != nullptr, "nullptr check"); + } + return ((mem1.GetAddrMode() == mem2.GetAddrMode()) && + OperandEqual(*(mem1.GetBaseRegister()), *(mem2.GetBaseRegister())) && + OperandEqual(*(mem1.GetIndexRegister()), *(mem2.GetIndexRegister())) && + OperandEqual(*(mem1.GetOffsetOperand()), *(mem2.GetOffsetOperand())) && + (mem1.GetSymbol() == mem2.GetSymbol()) && (mem1.GetSize() == mem2.GetSize())); + } + default: { + return false; + } + } +} + +int32 AArch64Ebo::GetOffsetVal(const MemOperand &memOpnd) const { + OfstOperand *offset = memOpnd.GetOffsetImmediate(); + int32 val = 0; + if (offset != nullptr) { + val += static_cast(offset->GetOffsetValue()); + + if (offset->IsSymOffset() || offset->IsSymAndImmOffset()) { + val += offset->GetSymbol()->GetStIdx().Idx(); + } + } + return val; +} + +/* + * move vreg1, #1 + * move vreg2, vreg1 + * ===> + * move vreg1, #1 + * move vreg2, #1 + * return true if do simplify successfully. 
+ */ +bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { + ImmOperand *src = static_cast(&opnd); + const InsnDesc *md = &AArch64CG::kMd[(insn.GetMachineOpcode())]; + /* avoid the invalid case "cmp wzr, #0"/"add w1, wzr, #100" */ + Operand &destOpnd = insn.GetOperand(idx); + if (src->IsZero() && destOpnd.IsRegister() && + (static_cast(destOpnd).GetRegisterType() == kRegTyInt) && + (insn.IsStore() || insn.IsMove() || md->IsCondDef())) { + insn.SetOperand(idx, *GetZeroOpnd(src->GetSize())); + return true; + } + MOperator mopCode = insn.GetMachineOpcode(); + switch (mopCode) { + case MOP_xmovrr: + case MOP_wmovrr: { + DEBUG_ASSERT(idx == kInsnSecondOpnd, "src const for move must be the second operand."); + uint32 targetSize = insn.GetOperand(idx).GetSize(); + if (src->GetSize() != targetSize) { + src = static_cast(src->Clone(*cgFunc->GetMemoryPool())); + CHECK_FATAL(src != nullptr, "pointer result is null"); + src->SetSize(targetSize); + } + if (src->IsSingleInstructionMovable() && (insn.GetOperand(kInsnFirstOpnd).GetSize() == targetSize)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << " Do constprop:Prop constval " << src->GetValue() << "into insn:\n"; + insn.Dump(); + } + insn.SetOperand(kInsnSecondOpnd, *src); + MOperator mOp = (mopCode == MOP_wmovrr) ? MOP_wmovri32 : MOP_xmovri64; + insn.SetMOP(AArch64CG::kMd[mOp]); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_xsubrrr: + case MOP_wsubrrr: { + if ((idx != kInsnThirdOpnd) || !src->IsInBitSize(kMaxImmVal24Bits, 0) || + !(src->IsInBitSize(kMaxImmVal12Bits, 0) || src->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + return false; + } + Operand &result = insn.GetOperand(0); + bool is64Bits = (result.GetSize() == k64BitSize); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " Do constprop:Prop constval " << src->GetValue() << "into insn:\n"; + insn.Dump(); + } + if (src->IsZero()) { + MOperator mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr; + insn.SetMOP(AArch64CG::kMd[mOp]); + insn.PopBackOperand(); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + insn.SetOperand(kInsnThirdOpnd, *src); + if ((mopCode == MOP_xaddrrr) || (mopCode == MOP_waddrrr)) { + is64Bits ? insn.SetMOP(AArch64CG::kMd[MOP_xaddrri12]) : insn.SetMOP(AArch64CG::kMd[MOP_waddrri12]); + } else if ((mopCode == MOP_xsubrrr) || (mopCode == MOP_wsubrrr)) { + is64Bits ? 
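+/*
+ * Illustrative encoding check, not part of this patch: an AArch64 ADD/SUB
+ * immediate must fit in 12 bits, optionally shifted left by 12 - exactly
+ * what the IsInBitSize(kMaxImmVal12Bits, ...) pair above tests:
+ *
+ *     bool IsAddSubImmEncodable(uint64 v) {
+ *         return (v & ~0xFFFULL) == 0 || (v & ~(0xFFFULL << 12)) == 0;
+ *     }
+ */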
insn.SetMOP(AArch64CG::kMd[MOP_xsubrri12]) : insn.SetMOP(AArch64CG::kMd[MOP_wsubrri12]); + } + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + default: + break; + } + return false; +} + +/* optimize csel to cset */ +bool AArch64Ebo::Csel2Cset(Insn &insn, const MapleVector &opnds) { + MOperator opCode = insn.GetMachineOpcode(); + /* csel ->cset */ + if ((opCode == MOP_wcselrrrc) || (opCode == MOP_xcselrrrc)) { + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + DEBUG_ASSERT(res != nullptr, "expect a register"); + DEBUG_ASSERT(res->IsRegister(), "expect a register"); + /* only do integers */ + RegOperand *reg = static_cast(res); + if ((res == nullptr) || (!reg->IsOfIntClass())) { + return false; + } + Operand *op0 = opnds.at(kInsnSecondOpnd); + Operand *op1 = opnds.at(kInsnThirdOpnd); + ImmOperand *imm0 = nullptr; + ImmOperand *imm1 = nullptr; + if (op0->IsImmediate()) { + imm0 = static_cast(op0); + } + if (op1->IsImmediate()) { + imm1 = static_cast(op1); + } + + bool reverse = (imm1 != nullptr) && imm1->IsOne() && + (((imm0 != nullptr) && imm0->IsZero()) || IsZeroRegister(*op0)); + if (((imm0 != nullptr) && imm0->IsOne() && (((imm1 != nullptr) && imm1->IsZero()) || IsZeroRegister(*op1))) || + reverse) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "change csel insn :\n"; + insn.Dump(); + } + AArch64CGFunc *aarFunc = static_cast(cgFunc); + Operand &condOperand = insn.GetOperand(kInsnFourthOpnd); + Operand &rflag = aarFunc->GetOrCreateRflag(); + if (!reverse) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + (opCode == MOP_xcselrrrc) ? MOP_xcsetrc : MOP_wcsetrc, *res, condOperand, rflag); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "to cset insn ====>\n"; + newInsn.Dump(); + } + } else { + auto &cond = static_cast(condOperand); + if (!CheckCondCode(cond)) { + return false; + } + CondOperand &reverseCond = a64CGFunc->GetCondOperand(GetReverseCond(cond)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + (opCode == MOP_xcselrrrc) ? MOP_xcsetrc : MOP_wcsetrc, *res, reverseCond, rflag); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "to cset insn ====>\n"; + newInsn.Dump(); + } + } + return true; + } + } + return false; +} + +/* Look at an expression that has a constant operand and attempt to simplify the computations. */ +bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &opnds, + const MapleVector &opndInfo) { + BB *bb = insn.GetBB(); + bool result = false; + if (insn.GetOperandSize() <= 1) { + return false; + } + DEBUG_ASSERT(opnds.size() > 1, "opnds size must greater than 1"); + Operand *op0 = opnds[kInsnSecondOpnd]; + Operand *op1 = opnds[kInsnThirdOpnd]; + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + CHECK_FATAL(res != nullptr, "null ptr check"); + uint32 opndSize = insn.GetDesc()->GetOperandSize(); + bool op0IsConstant = IsConstantImmOrReg(*op0) && !IsConstantImmOrReg(*op1); + bool op1IsConstant = !IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + bool bothConstant = IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + ImmOperand *immOpnd = nullptr; + Operand *op = nullptr; + int32 idx0 = kInsnSecondOpnd; + if (op0IsConstant) { + // cannot convert zero reg (r30) to a immOperand + immOpnd = IsZeroRegister(*op0) ? 
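+/*
+ * Example of the Csel2Cset rewrite above (illustrative; the constant operands
+ * are pseudo-notation for values EBO has already proven constant):
+ *     csel w0, #1, wzr, NE   ===>   cset w0, NE
+ *     csel w0, wzr, #1, NE   ===>   cset w0, EQ   // operands swapped, so the
+ *                                                 // condition is reversed
+ */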
&a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + op = op1; + if (op->IsMemoryAccessOperand()) { + op = &(insn.GetOperand(kInsnThirdOpnd)); + } + idx0 = kInsnThirdOpnd; + } else if (op1IsConstant) { + // cannot convert zero reg (r30) to a immOperand + immOpnd = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); + op = op0; + if (op->IsMemoryAccessOperand()) { + op = &(insn.GetOperand(kInsnSecondOpnd)); + } + } else if (bothConstant) { + ImmOperand *immOpnd0 = IsZeroRegister(*op0) ? &a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + ImmOperand *immOpnd1 = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); + return SimplifyBothConst(*insn.GetBB(), insn, *immOpnd0, *immOpnd1, opndSize); + } + CHECK_FATAL(immOpnd != nullptr, "constant operand required!"); + CHECK_FATAL(op != nullptr, "constant operand required!"); + /* For orr insn and one of the opnd is zero + * orr resOp, imm1, #0 | orr resOp, #0, imm1 + * =======> + * mov resOp, imm1 */ + if (((insn.GetMachineOpcode() == MOP_wiorrri12) || (insn.GetMachineOpcode() == MOP_xiorrri13)) && immOpnd->IsZero()) { + MOperator mOp = opndSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, *op); + bb->ReplaceInsn(insn, newInsn); + return true; + } + /* For the imm is 0. Then replace the insn by a move insn. */ + if (((insn.GetMachineOpcode() >= MOP_xaddrrr ) && (insn.GetMachineOpcode() <= MOP_sadd) && immOpnd->IsZero()) || + (op1IsConstant && (insn.GetMachineOpcode() >= MOP_xsubrrr ) && (insn.GetMachineOpcode() <= MOP_ssub) && + immOpnd->IsZero())) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(opndSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr, *res, *op); + bb->ReplaceInsn(insn, newInsn); + return true; + } + + if ((insn.GetMachineOpcode() == MOP_xaddrrr) || (insn.GetMachineOpcode() == MOP_waddrrr)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + /* + * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers + * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + */ + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + MOperator mOp = opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, *op, *immOpnd); + bb->ReplaceInsn(insn, newInsn); + result = true; + } + } + } + /* Look for the sequence which can be simpified. */ + if (result || (insn.GetMachineOpcode() == MOP_xaddrri12) || (insn.GetMachineOpcode() == MOP_waddrri12)) { + Insn *prev = opndInfo[idx0]->insn; + if ((prev != nullptr) && ((prev->GetMachineOpcode() == MOP_xaddrri12) || + (prev->GetMachineOpcode() == MOP_waddrri12))) { + OpndInfo *prevInfo0 = opndInfo[idx0]->insnInfo->origOpnd[kInsnSecondOpnd]; + /* if prevop0 has been redefined. skip this optimiztation. 
*/ + if (prevInfo0->redefined) { + return result; + } + /* Implicit conversion */ + if (insn.GetOperand(kInsnFirstOpnd).GetSize() != insn.GetOperand(kInsnSecondOpnd).GetSize()) { + return result; + } + Operand &prevOpnd0 = prev->GetOperand(kInsnSecondOpnd); + ImmOperand &imm0 = static_cast(prev->GetOperand(kInsnThirdOpnd)); + int64_t val = imm0.GetValue() + immOpnd->GetValue(); + ImmOperand &imm1 = a64CGFunc->CreateImmOperand(val, opndSize, imm0.IsSignedValue()); + if (imm1.IsInBitSize(kMaxImmVal24Bits, 0) && (imm1.IsInBitSize(kMaxImmVal12Bits, 0) || + imm1.IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + MOperator mOp = (opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12); + bb->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, prevOpnd0, imm1)); + result = true; + } + } + } + return result; +} + +ConditionCode AArch64Ebo::GetReverseCond(const CondOperand &cond) const { + switch (cond.GetCode()) { + case CC_NE: + return CC_EQ; + case CC_EQ: + return CC_NE; + case CC_LT: + return CC_GE; + case CC_GE: + return CC_LT; + case CC_GT: + return CC_LE; + case CC_LE: + return CC_GT; + default: + CHECK_FATAL(0, "Not support yet."); + } + return kCcLast; +} + +/* return true if cond == CC_LE */ +bool AArch64Ebo::CheckCondCode(const CondOperand &cond) const { + switch (cond.GetCode()) { + case CC_NE: + case CC_EQ: + case CC_LT: + case CC_GE: + case CC_GT: + case CC_LE: + return true; + default: + return false; + } +} + +bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, + const ImmOperand &immOperand1, uint32 opndSize) const { + MOperator mOp = insn.GetMachineOpcode(); + int64 val = 0; + /* do not support negative const simplify yet */ + if (immOperand0.GetValue() < 0 || immOperand1.GetValue() < 0) { + return false; + } + uint64 opndValue0 = static_cast(immOperand0.GetValue()); + uint64 opndValue1 = static_cast(immOperand1.GetValue()); + switch (mOp) { + case MOP_weorrri12: + case MOP_weorrrr: + case MOP_xeorrri13: + case MOP_xeorrrr: + val = static_cast(opndValue0 ^ opndValue1); + break; + case MOP_wandrri12: + case MOP_waddrri24: + case MOP_wandrrr: + case MOP_xandrri13: + case MOP_xandrrr: + val = static_cast(opndValue0 & opndValue1); + break; + case MOP_wiorrri12: + case MOP_wiorrrr: + case MOP_xiorrri13: + case MOP_xiorrrr: + val = static_cast(opndValue0 | opndValue1); + break; + default: + return false; + } + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + ImmOperand *immOperand = &a64CGFunc->CreateImmOperand(val, opndSize, false); + if (!immOperand->IsSingleInstructionMovable()) { + DEBUG_ASSERT(res->IsRegister(), " expect a register operand"); + static_cast(cgFunc)->SplitMovImmOpndInstruction(val, *(static_cast(res)), &insn); + bb.RemoveInsn(insn); + } else { + MOperator newmOp = opndSize == k64BitSize ? 
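+/*
+ * Example of SimplifyBothConst above (illustrative, operands shown as the
+ * constants EBO has already proven):
+ *     orr w0, #0x0F, #0x30   ===>   mov w0, #0x3F
+ * When the folded value is not single-instruction movable, it is split with
+ * SplitMovImmOpndInstruction instead of emitting one mov.
+ */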
MOP_xmovri64 : MOP_wmovri32; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newmOp, *res, *immOperand); + bb.ReplaceInsn(insn, newInsn); + } + return true; +} + +bool AArch64Ebo::OperandLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) const { + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + auto *regProp = nextInsn->GetDesc()->opndMD[static_cast(i)]; + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + return LiveOutOfBB(regOpnd, *insn.GetBB()); +} + +bool AArch64Ebo::ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, + MOperator oldMop, const RegOperand& opnd) { + if (newMop == oldMop) { + return true; + } + if (prevOpndInfo == nullptr || prevOpndInfo->refCount > 1) { + return false; + } + if (OperandLiveAfterInsn(opnd, *insn)) { + return false; + } + Insn *prevInsn = prevOpndInfo->insn; + MemOperand *memOpnd = static_cast(prevInsn->GetMemOpnd()); + DEBUG_ASSERT(!prevInsn->IsStorePair(), "do not do this opt for str pair"); + DEBUG_ASSERT(!prevInsn->IsLoadPair(), "do not do this opt for ldr pair"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && + !a64CGFunc->IsOperandImmValid(newMop, prevInsn->GetMemOpnd(), kInsnSecondOpnd)) { + return false; + } + uint32 shiftAmount = memOpnd->ShiftAmount(); + if (shiftAmount == 0) { + return true; + } + const InsnDesc *md = &AArch64CG::kMd[newMop]; + uint32 memSize = md->GetOperandSize() / k8BitSize; + uint32 validShiftAmount = memSize == 8 ? 3 : memSize == 4 ? 2 : memSize == 2 ? 
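+/*
+ * The shift check here mirrors AArch64 scaled register-offset addressing
+ * (illustrative): a register offset may only be shifted by log2 of the access
+ * size, i.e. 8-byte -> LSL #3, 4-byte -> #2, 2-byte -> #1, 1-byte -> #0.
+ * Any other shift amount would make the retyped load unencodable, so the
+ * combine is rejected.
+ */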
1 : 0; + if (shiftAmount != validShiftAmount) { + return false; + } + return true; +} + +bool AArch64Ebo::CombineExtensionAndLoad(Insn *insn, const MapleVector &origInfos, + ExtOpTable idx, bool is64bits) { + if (!beforeRegAlloc) { + return false; + } + OpndInfo *opndInfo = origInfos[kInsnSecondOpnd]; + if (opndInfo == nullptr) { + return false; + } + Insn *prevInsn = opndInfo->insn; + if (prevInsn == nullptr) { + return false; + } + + MOperator prevMop = prevInsn->GetMachineOpcode(); + DEBUG_ASSERT(prevMop != MOP_undef, "Invalid opcode of instruction!"); + PairMOperator *begin = &extInsnPairTable[idx][0]; + PairMOperator *end = &extInsnPairTable[idx][insPairsNum]; + auto pairIt = std::find_if(begin, end, [prevMop](const PairMOperator insPair) { + return prevMop == insPair[0]; + }); + if (pairIt == end) { + return false; + } + + auto &res = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + OpndInfo *prevOpndInfo = GetOpndInfo(res, -1); + MOperator newPreMop = (*pairIt)[1]; + DEBUG_ASSERT(newPreMop != MOP_undef, "Invalid opcode of instruction!"); + if (!ValidPatternForCombineExtAndLoad(prevOpndInfo, insn, newPreMop, prevMop, + res)) { + return false; + } + auto *newMemOp = + GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, newPreMop); + if (newMemOp == nullptr) { + return false; + } + prevInsn->SetMemOpnd(newMemOp); + if (is64bits && idx <= SXTW && idx >= SXTB) { + newPreMop = ExtLoadSwitchBitSize(newPreMop); + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + prevDstOpnd.SetSize(k64BitSize); + prevDstOpnd.SetValidBitsNum(k64BitSize); + } + prevInsn->SetMOP(AArch64CG::kMd[newPreMop]); + MOperator movOp = is64bits ? MOP_xmovrr : MOP_wmovrr; + if (insn->GetMachineOpcode() == MOP_wandrri12 || + insn->GetMachineOpcode() == MOP_xandrri13) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + movOp, insn->GetOperand(kInsnFirstOpnd), + insn->GetOperand(kInsnSecondOpnd)); + insn->GetBB()->ReplaceInsn(*insn, newInsn); + } else { + insn->SetMOP(AArch64CG::kMd[movOp]); + } + return true; +} + +bool AArch64Ebo::CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, + bool is64bits, bool isFp) const { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = prevInsn->GetOperand(kInsnThirdOpnd); + /* may overflow */ + if ((prevInsn->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64bits) { + return false; + } + MOperator mOp = isFp ? (is64bits ? MOP_dmadd : MOP_smadd) : (is64bits ? 
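+/*
+ * Example of the multiply-add fusion here (illustrative):
+ *     mul x1, x1, x2
+ *     add x0, x0, x1     ===>   madd x0, x1, x2, x0
+ * The k32BitSize guard above rejects the fold when the mul defined a 32-bit
+ * value that the add consumes as 64-bit, since madd cannot mix operand widths.
+ */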
MOP_xmaddrrrr : MOP_wmaddrrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2, *addOpnd)); + return true; +} + +bool AArch64Ebo::CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp) { + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + if (insnInfo == nullptr) { + return false; + } + Operand &addOpnd = insn->GetOperand(static_cast(pos)); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + return CombineMultiplyAdd(insn, insn1, insnInfo, &addOpnd, is64bits, isFp); + } + return false; +} + +bool AArch64Ebo::CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const { + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + if (insnInfo == nullptr) { + return false; + } + Operand &subOpnd = insn->GetOperand(kInsnSecondOpnd); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = insn1->GetOperand(kInsnThirdOpnd); + /* may overflow */ + if ((insn1->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64bits) { + return false; + } + MOperator mOp = isFp ? (is64bits ? MOP_dmsub : MOP_smsub) : (is64bits ? MOP_xmsubrrrr : MOP_wmsubrrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2, subOpnd)); + return true; + } + return false; +} + +bool CheckInsnRefField(const Insn &insn, size_t opndIndex) { + if (insn.IsAccessRefField() && insn.AccessMem()) { + Operand &opnd0 = insn.GetOperand(opndIndex); + if (opnd0.IsRegister()) { + return true; + } + } + return false; +} + +bool AArch64Ebo::CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const { + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &src = insn->GetOperand(kInsnSecondOpnd); + if (res.GetSize() != src.GetSize()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + /* don't use register if it was redefined. 
*/ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = insn1->GetOperand(kInsnThirdOpnd); + MOperator mOp = isFp ? (is64bits ? MOP_dnmul : MOP_snmul) : (is64bits ? MOP_xmnegrrr : MOP_wmnegrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2)); + return true; + } + return false; +} + +bool AArch64Ebo::CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp) const { + if (opndInfo.insn == nullptr) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + AArch64CGFunc *aarchFunc = static_cast(cgFunc); + Insn *prevInsn = opndInfo.insn; + InsnInfo *insnInfo = opndInfo.insnInfo; + if (insnInfo == nullptr) { + return false; + } + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = prevInsn->GetMachineOpcode(); + if (!isFp && ((opc1 == MOP_xlsrrri6) || (opc1 == MOP_wlsrrri5))) { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &res = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + Operand &immOpnd1 = is64bits ? aarchFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) + : aarchFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); + int64 immVal2 = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immV2 = __builtin_ffsll(immVal2 + 1) - 1; + if (immVal1 + immV2 < k1BitSize || (is64bits && immVal1 + immV2 > k64BitSize) || + (!is64bits && immVal1 + immV2 > k32BitSize)) { + return false; + } + Operand &immOpnd2 = is64bits ? aarchFunc->CreateImmOperand(immV2, kMaxImmVal6Bits, false) + : aarchFunc->CreateImmOperand(immV2, kMaxImmVal5Bits, false); + MOperator mOp = (is64bits ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5); + insn.GetBB()->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, immOpnd1, immOpnd2)); + return true; + } + return false; +} + +/* Do some special pattern */ +bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origInfos) { + MOperator opCode = insn.GetMachineOpcode(); + AArch64CGFunc *aarchFunc = static_cast(cgFunc); + switch (opCode) { + /* + * mov R503, R0 + * mov R0, R503 + * ==> mov R0, R0 + */ + case MOP_wmovrr: + case MOP_xmovrr: { + OpndInfo *opndInfo = origInfos[kInsnSecondOpnd]; + if (opndInfo == nullptr) { + return false; + } + Insn *prevInsn = opndInfo->insn; + if ((prevInsn != nullptr) && (prevInsn->GetMachineOpcode() == opCode) && + (prevInsn == insn.GetPreviousMachineInsn()) && + !RegistersIdentical(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd)) && + !RegistersIdentical(insn.GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnSecondOpnd))) { + Operand ®1 = insn.GetOperand(kInsnFirstOpnd); + Operand ®2 = prevInsn->GetOperand(kInsnSecondOpnd); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), reg1, reg2); + insn.GetBB()->ReplaceInsn(insn, newInsn); + return true; + } + break; + } + /* + * Extension elimination. Look for load extension pair. There are two cases. 
+ * 1) extension size == load size -> change the load type or eliminate the extension + * 2) extension size > load size -> possibly eliminating the extension + * + * Example of 1) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] + * sxtb x1, x1 zxtb x1, x1 sxtb x1, x1 zxtb x1, x1 + * ===> ldrsb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> ldrb x1, [] + * mov x1, x1 mov x1, x1 mov x1, x1 mov x1, x1 + * + * Example of 2) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] + * sxth x1, x1 zxth x1, x1 sxth x1, x1 zxth x1, x1 + * ===> ldrb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> no change + * mov x1, x1 mov x1, x1 mov x1, x1 + */ + case MOP_wandrri12: { + bool doAndOpt = false; + if (static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue() == 0xff) { + doAndOpt = CombineExtensionAndLoad(&insn, origInfos, AND, false); + } + if (doAndOpt) { + return doAndOpt; + } + /* + * lsr d0, d1, #6 + * and d0, d0, #1 + * ===> ubfx d0, d1, #6, #1 + */ + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + if (!beforeRegAlloc && immValue != 0 && + (static_cast(immValue) & (static_cast(immValue) + 1)) == 0) { + /* immValue is (1 << n - 1) */ + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + return CombineLsrAnd(insn, *opndInfo, false, false); + } + break; + } + case MOP_xandrri13: { + bool doAndOpt = false; + if (static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue() == 0xff) { + doAndOpt = CombineExtensionAndLoad(&insn, origInfos, AND, true); + } + if (doAndOpt) { + return doAndOpt; + } + /* + * lsr d0, d1, #6 + * and d0, d0, #1 + * ===> ubfx d0, d1, #6, #1 + */ + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + if (!beforeRegAlloc && immValue != 0 && + (static_cast(immValue) & (static_cast(immValue) + 1)) == 0) { + /* immValue is (1 << n - 1) */ + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + return CombineLsrAnd(insn, *opndInfo, true, false); + } + break; + } + case MOP_xsxtb32: + return CombineExtensionAndLoad(&insn, origInfos, SXTB, false); + case MOP_xsxtb64: + return CombineExtensionAndLoad(&insn, origInfos, SXTB, true); + case MOP_xsxth32: + return CombineExtensionAndLoad(&insn, origInfos, SXTH, false); + case MOP_xsxth64: + return CombineExtensionAndLoad(&insn, origInfos, SXTH, true); + case MOP_xsxtw64: + return CombineExtensionAndLoad(&insn, origInfos, SXTW, true); + case MOP_xuxtb32: + return CombineExtensionAndLoad(&insn, origInfos, ZXTB, false); + case MOP_xuxth32: + return CombineExtensionAndLoad(&insn, origInfos, ZXTH, false); + case MOP_xuxtw64: + return CombineExtensionAndLoad(&insn, origInfos, ZXTW, true); + /* + * lsl x1, x1, #3 + * add x0, x0, x1 + * ===> add x0, x0, x1, 3 + * + * mul x1, x1, x2 + * add x0, x0, x1 or add x0, x1, x0 + * ===> madd x0, x1, x2, x0 + */ + case MOP_xaddrrr: + case MOP_waddrrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if ((opndInfo != nullptr) && (opndInfo->insn != nullptr)) { + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo1 = opndInfo->insnInfo; + if (insnInfo1 == nullptr) { + return false; + } + Operand &op0 = insn.GetOperand(kInsnSecondOpnd); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((opc1 == MOP_xlslrri6) || (opc1 == MOP_wlslrri5)) { + /* don't use register if it was redefined. 
*/ + if (cgFunc->GetMirModule().IsCModule()) { + /* global opt will do this pattern when is CMoudle */ + return false; + } + OpndInfo *opndInfo1 = insnInfo1->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &res = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + auto &immOpnd = static_cast(insn1->GetOperand(kInsnThirdOpnd)); + uint32 xLslrriBitLen = 6; + uint32 wLslrriBitLen = 5; + Operand &shiftOpnd = aarchFunc->CreateBitShiftOperand(BitShiftOperand::kLSL, + static_cast(immOpnd.GetValue()), static_cast(( + opCode == MOP_xlslrri6) ? xLslrriBitLen : wLslrriBitLen)); + MOperator mOp = (is64bits ? MOP_xaddrrrs : MOP_waddrrrs); + insn.GetBB()->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, op0, opnd1, shiftOpnd)); + return true; + } else if ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)) { + return CombineMultiplyAdd(&insn, insn1, insnInfo1, &op0, is64bits, false); + } + } + opndInfo = origInfos.at(kInsnSecondOpnd); + return CheckCanDoMadd(&insn, opndInfo, kInsnThirdOpnd, is64bits, false); + } + /* + * fmul d1, d1, d2 + * fadd d0, d0, d1 or add d0, d1, d0 + * ===> fmadd d0, d1, d2, d0 + */ + case MOP_dadd: + case MOP_sadd: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CheckCanDoMadd(&insn, opndInfo, kInsnThirdOpnd, is64bits, true)) { + return true; + } + opndInfo = origInfos.at(kInsnThirdOpnd); + if (CheckCanDoMadd(&insn, opndInfo, kInsnSecondOpnd, is64bits, true)) { + return true; + } + break; + } + /* + * mul x1, x1, x2 + * sub x0, x0, x1 + * ===> msub x0, x1, x2, x0 + */ + case MOP_xsubrrr: + case MOP_wsubrrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if (CombineMultiplySub(&insn, opndInfo, is64bits, false)) { + return true; + } + break; + } + /* + * fmul d1, d1, d2 + * fsub d0, d0, d1 + * ===> fmsub d0, d1, d2, d0 + */ + case MOP_dsub: + case MOP_ssub: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if (CombineMultiplySub(&insn, opndInfo, is64bits, true)) { + return true; + } + break; + } + /* + * mul x1, x1, x2 + * neg x0, x1 + * ===> mneg x0, x1, x2 + */ + case MOP_xinegrr: + case MOP_winegrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CombineMultiplyNeg(&insn, opndInfo, is64bits, false)) { + return true; + } + break; + } + /* + * fmul d1, d1, d2 + * fneg d0, d1 + * ===> fnmul d0, d1, d2 + */ + case MOP_wfnegrr: + case MOP_xfnegrr: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CombineMultiplyNeg(&insn, opndInfo, is64bits, true)) { + return true; + } + break; + } + case MOP_xcsetrc: + case MOP_wcsetrc: { + /* i. cmp x0, x1 + * cset w0, EQ ===> cmp x0, x1 + * cmp w0, #0 cset w0, EQ + * cset w0, NE + * + * ii. 
cmp x0, x1 + * cset w0, EQ ===> cmp x0, x1 + * cmp w0, #0 cset w0, NE + * cset w0, EQ + * + * a.< -1 : 0x20ff25e0 > < 0 > cmp(226) (opnd0: vreg:C105 class: [CC]) (opnd1: vreg:R104 class: [I]) (opnd2: + * vreg:R106 class: [I]) + * b.< -1 : 0x20ff60a0 > < 0 > cset(72) (opnd0: vreg:R101 class: [I]) (opnd1: CC: EQ) + * c.< -1* : 0x20ff3870 > < 0 > cmp(223) (opnd0: vreg:C105 class: [CC]) (opnd1: vreg:R101 class: [I]) (opnd2: + * imm:0) + * d.< * -1 : 0x20ff3908 > < 0 > cset(72) (opnd0: vreg:R107 class: [I]) (opnd1: CC: NE) + * d1.< -1 : 0x20ff3908 > < 0 > * cset(72) (opnd0: vreg:R107 class: [I]) (opnd1: CC: EQ) i, d + * ===> mov R107 R101 ii, a,b,c,d1 ===> a,b,cset Rxx + * NE, c, mov R107 Rxx + */ + auto &cond = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((cond.GetCode() != CC_NE) && (cond.GetCode() != CC_EQ)) { + return false; + } + bool reverse = (cond.GetCode() == CC_EQ); + OpndInfo *condInfo = origInfos[kInsnSecondOpnd]; + if ((condInfo != nullptr) && condInfo->insn) { + Insn *cmp1 = condInfo->insn; + if ((cmp1->GetMachineOpcode() == MOP_xcmpri) || (cmp1->GetMachineOpcode() == MOP_wcmpri)) { + InsnInfo *cmpInfo1 = condInfo->insnInfo; + CHECK_FATAL(cmpInfo1 != nullptr, "pointor cmpInfo1 is null"); + OpndInfo *info0 = cmpInfo1->origOpnd[kInsnSecondOpnd]; + /* if R101 was not redefined. */ + if ((info0 != nullptr) && (info0->insnInfo != nullptr) && (info0->insn != nullptr) && + (reverse || !info0->redefined) && cmp1->GetOperand(kInsnThirdOpnd).IsImmediate()) { + Insn *csetInsn = info0->insn; + MOperator opc1 = csetInsn->GetMachineOpcode(); + if (((opc1 == MOP_xcsetrc) || (opc1 == MOP_wcsetrc)) && + static_cast(cmp1->GetOperand(kInsnThirdOpnd)).IsZero()) { + CondOperand &cond1 = static_cast(csetInsn->GetOperand(kInsnSecondOpnd)); + if (!CheckCondCode(cond1)) { + return false; + } + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === do specical condition optimization, replace insn ===> \n"; + insn.Dump(); + } + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + CHECK_FATAL(result != nullptr, "pointor result is null"); + uint32 size = result->GetSize(); + if (reverse) { + /* After regalloction, we can't create a new register. */ + if (!beforeRegAlloc) { + return false; + } + AArch64CGFunc *aarFunc = static_cast(cgFunc); + Operand &r = aarFunc->CreateRegisterOperandOfType(static_cast(result)->GetRegisterType(), + size / kBitsPerByte); + /* after generate a new vreg, check if the size of DataInfo is big enough */ + EnlargeSpaceForLA(*csetInsn); + CondOperand &cond2 = aarFunc->GetCondOperand(GetReverseCond(cond1)); + Operand &rflag = aarFunc->GetOrCreateRflag(); + Insn &newCset = cgFunc->GetInsnBuilder()->BuildInsn( + result->GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc, r, cond2, rflag); + /* new_cset use the same cond as cset_insn. */ + IncRef(*info0->insnInfo->origOpnd[kInsnSecondOpnd]); + csetInsn->GetBB()->InsertInsnAfter(*csetInsn, newCset); + MOperator mOp = (result->GetSize() == k64BitSize ? MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *result, r); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === with new insn ===> \n"; + newInsn.Dump(); + } + } else { + Operand *result1 = &csetInsn->GetOperand(kInsnFirstOpnd); + MOperator mOp = ((result->GetSize() == k64BitSize) ? 
MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *result, *result1); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === with new insn ===> \n"; + newInsn.Dump(); + } + } + return true; + } + } + } + } + } /* end case MOP_wcsetrc */ + [[clang::fallthrough]]; + default: + break; + } + return false; +} + +/* + * *iii. mov w16, v10.s[1] // FMOV from simd 105 ---> replace_insn + * mov w1, w16 ----->insn + * ==> + * mov w1, v10.s[1] + */ +bool AArch64Ebo::IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const { + if (insn.GetMachineOpcode() == MOP_wmovrr && replaceInsn.GetMachineOpcode() == MOP_xvmovrv) { + insn.SetMOP(AArch64CG::kMd[replaceInsn.GetMachineOpcode()]); + return true; + } + return false; +} + +bool AArch64Ebo::IsPseudoRet(Insn &insn) const { + MOperator mop = insn.GetMachineOpcode(); + if (mop == MOP_pseudo_ret_int || mop == MOP_pseudo_ret_float) { + return true; + } + return false; +} + +bool AArch64Ebo::ChangeLdrMop(Insn &insn, const Operand &opnd) const { + DEBUG_ASSERT(insn.IsLoad(), "expect insn is load in ChangeLdrMop"); + DEBUG_ASSERT(opnd.IsRegister(), "expect opnd is a register in ChangeLdrMop"); + + const RegOperand *regOpnd = static_cast(&opnd); + if (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterType() != regOpnd->GetRegisterType()) { + return false; + } + + if (static_cast(insn.GetOperand(kInsnSecondOpnd)).GetIndexRegister()) { + return false; + } + + bool bRet = true; + if (regOpnd->GetRegisterType() == kRegTyFloat) { + switch (insn.GetMachineOpcode()) { + case MOP_wldrb: + insn.SetMOP(AArch64CG::kMd[MOP_bldr]); + break; + case MOP_wldrh: + insn.SetMOP(AArch64CG::kMd[MOP_hldr]); + break; + case MOP_wldr: + insn.SetMOP(AArch64CG::kMd[MOP_sldr]); + break; + case MOP_xldr: + insn.SetMOP(AArch64CG::kMd[MOP_dldr]); + break; + case MOP_wldli: + insn.SetMOP(AArch64CG::kMd[MOP_sldli]); + break; + case MOP_xldli: + insn.SetMOP(AArch64CG::kMd[MOP_dldli]); + break; + case MOP_wldrsb: + case MOP_wldrsh: + default: + bRet = false; + break; + } + } else if (regOpnd->GetRegisterType() == kRegTyInt) { + switch (insn.GetMachineOpcode()) { + case MOP_bldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldrb]); + break; + case MOP_hldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldrh]); + break; + case MOP_sldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldr]); + break; + case MOP_dldr: + insn.SetMOP(AArch64CG::kMd[MOP_xldr]); + break; + case MOP_sldli: + insn.SetMOP(AArch64CG::kMd[MOP_wldli]); + break; + case MOP_dldli: + insn.SetMOP(AArch64CG::kMd[MOP_xldli]); + break; + default: + bRet = false; + break; + } + } else { + DEBUG_ASSERT(false, "Internal error."); + } + return bRet; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b3e4b978b4d9ca604a1cb465a7679aa7bf065d3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp @@ -0,0 +1,2146 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_emitter.h" +#include +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "metadata_layout.h" +#include "cfi.h" +#include "dbg.h" +#include "aarch64_obj_emitter.h" + +namespace { +using namespace maple; +const std::unordered_set kJniNativeFuncList = { + "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeReadString_7C_28J_29Ljava_2Flang_2FString_3B_native", + "Landroid_2Fos_2FParcel_3B_7CnativeWriteInt_7C_28JI_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeReadInt_7C_28J_29I_native", + "Landroid_2Fos_2FParcel_3B_7CnativeWriteInterfaceToken_7C_28JLjava_2Flang_2FString_3B_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeEnforceInterface_7C_28JLjava_2Flang_2FString_3B_29V_native" +}; +constexpr uint32 kBinSearchInsnCount = 56; +// map func name to pair +using Func2CodeInsnMap = std::unordered_map>; +Func2CodeInsnMap func2CodeInsnMap { + { "Ljava_2Flang_2FString_3B_7ChashCode_7C_28_29I", + { "maple/mrt/codetricks/arch/arm64/hashCode.s", 29 } }, + { "Ljava_2Flang_2FString_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z", + { "maple/mrt/codetricks/arch/arm64/stringEquals.s", 50 } } +}; +constexpr uint32 kQuadInsnCount = 2; + +void GetMethodLabel(const std::string &methodName, std::string &methodLabel) { + methodLabel = ".Lmethod_desc." + methodName; +} +} + +namespace maplebe { +using namespace maple; + +void AArch64AsmEmitter::EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (!cgFunc.GetFunction().IsJava()) { + return; + } + std::string methodDescLabel; + GetMethodLabel(cgFunc.GetFunction().GetName(), methodDescLabel); + (void)emitter.Emit("\t.word " + methodDescLabel + "-.\n"); + emitter.IncreaseJavaInsnCount(); +} + +void AArch64AsmEmitter::EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (cgFunc.GetFunction().GetModule()->IsJavaModule()) { + std::string labelName = ".Label.name." + cgFunc.GetFunction().GetName(); + (void)emitter.Emit("\t.word " + labelName + " - .\n"); + } +} + +/* + * emit java method description which contains address and size of local reference area + * as well as method metadata. 
+ */
+void AArch64AsmEmitter::EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) {
+  CGFunc &cgFunc = funcEmitInfo.GetCGFunc();
+  if (!cgFunc.GetFunction().IsJava()) {
+    return;
+  }
+  (void)emitter.Emit("\t.section\t.rodata\n");
+  (void)emitter.Emit("\t.align\t2\n");
+  std::string methodInfoLabel;
+  GetMethodLabel(cgFunc.GetFunction().GetName(), methodInfoLabel);
+  (void)emitter.Emit(methodInfoLabel + ":\n");
+  EmitRefToMethodInfo(funcEmitInfo, emitter);
+  /* local reference area */
+  AArch64MemLayout *memLayout = static_cast<AArch64MemLayout*>(cgFunc.GetMemlayout());
+  int32 refOffset = memLayout->GetRefLocBaseLoc();
+  uint32 refNum = memLayout->GetSizeOfRefLocals() / kOffsetAlign;
+  /* for ea usage */
+  AArch64CGFunc &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+  IntrinsiccallNode *cleanEANode = aarchCGFunc.GetCleanEANode();
+  if (cleanEANode != nullptr) {
+    refNum += static_cast<uint32>(cleanEANode->NumOpnds());
+    refOffset -= static_cast<int32>(cleanEANode->NumOpnds() * kIntregBytelen);
+  }
+  (void)emitter.Emit("\t.short ").Emit(refOffset).Emit("\n");
+  (void)emitter.Emit("\t.short ").Emit(refNum).Emit("\n");
+}
+
+/* the fast_exception_handling lsda */
+void AArch64AsmEmitter::EmitFastLSDA(FuncEmitInfo &funcEmitInfo) {
+  CGFunc &cgFunc = funcEmitInfo.GetCGFunc();
+  AArch64CGFunc &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+  CG *currCG = cgFunc.GetCG();
+
+  Emitter *emitter = currCG->GetEmitter();
+  PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx();
+  const std::string idx = std::to_string(pIdx);
+  /*
+   * .word 0xFFFFFFFF
+   * .word .Label.LTest_3B_7C_3Cinit_3E_7C_28_29V3-func_start_label
+   */
+  (void)emitter->Emit("\t.word 0xFFFFFFFF\n");
+  (void)emitter->Emit("\t.word .L." + idx + "__");
+  if (aarchCGFunc.NeedCleanup()) {
+    emitter->Emit(cgFunc.GetCleanupLabel()->GetLabelIdx());
+  } else {
+    DEBUG_ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64AsmEmitter::EmitFastLSDA");
+    emitter->Emit(cgFunc.GetExitBB(0)->GetLabIdx());
+  }
+  emitter->Emit("-.L."
+ idx + "__") + .Emit(cgFunc.GetStartLabel()->GetLabelIdx()) + .Emit("\n"); + emitter->IncreaseJavaInsnCount(); +} + +/* the normal gcc_except_table */ +void AArch64AsmEmitter::EmitFullLSDA(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + EHFunc *ehFunc = cgFunc.GetEHFunc(); + Emitter *emitter = currCG->GetEmitter(); + /* emit header */ + emitter->Emit("\t.align 3\n"); + emitter->Emit("\t.section .gcc_except_table,\"a\",@progbits\n"); + emitter->Emit("\t.align 3\n"); + /* emit LSDA header */ + LSDAHeader *lsdaHeader = ehFunc->GetLSDAHeader(); + emitter->EmitStmtLabel(lsdaHeader->GetLSDALabel()->GetLabelIdx()); + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetLPStartEncoding()).Emit("\n"); + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetTTypeEncoding()).Emit("\n"); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaHeader->GetTTypeOffset()); + emitter->EmitStmtLabel(lsdaHeader->GetTTypeOffset().GetStartOffset()->GetLabelIdx()); + /* emit call site table */ + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetCallSiteEncoding()).Emit("\n"); + /* callsite table size */ + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(ehFunc->GetLSDACallSiteTable()->GetCSTable()); + /* callsite start */ + emitter->EmitStmtLabel(ehFunc->GetLSDACallSiteTable()->GetCSTable().GetStartOffset()->GetLabelIdx()); + ehFunc->GetLSDACallSiteTable()->SortCallSiteTable([&aarchCGFunc](const LSDACallSite *a, const LSDACallSite *b) { + CHECK_FATAL(a != nullptr, "nullptr check"); + CHECK_FATAL(b != nullptr, "nullptr check"); + LabelIDOrder id1 = aarchCGFunc.GetLabelOperand(a->csStart.GetEndOffset()->GetLabelIdx())->GetLabelOrder(); + LabelIDOrder id2 = aarchCGFunc.GetLabelOperand(b->csStart.GetEndOffset()->GetLabelIdx())->GetLabelOrder(); + /* id1 and id2 should not be default value -1u */ + CHECK_FATAL(id1 != 0xFFFFFFFF, "illegal label order assigned"); + CHECK_FATAL(id2 != 0xFFFFFFFF, "illegal label order assigned"); + return id1 < id2; + }); + const MapleVector &callSiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callSiteTable.size(); ++i) { + LSDACallSite *lsdaCallSite = callSiteTable[i]; + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csStart); + + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csLength); + + if (lsdaCallSite->csLandingPad.GetStartOffset()) { + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csLandingPad); + } else { + DEBUG_ASSERT(lsdaCallSite->csAction == 0, "csAction error!"); + emitter->Emit("\t.uleb128 "); + if (aarchCGFunc.NeedCleanup()) { + /* if landing pad is 0, we emit this call site as cleanup code */ + LabelPair cleaupCode; + cleaupCode.SetStartOffset(cgFunc.GetStartLabel()); + cleaupCode.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->EmitLabelPair(cleaupCode); + } else if (cgFunc.GetFunction().IsJava()) { + DEBUG_ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64Emitter::EmitFullLSDA"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + (void)emitter->Emit(".L." + idx).Emit("__").Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + (void)emitter->Emit(" - .L." 
+ idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + } else { + emitter->Emit("0\n"); + } + } + emitter->Emit("\t.uleb128 ").Emit(lsdaCallSite->csAction).Emit("\n"); + } + + /* + * quick hack: insert a call site entry for the whole function body. + * this will hand in any pending (uncaught) exception to its caller. Note that + * __gxx_personality_v0 in libstdc++ is coded so that if exception table exists, + * the call site table must have an entry for any possibly raised exception, + * otherwise __cxa_call_terminate will be invoked immediately, thus the caller + * does not get the chance to take charge. + */ + if (aarchCGFunc.NeedCleanup() || cgFunc.GetFunction().IsJava()) { + /* call site for clean-up */ + LabelPair funcStart; + funcStart.SetStartOffset(cgFunc.GetStartLabel()); + funcStart.SetEndOffset(cgFunc.GetStartLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(funcStart); + LabelPair funcLength; + funcLength.SetStartOffset(cgFunc.GetStartLabel()); + funcLength.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(funcLength); + LabelPair cleaupCode; + cleaupCode.SetStartOffset(cgFunc.GetStartLabel()); + cleaupCode.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + if (aarchCGFunc.NeedCleanup()) { + emitter->EmitLabelPair(cleaupCode); + } else { + DEBUG_ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64AsmEmitter::EmitFullLSDA"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + (void)emitter->Emit(".L." + idx).Emit("__").Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + (void)emitter->Emit(" - .L." + idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + } + emitter->Emit("\t.uleb128 0\n"); + if (!cgFunc.GetFunction().IsJava()) { + /* call site for stack unwind */ + LabelPair unwindStart; + unwindStart.SetStartOffset(cgFunc.GetStartLabel()); + unwindStart.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(unwindStart); + LabelPair unwindLength; + unwindLength.SetStartOffset(cgFunc.GetCleanupLabel()); + unwindLength.SetEndOffset(cgFunc.GetEndLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(unwindLength); + emitter->Emit("\t.uleb128 0\n"); + emitter->Emit("\t.uleb128 0\n"); + } + } + /* callsite end label */ + emitter->EmitStmtLabel(ehFunc->GetLSDACallSiteTable()->GetCSTable().GetEndOffset()->GetLabelIdx()); + /* tt */ + const LSDAActionTable *lsdaActionTable = ehFunc->GetLSDAActionTable(); + for (size_t i = 0; i < lsdaActionTable->Size(); ++i) { + LSDAAction *lsdaAction = lsdaActionTable->GetActionTable().at(i); + emitter->Emit("\t.byte ").Emit(lsdaAction->GetActionIndex()).Emit("\n"); + emitter->Emit("\t.byte ").Emit(lsdaAction->GetActionFilter()).Emit("\n"); + } + emitter->Emit("\t.align 3\n"); + for (int32 i = ehFunc->GetEHTyTableSize() - 1; i >= 0; i--) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehFunc->GetEHTyTableMember(i)); + MIRTypeKind typeKind = mirType->GetKind(); + if (((typeKind == kTypeScalar) && (mirType->GetPrimType() == PTY_void)) || (typeKind == kTypeStructIncomplete) || + (typeKind == kTypeInterfaceIncomplete)) { + continue; + } + CHECK_FATAL((typeKind == kTypeClass) || (typeKind == kTypeClassIncomplete), "NYI"); + const std::string &tyName = GlobalTables::GetStrTable().GetStringFromStrIdx(mirType->GetNameStrIdx()); + std::string dwRefString(".LDW.ref."); + 
dwRefString += CLASSINFO_PREFIX_STR; + dwRefString += tyName; + dwRefString += " - ."; + emitter->Emit("\t.4byte " + dwRefString + "\n"); + } + /* end of lsda */ + emitter->EmitStmtLabel(lsdaHeader->GetTTypeOffset().GetEndOffset()->GetLabelIdx()); +} + +void AArch64AsmEmitter::EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) { + (void)name; + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + Emitter &emitter = *(currCG->GetEmitter()); + LabelOperand &label = aarchCGFunc.GetOrCreateLabelOperand(labIdx); + /* if label order is default value -1, set new order */ + if (label.GetLabelOrder() == 0xFFFFFFFF) { + label.SetLabelOrder(currCG->GetLabelOrderCnt()); + currCG->IncreaseLabelOrderCnt(); + } + PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx(); + char *puIdx = strdup(std::to_string(pIdx).c_str()); + const std::string &labelName = cgFunc.GetFunction().GetLabelTab()->GetName(labIdx); + if (currCG->GenerateVerboseCG()) { + (void)emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\t//label order ").Emit(label.GetLabelOrder()); + if (!labelName.empty() && labelName.at(0) != '@') { + /* If label name has @ as its first char, it is not from MIR */ + (void)emitter.Emit(", MIR: @").Emit(labelName).Emit("\n"); + } else { + (void)emitter.Emit("\n"); + } + } else { + (void)emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\n"); + } + free(puIdx); + puIdx = nullptr; +} + +void AArch64AsmEmitter::EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (cgFunc.GetFunction().IsJava()) { + Emitter *emitter = cgFunc.GetCG()->GetEmitter(); + /* emit a comment of current address from the begining of java text section */ + std::stringstream ss; + ss << "\n\t// addr: 0x" << std::hex << (emitter->GetJavaInsnCount() * kInsnSize) << "\n"; + cgFunc.GetCG()->GetEmitter()->Emit(ss.str()); + } +} + +void AArch64AsmEmitter::RecordRegInfo(FuncEmitInfo &funcEmitInfo) const { + if (!CGOptions::DoIPARA() || funcEmitInfo.GetCGFunc().GetFunction().IsJava()) { + return; + } + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + + std::set referedRegs; + MIRFunction &mirFunc = cgFunc.GetFunction(); + FOR_ALL_BB_REV(bb, &aarchCGFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsCall() || insn->IsTailCall()) { + auto *targetOpnd = insn->GetCallTargetOperand(); + bool safeCheck = false; + CHECK_FATAL(targetOpnd != nullptr, "target is null in AArch64Emitter::IsCallToFunctionThatNeverReturns"); + if (targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == maple::kStFunc, "funcst must be a function name symbol") + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + safeCheck = true; + for (auto preg : func->GetReferedRegs()) { + referedRegs.insert(preg); + } + } + } + if (!safeCheck) { + mirFunc.SetReferedRegsValid(false); + return; + } + } + if (referedRegs.size() == kMaxRegNum) { + break; + } + uint32 opndNum = insn->GetOperandSize(); + const InsnDesc *md = &AArch64CG::kMd[insn->GetMachineOpcode()]; + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->GetMachineOpcode() == MOP_asm) { + if (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd) { + for (auto opnd : 
static_cast(insn->GetOperand(i)).GetOperands()) { + if (opnd->IsRegister()) { + referedRegs.insert(static_cast(opnd)->GetRegisterNumber()); + } + } + } + continue; + } + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + /* all use, skip it */ + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (!memOpnd.IsIntactIndexed()) { + referedRegs.insert(base->GetRegisterNumber()); + } + } else if (opnd.IsRegister()) { + RegType regType = static_cast(opnd).GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + continue; + } + bool isDef = md->GetOpndDes(i)->IsRegDef(); + if (isDef) { + referedRegs.insert(static_cast(opnd).GetRegisterNumber()); + } + } + } + } + } + mirFunc.SetReferedRegsValid(true); +#ifdef DEBUG + for (auto reg : referedRegs) { + if (reg > kMaxRegNum) { + DEBUG_ASSERT(0, "unexpected preg"); + } + } +#endif + mirFunc.CopyReferedRegs(referedRegs); +} + +void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + /* emit header of this function */ + Emitter &emitter = *currCG->GetEmitter(); + // insert for __cxx_global_var_init + if (cgFunc.GetName() == "__cxx_global_var_init") { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_initialization)) { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_termination)) { + (void)emitter.Emit("\t.section\t.fini_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + (void)emitter.Emit("\n"); + EmitMethodDesc(funcEmitInfo, emitter); + /* emit java code to the java section. */ + if (cgFunc.GetFunction().IsJava()) { + std::string sectionName = namemangler::kMuidJavatextPrefixStr; + (void)emitter.Emit("\t.section ." 
+ sectionName + ",\"ax\"\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_section)) { + const std::string §ionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName(); + (void)emitter.Emit("\t.section " + sectionName).Emit(",\"ax\",@progbits\n"); + } else if (CGOptions::IsFunctionSections()) { + (void)emitter.Emit("\t.section .text.").Emit(cgFunc.GetName()).Emit(",\"ax\",@progbits\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_constructor_priority)) { + (void)emitter.Emit("\t.section\t.text.startup").Emit(",\"ax\",@progbits\n"); + } else { + (void)emitter.Emit("\t.text\n"); + } + if (CGOptions::GetFuncAlignPow() != 0) { + (void)emitter.Emit("\t.align ").Emit(CGOptions::GetFuncAlignPow()).Emit("\n"); + } + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc.GetFunction().GetStIdx().Idx()); + const std::string &funcName = std::string(cgFunc.GetShortFuncName().c_str()); + + // manually replace function with optimized assembly language + if (CGOptions::IsReplaceASM()) { + auto it = func2CodeInsnMap.find(funcSt->GetName()); + if (it != func2CodeInsnMap.end()) { + std::string optFile = it->second.first; + struct stat buffer; + if (stat(optFile.c_str(), &buffer) == 0) { + std::ifstream codetricksFd(optFile); + if (!codetricksFd.is_open()) { + ERR(kLncErr, " %s open failed!", optFile.c_str()); + LogInfo::MapleLogger() << "wrong" << '\n'; + } else { + std::string contend; + while (getline(codetricksFd, contend)) { + (void)emitter.Emit(contend + "\n"); + } + } + } + emitter.IncreaseJavaInsnCount(it->second.second); +#ifdef EMIT_INSN_COUNT + EmitJavaInsnAddr(funcEmitInfo); +#endif /* ~EMIT_INSN_COUNT */ + return; + } + } + std::string funcStName = funcSt->GetName(); + if (funcSt->GetFunction()->GetAttr(FUNCATTR_weak)) { + (void)emitter.Emit("\t.weak\t" + funcStName + "\n"); + (void)emitter.Emit("\t.hidden\t" + funcStName + "\n"); + } else if (funcSt->GetFunction()->GetAttr(FUNCATTR_local)) { + (void)emitter.Emit("\t.local\t" + funcStName + "\n"); + } else if (funcSt->GetFunction() && (!funcSt->GetFunction()->IsJava()) && funcSt->GetFunction()->IsStatic()) { + // nothing + } else { + /* should refer to function attribute */ + (void)emitter.Emit("\t.globl\t").Emit(funcSt->GetName()).Emit("\n"); + if (!currCG->GetMIRModule()->IsCModule()) { + (void)emitter.Emit("\t.hidden\t").Emit(funcSt->GetName()).Emit("\n"); + } + } + (void)emitter.Emit("\t.type\t" + funcStName + ", %function\n"); + /* add these messege , solve the simpleperf tool error */ + EmitRefToMethodDesc(funcEmitInfo, emitter); + (void)emitter.Emit(funcStName + ":\n"); + + /* if the last insn is call, then insert nop */ + bool found = false; + FOR_ALL_BB_REV(bb, &aarchCGFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction()) { + if (insn->IsCall()) { + Insn &newInsn = aarchCGFunc.GetInsnBuilder()->BuildInsn(MOP_nop); + bb->InsertInsnAfter(*insn, newInsn); + } + found = true; + break; + } + } + if (found) { + break; + } + } + + RecordRegInfo(funcEmitInfo); + + /* emit instructions */ + FOR_ALL_BB(bb, &aarchCGFunc) { + if (bb->IsUnreachable()) { + continue; + } + if (currCG->GenerateVerboseCG()) { + (void)emitter.Emit("# freq:").Emit(bb->GetFrequency()).Emit("\n"); + } + /* emit bb headers */ + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + if (aarchCGFunc.GetMirModule().IsCModule() && bb->IsBBNeedAlign() && bb->GetAlignNopNum() != kAlignMovedFlag) { + uint32 power = bb->GetAlignPower(); + (void)emitter.Emit("\t.p2align ").Emit(power).Emit("\n"); + } + 
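        /* emit the BB's numeric header label; e.g. with puIdx 3 and label 42 this prints ".L.3__42:" */
+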
EmitBBHeaderLabel(funcEmitInfo, funcName, bb->GetLabIdx()); + } + + FOR_BB_INSNS(insn, bb) { + if (insn->IsCfiInsn()) { + EmitAArch64CfiInsn(emitter, *insn); + } else if (insn->IsDbgInsn()) { + EmitAArch64DbgInsn(emitter, *insn); + } else { + EmitAArch64Insn(emitter, *insn); + } + } + } + if (CGOptions::IsMapleLinker()) { + /* Emit a label for calculating method size */ + (void)emitter.Emit(".Label.end." + funcStName + ":\n"); + } + (void)emitter.Emit("\t.size\t" + funcStName + ", .-").Emit(funcStName + "\n"); + + auto constructorAttr = funcSt->GetFunction()->GetAttrs().GetConstructorPriority(); + if (constructorAttr != -1) { + (void)emitter.Emit("\t.section\t.init_array." + std::to_string(constructorAttr) + ",\"aw\"\n"); + (void)emitter.Emit("\t.align 3\n"); + (void)emitter.Emit("\t.xword\t" + funcStName + "\n"); + } + + EHFunc *ehFunc = cgFunc.GetEHFunc(); + /* emit LSDA */ + if (cgFunc.GetFunction().IsJava() && (ehFunc != nullptr)) { + if (!cgFunc.GetHasProEpilogue()) { + (void)emitter.Emit("\t.word 0x55555555\n"); + emitter.IncreaseJavaInsnCount(); + } else if (ehFunc->NeedFullLSDA()) { + LSDAHeader *lsdaHeader = ehFunc->GetLSDAHeader(); + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + /* .word .Label.lsda_label-func_start_label */ + (void)emitter.Emit("\t.word .L." + idx).Emit("__").Emit(lsdaHeader->GetLSDALabel()->GetLabelIdx()); + (void)emitter.Emit("-.L." + idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + } else if (ehFunc->NeedFastLSDA()) { + EmitFastLSDA(funcEmitInfo); + } + } + uint32 size = static_cast(cgFunc.GetFunction().GetSymTab()->GetSymbolTableSize()); + for (uint32 i = 0; i < size; ++i) { + MIRSymbol *st = cgFunc.GetFunction().GetSymTab()->GetSymbolFromStIdx(i); + if (st == nullptr) { + continue; + } + MIRStorageClass storageClass = st->GetStorageClass(); + MIRSymKind symKind = st->GetSKind(); + if (storageClass == kScPstatic && symKind == kStConst) { + (void)emitter.Emit("\t.align 3\n"); + (void)emitter.Emit(st->GetName() + ":\n"); + if (st->GetKonst()->GetKind() == kConstStr16Const) { + MIRStr16Const *str16Const = safe_cast(st->GetKonst()); + emitter.EmitStr16Constant(*str16Const); + (void)emitter.Emit("\n"); + continue; + } + if (st->GetKonst()->GetKind() == kConstStrConst) { + MIRStrConst *strConst = safe_cast(st->GetKonst()); + emitter.EmitStrConstant(*strConst); + (void)emitter.Emit("\n"); + continue; + } + + switch (st->GetKonst()->GetType().GetPrimType()) { + case PTY_u32: { + MIRIntConst *intConst = safe_cast(st->GetKonst()); + (void)emitter.Emit("\t.long ").Emit(static_cast(intConst->GetExtValue())).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + break; + } + case PTY_f32: { + MIRFloatConst *floatConst = safe_cast(st->GetKonst()); + (void)emitter.Emit("\t.word ").Emit(static_cast(floatConst->GetIntValue())).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + break; + } + case PTY_f64: { + MIRDoubleConst *doubleConst = safe_cast(st->GetKonst()); + auto emitF64 = [&](int64 first, int64 second) { + (void)emitter.Emit("\t.word ").Emit(first).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + (void)emitter.Emit("\t.word ").Emit(second).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + }; + if (CGOptions::IsBigEndian()) { + emitF64(doubleConst->GetIntHigh32(), doubleConst->GetIntLow32()); + } else { + emitF64(doubleConst->GetIntLow32(), doubleConst->GetIntHigh32()); + } + break; + } + default: + DEBUG_ASSERT(false, 
"NYI"); + break; + } + } + } + + for (auto &it : cgFunc.GetEmitStVec()) { + /* emit switch table only here */ + MIRSymbol *st = it.second; + DEBUG_ASSERT(st->IsReadOnly(), "NYI"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t.align 3\n"); + emitter.IncreaseJavaInsnCount(0, true); /* just aligned */ + (void)emitter.Emit(st->GetName() + ":\n"); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + CHECK_FATAL(arrayConst != nullptr, "null ptr check"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + char *idx = strdup(std::to_string(pIdx).c_str()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); i++) { + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + CHECK_FATAL(lblConst != nullptr, "null ptr check"); + (void)emitter.Emit("\t.quad\t.L.").Emit(idx).Emit("__").Emit(lblConst->GetValue()); + (void)emitter.Emit(" - " + st->GetName() + "\n"); + emitter.IncreaseJavaInsnCount(kQuadInsnCount); + } + free(idx); + idx = nullptr; + } + /* insert manually optimized assembly language */ + if (funcSt->GetName() == "Landroid_2Futil_2FContainerHelpers_3B_7C_3Cinit_3E_7C_28_29V") { + std::string optFile = "maple/mrt/codetricks/arch/arm64/ContainerHelpers_binarySearch.s"; + struct stat buffer; + if (stat(optFile.c_str(), &buffer) == 0) { + std::ifstream binarySearchFileFD(optFile); + if (!binarySearchFileFD.is_open()) { + ERR(kLncErr, " %s open failed!", optFile.c_str()); + } else { + std::string contend; + while (getline(binarySearchFileFD, contend)) { + (void)emitter.Emit(contend + "\n"); + } + } + } + emitter.IncreaseJavaInsnCount(kBinSearchInsnCount); + } + + for (const auto &mpPair : cgFunc.GetLabelAndValueMap()) { + LabelOperand &labelOpnd = aarchCGFunc.GetOrCreateLabelOperand(mpPair.first); + A64OpndEmitVisitor visitor(emitter, nullptr); + labelOpnd.Accept(visitor); + (void)emitter.Emit(":\n"); + (void)emitter.Emit("\t.quad ").Emit(static_cast(mpPair.second)).Emit("\n"); + emitter.IncreaseJavaInsnCount(kQuadInsnCount); + } + + if (ehFunc != nullptr && ehFunc->NeedFullLSDA()) { + EmitFullLSDA(funcEmitInfo); + } +#ifdef EMIT_INSN_COUNT + if (cgFunc.GetFunction().IsJava()) { + EmitJavaInsnAddr(funcEmitInfo); + } +#endif /* ~EMIT_INSN_COUNT */ +} + +void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + emitter.SetCurrentMOP(mOp); + const InsnDesc *md = insn.GetDesc(); + + if (!GetCG()->GenerateVerboseAsm() && !GetCG()->GenerateVerboseCG() && insn.IsComment()) { + return; + } + + switch (mOp) { + case MOP_clinit: { + EmitClinit(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_ldr: { + uint32 adrpldrInsnCount = md->GetAtomicNum(); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount); + EmitAdrpLdr(emitter, insn); + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount + 1); + } + return; + } + case MOP_counter: { + EmitCounter(emitter, insn); + return; + } + case MOP_asm: { + EmitInlineAsm(emitter, insn); + return; + } + case MOP_clinit_tail: { + EmitClinitTail(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_lazy_ldr: { + EmitLazyLoad(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_label: { + EmitAdrpLabel(emitter, insn); + return; + } + case MOP_lazy_tail: { + /* No need to emit this pseudo instruction. 
*/ + return; + } + case MOP_lazy_ldr_static: { + EmitLazyLoadStatic(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_arrayclass_cache_ldr: { + EmitArrayClassCacheLoad(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_get_and_addI: + case MOP_get_and_addL: { + EmitGetAndAddInt(emitter, insn); + return; + } + case MOP_get_and_setI: + case MOP_get_and_setL: { + EmitGetAndSetInt(emitter, insn); + return; + } + case MOP_compare_and_swapI: + case MOP_compare_and_swapL: { + EmitCompareAndSwapInt(emitter, insn); + return; + } + case MOP_string_indexof: { + EmitStringIndexOf(emitter, insn); + return; + } + case MOP_pseudo_none: + case MOP_pseduo_tls_release: { + return; + } + case MOP_tls_desc_call: { + EmitCTlsDescCall(emitter, insn); + return; + } + case MOP_tls_desc_rel: { + EmitCTlsDescRel(emitter, insn); + return; + } + case MOP_sync_lock_test_setI: + case MOP_sync_lock_test_setL: { + EmitSyncLockTestSet(emitter, insn); + return; + } + default: + break; + } + + if (CGOptions::IsNativeOpt() && mOp == MOP_xbl) { + auto *nameOpnd = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + if (nameOpnd->GetName() == "MCC_CheckThrowPendingException") { + EmitCheckThrowPendingException(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + } + + std::string format(md->format); + (void)emitter.Emit("\t").Emit(md->name).Emit("\t"); + size_t opndSize = insn.GetOperandSize(); + std::vector seq(opndSize, -1); + std::vector prefix(opndSize); /* used for print prefix like "*" in icall *rax */ + uint32 index = 0; + uint32 commaNum = 0; + for (uint32 i = 0; i < format.length(); ++i) { + char c = format[i]; + if (c >= '0' && c <= '5') { + seq[index++] = c - '0'; + ++commaNum; + } else if (c != ',') { + prefix[index].push_back(c); + } + } + + bool isRefField = (opndSize == 0) ? false : CheckInsnRefField(insn, static_cast(static_cast(seq[0]))); + if (insn.IsComment()) { + emitter.IncreaseJavaInsnCount(); + } + uint32 compositeOpnds = 0; + for (uint32 i = 0; i < commaNum; ++i) { + if (seq[i] == -1) { + continue; + } + if (prefix[i].length() > 0) { + (void)emitter.Emit(prefix[i]); + } + if (emitter.NeedToDealWithHugeSo() && (mOp == MOP_xbl || mOp == MOP_tail_call_opt_xbl)) { + auto *nameOpnd = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + /* Suport huge so here + * As the PLT section is just before java_text section, when java_text section is larger + * then 128M, instrunction of "b" and "bl" would fault to branch to PLT stub functions. Here, to save + * instuctions space, we change the branch target to a local target within 120M address, and add non-plt + * call to the target function. 
+ */ + emitter.InsertHugeSoTarget(nameOpnd->GetName()); + (void)emitter.Emit(nameOpnd->GetName() + emitter.HugeSoPostFix()); + break; + } + auto *opnd = &insn.GetOperand(static_cast(seq[i])); + if (opnd && opnd->IsRegister()) { + auto *regOpnd = static_cast(opnd); + if ((md->opndMD[static_cast(seq[i])])->IsVectorOperand()) { + regOpnd->SetVecLanePosition(-1); + regOpnd->SetVecLaneSize(0); + regOpnd->SetVecElementSize(0); + if (insn.IsVectorOp()) { + PrepareVectorOperand(regOpnd, compositeOpnds, insn); + if (compositeOpnds != 0) { + (void)emitter.Emit("{"); + } + } + } + } + A64OpndEmitVisitor visitor(emitter, md->opndMD[static_cast(seq[i])]); + + insn.GetOperand(static_cast(seq[i])).Accept(visitor); + if (compositeOpnds == 1) { + (void)emitter.Emit("}"); + } + if (compositeOpnds > 0) { + --compositeOpnds; + } + /* reset opnd0 ref-field flag, so following instruction has correct register */ + if (isRefField && (i == 0)) { + static_cast(&insn.GetOperand(static_cast(seq[0])))->SetRefField(false); + } + /* Temporary comment the label:.Label.debug.callee */ + if (i != (commaNum - 1)) { + (void)emitter.Emit(", "); + } + const uint32 commaNumForEmitLazy = 2; + if (!CGOptions::IsLazyBinding() || GetCG()->IsLibcore() || (mOp != MOP_wldr && mOp != MOP_xldr) || + commaNum != commaNumForEmitLazy || i != 1 || + !insn.GetOperand(static_cast(seq[1])).IsMemoryAccessOperand()) { + continue; + } + /* + * Only check the last operand of ldr in lo12 mode. + * Check the second operand, if it's [AArch64MemOperand::kAddrModeLo12Li] + */ + auto *memOpnd = static_cast(&insn.GetOperand(static_cast(seq[1]))); + if (memOpnd == nullptr || memOpnd->GetAddrMode() != MemOperand::kAddrModeLo12Li) { + continue; + } + const MIRSymbol *sym = memOpnd->GetSymbol(); + if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || + sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab()) { + (void)emitter.Emit("\n"); + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(1); + } + } + if (GetCG()->GenerateVerboseCG() || (GetCG()->GenerateVerboseAsm() && insn.IsComment())) { + const char *comment = insn.GetComment().c_str(); + if (comment != nullptr && strlen(comment) > 0) { + (void)emitter.Emit("\t\t// ").Emit(comment); + } + } + + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitClinit(Emitter &emitter, const Insn &insn) const { + /* + * adrp x3, __muid_data_undef_tab$$GetBoolean_dex+144 + * ldr x3, [x3, #:lo12:__muid_data_undef_tab$$GetBoolean_dex+144] + * or, + * adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B + * ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + * + * ldr x3, [x3,#112] + * ldr wzr, [x3] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_clinit]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitClinit"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + if (stImmOpnd->GetSymbol()->IsMuidDataUndefTab()) { + /* emit adrp */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("\n"); + /* emit ldr */ + 
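    /* second half of the adrp pair: reload the classinfo pointer through the muid data undef table slot */
+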
(void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + } else { + /* adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + (void)emitter.Emit("\tadrp\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + (void)emitter.Emit("\n"); + + /* ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + (void)emitter.Emit("\tldr\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", ["); + opnd0->Accept(visitor); + (void)emitter.Emit(", #:lo12:"); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + (void)emitter.Emit("]\n"); + } + /* emit "ldr x0,[x0,#48]" */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(",#"); + (void)emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState())); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + + /* emit "ldr xzr, [x0]" */ + (void)emitter.Emit("\t").Emit("ldr\txzr, ["); + opnd0->Accept(visitor); + (void)emitter.Emit("]\n"); +} + +static void AsmStringOutputRegNum( + bool isInt, uint32 regno, uint32 intBase, uint32 fpBase, std::string &strToEmit) { + regno_t newRegno; + if (isInt) { + newRegno = regno - intBase; + } else { + newRegno = regno - fpBase; + } + if (newRegno > (kDecimalMax - 1)) { + uint32 tenth = newRegno / kDecimalMax; + strToEmit += '0' + static_cast(tenth); + newRegno -= (kDecimalMax * tenth); + } + strToEmit += newRegno + '0'; +} + +void AArch64AsmEmitter::EmitInlineAsm(Emitter &emitter, const Insn &insn) const { + (void)emitter.Emit("\t//Inline asm begin\n\t"); + auto &list1 = static_cast(insn.GetOperand(kAsmOutputListOpnd)); + std::vector outOpnds; + for (auto *regOpnd : list1.GetOperands()) { + outOpnds.push_back(regOpnd); + } + auto &list2 = static_cast(insn.GetOperand(kAsmInputListOpnd)); + std::vector inOpnds; + for (auto *regOpnd : list2.GetOperands()) { + inOpnds.push_back(regOpnd); + } + auto &list6 = static_cast(insn.GetOperand(kAsmOutputRegPrefixOpnd)); + auto &list7 = static_cast(insn.GetOperand(kAsmInputRegPrefixOpnd)); + MapleString asmStr = static_cast(insn.GetOperand(kAsmStringOpnd)).GetComment(); + std::string stringToEmit; + size_t sidx = 0; + auto IsMemAccess = [](char c)->bool { + return c == '['; + }; + auto EmitRegister = [&](const char *p, bool isInt, uint32 regNO, bool unDefRegSize)->void { + if (IsMemAccess(p[0])) { + stringToEmit += "[x"; + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + stringToEmit += "]"; + } else { + DEBUG_ASSERT((p[0] == 'w' || p[0] == 'x' || p[0] == 's' || p[0] == 'd' || p[0] == 'v'), "Asm invalid register type"); + if ((p[0] == 'w' || p[0] == 'x') && unDefRegSize) { + stringToEmit += 'x'; + } else { + stringToEmit += p[0]; + } + if (!unDefRegSize) { + isInt = (p[0] == 'w' || p[0] == 'x'); + } + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + } + }; + for (size_t i = 0; i < asmStr.length(); ++i) { + switch (asmStr[i]) { + case '$': { + char c = asmStr[++i]; + if ((c >= '0') && (c <= '9')) { + auto val = static_cast(c - '0'); + if (asmStr[i + 1] >= '0' && 
asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(asmStr[++i] - '0'); + } + if (val < outOpnds.size()) { + const char *prefix = list6.stringList[val]->GetComment().c_str(); + RegOperand *opnd = outOpnds[val]; + EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } else { + val -= static_cast(outOpnds.size()); + CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number"); + RegOperand *opnd = inOpnds[val]; + /* input is a immediate */ + const char *prefix = list7.stringList[val]->GetComment().c_str(); + if (prefix[0] == 'i') { + stringToEmit += '#'; + for (size_t k = 1; k < list7.stringList[val]->GetComment().length(); ++k) { + stringToEmit += prefix[k]; + } + } else { + EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } + } + } else if (c == '{') { + c = asmStr[++i]; + CHECK_FATAL(((c >= '0') && (c <= '9')), "Inline asm : invalid register constraint number"); + auto val = static_cast(c - '0'); + if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(asmStr[++i] - '0'); + } + regno_t regno; + bool isAddr = false; + if (val < outOpnds.size()) { + RegOperand *opnd = outOpnds[val]; + regno = opnd->GetRegisterNumber(); + isAddr = IsMemAccess(list6.stringList[val]->GetComment().c_str()[0]); + } else { + val -= static_cast(outOpnds.size()); + CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number"); + RegOperand *opnd = inOpnds[val]; + regno = opnd->GetRegisterNumber(); + isAddr = IsMemAccess(list7.stringList[val]->GetComment().c_str()[0]); + } + c = asmStr[++i]; + CHECK_FATAL(c == ':', "Parsing error in inline asm string during emit"); + c = asmStr[++i]; + std::string prefix(1, c); + if (c == 'a' || isAddr) { + prefix = "[x"; + } + EmitRegister(prefix.c_str(), true, regno, false); + c = asmStr[++i]; + CHECK_FATAL(c == '}', "Parsing error in inline asm string during emit"); + } + break; + } + case '\n': { + stringToEmit += "\n\t"; + break; + } + default: + stringToEmit += asmStr[i]; + sidx++; + } + } + (void)emitter.Emit(stringToEmit); + (void)emitter.Emit("\n\t//Inline asm end\n"); +} + +void AArch64AsmEmitter::EmitClinitTail(Emitter &emitter, const Insn &insn) const { + /* + * ldr x17, [xs, #112] + * ldr wzr, [x17] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_clinit_tail]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + + /* emit "ldr x17,[xs,#112]" */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\tx17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(", #"); + (void)emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState())); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + + /* emit "ldr xzr, [x17]" */ + (void)emitter.Emit("\t").Emit("ldr\txzr, [x17]\n"); +} + +void AArch64AsmEmitter::EmitLazyLoad(Emitter &emitter, const Insn &insn) const { + /* + * ldr wd, [xs] # xd and xs should be differenct register + * ldr wd, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_lazy_ldr]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + const OpndDesc *prop1 = md->opndMD[1]; + A64OpndEmitVisitor visitor(emitter, prop0); + A64OpndEmitVisitor visitor1(emitter, prop1); + + /* emit "ldr wd, [xs]" */ + (void)emitter.Emit("\t").Emit("ldr\t"); +#ifdef USE_32BIT_REF + opnd0->Accept(visitor); +#else + opnd0->Accept(visitor1); +#endif + 
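  /* under USE_32BIT_REF the destination is printed with its own 32-bit operand desc (wd); otherwise it borrows the base register's 64-bit desc (xd) */
+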
(void)emitter.Emit(", ["); + opnd1->Accept(visitor1); + (void)emitter.Emit("]\t// lazy load.\n"); + + /* emit "ldr wd, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", ["); + opnd1->Accept(visitor1); + (void)emitter.Emit("]\t// lazy load.\n"); +} + +void AArch64AsmEmitter::EmitCounter(Emitter &emitter, const Insn &insn) const { + /* + * adrp x1, __profile_bb_table$$GetBoolean_dex+4 + * ldr w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4] + * add w17, w17, #1 + * str w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_counter]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[kInsnFirstOpnd]; + A64OpndEmitVisitor visitor(emitter, prop0); + StImmOperand *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitCounter"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + /* emit adrp */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("\n"); + /* emit ldr */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\tw17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + /* emit add */ + (void)emitter.Emit("\t").Emit("add").Emit("\tw17, w17, #1"); + (void)emitter.Emit("\n"); + /* emit str */ + (void)emitter.Emit("\t").Emit("str").Emit("\tw17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitAdrpLabel(Emitter &emitter, const Insn &insn) const { + /* adrp xd, label + * add xd, xd, #lo12:label + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_label]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto lidx = static_cast(opnd1)->GetValue(); + + /* adrp xd, label */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + char *idx; + idx = strdup(std::to_string(Globals::GetInstance()->GetBECommon()->GetMIRModule().CurFunction()->GetPuidx()).c_str()); + (void)emitter.Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n"); + + /* add xd, xd, #lo12:label */ + (void)emitter.Emit("\tadd\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(":lo12:").Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n"); + (void)emitter.Emit("\n"); + free(idx); + idx = nullptr; +} + +void AArch64AsmEmitter::EmitAdrpLdr(Emitter &emitter, const Insn &insn) const { + /* + * adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B + * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_ldr]; + Operand 
*opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitAdrpLdr"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + /* adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\n"); + + /* ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); + opnd0->Accept(visitor); + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\n"); +} + +void AArch64AsmEmitter::EmitLazyLoadStatic(Emitter &emitter, const Insn &insn) const { + /* adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset + * ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset] + * ldr wzr, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_lazy_ldr_static]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->GetOpndDes(0); + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitLazyLoadStatic"); + + /* emit "adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset" */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\t// lazy load static.\n"); + + /* emit "ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset]" */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); +#ifdef USE_32BIT_REF + const OpndDesc prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); + opnd0->Emit(emitter, &prop2); /* ldr wd, ... for emui */ +#else + opnd0->Accept(visitor); /* ldr xd, ... 
for qemu */ +#endif /* USE_32BIT_REF */ + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\t// lazy load static.\n"); + + /* emit "ldr wzr, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\twzr, ["); + opnd0->Accept(visitor); + (void)emitter.Emit("]\t// lazy load static.\n"); +} + +void AArch64AsmEmitter::EmitArrayClassCacheLoad(Emitter &emitter, const Insn &insn) const { + /* adrp xd, :got:__arrayClassCacheTable$$xxx+offset + * ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset] + * ldr wzr, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_arrayclass_cache_ldr]; + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->GetOpndDes(kInsnFirstOpnd); + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitLazyLoadStatic"); + + /* emit "adrp xd, :got:__arrayClassCacheTable$$xxx+offset" */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\t// load array class.\n"); + + /* emit "ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset]" */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); +#ifdef USE_32BIT_REF + const OpndDesc prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); + A64OpndEmitVisitor visitor2(emitter, prop2); + opnd0->Accept(visitor2); /* ldr wd, ... for emui */ +#else + opnd0->Accept(visitor); /* ldr xd, ... for qemu */ +#endif /* USE_32BIT_REF */ + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\t// load array class.\n"); + + /* emit "ldr wzr, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\twzr, ["); + opnd0->Accept(visitor); + (void)emitter.Emit("]\t// check resolve array class.\n"); +} + +/* + * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label + * add xt, x1, x2 + * label: + * ldaxr w0, [xt] + * add wt, w0, w3 + * stlxr ws, wt, [xt] + * cbnz ws, label + */ +void AArch64AsmEmitter::EmitGetAndAddInt(Emitter &emitter, const Insn &insn) const { + DEBUG_ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the oprands number"); + (void)emitter.Emit("\t//\tstart of Unsafe.getAndAddInt.\n"); + Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *tempOpnd2 = &insn.GetOperand(kInsnFourthOpnd); + Operand *objOpnd = &insn.GetOperand(kInsnFifthOpnd); + Operand *offsetOpnd = &insn.GetOperand(kInsnSixthOpnd); + Operand *deltaOpnd = &insn.GetOperand(kInsnSeventhOpnd); + Operand *labelOpnd = &insn.GetOperand(kInsnEighthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* emit add. 
*/ + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + tempOpnd0->Accept(visitor); + (void)emitter.Emit(", "); + objOpnd->Accept(visitor); + (void)emitter.Emit(", "); + offsetOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* emit label. */ + labelOpnd->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + const MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + const OpndDesc *retProp = md->opndMD[kInsnFirstOpnd]; + A64OpndEmitVisitor retVisitor(emitter, retProp); + /* emit ldaxr */ + (void)emitter.Emit("\t").Emit("ldaxr").Emit("\t"); + retVal->Accept(retVisitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* emit add. */ + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + tempOpnd1->Accept(retVisitor); + (void)emitter.Emit(", "); + retVal->Accept(retVisitor); + (void)emitter.Emit(", "); + deltaOpnd->Accept(retVisitor); + (void)emitter.Emit("\n"); + /* emit stlxr. */ + (void)emitter.Emit("\t").Emit("stlxr").Emit("\t"); + tempOpnd2->Accept(visitor); + (void)emitter.Emit(", "); + tempOpnd1->Accept(retVisitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* emit cbnz. */ + (void)emitter.Emit("\t").Emit("cbnz").Emit("\t"); + tempOpnd2->Accept(visitor); + (void)emitter.Emit(", "); + labelOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t//\tend of Unsafe.getAndAddInt.\n"); +} + +/* + * intrinsic_get_set_int w0, xt, ws, x1, x2, w3, label + * add xt, x1, x2 + * label: + * ldaxr w0, [xt] + * stlxr ws, w3, [xt] + * cbnz ws, label + */ +void AArch64AsmEmitter::EmitGetAndSetInt(Emitter &emitter, const Insn &insn) const { + /* MOP_get_and_setI and MOP_get_and_setL have 7 operands */ + DEBUG_ASSERT(insn.GetOperandSize() > kInsnSeventhOpnd, "ensure the operands number"); + Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *objOpnd = &insn.GetOperand(kInsnFourthOpnd); + Operand *offsetOpnd = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add x1, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + tempOpnd0->Accept(visitor); + (void)emitter.Emit(", "); + objOpnd->Accept(visitor); + (void)emitter.Emit(", "); + offsetOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *labelOpnd = &insn.GetOperand(kInsnSeventhOpnd); + /* label: */ + labelOpnd->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* ldaxr w0, [xt] */ + (void)emitter.Emit("\tldaxr\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + Operand *newValueOpnd = &insn.GetOperand(kInsnSixthOpnd); + /* stlxr ws, w3, [xt] */ + (void)emitter.Emit("\tstlxr\t"); + tempOpnd1->Accept(visitor); + (void)emitter.Emit(", "); + newValueOpnd->Accept(visitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cbnz w2, label */ + (void)emitter.Emit("\tcbnz\t"); + tempOpnd1->Accept(visitor); + (void)emitter.Emit(", "); + labelOpnd->Accept(visitor); + (void)emitter.Emit("\n"); +} + +/* + * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, + * Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, + * Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET + * cmp w4, w2 + * b.gt .Label.NOMATCH + * sub w2, w2, w4 + * sub w4, w4, #8 + * mov w10, w2 + * uxtw x4, w4 + * 
uxtw x2, w2 + * add x3, x3, x4 + * add x1, x1, x2 + * neg x4, x4 + * neg x2, x2 + * ldr x5, [x3,x4] + * .Label.FIRST_LOOP: + * ldr x7, [x1,x2] + * cmp x5, x7 + * b.eq .Label.STR1_LOOP + * .Label.STR2_NEXT: + * adds x2, x2, #1 + * b.le .Label.FIRST_LOOP + * b .Label.NOMATCH + * .Label.STR1_LOOP: + * adds x8, x4, #8 + * add x9, x2, #8 + * b.ge .Label.LAST_WORD + * .Label.STR1_NEXT: + * ldr x6, [x3,x8] + * ldr x7, [x1,x9] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * adds x8, x8, #8 + * add x9, x9, #8 + * b.lt .Label.STR1_NEXT + * .Label.LAST_WORD: + * ldr x6, [x3] + * sub x9, x1, x4 + * ldr x7, [x9,x2] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * add w0, w10, w2 + * b .Label.RET + * .Label.NOMATCH: + * mov w0, #-1 + * .Label.RET: + */ +void AArch64AsmEmitter::EmitStringIndexOf(Emitter &emitter, const Insn &insn) const { + /* MOP_string_indexof has 18 operands */ + DEBUG_ASSERT(insn.GetOperandSize() == 18, "ensure the operands number"); + Operand *patternLengthOpnd = &insn.GetOperand(kInsnFifthOpnd); + Operand *srcLengthOpnd = &insn.GetOperand(kInsnThirdOpnd); + const std::string patternLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(patternLengthOpnd)->GetRegisterNumber()]; + const std::string srcLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(srcLengthOpnd)->GetRegisterNumber()]; + A64OpndEmitVisitor visitor(emitter, nullptr); + /* cmp w4, w2 */ + (void)emitter.Emit("\tcmp\t"); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 16th operand of MOP_string_indexof is Label.NOMATCH */ + Operand *labelNoMatch = &insn.GetOperand(16); + /* b.gt Label.NOMATCH */ + (void)emitter.Emit("\tb.gt\t"); + labelNoMatch->Accept(visitor); + (void)emitter.Emit("\n"); + /* sub w2, w2, w4 */ + (void)emitter.Emit("\tsub\t"); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* sub w4, w4, #8 */ + (void)emitter.Emit("\tsub\t"); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit(", #8\n"); + /* the 10th operand of MOP_string_indexof is w10 */ + Operand *resultTmp = &insn.GetOperand(10); + /* mov w10, w2 */ + (void)emitter.Emit("\tmov\t"); + resultTmp->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* uxtw x4, w4 */ + (void)emitter.Emit("\tuxtw\t").Emit(patternLengthReg); + (void)emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* uxtw x2, w2 */ + (void)emitter.Emit("\tuxtw\t").Emit(srcLengthReg); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *patternStringBaseOpnd = &insn.GetOperand(kInsnFourthOpnd); + /* add x3, x3, x4 */ + (void)emitter.Emit("\tadd\t"); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", "); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + Operand *srcStringBaseOpnd = &insn.GetOperand(kInsnSecondOpnd); + /* add x1, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", "); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("\n"); + /* neg x4, x4 */ + (void)emitter.Emit("\tneg\t").Emit(patternLengthReg); + 
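  /* the lengths are negated so the loops below index backwards from the string ends, counting up towards zero */
+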
(void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + /* neg x2, x2 */ + (void)emitter.Emit("\tneg\t").Emit(srcLengthReg); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("\n"); + Operand *first = &insn.GetOperand(kInsnSixthOpnd); + /* ldr x5, [x3,x4] */ + (void)emitter.Emit("\tldr\t"); + first->Accept(visitor); + (void)emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(",").Emit(patternLengthReg); + (void)emitter.Emit("]\n"); + /* the 11th operand of MOP_string_indexof is Label.FIRST_LOOP */ + Operand *labelFirstLoop = &insn.GetOperand(11); + /* .Label.FIRST_LOOP: */ + labelFirstLoop->Accept(visitor); + (void)emitter.Emit(":\n"); + /* the 7th operand of MOP_string_indexof is x7 */ + Operand *ch2 = &insn.GetOperand(7); + /* ldr x7, [x1,x2] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(",").Emit(srcLengthReg); + (void)emitter.Emit("]\n"); + /* cmp x5, x7 */ + (void)emitter.Emit("\tcmp\t"); + first->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 13th operand of MOP_string_indexof is Label.STR1_LOOP */ + Operand *labelStr1Loop = &insn.GetOperand(13); + /* b.eq .Label.STR1_LOOP */ + (void)emitter.Emit("\tb.eq\t"); + labelStr1Loop->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 12th operand of MOP_string_indexof is Label.STR2_NEXT */ + Operand *labelStr2Next = &insn.GetOperand(12); + /* .Label.STR2_NEXT: */ + labelStr2Next->Accept(visitor); + (void)emitter.Emit(":\n"); + /* adds x2, x2, #1 */ + (void)emitter.Emit("\tadds\t").Emit(srcLengthReg); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit(", #1\n"); + /* b.le .Label.FIRST_LOOP */ + (void)emitter.Emit("\tb.le\t"); + labelFirstLoop->Accept(visitor); + (void)emitter.Emit("\n"); + /* b .Label.NOMATCH */ + (void)emitter.Emit("\tb\t"); + labelNoMatch->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.STR1_LOOP: */ + labelStr1Loop->Accept(visitor); + (void)emitter.Emit(":\n"); + /* the 8th operand of MOP_string_indexof is x8 */ + Operand *tmp1 = &insn.GetOperand(kInsnEighthOpnd); + /* adds x8, x4, #8 */ + (void)emitter.Emit("\tadds\t"); + tmp1->Accept(visitor); + (void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit(", #8\n"); + /* the 9th operand of MOP_string_indexof is x9 */ + Operand *tmp2 = &insn.GetOperand(9); + /* add x9, x2, #8 */ + (void)emitter.Emit("\tadd\t"); + tmp2->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit(", #8\n"); + /* the 15th operand of MOP_string_indexof is Label.LAST_WORD */ + Operand *labelLastWord = &insn.GetOperand(15); + /* b.ge .Label.LAST_WORD */ + (void)emitter.Emit("\tb.ge\t"); + labelLastWord->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 14th operand of MOP_string_indexof is Label.STR1_NEXT */ + Operand *labelStr1Next = &insn.GetOperand(14); + /* .Label.STR1_NEXT: */ + labelStr1Next->Accept(visitor); + (void)emitter.Emit(":\n"); + /* the 6th operand of MOP_string_indexof is x6 */ + Operand *ch1 = &insn.GetOperand(6); + /* ldr x6, [x3,x8] */ + (void)emitter.Emit("\tldr\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(","); + tmp1->Accept(visitor); + (void)emitter.Emit("]\n"); + /* ldr x7, [x1,x9] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + 
srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(","); + tmp2->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cmp x6, x7 */ + (void)emitter.Emit("\tcmp\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* b.ne .Label.STR2_NEXT */ + (void)emitter.Emit("\tb.ne\t"); + labelStr2Next->Accept(visitor); + (void)emitter.Emit("\n"); + /* adds x8, x8, #8 */ + (void)emitter.Emit("\tadds\t"); + tmp1->Accept(visitor); + (void)emitter.Emit(", "); + tmp1->Accept(visitor); + (void)emitter.Emit(", #8\n"); + /* add x9, x9, #8 */ + (void)emitter.Emit("\tadd\t"); + tmp2->Accept(visitor); + (void)emitter.Emit(", "); + tmp2->Accept(visitor); + (void)emitter.Emit(", #8\n"); + /* b.lt .Label.STR1_NEXT */ + (void)emitter.Emit("\tb.lt\t"); + labelStr1Next->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.LAST_WORD: */ + labelLastWord->Accept(visitor); + (void)emitter.Emit(":\n"); + /* ldr x6, [x3] */ + (void)emitter.Emit("\tldr\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit("]\n"); + /* sub x9, x1, x4 */ + (void)emitter.Emit("\tsub\t"); + tmp2->Accept(visitor); + (void)emitter.Emit(", "); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + /* ldr x7, [x9,x2] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + tmp2->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("]\n"); + /* cmp x6, x7 */ + (void)emitter.Emit("\tcmp\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* b.ne .Label.STR2_NEXT */ + (void)emitter.Emit("\tb.ne\t"); + labelStr2Next->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* add w0, w10, w2 */ + (void)emitter.Emit("\tadd\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", "); + resultTmp->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 17th operand of MOP_string_indexof Label.ret */ + Operand *labelRet = &insn.GetOperand(17); + /* b .Label.ret */ + (void)emitter.Emit("\tb\t"); + labelRet->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.NOMATCH: */ + labelNoMatch->Accept(visitor); + (void)emitter.Emit(":\n"); + /* mov w0, #-1 */ + (void)emitter.Emit("\tmov\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", #-1\n"); + /* .Label.ret: */ + labelRet->Accept(visitor); + (void)emitter.Emit(":\n"); +} + +/* + * intrinsic_compare_swap_int x0, xt, xs, x1, x2, w3, w4, lable1, label2 + * add xt, x1, x2 + * label1: + * ldaxr ws, [xt] + * cmp ws, w3 + * b.ne label2 + * stlxr ws, w4, [xt] + * cbnz ws, label1 + * label2: + * cset x0, eq + */ +void AArch64AsmEmitter::EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const { + /* MOP_compare_and_swapI and MOP_compare_and_swapL have 8 operands */ + DEBUG_ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number"); + const MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + Operand *temp0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *temp1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *obj = &insn.GetOperand(kInsnFourthOpnd); + Operand *offset = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add xt, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + temp0->Accept(visitor); + 
(void)emitter.Emit(", "); + obj->Accept(visitor); + (void)emitter.Emit(", "); + offset->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *label1 = &insn.GetOperand(kInsnEighthOpnd); + /* label1: */ + label1->Accept(visitor); + (void)emitter.Emit(":\n"); + /* ldaxr ws, [xt] */ + (void)emitter.Emit("\tldaxr\t"); + temp1->Accept(visitor); + (void)emitter.Emit(", ["); + temp0->Accept(visitor); + (void)emitter.Emit("]\n"); + Operand *expectedValue = &insn.GetOperand(kInsnSixthOpnd); + const OpndDesc *expectedValueProp = md->opndMD[kInsnSixthOpnd]; + /* cmp ws, w3 */ + (void)emitter.Emit("\tcmp\t"); + temp1->Accept(visitor); + (void)emitter.Emit(", "); + A64OpndEmitVisitor visitorExpect(emitter, expectedValueProp); + expectedValue->Accept(visitorExpect); + (void)emitter.Emit("\n"); + constexpr uint32 kInsnNinethOpnd = 8; + Operand *label2 = &insn.GetOperand(kInsnNinethOpnd); + /* b.ne label2 */ + (void)emitter.Emit("\tbne\t"); + label2->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *newValue = &insn.GetOperand(kInsnSeventhOpnd); + /* stlxr ws, w4, [xt] */ + (void)emitter.Emit("\tstlxr\t"); + (void)emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); + (void)emitter.Emit(", "); + newValue->Accept(visitor); + (void)emitter.Emit(", ["); + temp0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cbnz ws, label1 */ + (void)emitter.Emit("\tcbnz\t"); + (void)emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); + (void)emitter.Emit(", "); + label1->Accept(visitor); + (void)emitter.Emit("\n"); + /* label2: */ + label2->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* cset x0, eq */ + (void)emitter.Emit("\tcset\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", EQ\n"); +} + +void AArch64AsmEmitter::EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_rel]; + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + Operand *src = &insn.GetOperand(kInsnSecondOpnd); + Operand *symbol = &insn.GetOperand(kInsnThirdOpnd); + auto stImmOpnd = static_cast(symbol); + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[0]); + A64OpndEmitVisitor srcVisitor(emitter, md->opndMD[1]); + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", "); + src->Accept(srcVisitor); + (void)emitter.Emit(", #:tprel_hi12:").Emit(stImmOpnd->GetName()).Emit(", lsl #12\n"); + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", "); + result->Accept(resultVisitor); + (void)emitter.Emit(", #:tprel_lo12_nc:").Emit(stImmOpnd->GetName()).Emit("\n"); +} + +void AArch64AsmEmitter::EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_call]; + Operand *func = &insn.GetOperand(kInsnFirstOpnd); + Operand *symbol = &insn.GetOperand(kInsnThirdOpnd); + const OpndDesc *prop = md->opndMD[0]; + auto *stImmOpnd = static_cast(symbol); + const std::string &symName = stImmOpnd->GetName(); + A64OpndEmitVisitor funcVisitor(emitter, prop); + /* adrp x0, :tlsdesc:symbol */ + (void)emitter.Emit("\t").Emit("adrp\tx0, :tlsdesc:").Emit(symName).Emit("\n"); + /* ldr x1, [x0, #tlsdesc_lo12:symbol] */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + func->Accept(funcVisitor); + (void)emitter.Emit(", [x0, #:tlsdesc_lo12:").Emit(symName).Emit("]\n"); + /* add x0 
,#tlsdesc_lo12:symbol */ + (void)emitter.Emit("\t").Emit("add\tx0, x0, :tlsdesc_lo12:").Emit(symName).Emit("\n"); + /* .tlsdesccall */ + (void)emitter.Emit("\t").Emit(".tlsdesccall").Emit("\t").Emit(symName).Emit("\n"); + /* blr xd */ + (void)emitter.Emit("\t").Emit("blr").Emit("\t"); + func->Accept(funcVisitor); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + auto *result = &insn.GetOperand(kInsnFirstOpnd); + auto *temp = &insn.GetOperand(kInsnSecondOpnd); + auto *addr = &insn.GetOperand(kInsnThirdOpnd); + auto *value = &insn.GetOperand(kInsnFourthOpnd); + auto *label = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[kInsnFirstOpnd]); + A64OpndEmitVisitor tempVisitor(emitter, md->opndMD[kInsnSecondOpnd]); + A64OpndEmitVisitor addrVisitor(emitter, md->opndMD[kInsnThirdOpnd]); + A64OpndEmitVisitor valueVisitor(emitter, md->opndMD[kInsnFourthOpnd]); + A64OpndEmitVisitor labelVisitor(emitter, md->opndMD[kInsnFifthOpnd]); + /* label: */ + label->Accept(labelVisitor); + (void)emitter.Emit(":\n"); + /* ldxr x0, [x2] */ + (void)emitter.Emit("\t").Emit("ldxr").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", ["); + addr->Accept(addrVisitor); + (void)emitter.Emit("]\n"); + /* stxr w1, x3, [x2] */ + (void)emitter.Emit("\t").Emit("stxr").Emit("\t"); + temp->Accept(tempVisitor); + (void)emitter.Emit(", "); + value->Accept(valueVisitor); + (void)emitter.Emit(", ["); + addr->Accept(addrVisitor); + (void)emitter.Emit("]\n"); + /* cbnz w1, label */ + (void)emitter.Emit("\t").Emit("cbnz").Emit("\t"); + temp->Accept(tempVisitor); + (void)emitter.Emit(", "); + label->Accept(labelVisitor); + (void)emitter.Emit("\n"); + /* dmb ish */ + (void)emitter.Emit("\t").Emit("dmb").Emit("\t").Emit("ish").Emit("\n"); +} + +void AArch64AsmEmitter::EmitCheckThrowPendingException(Emitter &emitter, Insn &insn) const { + /* + * mrs x16, TPIDR_EL0 + * ldr x16, [x16, #64] + * ldr x16, [x16, #8] + * cbz x16, .lnoexception + * bl MCC_ThrowPendingException + * .lnoexception: + */ + (void)emitter.Emit("\t").Emit("mrs").Emit("\tx16, TPIDR_EL0"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #64]"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #8]"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("cbz").Emit("\tx16, .lnoeh.").Emit(maplebe::CG::GetCurCGFunc()->GetName()); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("bl").Emit("\tMCC_ThrowPendingException"); + (void)emitter.Emit("\n"); + (void)emitter.Emit(".lnoeh.").Emit(maplebe::CG::GetCurCGFunc()->GetName()).Emit(":"); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const { + /* ldr xzr, [xs] */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_ldr]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + + /* emit "ldr xzr,[xs]" */ +#ifdef USE_32BIT_REF + (void)emitter.Emit("\t").Emit("ldr").Emit("\twzr, ["); +#else + (void)emitter.Emit("\t").Emit("ldr").Emit("\txzr, ["); +#endif /* USE_32BIT_REF */ + opnd0->Accept(visitor); + (void)emitter.Emit("]"); + (void)emitter.Emit("\t// Lazy binding\n"); +} + +void AArch64AsmEmitter::PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const { + 
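+  /* Decide how the vector register will be printed (lane count x element size).
+   * e.g. the 64-bit forms (MOP_vanduuu etc.) are emitted as vN.8b (8 lanes x 8 bits),
+   * the 128-bit forms (MOP_vandvvv etc.) as vN.16b (16 lanes x 8 bits); all other
+   * opcodes keep the lane layout recorded in the instruction's VectorRegSpec. */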
VectorRegSpec *vecSpec = static_cast<VectorInsn&>(insn).GetAndRemoveRegSpecFromList();
+  compositeOpnds = vecSpec->compositeOpnds ? vecSpec->compositeOpnds : compositeOpnds;
+  regOpnd->SetVecLanePosition(vecSpec->vecLane);
+  switch (insn.GetMachineOpcode()) {
+    case MOP_vanduuu:
+    case MOP_vxoruuu:
+    case MOP_voruuu:
+    case MOP_vnotuu:
+    case MOP_vextuuui: {
+      regOpnd->SetVecLaneSize(k8ByteSize);
+      regOpnd->SetVecElementSize(k8BitSize);
+      break;
+    }
+    case MOP_vandvvv:
+    case MOP_vxorvvv:
+    case MOP_vorvvv:
+    case MOP_vnotvv:
+    case MOP_vextvvvi: {
+      regOpnd->SetVecLaneSize(k16ByteSize);
+      regOpnd->SetVecElementSize(k8BitSize);
+      break;
+    }
+    default: {
+      regOpnd->SetVecLaneSize(vecSpec->vecLaneMax);
+      regOpnd->SetVecElementSize(vecSpec->vecElementSize);
+      break;
+    }
+  }
+}
+
+struct CfiDescr {
+  const std::string name;
+  uint32 opndCount;
+  /* a 3-element OperandType array storing the cfi instruction's operand types */
+  std::array<Operand::OperandType, 3> opndTypes;
+};
+
+static CfiDescr cfiDescrTable[cfi::kOpCfiLast + 1] = {
+#define CFI_DEFINE(k, sub, n, o0, o1, o2) \
+  { ".cfi_" #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } },
+#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \
+  { "." #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } },
+#include "cfi.def"
+#undef CFI_DEFINE
+#undef ARM_DIRECTIVES_DEFINE
+  { ".cfi_undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } }
+};
+
+void AArch64AsmEmitter::EmitAArch64CfiInsn(Emitter &emitter, const Insn &insn) const {
+  MOperator mOp = insn.GetMachineOpcode();
+  CfiDescr &cfiDescr = cfiDescrTable[mOp];
+  (void)emitter.Emit("\t").Emit(cfiDescr.name);
+  for (uint32 i = 0; i < cfiDescr.opndCount; ++i) {
+    (void)emitter.Emit(" ");
+    Operand &curOperand = insn.GetOperand(i);
+    cfi::CFIOpndEmitVisitor cfiOpndEmitVisitor(emitter);
+    curOperand.Accept(cfiOpndEmitVisitor);
+    if (i < (cfiDescr.opndCount - 1)) {
+      (void)emitter.Emit(",");
+    }
+  }
+  (void)emitter.Emit("\n");
+}
+
+struct DbgDescr {
+  const std::string name;
+  uint32 opndCount;
+  /* a 3-element OperandType array storing the dbg instruction's operand types */
+  std::array<Operand::OperandType, 3> opndTypes;
+};
+
+static DbgDescr dbgDescrTable[mpldbg::kOpDbgLast + 1] = {
+#define DBG_DEFINE(k, sub, n, o0, o1, o2) \
+  { #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } },
+#include "dbg.def"
+#undef DBG_DEFINE
+  { "undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } }
+};
+
+void AArch64AsmEmitter::EmitAArch64DbgInsn(Emitter &emitter, const Insn &insn) const {
+  MOperator mOp = insn.GetMachineOpcode();
+  DbgDescr &dbgDescr = dbgDescrTable[mOp];
+  (void)emitter.Emit("\t.").Emit(dbgDescr.name);
+  for (uint32 i = 0; i < dbgDescr.opndCount; ++i) {
+    (void)emitter.Emit(" ");
+    Operand &curOperand = insn.GetOperand(i);
+    mpldbg::DBGOpndEmitVisitor dbgOpndEmitVisitor(emitter);
+    curOperand.Accept(dbgOpndEmitVisitor);
+  }
+  (void)emitter.Emit("\n");
+}
+
+bool AArch64AsmEmitter::CheckInsnRefField(const Insn &insn, size_t opndIndex) const {
+  if (insn.IsAccessRefField() && insn.AccessMem()) {
+    Operand &opnd0 = insn.GetOperand(opndIndex);
+    if (opnd0.IsRegister()) {
+      static_cast<RegOperand&>(opnd0).SetRefField(true);
+      return true;
+    }
+  }
+  return false;
+}
+
+/* new phase manager */
+bool CgEmission::PhaseRun(maplebe::CGFunc &f) {
+  Emitter *emitter = f.GetCG()->GetEmitter();
+  CHECK_NULL_FATAL(emitter);
+  if (CGOptions::GetEmitFileType() == CGOptions::kAsm) {
+    AsmFuncEmitInfo funcEmitInfo(f);
+    emitter->EmitLocalVariable(f);
+    static_cast<AArch64AsmEmitter*>(emitter)->Run(funcEmitInfo);
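+    /* text (.s) emission path: the function body has already been printed by Run()
+       above; the else branch below only builds a FuncEmitInfo so the function can
+       be encoded into the object file later */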
emitter->EmitHugeSoRoutines(); + } else { + FuncEmitInfo &funcEmitInfo = static_cast(emitter)->CreateFuncEmitInfo(f); + static_cast(emitter)->Run(funcEmitInfo); + f.SetFuncEmitInfo(&funcEmitInfo); + } + + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgEmission, cgemit) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b31261c4c6d7163444b4ff2b4a3b5dc16d1915b --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_fixshortbranch.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" + +namespace maplebe { +uint32 AArch64FixShortBranch::CalculateAlignRange(const BB &bb, uint32 addr) const { + if (addr == 0) { + return addr; + } + uint32 alignPower = bb.GetAlignPower(); + /* + * The algorithm can avoid the problem that alignment causes conditional branch out of range in two stages. + * 1. asm: .mpl -> .s + * The pseudo-instruction [.p2align 5] is 12B. + * kAlignPseudoSize = 12 / 4 = 3 + * 2. link: .s -> .o + * The pseudo-instruction will be expanded to nop. + * eg. .p2align 5 + * alignPower = 5, alignValue = 2^5 = 32 + * range = (32 - ((addr - 1) * 4) % 32) / 4 - 1 + * + * =======> max[range, kAlignPseudoSize] + */ + uint32 range = ((1U << alignPower) - (((addr - 1) * kInsnSize) & ((1U << alignPower) - 1))) / kInsnSize - 1; + return range > kAlignPseudoSize ? range : kAlignPseudoSize; +} + +void AArch64FixShortBranch::SetInsnId() const { + uint32 i = 0; + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + FOR_ALL_BB(bb, aarch64CGFunc) { + if (aarch64CGFunc->GetMirModule().IsCModule() && bb->IsBBNeedAlign() && bb->GetAlignNopNum() != 0) { + i = i + CalculateAlignRange(*bb, i); + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + i += insn->GetAtomicNum(); + insn->SetId(i); + if (insn->GetMachineOpcode() == MOP_adrp_ldr && CGOptions::IsLazyBinding() && !cgFunc->GetCG()->IsLibcore()) { + /* For 1 additional EmitLazyBindingRoutine in lazybinding + * see function AArch64Insn::Emit in file aarch64_insn.cpp + */ + ++i; + } + } + } +} + +/* + * TBZ/TBNZ instruction is generated under -O2, these branch instructions only have a range of +/-32KB. + * If the branch target is not reachable, we split tbz/tbnz into combination of ubfx and cbz/cbnz, which + * will clobber one extra register. With LSRA under -O2, we can use one of the reserved registers R16 for + * that purpose. To save compile time, we do this change when there are more than 32KB / 4 instructions + * in the function. 
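+ * For example, an out-of-range
+ *   tbnz w0, #2, .L.far
+ * becomes
+ *   ubfx w16, w0, #2, #1
+ *   cbnz w16, .L.far
+ * which reaches +/-1MB at the cost of clobbering the reserved register R16.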
+ */ +void AArch64FixShortBranch::FixShortBranches() { + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + bool change = false; + do { + change = false; + SetInsnId(); + for (auto *bb = aarch64CGFunc->GetFirstBB(); bb != nullptr && !change; bb = bb->GetNext()) { + /* Do a backward scan searching for short branches */ + for (auto *insn = bb->GetLastInsn(); insn != nullptr && !change; insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + MOperator thisMop = insn->GetMachineOpcode(); + if (thisMop != MOP_wtbz && thisMop != MOP_wtbnz && thisMop != MOP_xtbz && thisMop != MOP_xtbnz) { + continue; + } + LabelOperand &label = static_cast(insn->GetOperand(kInsnThirdOpnd)); + /* should not be commented out after bug fix */ + if (aarch64CGFunc->DistanceCheck(*bb, label.GetLabelIndex(), insn->GetId())) { + continue; + } + auto ® = static_cast(insn->GetOperand(kInsnFirstOpnd)); + ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false); + auto &bitPos = static_cast(insn->GetOperand(kInsnSecondOpnd)); + MOperator ubfxOp = MOP_undef; + MOperator cbOp = MOP_undef; + switch (thisMop) { + case MOP_wtbz: + ubfxOp = MOP_wubfxrri5i5; + cbOp = MOP_wcbz; + break; + case MOP_wtbnz: + ubfxOp = MOP_wubfxrri5i5; + cbOp = MOP_wcbnz; + break; + case MOP_xtbz: + ubfxOp = MOP_xubfxrri6i6; + cbOp = MOP_xcbz; + break; + case MOP_xtbnz: + ubfxOp = MOP_xubfxrri6i6; + cbOp = MOP_xcbnz; + break; + default: + break; + } + RegOperand &tmp = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand( + R16, (ubfxOp == MOP_wubfxrri5i5) ? k32BitSize : k64BitSize, kRegTyInt); + (void)bb->InsertInsnAfter(*insn, cgFunc->GetInsnBuilder()->BuildInsn(cbOp, tmp, label)); + (void)bb->InsertInsnAfter(*insn, cgFunc->GetInsnBuilder()->BuildInsn(ubfxOp, tmp, reg, bitPos, bitSize)); + bb->RemoveInsn(*insn); + change = true; + } + } + } while (change); +} + +bool CgFixShortBranch::PhaseRun(maplebe::CGFunc &f) { + auto *fixShortBranch = GetPhaseAllocator()->New(&f); + CHECK_FATAL(fixShortBranch != nullptr, "AArch64FixShortBranch instance create failure"); + fixShortBranch->FixShortBranches(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFixShortBranch, fixshortbranch) +} /* namespace maplebe */ + diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp new file mode 100644 index 0000000000000000000000000000000000000000..12a6e9fb4a845de05913e29e4f608af54e6d9d70 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp @@ -0,0 +1,2243 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_global.h" +#include "aarch64_reaching.h" +#include "aarch64_cg.h" +#include "aarch64_live.h" + +namespace maplebe { +using namespace maple; +#define GLOBAL_DUMP CG_DEBUG_FUNC(cgFunc) + +constexpr uint32 kExMOpTypeSize = 9; +constexpr uint32 kLsMOpTypeSize = 15; + +MOperator exMOpTable[kExMOpTypeSize] = { + MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre, + MOP_xwcmnrre, MOP_wwcmnrre, MOP_xwcmprre, MOP_wwcmprre +}; +MOperator lsMOpTable[kLsMOpTypeSize] = { + MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs, + MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs, + MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs +}; + +/* Optimize ExtendShiftOptPattern: + * ========================================================== + * nosuffix LSL LSR ASR extrn (def) + * nosuffix | F | LSL | LSR | ASR | extrn | + * LSL | F | LSL | F | F | extrn | + * LSR | F | F | LSR | F | F | + * ASR | F | F | F | ASR | F | + * exten | F | F | F | F |exten(self)| + * (use) + * =========================================================== + */ +constexpr uint32 kExtenAddShift = 5; +ExtendShiftOptPattern::SuffixType doOptimize[kExtenAddShift][kExtenAddShift] = { + { ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kLSL, ExtendShiftOptPattern::kLSR, + ExtendShiftOptPattern::kASR, ExtendShiftOptPattern::kExten }, + { ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kLSL, ExtendShiftOptPattern::kNoSuffix, + ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kExten }, + { ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kLSR, + ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix }, + { ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, + ExtendShiftOptPattern::kASR, ExtendShiftOptPattern::kNoSuffix }, + { ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, + ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kExten } +}; + +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +void AArch64GlobalOpt::Run() { + OptimizeManager optManager(cgFunc); + bool hasSpillBarrier = (cgFunc.NumBBs() > kMaxBBNum) || (cgFunc.GetRD()->GetMaxInsnNO() > kMaxInsnNum); + if (cgFunc.IsAfterRegAlloc()) { + optManager.Optimize(); + optManager.Optimize(); + return; + } + if (!hasSpillBarrier) { + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + } + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); +} + +/* if used Operand in insn is defined by zero in all define insn, return true */ +bool OptimizePattern::OpndDefByZero(Insn &insn, int32 useIdx) const { + DEBUG_ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register"); + /* Zero Register don't need be defined */ + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { + return true; + } + + InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx); + if (defInsns.empty()) { + return false; + } + for (auto &defInsn : defInsns) { + if (!InsnDefZero(*defInsn)) { + return false; + } + } + return true; +} + +/* if used Operand in insn is defined by one in all define insn, return 
true */
+bool OptimizePattern::OpndDefByOne(Insn &insn, int32 useIdx) const {
+  DEBUG_ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register");
+  /* the zero register does not need to be defined */
+  if (IsZeroRegister(insn.GetOperand(static_cast<uint32>(useIdx)))) {
+    return false;
+  }
+  InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx);
+  if (defInsns.empty()) {
+    return false;
+  }
+  for (auto &defInsn : defInsns) {
+    if (!InsnDefOne(*defInsn)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+/* if the used Operand in insn is defined with one valid bit in all define insns, return true */
+bool OptimizePattern::OpndDefByOneOrZero(Insn &insn, int32 useIdx) const {
+  if (IsZeroRegister(insn.GetOperand(static_cast<uint32>(useIdx)))) {
+    return true;
+  }
+
+  InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx);
+  if (defInsnSet.empty()) {
+    return false;
+  }
+
+  for (auto &defInsn : defInsnSet) {
+    if (!InsnDefOneOrZero(*defInsn)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+/* if the defined operand (currently it must be the first operand) of insn is const one, return true */
+bool OptimizePattern::InsnDefOne(const Insn &insn) {
+  MOperator defMop = insn.GetMachineOpcode();
+  switch (defMop) {
+    case MOP_wmovri32:
+    case MOP_xmovri64: {
+      Operand &srcOpnd = insn.GetOperand(1);
+      DEBUG_ASSERT(srcOpnd.IsIntImmediate(), "expects ImmOperand");
+      ImmOperand &srcConst = static_cast<ImmOperand&>(srcOpnd);
+      int64 srcConstValue = srcConst.GetValue();
+      if (srcConstValue == 1) {
+        return true;
+      }
+      return false;
+    }
+    default:
+      return false;
+  }
+}
+
+/* if the defined operand (currently it must be the first operand) of insn is const zero, return true */
+bool OptimizePattern::InsnDefZero(const Insn &insn) {
+  MOperator defMop = insn.GetMachineOpcode();
+  switch (defMop) {
+    case MOP_wmovri32:
+    case MOP_xmovri64: {
+      Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd);
+      DEBUG_ASSERT(srcOpnd.IsIntImmediate(), "expects ImmOperand");
+      ImmOperand &srcConst = static_cast<ImmOperand&>(srcOpnd);
+      int64 srcConstValue = srcConst.GetValue();
+      if (srcConstValue == 0) {
+        return true;
+      }
+      return false;
+    }
+    case MOP_xmovrr:
+    case MOP_wmovrr:
+      return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd));
+    default:
+      return false;
+  }
+}
+
+/* if the defined operand (currently it must be the first operand) of insn has only one valid bit, return true */
+bool OptimizePattern::InsnDefOneOrZero(const Insn &insn) {
+  MOperator defMop = insn.GetMachineOpcode();
+  switch (defMop) {
+    case MOP_wcsetrc:
+    case MOP_xcsetrc:
+      return true;
+    case MOP_wmovri32:
+    case MOP_xmovri64: {
+      Operand &defOpnd = insn.GetOperand(kInsnSecondOpnd);
+      DEBUG_ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand");
+      auto &defConst = static_cast<ImmOperand&>(defOpnd);
+      int64 defConstValue = defConst.GetValue();
+      if (defConstValue != 0 && defConstValue != 1) {
+        return false;
+      } else {
+        return true;
+      }
+    }
+    case MOP_xmovrr:
+    case MOP_wmovrr: {
+      return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd));
+    }
+    case MOP_wlsrrri5:
+    case MOP_xlsrrri6: {
+      Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd);
+      DEBUG_ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand");
+      ImmOperand &opndImm = static_cast<ImmOperand&>(opnd2);
+      int64 shiftBits = opndImm.GetValue();
+      if (((defMop == MOP_wlsrrri5) && (shiftBits == k32BitSize - 1)) ||
+          ((defMop == MOP_xlsrrri6) && (shiftBits == k64BitSize - 1))) {
+        return true;
+      } else {
+        return false;
+      }
+    }
+    default:
+      return false;
+  }
+}
+
+void ReplaceAsmListReg(const Insn *insn, uint32 index, uint32 regNO, Operand *newOpnd) {
+  MapleList<RegOperand*> *list =
&static_cast<ListOperand&>(insn->GetOperand(index)).GetOperands();
+  int32 size = static_cast<int32>(list->size());
+  for (int i = 0; i < size; ++i) {
+    RegOperand *opnd = static_cast<RegOperand*>(*(list->begin()));
+    list->pop_front();
+    if (opnd->GetRegisterNumber() == regNO) {
+      list->push_back(static_cast<RegOperand*>(newOpnd));
+    } else {
+      list->push_back(opnd);
+    }
+  }
+}
+
+void OptimizePattern::ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, uint32 regNO,
+                                                    Operand &newOpnd, bool updateInfo) const {
+  for (auto useInsn : useInsnSet) {
+    if (useInsn->GetMachineOpcode() == MOP_asm) {
+      ReplaceAsmListReg(useInsn, kAsmInputListOpnd, regNO, &newOpnd);
+    }
+    const InsnDesc *md = useInsn->GetDesc();
+    uint32 opndNum = useInsn->GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+      Operand &opnd = useInsn->GetOperand(i);
+      auto *regProp = md->opndMD[i];
+      if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) {
+        continue;
+      }
+
+      if (opnd.IsRegister() && (static_cast<RegOperand&>(opnd).GetRegisterNumber() == regNO)) {
+        useInsn->SetOperand(i, newOpnd);
+        if (updateInfo) {
+          cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+        }
+      } else if (opnd.IsMemoryAccessOperand()) {
+        MemOperand &memOpnd = static_cast<MemOperand&>(opnd);
+        RegOperand *base = memOpnd.GetBaseRegister();
+        RegOperand *index = memOpnd.GetIndexRegister();
+        MemOperand *newMem = nullptr;
+        if (base != nullptr && (base->GetRegisterNumber() == regNO)) {
+          newMem = static_cast<MemOperand*>(opnd.Clone(*cgFunc.GetMemoryPool()));
+          CHECK_FATAL(newMem != nullptr, "null ptr check");
+          newMem->SetBaseRegister(*static_cast<RegOperand*>(&newOpnd));
+          useInsn->SetOperand(i, *newMem);
+          if (updateInfo) {
+            cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+          }
+        }
+        if (index != nullptr && (index->GetRegisterNumber() == regNO)) {
+          newMem = static_cast<MemOperand*>(opnd.Clone(*cgFunc.GetMemoryPool()));
+          CHECK_FATAL(newMem != nullptr, "null ptr check");
+          newMem->SetIndexRegister(*static_cast<RegOperand*>(&newOpnd));
+          if (static_cast<RegOperand&>(newOpnd).GetValidBitsNum() != index->GetValidBitsNum()) {
+            newMem->UpdateExtend(MemOperand::kSignExtend);
+          }
+          useInsn->SetOperand(i, *newMem);
+          if (updateInfo) {
+            cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+          }
+        }
+      }
+    }
+  }
+}
+
+bool ForwardPropPattern::CheckCondition(Insn &insn) {
+  if (!insn.IsMachineInstruction()) {
+    return false;
+  }
+  if ((insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr) &&
+      (insn.GetMachineOpcode() != MOP_xmovrr_uxtw)) {
+    return false;
+  }
+  Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd);
+  Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd);
+  if (firstOpnd.GetSize() != secondOpnd.GetSize() && insn.GetMachineOpcode() != MOP_xmovrr_uxtw) {
+    return false;
+  }
+  RegOperand &firstRegOpnd = static_cast<RegOperand&>(firstOpnd);
+  RegOperand &secondRegOpnd = static_cast<RegOperand&>(secondOpnd);
+  uint32 firstRegNO = firstRegOpnd.GetRegisterNumber();
+  uint32 secondRegNO = secondRegOpnd.GetRegisterNumber();
+  if (IsZeroRegister(firstRegOpnd) || !firstRegOpnd.IsVirtualRegister() || !secondRegOpnd.IsVirtualRegister()) {
+    return false;
+  }
+  firstRegUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, firstRegNO, true);
+  if (firstRegUseInsnSet.empty()) {
+    return false;
+  }
+  InsnSet secondRegDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true);
+  if (secondRegDefInsnSet.size() != 1 || RegOperand::IsSameReg(firstOpnd, secondOpnd)) {
+    return false;
+  }
+  bool toDoOpt = true;
+  for (auto useInsn : firstRegUseInsnSet) {
+    if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn)) {
+      toDoOpt = false;
+      break;
+    }
+    /*
part defined */ + if ((useInsn->GetMachineOpcode() == MOP_xmovkri16) || + (useInsn->GetMachineOpcode() == MOP_wmovkri16)) { + toDoOpt = false; + break; + } + if (useInsn->GetMachineOpcode() == MOP_asm) { + toDoOpt = false; + break; + } + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true); + if (defInsnSet.size() > 1) { + toDoOpt = false; + break; + } else if (defInsnSet.size() == 1 && *defInsnSet.begin() != &insn) { + toDoOpt = false; + break; + } + } + return toDoOpt; +} + +void ForwardPropPattern::Optimize(Insn &insn) { + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + RegOperand &firstRegOpnd = static_cast(firstOpnd); + uint32 firstRegNO = firstRegOpnd.GetRegisterNumber(); + for (auto *useInsn : firstRegUseInsnSet) { + if (useInsn->GetMachineOpcode() == MOP_asm) { + ReplaceAsmListReg(useInsn, kAsmInputListOpnd, firstRegNO, &secondOpnd); + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + continue; + } + const InsnDesc *md = useInsn->GetDesc(); + uint32 opndNum = useInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = useInsn->GetOperand(i); + const OpndDesc *regProp = md->GetOpndDes(i); + if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == firstRegNO)) { + useInsn->SetOperand(i, secondOpnd); + if (((useInsn->GetMachineOpcode() == MOP_xmovrr) || (useInsn->GetMachineOpcode() == MOP_wmovrr)) && + (static_cast(useInsn->GetOperand(kInsnSecondOpnd)).IsVirtualRegister()) && + (static_cast(useInsn->GetOperand(kInsnFirstOpnd)).IsVirtualRegister())) { + (void)modifiedBB.insert(useInsn->GetBB()); + } + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + MemOperand *newMem = nullptr; + if (base != nullptr && (base->GetRegisterNumber() == firstRegNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetBaseRegister(static_cast(secondOpnd)); + useInsn->SetOperand(i, *newMem); + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + if ((index != nullptr) && (index->GetRegisterNumber() == firstRegNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetIndexRegister(static_cast(secondOpnd)); + if (static_cast(secondOpnd).GetValidBitsNum() != index->GetValidBitsNum()) { + newMem->UpdateExtend(MemOperand::kSignExtend); + } + useInsn->SetOperand(i, *newMem); + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + } + } + insn.SetOperand(0, secondOpnd); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); +} + +void ForwardPropPattern::RemoveMopUxtwToMov(Insn &insn) { + if (CGOptions::DoCGSSA()) { + CHECK_FATAL(false, "check case in ssa"); + } + auto &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 destRegNo = destOpnd.GetRegisterNumber(); + destOpnd.SetRegisterNumber(secondOpnd.GetRegisterNumber()); + auto *newOpnd = static_cast(destOpnd.Clone(*cgFunc.GetMemoryPool())); + cgFunc.InsertExtendSet(secondOpnd.GetRegisterNumber()); + InsnSet regUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, destRegNo, true); + if (regUseInsnSet.size() >= 1) { + 
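+    /* rename every later use of the removed def, e.g. for
+     *   mov w100, w101 (uxtw)  followed by  str w100, [x2]
+     * the str's use of w100 is rewritten to w101 before the mov is deleted */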
for (auto useInsn : regUseInsnSet) { + uint32 optSize = useInsn->GetOperandSize(); + for (uint32 i = 0; i < optSize; i++) { + DEBUG_ASSERT(useInsn->GetOperand(i).IsRegister(), "only design for register"); + if (destRegNo == static_cast(useInsn->GetOperand(i)).GetRegisterNumber()) { + useInsn->SetOperand(i, *newOpnd); + } + } + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + insn.GetBB()->RemoveInsn(insn); +} + +void ForwardPropPattern::Init() { + firstRegUseInsnSet.clear(); +} + +void ForwardPropPattern::Run() { + bool secondTime = false; + do { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable() || (secondTime && modifiedBB.find(bb) == modifiedBB.end())) { + continue; + } + + if (secondTime) { + modifiedBB.erase(bb); + } + + FOR_BB_INSNS(insn, bb) { + Init(); + if (!CheckCondition(*insn)) { + if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) { + insn->SetMOP(AArch64CG::kMd[MOP_xuxtw64]); + } + continue; + } + if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) { + RemoveMopUxtwToMov(*insn); + continue; + } + Optimize(*insn); + } + } + secondTime = true; + } while (!modifiedBB.empty()); +} + +bool BackPropPattern::CheckAndGetOpnd(const Insn &insn) { + if (!insn.IsMachineInstruction()) { + return false; + } + if (!cgFunc.IsAfterRegAlloc() && (insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr)) { + return false; + } + if (cgFunc.IsAfterRegAlloc() && + (insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr) && + (insn.GetMachineOpcode() != MOP_xvmovs) && (insn.GetMachineOpcode() != MOP_xvmovd)) { + return false; + } + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + if (RegOperand::IsSameReg(firstOpnd, secondOpnd)) { + return false; + } + if (firstOpnd.GetSize() != secondOpnd.GetSize()) { + return false; + } + firstRegOpnd = &static_cast(firstOpnd); + secondRegOpnd = &static_cast(secondOpnd); + if (IsZeroRegister(*firstRegOpnd)) { + return false; + } + if (!cgFunc.IsAfterRegAlloc() && (!secondRegOpnd->IsVirtualRegister() || !firstRegOpnd->IsVirtualRegister())) { + return false; + } + firstRegNO = firstRegOpnd->GetRegisterNumber(); + secondRegNO = secondRegOpnd->GetRegisterNumber(); + return true; +} + +bool BackPropPattern::DestOpndHasUseInsns(Insn &insn) { + BB &bb = *insn.GetBB(); + InsnSet useInsnSetOfFirstOpnd; + bool findRes = cgFunc.GetRD()->FindRegUseBetweenInsn(firstRegNO, insn.GetNext(), + bb.GetLastInsn(), useInsnSetOfFirstOpnd); + if ((findRes && useInsnSetOfFirstOpnd.empty()) || + (!findRes && useInsnSetOfFirstOpnd.empty() && !bb.GetLiveOut()->TestBit(firstRegNO))) { + return false; + } + return true; +} + +bool BackPropPattern::DestOpndLiveOutToEHSuccs(Insn &insn) const { + BB &bb = *insn.GetBB(); + for (auto ehSucc : bb.GetEhSuccs()) { + if (ehSucc->GetLiveIn()->TestBit(firstRegNO)) { + return true; + } + } + return false; +} + +bool BackPropPattern::CheckSrcOpndDefAndUseInsns(Insn &insn) { + BB &bb = *insn.GetBB(); + /* secondOpnd is defined in other BB */ + std::vector defInsnVec = cgFunc.GetRD()->FindRegDefBetweenInsn(secondRegNO, bb.GetFirstInsn(), insn.GetPrev()); + if (defInsnVec.size() != 1) { + return false; + } + defInsnForSecondOpnd = defInsnVec.back(); + /* part defined */ + if ((defInsnForSecondOpnd->GetMachineOpcode() == MOP_xmovkri16) || + (defInsnForSecondOpnd->GetMachineOpcode() == MOP_wmovkri16) || + (defInsnForSecondOpnd->GetMachineOpcode() == MOP_asm)) { + return false; + } + if 
(AArch64isa::IsPseudoInstruction(defInsnForSecondOpnd->GetMachineOpcode()) || defInsnForSecondOpnd->IsCall()) {
+    return false;
+  }
+  /* unconcerned regs. */
+  if ((secondRegNO >= RLR && secondRegNO <= RZR) || secondRegNO == RFP) {
+    return false;
+  }
+  if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) {
+    auto *memOpnd = static_cast<MemOperand*>(defInsnForSecondOpnd->GetMemOpnd());
+    if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) {
+      return false;
+    }
+  }
+
+  bool findFinish = cgFunc.GetRD()->FindRegUseBetweenInsn(secondRegNO, defInsnForSecondOpnd->GetNext(),
+                                                          bb.GetLastInsn(), srcOpndUseInsnSet);
+  if (!findFinish && bb.GetLiveOut()->TestBit(secondRegNO)) {
+    return false;
+  }
+  if (cgFunc.IsAfterRegAlloc() && findFinish && srcOpndUseInsnSet.size() > 1) {
+    /* used later before being killed. */
+    return false;
+  }
+  if (cgFunc.IsAfterRegAlloc()) {
+    for (auto *usePoint : srcOpndUseInsnSet) {
+      if (usePoint->IsCall()) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+bool BackPropPattern::CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn) {
+  /* secondOpnd is defined in another BB */
+  InsnSet defInsnVec = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true);
+  if (defInsnVec.size() != 1) {
+    return false;
+  }
+  defInsnForSecondOpnd = *(defInsnVec.begin());
+
+  /* ensure that there is no first RegNO def/use between insn and defInsnForSecondOpnd */
+  std::vector<Insn*> defInsnVecFirst;
+
+  if (insn.GetBB() != defInsnForSecondOpnd->GetBB()) {
+    defInsnVecFirst = cgFunc.GetRD()->FindRegDefBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, &insn);
+  } else {
+    defInsnVecFirst = cgFunc.GetRD()->FindRegDefBetweenInsn(firstRegNO, defInsnForSecondOpnd, insn.GetPrev());
+  }
+  if (!defInsnVecFirst.empty()) {
+    return false;
+  }
+  /* part defined */
+  if ((defInsnForSecondOpnd->GetMachineOpcode() == MOP_xmovkri16) ||
+      (defInsnForSecondOpnd->GetMachineOpcode() == MOP_wmovkri16) ||
+      (defInsnForSecondOpnd->GetMachineOpcode() == MOP_asm)) {
+    return false;
+  }
+
+  if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) {
+    auto *memOpnd = static_cast<MemOperand*>(defInsnForSecondOpnd->GetMemOpnd());
+    if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) {
+      return false;
+    }
+  }
+
+  srcOpndUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsnForSecondOpnd, secondRegNO, true);
+  /*
+   * useInsn is not expected to have multiple definitions
+   * the replaced opnd is not expected to have a definition already
+   */
+  return CheckReplacedUseInsn(insn);
+}
+
+bool BackPropPattern::CheckPredefineInsn(Insn &insn) {
+  if (insn.GetPrev() == defInsnForSecondOpnd) {
+    return true;
+  }
+  std::vector<Insn*> preDefInsnForFirstOpndVec;
+  /* no predefine insn in the current bb */
+  if (!cgFunc.GetRD()->RegIsUsedOrDefBetweenInsn(firstRegNO, *defInsnForSecondOpnd, insn)) {
+    return false;
+  }
+  return true;
+}
+
+bool BackPropPattern::CheckReplacedUseInsn(Insn &insn) {
+  for (auto *useInsn : srcOpndUseInsnSet) {
+    if (useInsn->GetMemOpnd() != nullptr) {
+      auto *a64MemOpnd = static_cast<MemOperand*>(useInsn->GetMemOpnd());
+      if (!a64MemOpnd->IsIntactIndexed()) {
+        if (a64MemOpnd->GetBaseRegister() != nullptr &&
+            a64MemOpnd->GetBaseRegister()->GetRegisterNumber() == secondRegNO) {
+          return false;
+        }
+      }
+    }
+    /* insn has been checked for defs */
+    if (useInsn == &insn) {
+      if (defInsnForSecondOpnd != useInsn->GetPrev() &&
+          cgFunc.GetRD()->FindRegUseBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, useInsn, insn.GetBB())) {
+        return false;
+      }
+      continue;
+    }
+    auto checkOneDefOnly = [](const InsnSet &defSet, const Insn &oneDef, bool
checkHasDef = false)->bool { + if (defSet.size() > 1) { + return false; + } else if (defSet.size() == 1) { + if (&oneDef != *(defSet.begin())) { + return false; + } + } else { + if (checkHasDef) { + CHECK_FATAL(false, "find def insn failed"); + } + } + return true; + }; + /* ensure that the use insns to be replaced is defined by defInsnForSecondOpnd only */ + if (useInsn->IsMemAccess() && static_cast( + useInsn->GetMemOpnd())->GetIndexOpt() != MemOperand::kIntact) { + return false; + } + InsnSet defInsnVecOfSrcOpnd = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, secondRegNO, true); + if (!checkOneDefOnly(defInsnVecOfSrcOpnd, *defInsnForSecondOpnd, true)) { + return false; + } + + InsnSet defInsnVecOfFirstReg = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true); + if (!checkOneDefOnly(defInsnVecOfFirstReg, insn)) { + return false; + } + + if (defInsnForSecondOpnd != useInsn->GetPrev() && + cgFunc.GetRD()->FindRegUseBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, useInsn, insn.GetBB())) { + return false; + } + } + return true; +} + +bool BackPropPattern::CheckRedefineInsn(Insn &insn) { + for (auto useInsn : srcOpndUseInsnSet) { + Insn *startInsn = &insn; + Insn *endInsn = useInsn; + if (endInsn == startInsn) { + if (cgFunc.GetRD()->RegIsUsedIncaller(firstRegNO, insn, *useInsn)) { + return false; + } else { + continue; + } + } + + if (useInsn->GetBB() == insn.GetBB()) { + if (useInsn->GetId() < insn.GetId()) { + startInsn = useInsn; + endInsn = &insn; + } + } + if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(firstRegNO, *startInsn, *endInsn, true, true)) { + return false; + } + if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, *startInsn, *endInsn, true)) { + return false; + } + } + return true; +} + +bool BackPropPattern::CheckCondition(Insn &insn) { + if (!CheckAndGetOpnd(insn)) { + return false; + } + /* Unless there is a reason that dest can not live out the current BB */ + if (cgFunc.HasAsm() && !DestOpndHasUseInsns(insn)) { + return false; + } + /* first register must not be live out to eh_succs */ + if (DestOpndLiveOutToEHSuccs(insn)) { + return false; + } + if (globalProp) { + if (!CheckSrcOpndDefAndUseInsnsGlobal(insn)) { + return false; + } + } else { + if (!CheckSrcOpndDefAndUseInsns(insn)) { + return false; + } + if (!CheckPredefineInsn(insn)) { + return false; + } + if (!CheckRedefineInsn(insn)) { + return false; + } + } + return true; +} + +void BackPropPattern::Optimize(Insn &insn) { + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + ReplaceAllUsedOpndWithNewOpnd(srcOpndUseInsnSet, secondRegNO, firstOpnd, true); + /* replace define insn */ + const InsnDesc *md = defInsnForSecondOpnd->GetDesc(); + uint32 opndNum = defInsnForSecondOpnd->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = defInsnForSecondOpnd->GetOperand(i); + if (!md->opndMD[i]->IsRegDef() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == secondRegNO)) { + /* remove remat info */ + Operand &defOp = defInsnForSecondOpnd->GetOperand(i); + CHECK_FATAL(defOp.IsRegister(), "unexpect def opnd type"); + auto &defRegOp = static_cast(defOp); + MIRPreg *preg = static_cast(cgFunc).GetPseudoRegFromVirtualRegNO( + defRegOp.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (preg != nullptr) { + preg->SetOp(OP_undef); + } + defInsnForSecondOpnd->SetOperand(i, firstOpnd); + cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB()); + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memOpnd = 
static_cast<MemOperand&>(opnd);
+      RegOperand *base = memOpnd.GetBaseRegister();
+      if (base != nullptr && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi &&
+          (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && base->GetRegisterNumber() == secondRegNO) {
+        MemOperand *newMem = static_cast<MemOperand*>(opnd.Clone(*cgFunc.GetMemoryPool()));
+        CHECK_FATAL(newMem != nullptr, "null ptr check");
+        newMem->SetBaseRegister(static_cast<RegOperand&>(firstOpnd));
+        defInsnForSecondOpnd->SetOperand(i, *newMem);
+        cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB());
+      }
+    }
+  }
+  /* There is a special implication when backward propagation is allowed for the physical register R0.
+   * This is the case where the calling func foo directly returns the result of the callee bar:
+   *
+   * foo:
+   *   bl bar
+   *   mov vreg, X0  // res = bar(); naive backward prop replaces vreg with X0
+   *   ....          // X0 is not redefined, but RA sees "X0 has not been used" after bl and may reuse it
+   *   mov X0, vreg  // in fact X0 is implicitly used by foo, so we need to tell RA that X0 is live
+   *   ret
+   *
+   * To keep RA simple, we tell RA not to reuse X0 by keeping "mov X0, X0":
+   * foo:
+   *   bl bar
+   *   ....          // backward prop X0 and force X0 to stay unavailable for reuse
+   *   mov X0, X0    // this can easily be removed later in the peephole phase
+   *   ret
+   */
+  if (cgFunc.HasCall() &&
+      !(cgFunc.GetFunction().IsReturnVoid()) &&
+      (firstRegNO == R0) &&
+      (static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R0)) {
+    /* Keep this instruction: mov R0, R0 */
+    cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true);
+    return;
+  } else {
+    insn.GetBB()->RemoveInsn(insn);
+    cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true);
+  }
+}
+
+void BackPropPattern::Init() {
+  firstRegOpnd = nullptr;
+  secondRegOpnd = nullptr;
+  firstRegNO = 0;
+  secondRegNO = 0;
+  srcOpndUseInsnSet.clear();
+  defInsnForSecondOpnd = nullptr;
+}
+
+void BackPropPattern::Run() {
+  bool secondTime = false;
+  std::set<BB*> modifiedBB;
+  do {
+    FOR_ALL_BB(bb, &cgFunc) {
+      if (bb->IsUnreachable() || (secondTime && modifiedBB.find(bb) == modifiedBB.end())) {
+        continue;
+      }
+
+      if (secondTime) {
+        modifiedBB.erase(bb);
+      }
+
+      FOR_BB_INSNS_REV(insn, bb) {
+        Init();
+        if (!CheckCondition(*insn)) {
+          continue;
+        }
+        (void)modifiedBB.insert(bb);
+        Optimize(*insn);
+      }
+    }
+    secondTime = true;
+  } while (!modifiedBB.empty());
+}
+
+bool CmpCsetPattern::CheckCondition(Insn &insn) {
+  nextInsn = insn.GetNextMachineInsn();
+  if (nextInsn == nullptr || !insn.IsMachineInstruction()) {
+    return false;
+  }
+
+  MOperator firstMop = insn.GetMachineOpcode();
+  MOperator secondMop = nextInsn->GetMachineOpcode();
+  if (!(((firstMop == MOP_wcmpri) || (firstMop == MOP_xcmpri)) &&
+        ((secondMop == MOP_wcsetrc) || (secondMop == MOP_xcsetrc)))) {
+    return false;
+  }
+
+  /* get the first cmp operand */
+  cmpFirstOpnd = &(insn.GetOperand(kInsnSecondOpnd));
+  /* get the second cmp Operand; the ImmOperand must be 0 or 1 */
+  cmpSecondOpnd = &(insn.GetOperand(kInsnThirdOpnd));
+  DEBUG_ASSERT(cmpSecondOpnd->IsIntImmediate(), "expects ImmOperand");
+  ImmOperand *cmpConstOpnd = static_cast<ImmOperand*>(cmpSecondOpnd);
+  cmpConstVal = cmpConstOpnd->GetValue();
+  /* get the first cset Operand */
+  csetFirstOpnd = &(nextInsn->GetOperand(kInsnFirstOpnd));
+  if (((cmpConstVal != 0) && (cmpConstVal != 1)) || (cmpFirstOpnd->GetSize() != csetFirstOpnd->GetSize()) ||
+      !OpndDefByOneOrZero(insn, 1)) {
+    return false;
+  }
+
+  InsnSet useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, 0, false);
+  if (useInsnSet.size() > 1) {
+    return false;
+  }
+  return true;
+}
+
+void
CmpCsetPattern::Optimize(Insn &insn) { + Insn *csetInsn = nextInsn; + BB &bb = *insn.GetBB(); + nextInsn = nextInsn->GetNextMachineInsn(); + /* get condition Operand */ + CondOperand &cond = static_cast(csetInsn->GetOperand(kInsnSecondOpnd)); + if (((cmpConstVal == 0) && (cond.GetCode() == CC_NE)) || ((cmpConstVal == 1) && (cond.GetCode() == CC_EQ))) { + if (RegOperand::IsSameReg(*cmpFirstOpnd, *csetFirstOpnd)) { + bb.RemoveInsn(insn); + bb.RemoveInsn(*csetInsn); + } else { + MOperator mopCode = (cmpFirstOpnd->GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, *csetFirstOpnd, *cmpFirstOpnd); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + bb.RemoveInsn(*csetInsn); + } + } else if (((cmpConstVal == 1) && (cond.GetCode() == CC_NE)) || + ((cmpConstVal == 0) && (cond.GetCode() == CC_EQ))) { + MOperator mopCode = (cmpFirstOpnd->GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12; + constexpr int64 eorImm = 1; + auto &aarch64CGFunc = static_cast(cgFunc); + ImmOperand &one = aarch64CGFunc.CreateImmOperand(eorImm, k8BitSize, false); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, *csetFirstOpnd, *cmpFirstOpnd, one); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + bb.RemoveInsn(*csetInsn); + } + cgFunc.GetRD()->UpdateInOut(bb, true); +} + +void CmpCsetPattern::Init() { + cmpConstVal = 0; + cmpFirstOpnd = nullptr; + cmpSecondOpnd = nullptr; + csetFirstOpnd = nullptr; +} + +void CmpCsetPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool CselPattern::CheckCondition(Insn &insn) { + MOperator mopCode = insn.GetMachineOpcode(); + if ((mopCode != MOP_xcselrrrc) && (mopCode != MOP_wcselrrrc)) { + return false; + } + return true; +} + +void CselPattern::Optimize(Insn &insn) { + BB &bb = *insn.GetBB(); + Operand &opnd0 = insn.GetOperand(kInsnFirstOpnd); + Operand &cond = insn.GetOperand(kInsnFourthOpnd); + MOperator newMop = ((opnd0.GetSize()) == k64BitSize ? 
MOP_xcsetrc : MOP_wcsetrc); + Operand &rflag = cgFunc.GetOrCreateRflag(); + if (OpndDefByOne(insn, kInsnSecondOpnd) && OpndDefByZero(insn, kInsnThirdOpnd)) { + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, opnd0, cond, rflag); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + cgFunc.GetRD()->InitGenUse(bb, false); + } else if (OpndDefByZero(insn, kInsnSecondOpnd) && OpndDefByOne(insn, kInsnThirdOpnd)) { + auto &originCond = static_cast(cond); + ConditionCode inverseCondCode = GetReverseBasicCC(originCond.GetCode()); + if (inverseCondCode == kCcLast) { + return; + } + auto &aarchCGFunc = static_cast(cgFunc); + CondOperand &inverseCond = aarchCGFunc.GetCondOperand(inverseCondCode); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, opnd0, inverseCond, rflag); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + cgFunc.GetRD()->InitGenUse(bb, false); + } +} + +void CselPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +uint32 RedundantUxtPattern::GetInsnValidBit(const Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + uint32 nRet; + switch (mOp) { + case MOP_wcsetrc: + case MOP_xcsetrc: + nRet = 1; + break; + case MOP_wldrb: + case MOP_wldarb: + case MOP_wldxrb: + case MOP_wldaxrb: + nRet = k8BitSize; + break; + case MOP_wldrh: + case MOP_wldarh: + case MOP_wldxrh: + case MOP_wldaxrh: + nRet = k16BitSize; + break; + case MOP_wmovrr: + case MOP_wmovri32: + case MOP_wldrsb: + case MOP_wldrsh: + case MOP_wldli: + case MOP_wldr: + case MOP_wldp: + case MOP_wldar: + case MOP_wmovkri16: + case MOP_wmovzri16: + case MOP_wmovnri16: + case MOP_wldxr: + case MOP_wldaxr: + case MOP_wldaxp: + case MOP_wcsincrrrc: + case MOP_wcselrrrc: + case MOP_wcsinvrrrc: + nRet = k32BitSize; + break; + default: + nRet = k64BitSize; + break; + } + return nRet; +} + +uint32 RedundantUxtPattern::GetMaximumValidBit(Insn &insn, uint8 index, InsnSet &visitedInsn) const { + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, index); + if (defInsnSet.empty()) { + /* disable opt when there is no def point. */ + return k64BitSize; + } + + uint32 validBit = 0; + uint32 nMaxValidBit = 0; + for (auto &defInsn : defInsnSet) { + if (visitedInsn.find(defInsn) != visitedInsn.end()) { + continue; + } + + (void)visitedInsn.insert(defInsn); + MOperator mOp = defInsn->GetMachineOpcode(); + if ((mOp == MOP_wmovrr) || (mOp == MOP_xmovrr)) { + validBit = GetMaximumValidBit(*defInsn, 1, visitedInsn); + } else { + validBit = GetInsnValidBit(*defInsn); + } + + nMaxValidBit = nMaxValidBit < validBit ? 
validBit : nMaxValidBit; + } + return nMaxValidBit; +} + +bool RedundantUxtPattern::CheckCondition(Insn &insn) { + BB &bb = *insn.GetBB(); + InsnSet visitedInsn1; + InsnSet visitedInsn2; + if (!((insn.GetMachineOpcode() == MOP_xuxth32 && + GetMaximumValidBit(insn, kInsnSecondOpnd, visitedInsn1) <= k16BitSize) || + (insn.GetMachineOpcode() == MOP_xuxtb32 && + GetMaximumValidBit(insn, kInsnSecondOpnd, visitedInsn2) <= k8BitSize))) { + return false; + } + + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + secondOpnd = &(insn.GetOperand(kInsnSecondOpnd)); + if (RegOperand::IsSameReg(firstOpnd, *secondOpnd)) { + bb.RemoveInsn(insn); + /* update in/out */ + cgFunc.GetRD()->UpdateInOut(bb, true); + return false; + } + useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, 0, false); + RegOperand &firstRegOpnd = static_cast(firstOpnd); + firstRegNO = firstRegOpnd.GetRegisterNumber(); + /* for uxth R1, V501, R1 is parameter register, this can't be optimized. */ + if (firstRegOpnd.IsPhysicalRegister()) { + return false; + } + + if (useInsnSet.empty()) { + bb.RemoveInsn(insn); + /* update in/out */ + cgFunc.GetRD()->UpdateInOut(bb, true); + return false; + } + + RegOperand *secondRegOpnd = static_cast(secondOpnd); + DEBUG_ASSERT(secondRegOpnd != nullptr, "secondRegOpnd should not be nullptr"); + uint32 secondRegNO = secondRegOpnd->GetRegisterNumber(); + for (auto useInsn : useInsnSet) { + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true); + if ((defInsnSet.size() > 1) || !(cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn))) { + return false; + } + } + return true; +} + +void RedundantUxtPattern::Optimize(Insn &insn) { + BB &bb = *insn.GetBB(); + ReplaceAllUsedOpndWithNewOpnd(useInsnSet, firstRegNO, *secondOpnd, true); + bb.RemoveInsn(insn); + cgFunc.GetRD()->UpdateInOut(bb, true); +} + +void RedundantUxtPattern::Init() { + useInsnSet.clear(); + secondOpnd = nullptr; +} + +void RedundantUxtPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool LocalVarSaveInsnPattern::CheckFirstInsn(const Insn &firstInsn) { + MOperator mOp = firstInsn.GetMachineOpcode(); + if (mOp != MOP_xmovrr && mOp != MOP_wmovrr) { + return false; + } + firstInsnSrcOpnd = &(firstInsn.GetOperand(kInsnSecondOpnd)); + RegOperand *firstInsnSrcReg = static_cast(firstInsnSrcOpnd); + if (firstInsnSrcReg->GetRegisterNumber() != R0) { + return false; + } + firstInsnDestOpnd = &(firstInsn.GetOperand(kInsnFirstOpnd)); + RegOperand *firstInsnDestReg = static_cast(firstInsnDestOpnd); + if (firstInsnDestReg->IsPhysicalRegister()) { + return false; + } + return true; +} + +bool LocalVarSaveInsnPattern::CheckSecondInsn() { + MOperator mOp = secondInsn->GetMachineOpcode(); + if (mOp != MOP_wstr && mOp != MOP_xstr) { + return false; + } + secondInsnSrcOpnd = &(secondInsn->GetOperand(kInsnFirstOpnd)); + if (!RegOperand::IsSameReg(*firstInsnDestOpnd, *secondInsnSrcOpnd)) { + return false; + } + /* check memOperand is stack memOperand, and x0 is stored in localref var region */ + secondInsnDestOpnd = &(secondInsn->GetOperand(kInsnSecondOpnd)); + MemOperand *secondInsnDestMem = static_cast(secondInsnDestOpnd); + RegOperand *baseReg = secondInsnDestMem->GetBaseRegister(); + RegOperand *indexReg = secondInsnDestMem->GetIndexRegister(); + if ((baseReg == nullptr) || !(cgFunc.IsFrameReg(*baseReg)) || (indexReg != nullptr)) { + return 
+
+bool LocalVarSaveInsnPattern::CheckFirstInsn(const Insn &firstInsn) {
+    MOperator mOp = firstInsn.GetMachineOpcode();
+    if (mOp != MOP_xmovrr && mOp != MOP_wmovrr) {
+        return false;
+    }
+    firstInsnSrcOpnd = &(firstInsn.GetOperand(kInsnSecondOpnd));
+    RegOperand *firstInsnSrcReg = static_cast<RegOperand*>(firstInsnSrcOpnd);
+    if (firstInsnSrcReg->GetRegisterNumber() != R0) {
+        return false;
+    }
+    firstInsnDestOpnd = &(firstInsn.GetOperand(kInsnFirstOpnd));
+    RegOperand *firstInsnDestReg = static_cast<RegOperand*>(firstInsnDestOpnd);
+    if (firstInsnDestReg->IsPhysicalRegister()) {
+        return false;
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckSecondInsn() {
+    MOperator mOp = secondInsn->GetMachineOpcode();
+    if (mOp != MOP_wstr && mOp != MOP_xstr) {
+        return false;
+    }
+    secondInsnSrcOpnd = &(secondInsn->GetOperand(kInsnFirstOpnd));
+    if (!RegOperand::IsSameReg(*firstInsnDestOpnd, *secondInsnSrcOpnd)) {
+        return false;
+    }
+    /* check that the memOperand is a stack memOperand, i.e. x0 is stored in the local-ref var region */
+    secondInsnDestOpnd = &(secondInsn->GetOperand(kInsnSecondOpnd));
+    MemOperand *secondInsnDestMem = static_cast<MemOperand*>(secondInsnDestOpnd);
+    RegOperand *baseReg = secondInsnDestMem->GetBaseRegister();
+    RegOperand *indexReg = secondInsnDestMem->GetIndexRegister();
+    if ((baseReg == nullptr) || !(cgFunc.IsFrameReg(*baseReg)) || (indexReg != nullptr)) {
+        return false;
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckAndGetUseInsn(Insn &firstInsn) {
+    InsnSet useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(firstInsn, kInsnFirstOpnd, false);
+    if (useInsnSet.size() != 2) { /* secondInsn and another useInsn */
+        return false;
+    }
+
+    /* useInsnSet includes secondInsn and another useInsn */
+    for (auto tmpUseInsn : useInsnSet) {
+        if (tmpUseInsn->GetId() != secondInsn->GetId()) {
+            useInsn = tmpUseInsn;
+            break;
+        }
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckLiveRange(const Insn &firstInsn) {
+    uint32 maxInsnNO = cgFunc.GetRD()->GetMaxInsnNO();
+    uint32 useInsnID = useInsn->GetId();
+    uint32 defInsnID = firstInsn.GetId();
+    uint32 distance = useInsnID > defInsnID ? useInsnID - defInsnID : defInsnID - useInsnID;
+    float liveRangeProportion = static_cast<float>(distance) / maxInsnNO;
+    /* 0.3 is a balance point for real optimization effect */
+    if (liveRangeProportion < 0.3) {
+        return false;
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckCondition(Insn &firstInsn) {
+    secondInsn = firstInsn.GetNext();
+    if (secondInsn == nullptr) {
+        return false;
+    }
+    /* check that firstInsn is: mov vreg, R0 */
+    if (!CheckFirstInsn(firstInsn)) {
+        return false;
+    }
+    /* check that secondInsn is: str vreg, stackMem */
+    if (!CheckSecondInsn()) {
+        return false;
+    }
+    /* find the uses of the vreg */
+    if (!CheckAndGetUseInsn(firstInsn)) {
+        return false;
+    }
+    /* simulate the live range using the insn distance */
+    if (!CheckLiveRange(firstInsn)) {
+        return false;
+    }
+    RegOperand *firstInsnDestReg = static_cast<RegOperand*>(firstInsnDestOpnd);
+    regno_t firstInsnDestRegNO = firstInsnDestReg->GetRegisterNumber();
+    InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstInsnDestRegNO, true);
+    if (defInsnSet.size() != 1) {
+        return false;
+    }
+    DEBUG_ASSERT((*(defInsnSet.begin()))->GetId() == firstInsn.GetId(), "useInsn has only one define Insn : firstInsn");
+    /* check whether the stack mem is changed or not */
+    MemOperand *secondInsnDestMem = static_cast<MemOperand*>(secondInsnDestOpnd);
+    int64 memOffset = secondInsnDestMem->GetOffsetImmediate()->GetOffsetValue();
+    InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*useInsn, memOffset, true);
+    if (memDefInsnSet.size() != 1) {
+        return false;
+    }
+    if ((*(memDefInsnSet.begin()))->GetId() != secondInsn->GetId()) {
+        return false;
+    }
+    /* the rewrite only pays off if there is a call between the def and the use */
+    if (!cgFunc.GetRD()->HasCallBetweenDefUse(firstInsn, *useInsn)) {
+        return false;
+    }
+    return true;
+}
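+
+/* Illustrative sketch (not from the original patch) of the rewrite performed by
+ * Optimize() below: keep the return value on the stack and reload it right before
+ * its distant use, so the vreg no longer spans the call-heavy region:
+ *     bl   foo                        bl   foo
+ *     mov  v0, x0                     str  x0, [x29, #local]
+ *     str  v0, [x29, #local]          ...
+ *     ...                     ===>    ldr  v0, [x29, #local]
+ *     use  v0                         use  v0
+ */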
+
+void LocalVarSaveInsnPattern::Optimize(Insn &insn) {
+    /* insert a ldr insn before useInsn */
+    MOperator ldrOpCode = secondInsnSrcOpnd->GetSize() == k64BitSize ? MOP_xldr : MOP_wldr;
+    Insn &ldrInsn = cgFunc.GetInsnBuilder()->BuildInsn(ldrOpCode, *secondInsnSrcOpnd, *secondInsnDestOpnd);
+    ldrInsn.SetId(useInsn->GetId() - 1);
+    useInsn->GetBB()->InsertInsnBefore(*useInsn, ldrInsn);
+    cgFunc.GetRD()->UpdateInOut(*useInsn->GetBB(), true);
+    secondInsn->SetOperand(kInsnFirstOpnd, *firstInsnSrcOpnd);
+    BB *saveInsnBB = insn.GetBB();
+    saveInsnBB->RemoveInsn(insn);
+    cgFunc.GetRD()->UpdateInOut(*saveInsnBB, true);
+}
+
+void LocalVarSaveInsnPattern::Init() {
+    firstInsnSrcOpnd = nullptr;
+    firstInsnDestOpnd = nullptr;
+    secondInsnSrcOpnd = nullptr;
+    secondInsnDestOpnd = nullptr;
+    useInsn = nullptr;
+    secondInsn = nullptr;
+}
+
+void LocalVarSaveInsnPattern::Run() {
+    FOR_ALL_BB(bb, &cgFunc) {
+        if (bb->IsCleanup()) {
+            continue;
+        }
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            if (!insn->IsCall()) {
+                continue;
+            }
+            Insn *firstInsn = insn->GetNextMachineInsn();
+            if (firstInsn == nullptr) {
+                continue;
+            }
+            Init();
+            if (!CheckCondition(*firstInsn)) {
+                continue;
+            }
+            Optimize(*firstInsn);
+        }
+    }
+}
+
+void ExtendShiftOptPattern::SetExMOpType(const Insn &use) {
+    MOperator op = use.GetMachineOpcode();
+    switch (op) {
+        case MOP_xaddrrr:
+        case MOP_xxwaddrrre:
+        case MOP_xaddrrrs: {
+            exMOpType = kExAdd;
+            break;
+        }
+        case MOP_waddrrr:
+        case MOP_wwwaddrrre:
+        case MOP_waddrrrs: {
+            exMOpType = kEwAdd;
+            break;
+        }
+        case MOP_xsubrrr:
+        case MOP_xxwsubrrre:
+        case MOP_xsubrrrs: {
+            exMOpType = kExSub;
+            break;
+        }
+        case MOP_wsubrrr:
+        case MOP_wwwsubrrre:
+        case MOP_wsubrrrs: {
+            exMOpType = kEwSub;
+            break;
+        }
+        case MOP_xcmnrr:
+        case MOP_xwcmnrre:
+        case MOP_xcmnrrs: {
+            exMOpType = kExCmn;
+            break;
+        }
+        case MOP_wcmnrr:
+        case MOP_wwcmnrre:
+        case MOP_wcmnrrs: {
+            exMOpType = kEwCmn;
+            break;
+        }
+        case MOP_xcmprr:
+        case MOP_xwcmprre:
+        case MOP_xcmprrs: {
+            exMOpType = kExCmp;
+            break;
+        }
+        case MOP_wcmprr:
+        case MOP_wwcmprre:
+        case MOP_wcmprrs: {
+            exMOpType = kEwCmp;
+            break;
+        }
+        default: {
+            exMOpType = kExUndef;
+        }
+    }
+}
+
+void ExtendShiftOptPattern::SetLsMOpType(const Insn &use) {
+    MOperator op = use.GetMachineOpcode();
+    switch (op) {
+        case MOP_xaddrrr:
+        case MOP_xaddrrrs: {
+            lsMOpType = kLxAdd;
+            break;
+        }
+        case MOP_waddrrr:
+        case MOP_waddrrrs: {
+            lsMOpType = kLwAdd;
+            break;
+        }
+        case MOP_xsubrrr:
+        case MOP_xsubrrrs: {
+            lsMOpType = kLxSub;
+            break;
+        }
+        case MOP_wsubrrr:
+        case MOP_wsubrrrs: {
+            lsMOpType = kLwSub;
+            break;
+        }
+        case MOP_xcmnrr:
+        case MOP_xcmnrrs: {
+            lsMOpType = kLxCmn;
+            break;
+        }
+        case MOP_wcmnrr:
+        case MOP_wcmnrrs: {
+            lsMOpType = kLwCmn;
+            break;
+        }
+        case MOP_xcmprr:
+        case MOP_xcmprrs: {
+            lsMOpType = kLxCmp;
+            break;
+        }
+        case MOP_wcmprr:
+        case MOP_wcmprrs: {
+            lsMOpType = kLwCmp;
+            break;
+        }
+        case MOP_xeorrrr:
+        case MOP_xeorrrrs: {
+            lsMOpType = kLxEor;
+            break;
+        }
+        case MOP_weorrrr:
+        case MOP_weorrrrs: {
+            lsMOpType = kLwEor;
+            break;
+        }
+        case MOP_xinegrr:
+        case MOP_xinegrrs: {
+            lsMOpType = kLxNeg;
+            replaceIdx = kInsnSecondOpnd;
+            break;
+        }
+        case MOP_winegrr:
+        case MOP_winegrrs: {
+            lsMOpType = kLwNeg;
+            replaceIdx = kInsnSecondOpnd;
+            break;
+        }
+        case MOP_xiorrrr:
+        case MOP_xiorrrrs: {
+            lsMOpType = kLxIor;
+            break;
+        }
+        case MOP_wiorrrr:
+        case MOP_wiorrrrs: {
+            lsMOpType = kLwIor;
+            break;
+        }
+        default: {
+            lsMOpType = kLsUndef;
+        }
+    }
+}
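+
+/* Note (illustrative, not from the original patch): the kL*/kE* values set above index
+ * the lsMOpTable/exMOpTable used in ReplaceUseInsn, e.g. kLwAdd maps "add w0, w1, w2"
+ * onto its shifted-register form "add w0, w1, w2, LSL #n". */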
+
+void ExtendShiftOptPattern::SelectExtendOrShift(const Insn &def) {
+    MOperator op = def.GetMachineOpcode();
+    switch (op) {
+        case MOP_xsxtb32:
+        case MOP_xsxtb64:
+            extendOp = ExtendShiftOperand::kSXTB;
+            break;
+        case MOP_xsxth32:
+        case MOP_xsxth64:
+            extendOp = ExtendShiftOperand::kSXTH;
+            break;
+        case MOP_xsxtw64:
+            extendOp = ExtendShiftOperand::kSXTW;
+            break;
+        case MOP_xuxtb32:
+            extendOp = ExtendShiftOperand::kUXTB;
+            break;
+        case MOP_xuxth32:
+            extendOp = ExtendShiftOperand::kUXTH;
+            break;
+        case MOP_xuxtw64:
+            extendOp = ExtendShiftOperand::kUXTW;
+            break;
+        case MOP_wlslrri5:
+        case MOP_xlslrri6:
+            shiftOp = BitShiftOperand::kLSL;
+            break;
+        case MOP_xlsrrri6:
+        case MOP_wlsrrri5:
+            shiftOp = BitShiftOperand::kLSR;
+            break;
+        case MOP_xasrrri6:
+        case MOP_wasrrri5:
+            shiftOp = BitShiftOperand::kASR;
+            break;
+        default: {
+            extendOp = ExtendShiftOperand::kUndef;
+            shiftOp = BitShiftOperand::kUndef;
+        }
+    }
+}
+
+/* the first use must match SelectExtendOrShift */
+bool ExtendShiftOptPattern::CheckDefUseInfo(Insn &use, uint32 size) {
+    auto &regOperand = static_cast<RegOperand&>(defInsn->GetOperand(kInsnFirstOpnd));
+    Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd);
+    CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!");
+    auto &regDefSrc = static_cast<RegOperand&>(defSrcOpnd);
+    if (regDefSrc.IsPhysicalRegister()) {
+        return false;
+    }
+    /*
+     * avoid implicit conversion cases such as:
+     *     lsr x2, x2, #8
+     *     ubfx w2, x2, #0, #32          lsr x2, x2, #8
+     *     eor w0, w0, w2        ===>    eor w0, w0, x2    ==\=>    eor w0, w0, w2, LSR #8
+     *
+     * the truncation makes folding a right shift produce a wrong value;
+     * a left shift would not matter
+     */
+    auto &useDefOpnd = static_cast<RegOperand&>(use.GetOperand(kInsnFirstOpnd));
+    if ((shiftOp != BitShiftOperand::kUndef || extendOp != ExtendShiftOperand::kUndef) &&
+        (regDefSrc.GetSize() > regOperand.GetSize() || useDefOpnd.GetSize() != size)) {
+        return false;
+    }
+    if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) &&
+        (defSrcOpnd.GetSize() > size)) {
+        return false;
+    }
+    regno_t defSrcRegNo = regDefSrc.GetRegisterNumber();
+    /* check regDefSrc */
+    InsnSet defSrcSet = cgFunc.GetRD()->FindDefForRegOpnd(use, defSrcRegNo, true);
+    /* the first defSrcInsn must be the one closest to useInsn */
+    if (defSrcSet.empty()) {
+        return false;
+    }
+    Insn *defSrcInsn = *defSrcSet.begin();
+    const InsnDesc *md = defSrcInsn->GetDesc();
+    if ((size != regOperand.GetSize()) && md->IsMove()) {
+        return false;
+    }
+    if (defInsn->GetBB() == use.GetBB()) {
+        /* check for a def of the replacement reg between defInsn and currInsn */
+        Insn *tmpInsn = defInsn->GetNext();
+        while (tmpInsn != &use) {
+            if (tmpInsn == defSrcInsn || tmpInsn == nullptr) {
+                return false;
+            }
+            tmpInsn = tmpInsn->GetNext();
+        }
+    } else { /* def and use are not in the same BB */
+        if (defSrcInsn->GetBB() != defInsn->GetBB()) {
+            return false;
+        }
+        if (defSrcInsn->GetId() > defInsn->GetId()) {
+            return false;
+        }
+    }
+    /* case:
+     * lsl w0, w0, #5
+     * eor w0, w2, w0
+     * --->
+     * eor w0, w2, w0, lsl 5
+     */
+    if (defSrcInsn == defInsn) {
+        InsnSet replaceRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsn, defSrcRegNo, true);
+        if (replaceRegUseSet.size() != k1BitSize) {
+            return false;
+        }
+        removeDefInsn = true;
+    }
+    return true;
+}
+
+/* Check whether the ExtendShiftOptPattern optimization can be performed. */
+ExtendShiftOptPattern::SuffixType ExtendShiftOptPattern::CheckOpType(const Operand &lastOpnd) const {
+    /* Assign values to useType and defType. */
+    uint32 useType = ExtendShiftOptPattern::kNoSuffix;
+    uint32 defType = shiftOp;
+    if (extendOp != ExtendShiftOperand::kUndef) {
+        defType = ExtendShiftOptPattern::kExten;
+    }
+    if (lastOpnd.IsOpdShift()) {
+        const BitShiftOperand &lastShiftOpnd = static_cast<const BitShiftOperand&>(lastOpnd);
+        useType = lastShiftOpnd.GetShiftOp();
+    } else if (lastOpnd.IsOpdExtend()) {
+        const ExtendShiftOperand &lastExtendOpnd = static_cast<const ExtendShiftOperand&>(lastOpnd);
+        useType = ExtendShiftOptPattern::kExten;
+        /* when both insns carry an extend suffix, the extend op itself must be the same */
+        if (useType == defType && extendOp != lastExtendOpnd.GetExtendOp()) {
+            return ExtendShiftOptPattern::kNoSuffix;
+        }
+    }
+    return doOptimize[useType][defType];
+}
+
+/* new Insn extenType:
+ * ================================
+ * | useMop   | defMop | newMop |
+ * | nosuffix | all    | all    |
+ * | exten    | ex     | ex     |
+ * | ls       | ex     | ls     |
+ * | asr      | !asr   | F      |
+ * | !asr     | asr    | F      |
+ * ================================
+ */
+void ExtendShiftOptPattern::ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount) {
+    AArch64CGFunc &a64CGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    uint32 lastIdx = use.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = use.GetOperand(lastIdx);
+    ExtendShiftOptPattern::SuffixType optType = CheckOpType(lastOpnd);
+    Operand *shiftOpnd = nullptr;
+    if (optType == ExtendShiftOptPattern::kNoSuffix) {
+        return;
+    } else if (optType == ExtendShiftOptPattern::kExten) {
+        replaceOp = exMOpTable[exMOpType];
+        if (amount > k4BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateExtendShiftOperand(extendOp, amount, static_cast<uint32>(k64BitSize));
+    } else {
+        replaceOp = lsMOpTable[lsMOpType];
+        if (amount >= k32BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateBitShiftOperand(shiftOp, amount, static_cast<uint32>(k64BitSize));
+    }
+    if (replaceOp == MOP_undef) {
+        return;
+    }
+
+    Insn *replaceUseInsn = nullptr;
+    Operand &firstOpnd = use.GetOperand(kInsnFirstOpnd);
+    Operand *secondOpnd = &use.GetOperand(kInsnSecondOpnd);
+    if (replaceIdx == kInsnSecondOpnd) { /* replace a neg insn */
+        secondOpnd = &def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, *shiftOpnd);
+    } else {
+        Operand &thirdOpnd = def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, thirdOpnd, *shiftOpnd);
+    }
+    use.GetBB()->ReplaceInsn(use, *replaceUseInsn);
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftOptPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "=======ReplaceInsn :\n";
+        use.Dump();
+        LogInfo::MapleLogger() << "=======NewInsn :\n";
+        replaceUseInsn->Dump();
+    }
+    if (removeDefInsn) {
+        if (GLOBAL_DUMP) {
+            LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftOptPattern : <<<<<<<\n";
+            LogInfo::MapleLogger() << "=======RemoveDefInsn :\n";
+            defInsn->Dump();
+        }
+        defInsn->GetBB()->RemoveInsn(*defInsn);
+    }
+    cgFunc.GetRD()->InitGenUse(*defInsn->GetBB(), false);
+    cgFunc.GetRD()->UpdateInOut(*use.GetBB(), true);
+    newInsn = replaceUseInsn;
+    optSuccess = true;
+}
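+
+/* Worked example (illustrative, not from the original patch): when the use insn already
+ * carries a suffix, Optimize() below accumulates the def's shift into it, e.g.
+ *     def: lsl x1, x2, #4
+ *     use: add x0, x3, x1, LSL #2     (existing suffix amount = 2)
+ * becomes "add x0, x3, x2, LSL #6" (amount = 2 + 4), but only when the doOptimize
+ * table permits the suffix combination; otherwise ReplaceUseInsn bails out. */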
+
+/*
+ * pattern1:
+ * UXTB/UXTW X0, W1          <---- def x0
+ * ....                      <---- (X0 not used)
+ * AND/SUB/EOR X0, X1, X0    <---- use x0
+ * ======>
+ * AND/SUB/EOR X0, X1, W1 UXTB/UXTW
+ *
+ * pattern2:
+ * LSL/LSR X0, X1, #8
+ * ....(X0 not used)
+ * AND/SUB/EOR X0, X1, X0
+ * ======>
+ * AND/SUB/EOR X0, X1, X1 LSL/LSR #8
+ */
+void ExtendShiftOptPattern::Optimize(Insn &insn) {
+    uint32 amount = 0;
+    uint32 offset = 0;
+    uint32 lastIdx = insn.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = insn.GetOperand(lastIdx);
+    if (lastOpnd.IsOpdShift()) {
+        BitShiftOperand &lastShiftOpnd = static_cast<BitShiftOperand&>(lastOpnd);
+        amount = lastShiftOpnd.GetShiftAmount();
+    } else if (lastOpnd.IsOpdExtend()) {
+        ExtendShiftOperand &lastExtendOpnd = static_cast<ExtendShiftOperand&>(lastOpnd);
+        amount = lastExtendOpnd.GetShiftAmount();
+    }
+    if (shiftOp != BitShiftOperand::kUndef) {
+        ImmOperand &immOpnd = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+        offset = static_cast<uint32>(immOpnd.GetValue());
+    }
+    amount += offset;
+
+    ReplaceUseInsn(insn, *defInsn, amount);
+}
+
+void ExtendShiftOptPattern::DoExtendShiftOpt(Insn &insn) {
+    Init();
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    Optimize(insn);
+    if (optSuccess) {
+        DoExtendShiftOpt(*newInsn);
+    }
+}
+
+/* check and set:
+ * exMOpType, lsMOpType, extendOp, shiftOp, defInsn
+ */
+bool ExtendShiftOptPattern::CheckCondition(Insn &insn) {
+    SetLsMOpType(insn);
+    SetExMOpType(insn);
+    if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) {
+        return false;
+    }
+    RegOperand &regOperand = static_cast<RegOperand&>(insn.GetOperand(replaceIdx));
+    if (regOperand.IsPhysicalRegister()) {
+        return false;
+    }
+    regno_t regNo = regOperand.GetRegisterNumber();
+    InsnSet regDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, regNo, true);
+    if (regDefInsnSet.size() != k1BitSize) {
+        return false;
+    }
+    defInsn = *regDefInsnSet.begin();
+    CHECK_FATAL((defInsn != nullptr), "defInsn is null!");
+
+    SelectExtendOrShift(*defInsn);
+    /* defInsn must be a shift or an extend */
+    if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) {
+        return false;
+    }
+    return CheckDefUseInfo(insn, regOperand.GetSize());
+}
+
+void ExtendShiftOptPattern::Init() {
+    replaceOp = MOP_undef;
+    extendOp = ExtendShiftOperand::kUndef;
+    shiftOp = BitShiftOperand::kUndef;
+    defInsn = nullptr;
+    replaceIdx = kInsnThirdOpnd;
+    newInsn = nullptr;
+    optSuccess = false;
+    removeDefInsn = false;
+    exMOpType = kExUndef;
+    lsMOpType = kLsUndef;
+}
+
+void ExtendShiftOptPattern::Run() {
+    if (!cgFunc.GetMirModule().IsCModule()) {
+        return;
+    }
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            DoExtendShiftOpt(*insn);
+        }
+    }
+}
+
+void ExtenToMovPattern::Run() {
+    if (!cgFunc.GetMirModule().IsCModule()) {
+        return;
+    }
+    FOR_ALL_BB(bb, &cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+
+/* Check for an implicit uxtw */
+bool ExtenToMovPattern::CheckHideUxtw(const Insn &insn, regno_t regno) const {
+    const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()];
+    if (md->IsMove()) {
+        return false;
+    }
+    uint32 optSize = insn.GetOperandSize();
+    for (uint32 i = 0; i < optSize; ++i) {
+        if (regno == static_cast<RegOperand&>(insn.GetOperand(i)).GetRegisterNumber()) {
+            auto *curOpndDescription = md->GetOpndDes(i);
+            if (curOpndDescription->IsDef() && curOpndDescription->GetSize() == k32BitSize) {
+                return true;
+            }
+            break;
+        }
+    }
+    return false;
+}
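+
+/* Architectural note (illustrative, not from the original patch): on AArch64 every
+ * write to a w-register implicitly zero-extends into the upper 32 bits, e.g.
+ *     add  w1, w2, w3
+ *     uxtw x0, w1      ===>   mov x0, w1 (MOP_xmovrr_uxtw)
+ * so CheckUxtw below only has to prove that every reaching def is such a 32-bit def. */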
+
+bool ExtenToMovPattern::CheckUxtw(Insn &insn) {
+    if (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize &&
+        insn.GetOperand(kInsnSecondOpnd).GetSize() == k32BitSize) {
+        DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "is not Register");
+        regno_t regno = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber();
+        InsnSet preDef = cgFunc.GetRD()->FindDefForRegOpnd(insn, kInsnSecondOpnd, false);
+        if (preDef.empty()) {
+            return false;
+        }
+        for (auto defInsn : preDef) {
+            if (!CheckHideUxtw(*defInsn, regno)) {
+                return false;
+            }
+        }
+        replaceMop = MOP_xmovrr_uxtw;
+        return true;
+    }
+    return false;
+}
+
+bool ExtenToMovPattern::CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNum) {
+    InsnSet srcDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, srcRegNo, true);
+    for (auto defInsn : srcDefSet) {
+        CHECK_FATAL((defInsn != nullptr), "defInsn is null!");
+        MOperator mOp = defInsn->GetMachineOpcode();
+        switch (mOp) {
+            case MOP_wiorrri12:
+            case MOP_weorrri12: {
+                /* check the imm value for OR/EOR, then fall through to check the src reg */
+                ImmOperand &imm = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+                auto bitNum = static_cast<uint64>(imm.GetValue());
+                if ((bitNum >> validNum) != 0) {
+                    return false;
+                }
+            }
+            case MOP_wandrri12: {
+                /* check defSrcReg */
+                RegOperand &defSrcRegOpnd = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+                regno_t defSrcRegNo = defSrcRegOpnd.GetRegisterNumber();
+                if (!CheckSrcReg(*defInsn, defSrcRegNo, validNum)) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wandrrr: {
+                /* check defSrcReg */
+                RegOperand &defSrcRegOpnd1 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+                RegOperand &defSrcRegOpnd2 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+                regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber();
+                regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber();
+                if (!CheckSrcReg(*defInsn, defSrcRegNo1, validNum) && !CheckSrcReg(*defInsn, defSrcRegNo2, validNum)) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wiorrrr:
+            case MOP_weorrrr: {
+                /* check defSrcReg */
+                RegOperand &defSrcRegOpnd1 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+                RegOperand &defSrcRegOpnd2 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+                regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber();
+                regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber();
+                if (!CheckSrcReg(*defInsn, defSrcRegNo1, validNum) || !CheckSrcReg(*defInsn, defSrcRegNo2, validNum)) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wldrb: {
+                if (validNum != k8BitSize) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wldrh: {
+                if (validNum != k16BitSize) {
+                    return false;
+                }
+                break;
+            }
+            default:
+                return false;
+        }
+    }
+    return true;
+}
+
+bool ExtenToMovPattern::BitNotAffected(Insn &insn, uint32 validNum) {
+    RegOperand &firstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    RegOperand &secondOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    regno_t desRegNo = firstOpnd.GetRegisterNumber();
+    regno_t srcRegNo = secondOpnd.GetRegisterNumber();
+    InsnSet desDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, desRegNo, true);
+    /* the dest reg must not be redefined */
+    if (!desDefSet.empty()) {
+        return false;
+    }
+    if (!CheckSrcReg(insn, srcRegNo, validNum)) {
+        return false;
+    }
+    replaceMop = MOP_wmovrr;
+    return true;
+}
+
+bool ExtenToMovPattern::CheckCondition(Insn &insn) {
+    MOperator mOp = insn.GetMachineOpcode();
+    switch (mOp) {
+        case MOP_xuxtw64: return CheckUxtw(insn);
+        case MOP_xuxtb32: return BitNotAffected(insn, k8BitSize);
+        case MOP_xuxth32: return BitNotAffected(insn, k16BitSize);
+        default: return false;
+    }
+}
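+
+/* Illustrative example (a sketch, not from the original patch): BitNotAffected proves the
+ * source already fits in the extension width, e.g. with validNum = 8:
+ *     ldrb w1, [x2]
+ *     uxtb w0, w1     ===>   mov w0, w1
+ * the uxtb cannot change any bit, so it degenerates to a plain move. */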
+
+/* No initialization required */
+void ExtenToMovPattern::Init() {
+    replaceMop = MOP_undef;
+}
+
+void ExtenToMovPattern::Optimize(Insn &insn) {
+    insn.SetMOP(AArch64CG::kMd[replaceMop]);
+}
+
+void SameDefPattern::Run() {
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!CheckCondition(*insn) || !bb->GetEhPreds().empty()) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+
+void SameDefPattern::Init() {
+    currInsn = nullptr;
+    sameInsn = nullptr;
+}
+
+bool SameDefPattern::CheckCondition(Insn &insn) {
+    MOperator mOp = insn.GetMachineOpcode();
+    if (insn.GetBB()->GetPreds().size() > k1BitSize) {
+        return false;
+    }
+    if (insn.GetBB()->HasCall()) {
+        return false;
+    }
+    return (mOp == MOP_wcmprr) || (mOp == MOP_xcmprr) || (mOp == MOP_xwcmprre) || (mOp == MOP_xcmprrs);
+}
+
+void SameDefPattern::Optimize(Insn &insn) {
+    InsnSet sameDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, 0, false);
+    if (sameDefSet.size() != k1BitSize) {
+        return;
+    }
+    Insn *sameDefInsn = *sameDefSet.begin();
+    if (sameDefInsn == nullptr) {
+        return;
+    }
+    currInsn = &insn;
+    sameInsn = sameDefInsn;
+    if (!IsSameDef()) {
+        return;
+    }
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In SameDefPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "=======remove insn: \n";
+        insn.Dump();
+        LogInfo::MapleLogger() << "=======sameDef insn: \n";
+        sameDefInsn->Dump();
+    }
+    insn.GetBB()->RemoveInsn(insn);
+}
+
+bool SameDefPattern::IsSameDef() {
+    if (!CheckCondition(*sameInsn)) {
+        return false;
+    }
+    if (currInsn == sameInsn) {
+        return false;
+    }
+    if (currInsn->GetMachineOpcode() != sameInsn->GetMachineOpcode()) {
+        return false;
+    }
+    for (uint32 i = k1BitSize; i < currInsn->GetOperandSize(); ++i) {
+        Operand &opnd0 = currInsn->GetOperand(i);
+        Operand &opnd1 = sameInsn->GetOperand(i);
+        if (!IsSameOperand(opnd0, opnd1)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool SameDefPattern::IsSameOperand(Operand &opnd0, Operand &opnd1) {
+    if (opnd0.IsRegister()) {
+        CHECK_FATAL(opnd1.IsRegister(), "must be RegOperand!");
+        RegOperand &regOpnd0 = static_cast<RegOperand&>(opnd0);
+        RegOperand &regOpnd1 = static_cast<RegOperand&>(opnd1);
+        if (!RegOperand::IsSameReg(regOpnd0, regOpnd1)) {
+            return false;
+        }
+        regno_t regNo = regOpnd0.GetRegisterNumber();
+        /* the src reg must not be redefined between sameInsn and currInsn */
+        if (SrcRegIsRedefined(regNo)) {
+            return false;
+        }
+    } else if (opnd0.IsOpdShift()) {
+        CHECK_FATAL(opnd1.IsOpdShift(), "must be ShiftOperand!");
+        BitShiftOperand &shiftOpnd0 = static_cast<BitShiftOperand&>(opnd0);
+        BitShiftOperand &shiftOpnd1 = static_cast<BitShiftOperand&>(opnd1);
+        if (shiftOpnd0.GetShiftAmount() != shiftOpnd1.GetShiftAmount()) {
+            return false;
+        }
+    } else if (opnd0.IsOpdExtend()) {
+        CHECK_FATAL(opnd1.IsOpdExtend(), "must be ExtendOperand!");
+        ExtendShiftOperand &extendOpnd0 = static_cast<ExtendShiftOperand&>(opnd0);
+        ExtendShiftOperand &extendOpnd1 = static_cast<ExtendShiftOperand&>(opnd1);
+        if (extendOpnd0.GetShiftAmount() != extendOpnd1.GetShiftAmount()) {
+            return false;
+        }
+    } else {
+        return false;
+    }
+    return true;
+}
+
+bool SameDefPattern::SrcRegIsRedefined(regno_t regNo) {
+    AArch64ReachingDefinition *a64RD = static_cast<AArch64ReachingDefinition*>(cgFunc.GetRD());
+    if (currInsn->GetBB() == sameInsn->GetBB()) {
+        FOR_BB_INSNS(insn, currInsn->GetBB()) {
+            if (insn->GetMachineOpcode() == MOP_xbl) {
+                return true;
+            }
+        }
+        if (!a64RD->FindRegDefBetweenInsn(regNo, sameInsn, currInsn).empty()) {
+            return true;
+        }
+    } else if (a64RD->HasRegDefBetweenInsnGlobal(regNo, *sameInsn, *currInsn)) {
+        return true;
+    }
+    return false;
+}
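+
+/* Illustrative example (a sketch, not from the original patch): AndCbzPattern below
+ * rewrites a single-bit test into tbz/tbnz, e.g.
+ *     and w0, w1, #8
+ *     cbz w0, .Label      ===>   tbz w1, #3, .Label
+ * CalculateLogValue returns log2 of the mask (here 3), or -1 when popcount != 1. */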
+
+void AndCbzPattern::Init() {
+    prevInsn = nullptr;
+}
+
+bool AndCbzPattern::IsAdjacentArea(Insn &prev, Insn &curr) const {
+    if (prev.GetBB() == curr.GetBB()) {
+        return true;
+    }
+    for (auto *succ : prev.GetBB()->GetSuccs()) {
+        if (succ == curr.GetBB()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AndCbzPattern::CheckCondition(Insn &insn) {
+    auto *aar64RD = static_cast<AArch64ReachingDefinition*>(cgFunc.GetRD());
+    MOperator mOp = insn.GetMachineOpcode();
+    if ((mOp != MOP_wcbz) && (mOp != MOP_xcbz) && (mOp != MOP_wcbnz) && (mOp != MOP_xcbnz)) {
+        return false;
+    }
+    regno_t regNo = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    InsnSet defSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, regNo, true);
+    if (defSet.size() != k1BitSize) {
+        return false;
+    }
+    prevInsn = *defSet.begin();
+    if (prevInsn->GetMachineOpcode() != MOP_wandrri12 && prevInsn->GetMachineOpcode() != MOP_xandrri13) {
+        return false;
+    }
+    if (!IsAdjacentArea(*prevInsn, insn)) {
+        return false;
+    }
+    regno_t propRegNo = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber();
+    if (prevInsn->GetBB() == insn.GetBB() && !(aar64RD->FindRegDefBetweenInsn(propRegNo, prevInsn, &insn).empty())) {
+        return false;
+    }
+    if (prevInsn->GetBB() != insn.GetBB() && aar64RD->HasRegDefBetweenInsnGlobal(propRegNo, *prevInsn, insn)) {
+        return false;
+    }
+    if (!(cgFunc.GetRD()->FindUseForRegOpnd(insn, regNo, true).empty())) {
+        return false;
+    }
+    return true;
+}
+
+int64 AndCbzPattern::CalculateLogValue(int64 val) const {
+    return (__builtin_popcountll(static_cast<uint64>(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1;
+}
+
+void AndCbzPattern::Optimize(Insn &insn) {
+    BB *bb = insn.GetBB();
+    auto &aarchFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    auto &andImm = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+    int64 tbzVal = CalculateLogValue(andImm.GetValue());
+    if (tbzVal < 0) {
+        return;
+    }
+    MOperator mOp = insn.GetMachineOpcode();
+    MOperator newMop = MOP_undef;
+    switch (mOp) {
+        case MOP_wcbz:
+            newMop = MOP_wtbz;
+            break;
+        case MOP_wcbnz:
+            newMop = MOP_wtbnz;
+            break;
+        case MOP_xcbz:
+            newMop = MOP_xtbz;
+            break;
+        case MOP_xcbnz:
+            newMop = MOP_xtbnz;
+            break;
+        default:
+            CHECK_FATAL(false, "must be cbz/cbnz");
+            break;
+    }
+    auto &label = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    ImmOperand &tbzImm = aarchFunc.CreateImmOperand(tbzVal, k8BitSize, false);
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnSecondOpnd), tbzImm, label);
+    newInsn.SetId(insn.GetId());
+    bb->ReplaceInsn(insn, newInsn);
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In AndCbzPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "=======PrevInsn :\n";
+        prevInsn->Dump();
+        LogInfo::MapleLogger() << "=======ReplaceInsn :\n";
+        insn.Dump();
+        LogInfo::MapleLogger() << "=======NewInsn :\n";
+        newInsn.Dump();
+    }
+    cgFunc.GetRD()->UpdateInOut(*bb, true);
+}
+
+void AndCbzPattern::Run() {
+    Init();
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!insn->IsMachineInstruction() || !CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
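+
+/* Illustrative example (a sketch, not from the original patch): SameRHSPropPattern below
+ * reuses the result of an earlier computation with an identical right-hand side, e.g.
+ *     add w0, w1, #4                add w0, w1, #4
+ *     ...                   ===>    ...
+ *     add w2, w1, #4                mov w2, w0
+ * valid only while neither w0 nor w1 is redefined in between (checked via the RD info). */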
+
+void SameRHSPropPattern::Init() {
+    prevInsn = nullptr;
+    candidates = {MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12,
+                  MOP_wmovri32, MOP_xmovri64, MOP_wmovrr, MOP_xmovrr};
+}
+
+bool SameRHSPropPattern::IsSameOperand(Operand *opnd1, Operand *opnd2) const {
+    if (opnd1 == nullptr && opnd2 == nullptr) {
+        return true;
+    } else if (opnd1 == nullptr || opnd2 == nullptr) {
+        return false;
+    }
+    if (opnd1->IsRegister() && opnd2->IsRegister()) {
+        return RegOperand::IsSameReg(*opnd1, *opnd2);
+    } else if (opnd1->IsImmediate() && opnd2->IsImmediate()) {
+        auto *immOpnd1 = static_cast<ImmOperand*>(opnd1);
+        auto *immOpnd2 = static_cast<ImmOperand*>(opnd2);
+        return (immOpnd1->GetSize() == immOpnd2->GetSize()) && (immOpnd1->GetValue() == immOpnd2->GetValue());
+    }
+    return false;
+}
+
+bool SameRHSPropPattern::FindSameRHSInsnInBB(Insn &insn) {
+    uint32 opndNum = insn.GetOperandSize();
+    Operand *curRegOpnd = nullptr;
+    Operand *curImmOpnd = nullptr;
+    for (uint32 i = 0; i < opndNum; ++i) {
+        if (insn.OpndIsDef(i)) {
+            continue;
+        }
+        Operand &opnd = insn.GetOperand(i);
+        if (opnd.IsRegister()) {
+            curRegOpnd = &opnd;
+        } else if (opnd.IsImmediate()) {
+            auto &immOpnd = static_cast<ImmOperand&>(opnd);
+            if (immOpnd.GetVary() == kUnAdjustVary) {
+                return false;
+            }
+            curImmOpnd = &opnd;
+        }
+    }
+    if (curRegOpnd == nullptr && curImmOpnd != nullptr && static_cast<ImmOperand*>(curImmOpnd)->IsZero()) {
+        return false;
+    }
+    BB *bb = insn.GetBB();
+    for (auto *cursor = insn.GetPrev(); cursor != nullptr && cursor != bb->GetFirstInsn(); cursor = cursor->GetPrev()) {
+        if (!cursor->IsMachineInstruction()) {
+            continue;
+        }
+        if (cursor->IsCall() && !cgFunc.IsAfterRegAlloc()) {
+            return false;
+        }
+        if (cursor->GetMachineOpcode() != insn.GetMachineOpcode()) {
+            continue;
+        }
+        uint32 candOpndNum = cursor->GetOperandSize();
+        Operand *candRegOpnd = nullptr;
+        Operand *candImmOpnd = nullptr;
+        for (uint32 i = 0; i < candOpndNum; ++i) {
+            Operand &opnd = cursor->GetOperand(i);
+            if (cursor->OpndIsDef(i)) {
+                continue;
+            }
+            if (opnd.IsRegister()) {
+                candRegOpnd = &opnd;
+            } else if (opnd.IsImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                if (immOpnd.GetVary() == kUnAdjustVary) {
+                    return false;
+                }
+                candImmOpnd = &opnd;
+            }
+        }
+        if (IsSameOperand(curRegOpnd, candRegOpnd) && IsSameOperand(curImmOpnd, candImmOpnd)) {
+            prevInsn = cursor;
+            return true;
+        }
+    }
+    return false;
+}
+
+bool SameRHSPropPattern::CheckCondition(Insn &insn) {
+    if (!insn.IsMachineInstruction()) {
+        return false;
+    }
+    MOperator mOp = insn.GetMachineOpcode();
+    if (std::find(candidates.begin(), candidates.end(), mOp) == candidates.end()) {
+        return false;
+    }
+    if (!FindSameRHSInsnInBB(insn)) {
+        return false;
+    }
+    CHECK_FATAL(prevInsn->GetOperand(kInsnFirstOpnd).IsRegister(), "prevInsn first operand must be register");
+    if (prevInsn->GetOperand(kInsnSecondOpnd).IsRegister() &&
+        RegOperand::IsSameReg(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd))) {
+        return false;
+    }
+    uint32 opndNum = prevInsn->GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = prevInsn->GetOperand(i);
+        if (!opnd.IsRegister()) {
+            continue;
+        }
+        regno_t regNO = static_cast<RegOperand&>(opnd).GetRegisterNumber();
+        if (!(cgFunc.GetRD()->FindRegDefBetweenInsn(regNO, prevInsn->GetNext(), insn.GetPrev()).empty())) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void SameRHSPropPattern::Optimize(Insn &insn) {
+    BB *bb = insn.GetBB();
+    Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd);
+    uint32 bitSize = static_cast<RegOperand&>(destOpnd).GetSize();
+    MOperator mOp = (bitSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr);
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, destOpnd, prevInsn->GetOperand(kInsnFirstOpnd));
+    newInsn.SetId(insn.GetId());
+    bb->ReplaceInsn(insn, newInsn);
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In SameRHSPropPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "======= PrevInsn :\n";
+        prevInsn->Dump();
+        LogInfo::MapleLogger() << "======= ReplaceInsn :\n";
+        insn.Dump();
+        LogInfo::MapleLogger() << "======= NewInsn :\n";
+        newInsn.Dump();
+    }
+    cgFunc.GetRD()->UpdateInOut(*bb, true);
+}
+
+void SameRHSPropPattern::Run() {
+    Init();
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d74c115646c73f9f17d3a0d3d6c3bcda8760dfca
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp
@@ -0,0 +1,941 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_ico.h"
+#include "ico.h"
+#include "cg.h"
+#include "cg_option.h"
+#include "aarch64_isa.h"
+#include "aarch64_insn.h"
+#include "aarch64_cgfunc.h"
+
+/*
+ * This phase implements if-conversion optimization,
+ * which tries to convert conditional branches into cset/csel instructions
+ */
+namespace maplebe {
+void AArch64IfConversionOptimizer::InitOptimizePatterns() {
+    singlePassPatterns.emplace_back(memPool->New<AArch64ICOIfThenElsePattern>(*cgFunc));
+    singlePassPatterns.emplace_back(memPool->New<AArch64ICOSameCondPattern>(*cgFunc));
+    singlePassPatterns.emplace_back(memPool->New<AArch64ICOMorePredsPattern>(*cgFunc));
+}
+
+/* build a ccmp Insn */
+Insn *AArch64ICOPattern::BuildCcmpInsn(ConditionCode ccCode, const Insn *cmpInsn) const {
+    Operand &opnd0 = cmpInsn->GetOperand(kInsnFirstOpnd);
+    Operand &opnd1 = cmpInsn->GetOperand(kInsnSecondOpnd);
+    Operand &opnd2 = cmpInsn->GetOperand(kInsnThirdOpnd);
+    /* ccmp has only int opnds */
+    if (!static_cast<RegOperand&>(opnd1).IsOfIntClass()) {
+        return nullptr;
+    }
+    AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+    uint32 nzcv = GetNZCV(ccCode, false);
+    if (nzcv == k16BitSize) {
+        return nullptr;
+    }
+    ImmOperand &opnd3 = func->CreateImmOperand(PTY_u8, nzcv);
+    CondOperand &cond = static_cast<AArch64CGFunc*>(cgFunc)->GetCondOperand(ccCode);
+    uint32 dSize = opnd1.GetSize();
+    bool isIntTy = opnd2.IsIntImmediate();
+    MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xccmpriic : MOP_wccmpriic)
+                                : (dSize == k64BitSize ? MOP_xccmprric : MOP_wccmprric);
+    /* cmp allows opnd2 in the range 0-4095, ccmp only 0-31 */
+    if (isIntTy && static_cast<RegOperand&>(opnd2).GetRegisterNumber() >= k32BitSize) {
+        return nullptr;
+    }
+    return &cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd2, opnd3, cond);
+}
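+
+/* Worked example (illustrative, not from the original patch): merging the
+ * back-to-back compare/branch blocks of "if (a == 0 || b == 0)":
+ *     cmp  w0, #0                 cmp  w0, #0
+ *     beq  .L            ===>     ccmp w1, #0, #4, ne
+ *     cmp  w1, #0                 beq  .L
+ *     beq  .L
+ * if a != 0 (NE holds) the ccmp really compares b; otherwise it forces Z = 1
+ * (nzcv = 4, i.e. GetNZCV(CC_NE, false) below), so the final beq still fires. */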
+
+/* Return the NZCV flag pattern for ccCode: the flags that make ccCode fail
+ * (or hold, when inverse is set); k16BitSize means the code is not encodable. */
+uint32 AArch64ICOPattern::GetNZCV(ConditionCode ccCode, bool inverse) {
+    switch (ccCode) {
+        case CC_EQ:
+            return inverse ? k4BitSize : k0BitSize;
+        case CC_HS:
+            return inverse ? k2BitSize : k0BitSize;
+        case CC_MI:
+            return inverse ? k8BitSize : k0BitSize;
+        case CC_VS:
+            return inverse ? k1BitSize : k0BitSize;
+        case CC_VC:
+            return inverse ? k0BitSize : k1BitSize;
+        case CC_LS:
+            return inverse ? k4BitSize : k2BitSize;
+        case CC_LO:
+            return inverse ? k0BitSize : k2BitSize;
+        case CC_NE:
+            return inverse ? k0BitSize : k4BitSize;
+        case CC_HI:
+            return inverse ? k2BitSize : k4BitSize;
+        case CC_PL:
+            return inverse ? k0BitSize : k8BitSize;
+        default:
+            return k16BitSize;
+    }
+}
+
+Insn *AArch64ICOPattern::BuildCmpInsn(const Insn &condBr) const {
+    AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+    RegOperand &reg = static_cast<RegOperand&>(condBr.GetOperand(0));
+    PrimType ptyp = (reg.GetSize() == k64BitSize) ? PTY_u64 : PTY_u32;
+    ImmOperand &numZero = func->CreateImmOperand(ptyp, 0);
+    Operand &rflag = func->GetOrCreateRflag();
+    MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcmpri : MOP_wcmpri;
+    Insn &cmpInsn = func->GetInsnBuilder()->BuildInsn(mopCode, rflag, reg, numZero);
+    return &cmpInsn;
+}
+
+bool AArch64ICOPattern::IsSetInsn(const Insn &insn, Operand *&dest, std::vector<Operand*> &src) const {
+    MOperator mOpCode = insn.GetMachineOpcode();
+    if ((mOpCode >= MOP_xmovrr && mOpCode <= MOP_xvmovd) || cgFunc->GetTheCFG()->IsAddOrSubInsn(insn)) {
+        dest = &(insn.GetOperand(0));
+        for (uint32 i = 1; i < insn.GetOperandSize(); ++i) {
+            (void)src.emplace_back(&(insn.GetOperand(i)));
+        }
+        return true;
+    }
+    dest = nullptr;
+    src.clear();
+    return false;
+}
+
+ConditionCode AArch64ICOPattern::Encode(MOperator mOp, bool inverse) const {
+    switch (mOp) {
+        case MOP_bmi:
+            return inverse ? CC_PL : CC_MI;
+        case MOP_bvc:
+            return inverse ? CC_VS : CC_VC;
+        case MOP_bls:
+            return inverse ? CC_HI : CC_LS;
+        case MOP_blt:
+            return inverse ? CC_GE : CC_LT;
+        case MOP_ble:
+            return inverse ? CC_GT : CC_LE;
+        case MOP_beq:
+            return inverse ? CC_NE : CC_EQ;
+        case MOP_bne:
+            return inverse ? CC_EQ : CC_NE;
+        case MOP_blo:
+            return inverse ? CC_HS : CC_LO;
+        case MOP_bpl:
+            return inverse ? CC_MI : CC_PL;
+        case MOP_bhs:
+            return inverse ? CC_LO : CC_HS;
+        case MOP_bvs:
+            return inverse ? CC_VC : CC_VS;
+        case MOP_bhi:
+            return inverse ? CC_LS : CC_HI;
+        case MOP_bgt:
+            return inverse ? CC_LE : CC_GT;
+        case MOP_bge:
+            return inverse ? CC_LT : CC_GE;
+        case MOP_wcbnz:
+            return inverse ? CC_EQ : CC_NE;
+        case MOP_xcbnz:
+            return inverse ? CC_EQ : CC_NE;
+        case MOP_wcbz:
+            return inverse ? CC_NE : CC_EQ;
+        case MOP_xcbz:
+            return inverse ? CC_NE : CC_EQ;
+        default:
+            return kCcLast;
+    }
+}
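+
+/* e.g. Encode(MOP_beq, false) == CC_EQ and Encode(MOP_beq, true) == CC_NE (illustrative):
+ * the inverted code is used when the generated cset/ccmp must hold exactly when the
+ * original branch is NOT taken. */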
+
+Insn *AArch64ICOPattern::BuildCondSet(const Insn &branch, RegOperand &reg, bool inverse) const {
+    ConditionCode ccCode = Encode(branch.GetMachineOpcode(), inverse);
+    DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast");
+    AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+    CondOperand &cond = func->GetCondOperand(ccCode);
+    Operand &rflag = func->GetOrCreateRflag();
+    MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcsetrc : MOP_wcsetrc;
+    return &func->GetInsnBuilder()->BuildInsn(mopCode, reg, cond, rflag);
+}
+
+Insn *AArch64ICOPattern::BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1,
+                                      RegOperand &src2) const {
+    ConditionCode ccCode = Encode(branch.GetMachineOpcode(), false);
+    DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast");
+    CondOperand &cond = static_cast<AArch64CGFunc*>(cgFunc)->GetCondOperand(ccCode);
+    Operand &rflag = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreateRflag();
+    return &cgFunc->GetInsnBuilder()->BuildInsn(mOp, dst, src1, src2, cond, rflag);
+}
+
+void AArch64ICOIfThenElsePattern::GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest,
+                                                     RegOperand &destReg, std::vector<Insn*> &generateInsn) {
+    ImmOperand &imm1 = static_cast<ImmOperand&>(ifDest);
+    ImmOperand &imm2 = static_cast<ImmOperand&>(elseDest);
+    bool inverse = imm1.IsZero() && imm2.IsOne();
+    if (inverse || (imm2.IsZero() && imm1.IsOne())) {
+        Insn *csetInsn = BuildCondSet(branchInsn, destReg, inverse);
+        DEBUG_ASSERT(csetInsn != nullptr, "build a insn failed");
+        generateInsn.emplace_back(csetInsn);
+    } else if (imm1.GetValue() == imm2.GetValue()) {
+        bool destIsIntTy = destReg.IsOfIntClass();
+        MOperator mOp = destIsIntTy ? ((destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_wmovri32)) :
+                                      ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri));
+        Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, destReg, imm1);
+        generateInsn.emplace_back(&tempInsn);
+    } else {
+        bool destIsIntTy = destReg.IsOfIntClass();
+        uint32 dSize = destReg.GetSize();
+        bool isD64 = dSize == k64BitSize;
+        MOperator mOp = destIsIntTy ? ((destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_wmovri32)) :
+                                      ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri));
+        RegOperand *tempTarIf = nullptr;
+        if (imm1.IsZero()) {
+            tempTarIf = &cgFunc->GetZeroOpnd(dSize);
+        } else {
+            tempTarIf = cgFunc->GetTheCFG()->CreateVregFromReg(destReg);
+            Insn &tempInsnIf = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *tempTarIf, imm1);
+            generateInsn.emplace_back(&tempInsnIf);
+        }
+
+        RegOperand *tempTarElse = nullptr;
+        if (imm2.IsZero()) {
+            tempTarElse = &cgFunc->GetZeroOpnd(dSize);
+        } else {
+            tempTarElse = cgFunc->GetTheCFG()->CreateVregFromReg(destReg);
+            Insn &tempInsnElse = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *tempTarElse, imm2);
+            generateInsn.emplace_back(&tempInsnElse);
+        }
+
+        bool isIntTy = destReg.IsOfIntClass();
+        MOperator mOpCode = isIntTy ? (isD64 ? MOP_xcselrrrc : MOP_wcselrrrc)
+                                    : (isD64 ? MOP_dcselrrrc : (dSize == k32BitSize ? MOP_scselrrrc : MOP_hcselrrrc));
+        Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tempTarIf, *tempTarElse);
+        CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed");
+        generateInsn.emplace_back(cselInsn);
+    }
+}
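+
+/* Illustrative summary (a sketch, not from the original patch) of GenerateInsnForImm above:
+ *     x = cond ? 1 : 0    ->   cset w0, <cc>
+ *     x = cond ? 5 : 5    ->   mov  w0, #5
+ *     x = cond ? 5 : 7    ->   mov  w8, #5; mov w9, #7; csel w0, w8, w9, <cc>
+ * (w8/w9 stand for the fresh vregs created via CreateVregFromReg). */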
+
+RegOperand *AArch64ICOIfThenElsePattern::GenerateRegAndTempInsn(Operand &dest, const RegOperand &destReg,
+                                                                std::vector<Insn*> &generateInsn) const {
+    RegOperand *reg = nullptr;
+    if (!dest.IsRegister()) {
+        bool destIsIntTy = destReg.IsOfIntClass();
+        bool isDest64 = destReg.GetSize() == k64BitSize;
+        MOperator mOp = destIsIntTy ? (isDest64 ? MOP_xmovri64 : MOP_wmovri32)
+                                    : (isDest64 ? MOP_xdfmovri : MOP_wsfmovri);
+        reg = cgFunc->GetTheCFG()->CreateVregFromReg(destReg);
+        ImmOperand &tempSrcElse = static_cast<ImmOperand&>(dest);
+        if (tempSrcElse.IsZero()) {
+            return &cgFunc->GetZeroOpnd(destReg.GetSize());
+        }
+        Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *reg, tempSrcElse);
+        generateInsn.emplace_back(&tempInsn);
+        return reg;
+    } else {
+        return (static_cast<RegOperand*>(&dest));
+    }
+}
+
+void AArch64ICOIfThenElsePattern::GenerateInsnForReg(const Insn &branchInsn, Operand &ifDest, Operand &elseDest,
+                                                     RegOperand &destReg, std::vector<Insn*> &generateInsn) {
+    RegOperand *tReg = static_cast<RegOperand*>(&ifDest);
+    RegOperand *eReg = static_cast<RegOperand*>(&elseDest);
+
+    /* an if-side "mov w0, w1" and else-side "mov w0, w1" merge into a single "mov w0, w1" */
+    if (eReg->GetRegisterNumber() == tReg->GetRegisterNumber()) {
+        uint32 dSize = destReg.GetSize();
+        bool srcIsIntTy = tReg->IsOfIntClass();
+        bool destIsIntTy = destReg.IsOfIntClass();
+        MOperator mOp;
+        if (dSize == k64BitSize) {
+            mOp = srcIsIntTy ? (destIsIntTy ? MOP_xmovrr : MOP_xvmovdr) : (destIsIntTy ? MOP_xvmovrd : MOP_xvmovd);
+        } else {
+            mOp = srcIsIntTy ? (destIsIntTy ? MOP_wmovrr : MOP_xvmovsr) : (destIsIntTy ? MOP_xvmovrs : MOP_xvmovs);
+        }
+        Insn &tempInsnIf = cgFunc->GetInsnBuilder()->BuildInsn(mOp, destReg, *tReg);
+        generateInsn.emplace_back(&tempInsnIf);
+    } else {
+        uint32 dSize = destReg.GetSize();
+        bool isIntTy = destReg.IsOfIntClass();
+        MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xcselrrrc : MOP_wcselrrrc)
+                                    : (dSize == k64BitSize ? MOP_dcselrrrc : (dSize == k32BitSize ?
+                                       MOP_scselrrrc : MOP_hcselrrrc));
+        Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tReg, *eReg);
+        CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed");
+        generateInsn.emplace_back(cselInsn);
+    }
+}
+
+Operand *AArch64ICOIfThenElsePattern::GetDestReg(const std::map<Operand*, std::vector<Operand*>> &destSrcMap,
+                                                 const RegOperand &destReg) const {
+    Operand *dest = nullptr;
+    for (const auto &destSrcPair : destSrcMap) {
+        DEBUG_ASSERT(destSrcPair.first->IsRegister(), "opnd must be register");
+        RegOperand *destRegInMap = static_cast<RegOperand*>(destSrcPair.first);
+        DEBUG_ASSERT(destRegInMap != nullptr, "nullptr check");
+        if (destRegInMap->GetRegisterNumber() == destReg.GetRegisterNumber()) {
+            if (destSrcPair.second.size() > 1) {
+                dest = destSrcPair.first;
+            } else {
+                dest = destSrcPair.second[0];
+            }
+            break;
+        }
+    }
+    return dest;
+}
+
+bool AArch64ICOIfThenElsePattern::BuildCondMovInsn(BB &cmpBB, const BB &bb,
+                                                   const std::map<Operand*, std::vector<Operand*>> &ifDestSrcMap,
+                                                   const std::map<Operand*, std::vector<Operand*>> &elseDestSrcMap,
+                                                   bool elseBBIsProcessed,
+                                                   std::vector<Insn*> &generateInsn) {
+    Insn *branchInsn = cgFunc->GetTheCFG()->FindLastCondBrInsn(cmpBB);
+    FOR_BB_INSNS_CONST(insn, (&bb)) {
+        if (!insn->IsMachineInstruction() || insn->IsBranch()) {
+            continue;
+        }
+        Operand *dest = nullptr;
+        std::vector<Operand*> src;
+
+        if (!IsSetInsn(*insn, dest, src)) {
+            DEBUG_ASSERT(false, "insn check");
+        }
+        DEBUG_ASSERT(dest->IsRegister(), "register check");
+        RegOperand *destReg = static_cast<RegOperand*>(dest);
+
+        Operand *elseDest = GetDestReg(elseDestSrcMap, *destReg);
+        Operand *ifDest = GetDestReg(ifDestSrcMap, *destReg);
+
+        if (elseBBIsProcessed) {
+            if (elseDest != nullptr) {
+                continue;
+            }
+            elseDest = dest;
+            DEBUG_ASSERT(ifDest != nullptr, "null ptr check");
+            if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) {
+                continue;
+            }
+        } else {
+            DEBUG_ASSERT(elseDest != nullptr, "null ptr check");
+            if (ifDest == nullptr) {
+                if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) {
+                    continue;
+                }
+                ifDest = dest;
+            }
+        }
+
+        /* generate a cset or csel instruction */
+        DEBUG_ASSERT(ifDest != nullptr, "null ptr check");
+        if (ifDest->IsIntImmediate() && elseDest->IsIntImmediate()) {
+            GenerateInsnForImm(*branchInsn, *ifDest, *elseDest, *destReg, generateInsn);
+        } else {
+            RegOperand *tReg = GenerateRegAndTempInsn(*ifDest, *destReg, generateInsn);
+            RegOperand *eReg = GenerateRegAndTempInsn(*elseDest, *destReg, generateInsn);
+            if ((tReg->GetRegisterType() != eReg->GetRegisterType()) ||
+                (tReg->GetRegisterType() != destReg->GetRegisterType())) {
+                return false;
+            }
+            GenerateInsnForReg(*branchInsn, *tReg, *eReg, *destReg, generateInsn);
+        }
+    }
+
+    return true;
+}
+
+bool AArch64ICOIfThenElsePattern::CheckHasSameDest(std::vector<Insn*> &lInsn, std::vector<Insn*> &rInsn) const {
+    for (size_t i = 0; i < lInsn.size(); ++i) {
+        if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*lInsn[i])) {
+            bool hasSameDest = false;
+            for (size_t j = 0; j < rInsn.size(); ++j) {
+                RegOperand *rDestReg = static_cast<RegOperand*>(&rInsn[j]->GetOperand(0));
+                RegOperand *lDestReg = static_cast<RegOperand*>(&lInsn[i]->GetOperand(0));
+                if (lDestReg->GetRegisterNumber() == rDestReg->GetRegisterNumber()) {
+                    hasSameDest = true;
+                    break;
+                }
+            }
+            if (!hasSameDest) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+bool AArch64ICOIfThenElsePattern::CheckModifiedRegister(Insn &insn,
+                                                        std::map<Operand*, std::vector<Operand*>> &destSrcMap,
+                                                        std::vector<Operand*> &src,
+                                                        Operand &dest, const Insn *cmpInsn,
+                                                        const Operand *flagOpnd) const {
+    /* the src was modified earlier in this block */
+    for (auto srcOpnd : src) {
+        if (srcOpnd->IsRegister()) {
+            RegOperand &srcReg = static_cast<RegOperand&>(*srcOpnd);
+            for (const auto &destSrcPair : destSrcMap) {
+                DEBUG_ASSERT(destSrcPair.first->IsRegister(), "opnd must be register");
+                RegOperand *mapSrcReg = static_cast<RegOperand*>(destSrcPair.first);
+                if (mapSrcReg->GetRegisterNumber() == srcReg.GetRegisterNumber()) {
+                    return false;
+                }
+            }
+        }
+    }
+
+    /* the dest register was modified earlier in this block */
+    DEBUG_ASSERT(dest.IsRegister(), "opnd must be register");
+    RegOperand &destReg = static_cast<RegOperand&>(dest);
+    for (const auto &destSrcPair : destSrcMap) {
+        DEBUG_ASSERT(destSrcPair.first->IsRegister(), "opnd must be register");
+        RegOperand *mapSrcReg = static_cast<RegOperand*>(destSrcPair.first);
+        if (mapSrcReg->GetRegisterNumber() == destReg.GetRegisterNumber()) {
+            return false;
+        }
+    }
+
+    /* a src register modified later in this block will not be processed */
+    for (auto srcOpnd : src) {
+        if (srcOpnd->IsRegister()) {
+            RegOperand &srcReg = static_cast<RegOperand&>(*srcOpnd);
+            if (destReg.IsOfFloatOrSIMDClass() && srcReg.GetRegisterNumber() == RZR) {
+                return false;
+            }
+            for (Insn *tmpInsn = &insn; tmpInsn != nullptr; tmpInsn = tmpInsn->GetNext()) {
+                Operand *tmpDest = nullptr;
+                std::vector<Operand*> tmpSrc;
+                if (IsSetInsn(*tmpInsn, tmpDest, tmpSrc) && tmpDest->Equals(*srcOpnd)) {
+                    DEBUG_ASSERT(tmpDest->IsRegister(), "opnd must be register");
+                    RegOperand *tmpDestReg = static_cast<RegOperand*>(tmpDest);
+                    if (srcReg.GetRegisterNumber() == tmpDestReg->GetRegisterNumber()) {
+                        return false;
+                    }
+                }
+            }
+        }
+    }
+
+    /* an add/sub insn's dest register must not appear in the cmp insn */
+    if (cgFunc->GetTheCFG()->IsAddOrSubInsn(insn)) {
+        RegOperand &insnDestReg = static_cast<RegOperand&>(insn.GetOperand(0));
+        if (flagOpnd) {
+            RegOperand &cmpReg = static_cast<RegOperand&>(cmpInsn->GetOperand(0));
+            if (insnDestReg.GetRegisterNumber() == cmpReg.GetRegisterNumber()) {
+                return false;
+            }
+        } else {
+            RegOperand &cmpReg1 = static_cast<RegOperand&>(cmpInsn->GetOperand(1));
+            if (cmpInsn->GetOperand(2).IsRegister()) {
+                RegOperand &cmpReg2 = static_cast<RegOperand&>(cmpInsn->GetOperand(2));
+                if (insnDestReg.GetRegisterNumber() == cmpReg1.GetRegisterNumber() ||
+                    insnDestReg.GetRegisterNumber() == cmpReg2.GetRegisterNumber()) {
+                    return false;
+                }
+            } else {
+                if (insnDestReg.GetRegisterNumber() == cmpReg1.GetRegisterNumber()) {
+                    return false;
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool AArch64ICOIfThenElsePattern::CheckCondMoveBB(BB *bb, std::map<Operand*, std::vector<Operand*>> &destSrcMap,
+                                                  std::vector<Operand*> &destRegs, std::vector<Insn*> &setInsn,
+                                                  Operand *flagOpnd, Insn *cmpInsn) const {
+    if (bb == nullptr) {
+        return false;
+    }
+    FOR_BB_INSNS(insn, bb) {
+        if (!insn->IsMachineInstruction() || insn->IsBranch()) {
+            continue;
+        }
+        Operand *dest = nullptr;
+        std::vector<Operand*> src;
+
+        if (!IsSetInsn(*insn, dest, src)) {
+            return false;
+        }
+        DEBUG_ASSERT(dest != nullptr, "null ptr check");
+        DEBUG_ASSERT(src.size() != 0, "null ptr check");
+
+        if (!dest->IsRegister()) {
+            return false;
+        }
+
+        for (auto srcOpnd : src) {
+            if (!(srcOpnd->IsConstImmediate()) && !srcOpnd->IsRegister()) {
+                return false;
+            }
+        }
+
+        if (flagOpnd != nullptr) {
+            RegOperand *flagReg = static_cast<RegOperand*>(flagOpnd);
+            regno_t flagRegNO = flagReg->GetRegisterNumber();
+            if (bb->GetLiveOut()->TestBit(flagRegNO)) {
+                return false;
+            }
+        }
+
+        if (!CheckModifiedRegister(*insn, destSrcMap, src, *dest, cmpInsn, flagOpnd)) {
+            return false;
+        }
+
+        (void)destSrcMap.insert(std::make_pair(dest, src));
+        destRegs.emplace_back(dest);
+        (void)setInsn.emplace_back(insn);
+    }
+    return true;
+}
+
+/* Convert conditional branches into cset/csel instructions */
+bool AArch64ICOIfThenElsePattern::DoOpt(BB &cmpBB, BB *ifBB, BB *elseBB, BB &joinBB) {
+    Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(cmpBB);
+    DEBUG_ASSERT(condBr != nullptr, "nullptr check");
+    Insn *cmpInsn = FindLastCmpInsn(cmpBB);
+    Operand *flagOpnd = nullptr;
+    /* for cbnz and cbz instructions */
+    if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) {
+        Operand &opnd0 = condBr->GetOperand(0);
+        if (opnd0.IsRegister() && static_cast<RegOperand&>(opnd0).GetRegisterNumber() == RZR) {
+            return false;
+        }
+        cmpInsn = condBr;
+        flagOpnd = &(opnd0);
+    }
+
+    /* tbz will not be optimized */
+    MOperator mOperator = condBr->GetMachineOpcode();
+    if (mOperator == MOP_xtbz || mOperator == MOP_wtbz || mOperator == MOP_xtbnz || mOperator == MOP_wtbnz) {
+        return false;
+    }
+    if (cmpInsn == nullptr) {
+        return false;
+    }
+
+    std::vector<Operand*> ifDestRegs;
+    std::vector<Insn*> ifSetInsn;
+    std::vector<Operand*> elseDestRegs;
+    std::vector<Insn*> elseSetInsn;
+
+    std::map<Operand*, std::vector<Operand*>> ifDestSrcMap;
+    std::map<Operand*, std::vector<Operand*>> elseDestSrcMap;
+
+    if (!CheckCondMoveBB(elseBB, elseDestSrcMap, elseDestRegs, elseSetInsn, flagOpnd, cmpInsn) ||
+        (ifBB != nullptr && !CheckCondMoveBB(ifBB, ifDestSrcMap, ifDestRegs, ifSetInsn, flagOpnd, cmpInsn))) {
+        return false;
+    }
+
+    if (!CheckHasSameDest(ifSetInsn, elseSetInsn) || !CheckHasSameDest(elseSetInsn, ifSetInsn)) {
+        return false;
+    }
+
+    size_t count = elseDestRegs.size();
+
+    for (size_t i = 0; i < ifDestRegs.size(); ++i) {
+        bool foundInElse = false;
+        for (size_t j = 0; j < elseDestRegs.size(); ++j) {
+            RegOperand *elseDestReg = static_cast<RegOperand*>(elseDestRegs[j]);
+            RegOperand *ifDestReg = static_cast<RegOperand*>(ifDestRegs[i]);
+            if (ifDestReg->GetRegisterNumber() == elseDestReg->GetRegisterNumber()) {
+                if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*ifSetInsn[i]) &&
+                    cgFunc->GetTheCFG()->IsAddOrSubInsn(*elseSetInsn[j])) {
+                    return false;
+                }
+                foundInElse = true;
+                break;
+            }
+        }
+        if (foundInElse) {
+            continue;
+        } else {
+            ++count;
+        }
+    }
+    if (count > kThreshold) {
+        return false;
+    }
+
+    /* generate insns */
+    std::vector<Insn*> elseGenerateInsn;
+    std::vector<Insn*> ifGenerateInsn;
+    bool elseBBProcessResult = false;
+    if (elseBB != nullptr) {
+        elseBBProcessResult = BuildCondMovInsn(cmpBB, *elseBB, ifDestSrcMap, elseDestSrcMap, false, elseGenerateInsn);
+    }
+    bool ifBBProcessResult = false;
+    if (ifBB != nullptr) {
+        ifBBProcessResult = BuildCondMovInsn(cmpBB, *ifBB, ifDestSrcMap, elseDestSrcMap, true, ifGenerateInsn);
+    }
+    if (!elseBBProcessResult || (ifBB != nullptr && !ifBBProcessResult)) {
+        return false;
+    }
+
+    /* insert insns */
+    if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) {
+        Insn *innerCmpInsn = BuildCmpInsn(*condBr);
+        cmpBB.InsertInsnBefore(*condBr, *innerCmpInsn);
+        cmpInsn = innerCmpInsn;
+    }
+
+    if (elseBB != nullptr) {
+        cmpBB.SetKind(elseBB->GetKind());
+    } else {
+        DEBUG_ASSERT(ifBB != nullptr, "ifBB should not be nullptr");
+        cmpBB.SetKind(ifBB->GetKind());
+    }
+
+    for (auto setInsn : ifSetInsn) {
+        if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*setInsn)) {
+            (void)cmpBB.InsertInsnBefore(*cmpInsn, *setInsn);
+        }
+    }
+
+    for (auto setInsn : elseSetInsn) {
+        if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*setInsn)) {
+            (void)cmpBB.InsertInsnBefore(*cmpInsn, *setInsn);
+        }
+    }
+
+    /* delete condBr */
+    cmpBB.RemoveInsn(*condBr);
+    /* insert the goto insn after the csel insn */
+    if (cmpBB.GetKind() == BB::kBBGoto || cmpBB.GetKind() == BB::kBBIf) {
+        if (elseBB != nullptr) {
+            (void)cmpBB.InsertInsnAfter(*cmpBB.GetLastInsn(), *elseBB->GetLastInsn());
+        } else {
+            DEBUG_ASSERT(ifBB != nullptr, "ifBB should not be nullptr");
+            (void)cmpBB.InsertInsnAfter(*cmpBB.GetLastInsn(), *ifBB->GetLastInsn());
+        }
+    }
+
+    /* insert the instructions of the branches after cmpInsn */
+    for (auto itr = elseGenerateInsn.rbegin(); itr != elseGenerateInsn.rend(); ++itr) {
+        (void)cmpBB.InsertInsnAfter(*cmpInsn, **itr);
+    }
+    for (auto itr = ifGenerateInsn.rbegin(); itr != ifGenerateInsn.rend(); ++itr) {
+        (void)cmpBB.InsertInsnAfter(*cmpInsn, **itr);
+    }
+
+    /* remove the branches and merge the join BB */
+    if (ifBB != nullptr) {
+        cgFunc->GetTheCFG()->RemoveBB(*ifBB);
+    }
+    if (elseBB != nullptr) {
+        cgFunc->GetTheCFG()->RemoveBB(*elseBB);
+    }
+
+    if (cmpBB.GetKind() != BB::kBBIf && cmpBB.GetNext() == &joinBB &&
+        !maplebe::CGCFG::InLSDA(joinBB.GetLabIdx(), *cgFunc->GetEHFunc()) &&
+        cgFunc->GetTheCFG()->CanMerge(cmpBB, joinBB)) {
+        maplebe::CGCFG::MergeBB(cmpBB, joinBB, *cgFunc);
+        keepPosition = true;
+    }
+    return true;
+}
+
+/*
+ * Find an IF-THEN-ELSE or IF-THEN basic block pattern,
+ * and then invoke DoOpt(...) to finish the optimization.
+ */
+bool AArch64ICOIfThenElsePattern::Optimize(BB &curBB) {
+    if (curBB.GetKind() != BB::kBBIf) {
+        return false;
+    }
+    BB *ifBB = nullptr;
+    BB *elseBB = nullptr;
+    BB *joinBB = nullptr;
+
+    BB *thenDest = CGCFG::GetTargetSuc(curBB);
+    BB *elseDest = curBB.GetNext();
+    CHECK_FATAL(thenDest != nullptr, "then_dest is null in ITEPattern::Optimize");
+    CHECK_FATAL(elseDest != nullptr, "else_dest is null in ITEPattern::Optimize");
+    /* IF-THEN-ELSE */
+    if (thenDest->NumPreds() == 1 && thenDest->NumSuccs() == 1 && elseDest->NumSuccs() == 1 &&
+        elseDest->NumPreds() == 1 && thenDest->GetSuccs().front() == elseDest->GetSuccs().front()) {
+        ifBB = thenDest;
+        elseBB = elseDest;
+        joinBB = thenDest->GetSuccs().front();
+    } else if (elseDest->NumPreds() == 1 && elseDest->NumSuccs() == 1 && elseDest->GetSuccs().front() == thenDest) {
+        /* IF-THEN */
+        ifBB = nullptr;
+        elseBB = elseDest;
+        joinBB = thenDest;
+    } else {
+        /* not a form we can handle */
+        return false;
+    }
+    DEBUG_ASSERT(elseBB != nullptr, "elseBB should not be nullptr");
+    if (CGCFG::InLSDA(elseBB->GetLabIdx(), *cgFunc->GetEHFunc()) ||
+        CGCFG::InSwitchTable(elseBB->GetLabIdx(), *cgFunc)) {
+        return false;
+    }
+
+    if (ifBB != nullptr &&
+        (CGCFG::InLSDA(ifBB->GetLabIdx(), *cgFunc->GetEHFunc()) ||
+         CGCFG::InSwitchTable(ifBB->GetLabIdx(), *cgFunc))) {
+        return false;
+    }
+    return DoOpt(curBB, ifBB, elseBB, *joinBB);
+}
+
+/* If (cmp || cmp) then
+ * or
+ * If (cmp && cmp) then */
+bool AArch64ICOSameCondPattern::Optimize(BB &secondIfBB) {
+    if (secondIfBB.GetKind() != BB::kBBIf || secondIfBB.NumPreds() != 1) {
+        return false;
+    }
+    BB *firstIfBB = secondIfBB.GetPrev();
+    if (firstIfBB == nullptr || firstIfBB->GetKind() != BB::kBBIf) {
+        return false;
+    }
+    BB *nextBB = firstIfBB->GetNext();
+    CHECK_FATAL(nextBB != nullptr, "nextBB is null in AArch64ICOSameCondPattern::Optimize");
+    /* firstIfBB's next BB must be secondIfBB */
+    if (nextBB->GetId() != secondIfBB.GetId()) {
+        return false;
+    }
+    return DoOpt(firstIfBB, secondIfBB);
+}
+
+bool AArch64ICOPattern::CheckMop(MOperator mOperator) const {
+    switch (mOperator) {
+        case MOP_beq:
+        case MOP_bne:
+        case MOP_blt:
+        case MOP_ble:
+        case MOP_bgt:
+        case MOP_bge:
+        case MOP_blo:
+        case MOP_bls:
+        case MOP_bhs:
+        case MOP_bhi:
+        case MOP_bpl:
+        case MOP_bmi:
+        case MOP_bvc:
+        case MOP_bvs:
+            return true;
+        default:
+            return false;
+    }
+}
+
+/* branchInsn1 is firstIfBB's last conditional branch;
+ * branchInsn2 is secondIfBB's last conditional branch.
+ *
+ * Limitation: branchInsn1 must use the same condition as branchInsn2.
+ */
+bool AArch64ICOSameCondPattern::DoOpt(BB *firstIfBB, BB &secondIfBB) {
+    Insn *branchInsn1 = cgFunc->GetTheCFG()->FindLastCondBrInsn(*firstIfBB);
+    DEBUG_ASSERT(branchInsn1 != nullptr, "nullptr check");
+    Insn *cmpInsn1 = FindLastCmpInsn(*firstIfBB);
+    MOperator mOperator1 = branchInsn1->GetMachineOpcode();
+    Insn *branchInsn2 = cgFunc->GetTheCFG()->FindLastCondBrInsn(secondIfBB);
+    DEBUG_ASSERT(branchInsn2 != nullptr, "nullptr check");
+    Insn *cmpInsn2 = FindLastCmpInsn(secondIfBB);
+    MOperator mOperator2 = branchInsn2->GetMachineOpcode();
+    if (cmpInsn1 == nullptr || cmpInsn2 == nullptr) {
+        return false;
+    }
+
+    /* tbz and cbz will not be optimized */
+    if (mOperator1 != mOperator2 || !CheckMop(mOperator1)) {
+        return false;
+    }
+
+    /* the two BBs must branch to the same label */
+    std::vector<LabelOperand*> labelOpnd1 = GetLabelOpnds(*branchInsn1);
+    std::vector<LabelOperand*> labelOpnd2 = GetLabelOpnds(*branchInsn2);
+    if (labelOpnd1.size() != 1 || labelOpnd2.size() != 1 ||
+        labelOpnd1[0]->GetLabelIndex() != labelOpnd2[0]->GetLabelIndex()) {
+        return false;
+    }
+
+    /* secondIfBB must contain only branchInsn2 and cmpInsn2 */
+    FOR_BB_INSNS_REV(insn, &secondIfBB) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        if (insn != branchInsn2 && insn != cmpInsn2) {
+            return false;
+        }
+    }
+
+    /* build the ccmp Insn */
+    ConditionCode ccCode = Encode(branchInsn1->GetMachineOpcode(), true);
+    DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast");
+    Insn *ccmpInsn = BuildCcmpInsn(ccCode, cmpInsn2);
+    if (ccmpInsn == nullptr) {
+        return false;
+    }
+
+    /* insert the ccmp Insn */
+    firstIfBB->InsertInsnBefore(*branchInsn1, *ccmpInsn);
+
+    /* remove secondIfBB */
+    BB *nextBB = secondIfBB.GetNext();
+    cgFunc->GetTheCFG()->RemoveBB(secondIfBB);
+    firstIfBB->PushFrontSuccs(*nextBB);
+    nextBB->PushFrontPreds(*firstIfBB);
+    return true;
+}
+/*
+ * all preds of the candidate goto BB must be if-BBs
+ */
+bool AArch64ICOMorePredsPattern::Optimize(BB &curBB) {
+    if (curBB.GetKind() != BB::kBBGoto) {
+        return false;
+    }
+    for (BB *preBB : curBB.GetPreds()) {
+        if (preBB->GetKind() != BB::kBBIf) {
+            return false;
+        }
+    }
+    for (BB *succsBB : curBB.GetSuccs()) {
+        if (succsBB->GetKind() != BB::kBBFallthru) {
+            return false;
+        }
+        if (succsBB->NumPreds() > 2) {
+            return false;
+        }
+    }
+    Insn *gotoBr = curBB.GetLastMachineInsn();
+    DEBUG_ASSERT(gotoBr != nullptr, "gotoBr should not be nullptr");
+    auto &gotoLabel = static_cast<LabelOperand&>(gotoBr->GetOperand(gotoBr->GetOperandSize() - 1));
+    for (BB *preBB : curBB.GetPreds()) {
+        Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB);
+        DEBUG_ASSERT(condBr != nullptr, "nullptr check");
+        Operand &condBrLastOpnd = condBr->GetOperand(condBr->GetOperandSize() - 1);
+        DEBUG_ASSERT(condBrLastOpnd.IsLabelOpnd(), "a label operand must exist in the branch insn");
+        auto &labelOpnd = static_cast<LabelOperand&>(condBrLastOpnd);
+        if (labelOpnd.GetLabelIndex() != curBB.GetLabIdx()) {
+            return false;
+        }
+        if (gotoLabel.GetLabelIndex() != preBB->GetNext()->GetLabIdx()) {
+            /* do not if-convert if an 'else' clause is present */
+            return false;
+        }
+    }
+    return DoOpt(curBB);
+}
+
+/* the goto BB must contain only mov insns plus the final branch */
+bool AArch64ICOMorePredsPattern::CheckGotoBB(BB &gotoBB, std::vector<Insn*> &movInsn) const {
+    FOR_BB_INSNS(insn, &gotoBB) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        if (insn->IsMove()) {
+            movInsn.push_back(insn);
+            continue;
+        }
+        if (insn->GetId() != gotoBB.GetLastInsn()->GetId()) {
+            return false;
+        } else if (!insn->IsBranch()) { /* the last insn must be a branch */
+            return false;
+        }
+    }
+    return true;
+}
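+
+/* Illustrative sketch (not from the original patch): MovToCsel below turns each mov of
+ * the goto BB into a csel guarded by the predecessor's branch condition, e.g.
+ *     mov w0, w1      ===>   csel w0, w1, w0, <cc of preBB's branch>
+ * so the goto BB can be deleted and each preBB branches directly to the goto target. */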
regOpnd1.GetSize() || (regOpnd0.IsOfIntClass() != regOpnd1.IsOfIntClass())) { + return false; + } + /* The branchOpnd0 cannot be modified for csel. */ + regno_t movRegNo0 = static_cast(opnd0).GetRegisterNumber(); + if (branchOpnd0.IsRegister() && branchRegNo == movRegNo0) { + return false; + } + uint32 dSize = regOpnd0.GetSize(); + bool isIntTy = regOpnd0.IsOfIntClass(); + MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xcselrrrc : MOP_wcselrrrc) + : (dSize == k64BitSize ? MOP_dcselrrrc : (dSize == k32BitSize ? + MOP_scselrrrc : MOP_hcselrrrc)); + cselInsn.emplace_back(&cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd0, cond, rflag)); + } + if (cselInsn.size() < 1) { + return false; + } + return true; +} + +bool AArch64ICOMorePredsPattern::DoOpt(BB &gotoBB) { + std::vector movInsn; + std::vector> presCselInsn; + std::vector presBB; + Insn *branchInsn = gotoBB.GetLastMachineInsn(); + if (branchInsn == nullptr || !branchInsn->IsUnCondBranch()) { + return false; + } + /* get preds's new label */ + std::vector labelOpnd = GetLabelOpnds(*branchInsn); + if (labelOpnd.size() != 1) { + return false; + } + if (!CheckGotoBB(gotoBB, movInsn)) { + return false; + } + /* Check all preBB, Exclude gotoBBs that cannot be optimized. */ + for (BB *preBB : gotoBB.GetPreds()) { + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + DEBUG_ASSERT(condBr != nullptr, "nullptr check"); + + /* tbz/cbz will not be optimized */ + MOperator mOperator = condBr->GetMachineOpcode(); + if (!CheckMop(mOperator)) { + return false; + } + std::vector cselInsn; + if (!MovToCsel(movInsn, cselInsn, *condBr)) { + return false; + } + if (cselInsn.size() < 1) { + return false; + } + presCselInsn.emplace_back(cselInsn); + presBB.emplace_back(preBB); + } + /* modifies presBB */ + for (size_t i = 0; i < presCselInsn.size(); ++i) { + BB *preBB = presBB[i]; + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + std::vector cselInsn = presCselInsn[i]; + /* insert csel insn */ + for (Insn *csel : cselInsn) { + preBB->InsertInsnBefore(*condBr, *csel); + } + /* new condBr */ + condBr->SetOperand(condBr->GetOperandSize() - 1, *labelOpnd[0]); + } + /* Remove branches and merge gotoBB */ + cgFunc->GetTheCFG()->RemoveBB(gotoBB); + return true; +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e72c31548c3806fda021de2c0f008b09c6c4bd0f --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp @@ -0,0 +1,556 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_insn.h" +#include "aarch64_cg.h" +#include "common_utils.h" +#include "insn.h" +#include "metadata_layout.h" +#include + +namespace maplebe { + +void A64OpndEmitVisitor::EmitIntReg(const RegOperand &v, uint8 opndSz) { + CHECK_FATAL(v.GetRegisterType() == kRegTyInt, "wrong Type"); + uint8 opndSize = (opndSz == kMaxSimm32) ? v.GetSize() : opndSz; + DEBUG_ASSERT((opndSize == k32BitSize || opndSize == k64BitSize), "illegal register size"); +#ifdef USE_32BIT_REF + bool r32 = (opndSize == k32BitSize) || isRefField; +#else + bool r32 = (opndSize == k32BitSize); +#endif /* USE_32BIT_REF */ + (void)emitter.Emit(AArch64CG::intRegNames[(r32 ? AArch64CG::kR32List : AArch64CG::kR64List)][v.GetRegisterNumber()]); +} + +void A64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { + DEBUG_ASSERT(opndProp == nullptr || opndProp->IsRegister(), + "operand type doesn't match"); + uint32 size = v->GetSize(); + regno_t regNO = v->GetRegisterNumber(); + uint8 opndSize = (opndProp != nullptr) ? opndProp->GetSize() : size; + switch (v->GetRegisterType()) { + case kRegTyInt: { + EmitIntReg(*v, opndSize); + break; + } + case kRegTyFloat: { + DEBUG_ASSERT((opndSize == k8BitSize || opndSize == k16BitSize || opndSize == k32BitSize || + opndSize == k64BitSize || opndSize == k128BitSize), "illegal register size"); + if (opndProp->IsVectorOperand() && v->GetVecLaneSize() != 0) { + EmitVectorOperand(*v); + } else { + /* FP reg cannot be reffield. 8~0, 16~1, 32~2, 64~3. 8 is 1000b, has 3 zero. */ + uint32 regSet = __builtin_ctz(static_cast(opndSize)) - 3; + (void)emitter.Emit(AArch64CG::intRegNames[regSet][regNO]); + } + break; + } + default: + DEBUG_ASSERT(false, "NYI"); + break; + } +} + +void A64OpndEmitVisitor::Visit(maplebe::ImmOperand *v) { + if (v->IsOfstImmediate()) { + return Visit(static_cast(v)); + } + + int64 value = v->GetValue(); + if (!v->IsFmov()) { + (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#") + .Emit((v->GetSize() == k64BitSize) ? value : static_cast(static_cast(value))); + return; + } + if (v->GetKind() == Operand::kOpdFPImmediate) { + CHECK_FATAL(value == 0, "NIY"); + emitter.Emit("#0.0"); + } + /* + * compute float value + * use top 4 bits expect MSB of value . then calculate its fourth power + */ + int32 exp = static_cast((((static_cast(value) & 0x70) >> 4) ^ 0x4) - 3); + /* use the lower four bits of value in this expression */ + const float mantissa = 1.0 + (static_cast(static_cast(value) & 0xf) / 16.0); + float result = std::pow(2, exp) * mantissa; + + std::stringstream ss; + ss << std::setprecision(10) << result; + std::string res; + ss >> res; + size_t dot = res.find('.'); + if (dot == std::string::npos) { + res += ".0"; + dot = res.find('.'); + CHECK_FATAL(dot != std::string::npos, "cannot find in string"); + } + (void)res.erase(dot, 1); + std::string integer(res, 0, 1); + std::string fraction(res, 1); + while (fraction.size() != 1 && fraction[fraction.size() - 1] == '0') { + fraction.pop_back(); + } + /* fetch the sign bit of this value */ + std::string sign = static_cast(value) & 0x80 ? "-" : ""; + (void)emitter.Emit(sign + integer + "." 
+ fraction + "e+").Emit(static_cast(dot) - 1); +} + +void A64OpndEmitVisitor::Visit(maplebe::MemOperand *v) { + auto a64v = static_cast(v); + MemOperand::AArch64AddressingMode addressMode = a64v->GetAddrMode(); +#if DEBUG + const InsnDesc *md = &AArch64CG::kMd[emitter.GetCurrentMOP()]; + bool isLDSTpair = md->IsLoadStorePair(); + DEBUG_ASSERT(md->Is64Bit() || md->GetOperandSize() <= k32BitSize || md->GetOperandSize() == k128BitSize, + "unexpected opnd size"); +#endif + if (addressMode == MemOperand::kAddrModeBOi) { + (void)emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + DEBUG_ASSERT(baseReg != nullptr, "expect an RegOperand here"); + uint32 baseSize = baseReg->GetSize(); + if (baseSize != k64BitSize) { + baseReg->SetSize(k64BitSize); + } + EmitIntReg(*baseReg); + baseReg->SetSize(baseSize); + OfstOperand *offset = a64v->GetOffsetImmediate(); + if (offset != nullptr) { +#ifndef USE_32BIT_REF /* can be load a ref here */ + /* + * Cortex-A57 Software Optimization Guide: + * The ARMv8-A architecture allows many types of load and store accesses to be arbitrarily aligned. + * The Cortex- A57 processor handles most unaligned accesses without performance penalties. + */ +#if DEBUG + if (a64v->IsOffsetMisaligned(md->GetOperandSize())) { + INFO(kLncInfo, "The Memory operand's offset is misaligned:", ""); + + } +#endif +#endif /* USE_32BIT_REF */ + if (a64v->IsPostIndexed()) { + DEBUG_ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + (void)emitter.Emit("]"); + if (!offset->IsZero()) { + (void)emitter.Emit(", "); + Visit(offset); + } + } else if (a64v->IsPreIndexed()) { + DEBUG_ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + if (!offset->IsZero()) { + (void)emitter.Emit(","); + Visit(offset); + } + (void)emitter.Emit("]!"); + } else { + if (CGOptions::IsPIC() && (offset->IsSymOffset() || offset->IsSymAndImmOffset()) && + (offset->GetSymbol()->NeedPIC() || offset->GetSymbol()->IsThreadLocal())) { + std::string gotEntry = offset->GetSymbol()->IsThreadLocal() ? 
", #:tlsdesc_lo12:" : ", #:got_lo12:"; + (void)emitter.Emit(gotEntry + offset->GetSymbolName()); + } else { + if (!offset->IsZero()) { + (void)emitter.Emit(","); + Visit(offset); + } + } + (void)emitter.Emit("]"); + } + } else { + (void)emitter.Emit("]"); + } + } else if (addressMode == MemOperand::kAddrModeBOrX) { + /* + * Base plus offset | [base{, #imm}] [base, Xm{, LSL #imm}] [base, Wm, (S|U)XTW {#imm}] + * offset_opnds=nullptr + * offset_opnds=64 offset_opnds=32 + * imm=0 or 3 imm=0 or 2, s/u + */ + (void)emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + // After ssa version support different size, the value is changed back + baseReg->SetSize(k64BitSize); + + EmitIntReg(*baseReg); + (void)emitter.Emit(","); + EmitIntReg(*a64v->GetIndexRegister()); + if (a64v->ShouldEmitExtend() || v->GetBaseRegister()->GetSize() > a64v->GetIndexRegister()->GetSize()) { + (void)emitter.Emit(","); + /* extend, #0, of #3/#2 */ + (void)emitter.Emit(a64v->GetExtendAsString()); + if (a64v->GetExtendAsString() == "LSL" || a64v->ShiftAmount() != 0) { + (void)emitter.Emit(" #"); + (void)emitter.Emit(a64v->ShiftAmount()); + } + } + (void)emitter.Emit("]"); + } else if (addressMode == MemOperand::kAddrModeLiteral) { + CHECK_FATAL(opndProp != nullptr, "prop is nullptr in MemOperand::Emit"); + if (opndProp->IsMemLow12()) { + (void)emitter.Emit("#:lo12:"); + } + (void)emitter.Emit(v->GetSymbol()->GetName()); + } else if (addressMode == MemOperand::kAddrModeLo12Li) { + (void)emitter.Emit("["); + EmitIntReg(*v->GetBaseRegister()); + + OfstOperand *offset = a64v->GetOffsetImmediate(); + DEBUG_ASSERT(offset != nullptr, "nullptr check"); + + (void)emitter.Emit(", #:lo12:"); + if (v->GetSymbol()->GetAsmAttr() != UStrIdx(0) && + (v->GetSymbol()->GetStorageClass() == kScPstatic || v->GetSymbol()->GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(v->GetSymbol()->GetAsmAttr()); + (void)emitter.Emit(asmSection); + } else { + if (v->GetSymbol()->GetStorageClass() == kScPstatic && v->GetSymbol()->IsLocal()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)emitter.Emit(a64v->GetSymbolName() + std::to_string(pIdx)); + } else { + (void)emitter.Emit(a64v->GetSymbolName()); + } + } + if (!offset->IsZero()) { + (void)emitter.Emit("+"); + (void)emitter.Emit(std::to_string(offset->GetOffsetValue())); + } + (void)emitter.Emit("]"); + } else { + DEBUG_ASSERT(false, "nyi"); + } +} + +void A64OpndEmitVisitor::Visit(LabelOperand *v) { + emitter.EmitLabelRef(v->GetLabelIndex()); +} + +void A64OpndEmitVisitor::Visit(CondOperand *v) { + (void)emitter.Emit(CondOperand::ccStrs[v->GetCode()]); +} + +void A64OpndEmitVisitor::Visit(ExtendShiftOperand *v) { + DEBUG_ASSERT(v->GetShiftAmount() <= k4BitSize && v->GetShiftAmount() >= 0, + "shift amount out of range in ExtendShiftOperand"); + auto emitExtendShift = [this, v](const std::string &extendKind)->void { + (void)emitter.Emit(extendKind); + if (v->GetShiftAmount() != 0) { + (void)emitter.Emit(" #").Emit(v->GetShiftAmount()); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + emitExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + emitExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + emitExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + emitExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + emitExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + emitExtendShift("SXTH"); + break; + case 
ExtendShiftOperand::kSXTW: + emitExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + emitExtendShift("SXTX"); + break; + default: + DEBUG_ASSERT(false, "should not be here"); + break; + } +} + +void A64OpndEmitVisitor::Visit(BitShiftOperand *v) { + (void)emitter.Emit((v->GetShiftOp() == BitShiftOperand::kLSL) ? "LSL #" : + ((v->GetShiftOp() == BitShiftOperand::kLSR) ? "LSR #" : "ASR #")).Emit(v->GetShiftAmount()); +} + +void A64OpndEmitVisitor::Visit(StImmOperand *v) { + CHECK_FATAL(opndProp != nullptr, "opndProp is nullptr in StImmOperand::Emit"); + const MIRSymbol *symbol = v->GetSymbol(); + const bool isThreadLocal = symbol->IsThreadLocal(); + const bool isLiteralLow12 = opndProp->IsLiteralLow12(); + const bool hasGotEntry = CGOptions::IsPIC() && symbol->NeedPIC(); + bool hasPrefix = false; + if (isThreadLocal) { + (void)emitter.Emit(":tlsdesc"); + hasPrefix = true; + } + if (!hasPrefix && hasGotEntry) { + (void)emitter.Emit(":got"); + hasPrefix = true; + } + if (isLiteralLow12) { + std::string lo12String = hasPrefix ? "_lo12" : ":lo12"; + (void)emitter.Emit(lo12String); + hasPrefix = true; + } + if (hasPrefix) { + (void)emitter.Emit(":"); + } + if (symbol->GetAsmAttr() != UStrIdx(0) && + (symbol->GetStorageClass() == kScPstatic || symbol->GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + (void)emitter.Emit(asmSection); + } else { + if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + (void)emitter.Emit(symbol->GetName() + + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + (void)emitter.Emit(v->GetName()); + } + } + if (!hasGotEntry && v->GetOffset() != 0) { + (void)emitter.Emit("+" + std::to_string(v->GetOffset())); + } +} + +void A64OpndEmitVisitor::Visit(FuncNameOperand *v) { + (void)emitter.Emit(v->GetName()); +} + +void A64OpndEmitVisitor::Visit(CommentOperand *v) { + (void)emitter.Emit(v->GetComment()); +} + +void A64OpndEmitVisitor::Visit(ListOperand *v) { + (void)opndProp; + size_t nLeft = v->GetOperands().size(); + if (nLeft == 0) { + return; + } + + for (auto it = v->GetOperands().begin(); it != v->GetOperands().end(); ++it) { + Visit(*it); + if (--nLeft >= 1) { + (void)emitter.Emit(", "); + } + } +} + +void A64OpndEmitVisitor::Visit(OfstOperand *v) { + int64 value = v->GetValue(); + if (v->IsImmOffset()) { + (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#") + .Emit((v->GetSize() == k64BitSize) ? 
value : static_cast(static_cast(value))); + return; + } + const MIRSymbol *symbol = v->GetSymbol(); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + (void)emitter.Emit(":got:" + symbol->GetName()); + } else if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + (void)emitter.Emit(symbol->GetName() + + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + (void)emitter.Emit(symbol->GetName()); + } + if (value != 0) { + (void)emitter.Emit("+" + std::to_string(value)); + } +} + +void A64OpndEmitVisitor::EmitVectorOperand(const RegOperand &v) { + std::string width; + switch (v.GetVecElementSize()) { + case k8BitSize: + width = "b"; + break; + case k16BitSize: + width = "h"; + break; + case k32BitSize: + width = "s"; + break; + case k64BitSize: + width = "d"; + break; + default: + CHECK_FATAL(false, "unexpected value size for vector element"); + break; + } + (void)emitter.Emit(AArch64CG::vectorRegNames[v.GetRegisterNumber()]); + int32 lanePos = v.GetVecLanePosition(); + if (lanePos == -1) { + (void)emitter.Emit("." + std::to_string(v.GetVecLaneSize()) + width); + } else { + (void)emitter.Emit("." + width + "[" + std::to_string(lanePos) + "]"); + } +} + +void A64OpndDumpVisitor::Visit(RegOperand *v) { + std::array prims = { "U", "R", "V", "C", "X", "Vra" }; + std::array classes = { "[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]" }; + uint32 regType = v->GetRegisterType(); + DEBUG_ASSERT(regType < kRegTyLast, "unexpected regType"); + + regno_t reg = v->GetRegisterNumber(); + reg = v->IsVirtualRegister() ? reg : (reg - 1); + uint32 vb = v->GetValidBitsNum(); + LogInfo::MapleLogger() << (v->IsVirtualRegister() ? "vreg:" : " reg:") << prims[regType] << reg << " " << classes[regType]; + if (v->GetValidBitsNum() != v->GetSize()) { + LogInfo::MapleLogger() << " Vb: [" << vb << "]"; + } + LogInfo::MapleLogger() << " Sz: [" << v->GetSize() << "]" ; +} + +void A64OpndDumpVisitor::Visit(ImmOperand *v) { + LogInfo::MapleLogger() << "imm:" << v->GetValue(); +} + +void A64OpndDumpVisitor::Visit(MemOperand *a64v) { + LogInfo::MapleLogger() << "Mem:"; + LogInfo::MapleLogger() << " size:" << a64v->GetSize() << " "; + LogInfo::MapleLogger() << " isStack:" << a64v->IsStackMem() << "-" << a64v->IsStackArgMem() << " "; + switch (a64v->GetAddrMode()) { + case MemOperand::kAddrModeBOi: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetOffsetOperand()); + switch (a64v->GetIndexOpt()) { + case MemOperand::kIntact: + LogInfo::MapleLogger() << " intact"; + break; + case MemOperand::kPreIndex: + LogInfo::MapleLogger() << " pre-index"; + break; + case MemOperand::kPostIndex: + LogInfo::MapleLogger() << " post-index"; + break; + default: + break; + } + break; + } + case MemOperand::kAddrModeBOrX: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetIndexRegister()); + LogInfo::MapleLogger() << " " << a64v->GetExtendAsString(); + LogInfo::MapleLogger() << " shift: " << a64v->ShiftAmount(); + LogInfo::MapleLogger() << " extend: " << a64v->GetExtendAsString(); + break; + } + case MemOperand::kAddrModeLiteral: + LogInfo::MapleLogger() << "literal: " << a64v->GetSymbolName(); + break; + case MemOperand::kAddrModeLo12Li: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + OfstOperand *offOpnd = a64v->GetOffsetImmediate(); + 
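+            /*
+             * Editor's note: the statements below dump a line of roughly this
+             * shape (symbol name and sizes are illustrative, not produced by
+             * any specific test):
+             *   base: reg:R0 [I] Sz: [64] offset:#:lo12:g_var+16
+             */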
LogInfo::MapleLogger() << "#:lo12:"; + if (a64v->GetSymbol()->GetStorageClass() == kScPstatic && a64v->GetSymbol()->IsLocal()) { + PUIdx pIdx = CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetPuidx(); + LogInfo::MapleLogger() << a64v->GetSymbolName() << std::to_string(pIdx); + } else { + LogInfo::MapleLogger() << a64v->GetSymbolName(); + } + LogInfo::MapleLogger() << "+" << std::to_string(offOpnd->GetOffsetValue()); + break; + } + default: + DEBUG_ASSERT(false, "error memoperand dump"); + break; + } +} + +void A64OpndDumpVisitor::Visit(CondOperand *v) { + LogInfo::MapleLogger() << "CC: " << CondOperand::ccStrs[v->GetCode()]; +} +void A64OpndDumpVisitor::Visit(StImmOperand *v) { + LogInfo::MapleLogger() << v->GetName(); + LogInfo::MapleLogger() << "+offset:" << v->GetOffset(); +} +void A64OpndDumpVisitor::Visit(BitShiftOperand *v) { + BitShiftOperand::ShiftOp shiftOp = v->GetShiftOp(); + uint32 shiftAmount = v->GetShiftAmount(); + LogInfo::MapleLogger() << ((shiftOp == BitShiftOperand::kLSL) ? "LSL: " : + ((shiftOp == BitShiftOperand::kLSR) ? "LSR: " : "ASR: ")); + LogInfo::MapleLogger() << shiftAmount; +} +void A64OpndDumpVisitor::Visit(ExtendShiftOperand *v) { + auto dumpExtendShift = [v](const std::string &extendKind)->void { + LogInfo::MapleLogger() << extendKind; + if (v->GetShiftAmount() != 0) { + LogInfo::MapleLogger() << " : " << v->GetShiftAmount(); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + dumpExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + dumpExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + dumpExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + dumpExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + dumpExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + dumpExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + dumpExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + dumpExtendShift("SXTX"); + break; + default: + DEBUG_ASSERT(false, "should not be here"); + break; + } +} +void A64OpndDumpVisitor::Visit(LabelOperand *v) { + LogInfo::MapleLogger() << "label:" << v->GetLabelIndex(); +} +void A64OpndDumpVisitor::Visit(FuncNameOperand *v) { + LogInfo::MapleLogger() << "func :" << v->GetName(); +} +void A64OpndDumpVisitor::Visit(CommentOperand *v) { + LogInfo::MapleLogger() << " #" << v->GetComment(); +} +void A64OpndDumpVisitor::Visit(PhiOperand *v) { + auto &phiList = v->GetOperands(); + for (auto it = phiList.begin(); it != phiList.end();) { + Visit(it->second); + LogInfo::MapleLogger() << " fBB<" << it->first << ">"; + LogInfo::MapleLogger() << (++it == phiList.end() ? "" : " ,"); + } +} +void A64OpndDumpVisitor::Visit(ListOperand *v) { + auto &opndList = v->GetOperands(); + for (auto it = opndList.begin(); it != opndList.end();) { + Visit(*it); + LogInfo::MapleLogger() << (++it == opndList.end() ? "" : " ,"); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2e322aa7fcc5925fc74cfc006df13ad5fb284d58 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_isa.h" +#include "insn.h" + +namespace maplebe { +/* + * Get the ldp/stp corresponding to ldr/str + * mop : a ldr or str machine operator + */ +MOperator GetMopPair(MOperator mop) { + switch (mop) { + case MOP_xldr: + return MOP_xldp; + case MOP_wldr: + return MOP_wldp; + case MOP_xstr: + return MOP_xstp; + case MOP_wstr: + return MOP_wstp; + case MOP_dldr: + return MOP_dldp; + case MOP_qldr: + return MOP_qldp; + case MOP_sldr: + return MOP_sldp; + case MOP_dstr: + return MOP_dstp; + case MOP_sstr: + return MOP_sstp; + case MOP_qstr: + return MOP_qstp; + default: + DEBUG_ASSERT(false, "should not run here"); + return MOP_undef; + } +} +namespace AArch64isa { +MOperator FlipConditionOp(MOperator flippedOp) { + switch (flippedOp) { + case AArch64MOP_t::MOP_beq: + return AArch64MOP_t::MOP_bne; + case AArch64MOP_t::MOP_bge: + return AArch64MOP_t::MOP_blt; + case AArch64MOP_t::MOP_bgt: + return AArch64MOP_t::MOP_ble; + case AArch64MOP_t::MOP_bhi: + return AArch64MOP_t::MOP_bls; + case AArch64MOP_t::MOP_bhs: + return AArch64MOP_t::MOP_blo; + case AArch64MOP_t::MOP_ble: + return AArch64MOP_t::MOP_bgt; + case AArch64MOP_t::MOP_blo: + return AArch64MOP_t::MOP_bhs; + case AArch64MOP_t::MOP_bls: + return AArch64MOP_t::MOP_bhi; + case AArch64MOP_t::MOP_blt: + return AArch64MOP_t::MOP_bge; + case AArch64MOP_t::MOP_bne: + return AArch64MOP_t::MOP_beq; + case AArch64MOP_t::MOP_bpl: + return AArch64MOP_t::MOP_bmi; + case AArch64MOP_t::MOP_xcbnz: + return AArch64MOP_t::MOP_xcbz; + case AArch64MOP_t::MOP_wcbnz: + return AArch64MOP_t::MOP_wcbz; + case AArch64MOP_t::MOP_xcbz: + return AArch64MOP_t::MOP_xcbnz; + case AArch64MOP_t::MOP_wcbz: + return AArch64MOP_t::MOP_wcbnz; + case AArch64MOP_t::MOP_wtbnz: + return AArch64MOP_t::MOP_wtbz; + case AArch64MOP_t::MOP_wtbz: + return AArch64MOP_t::MOP_wtbnz; + case AArch64MOP_t::MOP_xtbnz: + return AArch64MOP_t::MOP_xtbz; + case AArch64MOP_t::MOP_xtbz: + return AArch64MOP_t::MOP_xtbnz; + default: + break; + } + return AArch64MOP_t::MOP_undef; +} + +uint32 GetJumpTargetIdx(const Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + /* unconditional jump */ + case MOP_xuncond: { + return kInsnFirstOpnd; + } + case MOP_xbr: { + DEBUG_ASSERT(insn.GetOperandSize() == 2, "ERR"); + return kInsnSecondOpnd; + } + /* conditional jump */ + case MOP_bmi: + case MOP_bvc: + case MOP_bls: + case MOP_blt: + case MOP_ble: + case MOP_blo: + case MOP_beq: + case MOP_bpl: + case MOP_bhs: + case MOP_bvs: + case MOP_bhi: + case MOP_bgt: + case MOP_bge: + case MOP_bne: + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + return kInsnSecondOpnd; + } + case MOP_wtbz: + case MOP_xtbz: + case MOP_wtbnz: + case MOP_xtbnz: { + return kInsnThirdOpnd; + } + default: + CHECK_FATAL(false, "Not a jump insn"); + } + return kInsnFirstOpnd; +} +} /* namespace AArch64isa */ +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2880c0bfd8fb5011e6f17ed315b02b9e4d75fe92 --- /dev/null +++ 
b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_live.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64LiveAnalysis::GenerateReturnBBDefUse(BB &bb) const { + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + auto *aarchCGFunc = static_cast(cgFunc); + if (IsPrimitiveFloat(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpnd, false, true); + } else if (IsPrimitiveInteger(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpnd, false, true); + } +} + +void AArch64LiveAnalysis::InitEhDefine(BB &bb) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + /* Insert MOP_pseudo_eh_def_x R1. */ + RegOperand ®R1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regR1); + bb.InsertInsnBegin(pseudoInsn1); + + /* Insert MOP_pseudo_eh_def_x R0. */ + RegOperand ®R0 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &pseudoInsn2 = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regR0); + bb.InsertInsnBegin(pseudoInsn2); +} + +bool AArch64LiveAnalysis::CleanupBBIgnoreReg(regno_t reg) { + regno_t regNO = reg + R0; + if (regNO < R8 || (RLR <= regNO && regNO <= RZR)) { + return true; + } + return false; +} + +void AArch64LiveAnalysis::ProcessCallInsnParam(BB &bb, const Insn &insn) const { + /* R0 ~ R7(R0 + 0 ~ R0 + 7) and V0 ~ V7 (V0 + 0 ~ V0 + 7) is parameter register */ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + RegOperand *opnd = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(preg), k64BitSize, + AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? 
kRegTyFloat : kRegTyInt); + CollectLiveInfo(bb, *opnd, true, false); + } + return; + } + } + for (uint32 i = 0; i < 8; ++i) { + Operand &phyOpndR = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0 + i), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpndR, true, false); + Operand &phyOpndV = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0 + i), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpndV, true, false); + } +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..184307d6739717c3c9fa66700f55ebb90f3ca080 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -0,0 +1,579 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +#include "becommon.h" +#include "mir_nodes.h" + +namespace maplebe { +using namespace maple; + +/* + * Returns stack space required for a call + * which is used to pass arguments that cannot be + * passed through registers + */ +uint32 AArch64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) { + /* instantiate a parm locator */ + AArch64CallConvImpl parmLocator(be); + uint32 sizeOfArgsToStkPass = 0; + size_t i = 0; + /* An indirect call's first operand is the invocation target */ + if (isIcall) { + ++i; + } + + if (std::strcmp(stmt.GetOpName(), "call") == 0) { + CallNode *callNode = static_cast(&stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + CHECK_FATAL(fn != nullptr, "get MIRFunction failed"); + MIRSymbol *symbol = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + if (symbol->GetName() == "MCC_CallFastNative" || symbol->GetName() == "MCC_CallFastNativeExt" || + symbol->GetName() == "MCC_CallSlowNative0" || symbol->GetName() == "MCC_CallSlowNative1" || + symbol->GetName() == "MCC_CallSlowNative2" || symbol->GetName() == "MCC_CallSlowNative3" || + symbol->GetName() == "MCC_CallSlowNative4" || symbol->GetName() == "MCC_CallSlowNative5" || + symbol->GetName() == "MCC_CallSlowNative6" || symbol->GetName() == "MCC_CallSlowNative7" || + symbol->GetName() == "MCC_CallSlowNative8" || symbol->GetName() == "MCC_CallSlowNativeExt") { + ++i; + } + } + + aggCopySize = 0; + for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) { + BaseNode *opnd = stmt.Opnd(i); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(opnd->GetPrimType())]; + } else { + Opcode opndOpcode = opnd->GetOpCode(); + DEBUG_ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread"); + if (opndOpcode == OP_dread) { + DreadNode *dread = static_cast(opnd); + MIRSymbol *sym = 
be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } + } + } else { + /* OP_iread */ + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + DEBUG_ASSERT(ty->GetKind() == kTypePointer, "expect pointer"); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } + } + } + } + CCLocInfo ploc; + aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { + continue; /* passed in register, so no effect on actual area */ + } + sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize()); + } + return sizeOfArgsToStkPass; +} + +void AArch64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const { + if (be.GetTypeSize(typeIdx) > k16ByteSize) { + /* size > 16 is passed on stack, the formal is just a pointer to the copy on stack. */ + if (CGOptions::IsArm64ilp32()) { + align = k8ByteSize; + size = k8ByteSize; + } else { + align = GetPointerSize(); + size = GetPointerSize(); + } + } else { + align = be.GetTypeAlign(typeIdx); + size = static_cast(be.GetTypeSize(typeIdx)); + } +} + +void AArch64MemLayout::SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const { + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(typeIdx, size, align); + segment.SetSize(static_cast(RoundUp(static_cast(segment.GetSize()), align))); + symbolAlloc.SetOffset(segment.GetSize()); + segment.SetSize(segment.GetSize() + size); + segment.SetSize(static_cast(RoundUp(static_cast(segment.GetSize()), GetPointerSize()))); +} + +void AArch64MemLayout::LayoutVarargParams() { + uint32 nIntRegs = 0; + uint32 nFpRegs = 0; + AArch64CallConvImpl parmlocator(be); + CCLocInfo ploc; + MIRFunction *func = mirFunction; + if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) { + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + if (i == 0) { + if (func->IsFirstArgReturn() && func->GetReturnType()->GetPrimType() != PTY_void) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) { + continue; + } + } + } + MIRType *ty = func->GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, func); + if (ploc.reg0 != kRinvalid) { + if (ploc.reg0 >= R0 && ploc.reg0 <= R7) { + nIntRegs++; + } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) { + nFpRegs++; + } + } + if (ploc.reg1 != kRinvalid) { + if (ploc.reg1 >= R0 && ploc.reg1 <= R7) { + nIntRegs++; + } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) { + nFpRegs++; + } + } + if (ploc.reg2 != kRinvalid) { + if (ploc.reg2 >= R0 && ploc.reg2 <= R7) { + nIntRegs++; + } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) { + 
nFpRegs++;
+                }
+            }
+            if (ploc.reg3 != kRinvalid) {
+                if (ploc.reg3 >= R0 && ploc.reg3 <= R7) {
+                    nIntRegs++;
+                } else if (ploc.reg3 >= V0 && ploc.reg3 <= V7) {
+                    nFpRegs++;
+                }
+            }
+        }
+        if (CGOptions::IsArm64ilp32()) {
+            SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * k8ByteSize);
+        } else {
+            SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * GetPointerSize());
+        }
+        if (CGOptions::UseGeneralRegOnly()) {
+            SetSizeOfVRSaveArea(0);
+        } else {
+            if (CGOptions::IsArm64ilp32()) {
+                SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * k8ByteSize * k2ByteSize);
+            } else {
+                SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * GetPointerSize() * k2ByteSize);
+            }
+        }
+    }
+}
+
+void AArch64MemLayout::LayoutFormalParams() {
+    bool isLmbc = (be.GetMIRModule().GetFlavor() == kFlavorLmbc);
+    if (isLmbc && mirFunction->GetFormalCount() == 0) {
+        /*
+         * lmbc : upformalsize - size of formals passed from caller's frame into current function
+         *        framesize - total frame size of current function used by Maple IR
+         *        outparmsize - portion of frame size of current function used by call parameters
+         */
+        segArgsStkPassed.SetSize(mirFunction->GetOutParmSize());
+        segArgsRegPassed.SetSize(mirFunction->GetOutParmSize());
+        return;
+    }
+
+    AArch64CallConvImpl parmLocator(be);
+    CCLocInfo ploc;
+    for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
+        MIRSymbol *sym = mirFunction->GetFormal(i);
+        uint32 stIndex = sym->GetStIndex();
+        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
+        SetSymAllocInfo(stIndex, *symLoc);
+        if (i == 0) {
+            if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) {
+                symLoc->SetMemSegment(GetSegArgsRegPassed());
+                symLoc->SetOffset(GetSegArgsRegPassed().GetSize());
+                TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx();
+                if (be.GetTypeSize(tyIdx.GetIdx()) > k16ByteSize) {
+                    if (CGOptions::IsArm64ilp32()) {
+                        segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize);
+                    } else {
+                        segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + GetPointerSize());
+                    }
+                }
+                continue;
+            }
+        }
+        MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx);
+        uint32 ptyIdx = ty->GetTypeIndex();
+        parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction);
+        if (ploc.reg0 != kRinvalid) { /* register */
+            symLoc->SetRegisters(static_cast<AArch64reg>(ploc.reg0), static_cast<AArch64reg>(ploc.reg1),
+                                 static_cast<AArch64reg>(ploc.reg2), static_cast<AArch64reg>(ploc.reg3));
+            if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) {
+                symLoc->SetMemSegment(segRefLocals);
+                SetSegmentSize(*symLoc, segRefLocals, ptyIdx);
+            } else if (!sym->IsPreg()) {
+                uint32 size;
+                uint32 align;
+                SetSizeAlignForTypeIdx(ptyIdx, size, align);
+                symLoc->SetMemSegment(GetSegArgsRegPassed());
+                /* the type's alignment requirement may be smaller than a register's byte size */
+                if (ty->GetPrimType() == PTY_agg) {
+                    /* struct param aligned on 8 byte boundary unless it is small enough */
+                    if (CGOptions::IsArm64ilp32()) {
+                        align = k8ByteSize;
+                    } else {
+                        align = GetPointerSize();
+                    }
+                }
+                uint32 tSize = 0;
+                if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) ||
+                    AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) {
+                    align = k16ByteSize;
+                }
+                segArgsRegPassed.SetSize(static_cast<uint32>(RoundUp(segArgsRegPassed.GetSize(), align)));
+                symLoc->SetOffset(segArgsRegPassed.GetSize());
+                segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size);
+            } else if (isLmbc) {
+                segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize);
+ } + } else { /* stack */ + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsStkPassed()); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), align))); + symLoc->SetOffset(segArgsStkPassed.GetSize()); + segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size); + /* We need it as dictated by the AArch64 ABI $5.4.2 C12 */ + if (CGOptions::IsArm64ilp32()) { + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize))); + } else { + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize()))); + } + if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + SetLocalRegLocInfo(sym->GetStIdx(), *symLoc); + AArch64SymbolAlloc *symLoc1 = memAllocator->GetMemPool()->New(); + symLoc1->SetMemSegment(segRefLocals); + SetSegmentSize(*symLoc1, segRefLocals, ptyIdx); + SetSymAllocInfo(stIndex, *symLoc1); + } + } + if (cgFunc->GetCG()->GetCGOptions().WithDwarf() && ploc.reg0 == kRinvalid) { + cgFunc->AddDIESymbolLocation(sym, symLoc); + } + } +} + +void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays) { + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { + segLocals.SetSize(mirFunction->GetFrameSize() - mirFunction->GetOutParmSize()); + return; + } + + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); + if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + CHECK_FATAL(!symLoc->IsRegister(), "expect not register"); + + if (sym->IsRefType()) { + if (mirFunction->GetRetRefSym().find(sym) != mirFunction->GetRetRefSym().end()) { + /* try to put ret_ref at the end of segRefLocals */ + returnDelays.emplace_back(sym); + continue; + } + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } else { + if (sym->GetName() == "__EARetTemp__" || + sym->GetName().substr(0, kEARetTempNameSize) == "__EATemp__") { + tempVar.emplace_back(sym); + continue; + } + symLoc->SetMemSegment(segLocals); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + uint32 align = be.GetTypeAlign(tyIdx); + uint32 tSize = 0; + if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) || + AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) { + align = k16ByteSize; + } + if (ty->GetPrimType() == PTY_agg && align < k8BitSize) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); + } + symLoc->SetOffset(segLocals.GetSize()); + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + } + if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) { + cgFunc->AddDIESymbolLocation(sym, symLoc); + } + } +} + +void AArch64MemLayout::LayoutEAVariales(std::vector &tempVar) { + for (auto sym : tempVar) { + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + 
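+        /*
+         * Editor's sketch of the allocation arithmetic applied below (numbers
+         * illustrative): if segRefLocals currently holds 12 bytes and the
+         * symbol's type needs 8-byte alignment and 8 bytes of storage, then
+         * RoundUp(12, 8) == 16 becomes the symbol's offset and the segment
+         * grows to 16 + 8 == 24.
+         */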
SetSymAllocInfo(stIndex, *symLoc); + DEBUG_ASSERT(!symLoc->IsRegister(), "expect not register"); + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } +} + +void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, + int32 &structCopySize, int32 &maxParmStackSize) { + for (auto sym : returnDelays) { + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + DEBUG_ASSERT(!symLoc->IsRegister(), "expect not register"); + + DEBUG_ASSERT(sym->IsRefType(), "expect reftype "); + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { + segArgsToStkPass.SetSize(mirFunction->GetOutParmSize() + kDivide2 * k8ByteSize); + } else { + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + } + maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + AssignSpillLocationsToPseudoRegisters(); + } else { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + /* 8-VirtualRegNode occupy byte number */ + aarchCGFunc->SetCatchRegno(cgFunc->NewVReg(kRegTyInt, 8)); + } + segRefLocals.SetSize(static_cast(RoundUp(segRefLocals.GetSize(), GetPointerSize()))); + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8ByteSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), GetPointerSize()))); + } +} + +void AArch64MemLayout::LayoutActualParams() { + for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { + if (i == 0) { + if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) { + continue; + } + } + MIRSymbol *sym = mirFunction->GetFormal(i); + if (sym->IsPreg()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + AArch64SymbolAlloc *symLoc = static_cast(GetSymAllocInfo(stIndex)); + if (symLoc->GetMemSegment() == &GetSegArgsRegPassed()) { /* register */ + /* + * In O0, we store parameters passed via registers into memory. + * So, each of such parameter needs to get assigned storage in stack. + * If a function parameter is never accessed in the function body, + * and if we don't create its memory operand here, its offset gets + * computed when the instruction to store its value into stack + * is generated in the prologue when its memory operand is created. + * But, the parameter would see a different StackFrameSize than + * the parameters that are accessed in the body, because + * the size of the storage for FP/LR is added to the stack frame + * size in between. + * To make offset assignment easier, we create a memory operand + * for each of function parameters in advance. + * This has to be done after all of formal parameters and local + * variables get assigned their respecitve storage, i.e. + * CallFrameSize (discounting callee-saved and FP/LR) is known. 
+             */
+            MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx);
+            uint32 ptyIdx = ty->GetTypeIndex();
+            static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreateMemOpnd(*sym, 0, be.GetTypeAlign(ptyIdx) * kBitsPerByte);
+        }
+    }
+}
+
+void AArch64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) {
+    LayoutVarargParams();
+    LayoutFormalParams();
+    /*
+     * We do need this as LDR/STR with immediate
+     * requires imm be aligned at a 8/4-byte boundary,
+     * and local variables may need 8-byte alignment.
+     */
+    if (CGOptions::IsArm64ilp32()) {
+        segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), k8ByteSize));
+        /* we do need this as SP has to be aligned at a 16-byte boundary */
+        segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize + k8ByteSize));
+    } else {
+        segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize()));
+        segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize()));
+    }
+    /* allocate the local variables in the stack */
+    std::vector<MIRSymbol*> EATempVar;
+    std::vector<MIRSymbol*> retDelays;
+    LayoutLocalVariables(EATempVar, retDelays);
+    LayoutEAVariales(EATempVar);
+
+    /* handle ret_ref sym now */
+    LayoutReturnRef(retDelays, structCopySize, maxParmStackSize);
+
+    /*
+     * for the actual arguments that cannot be passed through registers
+     * we need to allocate space for caller-save registers
+     */
+    LayoutActualParams();
+
+    fixStackSize = static_cast<int32>(RealStackFrameSize());
+    cgFunc->SetUseFP(cgFunc->UseFP() || fixStackSize > kMaxPimm32);
+}
+
+void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() {
+    MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab();
+
+    /* BUG: n_regs include index 0 which is not a valid preg index. */
+    size_t nRegs = pregTab->Size();
+    spillLocTable.resize(nRegs);
+    for (size_t i = 1; i < nRegs; ++i) {
+        PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType();
+        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
+        symLoc->SetMemSegment(segLocals);
+        segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType)));
+        symLoc->SetOffset(segLocals.GetSize());
+        MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType];
+        segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex()));
+        spillLocTable[i] = symLoc;
+    }
+
+    if (!cgFunc->GetMirModule().IsJavaModule()) {
+        return;
+    }
+
+    /*
+     * Allocate additional stack space for "thrownval".
+ * segLocals need 8 bit align + */ + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(RoundUp(segLocals.GetSize(), k8ByteSize)); + } else { + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPointerSize())); + } + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + RegOperand &baseOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand(); + int32 offset = static_cast(segLocals.GetSize()); + + OfstOperand *offsetOpnd = + &aarchCGFunc->CreateOfstOpnd(offset + k16BitSize, k64BitSize); + MemOperand *throwMem = aarchCGFunc->CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, baseOpnd, static_cast(nullptr), offsetOpnd, + nullptr); + aarchCGFunc->SetCatchOpnd(*throwMem); + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(segLocals.GetSize() + k8ByteSize); + } else { + segLocals.SetSize(segLocals.GetSize() + GetPointerSize()); + } +} + +SymbolAlloc *AArch64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segSpillReg); + uint32 regSize = cgFunc->IsExtendReg(vrNum) ? k8ByteSize : cgFunc->GetVRegSize(vrNum); + segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); + symLoc->SetOffset(segSpillReg.GetSize()); + segSpillReg.SetSize(segSpillReg.GetSize() + regSize); + SetSpillRegLocInfo(vrNum, *symLoc); + return symLoc; +} + +uint64 AArch64MemLayout::StackFrameSize() const { + uint64 total = segArgsRegPassed.GetSize() + static_cast(cgFunc)->SizeOfCalleeSaved() + + GetSizeOfRefLocals() + locals().GetSize() + GetSizeOfSpillReg(); + + if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + if (GetSizeOfGRSaveArea() > 0) { + total += RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (GetSizeOfVRSaveArea() > 0) { + total += RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + /* + * if the function does not have VLA nor alloca, + * we allocate space for arguments to stack-pass + * in the call frame; otherwise, it has to be allocated for each call and reclaimed afterward. 
+ */ + total += segArgsToStkPass.GetSize(); + return RoundUp(total, kAarch64StackPtrAlignment); +} + +uint32 AArch64MemLayout::RealStackFrameSize() const { + auto size = StackFrameSize(); + if (cgFunc->GetCG()->IsStackProtectorStrong() || cgFunc->GetCG()->IsStackProtectorAll()) { + size += static_cast(kAarch64StackPtrAlignment); + } + return static_cast(size); +} + +int32 AArch64MemLayout::GetRefLocBaseLoc() const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto beforeSize = GetSizeOfLocals(); + if (aarchCGFunc->UsedStpSubPairForCallFrameAllocation()) { + return static_cast(beforeSize); + } + return static_cast(beforeSize + kSizeOfFplr); +} + +int32 AArch64MemLayout::GetGRSaveAreaBaseLoc() { + int32 total = static_cast(RealStackFrameSize() - + RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)); + total -= static_cast(SizeOfArgsToStackPass()); + return total; +} + +int32 AArch64MemLayout::GetVRSaveAreaBaseLoc() { + int32 total = static_cast((RealStackFrameSize() - + RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)) - + RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment)); + total -= static_cast(SizeOfArgsToStackPass()); + return total; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_obj_emitter.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_obj_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4a2af97512695ef406d61cb10efeced2c3a1fb17 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_obj_emitter.cpp @@ -0,0 +1,1641 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#include "aarch64_obj_emitter.h" +#include "aarch64_isa.h" + +namespace { +enum ShiftNumber : maple::uint8 { + kShiftFour = 4, + kShiftFive = 5, + kShiftSix = 6, + kShiftEight = 8, + kShiftTen = 10, + kShiftTwelve = 12, + kShiftThirteen = 13, + kShiftFifteen = 15, + kShiftSixteen = 16, + kShiftNineteen = 19, + kShiftTwenty = 20, + kShiftTwentyOne = 21, + kShiftTwentyTwo = 22, + kShiftTwentyFour = 24, + kShiftTwentyNine = 29, +}; + +enum ShiftTypeValue : maple::uint32 { + kShiftLSL = 0, + kShiftLSR = 1, + kShiftASR = 2, +}; + +/* from armv8 manual C1.2.3 */ +maple::uint8 ccEncode[maplebe::kCcLast] = { +#define CONDCODE(a, encode) encode, +#include "aarch64_cc.def" +#undef CONDCODE +}; +}; + +namespace maplebe { +/* fixup b .label, b(cond) .label, ldr label insn */ +void AArch64ObjFuncEmitInfo::HandleLocalBranchFixup(const std::vector &label2Offset) { + for (auto *fixup : localFixups) { + uint32 useOffset = fixup->GetOffset(); + uint32 useLabelIndex = fixup->GetLabelIndex(); + uint32 defOffset = label2Offset[useLabelIndex]; + if (defOffset == 0xFFFFFFFFULL) { + CHECK_FATAL(false, "fixup is not local"); + } + + FixupKind fixupKind = fixup->GetFixupKind(); + if (static_cast(fixupKind) == kAArch64CondBranchPCRelImm19 || + static_cast(fixupKind) == kAArch64CompareBranchPCRelImm19) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x7FFFF; +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + CHECK_FATAL(useOffset < textData.size(), "out of range"); + uint32 newValue = GetTextDataElem32(useOffset) | ((pcRelImm & mask) << kShiftFive); + SwapTextData(&newValue, useOffset, sizeof(uint32)); +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "after contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + } else if (static_cast(fixupKind) == kAArch64UnCondBranchPCRelImm26) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x3FFFFFF; +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + CHECK_FATAL(useOffset < textData.size(), "out of vector size!"); + uint32 newValue = GetTextDataElem32(useOffset) | (pcRelImm & mask); + SwapTextData(&newValue, useOffset, sizeof(uint32)); +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "after contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + } else if (static_cast(fixupKind) == kAArch64TestBranchPCRelImm14) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x3FFF; + CHECK_FATAL(useOffset < textData.size(), "out of vector size"); + uint32 newValue = GetTextDataElem32(useOffset) | ((pcRelImm & mask) << kShiftFive); + SwapTextData(&newValue, useOffset, sizeof(uint32)); + } else if (static_cast(fixupKind) == kAArch64LoadPCRelImm19) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x7FFFF; + uint32 newValue = GetTextDataElem32(useOffset) | ((pcRelImm & mask) << kShiftFive); + SwapTextData(&newValue, useOffset, sizeof(uint32)); + } + } + localFixups.clear(); +} + +void AArch64ObjEmitter::HandleTextSectionGlobalFixup() { + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + for (auto *fixup : content->GetGlobalFixups()) { + if (fixup->GetFixupKind() == kLSDAFixup) { + HandleLSDAFixup(*content, *fixup); + continue; + } + switch (static_cast(fixup->GetFixupKind())) { + case kAArch64CallPCRelImm26: { + HandleCallFixup(*content, *fixup); + break; + } + case 
kAArch64PCRelAdrImm21: { + HandleAdrFixup(*content, *fixup); + break; + } + default: + ASSERT(false, "unsupported FixupKind"); + break; + } + } + } +} + +void AArch64ObjEmitter::HandleTextSectionFixup() { + relaSection = memPool->New(".rela.text", SHT_RELA, SHF_INFO_LINK, textSection->GetIndex(), + 8, *symbolTabSection, *this, *memPool); + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + for (auto *fixup : content->GetGlobalFixups()) { + switch (static_cast(fixup->GetFixupKind())) { + case kAArch64CallPCRelImm26: { + // HandleCallFixup(*content, *fixup); + auto nameIndex = strTabSection->AddString(fixup->GetLabel()); + symbolTabSection->AppendSymbol({static_cast(nameIndex), static_cast((STB_GLOBAL << kShiftFour) + + (STT_NOTYPE & 0xf)), 0, 0, 0, 0}); + symbolTabSection->AppendIdxInSymbols(0); // 0: temporarily + uint32 relOffset = fixup->GetRelOffset(); + uint32 offset = fixup->GetOffset(); + uint64 type = R_AARCH64_CALL26; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(0) << + 32) + (type & 0xffffffff)), relOffset}); + break; + } + case kAArch64PCRelAdrpImm21: { + // HandleAdrpFixup(*content, *fixup); + uint32 relOffset = fixup->GetRelOffset(); + uint32 offset = fixup->GetOffset(); + uint64 type = R_AARCH64_ADR_PREL_PG_HI21; + int64 rodataSecSymIdx = ~rodataSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(rodataSecSymIdx) << + 32) + (type & 0xffffffff)), relOffset}); + break; + } + case kAArch64PCRelAdrImm21: { + // HandleAdrFixup(*content, *fixup); + break; + } + case kAArch64LdrPCRelLo12: + case kAArch64AddPCRelLo12: { + // HandlekPCRelLo12Fixup(*content, *fixup); + int32 relOffset = fixup->GetRelOffset(); + uint32 offset = fixup->GetOffset(); + uint64 type = R_AARCH64_ADD_ABS_LO12_NC; + int64 rodataSecSymIdx = ~rodataSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(rodataSecSymIdx) << + 32) + (type & 0xffffffff)), relOffset}); + break; + } + default: + ASSERT(false, "unsupported FixupKind"); + break; + } + } + } +} + +void AArch64ObjEmitter::HandleCallFixup(ObjFuncEmitInfo &funcEmitInfo, const Fixup &fixup) { + AArch64ObjFuncEmitInfo &objFuncEmitInfo = static_cast(funcEmitInfo); + uint32 useOffset = objFuncEmitInfo.GetStartOffset() + fixup.GetOffset(); + const std::string &funcName = fixup.GetLabel(); + auto str2objSymbolItr = globalLabel2Offset.find(funcName); + if (str2objSymbolItr != globalLabel2Offset.end()) { + uint32 defOffset = str2objSymbolItr->second.offset; + LogInfo::MapleLogger() << std::hex << "defOffset: " << defOffset << "\n"; + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 newValue = objFuncEmitInfo.GetTextDataElem32(fixup.GetOffset()) | (pcRelImm & 0x3FFFFFF); + objFuncEmitInfo.SwapTextData(&newValue, fixup.GetOffset(), sizeof(uint32)); + } +} + +void AArch64ObjEmitter::HandleAdrFixup(ObjFuncEmitInfo &funcEmitInfo, const Fixup &fixup) { + AArch64ObjFuncEmitInfo &objFuncEmitInfo = static_cast(funcEmitInfo); + uint32 useOffset = objFuncEmitInfo.GetStartOffset() + fixup.GetOffset(); + const std::string &label = fixup.GetLabel(); + auto str2objSymbolItr = globalLabel2Offset.find(label); + if (str2objSymbolItr != globalLabel2Offset.end()) { + uint32 defOffset = str2objSymbolItr->second.offset + fixup.GetRelOffset(); + uint32 pcRelImm = defOffset - useOffset; + uint32 immLow = (pcRelImm & 0x3) << kShiftTwentyNine; + uint32 immHigh = ((pcRelImm >> k2BitSize) & 0x7FFFF) << 
kShiftFive; + uint32 newValue = objFuncEmitInfo.GetTextDataElem32(fixup.GetOffset()) | immLow | immHigh; + objFuncEmitInfo.SwapTextData(&newValue, fixup.GetOffset(), sizeof(uint32)); + } +} + +void AArch64ObjEmitter::HandleLSDAFixup(ObjFuncEmitInfo &funcEmitInfo, const Fixup &fixup) { + AArch64ObjFuncEmitInfo &objFuncEmitInfo = static_cast(funcEmitInfo); + uint32 value = objFuncEmitInfo.GetExceptStartOffset() - objFuncEmitInfo.GetStartOffset(); + objFuncEmitInfo.SwapTextData(&value, fixup.GetOffset(), sizeof(uint32)); +} + +void AArch64ObjEmitter::AppendTextSectionData() { + auto &contents = GetContents(); + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + MapleVector funcTextData = content->GetTextData(); + textSection->AppendData(funcTextData); + } +} + +void AArch64ObjEmitter::AppendGlobalLabel() { + auto &contents = GetContents(); + uint32 offset = 0; + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + content->SetStartOffset(offset); + ObjLabel objLabel = { offset, content->GetTextDataSize() }; + offset += content->GetTextDataSize(); + std::string funcName(content->GetFuncName().c_str()); + RegisterGlobalLabel(funcName, objLabel); + /* register all the start of switch table */ + const MapleMap &switchTableOffset = content->GetSwitchTableOffset(); + for (auto &elem : switchTableOffset) { + ObjLabel switchTableLabel = { elem.second + content->GetStartOffset(), 0 }; + RegisterGlobalLabel(elem.first.c_str(), switchTableLabel); + } + } +} + +void AArch64ObjEmitter::AppendSymsToSymTabSec() { + // section symbol + AddSymbol(".text", textSection->GetDataSize(), *textSection, 0); + /* Indexed by the inverse of the section index. */ + symbolTabSection->AppendIdxInSymbols(~textSection->GetIndex() + 1); + AddSymbol(".data", dataSection->GetDataSize(), *dataSection, 0); + symbolTabSection->AppendIdxInSymbols(~dataSection->GetIndex() + 1); + // rodataSection = memPool->New(".rodata", SHT_PROGBITS, SHF_ALLOC, 8, *this, *memPool); + // AddSymbol(".rodata", rodataSection->GetDataSize(), *rodataSection, 0); + // symbolTabSection->AppendIdxInSymbols(~rodataSection->GetIndex() + 1); + + Address offset = 0; + auto &contents = GetContents(); + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + // func symbol + AddFuncSymbol(content->GetFuncName(), content->GetTextData().size(), offset); + offset += content->GetTextData().size(); + } +} + +void AArch64ObjEmitter::InitSections() { + (void)memPool->New(" ", SHT_NULL, 0, 0, *this, *memPool); + textSection = memPool->New(".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, 4, *this, *memPool); + dataSection = memPool->New(".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, 8, *this, *memPool); + strTabSection = + memPool->New(".strtab", SHT_STRTAB, 0, 1, *this, *memPool); + symbolTabSection = + memPool->New(".symtab", SHT_SYMTAB, 0, sizeof(Symbol), *this, *memPool, *strTabSection); + shStrSection = + memPool->New(".shstrtab", SHT_STRTAB, 0, 1, *this, *memPool); +} + +void AArch64ObjEmitter::LayoutSections() { + /* Init elf file header */ + InitELFHeader(); + globalOffset = sizeof(FileHeader); + globalOffset = Alignment::Align(globalOffset, k8ByteSize); + + // header.e_phoff = globalOffset; + + globalAddr = globalOffset; + + for (auto *section : sections) { + section->SetSectionHeaderNameIndex(static_cast(shStrSection->AddString(section->GetName()))); + } + + for (auto *section : sections) { + // section->SetSectionHeaderNameIndex(shStrSection->AddString(section->GetName())); + 
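/* align both the file offset and the virtual address to this section's requirement before laying it out */
+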
globalOffset = Alignment::Align(globalOffset, section->GetAlign()); + globalAddr = Alignment::Align
(globalAddr, section->GetAlign());
+        section->Layout();
+    }
+
+    globalOffset = Alignment::Align(globalOffset, k8ByteSize);
+    header.e_shoff = globalOffset;
+    header.e_phnum = 0;
+    header.e_shnum = static_cast<uint16>(sections.size());
+}
+
+void AArch64ObjEmitter::UpdateMachineAndFlags(FileHeader &header) {
+    header.e_machine = EM_AARCH64;
+    header.e_flags = 0;
+}
+
+/* input insn, and get the binary code of insn */
+uint32 AArch64ObjEmitter::GetBinaryCodeForInsn(const Insn &insn, const std::vector<uint32> &label2Offset,
+                                               ObjFuncEmitInfo &objFuncEmitInfo) const {
+    const InsnDesc &md = AArch64CG::kMd[insn.GetMachineOpcode()];
+    uint32 binInsn = md.GetMopEncode();
+    switch (md.GetEncodeType()) {
+        case kMovReg:
+            return GenMovReg(insn);
+
+        case kMovImm:
+            return GenMovImm(insn);
+
+        case kAddSubExtendReg:
+            return binInsn | GenAddSubExtendRegInsn(insn);
+
+        case kAddSubImm:
+            return binInsn | GenAddSubImmInsn(insn);
+
+        case kAddSubShiftImm:
+            return binInsn | GenAddSubShiftImmInsn(insn);
+
+        case kAddSubReg:
+            return binInsn | GenAddSubRegInsn(insn);
+
+        case kAddSubShiftReg:
+            return binInsn | GenAddSubShiftRegInsn(insn);
+
+        case kBitfield: {
+            if (insn.GetMachineOpcode() == MOP_xuxtw64) {
+                /* uxtw xd, wn is encoded as "orr wd, wzr, wn" */
+                uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd));
+                opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen;
+                opnd |= (0b11111 << kShiftFive) | AArch64CG::kMd[MOP_wiorrrr].GetMopEncode();
+                return opnd;
+            }
+            return binInsn | GenBitfieldInsn(insn);
+        }
+
+        case kExtract:
+            return binInsn | GenExtractInsn(insn);
+
+        case kBranchImm:
+            return binInsn | GenBranchImmInsn(insn, label2Offset, objFuncEmitInfo);
+
+        case kBranchReg:
+            return binInsn | GenBranchRegInsn(insn);
+
+        case kCompareBranch:
+            return binInsn | GenCompareBranchInsn(insn, objFuncEmitInfo);
+
+        case kCondCompareImm:
+            return binInsn | GenCondCompareImmInsn(insn);
+
+        case kCondCompareReg:
+            return binInsn | GenCondCompareRegInsn(insn);
+
+        case kConditionalSelect:
+            return binInsn | GenConditionalSelectInsn(insn);
+
+        case kDataProcess1Src:
+            return binInsn | GenDataProcess1SrcInsn(insn);
+
+        case kDataProcess2Src:
+            return binInsn | GenDataProcess2SrcInsn(insn);
+
+        case kDataProcess3Src:
+            return binInsn | GenDataProcess3SrcInsn(insn);
+
+        case kFloatIntConversions:
+            return binInsn | GenFloatIntConversionsInsn(insn);
+
+        case kFloatCompare:
+            return binInsn | GenFloatCompareInsn(insn);
+
+        case kFloatDataProcessing1:
+            return binInsn | GenFloatDataProcessing1Insn(insn);
+
+        case kFloatDataProcessing2:
+            return binInsn | GenFloatDataProcessing2Insn(insn);
+
+        case kFloatImm:
+            return binInsn | GenFloatImmInsn(insn);
+
+        case kFloatCondSelect:
+            return binInsn | GenFloatCondSelectInsn(insn);
+
+        case kLoadStoreReg:
+            return GenLoadStoreRegInsn(insn, objFuncEmitInfo);
+
+        case kLoadStoreAR:
+            return binInsn | GenLoadStoreARInsn(insn);
+
+        case kLoadExclusive:
+            return binInsn | GenLoadExclusiveInsn(insn);
+
+        case kLoadExclusivePair:
+            return binInsn | GenLoadExclusivePairInsn(insn);
+
+        case kStoreExclusive:
+            return binInsn | GenStoreExclusiveInsn(insn);
+
+        case kStoreExclusivePair:
+            return binInsn | GenStoreExclusivePairInsn(insn);
+
+        case kLoadPair:
+            return binInsn | GenLoadPairInsn(insn);
+
+        case kStorePair:
+            return binInsn | GenStorePairInsn(insn);
+
+        case kLoadStoreFloat:
+            return GenLoadStoreFloatInsn(insn, objFuncEmitInfo);
+
+        case kLoadPairFloat:
+            return binInsn | GenLoadPairFloatInsn(insn);
+
+        case kStorePairFloat:
+            return binInsn | GenStorePairFloatInsn(insn);
+
+        case kLoadLiteralReg:
+            return
binInsn | GenLoadLiteralRegInsn(insn, objFuncEmitInfo); + + case kLogicalReg: + return binInsn | GenLogicalRegInsn(insn); + + case kLogicalImm: + return binInsn | GenLogicalImmInsn(insn); + + case kMoveWide: + return binInsn | GenMoveWideInsn(insn); + + case kPCRelAddr: + return binInsn | GenPCRelAddrInsn(insn, objFuncEmitInfo); + + case kAddPCRelAddr: + return binInsn | GenAddPCRelAddrInsn(insn, objFuncEmitInfo); + + case kSystemInsn: + return binInsn | GenSystemInsn(insn); + + case kTestBranch: + return binInsn | GenTestBranchInsn(insn, objFuncEmitInfo); + + case kCondBranch: + return binInsn | GenCondBranchInsn(insn, objFuncEmitInfo); + + case kUnknownEncodeType: + break; + + default: + break; + } + return binInsn; +} + +/* get binary code of operand */ +uint32 AArch64ObjEmitter::GetOpndMachineValue(const Operand &opnd) const { + if (opnd.IsRegister()) { + const RegOperand ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == kRFLAG) { + return 0; + } + if (regOpnd.IsOfIntClass()) { + if (regOpnd.GetRegisterNumber() == RZR) { + return regNO - R0 - 2; + } + if (regOpnd.GetRegisterNumber() == RSP) { + return regNO - R0 - 1; + } + return regNO - R0; + } + return regNO - V0; + } else if (opnd.IsImmediate()) { + return static_cast(static_cast(opnd).GetValue()); + } else if (opnd.IsConditionCode()) { + const CondOperand &condOpnd = static_cast(opnd); + return static_cast(ccEncode[condOpnd.GetCode()]); + } else if (opnd.IsOpdExtend()) { + const ExtendShiftOperand &exendOpnd = static_cast(opnd); + uint32 shift = exendOpnd.GetShiftAmount(); + ASSERT(exendOpnd.GetExtendOp() == ExtendShiftOperand::kSXTW, "support kSXTW only!"); + uint32 option = 0x30; + return option | shift; + } else { + CHECK_FATAL(false, "not supported operand type currently"); + } +} + +uint32 AArch64ObjEmitter::GetAdrLabelOpndValue(const Insn &insn, const Operand &opnd, + ObjFuncEmitInfo &objFuncEmitInfo) const { + FixupKind fixupKind = (insn.GetMachineOpcode() == MOP_xadrp) ? 
FixupKind(kAArch64PCRelAdrpImm21) + : FixupKind(kAArch64PCRelAdrImm21); + if (opnd.IsMemoryAccessOperand()) { + const MemOperand &memOpnd = static_cast(opnd); + Fixup *fixup = memPool->New(memOpnd.GetSymbolName(), 0, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } else if (opnd.IsStImmediate()) { + const StImmOperand &stOpnd = static_cast(opnd); + Fixup *fixup = memPool->New( + stOpnd.GetName(), stOpnd.GetOffset(), objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } else { + CHECK_FATAL(opnd.IsImmediate(), "check kind failed"); + } + return 0; +} + +uint32 AArch64ObjEmitter::GetLoadLiteralOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const { + FixupKind fixupKind = FixupKind(kAArch64LoadPCRelImm19); + CHECK_FATAL(opnd.IsLabelOpnd(), "check literal kind failed"); + const LabelOperand &label = static_cast(opnd); + LocalFixup *fixup = memPool->New(label.GetLabelIndex(), objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetCondBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const { + FixupKind fixupKind = FixupKind(kAArch64CondBranchPCRelImm19); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetUnCondBranchOpndValue(const Operand &opnd, + const std::vector &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo) const { + uint32 defOffset = label2Offset[static_cast(opnd).GetLabelIndex()]; + if (defOffset != 0xFFFFFFFFULL) { + uint32 useOffset = objFuncEmitInfo.GetTextDataSize(); + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + return (pcRelImm & 0x3FFFFFF); + } + + FixupKind fixupKind = FixupKind(kAArch64UnCondBranchPCRelImm26); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetCallFuncOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const { + const FuncNameOperand &funcNameOpnd = static_cast(opnd); + const MIRSymbol *funcSymbol = funcNameOpnd.GetFunctionSymbol(); + FixupKind fixupKind = FixupKind(kAArch64CallPCRelImm26); + + Fixup *fixup = memPool->New(funcNameOpnd.GetName(), 0, objFuncEmitInfo.GetTextDataSize(), fixupKind); + if (funcSymbol->GetFunction() != nullptr && funcSymbol->GetFunction()->IsStatic()) { + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } + return 0; +} + +uint32 AArch64ObjEmitter::GetCompareBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const { + FixupKind fixupKind = FixupKind(kAArch64CompareBranchPCRelImm19); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetTestBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const { + FixupKind fixupKind = FixupKind(kAArch64TestBranchPCRelImm14); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 
AArch64ObjEmitter::GetLo12LitrealOpndValue(MOperator mOp, const Operand &opnd, + ObjFuncEmitInfo &objFuncEmitInfo) const { + FixupKind fixupKind = (mOp == MOP_xadrpl12) ? FixupKind(kAArch64AddPCRelLo12) : FixupKind(kAArch64LdrPCRelLo12); + if (opnd.IsMemoryAccessOperand()) { + const MemOperand &memOpnd = static_cast(opnd); + uint32 offset = 0; + if (memOpnd.GetOffsetImmediate() != nullptr) { + offset = static_cast(memOpnd.GetOffsetImmediate()->GetOffsetValue()); + } + Fixup *fixup = memPool->New(memOpnd.GetSymbolName(), offset, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } else { + CHECK_FATAL(opnd.IsStImmediate(), "check opnd kind"); + const StImmOperand &stOpnd = static_cast(opnd); + Fixup *fixup = memPool->New( + stOpnd.GetName(), stOpnd.GetOffset(), objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } + return 0; +} + +uint32 AArch64ObjEmitter::GenMovReg(const Insn &insn) const { + Operand &opnd1 = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd2 = insn.GetOperand(kInsnSecondOpnd); + ASSERT(opnd1.IsRegister(), "opnd1 must be a register"); + ASSERT(opnd2.IsRegister(), "opnd2 must be a register"); + uint32 opCode = 0; + if (static_cast(opnd1).GetRegisterNumber() == RSP || + static_cast(opnd2).GetRegisterNumber() == RSP) { + if (insn.GetMachineOpcode() == MOP_xmovrr) { + const InsnDesc &md = AArch64CG::kMd[MOP_xaddrri12]; + opCode = md.GetMopEncode(); + } else { + ASSERT(insn.GetMachineOpcode() == MOP_wmovrr, "support MOP_wmovrr Currently!"); + const InsnDesc &md = AArch64CG::kMd[MOP_waddrri12]; + opCode = md.GetMopEncode(); + } + /* Rd */ + uint32 opnd = opCode | GetOpndMachineValue(opnd1); + /* Rn */ + opnd |= GetOpndMachineValue(opnd2) << kShiftFive; + return opnd; + } else { + if (insn.GetMachineOpcode() == MOP_xmovrr) { + const InsnDesc &md = AArch64CG::kMd[MOP_xiorrrr]; + opCode = md.GetMopEncode(); + } else { + ASSERT(insn.GetMachineOpcode() == MOP_wmovrr, "support MOP_wmovrr Currently!"); + const InsnDesc &md = AArch64CG::kMd[MOP_wiorrrr]; + opCode = md.GetMopEncode(); + } + /* Rd */ + uint32 opnd = opCode | GetOpndMachineValue(opnd1); + /* Rn */ + opnd |= GetOpndMachineValue(opnd2) << kShiftSixteen; + /* Rm */ + uint32 zr = 0x1f; /* xzr / wzr */ + opnd |= zr << kShiftFive; + return opnd; + } +} + +uint32 AArch64ObjEmitter::GenMovImm(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + ImmOperand &immOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 immSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + uint64 immValue = static_cast(immOpnd.GetValue()); + bool isMovz = IsMoveWidableImmediate(immValue, immSize); + bool isMovn = IsMoveWidableImmediate(~immValue, immSize); + if (isMovz || isMovn) { + if (!isMovz) { + immValue = ~immValue; + } + uint32 hwFlag = 0; + if (immSize == k32BitSize) { + auto &md = isMovz ? AArch64CG::kMd[MOP_wmovzri16] : AArch64CG::kMd[MOP_wmovnri16]; + opnd |= md.GetMopEncode(); + immValue = static_cast(immValue); + uint32 bitFieldValue = 0xFFFF; + if (((static_cast(immValue)) & (bitFieldValue << k16BitSize)) != 0) { + hwFlag = 1; + } + } else { + ASSERT(immSize == k64BitSize, "support 64 bit only!"); + auto &md = isMovz ? 
AArch64CG::kMd[MOP_xmovzri16] : AArch64CG::kMd[MOP_xmovnri16]; + opnd |= md.GetMopEncode(); + uint64 bitFieldValue = 0xFFFF; + /* hw is 00b, 01b, 10b, or 11b */ + for (hwFlag = 0; hwFlag < 4; ++hwFlag) { + if (immValue & (bitFieldValue << (k16BitSize * hwFlag))) { + break; + } + } + } + opnd |= ((static_cast(immValue >> (hwFlag * k16BitSize))) << kShiftFive); + opnd |= (hwFlag << kShiftTwentyOne); + } else { + if (immSize == k32BitSize) { + auto &md = AArch64CG::kMd[MOP_wiorrri12]; + opnd |= md.GetMopEncode(); + } else { + ASSERT(immSize == k64BitSize, "support 64 bit only!"); + auto &md = AArch64CG::kMd[MOP_xiorrri13]; + opnd |= md.GetMopEncode(); + } + uint64 value = static_cast(immOpnd.GetValue()); + uint32 size = (immSize == k32BitSize) ? k32BitSize : k64BitSize; + opnd |= EncodeLogicaImm(value, size) << kShiftTen; + opnd |= (0x1FU << kShiftFive); + } + + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubExtendRegInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Extend */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + opnd |= GetLo12LitrealOpndValue(insn.GetMachineOpcode(), + insn.GetOperand(kInsnThirdOpnd), objFuncEmitInfo) << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubImmInsn(const Insn &insn) const { + uint32 operandSize = 4; // subs insn + int32 index = insn.GetOperandSize() == operandSize ? 1 : 0; + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd + index)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd + index)) << kShiftFive; + /* Imm */ + uint32 immValue = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd + index)); + if ((immValue & (0xFFFU)) == 0 && ((immValue & (0xFFFU << kShiftTwelve))) != 0) { + opnd |= (1U << kShiftTwentyTwo); + immValue >>= kShiftTwelve; + } + opnd |= (immValue << kShiftTen); + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubShiftImmInsn(const Insn &insn) const { + uint32 operandSize = 5; // subs insn + int32 index = insn.GetOperandSize() == operandSize ? 1 : 0; + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd + index)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd + index)) << kShiftFive; + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd + index)) << kShiftTen; + /* Shift */ + BitShiftOperand &lslOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd + index)); + if (lslOpnd.GetShiftAmount() > 0) { + uint32 shift = 0x1; + opnd |= shift << kShiftTwentyTwo; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubRegInsn(const Insn &insn) const { + uint32 operandSize = 4; // subs insn + int32 index = insn.GetOperandSize() == operandSize ? 
1 : 0; + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd + index)); + operandSize = 2; + if (insn.GetOperandSize() == operandSize) { // neg, cmp or cmn insn + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + return opnd; + } + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd + index)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd + index)) << kShiftSixteen; + + RegOperand &rd = static_cast(insn.GetOperand(kInsnFirstOpnd + index)); + RegOperand &rn = static_cast(insn.GetOperand(kInsnSecondOpnd + index)); + // SP register can only be used with LSL or Extend + if (rd.GetRegisterNumber() == RSP || rn.GetRegisterNumber() == RSP) { + uint32 regSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); + opnd |= 1 << kShiftTwentyOne; + opnd |= ((regSize == k64BitSize ? 0b11 : 0b10) << kShiftThirteen); // option + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubShiftRegInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + + BitShiftOperand *bitShiftOpnd = nullptr; + + uint32 operandSize = 3; + if (insn.GetOperandSize() == operandSize) { + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + bitShiftOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + } else { + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + bitShiftOpnd = static_cast(&insn.GetOperand(kInsnFourthOpnd)); + } + uint32 shift = 0; + switch (bitShiftOpnd->GetShiftOp()) { + case BitShiftOperand::kLSL: + shift = kShiftLSL; + break; + case BitShiftOperand::kLSR: + shift = kShiftLSR; + break; + case BitShiftOperand::kASR: + shift = kShiftASR; + break; + default: + break; + } + /* Shift */ + opnd |= shift << kShiftTwentyTwo; + /* Imm */ + opnd |= bitShiftOpnd->GetShiftAmount() << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenBitfieldInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + + uint32 operandSize = 4; + if (insn.GetMachineOpcode() == MOP_wubfizrri5i5 || insn.GetMachineOpcode() == MOP_xubfizrri6i6 || + insn.GetMachineOpcode() == MOP_wbfirri5i5 || insn.GetMachineOpcode() == MOP_xbfirri6i6) { + uint32 mod = insn.GetOperand(kInsnFirstOpnd).GetSize(); /* 64 & 32 from ARMv8 manual C5.6.114 */ + CHECK_FATAL(mod == 64 || mod == 32, "mod must be 64/32"); + uint32 shift = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + uint32 immr = -shift % mod; + opnd |= immr << kShiftSixteen; + uint32 width = GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + uint32 imms = width - 1; + opnd |= imms << kShiftTen; + } else if (insn.GetOperandSize() == operandSize) { + uint32 lab = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + opnd |= lab << kShiftSixteen; + uint32 width = GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + opnd |= (lab + width - 1) << kShiftTen; + } else if (insn.GetMachineOpcode() == MOP_xlslrri6 || insn.GetMachineOpcode() == MOP_wlslrri5) { + uint32 mod = insn.GetOperand(kInsnFirstOpnd).GetSize(); /* 64 & 32 from ARMv8 manual C5.6.114 */ + CHECK_FATAL(mod == 64 || mod == 32, "mod must be 64/32"); + uint32 shift = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + 
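/* lsl #shift is the ubfm alias: immr = (-shift) mod regsize, imms = regsize - 1 - shift */
+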
uint32 immr = -shift % mod; + opnd |= immr << kShiftSixteen; + uint32 imms = mod - 1 - shift; + opnd |= imms << kShiftTen; + } else if (insn.GetMachineOpcode() == MOP_xlsrrri6 || insn.GetMachineOpcode() == MOP_wlsrrri5 || + insn.GetMachineOpcode() == MOP_xasrrri6 || insn.GetMachineOpcode() == MOP_wasrrri5) { + uint32 mod = insn.GetOperand(kInsnFirstOpnd).GetSize(); /* 64 & 32 from ARMv8 manual C5.6.114 */ + CHECK_FATAL(mod == 64 || mod == 32, "mod must be 64/32"); + uint32 immr = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + opnd |= immr << kShiftSixteen; + uint32 imms = mod - 1; + opnd |= imms << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenExtractInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftTen; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + + return opnd; +} + +uint32 AArch64ObjEmitter::GenBranchImmInsn(const Insn &insn, const std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Imm */ + if (insn.IsCall()) { + return GetCallFuncOpndValue(insn.GetOperand(kInsnFirstOpnd), objFuncEmitInfo); + } else { + return GetUnCondBranchOpndValue(insn.GetOperand(kInsnFirstOpnd), label2Offset, objFuncEmitInfo); + } +} + +uint32 AArch64ObjEmitter::GenBranchRegInsn(const Insn &insn) const { + if (insn.GetMachineOpcode() == MOP_xret || insn.GetMachineOpcode() == MOP_clrex) { + return 0; + } + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCompareBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetCompareBranchOpndValue(insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCondCompareImmInsn(const Insn &insn) const { + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Nzcv */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFifthOpnd)) << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCondCompareRegInsn(const Insn &insn) const { + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Nzcv */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFifthOpnd)) << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenConditionalSelectInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + uint32 operandSize = 5; + if (insn.GetOperandSize() == operandSize) { + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTwelve; + } else if (insn.GetMachineOpcode() == MOP_wcnegrrrc || 
insn.GetMachineOpcode() == MOP_xcnegrrrc) { + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm Rn==Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + /* Cond */ + uint8 cond = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + /* invert cond */ + opnd |= ((cond ^ 1u) & 0xfu) << kShiftTwelve; + } else { + /* Cond */ + uint8 cond = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)); + /* invert cond */ + opnd |= ((cond ^ 1u) & 0xfu) << kShiftTwelve; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenDataProcess1SrcInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenDataProcess2SrcInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenDataProcess3SrcInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Ra */ + uint32 operandSize = 4; + if (insn.GetOperandSize() == operandSize) { + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatIntConversionsInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatCompareInsn(const Insn &insn) const { + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + if (insn.GetOperand(kInsnThirdOpnd).IsRegister()) { + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatDataProcessing1Insn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatDataProcessing2Insn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatImmInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= (GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) &0xff) << kShiftThirteen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatCondSelectInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= 
GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreModeLiteral(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + FixupKind fixupKind = FixupKind(kAArch64LoadPCRelImm19); + LocalFixup *fixup = + memPool->New(objFuncEmitInfo.GetCGFunc().GetLocalSymLabelIndex(*memOpnd.GetSymbol()), + objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_sldr) { + mOp = MOP_sldli; + } else if (mOp == MOP_dldr) { + mOp = MOP_dldli; + } else { + CHECK_FATAL(false, "unsupported mOp"); + } + auto &md = AArch64CG::kMd[mOp]; + return md.GetMopEncode() | opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreModeBOi(const Insn &insn) const { + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + /* Imm */ + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 imm9Mask = 0x1ff; + uint32 opnd = 0U; + if (memOpnd.IsPostIndexed()) { + opnd |= (static_cast(offsetValue) & imm9Mask) << kShiftTwelve; + uint32 specialOpCode = 0x1; + opnd |= specialOpCode << kShiftTen; + } else if (memOpnd.IsPreIndexed()) { + opnd |= (static_cast(offsetValue) & imm9Mask) << kShiftTwelve; + uint32 specialOpCode = 0x3; + opnd |= specialOpCode << kShiftTen; + } else { + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + uint32 specialOpCode = 0x1; + opnd |= specialOpCode << kShiftTwentyFour; + uint32 divisor = 1; + MOperator mOp = insn.GetMachineOpcode(); + if ((mOp == MOP_xldr) || (mOp == MOP_xstr) || (mOp == MOP_dldr) || (mOp == MOP_dstr)) { + divisor = k8BitSize; + } else if ((mOp == MOP_wldr) || (mOp == MOP_wstr) || (mOp == MOP_sstr) || (mOp == MOP_sldr)) { + divisor = k4BitSize; + } else if (mOp == MOP_hldr) { + divisor = k2BitSize; + } + uint32 shiftRightNum = 0; + if ((mOp == MOP_wldrsh) || (mOp == MOP_wldrh) || (mOp == MOP_wstrh)) { + shiftRightNum = 1; + } + opnd |= ((static_cast(offsetValue) >> shiftRightNum) / divisor) << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreModeBOrX(const Insn &insn) const { + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 opnd = 0; + opnd |= 0x1 << kShiftTwentyOne; + opnd |= 0x2 << kShiftTen; + RegOperand *offsetReg = memOpnd.GetIndexRegister(); + opnd |= GetOpndMachineValue(*offsetReg) << kShiftSixteen; + std::string extend = memOpnd.GetExtendAsString(); + uint32 option = 0; + if (extend == "UXTW") { + option = 0x2; + } else if (extend == "LSL") { + option = 0x3; + } else if (extend == "SXTW") { + option = 0x6; + } else { + ASSERT(extend == "SXTX", "must be SXTX!"); + option = 0x7; + } + opnd |= option << kShiftThirteen; + uint32 s = (memOpnd.ShiftAmount() > 0) ? 
1 : 0;
+    opnd |= s << kShiftTwelve;
+    return opnd;
+}
+
+uint32 AArch64ObjEmitter::GenLoadStoreRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const {
+    /* Mem */
+    MemOperand &memOpnd = static_cast<MemOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (memOpnd.GetAddrMode() == MemOperand::kAddrModeLiteral) {
+        return GenLoadStoreModeLiteral(insn, objFuncEmitInfo);
+    }
+
+    MOperator mOp = insn.GetMachineOpcode();
+#ifdef USE_32BIT_REF
+    if (((mOp == MOP_xstr) || (mOp == MOP_xldr)) &&
+        static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)).IsRefField()) {
+        mOp = (mOp == MOP_xstr) ? MOP_wstr : MOP_wldr;
+    }
+#endif
+    auto &md = AArch64CG::kMd[mOp];
+    uint32 binInsn = md.GetMopEncode();
+    // invalid insn generated by the eval node: rewrite a float load/store of the zero
+    // register to the equivalent integer encoding
+    if (static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == RZR) {
+        if (mOp == MOP_sldr) {
+            binInsn = AArch64CG::kMd[MOP_wldr].GetMopEncode();
+        } else if (mOp == MOP_dldr) {
+            binInsn = AArch64CG::kMd[MOP_xldr].GetMopEncode();
+        } else if (mOp == MOP_sstr) {
+            binInsn = AArch64CG::kMd[MOP_wstr].GetMopEncode();
+        } else if (mOp == MOP_dstr) {
+            binInsn = AArch64CG::kMd[MOP_xstr].GetMopEncode();
+        }
+    }
+    /* Rt */
+    binInsn |= GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd));
+    /* Rn */
+    Operand *baseReg = memOpnd.GetBaseRegister();
+    binInsn |= GetOpndMachineValue(*baseReg) << kShiftFive;
+
+    if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) {
+        uint32 size = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize();
+        OfstOperand *ofstOpnd = static_cast<OfstOperand*>(memOpnd.GetOffsetImmediate());
+        /* Imm */
+        int32 offsetValue = ofstOpnd != nullptr ? ofstOpnd->GetOffsetValue() : 0;
+        if ((((size == k16BitSize) && (offsetValue % k2BitSize) != 0) ||
+             ((size == k32BitSize) && (offsetValue % k4BitSize) != 0) ||
+             ((size == k64BitSize) && (offsetValue % k8BitSize) != 0)) &&
+            ((offsetValue < 256) && (offsetValue > -257))) {
+            /* misaligned small offset: fall back to the unscaled encodings */
+            uint32 mopEncode = 0;
+            // ldur, ldurh, ldurb
+            if (insn.IsLoad()) {
+                if (insn.GetDesc()->GetEncodeType() == kLoadStoreFloat) {
+                    mopEncode = size == k16BitSize ? 0x7c400000 : (size == k32BitSize ? 0xbc400000 : 0xfc400000);
+                } else {
+                    mopEncode = size == k16BitSize ? 0x78400000 : (size == k32BitSize ? 0xb8400000 : 0xf8400000);
+                }
+            } else {  // stur, sturh, sturb
+                if (insn.GetDesc()->GetEncodeType() == kLoadStoreFloat) {
+                    mopEncode = size == k16BitSize ? 0x7c000000 : (size == k32BitSize ? 0xbc000000 : 0xfc000000);
+                } else {
+                    mopEncode = size == k16BitSize ? 0x78000000 : (size == k32BitSize ?
0xb8000000 : 0xf8000000); + } + } + binInsn = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) | (GetOpndMachineValue(*baseReg) << kShiftFive); + return binInsn | mopEncode | (offsetValue << kShiftTwelve); + } + return binInsn | GenLoadStoreModeBOi(insn); + } else if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX) { + return binInsn | GenLoadStoreModeBOrX(insn); + } + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeLo12Li, "support kAddrModeLo12Li only!"); + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + binInsn |= GetLo12LitrealOpndValue(insn.GetMachineOpcode(), memOpnd, objFuncEmitInfo) << kShiftTen; + uint32 specialOpCode = 0x1; + binInsn |= specialOpCode << kShiftTwentyFour; + + return binInsn; +} + +uint32 AArch64ObjEmitter::GenLoadStoreARInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadExclusiveInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadExclusivePairInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStoreExclusiveInsn(const Insn &insn) const { + /* Rs */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftSixteen; + /* Rt */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStoreExclusivePairInsn(const Insn &insn) const { + /* Rs */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftSixteen; + /* Rt */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + ASSERT(memOpnd.IsIntactIndexed(), 
"must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadPairInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x3; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x7; + } else { + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x5; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStorePairInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x2; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x6; + } else { + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x4; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreFloatInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + return GenLoadStoreRegInsn(insn, objFuncEmitInfo); +} + +uint32 AArch64ObjEmitter::GenLoadPairFloatInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= 
(static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x3; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x7; + } else { + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x5; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStorePairFloatInsn(const Insn &insn) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x2; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x6; + } else { + ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x4; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadLiteralRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetLoadLiteralOpndValue(insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLogicalRegInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + + uint32 operandSize = 2; // mvn insn + if (insn.GetOperandSize() == operandSize) { + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftFive; + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + return opnd; + } + + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + + operandSize = 4; + if (insn.GetOperandSize() == operandSize) { + BitShiftOperand &bitShiftOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + uint32 shift = 0; + switch (bitShiftOpnd.GetShiftOp()) { + case BitShiftOperand::kLSL: + shift = kShiftLSL; + break; + case BitShiftOperand::kLSR: + shift = kShiftLSR; + break; + case BitShiftOperand::kASR: + shift = kShiftASR; + break; + default: + break; + } + /* Shift */ + opnd |= shift << kShiftTwentyTwo; + /* Imm */ + opnd |= bitShiftOpnd.GetShiftAmount() << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenLogicalImmInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + if (insn.GetMachineOpcode() == MOP_wmovri32 || insn.GetMachineOpcode() == MOP_xmovri64) { + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + return opnd; + } + + // tst insn + if (insn.GetMachineOpcode() == MOP_wtstri32 || insn.GetMachineOpcode() == MOP_xtstri64) { + // Rn + uint32 opndValue = 
GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + // Imm + ImmOperand &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint64 value = static_cast(immOpnd.GetValue()); + uint32 size = insn.GetDesc()->GetOpndDes(kInsnThirdOpnd)->GetSize(); + opndValue |= EncodeLogicaImm(value, size) << kShiftTen; + return opndValue; + } + + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + ImmOperand &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint64 value = static_cast(immOpnd.GetValue()); + uint32 size = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + opnd |= EncodeLogicaImm(value, size) << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenMoveWideInsn(const Insn &insn) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + + BitShiftOperand &lslOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 mod = 16; /* 16 from Armv8 Manual C5.6.128 */ + uint32 shift = lslOpnd.GetShiftAmount() / mod; + /* Shift */ + opnd |= shift << kShiftTwentyOne; + return opnd; +} + +uint32 AArch64ObjEmitter::GenPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetAdrLabelOpndValue(insn, insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenSystemInsn(const Insn &insn) const { + (void)insn; + return 0; +} + +uint32 AArch64ObjEmitter::GenTestBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* b40 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftNineteen; + /* Imm */ + opnd |= GetTestBranchOpndValue(insn.GetOperand(kInsnThirdOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCondBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const { + /* Imm */ + uint32 opnd = GetCondBranchOpndValue(insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +void AArch64ObjEmitter::InsertNopInsn(ObjFuncEmitInfo &objFuncEmitInfo) const { + AArch64CGFunc &cgFunc = static_cast(objFuncEmitInfo.GetCGFunc()); + bool found = false; + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction()) { + if (insn->IsCall()) { + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_nop); + bb->InsertInsnAfter(*insn, newInsn); + } + found = true; + break; + } + } + if (found) { + break; + } + } +} + +uint32 AArch64ObjEmitter::EncodeLogicaImm(uint64 imm, uint32 size) const { + /* the element size */ + uint32 elementSize = size; + while (elementSize > 2) { + elementSize /= 2; + uint64 mask = (1ULL << elementSize) - 1; + if ((imm & mask) != ((imm >> elementSize) & mask)) { + elementSize *= 2; + break; + } + } + + if (elementSize != k64BitSize) { + imm &= ((1ULL << elementSize) - 1); + } + std::bitset bitValue(imm); + uint32 trailCount = 0; + for (uint32 i = 1; i < elementSize; ++i) { + if (bitValue[i] ^ bitValue[0]) { + trailCount = i; + break; + } + } + + uint32 immr = 0; + uint32 oneNum = bitValue.count(); + if (bitValue.test(0)) { /* for 1+0+1+ pattern */ + immr = oneNum - trailCount; + } else { /* for 0+1+0+ pattern */ + immr = elementSize - trailCount; + } + + uint32 imms = 
~(elementSize - 1) << 1; + imms |= oneNum - 1; + uint32 n = (elementSize == k64BitSize) ? 1 : 0; + return (n << kShiftTwelve) | (immr << kShiftSix) | (imms & 0x3f); +} + +void AArch64ObjEmitter::EmitIntrinsicInsn(const Insn &insn, std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) { + switch (insn.GetMachineOpcode()) { + // adrp xd, label + // add xd, xd, #:lo12:label + case MOP_adrp_label: { + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + uint32 binInsn = AArch64CG::kMd[MOP_xadrp].GetMopEncode(); + binInsn |= opnd; + objFuncEmitInfo.AppendTextData(binInsn, k4ByteSize); + binInsn = AArch64CG::kMd[MOP_xaddrri12].GetMopEncode(); + binInsn |= opnd | (opnd << kShiftFive); + objFuncEmitInfo.AppendTextData(binInsn, k4ByteSize); + break; + } + default: + CHECK_FATAL(false, "unsupport mop in EmitIntrinsicInsn!\n"); + } +} + +void AArch64ObjEmitter::EmitSpinIntrinsicInsn(const Insn &insn, std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) { + switch (insn.GetMachineOpcode()) { + case MOP_tls_desc_rel: { + objFuncEmitInfo.AppendTextData(0x91400000, k4ByteSize); + objFuncEmitInfo.AppendTextData(0x91000000, k4ByteSize); + break; + } + default: + CHECK_FATAL(false, "unsupport mop in EmitSpinIntrinsicInsn!\n"); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp new file mode 100644 index 0000000000000000000000000000000000000000..30997f40a96a8c929fc60a2f39545806898f627c --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_offset_adjust.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64FPLROffsetAdjustment::Run() { + AdjustmentOffsetForFPLR(); +} + +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc) { + bool isLmbc = (aarchCGFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc); + uint32 opndNum = insn.GetOperandSize(); + MemLayout *memLayout = aarchCGFunc.GetMemlayout(); + bool stackBaseOpnd = false; + AArch64reg stackBaseReg = isLmbc ? R29 : (aarchCGFunc.UseFP() ? 
R29 : RSP); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsOfVary()) { + insn.SetOperand(i, aarchCGFunc.GetOrCreateStackBaseRegOperand()); + } + if (regOpnd.GetRegisterNumber() == RFP) { + insn.SetOperand(i, aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt)); + stackBaseOpnd = true; + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) || + (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX)) && + memOpnd.GetBaseRegister() != nullptr) { + if (memOpnd.GetBaseRegister()->IsOfVary()) { + memOpnd.SetBaseRegister(static_cast(aarchCGFunc.GetOrCreateStackBaseRegOperand())); + } + RegOperand *memBaseReg = memOpnd.GetBaseRegister(); + if (memBaseReg->GetRegisterNumber() == RFP) { + RegOperand &newBaseOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + MemOperand &newMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( + memOpnd.GetAddrMode(), memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), + memOpnd.GetOffsetImmediate(), memOpnd.GetSymbol()); + insn.SetOperand(i, newMemOpnd); + stackBaseOpnd = true; + } + } + if ((memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + continue; + } + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + if (ofstOpnd == nullptr) { + continue; + } + if (ofstOpnd->GetVary() == kUnAdjustVary) { + ofstOpnd->AdjustOffset(static_cast(static_cast(memLayout)->RealStackFrameSize() + - memLayout->SizeOfArgsToStackPass())); + ofstOpnd->SetVary(kAdjustVary); + } + if (!stackBaseOpnd && (ofstOpnd->GetVary() == kAdjustVary || ofstOpnd->GetVary() == kNotVary)) { + bool condition = aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, i); + if (!condition) { + MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( + memOpnd, memOpnd.GetSize(), static_cast(R16), false, &insn); + insn.SetOperand(i, newMemOpnd); + } + } + } else if (opnd.IsIntImmediate()) { + AdjustmentOffsetForImmOpnd(insn, i, aarchCGFunc); + } + } + if (stackBaseOpnd && !aarchCGFunc.UseFP()) { + AdjustmentStackPointer(insn, aarchCGFunc); + } +} + +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index, + AArch64CGFunc &aarchCGFunc) const { + auto &immOpnd = static_cast(insn.GetOperand(index)); + MemLayout *memLayout = aarchCGFunc.GetMemlayout(); + if (immOpnd.GetVary() == kUnAdjustVary) { + int64 ofst = static_cast(memLayout)->RealStackFrameSize() - memLayout->SizeOfArgsToStackPass(); + if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) { + immOpnd.SetValue(immOpnd.GetValue() - ofst); + if (immOpnd.GetValue() < 0) { + immOpnd.Negate(); + } + insn.SetMOP(AArch64CG::kMd[A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())]); + } else { + immOpnd.Add(ofst); + } + } + if (!aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &immOpnd, index)) { + if (insn.GetMachineOpcode() >= MOP_xaddrri24 && insn.GetMachineOpcode() <= MOP_waddrri12) { + PrimType destTy = + static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize ? 
PTY_i64 : PTY_i32; + RegOperand *resOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + ImmOperand ©ImmOpnd = aarchCGFunc.CreateImmOperand( + immOpnd.GetValue(), immOpnd.GetSize(), immOpnd.IsSignedValue()); + aarchCGFunc.SelectAddAfterInsn(*resOpnd, insn.GetOperand(kInsnSecondOpnd), copyImmOpnd, destTy, false, insn); + insn.GetBB()->RemoveInsn(insn); + } else if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) { + if (immOpnd.IsSingleInstructionMovable()) { + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + bool is64bit = insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize; + MOperator tempMovOp = is64bit ? MOP_xmovri64 : MOP_wmovri32; + Insn &tempMov = cgFunc->GetInsnBuilder()->BuildInsn(tempMovOp, tempReg, immOpnd); + insn.SetOperand(index, tempReg); + insn.SetMOP(is64bit ? AArch64CG::kMd[MOP_xsubrrr] : AArch64CG::kMd[MOP_wsubrrr]); + (void)insn.GetBB()->InsertInsnBefore(insn, tempMov); + } + } else { + CHECK_FATAL(false, "NIY"); + } + } + immOpnd.SetVary(kAdjustVary); +} + +void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn, AArch64CGFunc &aarchCGFunc) { + AArch64MemLayout *aarch64memlayout = static_cast(aarchCGFunc.GetMemlayout()); + int32 offset = static_cast(aarch64memlayout->SizeOfArgsToStackPass()); + if (offset == 0) { + return; + } + if (insn.IsLoad() || insn.IsStore()) { + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + DEBUG_ASSERT(memOpnd.GetBaseRegister() != nullptr, "Unexpect, need check"); + CHECK_FATAL(memOpnd.IsIntactIndexed(), "unsupport yet"); + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { + ImmOperand *ofstOpnd = memOpnd.GetOffsetOperand(); + ImmOperand *newOfstOpnd = &aarchCGFunc.GetOrCreateOfstOpnd( + static_cast(ofstOpnd->GetValue() + offset), ofstOpnd->GetSize()); + MemOperand &newOfstMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, memOpnd.GetSize(), memOpnd.GetBaseRegister(), memOpnd.GetIndexRegister(), + newOfstOpnd, memOpnd.GetSymbol()); + insn.SetOperand(i, newOfstMemOpnd); + if (!aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &newOfstMemOpnd, i)) { + bool isPair = (i == kInsnThirdOpnd); + MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( + newOfstMemOpnd, newOfstMemOpnd.GetSize(), static_cast(R16), false, &insn, isPair); + insn.SetOperand(i, newMemOpnd); + } + continue; + } else if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX) { + CHECK_FATAL(false, "Unexpect adjust insn"); + } else { + insn.Dump(); + CHECK_FATAL(false, "Unexpect adjust insn"); + } + } + } + } else { + switch (insn.GetMachineOpcode()) { + case MOP_xaddrri12: { + DEBUG_ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + ImmOperand &addend = static_cast(insn.GetOperand(kInsnThirdOpnd)); + addend.SetValue(addend.GetValue() + offset); + AdjustmentOffsetForImmOpnd(insn, kInsnThirdOpnd, aarchCGFunc); /* legalize imm opnd */ + break; + } + case MOP_xaddrri24: { + DEBUG_ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); 
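+                /* the shifted imm12 of addrri24 cannot absorb the extra offset; add base + offset into R16 first and rebase this insn */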
+                aarchCGFunc.SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false, insn);
+                insn.SetOperand(kInsnSecondOpnd, tempReg);
+                break;
+            }
+            case MOP_xsubrri12: {
+                DEBUG_ASSERT(static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
+                             "regNumber should be changed in AdjustmentOffsetForOpnd");
+                ImmOperand &subend = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+                subend.SetValue(subend.GetValue() - offset);
+                break;
+            }
+            case MOP_xsubrri24: {
+                DEBUG_ASSERT(static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
+                             "regNumber should be changed in AdjustmentOffsetForOpnd");
+                RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt);
+                ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false);
+                aarchCGFunc.SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false, insn);
+                insn.SetOperand(kInsnSecondOpnd, tempReg);
+                break;
+            }
+            case MOP_waddrri12: {
+                if (!CGOptions::IsArm64ilp32()) {
+                    insn.Dump();
+                    CHECK_FATAL(false, "Unexpected offset adjustment insn");
+                } else {
+                    DEBUG_ASSERT(static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP,
+                                 "regNumber should be changed in AdjustmentOffsetForOpnd");
+                    ImmOperand &addend = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+                    addend.SetValue(addend.GetValue() + offset);
+                    AdjustmentOffsetForImmOpnd(insn, kInsnThirdOpnd, aarchCGFunc); /* legalize imm opnd */
+                }
+                break;
+            }
+            default:
+                insn.Dump();
+                CHECK_FATAL(false, "Unexpected offset adjustment insn");
+        }
+    }
+}
+
+void AArch64FPLROffsetAdjustment::AdjustmentOffsetForFPLR() {
+    AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    FOR_ALL_BB(bb, aarchCGFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            AdjustmentOffsetForOpnd(*insn, *aarchCGFunc);
+        }
+    }
+
+#undef STKLAY_DBUG
+#ifdef STKLAY_DBUG
+    AArch64MemLayout *aarch64memlayout = static_cast<AArch64MemLayout*>(cgFunc->GetMemlayout());
+    LogInfo::MapleLogger() << "stkpass: " << aarch64memlayout->GetSegArgsStkpass().size << "\n";
+    LogInfo::MapleLogger() << "local: " << aarch64memlayout->GetSizeOfLocals() << "\n";
+    LogInfo::MapleLogger() << "ref local: " << aarch64memlayout->GetSizeOfRefLocals() << "\n";
+    LogInfo::MapleLogger() << "regpass: " << aarch64memlayout->GetSegArgsRegPassed().size << "\n";
+    LogInfo::MapleLogger() << "regspill: " << aarch64memlayout->GetSizeOfSpillReg() << "\n";
+    LogInfo::MapleLogger() << "calleesave: " << SizeOfCalleeSaved() << "\n";
+
+#endif
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eda313759eba456fd254d15d179fa47f9d970236
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
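+/*
+ * Added commentary (not part of the original patch): the Less() implementations in
+ * this file define a strict weak ordering over operands, so that StImmOperand and
+ * ExtendShiftOperand values can be compared, deduplicated, and used as keys in
+ * ordered containers during instruction selection.
+ */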
+#include "aarch64_operand.h"
+#include
+#include
+#include "aarch64_abi.h"
+#include "aarch64_cgfunc.h"
+#include "aarch64_cg.h"
+
+namespace maplebe {
+bool StImmOperand::Less(const Operand &right) const {
+    if (&right == this) {
+        return false;
+    }
+
+    /* For different type. */
+    if (GetKind() != right.GetKind()) {
+        return GetKind() < right.GetKind();
+    }
+
+    const StImmOperand *rightOpnd = static_cast<const StImmOperand*>(&right);
+    if (symbol != rightOpnd->symbol) {
+        return symbol < rightOpnd->symbol;
+    }
+    if (offset != rightOpnd->offset) {
+        return offset < rightOpnd->offset;
+    }
+    return relocs < rightOpnd->relocs;
+}
+
+bool ExtendShiftOperand::Less(const Operand &right) const {
+    if (&right == this) {
+        return false;
+    }
+    /* For different type. */
+    if (GetKind() != right.GetKind()) {
+        return GetKind() < right.GetKind();
+    }
+
+    const ExtendShiftOperand *rightOpnd = static_cast<const ExtendShiftOperand*>(&right);
+
+    /* The same type. */
+    if (extendOp != rightOpnd->extendOp) {
+        return extendOp < rightOpnd->extendOp;
+    }
+    return shiftAmount < rightOpnd->shiftAmount;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d608349b43211da41359c464cdb8870d8d6fcacd
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
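+/*
+ * Added commentary (not part of the original patch): this visitor supplies the
+ * target-specific hooks used by the generic CFG optimizer on AArch64: retargeting
+ * branches (ModifyJumpTarget), cloning instructions into the current memory pool
+ * (CloneInsn), and classifying compare, compare-and-branch, and add/sub instructions.
+ */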
+#include "aarch64_optimize_common.h"
+#include "aarch64_isa.h"
+#include "aarch64_cgfunc.h"
+#include "cgbb.h"
+
+namespace maplebe {
+void AArch64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) {
+    if (bb.GetKind() == BB::kBBIgoto) {
+        bool modified = false;
+        for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) {
+            if (insn->GetMachineOpcode() == MOP_adrp_label) {
+                LabelIdx labIdx = static_cast<LabelOperand&>(targetOperand).GetLabelIndex();
+                ImmOperand &immOpnd = static_cast<AArch64CGFunc*>(GetCGFunc())->CreateImmOperand(labIdx, k8BitSize, false);
+                insn->SetOperand(1, immOpnd);
+                modified = true;
+            }
+        }
+        CHECK_FATAL(modified, "ModifyJumpTarget: Could not change jump target");
+        return;
+    } else if (bb.GetKind() == BB::kBBGoto) {
+        for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) {
+            if (insn->GetMachineOpcode() == MOP_adrp_label) {
+                maple::LabelIdx labidx = static_cast<LabelOperand&>(targetOperand).GetLabelIndex();
+                LabelOperand &label = static_cast<AArch64CGFunc*>(GetCGFunc())->GetOrCreateLabelOperand(labidx);
+                insn->SetOperand(1, label);
+                break;
+            }
+        }
+        // fallthru below to patch the branch insn
+    }
+    bb.GetLastInsn()->SetOperand(AArch64isa::GetJumpTargetIdx(*bb.GetLastInsn()), targetOperand);
+}
+
+void AArch64InsnVisitor::ModifyJumpTarget(maple::LabelIdx targetLabel, BB &bb) {
+    ModifyJumpTarget(static_cast<AArch64CGFunc*>(GetCGFunc())->GetOrCreateLabelOperand(targetLabel), bb);
+}
+
+void AArch64InsnVisitor::ModifyJumpTarget(BB &newTarget, BB &bb) {
+    ModifyJumpTarget(newTarget.GetLastInsn()->GetOperand(
+        AArch64isa::GetJumpTargetIdx(*newTarget.GetLastInsn())), bb);
+}
+
+Insn *AArch64InsnVisitor::CloneInsn(Insn &originalInsn) {
+    MemPool *memPool = const_cast<MemPool*>(CG::GetCurCGFunc()->GetMemoryPool());
+    if (originalInsn.IsTargetInsn()) {
+        if (!originalInsn.IsVectorOp()) {
+            return memPool->Clone(originalInsn);
+        } else {
+            auto *insn = memPool->Clone(*static_cast<VectorInsn*>(&originalInsn));
+            insn->SetRegSpecList(static_cast<VectorInsn&>(originalInsn).GetRegSpecList());
+            return insn;
+        }
+    } else if (originalInsn.IsCfiInsn()) {
+        return memPool->Clone(*static_cast<cfi::CfiInsn*>(&originalInsn));
+    } else if (originalInsn.IsDbgInsn()) {
+        return memPool->Clone(*static_cast<mpldbg::DbgInsn*>(&originalInsn));
+    }
+    if (originalInsn.IsComment()) {
+        return memPool->Clone(originalInsn);
+    }
+    CHECK_FATAL(false, "Cannot clone");
+    return nullptr;
+}
+
+/*
+ * Precondition: The given insn is a jump instruction.
+ * Gets the jump target label from the given instruction.
+ * Note: MOP_xbr is a branch instruction, but its target is held in a register and is
+ * unknown at compile time, so it is not treated as a jump with a retrievable label here.
+ */
+LabelIdx AArch64InsnVisitor::GetJumpLabel(const Insn &insn) const {
+    uint32 operandIdx = AArch64isa::GetJumpTargetIdx(insn);
+    if (insn.GetOperand(operandIdx).IsLabelOpnd()) {
+        return static_cast<LabelOperand&>(insn.GetOperand(operandIdx)).GetLabelIndex();
+    }
+    DEBUG_ASSERT(false, "Operand is not label");
+    return 0;
+}
+
+bool AArch64InsnVisitor::IsCompareInsn(const Insn &insn) const {
+    switch (insn.GetMachineOpcode()) {
+        case MOP_wcmpri:
+        case MOP_wcmprr:
+        case MOP_xcmpri:
+        case MOP_xcmprr:
+        case MOP_hcmperi:
+        case MOP_hcmperr:
+        case MOP_scmperi:
+        case MOP_scmperr:
+        case MOP_dcmperi:
+        case MOP_dcmperr:
+        case MOP_hcmpqri:
+        case MOP_hcmpqrr:
+        case MOP_scmpqri:
+        case MOP_scmpqrr:
+        case MOP_dcmpqri:
+        case MOP_dcmpqrr:
+        case MOP_wcmnri:
+        case MOP_wcmnrr:
+        case MOP_xcmnri:
+        case MOP_xcmnrr:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool AArch64InsnVisitor::IsCompareAndBranchInsn(const Insn &insn) const {
+    switch (insn.GetMachineOpcode()) {
+        case MOP_wcbnz:
+        case MOP_xcbnz:
+        case MOP_wcbz:
+        case MOP_xcbz:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool AArch64InsnVisitor::IsAddOrSubInsn(const Insn &insn) const {
+    switch (insn.GetMachineOpcode()) {
+        case MOP_xaddrrr:
+        case MOP_xaddrri12:
+        case MOP_waddrrr:
+        case MOP_waddrri12:
+        case MOP_xsubrrr:
+        case MOP_xsubrri12:
+        case MOP_wsubrrr:
+        case MOP_wsubrri12:
+            return true;
+        default:
+            return false;
+    }
+}
+
+RegOperand *AArch64InsnVisitor::CreateVregFromReg(const RegOperand &pReg) {
+    return &static_cast<AArch64CGFunc*>(GetCGFunc())->CreateRegisterOperandOfType(
+        pReg.GetRegisterType(), pReg.GetSize() / k8BitSize);
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9edb52dd1003cae81c2ecd533011deaddbfc1c10
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp
@@ -0,0 +1,5582 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
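+/*
+ * Added commentary (not part of the original patch): this file implements the
+ * AArch64 peephole optimizer. AArch64CGPeepHole::Run dispatches on each machine
+ * instruction's opcode; when SSA info is available it applies the def-use driven
+ * patterns defined below (DoSSAOptimize), otherwise DoNormalOptimize handles the
+ * non-SSA cases, and the pass re-runs until no pattern fires.
+ */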
+#include "aarch64_peep.h"
+#include "cg.h"
+#include "mpl_logging.h"
+#include "common_utils.h"
+#include "cg_option.h"
+#include "aarch64_utils.h"
+
+namespace maplebe {
+#define JAVALANG (cgFunc->GetMirModule().IsJavaModule())
+#define CG_PEEP_DUMP CG_DEBUG_FUNC(*cgFunc)
+namespace {
+const std::string kMccLoadRef = "MCC_LoadRefField";
+const std::string kMccLoadRefV = "MCC_LoadVolatileField";
+const std::string kMccLoadRefS = "MCC_LoadRefStatic";
+const std::string kMccLoadRefVS = "MCC_LoadVolatileStaticField";
+const std::string kMccDummy = "MCC_Dummy";
+
+const uint32 kSizeOfSextMopTable = 5;
+const uint32 kSizeOfUextMopTable = 3;
+
+MOperator sextMopTable[kSizeOfSextMopTable] = {
+    MOP_xsxtb32, MOP_xsxtb64, MOP_xsxth32, MOP_xsxth64, MOP_xsxtw64
+};
+
+MOperator uextMopTable[kSizeOfUextMopTable] = {
+    MOP_xuxtb32, MOP_xuxth32, MOP_xuxtw64
+};
+
+const std::string GetReadBarrierName(const Insn &insn) {
+    constexpr int32 totalBarrierNamesNum = 5;
+    std::array<std::string, totalBarrierNamesNum> barrierNames = {
+        kMccLoadRef, kMccLoadRefV, kMccLoadRefS, kMccLoadRefVS, kMccDummy
+    };
+    if (insn.GetMachineOpcode() == MOP_xbl ||
+        insn.GetMachineOpcode() == MOP_tail_call_opt_xbl) {
+        auto &op = static_cast<FuncNameOperand&>(insn.GetOperand(kInsnFirstOpnd));
+        const std::string &funcName = op.GetName();
+        for (const std::string &singleBarrierName : barrierNames) {
+            if (funcName == singleBarrierName) {
+                return singleBarrierName;
+            }
+        }
+    }
+    return "";
+}
+
+MOperator GetLoadOperator(uint32 refSize, bool isVolatile) {
+    if (refSize == k32BitSize) {
+        return isVolatile ? MOP_wldar : MOP_wldr;
+    }
+    return isVolatile ? MOP_xldar : MOP_xldr;
+}
+}
+
+static bool IsZeroRegister(const Operand &opnd) {
+    if (!opnd.IsRegister()) {
+        return false;
+    }
+    const RegOperand *regOpnd = static_cast<const RegOperand*>(&opnd);
+    return regOpnd->GetRegisterNumber() == RZR;
+}
+
+void AArch64CGPeepHole::Run() {
+    bool optSuccess = false;
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS_SAFE(insn, bb, nextInsn) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            if (ssaInfo != nullptr) {
+                optSuccess = DoSSAOptimize(*bb, *insn);
+            } else {
+                DoNormalOptimize(*bb, *insn);
+            }
+        }
+    }
+    if (optSuccess) {
+        Run();
+    }
+}
+
+bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) {
+    MOperator thisMop = insn.GetMachineOpcode();
+    manager = peepMemPool->New<PeepOptimizeManager>(*cgFunc, bb, insn, *ssaInfo);
+    switch (thisMop) {
+        case MOP_xandrrr:
+        case MOP_wandrrr: {
+            manager->Optimize<MvnAndToBicPattern>(true);
+            break;
+        }
+        case MOP_wiorrri12:
+        case MOP_xiorrri13: {
+            manager->Optimize(true);
+            break;
+        }
+        case MOP_wcbz:
+        case MOP_xcbz:
+        case MOP_wcbnz:
+        case MOP_xcbnz: {
+            manager->Optimize<CsetCbzToBeqPattern>(true);
+            manager->Optimize<AndCbzToTbzPattern>(true);
+            manager->Optimize<OneHoleBranchPattern>(true);
+            break;
+        }
+        case MOP_beq:
+        case MOP_bne: {
+            manager->Optimize<AndCmpBranchesToTbzPattern>(true);
+            break;
+        }
+        case MOP_wcsetrc:
+        case MOP_xcsetrc: {
+            manager->Optimize<ContinuousCmpCsetPattern>(true);
+            break;
+        }
+        case MOP_waddrrr:
+        case MOP_xaddrrr:
+        case MOP_dadd:
+        case MOP_sadd:
+        case MOP_wsubrrr:
+        case MOP_xsubrrr:
+        case MOP_dsub:
+        case MOP_ssub:
+        case MOP_xinegrr:
+        case MOP_winegrr:
+        case MOP_wfnegrr:
+        case MOP_xfnegrr: {
+            manager->Optimize<SimplifyMulArithmeticPattern>(true);
+            break;
+        }
+        case MOP_wandrri12:
+        case MOP_xandrri13: {
+            manager->Optimize<LsrAndToUbfxPattern>(true);
+            break;
+        }
+        case MOP_wcselrrrc:
+        case MOP_xcselrrrc: {
+            manager->Optimize<CselToCsetPattern>(true);
+            break;
+        }
+        case MOP_wiorrrr:
+        case MOP_xiorrrr:
+        case MOP_wiorrrrs:
+        case MOP_xiorrrrs: {
+            manager->Optimize<LogicShiftAndOrrToExtrPattern>(true);
+            break;
+        }
+        case MOP_bge:
+        case MOP_ble:
+        case MOP_blt:
+        case MOP_bgt: {
+            manager->Optimize<ZeroCmpBranchesToTbzPattern>(true);
+            break;
+        }
+        case MOP_wcmprr:
+        case MOP_xcmprr: {
+            manager->Optimize<NegCmpToCmnPattern>(true);
+            break;
+        }
+        case MOP_xlslrri6: {
+            manager->Optimize<ExtLslToBitFieldInsertPattern>();
+            manager->Optimize<CombineSameArithmeticPattern>(true);
+            break;
+        }
+        case MOP_xsxtb32:
+        case MOP_xsxtb64:
+        case MOP_xsxth32:
+        case MOP_xsxth64:
+        case MOP_xsxtw64:
+        case MOP_xuxtb32:
+        case MOP_xuxth32:
+        case MOP_xuxtw64: {
+            manager->Optimize<ElimSpecificExtensionPattern>(true);
+            break;
+        }
+        case MOP_wlsrrri5:
+        case MOP_xlsrrri6:
+        case MOP_wasrrri5:
+        case MOP_xasrrri6:
+        case MOP_wlslrri5:
+        case MOP_waddrri12:
+        case MOP_xaddrri12:
+        case MOP_wsubrri12:
+        case MOP_xsubrri12: {
+            manager->Optimize<CombineSameArithmeticPattern>(true);
+            break;
+        }
+        case MOP_wubfxrri5i5:
+        case MOP_xubfxrri6i6: {
+            manager->Optimize(true);
+            break;
+        }
+        default:
+            break;
+    }
+    return manager->OptSuccess();
+}
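+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the checks below
+ * (not part of the original patch):
+ *   cmp  w0, w1
+ *   cset w2, LT        <-- prevCsetInsn1
+ *   cmp  w2, #0        <-- prevCmpInsn
+ *   cset w3, NE        ===>  mov  w3, w2   (NE on a 0/1 value yields the value itself)
+ *   cset w3, EQ        ===>  cset w3, GE   (EQ on a 0/1 value inverts the condition)
+ */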
+bool ContinuousCmpCsetPattern::CheckCondCode(const CondOperand &condOpnd) const {
+    switch (condOpnd.GetCode()) {
+        case CC_NE:
+        case CC_EQ:
+        case CC_LT:
+        case CC_GE:
+        case CC_GT:
+        case CC_LE:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool ContinuousCmpCsetPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) {
+        return false;
+    }
+    auto &condOpnd = static_cast<CondOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (condOpnd.GetCode() != CC_NE && condOpnd.GetCode() != CC_EQ) {
+        return false;
+    }
+    reverse = (condOpnd.GetCode() == CC_EQ);
+    auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    prevCmpInsn = GetDefInsn(ccReg);
+    if (prevCmpInsn == nullptr) {
+        return false;
+    }
+    MOperator prevCmpMop = prevCmpInsn->GetMachineOpcode();
+    if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) {
+        return false;
+    }
+    if (!static_cast<ImmOperand&>(prevCmpInsn->GetOperand(kInsnThirdOpnd)).IsZero()) {
+        return false;
+    }
+    auto &cmpCCReg = static_cast<RegOperand&>(prevCmpInsn->GetOperand(kInsnFirstOpnd));
+    InsnSet useSet = GetAllUseInsn(cmpCCReg);
+    if (useSet.size() > 1) {
+        return false;
+    }
+    auto &cmpUseReg = static_cast<RegOperand&>(prevCmpInsn->GetOperand(kInsnSecondOpnd));
+    prevCsetInsn1 = GetDefInsn(cmpUseReg);
+    if (prevCsetInsn1 == nullptr) {
+        return false;
+    }
+    MOperator prevCsetMop1 = prevCsetInsn1->GetMachineOpcode();
+    if (prevCsetMop1 != MOP_wcsetrc && prevCsetMop1 != MOP_xcsetrc) {
+        return false;
+    }
+    auto &condOpnd1 = static_cast<CondOperand&>(prevCsetInsn1->GetOperand(kInsnSecondOpnd));
+    if (!CheckCondCode(condOpnd1)) {
+        return false;
+    }
+    auto &ccReg1 = static_cast<RegOperand&>(prevCsetInsn1->GetOperand(kInsnThirdOpnd));
+    prevCmpInsn1 = GetDefInsn(ccReg1);
+    if (prevCmpInsn1 == nullptr) {
+        return false;
+    }
+    if (IsCCRegCrossVersion(*prevCsetInsn1, *prevCmpInsn, ccReg1)) {
+        return false;
+    }
+    return true;
+}
+
+void ContinuousCmpCsetPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    MOperator curMop = insn.GetMachineOpcode();
+    Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd);
+    Insn *newCsetInsn = nullptr;
+    if (reverse) {
+        MOperator prevCsetMop = prevCsetInsn1->GetMachineOpcode();
+        auto &prevCsetCondOpnd = static_cast<CondOperand&>(prevCsetInsn1->GetOperand(kInsnSecondOpnd));
+        CondOperand &newCondOpnd = aarFunc->GetCondOperand(GetReverseBasicCC(prevCsetCondOpnd.GetCode()));
+        regno_t tmpRegNO = 0;
+        auto *tmpDefOpnd = aarFunc->CreateVirtualRegisterOperand(tmpRegNO,
+            resOpnd.GetSize(), static_cast<RegOperand&>(resOpnd).GetRegisterType());
+        tmpDefOpnd->SetValidBitsNum(k1BitSize);
+        newCsetInsn = &cgFunc->GetInsnBuilder()->BuildInsn(
+            prevCsetMop, *tmpDefOpnd, newCondOpnd, prevCsetInsn1->GetOperand(kInsnThirdOpnd));
+        BB *prevCsetBB = prevCsetInsn1->GetBB();
+        (void)prevCsetBB->InsertInsnAfter(*prevCsetInsn1, *newCsetInsn);
+        /* update ssa info */
+        auto *a64SSAInfo = static_cast<AArch64CGSSAInfo*>(ssaInfo);
+        a64SSAInfo->CreateNewInsnSSAInfo(*newCsetInsn);
+        /* dump pattern info */
+        if (CG_PEEP_DUMP) {
+            std::vector<Insn*> prevs;
+            prevs.emplace_back(prevCmpInsn1);
+            prevs.emplace_back(&insn);
+            DumpAfterPattern(prevs, prevCmpInsn, newCsetInsn);
+        }
+    }
+    MOperator newMop = (curMop == MOP_wcsetrc) ? MOP_wmovrr : MOP_xmovrr;
+    Insn *newInsn = nullptr;
+    if (newCsetInsn == nullptr) {
+        newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(
+            newMop, insn.GetOperand(kInsnFirstOpnd), prevCsetInsn1->GetOperand(kInsnFirstOpnd));
+    } else {
+        newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(
+            newMop, insn.GetOperand(kInsnFirstOpnd), newCsetInsn->GetOperand(kInsnFirstOpnd));
+    }
+    if (newInsn == nullptr) {
+        return;
+    }
+    bb.ReplaceInsn(insn, *newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, *newInsn);
+    optSuccess = true;
+    SetCurrInsn(newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevCmpInsn1);
+        prevs.emplace_back(prevCsetInsn1);
+        if (newCsetInsn == nullptr) {
+            (void)prevs.emplace_back(prevCmpInsn);
+        } else {
+            (void)prevs.emplace_back(newCsetInsn);
+        }
+        DumpAfterPattern(prevs, &insn, newInsn);
+    }
+}
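+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   neg w1, w2         <-- prevInsn (optionally with a shifted operand)
+ *   cmp w0, w1         ===>  cmn w0, w2
+ * cmp x, -y and cmn x, y agree on all flags except the carry-based unsigned
+ * conditions, so the rewrite is rejected when any user of the flags tests HI or LS.
+ */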
+bool NegCmpToCmnPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcmprr && curMop != MOP_xcmprr) {
+        return false;
+    }
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    prevInsn = GetDefInsn(useReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_winegrr && prevMop != MOP_xinegrr &&
+        prevMop != MOP_winegrrs && prevMop != MOP_xinegrrs) {
+        return false;
+    }
+    auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    InsnSet useInsns = GetAllUseInsn(ccReg);
+    for (auto *useInsn : useInsns) {
+        if (useInsn == nullptr) {
+            continue;
+        }
+        MOperator useMop = useInsn->GetMachineOpcode();
+        if (useMop == MOP_bhi || useMop == MOP_bls) {
+            return false;
+        }
+        bool findUnsignedCond = false;
+        for (size_t i = 0; i < useInsn->GetOperandSize(); ++i) {
+            if (useInsn->GetOperand(i).GetKind() == Operand::kOpdCond) {
+                ConditionCode cond = static_cast<CondOperand&>(useInsn->GetOperand(i)).GetCode();
+                if (cond == CC_HI || cond == CC_LS) {
+                    findUnsignedCond = true;
+                    break;
+                }
+            }
+        }
+        if (findUnsignedCond) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void NegCmpToCmnPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    Operand &opnd1 = insn.GetOperand(kInsnSecondOpnd);
+    Operand &opnd2 = prevInsn->GetOperand(kInsnSecondOpnd);
+    auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    MOperator currMop = insn.GetMachineOpcode();
+    Insn *newInsn = nullptr;
+    if (prevMop == MOP_winegrr || prevMop == MOP_xinegrr) {
+        MOperator newMop = (currMop == MOP_wcmprr) ? MOP_wcmnrr : MOP_xcmnrr;
+        newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, ccReg, opnd1, opnd2));
+    } else {
+        /* prevMop == MOP_winegrrs || prevMop == MOP_xinegrrs */
+        MOperator newMop = (currMop == MOP_wcmprr) ? MOP_wcmnrrs : MOP_xcmnrrs;
+        Operand &shiftOpnd = prevInsn->GetOperand(kInsnThirdOpnd);
+        newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, ccReg, opnd1, opnd2, shiftOpnd));
+    }
+    CHECK_FATAL(newInsn != nullptr, "must create newInsn");
+    bb.ReplaceInsn(insn, *newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, *newInsn);
+    optSuccess = true;
+    SetCurrInsn(newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, newInsn);
+    }
+}
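+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   cset w0, NE        <-- prevInsn
+ *   cbz  w0, .L        ===>  beq .L   (branch on the inverted condition)
+ *   cbnz w0, .L        ===>  bne .L   (branch on the same condition)
+ */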
+bool CsetCbzToBeqPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) {
+        return false;
+    }
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    prevInsn = GetDefInsn(useReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_wcsetrc && prevMop != MOP_xcsetrc) {
+        return false;
+    }
+    auto &ccReg = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+    if (IsCCRegCrossVersion(*prevInsn, insn, ccReg)) {
+        return false;
+    }
+    return true;
+}
+
+MOperator CsetCbzToBeqPattern::SelectNewMop(ConditionCode condCode, bool inverse) const {
+    switch (condCode) {
+        case CC_NE:
+            return inverse ? MOP_beq : MOP_bne;
+        case CC_EQ:
+            return inverse ? MOP_bne : MOP_beq;
+        case CC_MI:
+            return inverse ? MOP_bpl : MOP_bmi;
+        case CC_PL:
+            return inverse ? MOP_bmi : MOP_bpl;
+        case CC_VS:
+            return inverse ? MOP_bvc : MOP_bvs;
+        case CC_VC:
+            return inverse ? MOP_bvs : MOP_bvc;
+        case CC_HI:
+            return inverse ? MOP_bls : MOP_bhi;
+        case CC_LS:
+            return inverse ? MOP_bhi : MOP_bls;
+        case CC_GE:
+            return inverse ? MOP_blt : MOP_bge;
+        case CC_LT:
+            return inverse ? MOP_bge : MOP_blt;
+        case CC_HS:
+            return inverse ? MOP_blo : MOP_bhs;
+        case CC_LO:
+            return inverse ? MOP_bhs : MOP_blo;
+        case CC_LE:
+            return inverse ? MOP_bgt : MOP_ble;
+        case CC_GT:
+            return inverse ? MOP_ble : MOP_bgt;
+        case CC_CS:
+            return inverse ? MOP_bcc : MOP_bcs;
+        default:
+            return MOP_undef;
+    }
+}
+
+void CsetCbzToBeqPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator curMop = insn.GetMachineOpcode();
+    bool reverse = (curMop == MOP_wcbz || curMop == MOP_xcbz);
+    auto &labelOpnd = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &condOpnd = static_cast<CondOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    MOperator newMop = SelectNewMop(condOpnd.GetCode(), reverse);
+    DEBUG_ASSERT(newMop != MOP_undef, "unknown condition code");
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnThirdOpnd), labelOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
+
+bool ExtLslToBitFieldInsertPattern::CheckCondition(Insn &insn) {
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    prevInsn = GetDefInsn(useReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_xsxtw64 && prevMop != MOP_xuxtw64) {
+        return false;
+    }
+    auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    if (immOpnd.GetValue() > k32BitSize) {
+        return false;
+    }
+    return true;
+}
+
+void ExtLslToBitFieldInsertPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto &prevSrcReg = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    cgFunc->InsertExtendSet(prevSrcReg.GetRegisterNumber());
+    MOperator newMop = (prevInsn->GetMachineOpcode() == MOP_xsxtw64) ? MOP_xsbfizrri6i6 : MOP_xubfizrri6i6;
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    auto &newImmOpnd1 = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    ImmOperand &newImmOpnd2 = aarFunc->CreateImmOperand(k32BitSize, k6BitSize, false);
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(
+        newMop, insn.GetOperand(kInsnFirstOpnd), prevSrcReg, newImmOpnd1, newImmOpnd2);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
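+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   mov  w1, #1
+ *   mov  w2, #0
+ *   csel w0, w1, w2, CC   ===>  cset w0, CC
+ * with the constants swapped, the select becomes a cset on the inverted condition.
+ */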
+bool CselToCsetPattern::IsOpndDefByZero(const Insn &insn) const {
+    MOperator movMop = insn.GetMachineOpcode();
+    switch (movMop) {
+        case MOP_xmovrr:
+        case MOP_wmovrr: {
+            return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd));
+        }
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnSecondOpnd));
+            return immOpnd.GetValue() == 0;
+        }
+        default:
+            return false;
+    }
+}
+
+bool CselToCsetPattern::IsOpndDefByOne(const Insn &insn) const {
+    MOperator movMop = insn.GetMachineOpcode();
+    if ((movMop != MOP_wmovri32) && (movMop != MOP_xmovri64)) {
+        return false;
+    }
+    auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    return immOpnd.GetValue() == 1;
+}
+
+bool CselToCsetPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcselrrrc && curMop != MOP_xcselrrrc) {
+        return false;
+    }
+    auto &useOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    prevMovInsn1 = GetDefInsn(useOpnd1);
+    if (prevMovInsn1 == nullptr) {
+        return false;
+    }
+    MOperator prevMop1 = prevMovInsn1->GetMachineOpcode();
+    if (prevMop1 != MOP_wmovri32 && prevMop1 != MOP_xmovri64 &&
+        prevMop1 != MOP_wmovrr && prevMop1 != MOP_xmovrr) {
+        return false;
+    }
+    auto &useOpnd2 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    prevMovInsn2 = GetDefInsn(useOpnd2);
+    if (prevMovInsn2 == nullptr) {
+        return false;
+    }
+    MOperator prevMop2 = prevMovInsn2->GetMachineOpcode();
+    if (prevMop2 != MOP_wmovri32 && prevMop2 != MOP_xmovri64 &&
+        prevMop2 != MOP_wmovrr && prevMop2 != MOP_xmovrr) {
+        return false;
+    }
+    return true;
+}
+
+void CselToCsetPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    MOperator newMop = (dstOpnd.GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc);
+    Operand &condOpnd = insn.GetOperand(kInsnFourthOpnd);
+    Operand &rflag = insn.GetOperand(kInsnFifthOpnd);
+    Insn *newInsn = nullptr;
+    if (IsOpndDefByOne(*prevMovInsn1) && IsOpndDefByZero(*prevMovInsn2)) {
+        newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, dstOpnd, condOpnd, rflag));
+    } else if (IsOpndDefByZero(*prevMovInsn1) && IsOpndDefByOne(*prevMovInsn2)) {
+        auto &origCondOpnd = static_cast<CondOperand&>(condOpnd);
+        ConditionCode inverseCondCode = GetReverseCC(origCondOpnd.GetCode());
+        if (inverseCondCode == kCcLast) {
+            return;
+        }
+        auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+        CondOperand &inverseCondOpnd = aarFunc->GetCondOperand(inverseCondCode);
+        newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, dstOpnd, inverseCondOpnd, rflag));
+    }
+    if (newInsn == nullptr) {
+        return;
+    }
+    bb.ReplaceInsn(insn, *newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, *newInsn);
+    optSuccess = true;
+    SetCurrInsn(newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevMovInsn1);
+        prevs.emplace_back(prevMovInsn2);
+        DumpAfterPattern(prevs, &insn, newInsn);
+    }
+}
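+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   and w0, w1, #4     <-- single-bit mask (1 << 2)
+ *   cmp w0, #0
+ *   beq .L             ===>  tbz  w1, #2, .L
+ *   bne .L             ===>  tbnz w1, #2, .L
+ * when the cmp immediate equals the same single-bit mask, beq/bne map to
+ * tbnz/tbz instead.
+ */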
+bool AndCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) {
+    MOperator curMop = currInsn.GetMachineOpcode();
+    MOperator prevAndMop = prevAndInsn->GetMachineOpcode();
+    auto &andImmOpnd = static_cast<ImmOperand&>(prevAndInsn->GetOperand(kInsnThirdOpnd));
+    auto &cmpImmOpnd = static_cast<ImmOperand&>(prevCmpInsn->GetOperand(kInsnThirdOpnd));
+    if (cmpImmOpnd.GetValue() == 0) {
+        tbzImmVal = GetLogValueAtBase2(andImmOpnd.GetValue());
+        if (tbzImmVal < 0) {
+            return false;
+        }
+        switch (curMop) {
+            case MOP_beq:
+                newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbz : MOP_xtbz;
+                break;
+            case MOP_bne:
+                newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbnz : MOP_xtbnz;
+                break;
+            default:
+                return false;
+        }
+    } else {
+        tbzImmVal = GetLogValueAtBase2(andImmOpnd.GetValue());
+        int64 tmpVal = GetLogValueAtBase2(cmpImmOpnd.GetValue());
+        if (tbzImmVal < 0 || tmpVal < 0 || tbzImmVal != tmpVal) {
+            return false;
+        }
+        switch (curMop) {
+            case MOP_beq:
+                newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbnz : MOP_xtbnz;
+                break;
+            case MOP_bne:
+                newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbz : MOP_xtbz;
+                break;
+            default:
+                return false;
+        }
+    }
+    return true;
+}
+
+bool AndCmpBranchesToTbzPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_beq && curMop != MOP_bne) {
+        return false;
+    }
+    auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    prevCmpInsn = GetDefInsn(ccReg);
+    if (prevCmpInsn == nullptr) {
+        return false;
+    }
+    MOperator prevCmpMop = prevCmpInsn->GetMachineOpcode();
+    if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) {
+        return false;
+    }
+    auto &cmpUseReg = static_cast<RegOperand&>(prevCmpInsn->GetOperand(kInsnSecondOpnd));
+    prevAndInsn = GetDefInsn(cmpUseReg);
+    if (prevAndInsn == nullptr) {
+        return false;
+    }
+    MOperator prevAndMop = prevAndInsn->GetMachineOpcode();
+    if (prevAndMop != MOP_wandrri12 && prevAndMop != MOP_xandrri13) {
+        return false;
+    }
+    CHECK_FATAL(prevAndInsn->GetOperand(kInsnFirstOpnd).GetSize() ==
+                prevCmpInsn->GetOperand(kInsnSecondOpnd).GetSize(), "def-use reg size must be same based-on ssa");
+    if (!CheckAndSelectPattern(insn)) {
+        return false;
+    }
+    return true;
+}
+
+void AndCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    auto &labelOpnd = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    ImmOperand &tbzImmOpnd = aarFunc->CreateImmOperand(tbzImmVal, k8BitSize, false);
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(
+        newMop, prevAndInsn->GetOperand(kInsnSecondOpnd), tbzImmOpnd, labelOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevAndInsn);
+        prevs.emplace_back(prevCmpInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
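+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch): a signed comparison against zero reduces to
+ * a sign-bit test, e.g.
+ *   cmp w0, #0
+ *   blt .L             ===>  tbnz w0, #31, .L
+ *   bge .L             ===>  tbz  w0, #31, .L
+ * the register-register forms against wzr/xzr (bgt/ble) are handled analogously.
+ */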
+bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) {
+    MOperator currMop = currInsn.GetMachineOpcode();
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    switch (prevMop) {
+        case MOP_wcmpri:
+        case MOP_xcmpri: {
+            regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+            auto &immOpnd = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+            if (immOpnd.GetValue() != 0) {
+                return false;
+            }
+            switch (currMop) {
+                case MOP_bge:
+                    newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz;
+                    break;
+                case MOP_blt:
+                    newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz;
+                    break;
+                default:
+                    return false;
+            }
+            break;
+        }
+        case MOP_wcmprr:
+        case MOP_xcmprr: {
+            auto &regOpnd0 = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+            auto &regOpnd1 = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+            if (!IsZeroRegister(regOpnd0) && !IsZeroRegister(regOpnd1)) {
+                return false;
+            }
+            switch (currMop) {
+                case MOP_bge:
+                    if (IsZeroRegister(regOpnd1)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+                        newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz;
+                    } else {
+                        return false;
+                    }
+                    break;
+                case MOP_ble:
+                    if (IsZeroRegister(regOpnd0)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+                        newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz;
+                    } else {
+                        return false;
+                    }
+                    break;
+                case MOP_blt:
+                    if (IsZeroRegister(regOpnd1)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+                        newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz;
+                    } else {
+                        return false;
+                    }
+                    break;
+                case MOP_bgt:
+                    if (IsZeroRegister(regOpnd0)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+                        newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz;
+                    } else {
+                        return false;
+                    }
+                    break;
+                default:
+                    return false;
+            }
+            break;
+        }
+        // fall through
+        [[clang::fallthrough]];
+        default:
+            return false;
+    }
+    return true;
+}
+
+bool ZeroCmpBranchesToTbzPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_bge && curMop != MOP_ble && curMop != MOP_blt && curMop != MOP_bgt) {
+        return false;
+    }
+    CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsLabel(), "must be labelOpnd");
+    auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    prevInsn = GetDefInsn(ccReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_wcmpri && prevMop != MOP_xcmpri && prevMop != MOP_wcmprr && prevMop != MOP_xcmprr) {
+        return false;
+    }
+    if (!CheckAndSelectPattern(insn)) {
+        return false;
+    }
+    return true;
+}
+
+void ZeroCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    CHECK_FATAL(regOpnd != nullptr, "must have regOpnd");
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    ImmOperand &bitOpnd = aarFunc->CreateImmOperand(
+        (regOpnd->GetSize() <= k32BitSize) ? (k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false);
+    auto &labelOpnd = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *static_cast<RegOperand*>(regOpnd), bitOpnd, labelOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
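+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   lsr w0, w1, #a
+ *   and w2, w0, #((1 << b) - 1)   ===>  ubfx w2, w1, #a, #b
+ * a right shift followed by a contiguous low-bits mask is a bitfield extract,
+ * provided a + b does not exceed the register width.
+ */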
+bool LsrAndToUbfxPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wandrri12 && curMop != MOP_xandrri13) {
+        return false;
+    }
+    int64 immValue = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd)).GetValue();
+    /* the and-immediate value must be (1 << n) - 1 */
+    if (immValue <= 0 ||
+        (((static_cast<uint64>(immValue)) & (static_cast<uint64>(immValue) + 1)) != 0)) {
+        return false;
+    }
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    prevInsn = GetDefInsn(useReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6) {
+        return false;
+    }
+    auto &prevDstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    auto &currUseOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    /* check def-use reg size found by ssa */
+    CHECK_FATAL(prevDstOpnd.GetSize() == currUseOpnd.GetSize(), "def-use reg size must be same");
+    auto &andDstReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    VRegVersion *andDstVersion = ssaInfo->FindSSAVersion(andDstReg.GetRegisterNumber());
+    DEBUG_ASSERT(andDstVersion != nullptr, "find destReg Version failed");
+    for (auto useDUInfoIt : andDstVersion->GetAllUseInsns()) {
+        if (useDUInfoIt.second == nullptr) {
+            continue;
+        }
+        Insn *useInsn = (useDUInfoIt.second)->GetInsn();
+        if (useInsn == nullptr) {
+            continue;
+        }
+        MOperator useMop = useInsn->GetMachineOpcode();
+        /* combine [and & cbz --> tbz] first, to eliminate more insns because of incomplete copy prop */
+        if (useMop == MOP_wcbz || useMop == MOP_xcbz || useMop == MOP_wcbnz || useMop == MOP_xcbnz) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void LsrAndToUbfxPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    bool is64Bits = (static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize);
+    Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd);
+    Operand &srcOpnd = prevInsn->GetOperand(kInsnSecondOpnd);
+    int64 immVal1 = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue();
+    Operand &immOpnd1 = is64Bits ? aarFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false)
+                                 : aarFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false);
+    int64 tmpVal = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd)).GetValue();
+    int64 immVal2 = __builtin_ffsll(tmpVal + 1) - 1;
+    if ((immVal2 < k1BitSize) || (is64Bits && (immVal1 + immVal2) > k64BitSize) ||
+        (!is64Bits && (immVal1 + immVal2) > k32BitSize)) {
+        return;
+    }
+    Operand &immOpnd2 = is64Bits ? aarFunc->CreateImmOperand(immVal2, kMaxImmVal6Bits, false)
+                                 : aarFunc->CreateImmOperand(immVal2, kMaxImmVal5Bits, false);
+    MOperator newMop = (is64Bits ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5);
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, srcOpnd, immOpnd1, immOpnd2);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
+
+bool MvnAndToBicPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wandrrr && curMop != MOP_xandrrr) {
+        return false;
+    }
+    auto &useReg1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &useReg2 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    prevInsn1 = GetDefInsn(useReg1);
+    prevInsn2 = GetDefInsn(useReg2);
+    MOperator mop = insn.GetMachineOpcode();
+    MOperator desMop = mop == MOP_xandrrr ? MOP_xnotrr : MOP_wnotrr;
+    op1IsMvnDef = prevInsn1 != nullptr && prevInsn1->GetMachineOpcode() == desMop;
+    op2IsMvnDef = prevInsn2 != nullptr && prevInsn2->GetMachineOpcode() == desMop;
+    if (op1IsMvnDef || op2IsMvnDef) {
+        return true;
+    }
+    return false;
+}
+
+void MvnAndToBicPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator newMop = insn.GetMachineOpcode() == MOP_xandrrr ? MOP_xbicrrr : MOP_wbicrrr;
+    Insn *prevInsn = op1IsMvnDef ? prevInsn1 : prevInsn2;
+    auto &prevOpnd1 = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    auto &opnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &opnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &opnd2 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(
+        newMop, opnd0, op1IsMvnDef ? opnd2 : opnd1, prevOpnd1);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    bb.ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
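+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   and w0, w1, #(1 << n)
+ *   cbz  w0, .L        ===>  tbz  w1, #n, .L
+ *   cbnz w0, .L        ===>  tbnz w1, #n, .L
+ * this pattern works both in SSA form (via def-use) and as a plain peephole on
+ * adjacent instructions.
+ */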
+bool AndCbzToTbzPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) {
+        return false;
+    }
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    prevInsn = ssaInfo ? GetDefInsn(useReg) : insn.GetPreviousMachineInsn();
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_wandrri12 && prevMop != MOP_xandrri13) {
+        return false;
+    }
+    if (!ssaInfo && (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd)))) {
+        return false;
+    }
+    return true;
+}
+
+void AndCbzToTbzPattern::Run(BB &bb, Insn &insn) {
+    auto *aarchFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto &andImm = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+    int64 tbzVal = GetLogValueAtBase2(andImm.GetValue());
+    if (tbzVal == -1) {
+        return;
+    }
+    MOperator mOp = insn.GetMachineOpcode();
+    MOperator newMop = MOP_undef;
+    switch (mOp) {
+        case MOP_wcbz:
+            newMop = MOP_wtbz;
+            break;
+        case MOP_wcbnz:
+            newMop = MOP_wtbnz;
+            break;
+        case MOP_xcbz:
+            newMop = MOP_xtbz;
+            break;
+        case MOP_xcbnz:
+            newMop = MOP_xtbnz;
+            break;
+        default:
+            CHECK_FATAL(false, "must be cbz/cbnz");
+            break;
+    }
+    auto &labelOpnd = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    ImmOperand &tbzImm = aarchFunc->CreateImmOperand(tbzVal, k8BitSize, false);
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnSecondOpnd),
+                                                        tbzImm, labelOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    if (ssaInfo) {
+        /* update ssa info */
+        ssaInfo->ReplaceInsn(insn, newInsn);
+    }
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
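+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch): two identical shift- or add/sub-immediate
+ * operations fold into one, e.g.
+ *   lsr w0, w1, #2
+ *   lsr w2, w0, #3     ===>  lsr w2, w1, #5
+ *   add w0, w1, #8
+ *   add w2, w0, #4     ===>  add w2, w1, #12
+ * as long as the combined immediate stays legal for the opcode.
+ */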
+bool CombineSameArithmeticPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (std::find(validMops.begin(), validMops.end(), curMop) == validMops.end()) {
+        return false;
+    }
+    Operand &useOpnd = insn.GetOperand(kInsnSecondOpnd);
+    CHECK_FATAL(useOpnd.IsRegister(), "expect regOpnd");
+    prevInsn = GetDefInsn(static_cast<RegOperand&>(useOpnd));
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    if (prevInsn->GetMachineOpcode() != curMop) {
+        return false;
+    }
+    auto &prevDefOpnd = prevInsn->GetOperand(kInsnFirstOpnd);
+    CHECK_FATAL(prevDefOpnd.IsRegister(), "expect regOpnd");
+    InsnSet useInsns = GetAllUseInsn(static_cast<RegOperand&>(prevDefOpnd));
+    if (useInsns.size() > 1) {
+        return false;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    CHECK_FATAL(prevInsn->GetOperand(kInsnThirdOpnd).IsIntImmediate(), "expect immOpnd");
+    CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsIntImmediate(), "expect immOpnd");
+    auto &prevImmOpnd = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+    auto &curImmOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    int64 prevImm = prevImmOpnd.GetValue();
+    int64 curImm = curImmOpnd.GetValue();
+    newImmOpnd = &aarFunc->CreateImmOperand(prevImmOpnd.GetValue() + curImmOpnd.GetValue(),
+                                            curImmOpnd.GetSize(), curImmOpnd.IsSignedValue());
+    switch (curMop) {
+        case MOP_wlsrrri5:
+        case MOP_wasrrri5:
+        case MOP_wlslrri5: {
+            if ((prevImm + curImm) < k0BitSizeInt || (prevImm + curImm) >= k32BitSizeInt) {
+                return false;
+            }
+            break;
+        }
+        case MOP_xlsrrri6:
+        case MOP_xasrrri6:
+        case MOP_xlslrri6: {
+            if ((prevImm + curImm) < k0BitSizeInt || (prevImm + curImm) >= k64BitSizeInt) {
+                return false;
+            }
+            break;
+        }
+        case MOP_waddrri12:
+        case MOP_xaddrri12:
+        case MOP_wsubrri12:
+        case MOP_xsubrri12: {
+            if (!newImmOpnd->IsSingleInstructionMovable()) {
+                return false;
+            }
+            break;
+        }
+        default:
+            return false;
+    }
+    return true;
+}
+
+void CombineSameArithmeticPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(),
+                                                        insn.GetOperand(kInsnFirstOpnd),
+                                                        prevInsn->GetOperand(kInsnSecondOpnd),
+                                                        *newImmOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        (void)prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
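+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch):
+ *   lsr w0, w1, #k
+ *   lsl w2, w3, #(32 - k)
+ *   orr w4, w0, w2     ===>  extr w4, w3, w1, #k
+ * complementary shifts joined by orr form a double-register extract; the
+ * orr-with-shifted-operand encodings are matched the same way.
+ */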
+bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wiorrrr && curMop != MOP_xiorrrr && curMop != MOP_wiorrrrs && curMop != MOP_xiorrrrs) {
+        return false;
+    }
+    Operand &curDstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    is64Bits = (curDstOpnd.GetSize() == k64BitSize);
+    if (curMop == MOP_wiorrrr || curMop == MOP_xiorrrr) {
+        auto &useReg1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+        Insn *prevInsn1 = GetDefInsn(useReg1);
+        auto &useReg2 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+        Insn *prevInsn2 = GetDefInsn(useReg2);
+        if (prevInsn1 == nullptr || prevInsn2 == nullptr) {
+            return false;
+        }
+        MOperator prevMop1 = prevInsn1->GetMachineOpcode();
+        MOperator prevMop2 = prevInsn2->GetMachineOpcode();
+        if ((prevMop1 == MOP_wlsrrri5 || prevMop1 == MOP_xlsrrri6) &&
+            (prevMop2 == MOP_wlslrri5 || prevMop2 == MOP_xlslrri6)) {
+            prevLsrInsn = prevInsn1;
+            prevLslInsn = prevInsn2;
+        } else if ((prevMop2 == MOP_wlsrrri5 || prevMop2 == MOP_xlsrrri6) &&
+                   (prevMop1 == MOP_wlslrri5 || prevMop1 == MOP_xlslrri6)) {
+            prevLsrInsn = prevInsn2;
+            prevLslInsn = prevInsn1;
+        } else {
+            return false;
+        }
+        int64 prevLsrImmValue = static_cast<ImmOperand&>(prevLsrInsn->GetOperand(kInsnThirdOpnd)).GetValue();
+        int64 prevLslImmValue = static_cast<ImmOperand&>(prevLslInsn->GetOperand(kInsnThirdOpnd)).GetValue();
+        if ((prevLsrImmValue + prevLslImmValue) < 0) {
+            return false;
+        }
+        if ((is64Bits && (prevLsrImmValue + prevLslImmValue) != k64BitSize) ||
+            (!is64Bits && (prevLsrImmValue + prevLslImmValue) != k32BitSize)) {
+            return false;
+        }
+        shiftValue = prevLsrImmValue;
+    } else if (curMop == MOP_wiorrrrs || curMop == MOP_xiorrrrs) {
+        auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+        Insn *prevInsn = GetDefInsn(useReg);
+        if (prevInsn == nullptr) {
+            return false;
+        }
+        MOperator prevMop = prevInsn->GetMachineOpcode();
+        if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6 && prevMop != MOP_wlslrri5 && prevMop != MOP_xlslrri6) {
+            return false;
+        }
+        int64 prevImm = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue();
+        auto &shiftOpnd = static_cast<BitShiftOperand&>(insn.GetOperand(kInsnFourthOpnd));
+        uint32 shiftAmount = shiftOpnd.GetShiftAmount();
+        if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSL && (prevMop == MOP_wlsrrri5 || prevMop == MOP_xlsrrri6)) {
+            prevLsrInsn = prevInsn;
+            shiftValue = prevImm;
+        } else if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSR &&
+                   (prevMop == MOP_wlslrri5 || prevMop == MOP_xlslrri6)) {
+            prevLslInsn = prevInsn;
+            shiftValue = shiftAmount;
+        } else {
+            return false;
+        }
+        if (prevImm + static_cast<int64>(shiftAmount) < 0) {
+            return false;
+        }
+        if ((is64Bits && (prevImm + static_cast<int64>(shiftAmount)) != k64BitSize) ||
+            (!is64Bits && (prevImm + static_cast<int64>(shiftAmount)) != k32BitSize)) {
+            return false;
+        }
+    } else {
+        CHECK_FATAL(false, "must be above mop");
+        return false;
+    }
+    return true;
+}
+
+void LogicShiftAndOrrToExtrPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    Operand &opnd1 = (prevLslInsn == nullptr ? insn.GetOperand(kInsnThirdOpnd)
+                                             : prevLslInsn->GetOperand(kInsnSecondOpnd));
+    Operand &opnd2 = (prevLsrInsn == nullptr ? insn.GetOperand(kInsnThirdOpnd)
+                                             : prevLsrInsn->GetOperand(kInsnSecondOpnd));
+    ImmOperand &immOpnd = is64Bits ? aarFunc->CreateImmOperand(shiftValue, kMaxImmVal6Bits, false)
+                                   : aarFunc->CreateImmOperand(shiftValue, kMaxImmVal5Bits, false);
+    MOperator newMop = is64Bits ? MOP_xextrrrri6 : MOP_wextrrrri5;
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), opnd1, opnd2, immOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevLsrInsn);
+        prevs.emplace_back(prevLslInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
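+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch): a multiply feeding an add/sub/neg fuses into
+ * a single instruction, e.g.
+ *   mul w0, w1, w2
+ *   add w3, w0, w4     ===>  madd w3, w1, w2, w4
+ *   sub w3, w4, w0     ===>  msub w3, w1, w2, w4
+ *   neg w3, w0         ===>  mneg w3, w1, w2
+ * only the add forms fuse unconditionally; the sub/neg and floating-point forms
+ * are gated behind the fast-math option in CheckCondition.
+ */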
+void SimplifyMulArithmeticPattern::SetArithType(const Insn &currInsn) {
+    MOperator mOp = currInsn.GetMachineOpcode();
+    switch (mOp) {
+        case MOP_waddrrr:
+        case MOP_xaddrrr: {
+            arithType = kAdd;
+            isFloat = false;
+            break;
+        }
+        case MOP_dadd:
+        case MOP_sadd: {
+            arithType = kFAdd;
+            isFloat = true;
+            break;
+        }
+        case MOP_wsubrrr:
+        case MOP_xsubrrr: {
+            arithType = kSub;
+            isFloat = false;
+            validOpndIdx = kInsnThirdOpnd;
+            break;
+        }
+        case MOP_dsub:
+        case MOP_ssub: {
+            arithType = kFSub;
+            isFloat = true;
+            validOpndIdx = kInsnThirdOpnd;
+            break;
+        }
+        case MOP_xinegrr:
+        case MOP_winegrr: {
+            arithType = kNeg;
+            isFloat = false;
+            validOpndIdx = kInsnSecondOpnd;
+            break;
+        }
+        case MOP_wfnegrr:
+        case MOP_xfnegrr: {
+            arithType = kFNeg;
+            isFloat = true;
+            validOpndIdx = kInsnSecondOpnd;
+            break;
+        }
+        default: {
+            CHECK_FATAL(false, "must be above mop");
+            break;
+        }
+    }
+}
+
+bool SimplifyMulArithmeticPattern::CheckCondition(Insn &insn) {
+    if (arithType == kUndef || validOpndIdx < 0) {
+        return false;
+    }
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(static_cast<uint32>(validOpndIdx)));
+    prevInsn = GetDefInsn(useReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    regno_t useRegNO = useReg.GetRegisterNumber();
+    VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO);
+    if (useVersion->GetAllUseInsns().size() > 1) {
+        return false;
+    }
+    MOperator currMop = insn.GetMachineOpcode();
+    if (currMop == MOP_dadd || currMop == MOP_sadd || currMop == MOP_dsub || currMop == MOP_ssub ||
+        currMop == MOP_wfnegrr || currMop == MOP_xfnegrr) {
+        isFloat = true;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_wmulrrr && prevMop != MOP_xmulrrr && prevMop != MOP_xvmuld && prevMop != MOP_xvmuls) {
+        return false;
+    }
+    if (isFloat && (prevMop == MOP_wmulrrr || prevMop == MOP_xmulrrr)) {
+        return false;
+    }
+    if (!isFloat && (prevMop == MOP_xvmuld || prevMop == MOP_xvmuls)) {
+        return false;
+    }
+    if ((currMop == MOP_xaddrrr) || (currMop == MOP_waddrrr)) {
+        return true;
+    }
+    return CGOptions::IsFastMath();
+}
+
+void SimplifyMulArithmeticPattern::DoOptimize(BB &currBB, Insn &currInsn) {
+    Operand &resOpnd = currInsn.GetOperand(kInsnFirstOpnd);
+    Operand &opndMulOpnd1 = prevInsn->GetOperand(kInsnSecondOpnd);
+    Operand &opndMulOpnd2 = prevInsn->GetOperand(kInsnThirdOpnd);
+    bool is64Bits = (static_cast<RegOperand&>(resOpnd).GetSize() == k64BitSize);
+    /* may overflow */
+    if ((prevInsn->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64Bits) {
+        return;
+    }
+    MOperator newMop = is64Bits ? curMop2NewMopTable[arithType][1] : curMop2NewMopTable[arithType][0];
+    Insn *newInsn = nullptr;
+    if (arithType == kNeg || arithType == kFNeg) {
+        newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, opndMulOpnd1, opndMulOpnd2));
+    } else {
+        Operand &opnd3 = (validOpndIdx == kInsnSecondOpnd) ? currInsn.GetOperand(kInsnThirdOpnd)
+                                                           : currInsn.GetOperand(kInsnSecondOpnd);
+        newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, opndMulOpnd1, opndMulOpnd2, opnd3));
+    }
+    CHECK_FATAL(newInsn != nullptr, "must create newInsn");
+    currBB.ReplaceInsn(currInsn, *newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(currInsn, *newInsn);
+    optSuccess = true;
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &currInsn, newInsn);
+    }
+}
+
+void SimplifyMulArithmeticPattern::Run(BB &bb, Insn &insn) {
+    SetArithType(insn);
+    if (arithType == kAdd || arithType == kFAdd) {
+        validOpndIdx = kInsnSecondOpnd;
+        if (CheckCondition(insn)) {
+            DoOptimize(bb, insn);
+            return;
+        } else {
+            validOpndIdx = kInsnThirdOpnd;
+        }
+    }
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    DoOptimize(bb, insn);
+}
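+
+/*
+ * Added commentary, an illustrative sketch reconstructed from the pattern logic
+ * (not part of the original patch): a sign/zero extension is eliminated when its
+ * input already has the right form, in three scenes:
+ *   mov  w0, #42 ; sxtb w0, w0      ===>  mov w0, #42      (constant in range)
+ *   ldrb w0, [x1] ; sxtb w0, w0     ===>  ldrsb w0, [x1]   (fold ext into load)
+ *   sxtb w0, w1 ; sxtb w2, w0       ===>  mov w2, w0       (same ext twice)
+ */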
+void ElimSpecificExtensionPattern::SetSpecificExtType(const Insn &currInsn) {
+    MOperator mOp = currInsn.GetMachineOpcode();
+    switch (mOp) {
+        case MOP_xsxtb32: {
+            is64Bits = false;
+            extTypeIdx = SXTB;
+            break;
+        }
+        case MOP_xsxtb64: {
+            is64Bits = true;
+            extTypeIdx = SXTB;
+            break;
+        }
+        case MOP_xsxth32: {
+            is64Bits = false;
+            extTypeIdx = SXTH;
+            break;
+        }
+        case MOP_xsxth64: {
+            is64Bits = true;
+            extTypeIdx = SXTH;
+            break;
+        }
+        case MOP_xsxtw64: {
+            is64Bits = true;
+            extTypeIdx = SXTW;
+            break;
+        }
+        case MOP_xuxtb32: {
+            is64Bits = false;
+            extTypeIdx = UXTB;
+            break;
+        }
+        case MOP_xuxth32: {
+            is64Bits = false;
+            extTypeIdx = UXTH;
+            break;
+        }
+        case MOP_xuxtw64: {
+            is64Bits = true;
+            extTypeIdx = UXTW;
+            break;
+        }
+        default: {
+            extTypeIdx = EXTUNDEF;
+        }
+    }
+}
+
+void ElimSpecificExtensionPattern::SetOptSceneType() {
+    if (prevInsn->IsCall()) {
+        sceneType = kSceneMov;
+        return;
+    }
+    MOperator preMop = prevInsn->GetMachineOpcode();
+    switch (preMop) {
+        case MOP_wldr:
+        case MOP_wldrb:
+        case MOP_wldrsb:
+        case MOP_wldrh:
+        case MOP_wldrsh:
+        case MOP_xldrsw: {
+            sceneType = kSceneLoad;
+            break;
+        }
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            sceneType = kSceneMov;
+            break;
+        }
+        case MOP_xsxtb32:
+        case MOP_xsxtb64:
+        case MOP_xsxth32:
+        case MOP_xsxth64:
+        case MOP_xsxtw64:
+        case MOP_xuxtb32:
+        case MOP_xuxth32:
+        case MOP_xuxtw64: {
+            sceneType = kSceneSameExt;
+            break;
+        }
+        default: {
+            sceneType = kSceneUndef;
+        }
+    }
+}
+
+void ElimSpecificExtensionPattern::ReplaceExtWithMov(Insn &currInsn) {
+    auto &prevDstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    auto &currDstOpnd = static_cast<RegOperand&>(currInsn.GetOperand(kInsnFirstOpnd));
+    MOperator newMop = is64Bits ? MOP_xmovrr : MOP_wmovrr;
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, currDstOpnd, prevDstOpnd);
+    currBB->ReplaceInsn(currInsn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(currInsn, newInsn);
+    optSuccess = true;
+    /* dump pattern info */
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevInsn);
+        DumpAfterPattern(prevs, &currInsn, &newInsn);
+    }
+}
+
+void ElimSpecificExtensionPattern::ElimExtensionAfterMov(Insn &insn) {
+    if (&insn == currBB->GetFirstInsn()) {
+        return;
+    }
+    auto &prevDstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    auto &currDstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &currSrcOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) {
+        return;
+    }
+    MOperator currMop = insn.GetMachineOpcode();
+    /* example 2) [mov w0, R0] is the return value of a call whose return size already fits the extension */
+    if (prevInsn->IsCall() && (currSrcOpnd.GetRegisterNumber() == R0 || currSrcOpnd.GetRegisterNumber() == V0) &&
+        currDstOpnd.GetRegisterNumber() == currSrcOpnd.GetRegisterNumber()) {
+        uint32 retSize = prevInsn->GetRetSize();
+        if (retSize > 0 &&
+            ((currMop == MOP_xuxtb32 && retSize <= k1ByteSize) ||
+             (currMop == MOP_xuxth32 && retSize <= k2ByteSize) ||
+             (currMop == MOP_xuxtw64 && retSize <= k4ByteSize))) {
+            ReplaceExtWithMov(insn);
+        }
+        return;
+    }
+    if (prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) {
+        return;
+    }
+    auto &immMovOpnd = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    int64 value = immMovOpnd.GetValue();
+    uint64 minRange = extValueRangeTable[extTypeIdx][0];
+    uint64 maxRange = extValueRangeTable[extTypeIdx][1];
+    if (currMop == MOP_xsxtb32 || currMop == MOP_xsxth32) {
+        /* value should be in valid range */
+        if (static_cast<uint64>(value) >= minRange && static_cast<uint64>(value) <= maxRange &&
+            immMovOpnd.IsSingleInstructionMovable(currDstOpnd.GetSize())) {
+            ReplaceExtWithMov(insn);
+        }
+    } else if (currMop == MOP_xuxtb32 || currMop == MOP_xuxth32) {
+        if (!(static_cast<uint64>(value) & minRange)) {
+            ReplaceExtWithMov(insn);
+        }
+    } else if (currMop == MOP_xuxtw64) {
+        ReplaceExtWithMov(insn);
+    } else {
+        /* MOP_xsxtb64 & MOP_xsxth64 & MOP_xsxtw64 */
+        if (!(static_cast<uint64>(value) & minRange) && immMovOpnd.IsSingleInstructionMovable(currDstOpnd.GetSize())) {
+            ReplaceExtWithMov(insn);
+        }
+    }
+}
+
+bool ElimSpecificExtensionPattern::IsValidLoadExtPattern(Insn &currInsn, MOperator oldMop, MOperator newMop) const {
+    if (oldMop == newMop) {
+        return true;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    auto *memOpnd = static_cast<MemOperand*>(prevInsn->GetMemOpnd());
+    DEBUG_ASSERT(!prevInsn->IsStorePair(), "do not do ElimSpecificExtensionPattern for str pair");
+    DEBUG_ASSERT(!prevInsn->IsLoadPair(), "do not do ElimSpecificExtensionPattern for ldr pair");
+    if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi &&
+        !aarFunc->IsOperandImmValid(newMop, memOpnd, kInsnSecondOpnd)) {
+        return false;
+    }
+    uint32 shiftAmount = memOpnd->ShiftAmount();
+    if (shiftAmount == 0) {
+        return true;
+    }
+    const InsnDesc *md = &AArch64CG::kMd[newMop];
+    uint32 memSize = md->GetOperandSize() / k8BitSize;
+    uint32 validShiftAmount = ((memSize == k8BitSize) ? k3BitSize : ((memSize == k4BitSize) ? k2BitSize :
+                               ((memSize == k2BitSize) ? k1BitSize : k0BitSize)));
+    if (shiftAmount != validShiftAmount) {
+        return false;
+    }
+    return true;
+}
k1BitSize : k0BitSize))); + if (shiftAmount != validShiftAmount) { + return false; + } + return true; +} + +MOperator ElimSpecificExtensionPattern::SelectNewLoadMopByBitSize(MOperator lowBitMop) const { + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + switch (lowBitMop) { + case MOP_wldrsb: { + prevDstOpnd.SetSize(k64BitSize); + return MOP_xldrsb; + } + case MOP_wldrsh: { + prevDstOpnd.SetSize(k64BitSize); + return MOP_xldrsh; + } + default: + break; + } + return lowBitMop; +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) { + if (extTypeIdx == EXTUNDEF) { + return; + } + MOperator prevOrigMop = prevInsn->GetMachineOpcode(); + for (uint8 i = 0; i < kPrevLoadPatternNum; i++) { + DEBUG_ASSERT(extTypeIdx < SpecificExtTypeSize, "extTypeIdx must be lower than SpecificExtTypeSize"); + if (prevOrigMop != loadMappingTable[extTypeIdx][i][0]) { + continue; + } + MOperator prevNewMop = loadMappingTable[extTypeIdx][i][1]; + if (!IsValidLoadExtPattern(insn, prevOrigMop, prevNewMop)) { + return; + } + if (is64Bits && extTypeIdx >= SXTB && extTypeIdx <= SXTW) { + prevNewMop = SelectNewLoadMopByBitSize(prevNewMop); + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* to avoid {mov [64], [32]} in the case of big endian */ + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + + auto *newMemOp = + GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, prevNewMop); + + if (newMemOp == nullptr) { + return; + } + + auto *aarCGSSAInfo = static_cast(ssaInfo); + if (CG_PEEP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (prevOrigMop != prevNewMop) { + LogInfo::MapleLogger() << "======= OrigPrevInsn : \n"; + prevInsn->Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); + } + } + + prevInsn->SetMemOpnd(newMemOp); + prevInsn->SetMOP(AArch64CG::kMd[prevNewMop]); + + if ((prevOrigMop != prevNewMop) && CG_PEEP_DUMP) { + LogInfo::MapleLogger() << "======= NewPrevInsn : \n"; + prevInsn->Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); + } + + MOperator movMop = is64Bits ? 
MOP_xmovrr : MOP_wmovrr;
+        Insn &newMovInsn = cgFunc->GetInsnBuilder()->BuildInsn(movMop, insn.GetOperand(kInsnFirstOpnd),
+                                                               prevInsn->GetOperand(kInsnFirstOpnd));
+        currBB->ReplaceInsn(insn, newMovInsn);
+        /* update ssa info */
+        ssaInfo->ReplaceInsn(insn, newMovInsn);
+        optSuccess = true;
+        /* dump pattern info */
+        if (CG_PEEP_DUMP) {
+            LogInfo::MapleLogger() << "======= ReplacedInsn :\n";
+            insn.Dump();
+            aarCGSSAInfo->DumpInsnInSSAForm(insn);
+            LogInfo::MapleLogger() << "======= NewInsn :\n";
+            newMovInsn.Dump();
+            aarCGSSAInfo->DumpInsnInSSAForm(newMovInsn);
+        }
+    }
+}
+
+void ElimSpecificExtensionPattern::ElimExtensionAfterSameExt(Insn &insn) {
+    if (extTypeIdx == EXTUNDEF) {
+        return;
+    }
+    auto &prevDstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    auto &currDstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) {
+        return;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    MOperator currMop = insn.GetMachineOpcode();
+    for (uint8 i = 0; i < kSameExtPatternNum; i++) {
+        DEBUG_ASSERT(extTypeIdx < SpecificExtTypeSize, "extTypeIdx must be lower than SpecificExtTypeSize");
+        if (sameExtMappingTable[extTypeIdx][i][0] == MOP_undef || sameExtMappingTable[extTypeIdx][i][1] == MOP_undef) {
+            continue;
+        }
+        if (prevMop == sameExtMappingTable[extTypeIdx][i][0] && currMop == sameExtMappingTable[extTypeIdx][i][1]) {
+            ReplaceExtWithMov(insn);
+        }
+    }
+}
+
+bool ElimSpecificExtensionPattern::CheckCondition(Insn &insn) {
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    prevInsn = GetDefInsn(useReg);
+    InsnSet useInsns = GetAllUseInsn(useReg);
+    if ((prevInsn == nullptr) || (useInsns.size() != 1)) {
+        return false;
+    }
+    SetOptSceneType();
+    SetSpecificExtType(insn);
+    if (sceneType == kSceneUndef) {
+        return false;
+    }
+    return true;
+}
+
+void ElimSpecificExtensionPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    if (sceneType == kSceneMov) {
+        ElimExtensionAfterMov(insn);
+    } else if (sceneType == kSceneLoad) {
+        ElimExtensionAfterLoad(insn);
+    } else if (sceneType == kSceneSameExt) {
+        ElimExtensionAfterSameExt(insn);
+    }
+}
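+
+/*
+ * Sketch of the kSceneMov call-return case handled in ElimExtensionAfterMov
+ * above (callee name arbitrary): when the callee is known to return a value
+ * already zero-extended to one byte (GetRetSize() == 1),
+ *   bl   foo
+ *   uxtb w0, w0    ===>    mov w0, w0
+ * the explicit extension of the return register is redundant and is replaced
+ * by a trivial copy via ReplaceExtWithMov.
+ */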
+
+void OneHoleBranchPattern::FindNewMop(const BB &bb, const Insn &insn) {
+    if (&insn != bb.GetLastInsn()) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    switch (thisMop) {
+        case MOP_wcbz:
+            newOp = MOP_wtbnz;
+            break;
+        case MOP_wcbnz:
+            newOp = MOP_wtbz;
+            break;
+        case MOP_xcbz:
+            newOp = MOP_xtbnz;
+            break;
+        case MOP_xcbnz:
+            newOp = MOP_xtbz;
+            break;
+        default:
+            break;
+    }
+}
+
+/*
+ * pattern1:
+ * uxtb w0, w1     <-----(ValidBitsNum <= 8)
+ * cbz w0, .label
+ * ===>
+ * cbz w1, .label
+ *
+ * pattern2:
+ * uxtb w2, w1     <-----(ValidBitsNum == 1)
+ * eor w3, w2, #1
+ * cbz w3, .label
+ * ===>
+ * tbnz w1, #0, .label
+ */
+void OneHoleBranchPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    LabelOperand &label = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    bool pattern1 = (prevInsn->GetMachineOpcode() == MOP_xuxtb32) &&
+        (static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() <= k8BitSize ||
+         static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd)).GetValidBitsNum() <= k8BitSize);
+    if (pattern1) {
+        Insn &newCbzInsn = cgFunc->GetInsnBuilder()->BuildInsn(
+            insn.GetMachineOpcode(), prevInsn->GetOperand(kInsnSecondOpnd), label);
+        bb.ReplaceInsn(insn, newCbzInsn);
+        ssaInfo->ReplaceInsn(insn, newCbzInsn);
+        optSuccess = true;
+        SetCurrInsn(&newCbzInsn);
+        if (CG_PEEP_DUMP) {
+            std::vector<Insn*> prevs;
+            prevs.emplace_back(prevInsn);
+            DumpAfterPattern(prevs, &newCbzInsn, nullptr);
+        }
+        return;
+    }
+    bool pattern2 = (prevInsn->GetMachineOpcode() == MOP_xeorrri13 || prevInsn->GetMachineOpcode() == MOP_weorrri12) &&
+        (static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue() == 1);
+    if (pattern2) {
+        if (!CheckPrePrevInsn()) {
+            return;
+        }
+        AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+        ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false);
+        auto &regOperand = static_cast<RegOperand&>(prePrevInsn->GetOperand(kInsnSecondOpnd));
+        Insn &newTbzInsn = cgFunc->GetInsnBuilder()->BuildInsn(newOp, regOperand, oneHoleOpnd, label);
+        bb.ReplaceInsn(insn, newTbzInsn);
+        ssaInfo->ReplaceInsn(insn, newTbzInsn);
+        optSuccess = true;
+        if (CG_PEEP_DUMP) {
+            std::vector<Insn*> prevs;
+            prevs.emplace_back(prevInsn);
+            prevs.emplace_back(prePrevInsn);
+            DumpAfterPattern(prevs, &newTbzInsn, nullptr);
+        }
+    }
+}
+
+bool OneHoleBranchPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) {
+        return false;
+    }
+    FindNewMop(*insn.GetBB(), insn);
+    if (newOp == MOP_undef) {
+        return false;
+    }
+    auto &useReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    prevInsn = GetDefInsn(useReg);
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) {
+        return false;
+    }
+    return true;
+}
+
+bool OneHoleBranchPattern::CheckPrePrevInsn() {
+    auto &useReg = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    prePrevInsn = GetDefInsn(useReg);
+    if (prePrevInsn == nullptr) {
+        return false;
+    }
+    if (prePrevInsn->GetMachineOpcode() != MOP_xuxtb32 ||
+        static_cast<RegOperand&>(prePrevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() != 1) {
+        return false;
+    }
+    if (&(prePrevInsn->GetOperand(kInsnFirstOpnd)) != &(prevInsn->GetOperand(kInsnSecondOpnd))) {
+        return false;
+    }
+    return true;
+}
+
+void OrrToMovPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    RegOperand *reg1 = &static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *reg1, *reg2);
+    bb.ReplaceInsn(insn, newInsn);
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    optSuccess = true;
+    SetCurrInsn(&newInsn);
+    if (CG_PEEP_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(&insn);
+        DumpAfterPattern(prevs, &newInsn, nullptr);
+    }
+}
+
+bool OrrToMovPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wiorrri12 && curMop != MOP_xiorrri13) {
+        return false;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    Operand *opndOfOrr = nullptr;
+    switch (thisMop) {
+        case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */
+            opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd));
+            reg2 = &static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+            newMop = MOP_wmovrr;
+            break;
+        }
+        case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate.
*/ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_xmovrr; + break; + } + default: + return false; + } + CHECK_FATAL(opndOfOrr->IsIntImmediate(), "expects immediate operand"); + ImmOperand *immOpnd = static_cast(opndOfOrr); + if (immOpnd->GetValue() != 0) { + return false; + } + return true; +} + +void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn); + switch (thisMop) { + /* + * e.g. + * execute before & after RA: manager->NormalPatternOpt<>(true) + * execute before RA: manager->NormalPatternOpt<>(!cgFunc->IsAfterRegAlloc()) + * execute after RA: manager->NormalPatternOpt<>(cgFunc->IsAfterRegAlloc()) + */ + case MOP_xubfxrri6i6: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xmovzri16: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcmpri: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wmovrr: + case MOP_xmovrr: + case MOP_xvmovs: + case MOP_xvmovd: + case MOP_vmovuu: + case MOP_vmovvv: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wstrb: + case MOP_wldrb: + case MOP_wstrh: + case MOP_wldrh: + case MOP_xldr: + case MOP_xstr: + case MOP_wldr: + case MOP_wstr: + case MOP_dldr: + case MOP_dstr: + case MOP_sldr: + case MOP_sstr: + case MOP_qldr: + case MOP_qstr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xvmovrv: + case MOP_xvmovrd: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xsbfxrri6i6: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wsdivrrr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xbl: { + if (JAVALANG) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + } + if (CGOptions::IsGCOnly() && CGOptions::DoWriteRefFieldOpt()) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } + break; + } + default: + break; + } + /* skip if it is not a read barrier call. 
*/ + if (GetReadBarrierName(insn) != "") { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } +} +/* ======== CGPeepPattern End ======== */ + +void AArch64PeepHole::InitOpts() { + optimizations.resize(kPeepholeOptsNum); + optimizations[kEliminateSpecifcSXTOpt] = optOwnMemPool->New(cgFunc); + optimizations[kEliminateSpecifcUXTOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCsetCbzToBeqOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpBranchesToCsetOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpBranchesToTstOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCbzBranchesToTstOpt] = optOwnMemPool->New(cgFunc); + optimizations[kZeroCmpBranchesOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCselZeroOneToCsetOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PeepHole::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xsxtb32: + case MOP_xsxth32: + case MOP_xsxtb64: + case MOP_xsxth64: + case MOP_xsxtw64: { + (static_cast(optimizations[kEliminateSpecifcSXTOpt]))->Run(bb, insn); + break; + } + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + (static_cast(optimizations[kEliminateSpecifcUXTOpt]))->Run(bb, insn); + break; + } + case MOP_wcbnz: + case MOP_xcbnz: { + (static_cast(optimizations[kCsetCbzToBeqOpt]))->Run(bb, insn); + break; + } + case MOP_wcbz: + case MOP_xcbz: { + (static_cast(optimizations[kCsetCbzToBeqOpt]))->Run(bb, insn); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + (static_cast(optimizations[kAndCmpBranchesToCsetOpt]))->Run(bb, insn); + break; + } + case MOP_xandrrr: + case MOP_wandrrr: + case MOP_wandrri12: + case MOP_xandrri13: { + (static_cast(optimizations[kAndCmpBranchesToTstOpt]))->Run(bb, insn); + (static_cast(optimizations[kAndCbzBranchesToTstOpt]))->Run(bb, insn); + break; + } + case MOP_wcselrrrc: + case MOP_xcselrrrc: { + (static_cast(optimizations[kCselZeroOneToCsetOpt]))->Run(bb, insn); + break; + } + default: + break; + } + if (&insn == bb.GetLastInsn()) { + (static_cast(optimizations[kZeroCmpBranchesOpt]))->Run(bb, insn); + } +} + +void AArch64PeepHole0::InitOpts() { + optimizations.resize(kPeepholeOptsNum); + optimizations[kRemoveIdenticalLoadAndStoreOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCmpCsetOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandOptAdd] = optOwnMemPool->New(cgFunc); + optimizations[kDeleteMovAfterCbzOrCbnzOpt] = optOwnMemPool->New(cgFunc); + optimizations[kRemoveSxtBeforeStrOpt] = optOwnMemPool->New(cgFunc); + optimizations[kRemoveMovingtoSameRegOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PeepHole0::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xstr: + case MOP_wstr: { + (static_cast(optimizations[kRemoveIdenticalLoadAndStoreOpt]))->Run(bb, insn); + break; + } + case MOP_wcmpri: + case MOP_xcmpri: { + (static_cast(optimizations[kCmpCsetOpt]))->Run(bb, insn); + break; + } + case MOP_xaddrrr: { + (static_cast(optimizations[kComplexMemOperandOptAdd]))->Run(bb, insn); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + (static_cast(optimizations[kDeleteMovAfterCbzOrCbnzOpt]))->Run(bb, insn); + break; + } + case MOP_wstrh: + case MOP_wstrb: { + (static_cast(optimizations[kRemoveSxtBeforeStrOpt]))->Run(bb, insn); + break; + } + case MOP_wmovrr: + case MOP_xmovrr: + case MOP_xvmovs: + case MOP_xvmovd: + case MOP_vmovuu: + case MOP_vmovvv: { + (static_cast(optimizations[kRemoveMovingtoSameRegOpt]))->Run(bb, 
insn);
+            break;
+        }
+        default:
+            break;
+    }
+}
+
+void AArch64PrePeepHole::InitOpts() {
+    optimizations.resize(kPeepholeOptsNum);
+    optimizations[kOneHoleBranchesPreOpt] = optOwnMemPool->New<OneHoleBranchesPreAArch64>(cgFunc);
+    optimizations[kReplaceOrrToMovOpt] = optOwnMemPool->New<ReplaceOrrToMovAArch64>(cgFunc);
+    optimizations[kReplaceCmpToCmnOpt] = optOwnMemPool->New<ReplaceCmpToCmnAArch64>(cgFunc);
+    optimizations[kComplexMemOperandOpt] = optOwnMemPool->New<ComplexMemOperandAArch64>(cgFunc);
+    optimizations[kComplexMemOperandPreOptAdd] = optOwnMemPool->New<ComplexMemOperandPreAddAArch64>(cgFunc);
+    optimizations[kComplexMemOperandOptLSL] = optOwnMemPool->New<ComplexMemOperandLSLAArch64>(cgFunc);
+    optimizations[kComplexMemOperandOptLabel] = optOwnMemPool->New<ComplexMemOperandLabelAArch64>(cgFunc);
+    optimizations[kDuplicateExtensionOpt] = optOwnMemPool->New<DuplicateExtensionAArch64>(cgFunc);
+    optimizations[kEnhanceStrLdrAArch64Opt] = optOwnMemPool->New<EnhanceStrLdrAArch64>(cgFunc);
+}
+
+void AArch64PrePeepHole::Run(BB &bb, Insn &insn) {
+    MOperator thisMop = insn.GetMachineOpcode();
+    switch (thisMop) {
+        case MOP_wiorrri12:
+        case MOP_xiorrri13: {
+            (static_cast<ReplaceOrrToMovAArch64*>(optimizations[kReplaceOrrToMovOpt]))->Run(bb, insn);
+            break;
+        }
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            (static_cast<ReplaceCmpToCmnAArch64*>(optimizations[kReplaceCmpToCmnOpt]))->Run(bb, insn);
+            break;
+        }
+        case MOP_xadrpl12: {
+            (static_cast<ComplexMemOperandAArch64*>(optimizations[kComplexMemOperandOpt]))->Run(bb, insn);
+            break;
+        }
+        case MOP_xaddrrr: {
+            (static_cast<ComplexMemOperandPreAddAArch64*>(optimizations[kComplexMemOperandPreOptAdd]))->Run(bb, insn);
+            break;
+        }
+        case MOP_xaddrrrs: {
+            (static_cast<ComplexMemOperandLSLAArch64*>(optimizations[kComplexMemOperandOptLSL]))->Run(bb, insn);
+            break;
+        }
+        case MOP_xsxtb32:
+        case MOP_xsxth32:
+        case MOP_xsxtb64:
+        case MOP_xsxth64:
+        case MOP_xsxtw64:
+        case MOP_xuxtb32:
+        case MOP_xuxth32:
+        case MOP_xuxtw64: {
+            (static_cast<DuplicateExtensionAArch64*>(optimizations[kDuplicateExtensionOpt]))->Run(bb, insn);
+            break;
+        }
+        case MOP_xldli: {
+            (static_cast<ComplexMemOperandLabelAArch64*>(optimizations[kComplexMemOperandOptLabel]))->Run(bb, insn);
+            break;
+        }
+        case MOP_xldr:
+        case MOP_xstr:
+        case MOP_wldr:
+        case MOP_wstr:
+        case MOP_dldr:
+        case MOP_dstr:
+        case MOP_sldr:
+        case MOP_sstr: {
+            (static_cast<EnhanceStrLdrAArch64*>(optimizations[kEnhanceStrLdrAArch64Opt]))->Run(bb, insn);
+            break;
+        }
+        default:
+            break;
+    }
+    if (&insn == bb.GetLastInsn()) {
+        (static_cast<OneHoleBranchesPreAArch64*>(optimizations[kOneHoleBranchesPreOpt]))->Run(bb, insn);
+    }
+}
+
+void AArch64PrePeepHole1::InitOpts() {
+    optimizations.resize(kPeepholeOptsNum);
+    optimizations[kOneHoleBranchesOpt] = optOwnMemPool->New<OneHoleBranchesAArch64>(cgFunc);
+    optimizations[kAndCmpBranchesToTbzOpt] = optOwnMemPool->New<AndCmpBranchesToTbzAArch64>(cgFunc);
+    optimizations[kComplexExtendWordLslOpt] = optOwnMemPool->New<ComplexExtendWordLslAArch64>(cgFunc);
+}
+
+void AArch64PrePeepHole1::Run(BB &bb, Insn &insn) {
+    MOperator thisMop = insn.GetMachineOpcode();
+    switch (thisMop) {
+        case MOP_xsxtw64:
+        case MOP_xuxtw64: {
+            (static_cast<ComplexExtendWordLslAArch64*>(optimizations[kComplexExtendWordLslOpt]))->Run(bb, insn);
+            break;
+        }
+        default:
+            break;
+    }
+    if (&insn == bb.GetLastInsn()) {
+        switch (thisMop) {
+            case MOP_wcbz:
+            case MOP_wcbnz:
+            case MOP_xcbz:
+            case MOP_xcbnz: {
+                (static_cast<OneHoleBranchesAArch64*>(optimizations[kOneHoleBranchesOpt]))->Run(bb, insn);
+                break;
+            }
+            case MOP_beq:
+            case MOP_bne: {
+                (static_cast<AndCmpBranchesToTbzAArch64*>(optimizations[kAndCmpBranchesToTbzOpt]))->Run(bb, insn);
+                break;
+            }
+            default:
+                break;
+        }
+    }
+}
+
+bool RemoveIdenticalLoadAndStorePattern::CheckCondition(Insn &insn) {
+    nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return false;
+    }
+    return true;
+}
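+
+/*
+ * Sketch of the pattern removed below (operands arbitrary):
+ *   str x0, [x2, #8]               str x0, [x2, #8]
+ *   ldr x0, [x2, #8]       ===>    (ldr removed: x0 already holds the value)
+ * and, symmetrically, for two stores to the identical slot the first store
+ * is dead and is removed instead.
+ */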
+
+void RemoveIdenticalLoadAndStorePattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator mop1 = insn.GetMachineOpcode();
+    MOperator mop2 = nextInsn->GetMachineOpcode();
+    if ((mop1 == MOP_wstr && mop2 == MOP_wstr) || (mop1 == MOP_xstr && mop2 == MOP_xstr)) {
+        if (IsMemOperandsIdentical(insn, *nextInsn)) {
+            bb.RemoveInsn(insn);
+        }
+    } else if ((mop1 == MOP_wstr && mop2 == MOP_wldr) || (mop1 == MOP_xstr && mop2 == MOP_xldr)) {
+        if (IsMemOperandsIdentical(insn, *nextInsn)) {
+            bb.RemoveInsn(*nextInsn);
+        }
+    }
+}
+
+bool RemoveIdenticalLoadAndStorePattern::IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const {
+    regno_t regNO1 = static_cast<RegOperand&>(insn1.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    regno_t regNO2 = static_cast<RegOperand&>(insn2.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    if (regNO1 != regNO2) {
+        return false;
+    }
+    /* Match only [base + offset] */
+    auto &memOpnd1 = static_cast<MemOperand&>(insn1.GetOperand(kInsnSecondOpnd));
+    if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) {
+        return false;
+    }
+    auto &memOpnd2 = static_cast<MemOperand&>(insn2.GetOperand(kInsnSecondOpnd));
+    if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd2.IsIntactIndexed()) {
+        return false;
+    }
+    Operand *base1 = memOpnd1.GetBaseRegister();
+    Operand *base2 = memOpnd2.GetBaseRegister();
+    if (!((base1 != nullptr) && base1->IsRegister()) || !((base2 != nullptr) && base2->IsRegister())) {
+        return false;
+    }
+
+    regno_t baseRegNO1 = static_cast<RegOperand*>(base1)->GetRegisterNumber();
+    /* First insn re-writes the base addr  reg1 <- [ reg1 + offset ] */
+    if (baseRegNO1 == regNO1) {
+        return false;
+    }
+
+    regno_t baseRegNO2 = static_cast<RegOperand*>(base2)->GetRegisterNumber();
+    if (baseRegNO1 != baseRegNO2) {
+        return false;
+    }
+
+    return memOpnd1.GetOffsetImmediate()->GetOffsetValue() == memOpnd2.GetOffsetImmediate()->GetOffsetValue();
+}
+
+void RemoveIdenticalLoadAndStoreAArch64::Run(BB &bb, Insn &insn) {
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    MOperator mop1 = insn.GetMachineOpcode();
+    MOperator mop2 = nextInsn->GetMachineOpcode();
+    if ((mop1 == MOP_wstr && mop2 == MOP_wstr) || (mop1 == MOP_xstr && mop2 == MOP_xstr)) {
+        if (IsMemOperandsIdentical(insn, *nextInsn)) {
+            bb.RemoveInsn(insn);
+        }
+    } else if ((mop1 == MOP_wstr && mop2 == MOP_wldr) || (mop1 == MOP_xstr && mop2 == MOP_xldr)) {
+        if (IsMemOperandsIdentical(insn, *nextInsn)) {
+            bb.RemoveInsn(*nextInsn);
+        }
+    }
+}
+
+bool RemoveIdenticalLoadAndStoreAArch64::IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const {
+    regno_t regNO1 = static_cast<RegOperand&>(insn1.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    regno_t regNO2 = static_cast<RegOperand&>(insn2.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    if (regNO1 != regNO2) {
+        return false;
+    }
+    /* Match only [base + offset] */
+    auto &memOpnd1 = static_cast<MemOperand&>(insn1.GetOperand(kInsnSecondOpnd));
+    if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) {
+        return false;
+    }
+    auto &memOpnd2 = static_cast<MemOperand&>(insn2.GetOperand(kInsnSecondOpnd));
+    if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd2.IsIntactIndexed()) {
+        return false;
+    }
+    Operand *base1 = memOpnd1.GetBaseRegister();
+    Operand *base2 = memOpnd2.GetBaseRegister();
+    if (!((base1 != nullptr) && base1->IsRegister()) || !((base2 != nullptr) && base2->IsRegister())) {
+        return false;
+    }
+
+    regno_t baseRegNO1 = static_cast<RegOperand*>(base1)->GetRegisterNumber();
+    /* First insn re-writes the base addr  reg1 <- [ reg1 + offset ] */
+    if (baseRegNO1 == regNO1) {
+        return false;
+    }
+
+    regno_t baseRegNO2 = static_cast<RegOperand*>(base2)->GetRegisterNumber();
+    if (baseRegNO1 != baseRegNO2) {
+        return false;
+    }
+
+    return memOpnd1.GetOffsetImmediate()->GetOffsetValue()
== memOpnd2.GetOffsetImmediate()->GetOffsetValue(); +} + +bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) { + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + return true; + } + return false; +} + +void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { + /* remove mov x0,x0 when it cast i32 to i64 */ + if (CheckCondition(insn)) { + bb.RemoveInsn(insn); + } +} + +void RemoveMovingtoSameRegAArch64::Run(BB &bb, Insn &insn) { + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + bb.RemoveInsn(insn); + } +} + +void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) { + Insn *prevInsn = insn.GetPrev(); + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + + if (prevInsn == nullptr) { + return; + } + Operand &memOpnd = insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(memOpnd.GetKind() == Operand::kOpdMem, "Unexpected operand in EnhanceStrLdrAArch64"); + auto &a64MemOpnd = static_cast(memOpnd); + RegOperand *baseOpnd = a64MemOpnd.GetBaseRegister(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (IsEnhanceAddImm(prevMop) && a64MemOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + a64MemOpnd.GetOffsetImmediate()->GetValue() == 0) { + auto &addDestOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (baseOpnd == &addDestOpnd && !IfOperandIsLiveAfterInsn(addDestOpnd, insn)) { + auto &concreteMemOpnd = static_cast(memOpnd); + auto *origBaseReg = concreteMemOpnd.GetBaseRegister(); + concreteMemOpnd.SetBaseRegister( + static_cast(prevInsn->GetOperand(kInsnSecondOpnd))); + auto &ofstOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + OfstOperand &offOpnd = static_cast(cgFunc).GetOrCreateOfstOpnd( + static_cast(ofstOpnd.GetValue()), k32BitSize); + auto *origOffOpnd = concreteMemOpnd.GetOffsetImmediate(); + concreteMemOpnd.SetOffsetOperand(offOpnd); + if (!static_cast(cgFunc).IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, kInsnSecondOpnd)) { + // If new offset is invalid, undo it + concreteMemOpnd.SetBaseRegister(*static_cast(origBaseReg)); + concreteMemOpnd.SetOffsetOperand(*origOffOpnd); + return; + } + bb.RemoveInsn(*prevInsn); + } + } +} + +bool EnhanceStrLdrAArch64::IsEnhanceAddImm(MOperator prevMop) const { + return prevMop == MOP_xaddrri12 || prevMop == MOP_waddrri12; +} + +bool IsSameRegisterOperation(const RegOperand &desMovOpnd, + const RegOperand &uxtDestOpnd, + const RegOperand &uxtFromOpnd) { + return ((desMovOpnd.GetRegisterNumber() == uxtDestOpnd.GetRegisterNumber()) && + (uxtDestOpnd.GetRegisterNumber() == uxtFromOpnd.GetRegisterNumber())); +} + +bool CombineContiLoadAndStorePattern::IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, + int64 baseOfst) const { + uint32 opndNum = insn.GetOperandSize(); + bool sameMemAccess = false; /* both store or load */ + if 
(insn.IsStore() == isStore) { + sameMemAccess = true; + } + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOperand = static_cast(opnd); + RegOperand *base = memOperand.GetBaseRegister(); + /* need check offset as well */ + regno_t stackBaseRegNO = cgFunc->UseFP() ? R29 : RSP; + if (!sameMemAccess && base != nullptr) { + regno_t curBaseRegNO = base->GetRegisterNumber(); + int64 memBarrierRange = static_cast(insn.IsLoadStorePair() ? k16BitSize : k8BitSize); + if (!(curBaseRegNO == regNO && memOperand.GetAddrMode() == MemOperand::kAddrModeBOi && + memOperand.GetOffsetImmediate() != nullptr && + (memOperand.GetOffsetImmediate()->GetOffsetValue() <= (baseOfst - memBarrierRange) || + memOperand.GetOffsetImmediate()->GetOffsetValue() >= (baseOfst + memBarrierRange)))) { + return true; + } + } + /* do not trust the following situation : + * str x1, [x9] + * str x6, [x2] + * str x3, [x9, #8] + */ + if (isStore && regNO != stackBaseRegNO && base != nullptr && + base->GetRegisterNumber() != stackBaseRegNO && base->GetRegisterNumber() != regNO) { + return true; + } + if (isStore && base != nullptr && base->GetRegisterNumber() == regNO) { + if (memOperand.GetAddrMode() == MemOperand::kAddrModeBOi && memOperand.GetOffsetImmediate() != nullptr) { + int64 curOffset = memOperand.GetOffsetImmediate()->GetOffsetValue(); + if (memOperand.GetSize() == k64BitSize) { + uint32 memBarrierRange = insn.IsLoadStorePair() ? k16BitSize : k8BitSize; + if (curOffset < baseOfst + memBarrierRange && curOffset > baseOfst - memBarrierRange) { + return true; + } + } else if (memOperand.GetSize() == k32BitSize) { + uint32 memBarrierRange = insn.IsLoadStorePair() ? 
k8BitSize : k4BitSize; + if (curOffset < baseOfst + memBarrierRange && curOffset > baseOfst - memBarrierRange) { + return true; + } + } + } + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + return true; + } + } else if (opnd.IsRegister()) { + if (!isStore && static_cast(opnd).GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, regno_t destRegNO, + regno_t memBaseRegNO, int64 baseOfst) { + std::vector prevContiInsns; + bool isStr = insn.IsStore(); + for (Insn *curInsn = insn.GetPrev(); curInsn != nullptr; curInsn = curInsn->GetPrev()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->IsRegDefined(memBaseRegNO)) { + return prevContiInsns; + } + if (IsRegNotSameMemUseInInsn(*curInsn, memBaseRegNO, insn.IsStore(), static_cast(baseOfst))) { + return prevContiInsns; + } + /* return continuous STD/LDR insn */ + if (((isStr && curInsn->IsStore()) || (!isStr && curInsn->IsLoad())) && !curInsn->IsLoadStorePair()) { + auto *memOperand = static_cast(curInsn->GetMemOpnd()); + /* do not combine ldr r0, label */ + if (memOperand != nullptr) { + auto *BaseRegOpnd = static_cast(memOperand->GetBaseRegister()); + DEBUG_ASSERT(BaseRegOpnd == nullptr || !BaseRegOpnd->IsVirtualRegister(), + "physical register has not been allocated?"); + if (memOperand->GetAddrMode() == MemOperand::kAddrModeBOi && + BaseRegOpnd->GetRegisterNumber() == memBaseRegNO) { + prevContiInsns.emplace_back(curInsn); + } + } + } + /* check insn that changes the data flow */ + regno_t stackBaseRegNO = cgFunc->UseFP() ? R29 : RSP; + /* ldr x8, [x21, #8] + * call foo() + * ldr x9, [x21, #16] + * although x21 is a calleeSave register, there is no guarantee data in memory [x21] is not changed + */ + if (curInsn->IsCall() && (!AArch64Abi::IsCalleeSavedReg(static_cast(destRegNO)) || + memBaseRegNO != stackBaseRegNO)) { + return prevContiInsns; + } + /* store opt should not cross call due to stack args */ + if (curInsn->IsCall() && isStr) { + return prevContiInsns; + } + if (curInsn->GetMachineOpcode() == MOP_asm) { + return prevContiInsns; + } + if (curInsn->ScanReg(destRegNO)) { + return prevContiInsns; + } + } + return prevContiInsns; +} + +Insn *CombineContiLoadAndStorePattern::FindValidSplitAddInsn(Insn &curInsn, RegOperand &baseOpnd) const { + Insn *splitAdd = nullptr; + for (Insn *cursor = curInsn.GetPrev(); cursor != nullptr; cursor = cursor->GetPrev()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->IsCall()) { + break; + } + if (cursor->IsRegDefined(baseOpnd.GetRegisterNumber())) { + break; + } + MOperator mOp = cursor->GetMachineOpcode(); + if (mOp != MOP_xaddrri12 && mOp != MOP_waddrri12) { + continue; + } + auto &destOpnd = static_cast(cursor->GetOperand(kInsnFirstOpnd)); + if (destOpnd.GetRegisterNumber() != R16 || destOpnd.GetSize() != baseOpnd.GetSize()) { + continue; + } + auto &useOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + /* + * split add as following: + * add R16, R0, #2, LSL #12 + * add R16, R16, #1536 + */ + if (useOpnd.GetRegisterNumber() != baseOpnd.GetRegisterNumber()) { + if (useOpnd.GetRegisterNumber() == R16) { + Insn *defInsn = cursor->GetPrev(); + CHECK_FATAL(defInsn, "invalid defInsn"); + CHECK_FATAL(defInsn->GetMachineOpcode() == MOP_xaddrri24 || defInsn->GetMachineOpcode() == MOP_waddrri24, + "split with wrong 
add"); + auto &opnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + if (opnd.GetRegisterNumber() == baseOpnd.GetRegisterNumber()) { + splitAdd = cursor; + } + } + break; + } else { + splitAdd = cursor; + break; + } + } + return splitAdd; +} + +bool CombineContiLoadAndStorePattern::PlaceSplitAddInsn(const Insn &curInsn, Insn &combineInsn, + const MemOperand &memOperand, + RegOperand &baseOpnd, uint32 bitLen) const { + Insn *cursor = nullptr; + MemOperand *maxOfstMem = nullptr; + int64 maxOfstVal = 0; + MOperator mop = curInsn.GetMachineOpcode(); + OfstOperand *ofstOpnd = memOperand.GetOffsetImmediate(); + int64 ofstVal = ofstOpnd->GetOffsetValue(); + auto &aarFunc = static_cast(*cgFunc); + for (cursor = curInsn.GetNext(); cursor != nullptr; cursor = cursor->GetNext()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->GetMachineOpcode() == mop && (cursor->IsLoad() || cursor->IsStore())) { + auto &curMemOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + RegOperand *curBaseOpnd = curMemOpnd.GetBaseRegister(); + if (curMemOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && RegOperand::IsSameReg(baseOpnd, *curBaseOpnd)) { + OfstOperand *curOfstOpnd = curMemOpnd.GetOffsetImmediate(); + CHECK_FATAL(curOfstOpnd, "invalid OfstOperand"); + if (curOfstOpnd->GetOffsetValue() > ofstVal && + (curOfstOpnd->GetOffsetValue() - ofstVal) < MemOperand::GetMaxPairPIMM(bitLen) && + !aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), &curMemOpnd, kInsnThirdOpnd)) { + maxOfstMem = &curMemOpnd; + maxOfstVal = curOfstOpnd->GetOffsetValue(); + } + } + } + if (cursor->IsRegDefined(baseOpnd.GetRegisterNumber())) { + break; + } + if (cursor->IsRegDefined(R16)) { + break; + } + } + MemOperand *newMemOpnd = nullptr; + if (maxOfstMem == nullptr) { + newMemOpnd = &aarFunc.SplitOffsetWithAddInstruction(memOperand, bitLen, static_cast(R16), + false, &combineInsn, true); + } else { + RegOperand *addResOpnd = aarFunc.GetBaseRegForSplit(R16); + ImmOperand &immAddend = aarFunc.SplitAndGetRemained(*maxOfstMem, bitLen, addResOpnd, maxOfstVal, + false, &combineInsn, true); + newMemOpnd = &aarFunc.CreateReplacementMemOperand(bitLen, *addResOpnd, ofstVal - immAddend.GetValue()); + if (!(aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { + newMemOpnd = &aarFunc.SplitOffsetWithAddInstruction(memOperand, bitLen, static_cast(R16), + false, &combineInsn, true); + } else { + aarFunc.SelectAddAfterInsn(*addResOpnd, baseOpnd, immAddend, PTY_i64, false, combineInsn); + } + } + if (!(aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { + return false; + } + combineInsn.SetOperand(kInsnThirdOpnd, *newMemOpnd); + return true; +} + +bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(const Insn &curInsn, Insn &combineInsn, + const MemOperand &memOperand) const { + auto *baseRegOpnd = static_cast(memOperand.GetBaseRegister()); + auto *ofstOpnd = static_cast(memOperand.GetOffsetImmediate()); + DEBUG_ASSERT(baseRegOpnd && ofstOpnd, "get baseOpnd and ofstOpnd failed"); + CHECK_FATAL(combineInsn.GetOperand(kInsnFirstOpnd).GetSize() == combineInsn.GetOperand(kInsnSecondOpnd).GetSize(), + "the size must equal"); + if (baseRegOpnd->GetRegisterNumber() == R16) { + return false; + } + Insn *splitAdd = FindValidSplitAddInsn(combineInsn, *baseRegOpnd); + const InsnDesc *md = &AArch64CG::kMd[combineInsn.GetMachineOpcode()]; + auto *opndProp = md->opndMD[kInsnFirstOpnd]; + auto &aarFunc = static_cast(*cgFunc); + if (splitAdd == 
nullptr) {
+        if (combineInsn.IsLoadStorePair()) {
+            if (ofstOpnd->GetOffsetValue() < 0) {
+                return false; /* do not split */
+            }
+        }
+        /* create and place addInsn */
+        return PlaceSplitAddInsn(curInsn, combineInsn, memOperand, *baseRegOpnd, opndProp->GetSize());
+    } else {
+        auto &newBaseReg = static_cast<RegOperand&>(splitAdd->GetOperand(kInsnFirstOpnd));
+        auto &addImmOpnd = static_cast<ImmOperand&>(splitAdd->GetOperand(kInsnThirdOpnd));
+        int64 addVal = 0;
+        if (static_cast<RegOperand&>(splitAdd->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R16) {
+            Insn *defInsn = splitAdd->GetPrev();
+            CHECK_FATAL(defInsn->GetMachineOpcode() == MOP_xaddrri24 || defInsn->GetMachineOpcode() == MOP_waddrri24,
+                        "split with wrong add");
+            auto &immOpnd = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+            auto &shiftOpnd = static_cast<BitShiftOperand&>(defInsn->GetOperand(kInsnFourthOpnd));
+            addVal = (immOpnd.GetValue() << shiftOpnd.GetShiftAmount()) + addImmOpnd.GetValue();
+        } else {
+            addVal = addImmOpnd.GetValue();
+        }
+        auto *newOfstOpnd = &aarFunc.CreateOfstOpnd(static_cast<uint64>(ofstOpnd->GetOffsetValue() - addVal),
+                                                    ofstOpnd->GetSize());
+        auto *newMemOpnd = aarFunc.CreateMemOperand(MemOperand::kAddrModeBOi, opndProp->GetSize(),
+                                                    newBaseReg, nullptr, newOfstOpnd,
+                                                    memOperand.GetSymbol());
+        if (!(static_cast<AArch64CGFunc&>(*cgFunc).IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd,
+                                                                     kInsnThirdOpnd))) {
+            return PlaceSplitAddInsn(curInsn, combineInsn, memOperand, *baseRegOpnd, opndProp->GetSize());
+        }
+        combineInsn.SetOperand(kInsnThirdOpnd, *newMemOpnd);
+        return true;
+    }
+}
+
+bool CombineContiLoadAndStorePattern::CheckCondition(Insn &insn) {
+    memOpnd = static_cast<MemOperand*>(insn.GetMemOpnd());
+    DEBUG_ASSERT(memOpnd != nullptr, "get mem operand failed");
+    if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) {
+        return false;
+    }
+    if (!doAggressiveCombine) {
+        return false;
+    }
+    return true;
+}
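+
+/*
+ * Sketch of the str/ldr combine below (offsets arbitrary, 8-byte case):
+ *   str x1, [sp, #16]
+ *   str x2, [sp, #24]      ===>    stp x1, x2, [sp, #16]
+ * consecutive same-width loads are merged into an ldp in the same way,
+ * provided the resulting offset is valid for a pair access.
+ */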
+
+/* Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp */
+void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "unexpect operand");
+    auto &destOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto *baseRegOpnd = static_cast<RegOperand*>(memOpnd->GetBaseRegister());
+    OfstOperand *offsetOpnd = memOpnd->GetOffsetImmediate();
+    CHECK_FATAL(offsetOpnd != nullptr, "offset opnd lost");
+    DEBUG_ASSERT(baseRegOpnd == nullptr || !baseRegOpnd->IsVirtualRegister(),
+                 "physical register has not been allocated?");
+    std::vector<Insn*> prevContiInsnVec = FindPrevStrLdr(
+        insn, destOpnd.GetRegisterNumber(), baseRegOpnd->GetRegisterNumber(), offsetOpnd->GetOffsetValue());
+    for (auto prevContiInsn : prevContiInsnVec) {
+        DEBUG_ASSERT(prevContiInsn != nullptr, "get previous consecutive instructions failed");
+        auto *prevMemOpnd = static_cast<MemOperand*>(prevContiInsn->GetMemOpnd());
+        if (memOpnd->GetIndexOpt() != prevMemOpnd->GetIndexOpt()) {
+            continue;
+        }
+        OfstOperand *prevOffsetOpnd = prevMemOpnd->GetOffsetImmediate();
+        CHECK_FATAL(offsetOpnd != nullptr && prevOffsetOpnd != nullptr, "both conti str/ldr have no offset");
+        auto &prevDestOpnd = static_cast<RegOperand&>(prevContiInsn->GetOperand(kInsnFirstOpnd));
+        uint32 memSize = insn.GetMemoryByteSize();
+        uint32 prevMemSize = prevContiInsn->GetMemoryByteSize();
+        if (prevDestOpnd.GetRegisterType() != destOpnd.GetRegisterType()) {
+            continue;
+        }
+        int64 offsetVal = offsetOpnd->GetOffsetValue();
+        int64 prevOffsetVal = prevOffsetOpnd->GetOffsetValue();
+        auto diffVal = std::abs(offsetVal - prevOffsetVal);
+        regno_t destRegNO = destOpnd.GetRegisterNumber();
+        regno_t prevDestRegNO = prevDestOpnd.GetRegisterNumber();
+        if (insn.IsStore() && memOpnd->IsStackArgMem() && prevMemOpnd->IsStackArgMem() &&
+            (memSize == k4ByteSize || memSize == k8ByteSize) && diffVal == k8BitSize &&
+            (prevMemSize == k4ByteSize || prevMemSize == k8ByteSize) &&
+            (destOpnd.GetValidBitsNum() == memSize * k8BitSize) &&
+            (prevDestOpnd.GetValidBitsNum() == prevMemSize * k8BitSize)) {
+            RegOperand &newDest = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreatePhysicalRegisterOperand(
+                static_cast<AArch64reg>(destRegNO), k64BitSize, destOpnd.GetRegisterType());
+            RegOperand &newPrevDest = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreatePhysicalRegisterOperand(
+                static_cast<AArch64reg>(prevDestRegNO), k64BitSize, prevDestOpnd.GetRegisterType());
+            MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd;
+            MOperator mopPair = (destOpnd.GetRegisterType() == kRegTyInt) ? MOP_xstp : MOP_dstp;
+            if ((static_cast<AArch64CGFunc&>(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd, kInsnThirdOpnd))) {
+                Insn &combineInsn = (offsetVal < prevOffsetVal) ?
+                    cgFunc->GetInsnBuilder()->BuildInsn(mopPair, newDest, newPrevDest, *combineMemOpnd) :
+                    cgFunc->GetInsnBuilder()->BuildInsn(mopPair, newPrevDest, newDest, *combineMemOpnd);
+                bb.InsertInsnAfter(*prevContiInsn, combineInsn);
+                RemoveInsnAndKeepComment(bb, insn, *prevContiInsn);
+                return;
+            }
+        }
+        if (memSize != prevMemSize || thisMop != prevContiInsn->GetMachineOpcode() ||
+            prevDestOpnd.GetSize() != destOpnd.GetSize()) {
+            continue;
+        }
+        /* do combination str/ldr -> stp/ldp */
+        if ((insn.IsStore() || destRegNO != prevDestRegNO) || (destRegNO == RZR && prevDestRegNO == RZR)) {
+            if ((memSize == k8ByteSize && diffVal == k8BitSize) ||
+                (memSize == k4ByteSize && diffVal == k4BitSize) ||
+                (memSize == k16ByteSize && diffVal == k16BitSize)) {
+                MOperator mopPair = GetMopPair(thisMop);
+                MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd;
+                Insn &combineInsn = (offsetVal < prevOffsetVal) ?
+                    cgFunc->GetInsnBuilder()->BuildInsn(mopPair, destOpnd, prevDestOpnd, *combineMemOpnd) :
+                    cgFunc->GetInsnBuilder()->BuildInsn(mopPair, prevDestOpnd, destOpnd, *combineMemOpnd);
+                bb.InsertInsnAfter(*prevContiInsn, combineInsn);
+                if (!(static_cast<AArch64CGFunc&>(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd,
+                                                                             kInsnThirdOpnd)) &&
+                    !SplitOfstWithAddToCombine(insn, combineInsn, *combineMemOpnd)) {
+                    bb.RemoveInsn(combineInsn);
+                    return;
+                }
+                RemoveInsnAndKeepComment(bb, insn, *prevContiInsn);
+                return;
+            }
+        }
+        /* do combination strb/ldrb -> strh/ldrh -> str/ldr */
+        if (destRegNO == prevDestRegNO && destRegNO == RZR && prevDestRegNO == RZR) {
+            if ((memSize == k1ByteSize && diffVal == k1ByteSize) || (memSize == k2ByteSize && diffVal == k2ByteSize)) {
+                MOperator mopPair = GetMopHigherByte(thisMop);
+                if (offsetVal < prevOffsetVal) {
+                    if (static_cast<AArch64CGFunc&>(*cgFunc).IsOperandImmValid(mopPair, memOpnd, kInsnSecondOpnd)) {
+                        Insn &combineInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopPair, destOpnd, *memOpnd);
+                        bb.InsertInsnAfter(*prevContiInsn, combineInsn);
+                        RemoveInsnAndKeepComment(bb, insn, *prevContiInsn);
+                        return;
+                    }
+                } else {
+                    if (static_cast<AArch64CGFunc&>(*cgFunc).IsOperandImmValid(mopPair, prevMemOpnd,
+                                                                               kInsnSecondOpnd)) {
+                        Insn &combineInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopPair, prevDestOpnd, *prevMemOpnd);
+                        bb.InsertInsnAfter(*prevContiInsn, combineInsn);
+                        RemoveInsnAndKeepComment(bb, insn, *prevContiInsn);
+                        return;
+                    }
+                }
+            }
+        }
+    }
+}
+
+MOperator CombineContiLoadAndStorePattern::GetMopHigherByte(MOperator mop) const {
+    switch (mop) {
+        case MOP_wldrb:
+            return MOP_wldrh;
+        case MOP_wstrb:
+            return MOP_wstrh;
+        case MOP_wldrh:
+            return MOP_wldr;
+        case MOP_wstrh:
+            return MOP_wstr;
+        default:
+            DEBUG_ASSERT(false, "should not run here");
+            return MOP_undef;
+    }
+}
+
+void CombineContiLoadAndStorePattern::RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn) const {
+    /* keep the comment */
+    Insn *nn = prevInsn.GetNextMachineInsn();
+    std::string newComment = "";
+    MapleString comment = insn.GetComment();
+    if (comment.c_str() != nullptr && strlen(comment.c_str()) > 0) {
+        newComment += comment.c_str();
+    }
+    comment = prevInsn.GetComment();
+    if (comment.c_str() != nullptr && strlen(comment.c_str()) > 0) {
+        newComment = newComment + " " + comment.c_str();
+    }
+    if (newComment.c_str() != nullptr && strlen(newComment.c_str()) > 0) {
+        DEBUG_ASSERT(nn != nullptr, "nn should not be nullptr");
+        nn->SetComment(newComment);
+    }
+    bb.RemoveInsn(insn);
+    bb.RemoveInsn(prevInsn);
+}
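+
+/*
+ * Sketch of the sign-extension elimination below (constant arbitrary):
+ *   mov  w1, #100
+ *   sxtb w1, w1    ===>    mov w1, #100
+ * the sxtb is a no-op because 100 already lies in the sxtb range [-128, 127].
+ */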
+
+void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn) {
+    MOperator thisMop = insn.GetMachineOpcode();
+    Insn *prevInsn = insn.GetPrev();
+    while (prevInsn != nullptr && !prevInsn->GetMachineOpcode()) {
+        prevInsn = prevInsn->GetPrev();
+    }
+    if (prevInsn == nullptr) {
+        return;
+    }
+    auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &regOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (&insn != bb.GetFirstInsn() && regOpnd0.GetRegisterNumber() == regOpnd1.GetRegisterNumber() &&
+        prevInsn->IsMachineInstruction()) {
+        if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd);
+            if (opnd.IsIntImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                int64 value = immOpnd.GetValue();
+                if (thisMop == MOP_xsxtb32) {
+                    /* value should be in the range between -128 and 127 */
+                    if (value >= static_cast<int64>(0xFFFFFFFFFFFFFF80) && value <= 0x7F &&
+                        immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) {
+                        bb.RemoveInsn(insn);
+                    }
+                } else if (thisMop == MOP_xsxth32) {
+                    /* value should be in the range between -32768 and 32767 */
+                    if (value >= static_cast<int64>(0xFFFFFFFFFFFF8000) && value <= 0x7FFF &&
+                        immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) {
+                        bb.RemoveInsn(insn);
+                    }
+                } else {
+                    uint64 flag = 0xFFFFFFFFFFFFFF80; /* initialize the flag with fifty-seven 1s at top */
+                    if (thisMop == MOP_xsxth64) {
+                        flag = 0xFFFFFFFFFFFF8000; /* specify the flag with forty-nine 1s at top in this case */
+                    } else if (thisMop == MOP_xsxtw64) {
+                        flag = 0xFFFFFFFF80000000; /* specify the flag with thirty-three 1s at top in this case */
+                    }
+                    if (!(static_cast<uint64>(value) & flag) &&
+                        immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) {
+                        auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc);
+                        RegOperand &dstOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
+                            static_cast<AArch64reg>(dstMovOpnd.GetRegisterNumber()), k64BitSize,
+                            dstMovOpnd.GetRegisterType());
+                        prevInsn->SetOperand(kInsnFirstOpnd, dstOpnd);
+                        prevInsn->SetMOP(AArch64CG::kMd[MOP_xmovri64]);
+                        bb.RemoveInsn(insn);
+                    }
+                }
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrsb) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            if (thisMop == MOP_xsxtb32) {
+                bb.RemoveInsn(insn);
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrsh) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            if (thisMop == MOP_xsxth32) {
+                bb.RemoveInsn(insn);
+            }
+        }
+    }
+}
+
+void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) {
+    MOperator thisMop = insn.GetMachineOpcode();
+    Insn *prevInsn = insn.GetPreviousMachineInsn();
+    if (prevInsn == nullptr) {
+        return;
+    }
+    auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &regOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (prevInsn->IsCall() &&
+        prevInsn->GetIsCallReturnUnsigned() &&
+        regOpnd0.GetRegisterNumber() == regOpnd1.GetRegisterNumber() &&
+        (regOpnd1.GetRegisterNumber() == R0 || regOpnd1.GetRegisterNumber() == V0)) {
+        uint32 retSize = prevInsn->GetRetSize();
+        if (retSize > 0 &&
+            ((thisMop == MOP_xuxtb32 && retSize <= k1ByteSize) ||
+             (thisMop == MOP_xuxth32 && retSize <= k2ByteSize) ||
+             (thisMop == MOP_xuxtw64 && retSize <= k4ByteSize))) {
+            bb.RemoveInsn(insn);
+        }
+        return;
+    }
+    if (&insn == bb.GetFirstInsn() || regOpnd0.GetRegisterNumber() != regOpnd1.GetRegisterNumber() ||
+        !prevInsn->IsMachineInstruction()) {
+        return;
+    }
+    if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) {
+        return;
+    }
+    if (thisMop == MOP_xuxtb32) {
+        if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (!IsSameRegisterOperation(dstMovOpnd, regOpnd1, regOpnd0)) {
+                return;
+            }
+            Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd);
+            if (opnd.IsIntImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                int64 value = immOpnd.GetValue();
+                /* check the top 56 bits of value */
+                if (!(static_cast<uint64>(value) & 0xFFFFFFFFFFFFFF00)) {
+                    bb.RemoveInsn(insn);
+                }
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrb) {
+            auto &dstOpnd =
static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (dstOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) { + return; + } + bb.RemoveInsn(insn); + } + } else if (thisMop == MOP_xuxth32) { + if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) { + auto &dstMovOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (!IsSameRegisterOperation(dstMovOpnd, regOpnd1, regOpnd0)) { + return; + } + Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd); + if (opnd.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd); + int64 value = immOpnd.GetValue(); + if (!(static_cast(value) & 0xFFFFFFFFFFFF0000)) { + bb.RemoveInsn(insn); + } + } + } else if (prevInsn->GetMachineOpcode() == MOP_wldrh) { + auto &dstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (dstOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) { + return; + } + bb.RemoveInsn(insn); + } + } else { + /* this_mop == MOP_xuxtw64 */ + if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_wldrsb || + prevInsn->GetMachineOpcode() == MOP_wldrb || prevInsn->GetMachineOpcode() == MOP_wldrsh || + prevInsn->GetMachineOpcode() == MOP_wldrh || prevInsn->GetMachineOpcode() == MOP_wldr) { + auto &dstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (!IsSameRegisterOperation(dstOpnd, regOpnd1, regOpnd0)) { + return; + } + /* 32-bit ldr does zero-extension by default, so this conversion can be skipped */ + bb.RemoveInsn(insn); + } + } +} + +bool FmovRegPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + if (&insn == insn.GetBB()->GetFirstInsn()) { + return false; + } + prevInsn = insn.GetPrev(); + auto &curSrcRegOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &prevSrcRegOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + /* same src freg */ + if (curSrcRegOpnd.GetRegisterNumber() != prevSrcRegOpnd.GetRegisterNumber()) { + return false; + } + return true; +} + +void FmovRegPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator newMop; + uint32 doOpt = 0; + if (prevMop == MOP_xvmovrv && thisMop == MOP_xvmovrv) { + doOpt = k32BitSize; + newMop = MOP_wmovrr; + } else if (prevMop == MOP_xvmovrd && thisMop == MOP_xvmovrd) { + doOpt = k64BitSize; + newMop = MOP_xmovrr; + } + if (doOpt == 0) { + return; + } + auto &curDstRegOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t curDstReg = curDstRegOpnd.GetRegisterNumber(); + /* optimize case 1 */ + auto &prevDstRegOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + regno_t prevDstReg = prevDstRegOpnd.GetRegisterNumber(); + auto *aarch64CGFunc = static_cast(cgFunc); + RegOperand &dst = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(curDstReg), doOpt, kRegTyInt); + RegOperand &src = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(prevDstReg), doOpt, kRegTyInt); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, dst, src); + bb.InsertInsnBefore(insn, newInsn); + bb.RemoveInsn(insn); + RegOperand &newOpnd = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(prevDstReg), doOpt, kRegTyInt); + uint32 opndNum = nextInsn->GetOperandSize(); + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand &opnd = nextInsn->GetOperand(opndIdx); + if (opnd.IsMemoryAccessOperand()) { + auto 
&memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + if (base->IsRegister()) { + auto *reg = static_cast(base); + if (reg->GetRegisterNumber() == curDstReg) { + memOpnd.SetBaseRegister(newOpnd); + } + } + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + if (offset->IsRegister()) { + auto *reg = static_cast(offset); + if (reg->GetRegisterNumber() == curDstReg) { + memOpnd.SetIndexRegister(newOpnd); + } + } + } + } else if (opnd.IsRegister()) { + /* Check if it is a source operand. */ + auto *regProp = nextInsn->GetDesc()->opndMD[opndIdx]; + if (regProp->IsUse()) { + auto ® = static_cast(opnd); + if (reg.GetRegisterNumber() == curDstReg) { + nextInsn->SetOperand(opndIdx, newOpnd); + } + } + } + } +} + +bool SbfxOptPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + auto &curDstRegOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 opndNum = nextInsn->GetOperandSize(); + const InsnDesc *md = insn.GetDesc(); + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand &opnd = nextInsn->GetOperand(opndIdx); + /* Check if it is a source operand. */ + if (opnd.IsMemoryAccessOperand() || opnd.IsList()) { + return false; + } else if (opnd.IsRegister()) { + auto ® = static_cast(opnd); + auto *regProp = md->opndMD[opndIdx]; + if (reg.GetRegisterNumber() == curDstRegOpnd.GetRegisterNumber()) { + if (reg.GetSize() != k32BitSize) { + return false; + } + if (regProp->IsDef()) { + toRemove = true; + } else { + (void)cands.emplace_back(opndIdx); + } + } + } + } + return cands.size() != 0; +} + +void SbfxOptPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto &srcRegOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + RegOperand &newReg = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(srcRegOpnd.GetRegisterNumber()), k32BitSize, srcRegOpnd.GetRegisterType()); + // replace use point of opnd in nextInsn + for (auto i: cands) { + nextInsn->SetOperand(i, newReg); + } + if (toRemove) { + bb.RemoveInsn(insn); + } +} + +bool CbnzToCbzPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + /* reg has to be R0, since return value is in R0 */ + auto ®Opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (regOpnd0.GetRegisterNumber() != R0) { + return false; + } + nextBB = insn.GetBB()->GetNext(); + /* Make sure nextBB can only be reached by bb */ + if (nextBB->GetPreds().size() > 1 || nextBB->GetEhPreds().empty()) { + return false; + } + /* Next insn should be a mov R0 = 0 */ + movInsn = nextBB->GetFirstMachineInsn(); + if (movInsn == nullptr) { + return false; + } + MOperator movInsnMop = movInsn->GetMachineOpcode(); + if (movInsnMop != MOP_wmovri32 && movInsnMop != MOP_xmovri64) { + return false; + } + auto &movDest = static_cast(movInsn->GetOperand(kInsnFirstOpnd)); + if (movDest.GetRegisterNumber() != R0) { + return false; + } + auto &movImm = static_cast(movInsn->GetOperand(kInsnSecondOpnd)); + if (movImm.GetValue() != 0) { + return false; + } + Insn *nextBrInsn = movInsn->GetNextMachineInsn(); + if (nextBrInsn == nullptr) { + return false; + } + if (nextBrInsn->GetMachineOpcode() != MOP_xuncond) { + return false; + } + /* Is nextBB branch to the return-bb? 
*/ + if (nextBB->GetSuccs().size() != 1) { + return false; + } + return true; +} + +void CbnzToCbzPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + BB *targetBB = nullptr; + auto it = bb.GetSuccsBegin(); + if (*it == nextBB) { + ++it; + } + targetBB = *it; + /* Make sure when nextBB is empty, targetBB is fallthru of bb. */ + if (targetBB != nextBB->GetNext()) { + return; + } + BB *nextBBTarget = *(nextBB->GetSuccsBegin()); + if (nextBBTarget->GetKind() != BB::kBBReturn) { + return; + } + /* Control flow looks nice, instruction looks nice */ + Operand &brTarget = brInsn->GetOperand(kInsnFirstOpnd); + insn.SetOperand(kInsnSecondOpnd, brTarget); + if (thisMop == MOP_wcbnz) { + insn.SetMOP(AArch64CG::kMd[MOP_wcbz]); + } else { + insn.SetMOP(AArch64CG::kMd[MOP_xcbz]); + } + nextBB->RemoveInsn(*movInsn); + nextBB->RemoveInsn(*brInsn); + /* nextBB is now a fallthru bb, not a goto bb */ + nextBB->SetKind(BB::kBBFallthru); + /* + * fix control flow, we have bb, nextBB, targetBB, nextBB_target + * connect bb -> nextBB_target erase targetBB + */ + it = bb.GetSuccsBegin(); + CHECK_FATAL(it != bb.GetSuccsEnd(), "succs is empty."); + if (*it == targetBB) { + bb.EraseSuccs(it); + bb.PushFrontSuccs(*nextBBTarget); + } else { + ++it; + bb.EraseSuccs(it); + bb.PushBackSuccs(*nextBBTarget); + } + for (auto targetBBIt = targetBB->GetPredsBegin(); targetBBIt != targetBB->GetPredsEnd(); ++targetBBIt) { + if (*targetBBIt == &bb) { + targetBB->ErasePreds(targetBBIt); + break; + } + } + for (auto nextIt = nextBBTarget->GetPredsBegin(); nextIt != nextBBTarget->GetPredsEnd(); ++nextIt) { + if (*nextIt == nextBB) { + nextBBTarget->ErasePreds(nextIt); + break; + } + } + nextBBTarget->PushBackPreds(bb); + + /* nextBB has no target, originally just branch target */ + nextBB->EraseSuccs(nextBB->GetSuccsBegin()); + DEBUG_ASSERT(nextBB->GetSuccs().empty(), "peep: branch target incorrect"); + /* Now make nextBB fallthru to targetBB */ + nextBB->PushFrontSuccs(*targetBB); + targetBB->PushBackPreds(*nextBB); +} + +void CsetCbzToBeqOptAArch64::Run(BB &bb, Insn &insn) { + Insn *insn1 = insn.GetPreviousMachineInsn(); + if (insn1 == nullptr) { + return; + } + /* prevInsn must be "cset" insn */ + MOperator opCode1 = insn1->GetMachineOpcode(); + if (opCode1 != MOP_xcsetrc && opCode1 != MOP_wcsetrc) { + return; + } + + auto &tmpRegOp1 = static_cast(insn1->GetOperand(kInsnFirstOpnd)); + regno_t baseRegNO1 = tmpRegOp1.GetRegisterNumber(); + auto &tmpRegOp2 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t baseRegNO2 = tmpRegOp2.GetRegisterNumber(); + if (baseRegNO1 != baseRegNO2) { + return; + } + /* If the reg will be used later, we shouldn't optimize the cset insn here */ + if (IfOperandIsLiveAfterInsn(tmpRegOp2, insn)) { + return; + } + MOperator opCode = insn.GetMachineOpcode(); + bool reverse = (opCode == MOP_xcbz || opCode == MOP_wcbz); + Operand &rflag = static_cast(&cgFunc)->GetOrCreateRflag(); + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &cond = static_cast(insn1->GetOperand(kInsnSecondOpnd)); + MOperator jmpOperator = SelectMOperator(cond.GetCode(), reverse); + CHECK_FATAL((jmpOperator != MOP_undef), "unknown condition code"); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(jmpOperator, rflag, label); + bb.RemoveInsn(*insn1); + bb.ReplaceInsn(insn, newInsn); +} + +MOperator CsetCbzToBeqOptAArch64::SelectMOperator(ConditionCode condCode, bool inverse) const { + switch (condCode) { + case CC_NE: + return inverse 
? MOP_beq : MOP_bne;
+        case CC_EQ:
+            return inverse ? MOP_bne : MOP_beq;
+        case CC_MI:
+            return inverse ? MOP_bpl : MOP_bmi;
+        case CC_PL:
+            return inverse ? MOP_bmi : MOP_bpl;
+        case CC_VS:
+            return inverse ? MOP_bvc : MOP_bvs;
+        case CC_VC:
+            return inverse ? MOP_bvs : MOP_bvc;
+        case CC_HI:
+            return inverse ? MOP_bls : MOP_bhi;
+        case CC_LS:
+            return inverse ? MOP_bhi : MOP_bls;
+        case CC_GE:
+            return inverse ? MOP_blt : MOP_bge;
+        case CC_LT:
+            return inverse ? MOP_bge : MOP_blt;
+        case CC_HS:
+            return inverse ? MOP_blo : MOP_bhs;
+        case CC_LO:
+            return inverse ? MOP_bhs : MOP_blo;
+        case CC_LE:
+            return inverse ? MOP_bgt : MOP_ble;
+        case CC_GT:
+            return inverse ? MOP_ble : MOP_bgt;
+        case CC_CS:
+            return inverse ? MOP_bcc : MOP_bcs;
+        default:
+            return MOP_undef;
+    }
+}
+
+bool ContiLDRorSTRToSameMEMPattern::CheckCondition(Insn &insn) {
+    prevInsn = insn.GetPrev();
+    while (prevInsn != nullptr && !prevInsn->GetMachineOpcode() && prevInsn != insn.GetBB()->GetFirstInsn()) {
+        prevInsn = prevInsn->GetPrev();
+    }
+    if (!insn.IsMachineInstruction() || prevInsn == nullptr) {
+        return false;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    /*
+     * store regB, RegC, offset
+     * load regA, RegC, offset
+     */
+    if ((thisMop == MOP_xldr && prevMop == MOP_xstr) || (thisMop == MOP_wldr && prevMop == MOP_wstr) ||
+        (thisMop == MOP_dldr && prevMop == MOP_dstr) || (thisMop == MOP_sldr && prevMop == MOP_sstr)) {
+        loadAfterStore = true;
+    }
+    /*
+     * load regA, RegC, offset
+     * load regB, RegC, offset
+     */
+    if ((thisMop == MOP_xldr || thisMop == MOP_wldr || thisMop == MOP_dldr || thisMop == MOP_sldr) &&
+        prevMop == thisMop) {
+        loadAfterLoad = true;
+    }
+    if (!loadAfterStore && !loadAfterLoad) {
+        return false;
+    }
+    DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands");
+    DEBUG_ASSERT(prevInsn->GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands");
+    return true;
+}
+
+void ContiLDRorSTRToSameMEMPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    auto &memOpnd1 = static_cast<MemOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    MemOperand::AArch64AddressingMode addrMode1 = memOpnd1.GetAddrMode();
+    if (addrMode1 != MemOperand::kAddrModeBOi || (!memOpnd1.IsIntactIndexed())) {
+        return;
+    }
+
+    auto *base1 = static_cast<RegOperand*>(memOpnd1.GetBaseRegister());
+    DEBUG_ASSERT(base1 == nullptr || !base1->IsVirtualRegister(), "physical register has not been allocated?");
+    OfstOperand *offset1 = memOpnd1.GetOffsetImmediate();
+
+    auto &memOpnd2 = static_cast<MemOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    MemOperand::AArch64AddressingMode addrMode2 = memOpnd2.GetAddrMode();
+    if (addrMode2 != MemOperand::kAddrModeBOi || (!memOpnd2.IsIntactIndexed())) {
+        return;
+    }
+
+    auto *base2 = static_cast<RegOperand*>(memOpnd2.GetBaseRegister());
+    DEBUG_ASSERT(base2 == nullptr || !base2->IsVirtualRegister(), "physical register has not been allocated?");
+    OfstOperand *offset2 = memOpnd2.GetOffsetImmediate();
+
+    if (base1 == nullptr || base2 == nullptr || offset1 == nullptr || offset2 == nullptr) {
+        return;
+    }
+
+    auto &reg1 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &reg2 = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    int64 offsetVal1 = offset1->GetOffsetValue();
+    int64 offsetVal2 = offset2->GetOffsetValue();
+    if (base1->GetRegisterNumber() != base2->GetRegisterNumber() ||
+        reg1.GetRegisterType() != reg2.GetRegisterType() || reg1.GetSize() !=
reg2.GetSize() || + offsetVal1 != offsetVal2) { + return; + } + if (loadAfterStore && reg1.GetRegisterNumber() != reg2.GetRegisterNumber()) { + /* replace it with mov */ + MOperator newOp = MOP_wmovrr; + if (reg1.GetRegisterType() == kRegTyInt) { + newOp = (reg1.GetSize() <= k32BitSize) ? MOP_wmovrr : MOP_xmovrr; + } else if (reg1.GetRegisterType() == kRegTyFloat) { + newOp = (reg1.GetSize() <= k32BitSize) ? MOP_xvmovs : MOP_xvmovd; + } + Insn *nextInsn = insn.GetNext(); + while (nextInsn != nullptr && !nextInsn->GetMachineOpcode() && nextInsn != bb.GetLastInsn()) { + nextInsn = nextInsn->GetNext(); + } + bool moveSameReg = false; + if (nextInsn && nextInsn->GetIsSpill() && !IfOperandIsLiveAfterInsn(reg1, *nextInsn)) { + MOperator nextMop = nextInsn->GetMachineOpcode(); + if ((thisMop == MOP_xldr && nextMop == MOP_xstr) || (thisMop == MOP_wldr && nextMop == MOP_wstr) || + (thisMop == MOP_dldr && nextMop == MOP_dstr) || (thisMop == MOP_sldr && nextMop == MOP_sstr)) { + nextInsn->Insn::SetOperand(kInsnFirstOpnd, reg2); + moveSameReg = true; + } + } + if (!moveSameReg) { + bb.InsertInsnAfter(*prevInsn, cgFunc->GetInsnBuilder()->BuildInsn(newOp, reg1, reg2)); + } + bb.RemoveInsn(insn); + } else if (reg1.GetRegisterNumber() == reg2.GetRegisterNumber() && + base1->GetRegisterNumber() != reg2.GetRegisterNumber()) { + bb.RemoveInsn(insn); + } +} + +bool RemoveIncDecRefPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xmovrr) { + return false; + } + auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != R0) { + return false; + } + return true; +} + +void RemoveIncDecRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); +} + +#ifdef USE_32BIT_REF +constexpr uint32 kRefSize = 32; +#else +constexpr uint32 kRefSize = 64; +#endif + +void CselZeroOneToCsetOpt::Run(BB &bb, Insn &insn) { + Operand &trueValueOp = insn.GetOperand(kInsnSecondOpnd); + Operand &falseValueOp = insn.GetOperand(kInsnThirdOpnd); + Operand *trueTempOp = nullptr; + Operand *falseTempOp = nullptr; + + /* find fixed value in BB */ + if (!trueValueOp.IsIntImmediate()) { + trueMovInsn = FindFixedValue(trueValueOp, bb, trueTempOp, insn); + } + if (!falseValueOp.IsIntImmediate()) { + falseMovInsn = FindFixedValue(falseValueOp, bb, falseTempOp, insn); + } + + DEBUG_ASSERT(trueTempOp != nullptr, "trueTempOp should not be nullptr"); + DEBUG_ASSERT(falseTempOp != nullptr, "falseTempOp should not be nullptr"); + /* csel to cset */ + if ((trueTempOp->IsIntImmediate() || IsZeroRegister(*trueTempOp)) && + (falseTempOp->IsIntImmediate() || IsZeroRegister(*falseTempOp))) { + ImmOperand *imm1 = static_cast(trueTempOp); + ImmOperand *imm2 = static_cast(falseTempOp); + bool inverse = imm1->IsOne() && (imm2->IsZero() || IsZeroRegister(*imm2)); + if (inverse || ((imm1->IsZero() || IsZeroRegister(*imm1)) && imm2->IsOne())) { + Operand ® = insn.GetOperand(kInsnFirstOpnd); + CondOperand &condOperand = static_cast(insn.GetOperand(kInsnFourthOpnd)); + MOperator mopCode = (reg.GetSize() == k64BitSize) ? 
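+            /*
+             * Illustrative sketch (registers assumed, not from the patch):
+             *   mov w1, #1; mov w2, #0; csel w0, w1, w2, NE   ==>   cset w0, NE
+             * With the 0/1 pair swapped, GetReverseCC below flips the condition
+             * instead.
+             */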
MOP_xcsetrc : MOP_wcsetrc; + /* get new cond ccCode */ + ConditionCode ccCode = inverse ? condOperand.GetCode() : GetReverseCC(condOperand.GetCode()); + if (ccCode == kCcLast) { + return; + } + AArch64CGFunc *func = static_cast(cgFunc); + CondOperand &cond = func->GetCondOperand(ccCode); + Operand &rflag = func->GetOrCreateRflag(); + Insn &csetInsn = func->GetInsnBuilder()->BuildInsn(mopCode, reg, cond, rflag); + if (CGOptions::DoCGSSA() && CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) { + CHECK_FATAL(false, "check this case in ssa opt"); + } + insn.GetBB()->ReplaceInsn(insn, csetInsn); + } + } +} + +Insn *CselZeroOneToCsetOpt::FindFixedValue(Operand &opnd, BB &bb, Operand *&tempOp, const Insn &insn) const { + tempOp = &opnd; + bool alreadyFindCsel = false; + bool isRegDefined = false; + regno_t regno = static_cast(opnd).GetRegisterNumber(); + FOR_BB_INSNS_REV(defInsn, &bb) { + if (!defInsn->IsMachineInstruction() || defInsn->IsBranch()) { + continue; + } + /* find csel */ + if (defInsn->GetId() == insn.GetId()) { + alreadyFindCsel = true; + } + /* find def defined */ + if (alreadyFindCsel) { + isRegDefined = defInsn->IsRegDefined(regno); + } + /* if def defined is movi do this opt */ + if (isRegDefined) { + MOperator thisMop = defInsn->GetMachineOpcode(); + if (thisMop == MOP_wmovri32 || thisMop == MOP_xmovri64) { + if (&defInsn->GetOperand(kInsnFirstOpnd) == &opnd) { + tempOp = &(defInsn->GetOperand(kInsnSecondOpnd)); + return defInsn; + } + } else { + return nullptr; + } + } + } + return nullptr; +} + +bool InlineReadBarriersPattern::CheckCondition(Insn &insn) { + /* Inline read barriers only enabled for GCONLY. */ + if (!CGOptions::IsGCOnly()) { + return false; + } + return true; +} + +void InlineReadBarriersPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + const std::string &barrierName = GetReadBarrierName(insn); + if (barrierName == kMccDummy) { + /* remove dummy call. */ + bb.RemoveInsn(insn); + } else { + /* replace barrier function call with load instruction. */ + bool isVolatile = (barrierName == kMccLoadRefV || barrierName == kMccLoadRefVS); + bool isStatic = (barrierName == kMccLoadRefS || barrierName == kMccLoadRefVS); + /* refSize is 32 if USE_32BIT_REF defined, otherwise 64. */ + const uint32 refSize = kRefSize; + auto *aarch64CGFunc = static_cast(cgFunc); + MOperator loadOp = GetLoadOperator(refSize, isVolatile); + RegOperand ®Op = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, refSize, kRegTyInt); + AArch64reg addrReg = isStatic ? R0 : R1; + MemOperand &addr = aarch64CGFunc->CreateMemOpnd(addrReg, 0, refSize); + Insn &loadInsn = cgFunc->GetInsnBuilder()->BuildInsn(loadOp, regOp, addr); + bb.ReplaceInsn(insn, loadInsn); + } + bool isTailCall = (insn.GetMachineOpcode() == MOP_tail_call_opt_xbl); + if (isTailCall) { + /* add 'ret' instruction for tail call optimized load barrier. 
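+         * For example (a sketch, registers assumed): a function whose body ends
+         * with "b MCC_LoadRefField" now ends with the inlined "ldr x0, [x1]",
+         * so the explicit "ret" appended below restores the missing return and
+         * the block is reclassified as a return BB.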
*/ + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xret); + bb.AppendInsn(retInsn); + bb.SetKind(BB::kBBReturn); + } +} + +bool ReplaceDivToMultiPattern::CheckCondition(Insn &insn) { + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + prePrevInsn = prevInsn->GetPreviousMachineInsn(); + auto &sdivOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &sdivOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (sdivOpnd1.GetRegisterNumber() == sdivOpnd2.GetRegisterNumber() || sdivOpnd1.GetRegisterNumber() == R16 || + sdivOpnd2.GetRegisterNumber() == R16 || prePrevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator prePrevMop = prePrevInsn->GetMachineOpcode(); + if (prevMop && (prevMop == MOP_wmovkri16) && prePrevMop && (prePrevMop == MOP_wmovri32)) { + return true; + } + return false; +} + +void ReplaceDivToMultiPattern::Run(BB &bb, Insn &insn) { + if (CheckCondition(insn)) { + auto &sdivOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &sdivOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + /* Check if dest operand of insn is idential with register of prevInsn and prePrevInsn. */ + if ((&(prevInsn->GetOperand(kInsnFirstOpnd)) != &sdivOpnd2) || + (&(prePrevInsn->GetOperand(kInsnFirstOpnd)) != &sdivOpnd2)) { + return; + } + auto &prevLsl = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (prevLsl.GetShiftAmount() != k16BitSize) { + return; + } + auto &prevImmOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &prePrevImmOpnd = static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)); + /* + * expect the immediate value of first mov is 0x086A0 which matches 0x186A0 + * because 0x10000 is ignored in 32 bits register + */ + if ((prevImmOpnd.GetValue() != 1) || (prePrevImmOpnd.GetValue() != 34464)) { + return; + } + auto *aarch64CGFunc = static_cast(cgFunc); + /* mov w16, #0x588f */ + RegOperand &tempOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R16), + k64BitSize, kRegTyInt); + /* create a immedate operand with this specific value */ + ImmOperand &multiplierLow = aarch64CGFunc->CreateImmOperand(0x588f, k32BitSize, false); + Insn &multiplierLowInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_wmovri32, tempOpnd, multiplierLow); + bb.InsertInsnBefore(*prePrevInsn, multiplierLowInsn); + + /* + * movk w16, #0x4f8b, LSL #16 + * create a immedate operand with this specific value + */ + ImmOperand &multiplierHigh = aarch64CGFunc->CreateImmOperand(0x4f8b, k32BitSize, false); + BitShiftOperand *multiplierHighLsl = aarch64CGFunc->GetLogicalShiftLeftOperand(k16BitSize, true); + Insn &multiplierHighInsn = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_wmovkri16, tempOpnd, multiplierHigh, *multiplierHighLsl); + bb.InsertInsnBefore(*prePrevInsn, multiplierHighInsn); + + /* smull x16, w0, w16 */ + Insn &newSmullInsn = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xsmullrrr, tempOpnd, sdivOpnd1, tempOpnd); + bb.InsertInsnBefore(*prePrevInsn, newSmullInsn); + + /* asr x16, x16, #32 */ + ImmOperand &dstLsrImmHigh = aarch64CGFunc->CreateImmOperand(k32BitSize, k32BitSize, false); + Insn &dstLsrInsnHigh = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xasrrri6, tempOpnd, tempOpnd, dstLsrImmHigh); + bb.InsertInsnBefore(*prePrevInsn, dstLsrInsnHigh); + + /* add x16, x16, w0, SXTW */ + Operand &sxtw = aarch64CGFunc->CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, 0, 3); + Insn &addInsn = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xxwaddrrre, tempOpnd, tempOpnd, 
sdivOpnd1, sxtw); + bb.InsertInsnBefore(*prePrevInsn, addInsn); + + /* asr x16, x16, #17 */ + ImmOperand &dstLsrImmChange = aarch64CGFunc->CreateImmOperand(17, k32BitSize, false); + Insn &dstLsrInsnChange = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xasrrri6, tempOpnd, tempOpnd, dstLsrImmChange); + bb.InsertInsnBefore(*prePrevInsn, dstLsrInsnChange); + + /* add x2, x16, x0, LSR #31 */ + auto &sdivOpnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t sdivOpnd0RegNO = sdivOpnd0.GetRegisterNumber(); + RegOperand &extSdivO0 = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(sdivOpnd0RegNO), + k64BitSize, kRegTyInt); + + regno_t sdivOpnd1RegNum = sdivOpnd1.GetRegisterNumber(); + RegOperand &extSdivO1 = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(sdivOpnd1RegNum), + k64BitSize, kRegTyInt); + /* shift bit amount is thirty-one at this insn */ + BitShiftOperand &addLsrOpnd = aarch64CGFunc->CreateBitShiftOperand(BitShiftOperand::kLSR, 31, 6); + Insn &addLsrInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrrrs, extSdivO0, tempOpnd, extSdivO1, addLsrOpnd); + bb.InsertInsnBefore(*prePrevInsn, addLsrInsn); + + /* + * remove insns + * Check if x1 is used after sdiv insn, and if it is in live-out. + */ + if (sdivOpnd2.GetRegisterNumber() != sdivOpnd0.GetRegisterNumber()) { + if (IfOperandIsLiveAfterInsn(sdivOpnd2, insn)) { + /* Only remove div instruction. */ + bb.RemoveInsn(insn); + return; + } + } + + bb.RemoveInsn(*prePrevInsn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); + } +} + +Insn *AndCmpBranchesToCsetAArch64::FindPreviousCmp(Insn &insn) const { + regno_t defRegNO = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + for (Insn *curInsn = insn.GetPrev(); curInsn != nullptr; curInsn = curInsn->GetPrev()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->GetMachineOpcode() == MOP_wcmpri || curInsn->GetMachineOpcode() == MOP_xcmpri) { + return curInsn; + } + /* + * if any def/use of CC or insn defReg between insn and curInsn, stop searching and return nullptr. + */ + if (curInsn->ScanReg(defRegNO) || + curInsn->ScanReg(kRFLAG)) { + return nullptr; + } + } + return nullptr; +} + +void AndCmpBranchesToCsetAArch64::Run(BB &bb, Insn &insn) { + /* prevInsn must be "cmp" insn */ + Insn *prevInsn = FindPreviousCmp(insn); + if (prevInsn == nullptr) { + return; + } + /* prevPrevInsn must be "and" insn */ + Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn(); + if (prevPrevInsn == nullptr || + (prevPrevInsn->GetMachineOpcode() != MOP_wandrri12 && prevPrevInsn->GetMachineOpcode() != MOP_xandrri13)) { + return; + } + + auto &csetCond = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &cmpImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int64 cmpImmVal = cmpImm.GetValue(); + auto &andImm = static_cast(prevPrevInsn->GetOperand(kInsnThirdOpnd)); + int64 andImmVal = andImm.GetValue(); + if ((csetCond.GetCode() == CC_EQ && cmpImmVal == andImmVal) || + (csetCond.GetCode() == CC_NE && cmpImmVal == 0)) { + /* if flag_reg of "cmp" is live later, we can't remove cmp insn. 
*/ + auto &flagReg = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (IfOperandIsLiveAfterInsn(flagReg, insn)) { + return; + } + + auto &csetReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &prevInsnSecondReg = prevInsn->GetOperand(kInsnSecondOpnd); + bool isRegDiff = !RegOperand::IsSameRegNO(csetReg, prevInsnSecondReg); + if (isRegDiff && IfOperandIsLiveAfterInsn(static_cast(prevInsnSecondReg), insn)) { + return; + } + if (andImmVal == 1) { + if (!RegOperand::IsSameRegNO(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnFirstOpnd))) { + return; + } + /* save the "and" insn only. */ + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + if (isRegDiff) { + prevPrevInsn->Insn::SetOperand(kInsnFirstOpnd, csetReg); + } + } else { + if (!RegOperand::IsSameReg(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnFirstOpnd)) || + !RegOperand::IsSameReg(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnSecondOpnd))) { + return; + } + + /* andImmVal is n power of 2 */ + int n = logValueAtBase2(andImmVal); + if (n < 0) { + return; + } + + /* create ubfx insn */ + MOperator ubfxOp = (csetReg.GetSize() <= k32BitSize) ? MOP_wubfxrri5i5 : MOP_xubfxrri6i6; + if (ubfxOp == MOP_wubfxrri5i5 && static_cast(n) >= k32BitSize) { + return; + } + auto &dstReg = static_cast(csetReg); + auto &srcReg = static_cast(prevInsnSecondReg); + auto *aarch64CGFunc = static_cast(&cgFunc); + ImmOperand &bitPos = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false); + Insn &ubfxInsn = cgFunc.GetInsnBuilder()->BuildInsn(ubfxOp, dstReg, srcReg, bitPos, bitSize); + bb.InsertInsnBefore(*prevPrevInsn, ubfxInsn); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } + } +} + +void AndCmpBranchesToTstAArch64::Run(BB &bb, Insn &insn) { + /* nextInsn must be "cmp" insn */ + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr || + (nextInsn->GetMachineOpcode() != MOP_wcmpri && nextInsn->GetMachineOpcode() != MOP_xcmpri)) { + return; + } + /* nextNextInsn must be "beq" or "bne" insn */ + Insn *nextNextInsn = nextInsn->GetNextMachineInsn(); + if (nextNextInsn == nullptr || + (nextNextInsn->GetMachineOpcode() != MOP_beq && nextNextInsn->GetMachineOpcode() != MOP_bne)) { + return; + } + auto &andRegOp = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t andRegNO1 = andRegOp.GetRegisterNumber(); + auto &cmpRegOp2 = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); + regno_t cmpRegNO2 = cmpRegOp2.GetRegisterNumber(); + if (andRegNO1 != cmpRegNO2) { + return; + } + /* If the reg will be used later, we shouldn't optimize the and insn here */ + if (IfOperandIsLiveAfterInsn(andRegOp, *nextInsn)) { + return; + } + Operand &immOpnd = nextInsn->GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(immOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &defConst = static_cast(immOpnd); + int64 defConstValue = defConst.GetValue(); + if (defConstValue != 0) { + return; + } + /* build tst insn */ + Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd); + auto &andRegOp2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MOperator newOp = MOP_undef; + if (andOpnd3.IsRegister()) { + newOp = (andRegOp2.GetSize() <= k32BitSize) ? MOP_wtstrr : MOP_xtstrr; + } else { + newOp = (andRegOp2.GetSize() <= k32BitSize) ? 
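+    /*
+     * Illustrative sketch (registers assumed, not from the patch):
+     *   and w0, w1, #3; cmp w0, #0; bne .L   ==>   tst w1, #3; bne .L
+     * tst is ANDS with a discarded result, so it sets the same flags as the
+     * compare-with-zero that is removed.
+     */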
MOP_wtstri32 : MOP_xtstri64; + } + Operand &rflag = static_cast(&cgFunc)->GetOrCreateRflag(); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newOp, rflag, andRegOp2, andOpnd3); + if (CGOptions::DoCGSSA() && CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) { + CHECK_FATAL(false, "check this case in ssa opt"); + } + bb.InsertInsnAfter(*nextInsn, newInsn); + bb.RemoveInsn(insn); + bb.RemoveInsn(*nextInsn); +} + +void AndCbzBranchesToTstAArch64::Run(BB &bb, Insn &insn) { + /* nextInsn must be "cbz" or "cbnz" insn */ + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr || + (nextInsn->GetMachineOpcode() != MOP_wcbz && nextInsn->GetMachineOpcode() != MOP_xcbz)) { + return; + } + auto &andRegOp = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t andRegNO1 = andRegOp.GetRegisterNumber(); + auto &cbzRegOp2 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); + regno_t cbzRegNO2 = cbzRegOp2.GetRegisterNumber(); + if (andRegNO1 != cbzRegNO2) { + return; + } + /* If the reg will be used later, we shouldn't optimize the and insn here */ + if (IfOperandIsLiveAfterInsn(andRegOp, *nextInsn)) { + return; + } + /* build tst insn */ + Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd); + auto &andRegOp2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &andRegOp3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + MOperator newTstOp = MOP_undef; + if (andOpnd3.IsRegister()) { + newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstrr : MOP_xtstrr; + } else { + newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstri32 : MOP_xtstri64; + } + Operand &rflag = static_cast(&cgFunc)->GetOrCreateRflag(); + Insn &newInsnTst = cgFunc.GetInsnBuilder()->BuildInsn(newTstOp, rflag, andRegOp2, andOpnd3); + if (andOpnd3.IsImmediate()) { + if (!static_cast(andOpnd3).IsBitmaskImmediate(andRegOp2.GetSize())) { + return; + } + } + /* build beq insn */ + MOperator opCode = nextInsn->GetMachineOpcode(); + bool reverse = (opCode == MOP_xcbz || opCode == MOP_wcbz); + auto &label = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); + MOperator jmpOperator = reverse ? MOP_beq : MOP_bne; + Insn &newInsnJmp = cgFunc.GetInsnBuilder()->BuildInsn(jmpOperator, rflag, label); + bb.ReplaceInsn(insn, newInsnTst); + bb.ReplaceInsn(*nextInsn, newInsnJmp); +} + +void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn) { + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (!insn.IsBranch() || insn.GetOperandSize() <= kInsnSecondOpnd || prevInsn == nullptr) { + return; + } + if (!insn.GetOperand(kInsnSecondOpnd).IsLabel()) { + return; + } + LabelOperand *label = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + RegOperand *regOpnd = nullptr; + RegOperand *reg0 = nullptr; + RegOperand *reg1 = nullptr; + MOperator newOp = MOP_undef; + ImmOperand *imm = nullptr; + switch (prevInsn->GetMachineOpcode()) { + case MOP_wcmpri: + case MOP_xcmpri: { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + imm = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (imm->GetValue() != 0) { + return; + } + if (insn.GetMachineOpcode() == MOP_bge) { + newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else if (insn.GetMachineOpcode() == MOP_blt) { + newOp = (regOpnd->GetSize() <= k32BitSize) ? 
MOP_wtbnz : MOP_xtbnz; + } else { + return; + } + break; + } + case MOP_wcmprr: + case MOP_xcmprr: { + reg0 = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + reg1 = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (!IsZeroRegister(*reg0) && !IsZeroRegister(*reg1)) { + return; + } + switch (insn.GetMachineOpcode()) { + case MOP_bge: + if (IsZeroRegister(*reg1)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else { + return; + } + break; + case MOP_ble: + if (IsZeroRegister(*reg0)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else { + return; + } + break; + case MOP_blt: + if (IsZeroRegister(*reg1)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; + } else { + return; + } + break; + case MOP_bgt: + if (IsZeroRegister(*reg0)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; + } else { + return; + } + break; + default: + return; + } + break; + } + default: + return; + } + auto aarch64CGFunc = static_cast(&cgFunc); + ImmOperand &bitp = aarch64CGFunc->CreateImmOperand( + (regOpnd->GetSize() <= k32BitSize) ? (k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false); + bb.InsertInsnAfter( + insn, cgFunc.GetInsnBuilder()->BuildInsn(newOp, *static_cast(regOpnd), bitp, *label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); +} + +void ElimDuplicateExtensionAArch64::Run(BB &bb, Insn &insn) { + (void)bb; + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + uint32 index; + uint32 upper; + bool is32bits = false; + MOperator *table = nullptr; + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xsxtb32: + is32bits = true; + [[clang::fallthrough]]; + case MOP_xsxtb64: + table = sextMopTable; + index = 0; + upper = kSizeOfSextMopTable; + break; + case MOP_xsxth32: + is32bits = true; + [[clang::fallthrough]]; + case MOP_xsxth64: + table = sextMopTable; + index = 2; + upper = kSizeOfSextMopTable; + break; + case MOP_xsxtw64: + table = sextMopTable; + index = 4; + upper = kSizeOfSextMopTable; + break; + case MOP_xuxtb32: + is32bits = true; + table = uextMopTable; + index = 0; + upper = kSizeOfUextMopTable; + break; + case MOP_xuxth32: + is32bits = true; + table = uextMopTable; + index = 1; + upper = kSizeOfUextMopTable; + break; + case MOP_xuxtw64: + table = uextMopTable; + index = 2; + upper = kSizeOfUextMopTable; + break; + default: + CHECK_FATAL(false, "Unexpected mop"); + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + for (uint32 i = index; i < upper; ++i) { + if (prevMop == table[i]) { + Operand &prevDestOpnd = prevInsn->GetOperand(kInsnFirstOpnd); + regno_t dest = static_cast(prevDestOpnd).GetRegisterNumber(); + regno_t src = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + if (dest == src) { + insn.SetMOP(is32bits ? AArch64CG::kMd[MOP_wmovrr] : AArch64CG::kMd[MOP_xmovrr]); + if (upper == kSizeOfSextMopTable && + static_cast(prevDestOpnd).GetValidBitsNum() != + static_cast(insn.GetOperand(kInsnFirstOpnd)).GetValidBitsNum()) { + if (is32bits) { + insn.GetOperand(kInsnFirstOpnd).SetSize(k64BitSize); + insn.SetMOP(AArch64CG::kMd[MOP_xmovrr]); + } else { + prevDestOpnd.SetSize(k64BitSize); + prevInsn->SetMOP(prevMop == MOP_xsxtb32 ? 
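+            /*
+             * Sketch (register assumed): "cmp w0, #0; blt .L" only depends on
+             * the sign bit, so it becomes "tbnz w0, #31, .L" (bit 63 for the
+             * 64-bit form); bge turns into tbz the same way.
+             */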
AArch64CG::kMd[MOP_xsxtb64] : AArch64CG::kMd[MOP_xsxth64]); + } + } + } + break; + } + } +} + +/* + * if there is define point of checkInsn->GetOperand(opndIdx) between startInsn and firstInsn + * return define insn. else return nullptr + */ +const Insn *CmpCsetAArch64::DefInsnOfOperandInBB(const Insn &startInsn, const Insn &checkInsn, int opndIdx) const { + Insn *prevInsn = nullptr; + for (const Insn *insn = &startInsn; insn != nullptr; insn = prevInsn) { + prevInsn = insn->GetPreviousMachineInsn(); + if (!insn->IsMachineInstruction()) { + continue; + } + /* checkInsn.GetOperand(opndIdx) is thought modified conservatively */ + if (insn->IsCall()) { + return insn; + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (!md->opndMD[i]->IsDef()) { + continue; + } + /* Operand is base reg of Memory, defined by str */ + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + DEBUG_ASSERT(base != nullptr, "nullptr check"); + DEBUG_ASSERT(base->IsRegister(), "expects RegOperand"); + if (RegOperand::IsSameRegNO(*base, checkInsn.GetOperand(static_cast(opndIdx))) && + memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + return insn; + } + } else { + DEBUG_ASSERT(opnd.IsRegister(), "expects RegOperand"); + if (RegOperand::IsSameRegNO(checkInsn.GetOperand(static_cast(opndIdx)), opnd)) { + return insn; + } + } + } + } + return nullptr; +} + +bool CmpCsetAArch64::OpndDefByOneValidBit(const Insn &defInsn) const { + MOperator defMop = defInsn.GetMachineOpcode(); + switch (defMop) { + case MOP_wcsetrc: + case MOP_xcsetrc: + return true; + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &defOpnd = defInsn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &defConst = static_cast(defOpnd); + int64 defConstValue = defConst.GetValue(); + return (defConstValue == 0 || defConstValue == 1); + } + case MOP_xmovrr: + case MOP_wmovrr: + return IsZeroRegister(defInsn.GetOperand(kInsnSecondOpnd)); + case MOP_wlsrrri5: + case MOP_xlsrrri6: { + Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand"); + auto &opndImm = static_cast(opnd2); + int64 shiftBits = opndImm.GetValue(); + return ((defMop == MOP_wlsrrri5 && shiftBits == (k32BitSize - 1)) || + (defMop == MOP_xlsrrri6 && shiftBits == (k64BitSize - 1))); + } + default: + return false; + } +} + +/* + * help function for cmpcset optimize + * if all define points of used opnd in insn has only one valid bit,return true. 
+ * for cmp reg,#0(#1), that is checking for reg + */ +bool CmpCsetAArch64::CheckOpndDefPoints(Insn &checkInsn, int opndIdx) { + if (checkInsn.GetBB()->GetPrev() == nullptr) { + /* For 1st BB, be conservative for def of parameter registers */ + /* Since peep is light weight, do not want to insert pseudo defs */ + regno_t reg = static_cast(checkInsn.GetOperand(static_cast(opndIdx))).GetRegisterNumber(); + if ((reg >= R0 && reg <= R7) || (reg >= D0 && reg <= D7)) { + return false; + } + } + /* check current BB */ + const Insn *defInsn = DefInsnOfOperandInBB(checkInsn, checkInsn, opndIdx); + if (defInsn != nullptr) { + return OpndDefByOneValidBit(*defInsn); + } + /* check pred */ + for (auto predBB : checkInsn.GetBB()->GetPreds()) { + const Insn *tempInsn = nullptr; + if (predBB->GetLastInsn() != nullptr) { + tempInsn = DefInsnOfOperandInBB(*predBB->GetLastInsn(), checkInsn, opndIdx); + } + if (tempInsn == nullptr || !OpndDefByOneValidBit(*tempInsn)) { + return false; + } + } + return true; +} + +/* Check there is use point of rflag start from startInsn to current bb bottom */ +bool CmpCsetAArch64::FlagUsedLaterInCurBB(const BB &bb, Insn &startInsn) const { + if (&bb != startInsn.GetBB()) { + return false; + } + Insn *nextInsn = nullptr; + for (Insn *insn = &startInsn; insn != nullptr; insn = nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + /* + * For condition operand, such as NE, EQ and so on, the register number should be + * same with RFLAG, we only need check the property of use/def. + */ + if (!opnd.IsConditionCode()) { + continue; + } + if (md->opndMD[i]->IsUse()) { + return true; + } else { + DEBUG_ASSERT(md->opndMD[i]->IsDef(), "register should be redefined."); + return false; + } + } + } + return false; +} + +void CmpCsetAArch64::Run(BB &bb, Insn &insn) { + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator firstMop = insn.GetMachineOpcode(); + MOperator secondMop = nextInsn->GetMachineOpcode(); + if ((firstMop == MOP_wcmpri || firstMop == MOP_xcmpri) && + (secondMop == MOP_wcsetrc || secondMop == MOP_xcsetrc)) { + Operand &cmpFirstOpnd = insn.GetOperand(kInsnSecondOpnd); + /* get ImmOperand, must be 0 or 1 */ + Operand &cmpSecondOpnd = insn.GetOperand(kInsnThirdOpnd); + auto &cmpFlagReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + DEBUG_ASSERT(cmpSecondOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &cmpConst = static_cast(cmpSecondOpnd); + int64 cmpConstVal = cmpConst.GetValue(); + Operand &csetFirstOpnd = nextInsn->GetOperand(kInsnFirstOpnd); + if ((cmpConstVal != 0 && cmpConstVal != 1) || !CheckOpndDefPoints(insn, 1) || + (nextInsn->GetNextMachineInsn() != nullptr && + FlagUsedLaterInCurBB(bb, *nextInsn->GetNextMachineInsn())) || + FindRegLiveOut(cmpFlagReg, *insn.GetBB())) { + return; + } + + Insn *csetInsn = nextInsn; + nextInsn = nextInsn->GetNextMachineInsn(); + auto &cond = static_cast(csetInsn->GetOperand(kInsnSecondOpnd)); + if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) { + if (RegOperand::IsSameRegNO(cmpFirstOpnd, csetFirstOpnd)) { + bb.RemoveInsn(insn); + bb.RemoveInsn(*csetInsn); + } else { + if (cmpFirstOpnd.GetSize() != csetFirstOpnd.GetSize()) { + return; + } + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? 
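+                /*
+                 * Sketch (not from the patch): the source register is known to
+                 * hold only 0 or 1, so "cmp w0, #0; cset w1, NE" is just a copy:
+                 *   ==>  mov w1, w0
+                 * The inverted pairing below becomes "eor w1, w0, #1" instead.
+                 */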
MOP_xmovrr : MOP_wmovrr;
+                Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd);
+                bb.ReplaceInsn(insn, newInsn);
+                bb.RemoveInsn(*csetInsn);
+            }
+        } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) {
+            if (cmpFirstOpnd.GetSize() != csetFirstOpnd.GetSize()) {
+                return;
+            }
+            MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12;
+            ImmOperand &one = static_cast<AArch64CGFunc*>(&cgFunc)->CreateImmOperand(1, k8BitSize, false);
+            Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd, one);
+            bb.ReplaceInsn(insn, newInsn);
+            bb.RemoveInsn(*csetInsn);
+        }
+    }
+}
+
+/*
+ * helper function for DeleteMovAfterCbzOrCbnz
+ * input:
+ *    bb: the BB to be checked
+ *    checkCbz: pass true to require that bb ends with cbz, false for cbnz
+ *    opnd: for "MOV reg, #0", opnd indicates reg
+ * return:
+ *    true if bb's compare-and-branch insn is the expected cbz/cbnz and its
+ *    first operand is the same register as opnd
+ */
+bool DeleteMovAfterCbzOrCbnzAArch64::PredBBCheck(BB &bb, bool checkCbz, const Operand &opnd) const {
+    if (bb.GetKind() != BB::kBBIf) {
+        return false;
+    }
+
+    Insn *condBr = cgcfg->FindLastCondBrInsn(bb);
+    DEBUG_ASSERT(condBr != nullptr, "condBr must be found");
+    if (!cgcfg->IsCompareAndBranchInsn(*condBr)) {
+        return false;
+    }
+    MOperator mOp = condBr->GetMachineOpcode();
+    if (checkCbz && mOp != MOP_wcbz && mOp != MOP_xcbz) {
+        return false;
+    }
+    if (!checkCbz && mOp != MOP_xcbnz && mOp != MOP_wcbnz) {
+        return false;
+    }
+    return RegOperand::IsSameRegNO(condBr->GetOperand(kInsnFirstOpnd), opnd);
+}
+
+bool DeleteMovAfterCbzOrCbnzAArch64::OpndDefByMovZero(const Insn &insn) const {
+    MOperator defMop = insn.GetMachineOpcode();
+    switch (defMop) {
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            Operand &defOpnd = insn.GetOperand(kInsnSecondOpnd);
+            DEBUG_ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand");
+            auto &defConst = static_cast<ImmOperand&>(defOpnd);
+            int64 defConstValue = defConst.GetValue();
+            if (defConstValue == 0) {
+                return true;
+            }
+            return false;
+        }
+        case MOP_xmovrr:
+        case MOP_wmovrr: {
+            Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd);
+            DEBUG_ASSERT(secondOpnd.IsRegister(), "expects RegOperand here");
+            auto &regOpnd = static_cast<RegOperand&>(secondOpnd);
+            return IsZeroRegister(regOpnd);
+        }
+        default:
+            return false;
+    }
+}
+
+/* check that the first operand of testInsn has no defining insn earlier in the current BB */
+bool DeleteMovAfterCbzOrCbnzAArch64::NoPreDefine(Insn &testInsn) const {
+    Insn *nextInsn = nullptr;
+    for (Insn *insn = testInsn.GetBB()->GetFirstInsn(); insn != nullptr && insn != &testInsn; insn = nextInsn) {
+        nextInsn = insn->GetNextMachineInsn();
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        DEBUG_ASSERT(!insn->IsCall(), "CG internal error, call insn should not be at the middle of the BB.");
+        const InsnDesc *md = insn->GetDesc();
+        uint32 opndNum = insn->GetOperandSize();
+        for (uint32 i = 0; i < opndNum; ++i) {
+            Operand &opnd = insn->GetOperand(i);
+            if (!md->opndMD[i]->IsDef()) {
+                continue;
+            }
+            if (opnd.IsMemoryAccessOperand()) {
+                auto &memOpnd = static_cast<MemOperand&>(opnd);
+                RegOperand *base = memOpnd.GetBaseRegister();
+                DEBUG_ASSERT(base != nullptr, "nullptr check");
+                DEBUG_ASSERT(base->IsRegister(), "expects RegOperand");
+                if (RegOperand::IsSameRegNO(*base, testInsn.GetOperand(kInsnFirstOpnd)) &&
+                    memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi &&
+                    (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) {
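+                    /* Pre/post-indexed addressing writes the base register back,
+                     * e.g. "str w1, [x0], #8" (sketch) also redefines x0, so it
+                     * counts as a def of the tested register. */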
+ return false; + } + } else if (opnd.IsList()) { + for (auto operand : static_cast(opnd).GetOperands()) { + if (RegOperand::IsSameRegNO(testInsn.GetOperand(kInsnFirstOpnd), *operand)) { + return false; + } + } + } else if (opnd.IsRegister()) { + if (RegOperand::IsSameRegNO(testInsn.GetOperand(kInsnFirstOpnd), opnd)) { + return false; + } + } + } + } + return true; +} +void DeleteMovAfterCbzOrCbnzAArch64::ProcessBBHandle(BB *processBB, const BB &bb, const Insn &insn) const { + FOR_BB_INSNS_SAFE(processInsn, processBB, nextProcessInsn) { + nextProcessInsn = processInsn->GetNextMachineInsn(); + if (!processInsn->IsMachineInstruction()) { + continue; + } + /* register may be a caller save register */ + if (processInsn->IsCall()) { + break; + } + if (!OpndDefByMovZero(*processInsn) || !NoPreDefine(*processInsn) || + !RegOperand::IsSameRegNO(processInsn->GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnFirstOpnd))) { + continue; + } + bool toDoOpt = true; + MOperator condBrMop = insn.GetMachineOpcode(); + /* process elseBB, other preds must be cbz */ + if (condBrMop == MOP_wcbnz || condBrMop == MOP_xcbnz) { + /* check out all preds of process_bb */ + for (auto *processBBPred : processBB->GetPreds()) { + if (processBBPred == &bb) { + continue; + } + if (!PredBBCheck(*processBBPred, true, processInsn->GetOperand(kInsnFirstOpnd))) { + toDoOpt = false; + break; + } + } + } else { + /* process ifBB, other preds can be cbz or cbnz(one at most) */ + for (auto processBBPred : processBB->GetPreds()) { + if (processBBPred == &bb) { + continue; + } + /* for cbnz pred, there is one at most */ + if (!PredBBCheck(*processBBPred, processBBPred != processBB->GetPrev(), + processInsn->GetOperand(kInsnFirstOpnd))) { + toDoOpt = false; + break; + } + } + } + if (!toDoOpt) { + continue; + } + processBB->RemoveInsn(*processInsn); + } +} + +/* ldr wn, [x1, wn, SXTW] + * add x2, wn, x2 + */ +bool ComplexMemOperandAddAArch64::IsExpandBaseOpnd(const Insn &insn, const Insn &prevInsn) const { + MOperator prevMop = prevInsn.GetMachineOpcode(); + if (prevMop >= MOP_wldrsb && prevMop <= MOP_xldr && + prevInsn.GetOperand(kInsnFirstOpnd).Equals(insn.GetOperand(kInsnSecondOpnd))) { + return true; + } + return false; +} + +void ComplexMemOperandAddAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + Insn *prevInsn = insn.GetPreviousMachineInsn(); + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xaddrrr && thisMop != MOP_waddrrr) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop && + ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { + if (!IsMemOperandOptPattern(insn, *nextInsn)) { + return; + } + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + auto newBaseOpnd = static_cast(&insn.GetOperand(kInsnSecondOpnd)); + auto newIndexOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + regno_t memBaseOpndRegNO = newBaseOpnd->GetRegisterNumber(); + if (newBaseOpnd->GetSize() <= k32BitSize && prevInsn != nullptr && IsExpandBaseOpnd(insn, *prevInsn)) { + newBaseOpnd = &aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(memBaseOpndRegNO), + k64BitSize, kRegTyInt); + } + if (newBaseOpnd->GetSize() != k64BitSize) { + return; + } + if (newIndexOpnd->GetSize() <= k32BitSize) { + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), 
newBaseOpnd, + newIndexOpnd, 0, false); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } else { + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), newBaseOpnd, + newIndexOpnd, nullptr, nullptr); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } + bb.RemoveInsn(insn); + } +} + +void DeleteMovAfterCbzOrCbnzAArch64::Run(BB &bb, Insn &insn) { + if (bb.GetKind() != BB::kBBIf) { + return; + } + if (&insn != cgcfg->FindLastCondBrInsn(bb)) { + return; + } + if (!cgcfg->IsCompareAndBranchInsn(insn)) { + return; + } + BB *processBB = nullptr; + if (bb.GetNext() == maplebe::CGCFG::GetTargetSuc(bb)) { + return; + } + + MOperator condBrMop = insn.GetMachineOpcode(); + if (condBrMop == MOP_wcbnz || condBrMop == MOP_xcbnz) { + processBB = bb.GetNext(); + } else { + processBB = maplebe::CGCFG::GetTargetSuc(bb); + } + + DEBUG_ASSERT(processBB != nullptr, "process_bb is null in DeleteMovAfterCbzOrCbnzAArch64::Run"); + ProcessBBHandle(processBB, bb, insn); +} + +MOperator OneHoleBranchesPreAArch64::FindNewMop(const BB &bb, const Insn &insn) const { + MOperator newOp = MOP_undef; + if (&insn != bb.GetLastInsn()) { + return newOp; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_wcbz && thisMop != MOP_wcbnz && thisMop != MOP_xcbz && thisMop != MOP_xcbnz) { + return newOp; + } + switch (thisMop) { + case MOP_wcbz: + newOp = MOP_wtbnz; + break; + case MOP_wcbnz: + newOp = MOP_wtbz; + break; + case MOP_xcbz: + newOp = MOP_xtbnz; + break; + case MOP_xcbnz: + newOp = MOP_xtbz; + break; + default: + CHECK_FATAL(false, "can not touch here"); + break; + } + return newOp; +} + +void OneHoleBranchesPreAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + MOperator newOp = FindNewMop(bb, insn); + if (newOp == MOP_undef) { + return; + } + Insn *prevInsn = insn.GetPreviousMachineInsn(); + LabelOperand &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (prevInsn != nullptr && prevInsn->GetMachineOpcode() == MOP_xuxtb32 && + (static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() <= k8BitSize || + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetValidBitsNum() <= k8BitSize)) { + if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) { + return; + } + if (IfOperandIsLiveAfterInsn(static_cast(insn.GetOperand(kInsnFirstOpnd)), insn)) { + return; + } + insn.SetOperand(kInsnFirstOpnd, prevInsn->GetOperand(kInsnSecondOpnd)); + if (CGOptions::DoCGSSA()) { + CHECK_FATAL(false, "check this case in ssa opt"); + } + bb.RemoveInsn(*prevInsn); + } + if (prevInsn != nullptr && + (prevInsn->GetMachineOpcode() == MOP_xeorrri13 || prevInsn->GetMachineOpcode() == MOP_weorrri12) && + static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue() == 1) { + if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) { + return; + } + Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn(); + if (prevPrevInsn == nullptr) { + return; + } + if (prevPrevInsn->GetMachineOpcode() != MOP_xuxtb32 || + static_cast(prevPrevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() != 1) { + return; + } + if (&(prevPrevInsn->GetOperand(kInsnFirstOpnd)) != &(prevInsn->GetOperand(kInsnSecondOpnd))) { + return; + } + ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false); + auto ®Operand = static_cast(prevPrevInsn->GetOperand(kInsnSecondOpnd)); + if (CGOptions::DoCGSSA()) { + CHECK_FATAL(false, "check this case in ssa opt"); + } + 
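+        /*
+         * Illustrative sketch (w-registers assumed, not from the patch):
+         *   uxtb w1, w2        // w2 has a single valid bit
+         *   eor  w0, w1, #1
+         *   cbz  w0, .L        ==>   tbnz w2, #0, .L
+         * oneHoleOpnd is the tested bit index (0).
+         */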
bb.InsertInsnAfter(insn, cgFunc.GetInsnBuilder()->BuildInsn(newOp, regOperand, oneHoleOpnd, label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } +} + +bool LoadFloatPointPattern::FindLoadFloatPoint(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + optInsn.clear(); + if (mOp != MOP_xmovzri16) { + return false; + } + optInsn.emplace_back(&insn); + + Insn *insnMov2 = insn.GetNextMachineInsn(); + if (insnMov2 == nullptr) { + return false; + } + if (insnMov2->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov2); + + Insn *insnMov3 = insnMov2->GetNextMachineInsn(); + if (insnMov3 == nullptr) { + return false; + } + if (insnMov3->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov3); + + Insn *insnMov4 = insnMov3->GetNextMachineInsn(); + if (insnMov4 == nullptr) { + return false; + } + if (insnMov4->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov4); + return true; +} + +bool LoadFloatPointPattern::IsPatternMatch() { + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + if ((static_cast(insn1->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn2->GetOperand(kInsnFirstOpnd)).GetRegisterNumber()) || + (static_cast(insn2->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn3->GetOperand(kInsnFirstOpnd)).GetRegisterNumber()) || + (static_cast(insn3->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn4->GetOperand(kInsnFirstOpnd)).GetRegisterNumber())) { + return false; + } + if ((static_cast(insn1->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != 0) || + (static_cast(insn2->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != k16BitSize) || + (static_cast(insn3->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != k32BitSize) || + (static_cast(insn4->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != + (k16BitSize + k32BitSize))) { + return false; + } + return true; +} + +bool LoadFloatPointPattern::CheckCondition(Insn &insn) { + if (FindLoadFloatPoint(insn) && IsPatternMatch()) { + return true; + } + return false; +} + +void LoadFloatPointPattern::Run(BB &bb, Insn &insn) { + /* logical shift left values in three optimized pattern */ + if (CheckCondition(insn)) { + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + auto &movConst1 = static_cast(insn1->GetOperand(kInsnSecondOpnd)); + auto &movConst2 = static_cast(insn2->GetOperand(kInsnSecondOpnd)); + auto &movConst3 = static_cast(insn3->GetOperand(kInsnSecondOpnd)); + auto &movConst4 = static_cast(insn4->GetOperand(kInsnSecondOpnd)); + /* movk/movz's immOpnd is 16-bit unsigned immediate */ + uint64 value = static_cast(movConst1.GetValue()) + + (static_cast(movConst2.GetValue()) << k16BitSize) + + (static_cast(movConst3.GetValue()) << k32BitSize) + + (static_cast(movConst4.GetValue()) << (k16BitSize + k32BitSize)); + + LabelIdx lableIdx = cgFunc->CreateLabel(); + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + LabelOperand &target = aarch64CGFunc->GetOrCreateLabelOperand(lableIdx); + cgFunc->InsertLabelMap(lableIdx, value); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xldli, insn4->GetOperand(kInsnFirstOpnd), + target); + bb.InsertInsnAfter(*insn4, newInsn); + bb.RemoveInsn(*insn1); + bb.RemoveInsn(*insn2); + 
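+        /* The movz plus three movk at LSL #0/#16/#32/#48 were combined into
+         * `value` and emitted as a single literal-pool load (MOP_xldli) above;
+         * the four immediate moves are dead now and are removed here. */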
bb.RemoveInsn(*insn3);
+        bb.RemoveInsn(*insn4);
+    }
+}
+
+void ReplaceOrrToMovAArch64::Run(BB &bb, Insn &insn) {
+    Operand *opndOfOrr = nullptr;
+    ImmOperand *immOpnd = nullptr;
+    RegOperand *reg1 = nullptr;
+    RegOperand *reg2 = nullptr;
+    MOperator thisMop = insn.GetMachineOpcode();
+    MOperator newMop = MOP_undef;
+    switch (thisMop) {
+        case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */
+            opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd));
+            reg2 = &static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+            newMop = MOP_wmovrr;
+            break;
+        }
+        case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate. */
+            opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd));
+            reg2 = &static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+            newMop = MOP_xmovrr;
+            break;
+        }
+        default:
+            return; /* no other mop is dispatched here; bail out instead of dereferencing a null opndOfOrr */
+    }
+    DEBUG_ASSERT(opndOfOrr->IsIntImmediate(), "expects immediate operand");
+    immOpnd = static_cast<ImmOperand*>(opndOfOrr);
+    if (immOpnd->GetValue() == 0) {
+        reg1 = &static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+        if (CGOptions::DoCGSSA()) {
+            CHECK_FATAL(false, "check this case in ssa opt");
+        }
+        bb.ReplaceInsn(insn, cgFunc.GetInsnBuilder()->BuildInsn(newMop, *reg1, *reg2));
+    }
+}
+
+void ReplaceCmpToCmnAArch64::Run(BB &bb, Insn &insn) {
+    AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc);
+    MOperator thisMop = insn.GetMachineOpcode();
+    MOperator nextMop = MOP_undef;
+    MOperator newMop = MOP_undef;
+    uint64 negOne = UINT64_MAX;
+    switch (thisMop) {
+        case MOP_wmovri32: {
+            nextMop = MOP_wcmprr;
+            newMop = MOP_wcmnri;
+            negOne = UINT32_MAX;
+            break;
+        }
+        case MOP_xmovri64: {
+            nextMop = MOP_xcmprr;
+            newMop = MOP_xcmnri;
+            break;
+        }
+        default:
+            break;
+    }
+    Operand *opnd1OfMov = &(insn.GetOperand(kInsnFirstOpnd));
+    Operand *opnd2OfMov = &(insn.GetOperand(kInsnSecondOpnd));
+    if (opnd2OfMov->IsIntImmediate()) {
+        ImmOperand *immOpnd = static_cast<ImmOperand*>(opnd2OfMov);
+        int64 iVal = immOpnd->GetValue();
+        if ((kNegativeImmLowerLimit <= iVal && iVal < 0) || iVal == negOne) {
+            Insn *nextInsn = insn.GetNextMachineInsn(); /* get the next insn to judge if it is a cmp instruction. */
+            if (nextInsn != nullptr) {
+                if (nextInsn->GetMachineOpcode() == nextMop) {
+                    Operand *opndCmp2 = &(nextInsn->GetOperand(kInsnSecondOpnd));
+                    Operand *opndCmp3 = &(nextInsn->GetOperand(kInsnThirdOpnd)); /* get the third operand of cmp */
+                    /* if the first operand of mov equals the third operand of cmp, match the pattern.
*/ + if (opnd1OfMov == opndCmp3) { + if (iVal == negOne) { + iVal = -1; + } + ImmOperand &newOpnd = aarch64CGFunc->CreateImmOperand(iVal * (-1), immOpnd->GetSize(), false); + Operand ®Flag = nextInsn->GetOperand(kInsnFirstOpnd); + bb.ReplaceInsn(*nextInsn, cgFunc.GetInsnBuilder()->BuildInsn(newMop, regFlag, *opndCmp2, newOpnd)); + } + } + } + } + } +} + +bool RemoveIncRefPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp != MOP_xbl) { + return false; + } + auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + insnMov2 = insn.GetPreviousMachineInsn(); + if (insnMov2 == nullptr) { + return false; + } + MOperator mopMov2 = insnMov2->GetMachineOpcode(); + if (mopMov2 != MOP_xmovrr) { + return false; + } + insnMov1 = insnMov2->GetPreviousMachineInsn(); + if (insnMov1 == nullptr) { + return false; + } + MOperator mopMov1 = insnMov1->GetMachineOpcode(); + if (mopMov1 != MOP_xmovrr) { + return false; + } + if (static_cast(insnMov1->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != + static_cast(insnMov2->GetOperand(kInsnSecondOpnd)).GetRegisterNumber()) { + return false; + } + auto &mov2Dest = static_cast(insnMov2->GetOperand(kInsnFirstOpnd)); + auto &mov1Dest = static_cast(insnMov1->GetOperand(kInsnFirstOpnd)); + if (mov1Dest.IsVirtualRegister() || mov2Dest.IsVirtualRegister() || mov1Dest.GetRegisterNumber() != R0 || + mov2Dest.GetRegisterNumber() != R1) { + return false; + } + return true; +} + +void RemoveIncRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(insn); + bb.RemoveInsn(*insnMov2); + bb.RemoveInsn(*insnMov1); +} + +bool LongIntCompareWithZPattern::FindLondIntCmpWithZ(Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + optInsn.clear(); + /* forth */ + if (thisMop != MOP_wcmpri) { + return false; + } + (void)optInsn.emplace_back(&insn); + + /* third */ + Insn *preInsn1 = insn.GetPreviousMachineInsn(); + if (preInsn1 == nullptr) { + return false; + } + MOperator preMop1 = preInsn1->GetMachineOpcode(); + if (preMop1 != MOP_wcsincrrrc) { + return false; + } + (void)optInsn.emplace_back(preInsn1); + + /* second */ + Insn *preInsn2 = preInsn1->GetPreviousMachineInsn(); + if (preInsn2 == nullptr) { + return false; + } + MOperator preMop2 = preInsn2->GetMachineOpcode(); + if (preMop2 != MOP_wcsinvrrrc) { + return false; + } + (void)optInsn.emplace_back(preInsn2); + + /* first */ + Insn *preInsn3 = preInsn2->GetPreviousMachineInsn(); + if (preInsn3 == nullptr) { + return false; + } + MOperator preMop3 = preInsn3->GetMachineOpcode(); + if (preMop3 != MOP_xcmpri) { + return false; + } + (void)optInsn.emplace_back(preInsn3); + return true; +} + +bool LongIntCompareWithZPattern::IsPatternMatch() { + constexpr int insnLen = 4; + if (optInsn.size() != insnLen) { + return false; + } + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + DEBUG_ASSERT(insnNum == 3, " this specific case has three insns"); + if (IsZeroRegister(insn3->GetOperand(kInsnSecondOpnd)) && IsZeroRegister(insn3->GetOperand(kInsnThirdOpnd)) && + IsZeroRegister(insn2->GetOperand(kInsnThirdOpnd)) && + &(insn2->GetOperand(kInsnFirstOpnd)) == &(insn2->GetOperand(kInsnSecondOpnd)) && + static_cast(insn3->GetOperand(kInsnFourthOpnd)).GetCode() == CC_GE && + static_cast(insn2->GetOperand(kInsnFourthOpnd)).GetCode() == CC_LE && + 
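+        /* matched shape (sketch): cmp xN, #0; csinv wM, wzr, wzr, GE;
+         * csinc wM, wM, wzr, LE; cmp wM, #0 -- the usual lowering of a 64-bit
+         * compare-with-zero into an int result */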
static_cast(insn1->GetOperand(kInsnThirdOpnd)).GetValue() == 0 && + static_cast(insn4->GetOperand(kInsnThirdOpnd)).GetValue() == 0) { + return true; + } + return false; +} + +bool LongIntCompareWithZPattern::CheckCondition(Insn &insn) { + if (FindLondIntCmpWithZ(insn) && IsPatternMatch()) { + return true; + } + return false; +} + +void LongIntCompareWithZPattern::Run(BB &bb, Insn &insn) { + /* found pattern */ + if (CheckCondition(insn)) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(optInsn[3]->GetMachineOpcode(), + optInsn[3]->GetOperand(kInsnFirstOpnd), + optInsn[3]->GetOperand(kInsnSecondOpnd), + optInsn[3]->GetOperand(kInsnThirdOpnd)); + /* use newInsn to replace the third optInsn */ + bb.ReplaceInsn(*optInsn[0], newInsn); + optInsn.clear(); + } +} + +void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xadrpl12) { + return; + } + + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop && + ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldp) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstp))) { + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "memOpnd is null in AArch64Peep::ComplexMemOperandAArch64"); + + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return; + } + + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + return; + } + + auto ®Opnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + + /* Avoid linking issues when object is not 16byte aligned */ + if (memOpnd->GetSize() == k128BitSize) { + return; + } + + /* Check if dest operand of insn is idential with base register of nextInsn. */ + if (memOpnd->GetBaseRegister() != ®Opnd) { + return; + } + + /* Check if x0 is used after ldr insn, and if it is in live-out. 
*/ + if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) { + return; + } + + /* load store pairs cannot have relocation */ + if (nextInsn->IsLoadStorePair() && insn.GetOperand(kInsnThirdOpnd).IsStImmediate()) { + return; + } + + auto &stImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + OfstOperand &offOpnd = aarch64CGFunc->GetOrCreateOfstOpnd( + stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue(), k32BitSize); + + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && stImmOpnd.GetSymbol()->IsReadOnly()) { + return; + } + + /* avoid relocation */ + if ((offOpnd.GetValue() % static_cast(kBitsPerByte)) != 0) { + return; + } + + if (cgFunc.GetMirModule().IsCModule()) { + Insn *prevInsn = insn.GetPrev(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xadrp) { + return; + } else { + auto &prevStImmOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + prevStImmOpnd.SetOffset(offOpnd.GetValue()); + } + } + auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), + &newBaseOpnd, nullptr, &offOpnd, stImmOpnd.GetSymbol()); + + nextInsn->SetMemOpnd(&newMemOpnd); + bb.RemoveInsn(insn); + CHECK_FATAL(!CGOptions::IsLazyBinding() || cgFunc.GetCG()->IsLibcore(), + "this pattern can't be found in this phase"); + } +} + +void ComplexMemOperandPreAddAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xaddrrr && thisMop != MOP_waddrrr) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop && + ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { + if (!IsMemOperandOptPattern(insn, *nextInsn)) { + return; + } + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (newBaseOpnd.GetSize() != k64BitSize) { + return; + } + auto &newIndexOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (newIndexOpnd.GetSize() <= k32BitSize) { + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + &newIndexOpnd, 0, false); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } else { + auto *newOfstOpnd = &aarch64CGFunc->GetOrCreateOfstOpnd(0, k32BitSize); + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + &newIndexOpnd, newOfstOpnd, nullptr); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } + bb.RemoveInsn(insn); + } +} + +bool ComplexMemOperandLSLAArch64::CheckShiftValid(const Insn &insn, + const BitShiftOperand &lsl) const { + /* check if shift amount is valid */ + uint32 lslAmount = lsl.GetShiftAmount(); + constexpr uint8 twoShiftBits = 2; + constexpr uint8 threeShiftBits = 3; + uint32 memSize = insn.GetMemoryByteSize(); + if ((memSize == k4ByteSize && (lsl.GetShiftAmount() != 0 && lslAmount != twoShiftBits)) || + (memSize == k8ByteSize && (lsl.GetShiftAmount() != 0 && lslAmount != threeShiftBits))) { + return false; + } + if (memSize != (k5BitSize << lslAmount)) { + return false; + } + return true; +} + +void ComplexMemOperandLSLAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); 
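+    /*
+     * Illustrative target (registers assumed, not from the patch):
+     *   add x0, x1, x2, LSL #3; ldr x3, [x0]   ==>   ldr x3, [x1, x2, LSL #3]
+     * legal when x0 is dead after the load and the shift amount matches the
+     * access width (checked in CheckShiftValid).
+     */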
+ Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xaddrrrs) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop && + ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "null ptr check"); + + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return; + } + + /* Only for immediate is 0. */ + if (memOpnd->GetOffsetImmediate()->GetOffsetValue() != 0) { + return; + } + + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + return; + } + + auto ®Opnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + + /* Check if dest operand of insn is idential with base register of nextInsn. */ + if (memOpnd->GetBaseRegister() != ®Opnd) { + return; + } + +#ifdef USE_32BIT_REF + if (nextInsn->IsAccessRefField() && nextInsn->GetOperand(kInsnFirstOpnd).GetSize() > k32BitSize) { + return; + } +#endif + + /* Check if x0 is used after ldr insn, and if it is in live-out. */ + if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) { + return; + } + auto &lsl = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if (!CheckShiftValid(*nextInsn, lsl)) { + return; + } + auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &newIndexOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + &newIndexOpnd, static_cast(lsl.GetShiftAmount()), + false); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + bb.RemoveInsn(insn); + } +} + +void ComplexMemOperandLabelAArch64::Run(BB &bb, Insn &insn) { + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xldli) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop != MOP_xvmovdr) { + return; + } + auto ®Opnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (regOpnd.GetRegisterNumber() != + static_cast(nextInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber()) { + return; + } + + /* Check if x0 is used after ldr insn, and if it is in live-out. 
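+     * Shape of the rewrite done below (illustrative):
+     *   ldr  x0, .L_const    (MOP_xldli)
+     *   fmov d0, x0          (MOP_xvmovdr)
+     * =>
+     *   ldr  d0, .L_const    (MOP_dldli)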
*/ + if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) { + return; + } + if (CGOptions::DoCGSSA()) { + /* same as CombineFmovLdrPattern in ssa */ + CHECK_FATAL(false, "check this case in ssa"); + } + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn( + MOP_dldli, nextInsn->GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnSecondOpnd)); + bb.InsertInsnAfter(*nextInsn, newInsn); + bb.RemoveInsn(insn); + bb.RemoveInsn(*nextInsn); +} + +static bool MayThrowBetweenInsn(const Insn &prevCallInsn, const Insn &currCallInsn) { + for (Insn *insn = prevCallInsn.GetNext(); insn != nullptr && insn != &currCallInsn; insn = insn->GetNext()) { + if (insn->MayThrow()) { + return true; + } + } + return false; +} + +/* + * mov R0, vreg1 / R0 -> objDesignateInsn + * add vreg2, vreg1, #imm -> fieldDesignateInsn + * mov R1, vreg2 -> fieldParamDefInsn + * mov R2, vreg3 -> fieldValueDefInsn + */ +bool WriteFieldCallPattern::WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam ¶m) { + Insn *fieldValueDefInsn = writeFieldCallInsn.GetPreviousMachineInsn(); + if (fieldValueDefInsn == nullptr || fieldValueDefInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + Operand &fieldValueDefInsnDestOpnd = fieldValueDefInsn->GetOperand(kInsnFirstOpnd); + auto &fieldValueDefInsnDestReg = static_cast(fieldValueDefInsnDestOpnd); + if (fieldValueDefInsnDestReg.GetRegisterNumber() != R2) { + return false; + } + paramDefInsns.emplace_back(fieldValueDefInsn); + param.fieldValue = &(fieldValueDefInsn->GetOperand(kInsnSecondOpnd)); + Insn *fieldParamDefInsn = fieldValueDefInsn->GetPreviousMachineInsn(); + if (fieldParamDefInsn == nullptr || fieldParamDefInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + Operand &fieldParamDestOpnd = fieldParamDefInsn->GetOperand(kInsnFirstOpnd); + auto &fieldParamDestReg = static_cast(fieldParamDestOpnd); + if (fieldParamDestReg.GetRegisterNumber() != R1) { + return false; + } + paramDefInsns.emplace_back(fieldParamDefInsn); + Insn *fieldDesignateInsn = fieldParamDefInsn->GetPreviousMachineInsn(); + if (fieldDesignateInsn == nullptr || fieldDesignateInsn->GetMachineOpcode() != MOP_xaddrri12) { + return false; + } + Operand &fieldParamDefSrcOpnd = fieldParamDefInsn->GetOperand(kInsnSecondOpnd); + Operand &fieldDesignateDestOpnd = fieldDesignateInsn->GetOperand(kInsnFirstOpnd); + if (!RegOperand::IsSameReg(fieldParamDefSrcOpnd, fieldDesignateDestOpnd)) { + return false; + } + Operand &fieldDesignateBaseOpnd = fieldDesignateInsn->GetOperand(kInsnSecondOpnd); + param.fieldBaseOpnd = &(static_cast(fieldDesignateBaseOpnd)); + auto &immOpnd = static_cast(fieldDesignateInsn->GetOperand(kInsnThirdOpnd)); + param.fieldOffset = immOpnd.GetValue(); + paramDefInsns.emplace_back(fieldDesignateInsn); + Insn *objDesignateInsn = fieldDesignateInsn->GetPreviousMachineInsn(); + if (objDesignateInsn == nullptr || objDesignateInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + Operand &objDesignateDestOpnd = objDesignateInsn->GetOperand(kInsnFirstOpnd); + auto &objDesignateDestReg = static_cast(objDesignateDestOpnd); + if (objDesignateDestReg.GetRegisterNumber() != R0) { + return false; + } + Operand &objDesignateSrcOpnd = objDesignateInsn->GetOperand(kInsnSecondOpnd); + if (RegOperand::IsSameReg(objDesignateDestOpnd, objDesignateSrcOpnd) || + !RegOperand::IsSameReg(objDesignateSrcOpnd, fieldDesignateBaseOpnd)) { + return false; + } + param.objOpnd = &(objDesignateInsn->GetOperand(kInsnSecondOpnd)); + paramDefInsns.emplace_back(objDesignateInsn); + return true; 
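+    /*
+     * Matched (summary of the checks above): param.objOpnd and param.fieldBaseOpnd
+     * name the same object base vreg, param.fieldOffset holds the #imm field offset,
+     * and param.fieldValue the stored value, so Run() can fold the call into one str.
+     */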
+} + +bool WriteFieldCallPattern::IsWriteRefFieldCallInsn(const Insn &insn) const { + if (!insn.IsCall() || insn.GetMachineOpcode() == MOP_xblr) { + return false; + } + Operand *targetOpnd = insn.GetCallTargetOperand(); + DEBUG_ASSERT(targetOpnd != nullptr, "targetOpnd must not be nullptr"); + if (!targetOpnd->IsFuncNameOpnd()) { + return false; + } + auto *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "the kind of funcSt is unreasonable"); + const std::string &funcName = funcSt->GetName(); + return funcName == "MCC_WriteRefField" || funcName == "MCC_WriteVolatileField"; +} + +bool WriteFieldCallPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + if (!IsWriteRefFieldCallInsn(insn)) { + return false; + } + if (!hasWriteFieldCall) { + if (!WriteFieldCallOptPatternMatch(insn, firstCallParam)) { + return false; + } + prevCallInsn = &insn; + hasWriteFieldCall = true; + return false; + } + if (!WriteFieldCallOptPatternMatch(insn, currentCallParam)) { + return false; + } + if (prevCallInsn == nullptr || MayThrowBetweenInsn(*prevCallInsn, insn)) { + return false; + } + if (firstCallParam.objOpnd == nullptr || currentCallParam.objOpnd == nullptr || + currentCallParam.fieldBaseOpnd == nullptr) { + return false; + } + if (!RegOperand::IsSameReg(*firstCallParam.objOpnd, *currentCallParam.objOpnd)) { + return false; + } + return true; +} + +void WriteFieldCallPattern::Run(BB &bb, Insn &insn) { + paramDefInsns.clear(); + if (!CheckCondition(insn)) { + return; + } + auto *aarCGFunc = static_cast(cgFunc); + MemOperand &addr = + aarCGFunc->CreateMemOpnd(*currentCallParam.fieldBaseOpnd, currentCallParam.fieldOffset, k64BitSize); + Insn &strInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xstr, *currentCallParam.fieldValue, addr); + strInsn.AppendComment("store reference field"); + strInsn.MarkAsAccessRefField(true); + bb.InsertInsnAfter(insn, strInsn); + for (Insn *paramDefInsn : paramDefInsns) { + bb.RemoveInsn(*paramDefInsn); + } + bb.RemoveInsn(insn); + prevCallInsn = &strInsn; + nextInsn = strInsn.GetNextMachineInsn(); +} + +bool RemoveDecRefPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_DecRef_NaiveRCFast") { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator mopMov = prevInsn->GetMachineOpcode(); + if ((mopMov != MOP_xmovrr && mopMov != MOP_xmovri64) || + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R0) { + return false; + } + Operand &srcOpndOfMov = prevInsn->GetOperand(kInsnSecondOpnd); + if (!IsZeroRegister(srcOpndOfMov) && + !(srcOpndOfMov.IsImmediate() && static_cast(srcOpndOfMov).GetValue() == 0)) { + return false; + } + return true; +} + +void RemoveDecRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); +} + +/* + * We optimize the following pattern in this function: + * and x1, x1, #imm (is n power of 2) + * cbz/cbnz x1, .label + * => + * and x1, x1, #imm (is n power of 2) + * tbnz/tbz x1, #n, .label + */ +void OneHoleBranchesAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + if (&insn != bb.GetLastInsn()) { + return; + } + /* check cbz/cbnz insn */ + MOperator 
thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_wcbz && thisMop != MOP_wcbnz && thisMop != MOP_xcbz && thisMop != MOP_xcbnz) { + return; + } + /* check and insn */ + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wandrri12 && prevMop != MOP_xandrri13) { + return; + } + /* check opearnd of two insns */ + if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) { + return; + } + auto &imm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int n = logValueAtBase2(imm.GetValue()); + if (n < 0) { + return; + } + + /* replace insn */ + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MOperator newOp = MOP_undef; + switch (thisMop) { + case MOP_wcbz: + newOp = MOP_wtbz; + break; + case MOP_wcbnz: + newOp = MOP_wtbnz; + break; + case MOP_xcbz: + newOp = MOP_xtbz; + break; + case MOP_xcbnz: + newOp = MOP_xtbnz; + break; + default: + CHECK_FATAL(false, "can not touch here"); + break; + } + ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + (void)bb.InsertInsnAfter(insn, cgFunc.GetInsnBuilder()->BuildInsn( + newOp, prevInsn->GetOperand(kInsnSecondOpnd), oneHoleOpnd, label)); + bb.RemoveInsn(insn); +} + +bool ReplaceIncDecWithIncPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + target = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target->GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator mopMov = prevInsn->GetMachineOpcode(); + if (mopMov != MOP_xmovrr) { + return false; + } + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + !IsZeroRegister(prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; + } + return true; +} + +void ReplaceIncDecWithIncPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + std::string funcName = "MCC_IncRef_NaiveRCFast"; + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx, true); + if (st == nullptr) { + LogInfo::MapleLogger() << "WARNING: Replace IncDec With Inc fail due to no MCC_IncRef_NaiveRCFast func\n"; + return; + } + bb.RemoveInsn(*prevInsn); + target->SetFunctionSymbol(*st); +} + + +void AndCmpBranchesToTbzAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + if (&insn != bb.GetLastInsn()) { + return; + } + MOperator mopB = insn.GetMachineOpcode(); + if (mopB != MOP_beq && mopB != MOP_bne) { + return; + } + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* get the instruction before bne/beq, expects its type is cmp. */ + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wcmpri && prevMop != MOP_xcmpri) { + return; + } + + /* get the instruction before "cmp", expect its type is "and". */ + Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn(); + if (prevPrevInsn == nullptr) { + return; + } + MOperator mopAnd = prevPrevInsn->GetMachineOpcode(); + if (mopAnd != MOP_wandrri12 && mopAnd != MOP_xandrri13) { + return; + } + + /* + * check operand + * + * the real register of "cmp" and "and" must be the same. 
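+   * e.g. (illustrative)
+   *   and w1, w0, #4; cmp w1, #0; beq .L   =>   tbz w0, #2, .L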
+ */ + if (&(prevInsn->GetOperand(kInsnSecondOpnd)) != &(prevPrevInsn->GetOperand(kInsnFirstOpnd))) { + return; + } + + uint32 opndIdx = 2; + if (!prevPrevInsn->GetOperand(opndIdx).IsIntImmediate() || !prevInsn->GetOperand(opndIdx).IsIntImmediate()) { + return; + } + auto &immAnd = static_cast(prevPrevInsn->GetOperand(opndIdx)); + auto &immCmp = static_cast(prevInsn->GetOperand(opndIdx)); + if (immCmp.GetValue() == 0) { + int n = logValueAtBase2(immAnd.GetValue()); + if (n < 0) { + return; + } + /* judge whether the flag_reg and "w0" is live later. */ + auto &flagReg = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &cmpReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + if (FindRegLiveOut(flagReg, *prevInsn->GetBB()) || FindRegLiveOut(cmpReg, *prevInsn->GetBB())) { + return; + } + MOperator mopNew = MOP_undef; + switch (mopB) { + case MOP_beq: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbz; + } + break; + case MOP_bne: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbnz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbnz; + } + break; + default: + CHECK_FATAL(false, "expects beq or bne insn"); + break; + } + ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + (void)bb.InsertInsnAfter(insn, cgFunc.GetInsnBuilder()->BuildInsn(mopNew, + prevPrevInsn->GetOperand(kInsnSecondOpnd), newImm, label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } else { + int n = logValueAtBase2(immAnd.GetValue()); + int m = logValueAtBase2(immCmp.GetValue()); + if (n < 0 || m < 0 || n != m) { + return; + } + /* judge whether the flag_reg and "w0" is live later. */ + auto &flagReg = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &cmpReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + if (FindRegLiveOut(flagReg, *prevInsn->GetBB()) || FindRegLiveOut(cmpReg, *prevInsn->GetBB())) { + return; + } + MOperator mopNew = MOP_undef; + switch (mopB) { + case MOP_beq: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbnz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbnz; + } + break; + case MOP_bne: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbz; + } + break; + default: + CHECK_FATAL(false, "expects beq or bne insn"); + break; + } + ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + (void)bb.InsertInsnAfter(insn, cgFunc.GetInsnBuilder()->BuildInsn(mopNew, + prevPrevInsn->GetOperand(kInsnSecondOpnd), newImm, label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } +} + +void RemoveSxtBeforeStrAArch64::Run(BB &bb, Insn &insn) { + MOperator mop = insn.GetMachineOpcode(); + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (!(mop == MOP_wstrh && prevMop == MOP_xsxth32) && !(mop == MOP_wstrb && prevMop == MOP_xsxtb32)) { + return; + } + auto &prevOpnd0 = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (IfOperandIsLiveAfterInsn(prevOpnd0, insn)) { + return; + } + auto &prevOpnd1 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + regno_t prevRegNO0 = prevOpnd0.GetRegisterNumber(); + regno_t prevRegNO1 = prevOpnd1.GetRegisterNumber(); + regno_t regNO0 = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (prevRegNO0 != prevRegNO1) { + return; + } + if (prevRegNO0 == regNO0) { + 
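+    /*
+     * strh/strb store only the low 16/8 bits, so the sign-extension is dead once
+     * its result has no later use, e.g. (illustrative):
+     *   sxth w0, w0
+     *   strh w0, [x1]
+     * =>
+     *   strh w0, [x1]
+     */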
bb.RemoveInsn(*prevInsn); + return; + } + insn.SetOperand(0, prevOpnd1); + bb.RemoveInsn(*prevInsn); +} + +void UbfxToUxtwPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Insn *newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + MOP_xuxtw64, insn.GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnSecondOpnd)); + bb.ReplaceInsn(insn, *newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, newInsn, nullptr); + } +} + +bool UbfxToUxtwPattern::CheckCondition(Insn &insn) { + ImmOperand &imm0 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ImmOperand &imm1 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((imm0.GetValue() != 0) || (imm1.GetValue() != k32BitSize)) { + return false; + } + return true; +} + +void UbfxAndCbzToTbzPattern::Run(BB &bb, Insn &insn) { + Operand &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &imm3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (!CheckCondition(insn)) { + return; + } + auto &label = static_cast(useInsn->GetOperand(kInsnSecondOpnd)); + MOperator nextMop = useInsn->GetMachineOpcode(); + switch (nextMop) { + case MOP_wcbz: + newMop = MOP_wtbz; + break; + case MOP_xcbz: + newMop = MOP_xtbz; + break; + case MOP_wcbnz: + newMop = MOP_wtbnz; + break; + case MOP_xcbnz: + newMop = MOP_xtbnz; + break; + default: + return; + } + if (newMop == MOP_undef) { + return; + } + Insn *newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, opnd2, imm3, label); + BB *useInsnBB = useInsn->GetBB(); + useInsnBB->ReplaceInsn(*useInsn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(*useInsn, *newInsn); + optSuccess = true; + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(useInsn); + DumpAfterPattern(prevs, newInsn, nullptr); + } +} + +bool UbfxAndCbzToTbzPattern::CheckCondition(Insn &insn) { + ImmOperand &imm4 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + RegOperand &opnd1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + InsnSet useInsns = GetAllUseInsn(opnd1); + if (useInsns.size() != 1) { + return false; + } + useInsn = *useInsns.begin(); + if (useInsn == nullptr) { + return false; + } + if (imm4.GetValue() == 1) { + switch (useInsn->GetMachineOpcode()) { + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: + return true; + default: + break; + } + } + return false; +} + +bool ComplexExtendWordLslAArch64::IsExtendWordLslPattern(const Insn &insn) const { + Insn *nextInsn = insn.GetNext(); + if(nextInsn == nullptr) { + return false; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop != MOP_xlslrri6) { + return false; + } + return true; +} + +void ComplexExtendWordLslAArch64::Run(BB &bb, Insn &insn) { + if (!IsExtendWordLslPattern(insn)) { + return; + } + MOperator mop = insn.GetMachineOpcode(); + Insn *nextInsn = insn.GetNext(); + auto &nextOpnd2 = static_cast(nextInsn->GetOperand(kInsnThirdOpnd)); + if (nextOpnd2.GetValue() > k32BitSize) { + return; + } + auto &opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &nextOpnd1 = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); + regno_t regNO0 = opnd0.GetRegisterNumber(); + regno_t nextRegNO1 = nextOpnd1.GetRegisterNumber(); + if (regNO0 != nextRegNO1 || IfOperandIsLiveAfterInsn(opnd0, *nextInsn)) { + return; + } + auto &opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &nextOpnd0 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); + regno_t regNO1 = opnd1.GetRegisterNumber(); + cgFunc.InsertExtendSet(regNO1); + MOperator 
mopNew = mop == MOP_xsxtw64 ? MOP_xsbfizrri6i6 : MOP_xubfizrri6i6; + auto *aarch64CGFunc = static_cast(&cgFunc); + RegOperand ®1 = aarch64CGFunc->GetOrCreateVirtualRegisterOperand(regNO1); + ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(k32BitSize, k6BitSize, false); + Insn &newInsnSbfiz = cgFunc.GetInsnBuilder()->BuildInsn(mopNew, + nextOpnd0, reg1, nextOpnd2, newImm); + bb.RemoveInsn(*nextInsn); + bb.ReplaceInsn(insn, newInsnSbfiz); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f77e0e22796b813ca70a49d10e90146eab0a27ea --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_phi_elimination.h" +#include "aarch64_cg.h" + +namespace maplebe { +RegOperand &AArch64PhiEliminate::CreateTempRegForCSSA(RegOperand &oriOpnd) { + return *phiEliAlloc.New(GetAndIncreaseTempRegNO(), oriOpnd.GetSize(), oriOpnd.GetRegisterType()); +} + +Insn &AArch64PhiEliminate::CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) { + DEBUG_ASSERT(destOpnd.GetRegisterType() == fromOpnd.GetRegisterType(), "do not support this move in aarch64"); + bool is64bit = destOpnd.GetSize() == k64BitSize; + bool isFloat = destOpnd.IsOfFloatOrSIMDClass(); + Insn *insn = nullptr; + if (destOpnd.GetSize() == k128BitSize) { + DEBUG_ASSERT(isFloat, "unexpect 128bit int operand in aarch64"); + insn = &cgFunc->GetInsnBuilder()->BuildVectorInsn(MOP_vmovvv, AArch64CG::kMd[MOP_vmovvv]); + insn->AddOpndChain(destOpnd).AddOpndChain(fromOpnd); + auto *vecSpecSrc = cgFunc->GetMemoryPool()->New(k128BitSize >> k3ByteSize, k8BitSize); + auto *vecSpecDest = cgFunc->GetMemoryPool()->New(k128BitSize >> k3ByteSize, k8BitSize); + static_cast(insn)->PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + } else { + insn = &cgFunc->GetInsnBuilder()->BuildInsn( + is64bit ? isFloat ? MOP_xvmovd : MOP_xmovrr : isFloat ? 
MOP_xvmovs : MOP_wmovrr, destOpnd, fromOpnd); + } + /* restore validBitNum */ + if (destOpnd.GetValidBitsNum() != k64BitSize && destOpnd.GetValidBitsNum() != k32BitSize) { + destOpnd.SetValidBitsNum(destOpnd.GetSize()); + } + if (fromOpnd.GetValidBitsNum() != k64BitSize && fromOpnd.GetValidBitsNum() != k32BitSize) { + fromOpnd.SetValidBitsNum(fromOpnd.GetSize()); + } + /* copy remat info */ + MaintainRematInfo(destOpnd, fromOpnd, true); + DEBUG_ASSERT(insn != nullptr, "create move insn failed"); + insn->SetIsPhiMovInsn(true); + return *insn; +} + +RegOperand &AArch64PhiEliminate::GetCGVirtualOpearnd(RegOperand &ssaOpnd, const Insn &curInsn) { + VRegVersion *ssaVersion = GetSSAInfo()->FindSSAVersion(ssaOpnd.GetRegisterNumber()); + DEBUG_ASSERT(ssaVersion != nullptr, "find ssaVersion failed"); + DEBUG_ASSERT(!ssaVersion->IsDeleted(), "ssaVersion has been deleted"); + RegOperand *regForRecreate = &ssaOpnd; + if (curInsn.GetMachineOpcode() != MOP_asm && + !curInsn.IsVectorOp() && + !curInsn.IsSpecialIntrinsic() && + ssaVersion->GetAllUseInsns().empty() && + !curInsn.IsAtomic()) { + CHECK_FATAL(false, "plz delete dead version"); + } + if (GetSSAInfo()->IsNoDefVReg(ssaOpnd.GetRegisterNumber())) { + regForRecreate = MakeRoomForNoDefVreg(ssaOpnd); + } else { + DEBUG_ASSERT(regForRecreate->IsSSAForm(), "Opnd is not in ssa form"); + } + RegOperand &newReg = cgFunc->GetOrCreateVirtualRegisterOperand(*regForRecreate); + + DUInsnInfo *defInfo = ssaVersion->GetDefInsnInfo(); + Insn *defInsn = defInfo != nullptr ? defInfo->GetInsn() : nullptr; + /* + * case1 : both def/use + * case2 : inline-asm (do not do aggressive optimization) "0" + * case3 : cc flag operand + */ + if (defInsn != nullptr) { + /* case 1 */ + uint32 defUseIdx = defInsn->GetBothDefUseOpnd(); + if (defUseIdx != kInsnMaxOpnd) { + if (defInfo->GetOperands().count(defUseIdx)) { + CHECK_FATAL(defInfo->GetOperands()[defUseIdx] == 1, "multiple definiation"); + Operand &preRegOpnd = defInsn->GetOperand(defUseIdx); + DEBUG_ASSERT(preRegOpnd.IsRegister(), "unexpect operand type"); + newReg.SetRegisterNumber(static_cast(preRegOpnd).GetRegisterNumber()); + } + } + /* case 2 */ + if (defInsn->GetMachineOpcode() == MOP_asm) { + auto &inputList = static_cast(defInsn->GetOperand(kAsmInputListOpnd)); + VRegVersion *LastVersion = nullptr; + for (auto inputReg : inputList.GetOperands()) { + LastVersion = GetSSAInfo()->FindSSAVersion(inputReg->GetRegisterNumber()); + if (LastVersion != nullptr && LastVersion->GetOriginalRegNO() == ssaVersion->GetOriginalRegNO()) { + break; + } + LastVersion = nullptr; + } + if (LastVersion != nullptr) { + newReg.SetRegisterNumber(LastVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } else { + const MapleMap& bindingMap = defInsn->GetRegBinding(); + auto pairIt = bindingMap.find(ssaVersion->GetOriginalRegNO()); + if (pairIt != bindingMap.end()) { + newReg.SetRegisterNumber(pairIt->second); + } + } + } + /* case 3 */ + if (ssaVersion->GetOriginalRegNO() == kRFLAG) { + newReg.SetRegisterNumber(kRFLAG); + } + } else { + newReg.SetRegisterNumber(ssaVersion->GetOriginalRegNO()); + } + MaintainRematInfo(newReg, ssaOpnd, true); + newReg.SetOpndOutOfSSAForm(); + return newReg; +} + +void AArch64PhiEliminate::AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const { + Insn *posInsn = nullptr; + bool isPosPhi = false; + FOR_BB_INSNS_REV(insn, &bb) { + if (insn->IsPhi()) { + posInsn = insn; + isPosPhi = true; + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch()) { + posInsn = insn; + continue; + 
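+      /* remember the branch and keep scanning, so the mov is inserted before any trailing branches */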
} + break; + } + CHECK_FATAL(posInsn != nullptr, "insert mov for phi failed"); + if (isPosPhi) { + bb.InsertInsnAfter(*posInsn, movInsn); + } else { + bb.InsertInsnBefore(*posInsn, movInsn); + } +} + +/* copy remat info */ +void AArch64PhiEliminate::MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) { + if (CGOptions::GetRematLevel() > 0 && isCopy) { + if (fromOpnd.IsSSAForm()) { + VRegVersion *fromSSAVersion = GetSSAInfo()->FindSSAVersion(fromOpnd.GetRegisterNumber()); + regno_t rematRegNO = fromSSAVersion->GetOriginalRegNO(); + MIRPreg *fPreg = static_cast(cgFunc)->GetPseudoRegFromVirtualRegNO(rematRegNO); + if (fPreg != nullptr) { + PregIdx fPregIdx = cgFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno( + static_cast(fPreg->GetPregNo())); + RecordRematInfo(destOpnd.GetRegisterNumber(), fPregIdx); + } + } else { + regno_t rematRegNO = fromOpnd.GetRegisterNumber(); + PregIdx fPreg = FindRematInfo(rematRegNO); + if (fPreg > 0) { + RecordRematInfo(destOpnd.GetRegisterNumber(), fPreg); + } + } + } +} + +void AArch64PhiEliminate::ReCreateRegOperand(Insn &insn) { + auto opndNum = static_cast(insn.GetOperandSize()); + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + A64OperandPhiElmVisitor a64OpndPhiElmVisitor(this, insn, i); + opnd.Accept(a64OpndPhiElmVisitor); + } +} + +void A64OperandPhiElmVisitor::Visit(RegOperand *v) { + if (v->IsSSAForm()) { + DEBUG_ASSERT(v->GetRegisterNumber() != kRFLAG, "both condi and reg"); + insn->SetOperand(idx, a64PhiEliminator->GetCGVirtualOpearnd(*v, *insn)); + } +} + +void A64OperandPhiElmVisitor::Visit(ListOperand *v) { + std::list tempRegStore; + auto& opndList = v->GetOperands(); + + while (!opndList.empty()) { + auto *regOpnd = opndList.front(); + opndList.pop_front(); + + if (regOpnd->IsSSAForm()) { + tempRegStore.push_back(&a64PhiEliminator->GetCGVirtualOpearnd(*regOpnd, *insn)); + } else { + tempRegStore.push_back(regOpnd); + } + } + + DEBUG_ASSERT(v->GetOperands().empty(), "need to clean list"); + v->GetOperands().assign(tempRegStore.begin(), tempRegStore.end()); +} + +void A64OperandPhiElmVisitor::Visit(MemOperand *a64MemOpnd) { + RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); + RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); + if ((baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) || + (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm())) { + if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { + a64MemOpnd->SetBaseRegister(a64PhiEliminator->GetCGVirtualOpearnd(*baseRegOpnd, *insn)); + } + if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { + a64MemOpnd->SetIndexRegister(a64PhiEliminator->GetCGVirtualOpearnd(*indexRegOpnd, *insn)); + } + } +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..416d7aa1dfb2ee15027c357e5e26e36b7aa40401 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -0,0 +1,2073 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_proepilog.h" +#include "aarch64_cg.h" +#include "cg_option.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +namespace { +const std::set kFrameWhiteListFunc { +#include "framewhitelist.def" +}; + +bool IsFuncNeedFrame(const std::string &funcName) { + return kFrameWhiteListFunc.find(funcName) != kFrameWhiteListFunc.end(); +} +constexpr int32 kSoeChckOffset = 8192; + +enum RegsPushPop : uint8 { + kRegsPushOp, + kRegsPopOp +}; + +enum PushPopType : uint8 { + kPushPopSingle = 0, + kPushPopPair = 1 +}; + +MOperator pushPopOps[kRegsPopOp + 1][kRegTyFloat + 1][kPushPopPair + 1] = { + { /* push */ + { 0 }, /* undef */ + { /* kRegTyInt */ + MOP_xstr, /* single */ + MOP_xstp, /* pair */ + }, + { /* kRegTyFloat */ + MOP_dstr, /* single */ + MOP_dstp, /* pair */ + }, + }, + { /* pop */ + { 0 }, /* undef */ + { /* kRegTyInt */ + MOP_xldr, /* single */ + MOP_xldp, /* pair */ + }, + { /* kRegTyFloat */ + MOP_dldr, /* single */ + MOP_dldp, /* pair */ + }, + } +}; + +inline void AppendInstructionTo(Insn &insn, CGFunc &func) { + func.GetCurBB()->AppendInsn(insn); +} +} + +bool AArch64GenProEpilog::HasLoop() { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsBackEdgeDest()) { + return true; + } + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->HasLoop()) { + return true; + } + } + } + return false; +} + +/* + * Remove redundant mov and mark optimizable bl/blr insn in the BB. + * Return value: true to call this modified block again. 
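+ * Here the redundant mov is a self-copy of the return register (mov x0, x0 or
+ * mov w0, w0); optimizable bl/blr candidates are collected into callInsns.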
+ */ +bool AArch64GenProEpilog::OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &exitBB) const { + if (bb.NumInsn() == 1 && + (bb.GetLastInsn()->GetMachineOpcode() != MOP_xbr && + bb.GetLastInsn()->GetMachineOpcode() != MOP_xblr && + bb.GetLastInsn()->GetMachineOpcode() != MOP_xbl && + bb.GetLastInsn()->GetMachineOpcode() != MOP_xuncond)) { + return false; + } + FOR_BB_INSNS_REV_SAFE(insn, &bb, prev_insn) { + if (!insn->IsMachineInstruction() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { + continue; + } + MOperator insnMop = insn->GetMachineOpcode(); + switch (insnMop) { + case MOP_xldr: + case MOP_xldp: + case MOP_dldr: + case MOP_dldp: { + if (bb.GetKind() == BB::kBBReturn) { + RegOperand ® = static_cast(insn->GetOperand(0)); + if (AArch64Abi::IsCalleeSavedReg(static_cast(reg.GetRegisterNumber()))) { + break; /* inserted restore from calleeregs-placement, ignore */ + } + } + return false; + } + case MOP_wmovrr: + case MOP_xmovrr: { + CHECK_FATAL(insn->GetOperand(0).IsRegister(), "operand0 is not register"); + CHECK_FATAL(insn->GetOperand(1).IsRegister(), "operand1 is not register"); + auto ®1 = static_cast(insn->GetOperand(0)); + auto ®2 = static_cast(insn->GetOperand(1)); + + if (reg1.GetRegisterNumber() != R0 || reg2.GetRegisterNumber() != R0) { + return false; + } + + bb.RemoveInsn(*insn); + break; + } + case MOP_xblr: { + if (insn->GetOperand(0).IsRegister()) { + RegOperand ® = static_cast(insn->GetOperand(0)); + if (AArch64Abi::IsCalleeSavedReg(static_cast(reg.GetRegisterNumber()))) { + return false; /* can't tailcall, register will be overwritten by restore */ + } + } + /* flow through */ + } + [[clang::fallthrough]]; + case MOP_xbl: { + callInsns.insert(insn); + return false; + } + case MOP_xuncond: { + LabelOperand &bLab = static_cast(insn->GetOperand(0)); + if (exitBB.GetLabIdx() == bLab.GetLabelIndex()) { + break; + } + return false; + } + default: + return false; + } + } + + return true; +} + +/* Recursively invoke this function for all predecessors of exitBB */ +void AArch64GenProEpilog::TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB) { + /* callsite also in the return block as in "if () return; else foo();" + call in the exit block */ + if (!bb.IsEmpty() && !OptimizeTailBB(bb, callInsns, exitBB)) { + return; + } + + for (auto tmpBB : bb.GetPreds()) { + if (tmpBB->GetSuccs().size() != 1 || !tmpBB->GetEhSuccs().empty() || + (tmpBB->GetKind() != BB::kBBFallthru && tmpBB->GetKind() != BB::kBBGoto)) { + continue; + } + + if (OptimizeTailBB(*tmpBB, callInsns, exitBB)) { + TailCallBBOpt(*tmpBB, callInsns, exitBB); + } + } +} + +/* + * If a function without callee-saved register, and end with a function call, + * then transfer bl/blr to b/br. + * Return value: true if function do not need Prologue/Epilogue. false otherwise. + */ +bool AArch64GenProEpilog::TailCallOpt() { + /* Count how many call insns in the whole function. 
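+   * If every counted call turns out to be a tail-call candidate (nCount == optCount
+   * below), bl/blr can be rewritten into b/br and the prologue/epilogue omitted.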
*/ + uint32 nCount = 0; + bool hasGetStackClass = false; + + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_xbl) { + auto &target = static_cast(insn->GetOperand(0)); + if (IsFuncNeedFrame(target.GetName())) { + hasGetStackClass = true; + } + } + ++nCount; + } + } + } + if ((nCount > 0 && cgFunc.GetFunction().GetAttr(FUNCATTR_interface)) || hasGetStackClass) { + return false; + } + + if (nCount == 0) { + // no bl instr in any bb + return true; + } + + size_t exitBBSize = cgFunc.GetExitBBsVec().size(); + /* For now to reduce complexity */ + + BB *exitBB = nullptr; + if (exitBBSize == 0) { + if (cgFunc.GetLastBB()->GetPrev()->GetFirstStmt() == cgFunc.GetCleanupLabel() && + cgFunc.GetLastBB()->GetPrev()->GetPrev() != nullptr) { + exitBB = cgFunc.GetLastBB()->GetPrev()->GetPrev(); + } else { + exitBB = cgFunc.GetLastBB()->GetPrev(); + } + } else { + exitBB = cgFunc.GetExitBBsVec().front(); + } + uint32 i = 1; + size_t optCount = 0; + do { + MapleSet callInsns(tmpAlloc.Adapter()); + TailCallBBOpt(*exitBB, callInsns, *exitBB); + if (callInsns.size() != 0) { + optCount += callInsns.size(); + (void)exitBB2CallSitesMap.emplace(exitBB, callInsns); + } + if (i < exitBBSize) { + exitBB = cgFunc.GetExitBBsVec()[i]; + ++i; + } else { + break; + } + } while (1); + + /* regular calls exist in function */ + if (nCount != optCount) { + return false; + } + return true; +} + +static bool IsAddOrSubOp(MOperator mOp) { + switch (mOp) { + case MOP_xaddrrr: + case MOP_xaddrrrs: + case MOP_xxwaddrrre: + case MOP_xaddrri24: + case MOP_xaddrri12: + case MOP_xsubrrr: + case MOP_xsubrrrs: + case MOP_xxwsubrrre: + case MOP_xsubrri12: + return true; + default: + return false; + } +} + +/* tailcallopt cannot be used if stack address of this function is taken and passed, + not checking the passing for now, just taken */ +static bool IsStackAddrTaken(CGFunc &cgFunc) { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (IsAddOrSubOp(insn->GetMachineOpcode())) { + for (uint32 i = 0; i < insn->GetOperandSize(); i++) { + if (insn->GetOperand(i).IsRegister()) { + RegOperand ® = static_cast(insn->GetOperand(i)); + if (reg.GetRegisterNumber() == R29 || reg.GetRegisterNumber() == R31 || reg.GetRegisterNumber() == RSP) { + return true; + } + } + } + } + } + } + return false; +} + +bool AArch64GenProEpilog::NeedProEpilog() { + if (cgFunc.GetMirModule().GetSrcLang() != kSrcLangC) { + return true; + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) || cgFunc.HasVLAOrAlloca()) { + return true; + } + bool funcHasCalls = false; + if (cgFunc.GetCG()->DoTailCall() && !IsStackAddrTaken(cgFunc) && !stackProtect) { + funcHasCalls = !TailCallOpt(); // return value == "no call instr/only or 1 tailcall" + } else { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + funcHasCalls = true; + } + } + } + } + auto &aarchCGFunc = static_cast(cgFunc); + const MapleVector ®sToRestore = (!CGOptions::DoRegSavesOpt()) ? 
+ aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + size_t calleeSavedRegSize = kTwoRegister; + CHECK_FATAL(regsToRestore.size() >= calleeSavedRegSize, "Forgot FP and LR ?"); + if (funcHasCalls || regsToRestore.size() > calleeSavedRegSize || aarchCGFunc.HasStackLoadStore() || + static_cast(cgFunc.GetMemlayout())->GetSizeOfLocals() > 0 || + cgFunc.GetFunction().GetAttr(FUNCATTR_callersensitive)) { + return true; + } + return false; +} + +void AArch64GenProEpilog::GenStackGuard(BB &bb) { + if (!stackProtect) { + return; + } + auto &aarchCGFunc = static_cast(cgFunc); + BB *formerCurBB = cgFunc.GetCurBB(); + aarchCGFunc.GetDummyBB()->ClearInsns(); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + + MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); + StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); + RegOperand &stAddrOpnd = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); + + MemOperand *guardMemOp = + aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + stAddrOpnd, nullptr, &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), stkGuardSym); + MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *guardMemOp); + insn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(insn); + + uint64 vArea = 0; + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (ml->GetSizeOfGRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (ml->GetSizeOfVRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + int32 stkSize = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (useFP) { + stkSize -= static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()); + } + int32 memSize = (stkSize - kOffset8MemPos) - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerSize() * kBitsPerByte); + if (downStk->GetMemVaryType() == kNotVary && + aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { + downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); + } + mOp = aarchCGFunc.PickStInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + Insn &tmpInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *downStk); + tmpInsn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(tmpInsn); + + bb.InsertAtBeginning(*aarchCGFunc.GetDummyBB()); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { + if (!stackProtect) { + return bb; + } + + BB *formerCurBB = cgFunc.GetCurBB(); + cgFunc.GetDummyBB()->ClearInsns(); + cgFunc.SetCurBB(*(cgFunc.GetDummyBB())); + auto &aarchCGFunc = static_cast(cgFunc); + + const MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); + StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); + RegOperand &stAddrOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, 
GetPointerSize() * kBitsPerByte, kRegTyInt); + aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); + + MemOperand *guardMemOp = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, + GetPointerSize() * kBitsPerByte, stAddrOpnd, nullptr, + &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), + stkGuardSym); + MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *guardMemOp); + insn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(insn); + + uint64 vArea = 0; + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (ml->GetSizeOfGRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (ml->GetSizeOfVRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + RegOperand &checkOp = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R10, GetPointerSize() * kBitsPerByte, kRegTyInt); + int32 stkSize = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (useFP) { + stkSize -= static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()); + } + int32 memSize = (stkSize - kOffset8MemPos) - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerSize() * kBitsPerByte); + if (downStk->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { + downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); + } + mOp = aarchCGFunc.PickLdInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, checkOp, *downStk); + newInsn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(newInsn); + + cgFunc.SelectBxor(stAddrOpnd, stAddrOpnd, checkOp, PTY_u64); + LabelIdx failLable = aarchCGFunc.CreateLabel(); + aarchCGFunc.SelectCondGoto(aarchCGFunc.GetOrCreateLabelOperand(failLable), OP_brtrue, OP_eq, + stAddrOpnd, aarchCGFunc.CreateImmOperand(0, k64BitSize, false), PTY_u64, false); + + bb.AppendBBInsns(*(cgFunc.GetCurBB())); + + LabelIdx nextBBLableIdx = aarchCGFunc.CreateLabel(); + BB *nextBB = aarchCGFunc.CreateNewBB(nextBBLableIdx, bb.IsUnreachable(), BB::kBBFallthru, bb.GetFrequency()); + bb.AppendBB(*nextBB); + bb.PushBackSuccs(*nextBB); + nextBB->PushBackPreds(bb); + cgFunc.SetCurBB(*nextBB); + MIRSymbol *failFunc = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_fail"))); + ListOperand *srcOpnds = aarchCGFunc.CreateListOpnd(*cgFunc.GetFuncScopeAllocator()); + Insn &callInsn = aarchCGFunc.AppendCall(*failFunc, *srcOpnds); + callInsn.SetDoNotRemove(true); + + BB *newBB = cgFunc.CreateNewBB(failLable, bb.IsUnreachable(), bb.GetKind(), bb.GetFrequency()); + nextBB->AppendBB(*newBB); + if (cgFunc.GetLastBB() == &bb) { + cgFunc.SetLastBB(*newBB); + } + bb.PushBackSuccs(*newBB); + nextBB->PushBackSuccs(*newBB); + newBB->PushBackPreds(*nextBB); + newBB->PushBackPreds(bb); + + bb.SetKind(BB::kBBIf); + cgFunc.SetCurBB(*formerCurBB); + return *newBB; +} + +bool AArch64GenProEpilog::InsertOpndRegs(Operand &op, std::set &vecRegs) const { + Operand *opnd = &op; + CHECK_FATAL(opnd != nullptr, "opnd is nullptr in InsertRegs"); + if (opnd->IsList()) { + MapleList pregList = static_cast(opnd)->GetOperands(); + for (auto *preg : pregList) { + if (preg != nullptr) { + 
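+        /* e.g. the parameter list of a call contributes every register it carries */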
vecRegs.insert(preg->GetRegisterNumber()); + } + } + } + if (opnd->IsMemoryAccessOperand()) { /* the registers of kOpdMem are complex to be detected */ + RegOperand *baseOpnd = static_cast(opnd)->GetBaseRegister(); + if (baseOpnd != nullptr) { + vecRegs.insert(baseOpnd->GetRegisterNumber()); + } + RegOperand *indexOpnd = static_cast(opnd)->GetIndexRegister(); + if (indexOpnd != nullptr) { + vecRegs.insert(indexOpnd->GetRegisterNumber()); + } + } + if (opnd->IsRegister()) { + RegOperand *preg = static_cast(opnd); + if (preg != nullptr) { + vecRegs.insert(preg->GetRegisterNumber()); + } + } + return true; +} + +bool AArch64GenProEpilog::InsertInsnRegs(Insn &insn, bool insertSource, std::set &vecSourceRegs, + bool insertTarget, std::set &vecTargetRegs) { + Insn *curInsn = &insn; + for (uint32 o = 0; o < curInsn->GetOperandSize(); ++o) { + Operand &opnd = curInsn->GetOperand(o); + if (insertSource && curInsn->OpndIsUse(o)) { + InsertOpndRegs(opnd, vecSourceRegs); + } + if (insertTarget && curInsn->OpndIsDef(o)) { + InsertOpndRegs(opnd, vecTargetRegs); + } + } + return true; +} + +bool AArch64GenProEpilog::FindRegs(Operand &op, std::set &vecRegs) const { + Operand *opnd = &op; + if (opnd == nullptr || vecRegs.empty()) { + return false; + } + if (opnd->IsList()) { + MapleList pregList = static_cast(opnd)->GetOperands(); + for (auto *preg : pregList) { + if (preg->GetRegisterNumber() == R29 || + vecRegs.find(preg->GetRegisterNumber()) != vecRegs.end()) { + return true; /* the opReg will overwrite or reread the vecRegs */ + } + } + } + if (opnd->IsMemoryAccessOperand()) { /* the registers of kOpdMem are complex to be detected */ + RegOperand *baseOpnd = static_cast(opnd)->GetBaseRegister(); + RegOperand *indexOpnd = static_cast(opnd)->GetIndexRegister(); + if ((baseOpnd != nullptr && baseOpnd->GetRegisterNumber() == R29) || + (indexOpnd != nullptr && indexOpnd->GetRegisterNumber() == R29)) { + return true; /* Avoid modifying data on the stack */ + } + if ((baseOpnd != nullptr && vecRegs.find(baseOpnd->GetRegisterNumber()) != vecRegs.end()) || + (indexOpnd != nullptr && vecRegs.find(indexOpnd->GetRegisterNumber()) != vecRegs.end())) { + return true; + } + } + if (opnd->IsRegister()) { + RegOperand *regOpnd = static_cast(opnd); + if (regOpnd->GetRegisterNumber() == R29 || + vecRegs.find(regOpnd->GetRegisterNumber()) != vecRegs.end()) { + return true; /* dst is a target register, result_dst is a target register */ + } + } + return false; +} + +bool AArch64GenProEpilog::BackwardFindDependency(BB &ifbb, std::set &vecReturnSourceRegs, + std::list &existingInsns, + std::list &moveInsns) { + /* + * Pattern match,(*) instruction are moved down below branch. + * ******************** + * curInsn: + * in predBB + * in ifBB + * in returnBB + * ********************* + * list: the insns can be moved into the coldBB + * (1) the instruction is neither a branch nor a call, except for the ifbb.GetLastInsn() + * As long as a branch insn exists, + * the fast path finding fails and the return value is false, + * but the code sinking can be continued. + * (2) the predBB is not a ifBB, + * As long as a ifBB in preds exists, + * the code sinking fails, + * but fast path finding can be continued. 
+ * (3) the targetRegs of insns in existingInsns can neither be reread or overwrite + * (4) the sourceRegs of insns in existingInsns can not be overwrite + * (5) the sourceRegs of insns in returnBB can neither be reread or overwrite + * (6) the targetRegs and sourceRegs cannot be R29 R30, to protect the stack + * (7) modified the reg when: + * -------------- + * curInsn: move R2,R1 + * : s s s + * s s s + * -> s s s + * ------------ + * (a) all targets cannot be R1, all sources cannot be R1 + * all targets cannot be R2, all return sources cannot be R2 + * (b) the targetRegs and sourceRegs cannot be list or MemoryAccess + * (c) no ifBB in preds, no branch insns + * (d) the bits of source-R2 must be equal to the R2 + * (e) replace the R2 with R1 + */ + BB *pred = &ifbb; + std::set vecTargetRegs; /* the targrtRegs of existingInsns */ + std::set vecSourceRegs; /* the soureRegs of existingInsns */ + bool ifPred = false; /* Indicates whether a ifBB in pred exists */ + bool bl = false; /* Indicates whether a branch insn exists */ + do { + FOR_BB_INSNS_REV(insn, pred) { + /* code sinking */ + if (insn->IsImmaterialInsn()) { + moveInsns.push_back(insn); + continue; + } + /* code sinking */ + if (!insn->IsMachineInstruction()) { + moveInsns.push_back(insn); + continue; + } + /* code sinking fails, the insns must be retained in the ifBB */ + if (ifPred || insn == ifbb.GetLastInsn() || insn->IsBranch() || insn->IsCall() || + insn->IsStore() || insn->IsStorePair()) { + /* fast path finding fails */ + if (insn != ifbb.GetLastInsn() && (insn->IsBranch() || insn->IsCall() || + insn->IsStore() || insn->IsStorePair())) { + bl = true; + } + InsertInsnRegs(*insn, true, vecSourceRegs, true, vecTargetRegs); + existingInsns.push_back(insn); + continue; + } + bool allow = true; /* whether allow this insn move into the codeBB */ + for (uint32 o = 0; allow && o < insn->GetOperandSize(); ++o) { + Operand &opnd = insn->GetOperand(o); + if (insn->OpndIsDef(o)) { + allow = allow & !FindRegs(opnd, vecTargetRegs); + allow = allow & !FindRegs(opnd, vecSourceRegs); + allow = allow & !FindRegs(opnd, vecReturnSourceRegs); + } + if (insn->OpndIsUse(o)) { + allow = allow & !FindRegs(opnd, vecTargetRegs); + } + } + /* if a result_dst not allowed, this insn can be allowed on the condition of mov Rx,R0/R1, + * and tje existing insns cannot be blr + * RLR 31, RFP 32, RSP 33, RZR 34 */ + if (!ifPred && !bl && !allow && (insn->GetMachineOpcode() == MOP_xmovrr || + insn->GetMachineOpcode() == MOP_wmovrr)) { + Operand *resultOpnd = &(insn->GetOperand(0)); + Operand *srcOpnd = &(insn->GetOperand(1)); + regno_t resultNO = static_cast(resultOpnd)->GetRegisterNumber(); + regno_t srcNO = static_cast(srcOpnd)->GetRegisterNumber(); + if (!FindRegs(*resultOpnd, vecTargetRegs) && !FindRegs(*srcOpnd, vecTargetRegs) && + !FindRegs(*srcOpnd, vecSourceRegs) && !FindRegs(*srcOpnd, vecReturnSourceRegs) && + (srcNO < RLR || srcNO > RZR)) { + allow = true; /* allow on the conditional mov Rx,Rxx */ + for (auto *exit : existingInsns) { + /* the registers of kOpdMem are complex to be detected */ + for (uint32 o = 0; o < exit->GetOperandSize(); ++o) { + if (!exit->OpndIsUse(o)) { + continue; + } + Operand *opd = &(exit->GetOperand(o)); + if (opd->IsList() || opd->IsMemoryAccessOperand()) { + allow = false; + break; + } + /* Distinguish between 32-bit regs and 64-bit regs */ + if (opd->IsRegister() && + static_cast(opd)->GetRegisterNumber() == resultNO && + opd != resultOpnd) { + allow = false; + break; + } + } + } + } + /* replace the R2 with R1 */ + if 
(allow) { + for (auto *exit : existingInsns) { + for (uint32 o = 0; o < exit->GetOperandSize(); ++o) { + if (!exit->OpndIsUse(o)) { + continue; + } + Operand *opd = &(exit->GetOperand(o)); + if (opd->IsRegister() && (opd == resultOpnd)) { + exit->SetOperand(o, *srcOpnd); + } + } + } + } + } + if (!allow) { /* all result_dsts are not target register */ + /* code sinking fails */ + InsertInsnRegs(*insn, true, vecSourceRegs, true, vecTargetRegs); + existingInsns.push_back(insn); + } else { + moveInsns.push_back(insn); + } + } + if (pred->GetPreds().empty()) { + break; + } + if (!ifPred) { + for (auto *tmPred : pred->GetPreds()) { + pred = tmPred; + /* try to find the BB without branch */ + if (tmPred->GetKind() == BB::kBBGoto || tmPred->GetKind() == BB::kBBFallthru) { + ifPred = false; + break; + } else { + ifPred = true; + } + } + } + } while (pred != nullptr); + for (std::set::iterator it = vecTargetRegs.begin(); it != vecTargetRegs.end(); ++it) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(*it))) { /* flag register */ + return false; + } + } + return !bl; +} + +BB *AArch64GenProEpilog::IsolateFastPath(BB &bb) { + /* + * Detect "if (cond) return" fast path, and move extra instructions + * to the slow path. + * Must match the following block structure. BB1 can be a series of + * single-pred/single-succ blocks. + * BB1 ops1 cmp-br to BB3 BB1 cmp-br to BB3 + * BB2 ops2 br to retBB ==> BB2 ret + * BB3 slow path BB3 ops1 ops2 + * if the detect is successful, BB3 will be used to generate prolog stuff. + */ + if (bb.GetPrev() != nullptr) { + return nullptr; + } + BB *ifBB = nullptr; + BB *returnBB = nullptr; + BB *coldBB = nullptr; + { + BB *curBB = &bb; + /* Look for straight line code */ + while (1) { + if (!curBB->GetEhSuccs().empty()) { + return nullptr; + } + if (curBB->GetSuccs().size() == 1) { + if (curBB->HasCall()) { + return nullptr; + } + BB *succ = curBB->GetSuccs().front(); + if (succ->GetPreds().size() != 1 || !succ->GetEhPreds().empty()) { + return nullptr; + } + curBB = succ; + } else if (curBB->GetKind() == BB::kBBIf) { + ifBB = curBB; + break; + } else { + return nullptr; + } + } + } + /* targets of if bb can only be reached by if bb */ + { + CHECK_FATAL(!ifBB->GetSuccs().empty(), "null succs check!"); + BB *first = ifBB->GetSuccs().front(); + BB *second = ifBB->GetSuccs().back(); + if (first->GetPreds().size() != 1 || !first->GetEhPreds().empty()) { + return nullptr; + } + if (second->GetPreds().size() != 1 || !second->GetEhPreds().empty()) { + return nullptr; + } + /* One target of the if bb jumps to a return bb */ + if (first->GetKind() != BB::kBBGoto && first->GetKind() != BB::kBBFallthru) { + return nullptr; + } + if (first->GetSuccs().size() != 1) { + return nullptr; + } + if (first->GetSuccs().front()->GetKind() != BB::kBBReturn) { + return nullptr; + } + if (first->GetSuccs().front()->GetPreds().size() != 1) { + return nullptr; + } + if (first->GetSuccs().front()->NumInsn() > 2) { /* avoid a insn is used to debug */ + return nullptr; + } + if (second->GetSuccs().empty()) { + return nullptr; + } + returnBB = first; + coldBB = second; + } + /* Search backward looking for dependencies for the cond branch */ + std::list existingInsns; /* the insns must be retained in the ifBB (and the return BB) */ + std::list moveInsns; /* instructions to be moved to coldbb */ + /* + * The control flow matches at this point. + * Make sure the SourceRegs of the insns in returnBB (vecReturnSourceReg) cannot be overwrite. 
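+   * (that is, registers feeding the return value must not be clobbered by any insn sunk below the branch)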
+ * the regs in insns have three forms: list, MemoryAccess, or Register. + */ + CHECK_FATAL(returnBB != nullptr, "null ptr check"); + std::set vecReturnSourceRegs; + FOR_BB_INSNS_REV(insn, returnBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair()) { + return nullptr; + } + InsertInsnRegs(*insn, true, vecReturnSourceRegs, false, vecReturnSourceRegs); + existingInsns.push_back(insn); + } + FOR_BB_INSNS_REV(insn, returnBB->GetSuccs().front()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair()) { + return nullptr; + } + InsertInsnRegs(*insn, true, vecReturnSourceRegs, false, vecReturnSourceRegs); + existingInsns.push_back(insn); + } + /* + * The mv is the 1st move using the parameter register leading to the branch + * The ld is the load using the parameter register indirectly for the branch + * The depMv is the move which preserves the result of the load but might + * destroy a parameter register which will be moved below the branch. + */ + bool fast = BackwardFindDependency(*ifBB, vecReturnSourceRegs, existingInsns, moveInsns); + /* move extra instructions to the slow path */ + if (!fast) { + return nullptr; + } + for (auto in : moveInsns) { + in->GetBB()->RemoveInsn(*in); + CHECK_FATAL(coldBB != nullptr, "null ptr check"); + static_cast(coldBB->InsertInsnBegin(*in)); + } + /* All instructions are in the right place, replace branch to ret bb to just ret. */ + /* Remove the lastInsn of gotoBB */ + if (returnBB->GetKind() == BB::kBBGoto) { + returnBB->RemoveInsn(*returnBB->GetLastInsn()); + } + BB *tgtBB = returnBB->GetSuccs().front(); + CHECK_FATAL(tgtBB != nullptr, "null ptr check"); + FOR_BB_INSNS(insn, tgtBB) { + returnBB->AppendInsn(*insn); /* add the insns such as MOP_xret */ + } + returnBB->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xret)); + /* bb is now a retbb and has no succ. 
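+   * tgtBB's instructions were copied into returnBB above, so it is emptied and
+   * unlinked from the layout; coldBB is returned so the prologue lands on the slow path only.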
 */
+    returnBB->SetKind(BB::kBBReturn);
+    auto predIt = std::find(tgtBB->GetPredsBegin(), tgtBB->GetPredsEnd(), returnBB);
+    tgtBB->ErasePreds(predIt);
+    tgtBB->ClearInsns();
+    returnBB->ClearSuccs();
+    if (tgtBB->GetPrev() != nullptr && tgtBB->GetNext() != nullptr) {
+        tgtBB->GetPrev()->SetNext(tgtBB->GetNext());
+        tgtBB->GetNext()->SetPrev(tgtBB->GetPrev());
+    }
+    SetFastPathReturnBB(tgtBB);
+    return coldBB;
+}
+
+MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc,
+    const MemOperand &mo, uint32 bitLen, AArch64reg baseRegNum) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    CHECK_FATAL(mo.GetAddrMode() == MemOperand::kAddrModeBOi, "mode should be kAddrModeBOi");
+    OfstOperand *ofstOp = mo.GetOffsetImmediate();
+    int32 offsetVal = static_cast<int32>(ofstOp->GetOffsetValue());
+    CHECK_FATAL(offsetVal > 0, "offsetVal should be greater than 0");
+    CHECK_FATAL((static_cast<uint32>(offsetVal) & 0x7) == 0, "(offsetVal & 0x7) should be equal to 0");
+    /*
+     * Offset adjustment due to FP/SP has already been done
+     * in AArch64GenProEpilog::GeneratePushRegs() and AArch64GenProEpilog::GeneratePopRegs()
+     */
+    RegOperand &br = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(baseRegNum, bitLen, kRegTyInt);
+    if (aarchCGFunc.GetSplitBaseOffset() == 0) {
+        aarchCGFunc.SetSplitBaseOffset(offsetVal); /* remember the offset; don't forget to clear it */
+        ImmOperand &immAddEnd = aarchCGFunc.CreateImmOperand(offsetVal, k64BitSize, true);
+        RegOperand *origBaseReg = mo.GetBaseRegister();
+        aarchCGFunc.SelectAdd(br, *origBaseReg, immAddEnd, PTY_i64);
+    }
+    offsetVal = offsetVal - aarchCGFunc.GetSplitBaseOffset();
+    return &aarchCGFunc.CreateReplacementMemOperand(bitLen, br, offsetVal);
+}
+
+void AArch64GenProEpilog::AppendInstructionPushPair(CGFunc &cgFunc,
+    AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair];
+    Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty);
+    Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty);
+    Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast<uint32>(offset), GetPointerSize() * kBitsPerByte);
+
+    uint32 dataSize = GetPointerSize() * kBitsPerByte;
+    CHECK_FATAL(offset >= 0, "offset must >= 0");
+    if (offset > kStpLdpImm64UpperBound) {
+        o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast<MemOperand*>(o2), dataSize, R16);
+    }
+    Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2);
+    std::string comment = "SAVE CALLEE REGISTER PAIR";
+    pushInsn.SetComment(comment);
+    AppendInstructionTo(pushInsn, cgFunc);
+
+    /* Append CFI code */
+    if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) {
+        int32 stackFrameSize = static_cast<int32>(
+            static_cast<AArch64MemLayout*>(cgFunc.GetMemlayout())->RealStackFrameSize());
+        stackFrameSize -= static_cast<int32>(cgFunc.GetMemlayout()->SizeOfArgsToStackPass());
+        int32 cfiOffset = stackFrameSize - offset;
+        BB *curBB = cgFunc.GetCurBB();
+        Insn *newInsn = curBB->InsertInsnAfter(pushInsn, aarchCGFunc.CreateCfiOffsetInsn(reg0, -cfiOffset, k64BitSize));
+        curBB->InsertInsnAfter(*newInsn, aarchCGFunc.CreateCfiOffsetInsn(reg1, -cfiOffset + kOffset8MemPos, k64BitSize));
+    }
+}
+
+void AArch64GenProEpilog::AppendInstructionPushSingle(CGFunc &cgFunc,
+    AArch64reg reg, RegType rty, int32 offset) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopSingle];
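+    // Editorial sketch (not part of the original patch): when a single
+    // callee-saved store lands out of str immediate range, the split below
+    // rewrites, roughly,
+    //     str  x19, [sp, #BIG]          // offset out of range
+    // into
+    //     add  x9, sp, #BIG_hi          // R9 assumed free as scratch here
+    //     str  x19, [x9, #BIG_lo]
+    // mirroring what SplitStpLdpOffsetForCalleeSavedWithAddInstruction does
+    // for pairs with R16. BIG_hi/BIG_lo are illustrative names only.
+    Operand &o0 = 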
aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + MemOperand *aarchMemO1 = static_cast(o1); + uint32 dataSize = GetPointerSize() * kBitsPerByte; + if (aarchMemO1->GetMemVaryType() == kNotVary && + aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { + o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R9); + } + + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o1); + std::string comment = "SAVE CALLEE REGISTER"; + pushInsn.SetComment(comment); + AppendInstructionTo(pushInsn, cgFunc); + + /* Append CFI code */ + if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) { + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + stackFrameSize -= static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); + int32 cfiOffset = stackFrameSize - offset; + cgFunc.GetCurBB()->InsertInsnAfter(pushInsn, + aarchCGFunc.CreateCfiOffsetInsn(reg, -cfiOffset, k64BitSize)); + } +} + +Insn &AArch64GenProEpilog::AppendInstructionForAllocateOrDeallocateCallFrame(int64 argsToStkPassSize, + AArch64reg reg0, AArch64reg reg1, + RegType rty, bool isAllocate) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = isAllocate ? pushPopOps[kRegsPushOp][rty][kPushPopPair] : pushPopOps[kRegsPopOp][rty][kPushPopPair]; + uint8 size; + if (CGOptions::IsArm64ilp32()) { + size = k8ByteSize; + } else { + size = GetPointerSize(); + } + if (argsToStkPassSize <= kStrLdrImm64UpperBound - kOffset8MemPos) { + mOp = isAllocate ? pushPopOps[kRegsPushOp][rty][kPushPopSingle] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), size * kBitsPerByte); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o2); + AppendInstructionTo(insn1, cgFunc); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize + size), + size * kBitsPerByte); + Insn &insn2 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); + AppendInstructionTo(insn2, cgFunc); + return insn2; + } else { + RegOperand &oo = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, size * kBitsPerByte, kRegTyInt); + ImmOperand &io1 = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k64BitSize, true); + aarchCGFunc.SelectCopyImm(oo, io1, PTY_i64); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + RegOperand &rsp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, size * kBitsPerByte, kRegTyInt); + MemOperand *mo = aarchCGFunc.CreateMemOperand( + MemOperand::kAddrModeBOrX, size * kBitsPerByte, rsp, oo, 0); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? MOP_xstr : MOP_xldr, o0, *mo); + AppendInstructionTo(insn1, cgFunc); + ImmOperand &io2 = aarchCGFunc.CreateImmOperand(size, k64BitSize, true); + aarchCGFunc.SelectAdd(oo, oo, io2, PTY_i64); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + mo = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOrX, + size * kBitsPerByte, rsp, oo, 0); + Insn &insn2 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? 
MOP_xstr : MOP_xldr, o1, *mo); + AppendInstructionTo(insn2, cgFunc); + return insn2; + } +} + +Insn &AArch64GenProEpilog::CreateAndAppendInstructionForAllocateCallFrame(int64 argsToStkPassSize, + AArch64reg reg0, AArch64reg reg1, + RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + Insn *allocInsn = nullptr; + if (argsToStkPassSize > kStpLdpImm64UpperBound) { + allocInsn = &AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, true); + } else { + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), + GetPointerSize() * kBitsPerByte); + allocInsn = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + AppendInstructionTo(*allocInsn, cgFunc); + } + if (currCG->NeedInsertInstrumentationFunction()) { + aarchCGFunc.AppendCall(*currCG->GetInstrumentationFunction()); + } else if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } else if (currCG->InstrumentWithProfile()) { + aarchCGFunc.AppendCall(*currCG->GetProfileFunction()); + } + return *allocInsn; +} + +void AArch64GenProEpilog::AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("allocate activation frame")); + } + + Insn *ipoint = nullptr; + /* + * stackFrameSize includes the size of args to stack-pass + * if a function has neither VLA nor alloca. + */ + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + /* + * ldp/stp's imm should be within -512 and 504; + * if stp's imm > 512, we fall back to the stp-sub version + */ + bool useStpSub = false; + int64 offset = 0; + int32 cfiOffset = 0; + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + /* + * stack_frame_size == size of formal parameters + callee-saved (including FP/RL) + * + size of local vars + * + size of actuals + * (when passing more than 8 args, its caller's responsibility to + * allocate space for it. size of actuals represent largest such size in the function. 
+ */ + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + cfiOffset = stackFrameSize; + } else { + if (stackFrameSize > kStpLdpImm64UpperBound) { + useStpSub = true; + offset = kOffset16MemPos; + stackFrameSize -= offset; + } else { + offset = stackFrameSize; + } + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + AppendInstructionTo(*ipoint, cgFunc); + cfiOffset = offset; + if (currCG->NeedInsertInstrumentationFunction()) { + aarchCGFunc.AppendCall(*currCG->GetInstrumentationFunction()); + } else if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } else if (currCG->InstrumentWithProfile()) { + aarchCGFunc.AppendCall(*currCG->GetProfileFunction()); + } + } + + ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); + + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + CHECK_FATAL(!useStpSub, "Invalid assumption"); + ipoint = &CreateAndAppendInstructionForAllocateCallFrame(argsToStkPassSize, reg0, reg1, rty); + } + + if (useStpSub) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + aarchCGFunc.SetUsedStpSubPairForCallFrameAllocation(true); + } + + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + int32 cfiOffsetSecond = 0; + if (useStpSub) { + cfiOffsetSecond = stackFrameSize; + ipoint = InsertCFIDefCfaOffset(cfiOffsetSecond, *ipoint); + } + cfiOffsetSecond = GetOffsetFromCFA(); + if (!cgFunc.HasVLAOrAlloca()) { + cfiOffsetSecond -= argsToStkPassSize; + } + if (cgFunc.GenCfi()) { + BB *curBB = cgFunc.GetCurBB(); + if (useFP) { + ipoint = curBB->InsertInsnAfter( + *ipoint, aarchCGFunc.CreateCfiOffsetInsn(stackBaseReg, -cfiOffsetSecond, k64BitSize)); + } + curBB->InsertInsnAfter(*ipoint, + aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffsetSecond + kOffset8MemPos, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("allocate activation frame for debugging")); + } + + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + + Insn *ipoint = nullptr; + int32 cfiOffset = 0; + + if (argsToStkPassSize > 0) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = 
cgFunc.GetCurBB()->GetLastInsn(); + cfiOffset = stackFrameSize; + (void)InsertCFIDefCfaOffset(cfiOffset, *ipoint); + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + argsToStkPassSize -= (kDivide2 * k8ByteSize); + } + ipoint = &CreateAndAppendInstructionForAllocateCallFrame(argsToStkPassSize, reg0, reg1, rty); + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + cfiOffset = GetOffsetFromCFA(); + cfiOffset -= argsToStkPassSize; + } else { + bool useStpSub = false; + + if (stackFrameSize > kStpLdpImm64UpperBound) { + useStpSub = true; + RegOperand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + cfiOffset = stackFrameSize; + ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); + } else { + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + AppendInstructionTo(*ipoint, cgFunc); + cfiOffset = stackFrameSize; + ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); + } + + if (useStpSub) { + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + AppendInstructionTo(*ipoint, cgFunc); + } + + if (currCG->NeedInsertInstrumentationFunction()) { + aarchCGFunc.AppendCall(*currCG->GetInstrumentationFunction()); + } else if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } else if (currCG->InstrumentWithProfile()) { + aarchCGFunc.AppendCall(*currCG->GetProfileFunction()); + } + + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + cfiOffset = GetOffsetFromCFA(); + } + if (cgFunc.GenCfi()) { + BB *curBB = cgFunc.GetCurBB(); + if (useFP) { + ipoint = curBB->InsertInsnAfter(*ipoint, aarchCGFunc.CreateCfiOffsetInsn(stackBaseReg, -cfiOffset, k64BitSize)); + } + curBB->InsertInsnAfter(*ipoint, aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffset + kOffset8MemPos, k64BitSize)); + } +} + +/* + * From AArch64 Reference Manual + * C1.3.3 Load/Store Addressing Mode + * ... + * When stack alignment checking is enabled by system software and + * the base register is the SP, the current stack pointer must be + * initially quadword aligned, that is aligned to 16 bytes. Misalignment + * generates a Stack Alignment fault. The offset does not have to + * be a multiple of 16 bytes unless the specific Load/Store instruction + * requires this. SP cannot be used as a register offset. + */ +void AArch64GenProEpilog::GeneratePushRegs() { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + const MapleVector ®sToSave = (!CGOptions::DoRegSavesOpt()) ? 
+ aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + + CHECK_FATAL(!regsToSave.empty(), "FP/LR not added to callee-saved list?"); + + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("save callee-saved registers")); + } + + /* + * Even if we don't use RFP, since we push a pair of registers in one instruction + * and the stack needs be aligned on a 16-byte boundary, push RFP as well if function has a call + * Make sure this is reflected when computing callee_saved_regs.size() + */ + if (!currCG->GenerateDebugFriendlyCode()) { + AppendInstructionAllocateCallFrame(R29, RLR, kRegTyInt); + } else { + AppendInstructionAllocateCallFrameDebug(R29, RLR, kRegTyInt); + } + + if (useFP) { + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("copy SP to FP")); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + if (!isLmbc || cgFunc.SeenFP() || cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); + } + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(stackBaseReg, + static_cast( + cgFunc.GetMemlayout())->RealStackFrameSize() - argsToStkPassSize, k64BitSize)); + } + } else { + aarchCGFunc.SelectCopy(fpOpnd, PTY_u64, spOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn( + cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa_register). 
+ AddOpndChain(aarchCGFunc.CreateCfiRegOperand(stackBaseReg, k64BitSize))); + } + } + } + + MapleVector::const_iterator it = regsToSave.begin(); + /* skip the first two registers */ + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + offset = static_cast((memLayout->RealStackFrameSize() - + aarchCGFunc.SizeOfCalleeSaved()) - memLayout->GetSizeOfLocals()); + } else { + offset = (static_cast(memLayout->RealStackFrameSize() - + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))) - /* for FP/LR */ + memLayout->SizeOfArgsToStackPass()); + } + + if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { + offset -= kAarch64StackPtrAlignment; + } + + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + auto saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + + for (; it != regsToSave.end(); ++it) { + AArch64reg reg = *it; + CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + if (firstHalf == kRinvalid) { + /* remember it */ + firstHalf = reg; + } else { + AppendInstructionPushPair(cgFunc, firstHalf, reg, regType, offset); + GetNextOffsetCalleeSaved(offset); + firstHalf = kRinvalid; + } + } + + if (intRegFirstHalf != kRinvalid) { + AppendInstructionPushSingle(cgFunc, intRegFirstHalf, kRegTyInt, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (fpRegFirstHalf != kRinvalid) { + AppendInstructionPushSingle(cgFunc, fpRegFirstHalf, kRegTyFloat, offset); + GetNextOffsetCalleeSaved(offset); + } + + /* + * in case we split stp/ldp instructions, + * so that we generate a load-into-base-register instruction + * for pop pairs as well. + */ + aarchCGFunc.SetSplitBaseOffset(0); +} + +void AArch64GenProEpilog::GeneratePushUnnamedVarargRegs() { + auto &aarchCGFunc = static_cast(cgFunc); + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *memlayout = static_cast(cgFunc.GetMemlayout()); + uint8 size; + if (CGOptions::IsArm64ilp32()) { + size = k8ByteSize; + } else { + size = GetPointerSize(); + } + uint32 dataSizeBits = size * kBitsPerByte; + uint32 offset; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetGRSaveAreaBaseLoc()); /* SP reference */ + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
Hole between VR and GR area */ + } + } else { + offset = (UINT32_MAX - memlayout->GetSizeOfGRSaveArea()) + 1; /* FP reference */ + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset -= size; + } + } + uint32 grSize = (UINT32_MAX - offset) + 1; + uint32 start_regno = k8BitSize - (memlayout->GetSizeOfGRSaveArea() / size); + DEBUG_ASSERT(start_regno <= k8BitSize, "Incorrect starting GR regno for GR Save Area"); + for (uint32 i = start_regno + static_cast(R0); i < static_cast(R8); i++) { + uint32 tmpOffset = 0; + if (CGOptions::IsBigEndian()) { + if ((dataSizeBits >> 3) < 8) { + tmpOffset += 8U - (dataSizeBits >> 3); + } + } + Operand *stackLoc; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + } else { + stackLoc = aarchCGFunc.GenLmbcFpMemOperand(offset, size); + } + RegOperand ® = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + Insn &inst = cgFunc.GetInsnBuilder()->BuildInsn(aarchCGFunc.PickStInsn(dataSizeBits, PTY_i64), reg, *stackLoc); + cgFunc.GetCurBB()->AppendInsn(inst); + offset += size; + } + if (!CGOptions::UseGeneralRegOnly()) { + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetVRSaveAreaBaseLoc()); + } else { + offset = (UINT32_MAX - (memlayout->GetSizeOfVRSaveArea() + grSize)) + 1; + } + start_regno = k8BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize)); + DEBUG_ASSERT(start_regno <= k8BitSize, "Incorrect starting GR regno for VR Save Area"); + for (uint32 i = start_regno + static_cast(V0); i < static_cast(V8); i++) { + uint32 tmpOffset = 0; + if (CGOptions::IsBigEndian()) { + if ((dataSizeBits >> 3) < 16) { + tmpOffset += 16U - (dataSizeBits >> 3); + } + } + Operand *stackLoc; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + } else { + stackLoc = aarchCGFunc.GenLmbcFpMemOperand(offset, size); + } + RegOperand ® = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + Insn &inst = cgFunc.GetInsnBuilder()->BuildInsn(aarchCGFunc.PickStInsn(dataSizeBits, PTY_f64), reg, *stackLoc); + cgFunc.GetCurBB()->AppendInsn(inst); + offset += (size * k2BitSize); + } + } + } +} + +void AArch64GenProEpilog::AppendInstructionStackCheck(AArch64reg reg, RegType rty, int32 offset) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + /* sub x16, sp, #0x2000 */ + auto &x16Opnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, rty); + auto &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, rty); + auto &imm1 = aarchCGFunc.CreateImmOperand(offset, k64BitSize, true); + aarchCGFunc.SelectSub(x16Opnd, spOpnd, imm1, PTY_u64); + + /* ldr wzr, [x16] */ + auto &wzr = cgFunc.GetZeroOpnd(k32BitSize); + auto &refX16 = aarchCGFunc.CreateMemOpnd(reg, 0, k64BitSize); + auto &soeInstr = cgFunc.GetInsnBuilder()->BuildInsn(MOP_wldr, wzr, refX16); + if (currCG->GenerateVerboseCG()) { + soeInstr.SetComment("soerror"); + } + soeInstr.SetDoNotRemove(true); + AppendInstructionTo(soeInstr, cgFunc); +} + +void AArch64GenProEpilog::GenerateProlog(BB &bb) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + BB *formerCurBB = cgFunc.GetCurBB(); + aarchCGFunc.GetDummyBB()->ClearInsns(); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + 
cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + if (!cgFunc.GetHasProEpilogue()) { + return; + } + + // insert .loc for function + if (currCG->GetCGOptions().WithLoc() && + (!currCG->GetMIRModule()->IsCModule() || currCG->GetMIRModule()->IsWithDbgInfo())) { + MIRFunction *func = &cgFunc.GetFunction(); + MIRSymbol *fSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + if (currCG->GetCGOptions().WithSrc()) { + uint32 tempmaxsize = static_cast(currCG->GetMIRModule()->GetSrcFileInfo().size()); + uint32 endfilenum = currCG->GetMIRModule()->GetSrcFileInfo()[tempmaxsize - 1].second; + if (fSym->GetSrcPosition().FileNum() != 0 && fSym->GetSrcPosition().FileNum() <= endfilenum) { + Operand *o0 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().FileNum()); + int64_t lineNum = fSym->GetSrcPosition().LineNum(); + if (lineNum == 0) { + if (cgFunc.GetFunction().GetAttr(FUNCATTR_native)) { + lineNum = 0xffffe; + } else { + lineNum = 0xffffd; + } + } + Operand *o1 = cgFunc.CreateDbgImmOperand(lineNum); + Insn &loc = cgFunc.GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1); + cgFunc.GetCurBB()->AppendInsn(loc); + } + } else { + Operand *o0 = cgFunc.CreateDbgImmOperand(1); + Operand *o1 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().MplLineNum()); + Insn &loc = cgFunc.GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1); + cgFunc.GetCurBB()->AppendInsn(loc); + } + } + + const MapleVector ®sToSave = (!CGOptions::DoRegSavesOpt()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + if (!regsToSave.empty()) { + /* + * Among other things, push the FP & LR pair. + * FP/LR are added to the callee-saved list in AllocateRegisters() + * We add them to the callee-saved list regardless of UseFP() being true/false. 
+ * Activation Frame is allocated as part of pushing FP/LR pair + */ + GeneratePushRegs(); + } else { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (stackFrameSize > 0) { + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("allocate activation frame")); + } + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + + int32 offset = stackFrameSize; + (void)InsertCFIDefCfaOffset(offset, *(cgFunc.GetCurBB()->GetLastInsn())); + } + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("copy SP to FP")); + } + if (useFP) { + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn( + stackBaseReg, + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - argsToStkPassSize, + k64BitSize)); + } + } else { + aarchCGFunc.SelectCopy(fpOpnd, PTY_u64, spOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn( + cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa_register).AddOpndChain( + aarchCGFunc.CreateCfiRegOperand(stackBaseReg, k64BitSize))); + } + } + } + } + GeneratePushUnnamedVarargRegs(); + if (currCG->DoCheckSOE()) { + AppendInstructionStackCheck(R16, kRegTyInt, kSoeChckOffset); + } + bb.InsertAtBeginning(*aarchCGFunc.GetDummyBB()); + cgFunc.SetCurBB(*formerCurBB); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); +} + +void AArch64GenProEpilog::GenerateRet(BB &bb) { + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xret)); +} + +/* + * If all the preds of exitBB made the TailcallOpt(replace blr/bl with br/b), return true, we don't create ret insn. + * Otherwise, return false, create the ret insn. 
+ */
+bool AArch64GenProEpilog::TestPredsOfRetBB(const BB &exitBB) {
+    AArch64MemLayout *ml = static_cast<AArch64MemLayout*>(cgFunc.GetMemlayout());
+    if (cgFunc.GetMirModule().IsCModule() &&
+        (cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) ||
+         ml->GetSizeOfLocals() > 0 || cgFunc.HasVLAOrAlloca())) {
+        return false;
+    }
+    for (auto tmpBB : exitBB.GetPreds()) {
+        Insn *firstInsn = tmpBB->GetFirstInsn();
+        if ((firstInsn == nullptr || tmpBB->IsCommentBB()) && (!tmpBB->GetPreds().empty())) {
+            if (!TestPredsOfRetBB(*tmpBB)) {
+                return false;
+            }
+        } else {
+            Insn *lastInsn = tmpBB->GetLastInsn();
+            if (lastInsn == nullptr) {
+                return false;
+            }
+            MOperator insnMop = lastInsn->GetMachineOpcode();
+            if (insnMop != MOP_tail_call_opt_xbl && insnMop != MOP_tail_call_opt_xblr) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+void AArch64GenProEpilog::AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopSingle];
+    Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty);
+    Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast<uint32>(offset), GetPointerSize() * kBitsPerByte);
+    MemOperand *aarchMemO1 = static_cast<MemOperand*>(o1);
+    uint32 dataSize = GetPointerSize() * kBitsPerByte;
+    if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) {
+        o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R9);
+    }
+
+    Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o1);
+    popInsn.SetComment("RESTORE");
+    cgFunc.GetCurBB()->AppendInsn(popInsn);
+
+    /* Append CFI code. */
+    if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) {
+        cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(reg, k64BitSize));
+    }
+}
+
+void AArch64GenProEpilog::AppendInstructionPopPair(CGFunc &cgFunc,
+    AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair];
+    Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty);
+    Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty);
+    Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast<uint32>(offset), GetPointerSize() * kBitsPerByte);
+
+    uint32 dataSize = GetPointerSize() * kBitsPerByte;
+    CHECK_FATAL(offset >= 0, "offset must >= 0");
+    if (offset > kStpLdpImm64UpperBound) {
+        o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc,
+            static_cast<MemOperand&>(*o2), dataSize, R16);
+    }
+    Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2);
+    popInsn.SetComment("RESTORE RESTORE");
+    cgFunc.GetCurBB()->AppendInsn(popInsn);
+
+    /* Append CFI code */
+    if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) {
+        cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(reg0, k64BitSize));
+        cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(reg1, k64BitSize));
+    }
+}
+
+
+void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair];
+    Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty);
+    Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty);
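+    // Hedged illustration (added commentary, not in the original diff): in
+    // the common no-VLA case the pair restore below folds deallocation into
+    // one post-indexed load, e.g. for an assumed 96-byte frame:
+    //     ldp  x29, x30, [sp], #96
+    // Frames beyond the ldp immediate range take the ldp-add fallback that
+    // the stackFrameSize computation below selects.
+    int32 stackFrameSize = static_cast<int32>(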
static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + /* + * ldp/stp's imm should be within -512 and 504; + * if ldp's imm > 504, we fall back to the ldp-add version + */ + bool useLdpAdd = false; + int32 offset = 0; + + Operand *o2 = nullptr; + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), GetPointerSize() * kBitsPerByte); + } else { + if (stackFrameSize > kStpLdpImm64UpperBound) { + useLdpAdd = true; + offset = kOffset16MemPos; + stackFrameSize -= offset; + } else { + offset = stackFrameSize; + } + o2 = &aarchCGFunc.CreateCallFrameOperand(offset, GetPointerSize() * kBitsPerByte); + } + + if (useLdpAdd) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + if (cgFunc.GenCfi()) { + int64 cfiOffset = GetOffsetFromCFA(); + BB *curBB = cgFunc.GetCurBB(); + curBB->InsertInsnAfter(*(curBB->GetLastInsn()), + aarchCGFunc.CreateCfiDefCfaInsn(RSP, cfiOffset - stackFrameSize, k64BitSize)); + } + } + + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + CHECK_FATAL(!useLdpAdd, "Invalid assumption"); + if (argsToStkPassSize > kStpLdpImm64UpperBound) { + (void)AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, false); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + + if (cgFunc.GenCfi()) { + /* Append CFI restore */ + if (useFP) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize)); + } + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + /* + * ldp/stp's imm should be within -512 and 504; + * if ldp's imm > 504, we fall back to the ldp-add version + */ + bool isLmbc = (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc); + if (cgFunc.HasVLAOrAlloca() || argsToStkPassSize == 0 || isLmbc) { + int lmbcOffset = 0; + if (!isLmbc) { + stackFrameSize -= argsToStkPassSize; + } else { + lmbcOffset = argsToStkPassSize - (kDivide2 * k8ByteSize); + } + if (stackFrameSize > kStpLdpImm64UpperBound || isLmbc) { + Operand *o2; + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, (isLmbc ? 
lmbcOffset : 0), GetPointerSize() * kBitsPerByte); + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + if (cgFunc.GenCfi()) { + /* Append CFI restore */ + if (useFP) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize)); + } + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize)); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } else { + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, GetPointerSize() * kBitsPerByte); + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + if (cgFunc.GenCfi()) { + if (useFP) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize)); + } + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize)); + } + } + } else { + Operand *o2; + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), GetPointerSize() * kBitsPerByte); + if (argsToStkPassSize > kStpLdpImm64UpperBound) { + (void)AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, false); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + + if (cgFunc.GenCfi()) { + if (useFP) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize)); + } + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize)); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } +} + +void AArch64GenProEpilog::GeneratePopRegs() { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + + const MapleVector ®sToRestore = (!CGOptions::DoRegSavesOpt()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + + CHECK_FATAL(!regsToRestore.empty(), "FP/LR not added to callee-saved list?"); + + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("restore callee-saved registers")); + } + + MapleVector::const_iterator it = regsToRestore.begin(); + /* + * Even if we don't use FP, since we push a pair of registers + * in a single instruction (i.e., stp) and the stack needs be aligned + * on a 16-byte boundary, push FP as well if the function has a call. 
+ * Make sure this is reflected when computing calleeSavedRegs.size() + * skip the first two registers + */ + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + offset = static_cast((memLayout->RealStackFrameSize() - + aarchCGFunc.SizeOfCalleeSaved()) - memLayout->GetSizeOfLocals()); + } else { + offset = (static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))) - /* for FP/LR */ + memLayout->SizeOfArgsToStackPass(); + } + + if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { + offset -= kAarch64StackPtrAlignment; + } + + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + auto saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + + /* + * We are using a cleared dummy block; so insertPoint cannot be ret; + * see GenerateEpilog() + */ + for (; it != regsToRestore.end(); ++it) { + AArch64reg reg = *it; + CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); + + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + if (firstHalf == kRinvalid) { + /* remember it */ + firstHalf = reg; + } else { + /* flush the pair */ + AppendInstructionPopPair(cgFunc, firstHalf, reg, regType, offset); + GetNextOffsetCalleeSaved(offset); + firstHalf = kRinvalid; + } + } + + if (intRegFirstHalf != kRinvalid) { + AppendInstructionPopSingle(cgFunc, intRegFirstHalf, kRegTyInt, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (fpRegFirstHalf != kRinvalid) { + AppendInstructionPopSingle(cgFunc, fpRegFirstHalf, kRegTyFloat, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (!currCG->GenerateDebugFriendlyCode()) { + AppendInstructionDeallocateCallFrame(R29, RLR, kRegTyInt); + } else { + AppendInstructionDeallocateCallFrameDebug(R29, RLR, kRegTyInt); + } + + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(RSP, 0, k64BitSize)); + } + /* + * in case we split stp/ldp instructions, + * so that we generate a load-into-base-register instruction + * for the next function, maybe? (seems not necessary, but...) 
+ */
+    aarchCGFunc.SetSplitBaseOffset(0);
+}
+
+void AArch64GenProEpilog::AppendJump(const MIRSymbol &funcSymbol) {
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    Operand &targetOpnd = aarchCGFunc.GetOrCreateFuncNameOpnd(funcSymbol);
+    cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
+}
+
+void AArch64GenProEpilog::GenerateEpilog(BB &bb) {
+    if (!cgFunc.GetHasProEpilogue()) {
+        if (bb.GetPreds().empty() || !TestPredsOfRetBB(bb)) {
+            GenerateRet(bb);
+        }
+        return;
+    }
+
+    /* generate the stack-guard check instructions */
+    BB &epilogBB = GenStackGuardCheckInsn(bb);
+
+    auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    CG *currCG = cgFunc.GetCG();
+    BB *formerCurBB = cgFunc.GetCurBB();
+    aarchCGFunc.GetDummyBB()->ClearInsns();
+    aarchCGFunc.GetDummyBB()->SetIsProEpilog(true);
+    cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB());
+
+    Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
+    Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt);
+
+    if (cgFunc.HasVLAOrAlloca() && cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) {
+        aarchCGFunc.SelectCopy(spOpnd, PTY_u64, fpOpnd, PTY_u64);
+    }
+
+    /* Hack: the exit bb should always be reachable, since we need its existence for ".cfi_remember_state" */
+    if (&epilogBB != cgFunc.GetLastBB() && epilogBB.GetNext() != nullptr) {
+        BB *nextBB = epilogBB.GetNext();
+        do {
+            if (nextBB == cgFunc.GetLastBB() || !nextBB->IsEmpty()) {
+                break;
+            }
+            nextBB = nextBB->GetNext();
+        } while (nextBB != nullptr);
+        if (nextBB != nullptr && !nextBB->IsEmpty() && cgFunc.GenCfi()) {
+            cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_remember_state));
+            cgFunc.GetCurBB()->SetHasCfi();
+            nextBB->InsertInsnBefore(*nextBB->GetFirstInsn(),
+                                     cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_restore_state));
+            nextBB->SetHasCfi();
+        }
+    }
+
+    const MapleVector<AArch64reg> &regsToSave = (!CGOptions::DoRegSavesOpt()) ?
+        aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs();
+    if (!regsToSave.empty()) {
+        GeneratePopRegs();
+    } else {
+        auto stackFrameSize = static_cast<AArch64MemLayout*>(cgFunc.GetMemlayout())->RealStackFrameSize();
+        if (stackFrameSize > 0) {
+            if (currCG->GenerateVerboseCG()) {
+                cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("pop up activation frame"));
+            }
+
+            if (cgFunc.HasVLAOrAlloca()) {
+                auto size = static_cast<AArch64MemLayout*>(cgFunc.GetMemlayout())->GetSegArgsToStkPass().GetSize();
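+                // Added note (hedged, not in the original diff): with VLA or
+                // alloca, SP was already re-based from FP above, which sits
+                // above the outgoing-args segment, so that segment is already
+                // reclaimed; only the remainder of the frame is popped here,
+                // clamped at zero to avoid a negative adjustment.
+                stackFrameSize = stackFrameSize < size ? 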
0 : stackFrameSize - size; + } + + if (stackFrameSize > 0) { + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(RSP, 0, k64BitSize)); + } + } + } + } + + if (currCG->InstrumentWithDebugTraceCall()) { + AppendJump(*(currCG->GetDebugTraceExitFunction())); + } + + GenerateRet(*(cgFunc.GetCurBB())); + epilogBB.AppendBBInsns(*cgFunc.GetCurBB()); + if (cgFunc.GetCurBB()->GetHasCfi()) { + epilogBB.SetHasCfi(); + } + + cgFunc.SetCurBB(*formerCurBB); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); +} + +void AArch64GenProEpilog::GenerateEpilogForCleanup(BB &bb) { + auto &aarchCGFunc = static_cast(cgFunc); + CHECK_FATAL(!cgFunc.GetExitBBsVec().empty(), "exit bb size is zero!"); + if (cgFunc.GetExitBB(0)->IsUnreachable()) { + /* if exitbb is unreachable then exitbb can not be generated */ + GenerateEpilog(bb); + } else if (aarchCGFunc.NeedCleanup()) { /* bl to the exit epilogue */ + LabelOperand &targetOpnd = aarchCGFunc.GetOrCreateLabelOperand(cgFunc.GetExitBB(0)->GetLabIdx()); + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + } +} + + +void AArch64GenProEpilog::ConvertToTailCalls(MapleSet &callInsnsMap) { + BB *exitBB = GetCurTailcallExitBB(); + + /* ExitBB is filled only by now. If exitBB has restore of SP indicating extra stack space has + been allocated, such as a function call with more than 8 args, argument with large aggr etc */ + FOR_BB_INSNS(insn, exitBB) { + if (insn->GetMachineOpcode() == MOP_xaddrri12 || insn->GetMachineOpcode() == MOP_xaddrri24) { + RegOperand ® = static_cast(insn->GetOperand(0)); + if (reg.GetRegisterNumber() == RSP) { + return; + } + } + } + + /* Replace all of the call insns. 
*/ + for (Insn *callInsn : callInsnsMap) { + MOperator insnMop = callInsn->GetMachineOpcode(); + switch (insnMop) { + case MOP_xbl: { + callInsn->SetMOP(AArch64CG::kMd[MOP_tail_call_opt_xbl]); + break; + } + case MOP_xblr: { + callInsn->SetMOP(AArch64CG::kMd[MOP_tail_call_opt_xblr]); + break; + } + default: + CHECK_FATAL(false, "Internal error."); + break; + } + BB *bb = callInsn->GetBB(); + if (bb->GetKind() == BB::kBBGoto) { + bb->SetKind(BB::kBBFallthru); + if (bb->GetLastInsn()->GetMachineOpcode() == MOP_xuncond) { + bb->RemoveInsn(*bb->GetLastInsn()); + } + } + for (auto sBB: bb->GetSuccs()) { + bb->RemoveSuccs(*sBB); + sBB->RemovePreds(*bb); + break; + } + } + + /* copy instrs from exit block */ + for (Insn *callInsn: callInsnsMap) { + BB *toBB = callInsn->GetBB(); + BB *fromBB = exitBB; + if (toBB == fromBB) { + /* callsite also in the return exit block, just change the return to branch */ + Insn *lastInsn = toBB->GetLastInsn(); + if (lastInsn->GetMachineOpcode() == MOP_xret) { + Insn *newInsn = cgFunc.GetTheCFG()->CloneInsn(*callInsn); + toBB->ReplaceInsn(*lastInsn, *newInsn); + for (Insn *insn = callInsn->GetNextMachineInsn(); insn != newInsn; insn = insn->GetNextMachineInsn()) { + insn->SetDoNotRemove(true); + } + toBB->RemoveInsn(*callInsn); + return; + } + CHECK_FATAL(0, "Tailcall in incorrect block"); + } + FOR_BB_INSNS_SAFE(insn, fromBB, next) { + if (insn->IsCfiInsn() || (insn->IsMachineInstruction() && insn->GetMachineOpcode() != MOP_xret)) { + Insn *newInsn = cgFunc.GetTheCFG()->CloneInsn(*insn); + newInsn->SetDoNotRemove(true); + toBB->InsertInsnBefore(*callInsn, *newInsn); + } + } + } + + /* remove instrs in exit block */ + BB *bb = exitBB; + if (bb->GetPreds().size() > 0) { + return; /* exit block still needed by other non-tailcall blocks */ + } + Insn &junk = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); + bb->AppendInsn(junk); + FOR_BB_INSNS_SAFE(insn, bb, next) { + if (insn->GetMachineOpcode() != MOP_pseudo_none) { + bb->RemoveInsn(*insn); + } + } +} + +void AArch64GenProEpilog::Run() { + CHECK_FATAL(cgFunc.GetFunction().GetBody()->GetFirst()->GetOpCode() == OP_label, + "The first statement should be a label"); + NeedStackProtect(); + cgFunc.SetHasProEpilogue(NeedProEpilog()); + if (cgFunc.GetHasProEpilogue()) { + GenStackGuard(*(cgFunc.GetFirstBB())); + } + BB *proLog = nullptr; + if (cgFunc.GetCG()->DoPrologueEpilogue() && Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel2) { + /* There are some O2 dependent assumptions made */ + proLog = IsolateFastPath(*(cgFunc.GetFirstBB())); + } + + if (cgFunc.IsExitBBsVecEmpty()) { + if (cgFunc.GetLastBB()->GetPrev()->GetFirstStmt() == cgFunc.GetCleanupLabel() && + cgFunc.GetLastBB()->GetPrev()->GetPrev()) { + cgFunc.PushBackExitBBsVec(*cgFunc.GetLastBB()->GetPrev()->GetPrev()); + } else { + cgFunc.PushBackExitBBsVec(*cgFunc.GetLastBB()->GetPrev()); + } + } + + if (proLog != nullptr) { + GenerateProlog(*proLog); + proLog->SetFastPath(true); + cgFunc.GetFirstBB()->SetFastPath(true); + } else { + GenerateProlog(*(cgFunc.GetFirstBB())); + } + + for (auto *exitBB : cgFunc.GetExitBBsVec()) { + if (GetFastPathReturnBB() != exitBB) { + GenerateEpilog(*exitBB); + } + } + + if (cgFunc.GetFunction().IsJava()) { + GenerateEpilogForCleanup(*(cgFunc.GetCleanupBB())); + } + + if (cgFunc.GetMirModule().IsCModule() && !exitBB2CallSitesMap.empty()) { + cgFunc.GetTheCFG()->InitInsnVisitor(cgFunc); + for (auto pair : exitBB2CallSitesMap) { + BB *curExitBB = pair.first; + MapleSet& callInsnsMap = pair.second; + 
SetCurTailcallExitBB(curExitBB);
+            ConvertToTailCalls(callInsnsMap);
+        }
+    }
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d7d535d7187e2b3b7aa9441b35f7435539479f49
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp
@@ -0,0 +1,2415 @@
+/*
+ * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_prop.h"
+#include "aarch64_isa.h"
+#include "aarch64_cg.h"
+#include "aarch64_reg_coalesce.h"
+#include
+
+namespace maplebe {
+
+#define PROP_DUMP CG_DEBUG_FUNC(cgFunc)
+
+bool MayOverflow(const ImmOperand &value1, const ImmOperand &value2, bool is64Bit, bool isAdd, bool isSigned) {
+    if (value1.GetVary() || value2.GetVary()) {
+        return false;
+    }
+    int64 cstA = value1.GetValue();
+    int64 cstB = value2.GetValue();
+    if (isAdd) {
+        int64 res = static_cast<int64>(static_cast<uint64>(cstA) + static_cast<uint64>(cstB));
+        if (!isSigned) {
+            return static_cast<uint64>(res) < static_cast<uint64>(cstA);
+        }
+        auto rightShiftNumToGetSignFlag = (is64Bit ? 64 : 32) - 1;
+        return (static_cast<uint64>(res) >> rightShiftNumToGetSignFlag !=
+                static_cast<uint64>(cstA) >> rightShiftNumToGetSignFlag) &&
+               (static_cast<uint64>(res) >> rightShiftNumToGetSignFlag !=
+                static_cast<uint64>(cstB) >> rightShiftNumToGetSignFlag);
+    } else {
+        /* sub */
+        if (!isSigned) {
+            return cstA < cstB;
+        }
+        int64 res = static_cast<int64>(static_cast<uint64>(cstA) - static_cast<uint64>(cstB));
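+        // Added worked example (illustrative constants, not from the source):
+        // signed 32-bit subtraction overflows only when the operands differ
+        // in sign AND the result takes the sign of the subtrahend, e.g.
+        //     cstA = 0x7fffffff (+), cstB = -1 (-)  ->  res = 0x80000000 (-): overflow
+        //     cstA = 5, cstB = 3 (same sign)        ->  can never overflow
+        auto rightShiftNumToGetSignFlag = (is64Bit ? 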
64 : 32) - 1;
+        return (static_cast<uint64>(cstA) >> rightShiftNumToGetSignFlag !=
+                static_cast<uint64>(cstB) >> rightShiftNumToGetSignFlag) &&
+               (static_cast<uint64>(res) >> rightShiftNumToGetSignFlag !=
+                static_cast<uint64>(cstA) >> rightShiftNumToGetSignFlag);
+    }
+}
+
+bool AArch64Prop::IsInLimitCopyRange(VRegVersion *toBeReplaced) {
+    uint32 baseID = toBeReplaced->GetDefInsnInfo()->GetInsn()->GetId();
+    MapleUnorderedMap<uint32, DUInsnInfo*> &useList = toBeReplaced->GetAllUseInsns();
+    for (auto it : useList) {
+        if (it.second->GetInsn()->GetId() - baseID > k16BitSize) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void AArch64Prop::CopyProp() {
+    PropOptimizeManager optManager;
+    optManager.Optimize(*cgFunc, GetSSAInfo(), GetRegll());
+    optManager.Optimize(*cgFunc, GetSSAInfo());
+    optManager.Optimize(*cgFunc, GetSSAInfo());
+}
+
+void AArch64Prop::TargetProp(Insn &insn) {
+    A64ConstProp a64ConstProp(*memPool, *cgFunc, *GetSSAInfo(), insn);
+    a64ConstProp.DoOpt();
+    A64StrLdrProp a64StrLdrProp(*memPool, *cgFunc, *GetSSAInfo(), insn, *GetDce());
+    a64StrLdrProp.DoOpt();
+}
+
+void A64ConstProp::DoOpt() {
+    if (curInsn->GetMachineOpcode() == MOP_wmovri32 || curInsn->GetMachineOpcode() == MOP_xmovri64) {
+        Operand &destOpnd = curInsn->GetOperand(kInsnFirstOpnd);
+        CHECK_FATAL(destOpnd.IsRegister(), "must be reg operand");
+        auto &destReg = static_cast<RegOperand&>(destOpnd);
+        if (destReg.IsSSAForm()) {
+            VRegVersion *destVersion = ssaInfo->FindSSAVersion(destReg.GetRegisterNumber());
+            DEBUG_ASSERT(destVersion != nullptr, "find Version failed");
+            Operand &constOpnd = curInsn->GetOperand(kInsnSecondOpnd);
+            CHECK_FATAL(constOpnd.IsImmediate(), "must be imm operand");
+            auto &immOperand = static_cast<ImmOperand&>(constOpnd);
+            bool isZero = immOperand.IsZero();
+            for (auto useDUInfoIt : destVersion->GetAllUseInsns()) {
+                if (isZero) {
+                    ZeroRegProp(*useDUInfoIt.second, *destVersion->GetSSAvRegOpnd());
+                    destVersion->CheckDeadUse(*useDUInfoIt.second->GetInsn());
+                }
+                (void)ConstProp(*useDUInfoIt.second, immOperand);
+            }
+        }
+    }
+}
+
+void A64ConstProp::ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg) {
+    auto *useInsn = useDUInfo.GetInsn();
+    const InsnDesc *md = &AArch64CG::kMd[(useInsn->GetMachineOpcode())];
+    /* special case */
+    bool isSpecificCase = useInsn->GetMachineOpcode() == MOP_wbfirri5i5 || useInsn->GetMachineOpcode() == MOP_xbfirri6i6;
+    isSpecificCase &= (useDUInfo.GetOperands().size() == 1) && (useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd);
+    if (useInsn->IsStore() || md->IsCondDef() || isSpecificCase) {
+        RegOperand &zeroOpnd = cgFunc->GetZeroOpnd(toReplaceReg.GetSize());
+        for (auto &opndIt : useDUInfo.GetOperands()) {
+            if (useInsn->IsStore() && opndIt.first != 0) {
+                return;
+            }
+            Operand &opnd = useInsn->GetOperand(opndIt.first);
+            A64ReplaceRegOpndVisitor replaceRegOpndVisitor(*cgFunc, *useInsn,
+                opndIt.first, toReplaceReg, zeroOpnd);
+            opnd.Accept(replaceRegOpndVisitor);
+            useDUInfo.ClearDU(opndIt.first);
+        }
+    }
+}
+
+MOperator A64ConstProp::GetReversalMOP(MOperator arithMop) {
+    switch (arithMop) {
+        case MOP_waddrri12:
+            return MOP_wsubrri12;
+        case MOP_xaddrri12:
+            return MOP_xsubrri12;
+        case MOP_xsubrri12:
+            return MOP_xaddrri12;
+        case MOP_wsubrri12:
+            return MOP_waddrri12;
+        default:
+            CHECK_FATAL(false, "NYI");
+            break;
+    }
+    return MOP_undef;
+}
+
+MOperator A64ConstProp::GetRegImmMOP(MOperator regregMop, bool withLeftShift) {
+    switch (regregMop) {
+        case MOP_xaddrrrs:
+        case MOP_xaddrrr: {
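+            // Added illustration (hypothetical registers): this mapping lets
+            // a propagated constant turn the register form into the immediate
+            // form, e.g.
+            //     mov  x1, #8          // MOP_xmovri64
+            //     add  x0, x2, x1      // MOP_xaddrrr
+            // becomes
+            //     add  x0, x2, #8      // MOP_xaddrri12
+            return withLeftShift ? 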
MOP_xaddrri24 : MOP_xaddrri12; + } + case MOP_waddrrrs: + case MOP_waddrrr: { + return withLeftShift ? MOP_waddrri24 : MOP_waddrri12; + } + case MOP_xsubrrrs: + case MOP_xsubrrr: { + return withLeftShift ? MOP_xsubrri24 : MOP_xsubrri12; + } + case MOP_wsubrrrs: + case MOP_wsubrrr: { + return withLeftShift ? MOP_wsubrri24 : MOP_wsubrri12; + } + case MOP_xandrrrs: + return MOP_xandrri13; + case MOP_wandrrrs: + return MOP_wandrri12; + case MOP_xeorrrrs: + return MOP_xeorrri13; + case MOP_weorrrrs: + return MOP_weorrri12; + case MOP_xiorrrrs: + case MOP_xbfirri6i6: + return MOP_xiorrri13; + case MOP_wiorrrrs: + case MOP_wbfirri5i5: + return MOP_wiorrri12; + case MOP_xmovrr: { + return MOP_xmovri64; + } + case MOP_wmovrr: { + return MOP_wmovri32; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + return MOP_undef; +} + +MOperator A64ConstProp::GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn) { + MOperator arithMop = arithInsn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (arithMop) { + case MOP_waddrrr: + case MOP_xaddrrr: { + newVal = constVal + constVal; + newMop = (arithMop == MOP_waddrrr) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_waddrrrs: + case MOP_xaddrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch(sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal + static_cast((static_cast(constVal) << amount)); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal + (static_cast(constVal) >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal + (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_waddrrrs) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrr: + case MOP_xsubrrr: { + newVal = 0; + newMop = (arithMop == MOP_wsubrrr) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch (sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal - static_cast((static_cast(constVal) << amount)); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal - (static_cast(constVal) >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal - (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_wsubrrrs) ? 
MOP_wmovri32 : MOP_xmovri64; + break; + } + default: + DEBUG_ASSERT(false, "this case is not supported currently"); + break; + } + return newMop; +} + +void A64ConstProp::ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) const { + ssaInfo->ReplaceInsn(oriInsn, newInsn); + oriInsn.GetBB()->ReplaceInsn(oriInsn, newInsn); + /* dump insn replacement here */ +} + +bool A64ConstProp::MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); + if (constOpnd.IsSingleInstructionMovable(destOpnd.GetSize())) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + DEBUG_ASSERT(useOpndIdx == kInsnSecondOpnd, "invalid instruction in ssa form"); + if (useOpndIdx == kInsnSecondOpnd) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, destOpnd, constOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } else { + DEBUG_ASSERT(false, "invalid instruction in ssa form"); + } + return false; +} + +/* support add now */ +bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + MOperator newMop = GetRegImmMOP(curMop, false); + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + CHECK_FATAL(useOpndIdx == kInsnSecondOpnd || useOpndIdx == kInsnThirdOpnd, "check this insn"); + Insn *newInsn = nullptr; + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &constOpnd, kInsnThirdOpnd)) { + if (useOpndIdx == kInsnThirdOpnd) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnSecondOpnd), constOpnd); + } else if (useOpndIdx == kInsnSecondOpnd && aT == kAArch64Add) { /* swap operand due to legality in aarch */ + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnThirdOpnd), constOpnd); + } + } + /* try aggressive opt in aarch64 add and sub */ + if (newInsn == nullptr && (aT == kAArch64Add || aT == kAArch64Sub)) { + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + /* try aarch64 imm shift mode */ + tempImm->SetValue(tempImm->GetValue() >> 12); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd) && + CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) { + DEBUG_ASSERT(false, "NIY"); + } + auto *zeroImm = &(static_cast(cgFunc)->CreateImmOperand( + 0, constOpnd.GetSize(), true)); + /* value in immOpnd is signed */ + if (MayOverflow(*zeroImm, constOpnd, constOpnd.GetSize() == 64, false, true)) { + return false; + } + /* (constA - var) can not reversal to (var + (-constA)) */ + if (useOpndIdx == kInsnSecondOpnd && aT == kAArch64Sub) { + return false; + } + /* Addition and subtraction reversal */ + tempImm->SetValue(-constOpnd.GetValue()); + newMop = GetReversalMOP(newMop); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd)) { + auto *cgImm = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnSecondOpnd), *cgImm); + if 
(useOpndIdx == kInsnSecondOpnd) { /* swap operand due to legality in aarch */ + newInsn->SetOperand(kInsnSecondOpnd, useInsn->GetOperand(kInsnThirdOpnd)); + } + } + } + if (newInsn != nullptr) { + ReplaceInsnAndUpdateSSA(*useInsn, *newInsn); + return true; + } + } else if (useDUInfo.GetOperands().size() == 2) { + /* only support add & sub now */ + int64 newValue = 0; + MOperator newMop = GetFoldMopAndVal(newValue, constOpnd.GetValue(), *useInsn); + bool isSigned = (newValue < 0); + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + tempImm->SetValue(newValue); + tempImm->SetSigned(isSigned); + if (tempImm->IsSingleInstructionMovable()) { + auto *newImmOpnd = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + auto &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), *newImmOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } else { + CHECK_FATAL(false, "invalid immediate"); + } + } else { + DEBUG_ASSERT(false, "invalid instruction in ssa form"); + } + return false; +} + +bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, + ArithmeticType aT) { + Insn *useInsn = useDUInfo.GetInsn(); + if (useDUInfo.GetOperands().size() == 1) { + Operand &existedImm = useInsn->GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(existedImm.IsImmediate(), "must be"); + Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); + bool is64Bit = destOpnd.GetSize() == k64BitSize; + ImmOperand *foldConst = CanDoConstFold(constOpnd, static_cast(existedImm), aT, is64Bit); + if (foldConst != nullptr) { + MOperator newMop = is64Bit ? MOP_xmovri64 : MOP_wmovri32; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, destOpnd, *foldConst); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + return false; +} + +bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnThirdOpnd) { + auto &shiftBit = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + int64 val = constOpnd.GetValue(); + if (shiftBit.GetShiftOp() == BitShiftOperand::kLSL) { + val = val << shiftBit.GetShiftAmount(); + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kLSR) { + val = val >> shiftBit.GetShiftAmount(); + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kASR) { + val = static_cast((static_cast(val)) >> shiftBit.GetShiftAmount()); + } else { + CHECK_FATAL(false, "shift type is not defined"); + } + auto *newImm = static_cast(constOpnd.Clone(*constPropMp)); + newImm->SetValue(val); + MOperator newMop = GetRegImmMOP(curMop, false); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, newImm, kInsnThirdOpnd)) { + auto *cgNewImm = static_cast(constOpnd.Clone(*cgFunc->GetMemoryPool())); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnSecondOpnd), *cgNewImm); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + return false; +} + +bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { + MOperator curMop = useDUInfo.GetInsn()->GetMachineOpcode(); + switch (curMop) { + case MOP_xmovrr: + case MOP_wmovrr: { + return MovConstReplace(useDUInfo, constOpnd); + } + case MOP_xsubrrr: + case MOP_wsubrrr: { + return 
ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Sub); + } + case MOP_xaddrrr: + case MOP_waddrrr: { + return ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Add); + } + case MOP_waddrri12: + case MOP_xaddrri12: { + return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Add); + } + case MOP_xsubrri12: + case MOP_wsubrri12: { + return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Sub); + } + case MOP_xiorrrrs: + case MOP_wiorrrrs: + case MOP_xeorrrrs: + case MOP_weorrrrs: + case MOP_xandrrrs: + case MOP_wandrrrs: + case MOP_xaddrrrs: + case MOP_waddrrrs: + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + return ShiftConstReplace(useDUInfo, constOpnd); + } + case MOP_wbfirri5i5: + case MOP_xbfirri6i6: { + return BitInsertReplace(useDUInfo, constOpnd); + } + default: + break; + } + return false; +} + +bool A64ConstProp::BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnSecondOpnd) { + auto &lsbOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); + auto &widthOpnd = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + auto val = static_cast(constOpnd.GetValue()); + /* bfi width in the range [1 -64] */ + auto width = static_cast(widthOpnd.GetValue()); + /* bit number of the lsb of the destination bitfield */ + auto lsb = static_cast(lsbOpnd.GetValue()); + val = val & ((1U << width) - 1U); + if (__builtin_popcountl(val) == width) { + val = val << lsb; + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &newOpnd = cgFunc->CreateImmOperand(PTY_i64, static_cast(val)); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &newOpnd, kInsnThirdOpnd)) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnFirstOpnd), newOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + } + return false; +} + +ImmOperand *A64ConstProp::CanDoConstFold( + const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit) { + auto *tempImm = static_cast(value1.Clone(*constPropMp)); + int64 newVal = 0; + bool isSigned = value1.IsSignedValue(); + if (value1.IsSignedValue() != value2.IsSignedValue()) { + isSigned = false; + } + if (MayOverflow(value1, value2, is64Bit, aT == kAArch64Add, isSigned)) { + return nullptr; + } + switch (aT) { + case kAArch64Add : { + newVal = value1.GetValue() + value2.GetValue(); + break; + } + case kAArch64Sub : { + newVal = value1.GetValue() - value2.GetValue(); + break; + } + default: + return nullptr; + } + if (!is64Bit && isSigned && (newVal > INT_MAX || newVal < INT_MIN)) { + return nullptr; + } + if (!is64Bit && !isSigned && (newVal > UINT_MAX || newVal < 0)) { + return nullptr; + } + if (newVal < 0) { + tempImm->SetSigned(); + } + tempImm->SetValue(newVal); + if (value2.GetVary() == kUnAdjustVary) { + tempImm->SetVary(kUnAdjustVary); + } + bool canBeMove = tempImm->IsSingleInstructionMovable(k64BitSize); + return canBeMove ? 
static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())) : nullptr; +} + +void A64StrLdrProp::DoOpt() { + DEBUG_ASSERT(curInsn != nullptr, "not input insn"); + bool tryOptAgain = false; + do { + tryOptAgain = false; + MemOperand *currMemOpnd = StrLdrPropPreCheck(*curInsn); + if (currMemOpnd != nullptr && memPropMode != kUndef) { + /* can be changed to recursive propagation */ + if (ReplaceMemOpnd(*currMemOpnd, nullptr)) { + tryOptAgain = true; + } + replaceVersions.clear(); + } + } while (tryOptAgain); +} + +bool A64StrLdrProp::ReplaceMemOpnd(const MemOperand &currMemOpnd, const Insn *defInsn) { + auto GetDefInsn = [&defInsn, this](const RegOperand ®Opnd, std::vector &allUseInsns)->void { + if (regOpnd.IsSSAForm() && defInsn == nullptr) { + VRegVersion *replacedV = ssaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + if (replacedV->GetDefInsnInfo() != nullptr) { + for (auto it : replacedV->GetAllUseInsns()) { + allUseInsns.emplace_back(it.second->GetInsn()); + } + defInsn = replacedV->GetDefInsnInfo()->GetInsn(); + } + } + }; + RegOperand *replacedReg = nullptr; + std::vector allUseInsns; + std::vector newMemOpnds; + bool doFullReplaceProp = true; /* due to register pressure, do not do partial prop */ + if (memPropMode == kPropBase) { + replacedReg = currMemOpnd.GetBaseRegister(); + } else { + Operand *offset = currMemOpnd.GetOffset(); + DEBUG_ASSERT(offset->IsRegister(), "must be"); + replacedReg = static_cast(offset); + } + CHECK_FATAL(replacedReg != nullptr, "check this insn"); + GetDefInsn(*replacedReg, allUseInsns); + if (defInsn != nullptr) { + for (auto useInsn : allUseInsns) { + MemOperand *oldMemOpnd = StrLdrPropPreCheck(*useInsn, memPropMode); + if (CheckSameReplace(*replacedReg, oldMemOpnd)) { + MemOperand *newMemOpnd = SelectReplaceMem(*defInsn, *oldMemOpnd); + if (newMemOpnd != nullptr) { + uint32 opndIdx = GetMemOpndIdx(oldMemOpnd, *useInsn); + if (CheckNewMemOffset(*useInsn, newMemOpnd, opndIdx)) { + newMemOpnds.emplace_back(newMemOpnd); + continue; + } + } + } + doFullReplaceProp = false; + break; + } + } else { + doFullReplaceProp = false; + } + if (doFullReplaceProp) { + for (size_t i = 0; i < newMemOpnds.size(); ++i) { + DoMemReplace(*replacedReg, *newMemOpnds[i], *allUseInsns[i]); + } + return true; + } + return false; +} + +bool A64StrLdrProp::CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) const { + if (memOpnd != nullptr && memPropMode != kUndef) { + if (memPropMode == kPropBase) { + return replacedReg.GetRegisterNumber() == memOpnd->GetBaseRegister()->GetRegisterNumber(); + } else { + Operand *offset = memOpnd->GetOffset(); + DEBUG_ASSERT(offset->IsRegister(), "must be"); + return replacedReg.GetRegisterNumber() == static_cast(offset)->GetRegisterNumber(); + } + } + return false; +} + +uint32 A64StrLdrProp::GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) const { + uint32 opndIdx = kInsnMaxOpnd; + if (insn.IsLoadPair() || insn.IsStorePair()) { + DEBUG_ASSERT(newMemOpnd->GetOffsetImmediate() != nullptr, "unexpect insn"); + opndIdx = kInsnThirdOpnd; + } else { + opndIdx = kInsnSecondOpnd; + } + return opndIdx; +} + +void A64StrLdrProp::DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn) { + VRegVersion *replacedV = ssaInfo->FindSSAVersion(replacedReg.GetRegisterNumber()); + DEBUG_ASSERT(replacedV != nullptr, "must in ssa form"); + uint32 opndIdx = GetMemOpndIdx(&newMem, useInsn); + replacedV->RemoveUseInsn(useInsn, opndIdx); + if (replacedV->GetAllUseInsns().empty()) { + 
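The A64StrLdrProp code around here folds the defining instruction of an address register into the memory operand of each load/store use. Below is a minimal, self-contained sketch of that rewrite with invented toy types (not the MapleBE API); the encodable offset range is illustrative, where the real pass asks `IsOperandImmValid` instead:

```
// Toy model: fold "add x1, x0, #8" into the use "ldr w2, [x1, #4]",
// producing "ldr w2, [x0, #12]".
#include <cstdint>
#include <iostream>
#include <optional>

struct MemOpnd { int base; int64_t ofst; };           // [base, #ofst]
struct AddImmDef { int dst; int src; int64_t imm; };  // dst = src + imm

// Fold an add-immediate def into a memory operand whose base it defines;
// bail out if the def does not feed this address or the folded offset is
// no longer encodable.
std::optional<MemOpnd> FoldAddIntoMem(const MemOpnd &mem, const AddImmDef &def,
                                      int64_t minOfst, int64_t maxOfst) {
  if (mem.base != def.dst) {
    return std::nullopt;
  }
  int64_t newOfst = mem.ofst + def.imm;
  if (newOfst < minOfst || newOfst > maxOfst) {
    return std::nullopt;
  }
  return MemOpnd{def.src, newOfst};
}

int main() {
  AddImmDef def{/*dst=*/1, /*src=*/0, /*imm=*/8};  // add x1, x0, #8
  MemOpnd use{/*base=*/1, /*ofst=*/4};             // ldr w2, [x1, #4]
  if (auto folded = FoldAddIntoMem(use, def, -256, 255)) {
    std::cout << "ldr w2, [x" << folded->base << ", #" << folded->ofst << "]\n";
  }
  return 0;
}
```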
(void)cgDce->RemoveUnuseDef(*replacedV); + } + for (auto &replaceit : replaceVersions) { + replaceit.second->AddUseInsn(*ssaInfo, useInsn, opndIdx); + } + useInsn.SetOperand(opndIdx, newMem); +} + +MemOperand *A64StrLdrProp::StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod) { + memPropMode = kUndef; + if (insn.IsLoad() || insn.IsStore()) { + if (insn.IsAtomic()) { + return nullptr; + } + auto *currMemOpnd = static_cast(insn.GetMemOpnd()); + if (currMemOpnd != nullptr) { + memPropMode = SelectStrLdrPropMode(*currMemOpnd); + if (prevMod != kUndef) { + if (prevMod != memPropMode) { + memPropMode = prevMod; + return nullptr; + } + } + return currMemOpnd; + } + } + return nullptr; +} + +MemPropMode A64StrLdrProp::SelectStrLdrPropMode(const MemOperand &currMemOpnd) { + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); + MemPropMode innerMemPropMode = kUndef; + switch (currAddrMode) { + case MemOperand::kAddrModeBOi: { + if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { + innerMemPropMode = kPropBase; + } + break; + } + case MemOperand::kAddrModeBOrX: { + innerMemPropMode = kPropOffset; + auto amount = currMemOpnd.ShiftAmount(); + if (currMemOpnd.GetExtendAsString() == "LSL") { + if (amount != 0) { + innerMemPropMode = kPropShift; + } + break; + } else if (currMemOpnd.SignedExtend()) { + innerMemPropMode = kPropSignedExtend; + } else if (currMemOpnd.UnsignedExtend()) { + innerMemPropMode = kPropUnsignedExtend; + } + break; + } + default: + innerMemPropMode = kUndef; + } + return innerMemPropMode; +} + +MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const MemOperand &currMemOpnd) { + MemOperand *newMemOpnd = nullptr; + Operand *offset = currMemOpnd.GetOffset(); + RegOperand *base = currMemOpnd.GetBaseRegister(); + MOperator opCode = defInsn.GetMachineOpcode(); + switch (opCode) { + case MOP_xsubrri12: + case MOP_wsubrri12: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = -(immOpnd.GetValue()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrri12: + case MOP_waddrri12: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = immOpnd.GetValue(); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrri24: + case MOP_waddrri24: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + int64 defVal = (immOpnd.GetValue() << shiftOpnd.GetShiftAmount()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xsubrri24: + case MOP_wsubrri24: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + int64 defVal = 
-(immOpnd.GetValue() << shiftOpnd.GetShiftAmount()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_dadd: + case MOP_sadd: { + if (memPropMode == kPropBase) { + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnThirdOpnd))); + + if (replace != nullptr && newOfst != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *replace, newOfst, nullptr, nullptr); + } + } + break; + } + case MOP_xaddrrrs: + case MOP_waddrrrs: { + if (memPropMode == kPropBase) { + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newBaseOpnd = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *newIndexOpnd = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnThirdOpnd))); + auto &shift = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + if (shift.GetShiftOp() != BitShiftOperand::kLSL) { + break; + } + if (newBaseOpnd != nullptr && newIndexOpnd != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *newBaseOpnd, *newIndexOpnd, + shift.GetShiftAmount(), false); + } + } + break; + } + case MOP_xadrpl12: { + if (memPropMode == kPropBase) { + if (currMemOpnd.GetSize() >= 128) { + // We can not be sure that the page offset is 16-byte aligned + break; + } + auto *ofstOpnd = static_cast(offset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); + int64 val = ofstOpnd->GetValue(); + auto *offset1 = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); + val += offset1->GetOffset(); + OfstOperand *newOfsetOpnd = &static_cast(cgFunc)->CreateOfstOpnd(static_cast(val), + k32BitSize); + CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); + const MIRSymbol *addr = offset1->GetSymbol(); + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && addr->IsReadOnly()) { + break; + } + RegOperand *replace = GetReplaceReg( + static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeLo12Li, currMemOpnd.GetSize(), *replace, nullptr, newOfsetOpnd, addr); + } + } + break; + } + /* do this in const prop ? 
*/ + case MOP_wmovri32: + case MOP_xmovri64: { + if (memPropMode == kPropOffset) { + auto *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc)->CreateOfstOpnd( + static_cast(imm->GetValue()), k32BitSize); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOi, currMemOpnd.GetSize(), *base, nullptr, newOffset, nullptr); + } + break; + } + case MOP_xlslrri6: + case MOP_wlslrri5: { + auto *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (newOfst != nullptr) { + uint32 shift = static_cast(static_cast(imm->GetValue())); + if (memPropMode == kPropOffset) { + if (shift < k4ByteSize) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + } + } else if (memPropMode == kPropShift) { + shift += currMemOpnd.ShiftAmount(); + if (shift < k4ByteSize) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + } + } + } + break; + } + case MOP_xsxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, *base, static_cast(currMemOpnd.ShiftAmount()), + true, currMemOpnd.GetSize()); + break; + } + case MOP_xuxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, *base, static_cast(currMemOpnd.ShiftAmount()), + false, currMemOpnd.GetSize()); + break; + } + default: + break; + } + return newMemOpnd; +} + +RegOperand *A64StrLdrProp::GetReplaceReg(RegOperand &a64Reg) { + if (a64Reg.IsSSAForm()) { + regno_t ssaIndex = a64Reg.GetRegisterNumber(); + replaceVersions[ssaIndex] = ssaInfo->FindSSAVersion(ssaIndex); + DEBUG_ASSERT(replaceVersions.size() <= 2, "CHECK THIS CASE IN A64PROP"); + return &a64Reg; + } + return nullptr; +} + +MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, + int64 defVal, uint32 memSize) const { + if (memPropMode != kPropBase) { + return nullptr; + } + OfstOperand *newOfstImm = nullptr; + if (oldOffset == nullptr) { + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd(static_cast(defVal), k32BitSize); + } else { + auto *ofstOpnd = static_cast(oldOffset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd( + static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); + } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + return static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOi, memSize, + replace, nullptr, newOfstImm, nullptr); +} + +MemOperand *A64StrLdrProp::SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount, + bool isSigned, uint32 memSize) { + MemOperand *newMemOpnd = nullptr; + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (newOfst == nullptr) { + return nullptr; + } + /* defInsn is extend, currMemOpnd is same extend or shift */ + bool propExtend = (memPropMode == kPropShift) || ((memPropMode == kPropSignedExtend) && isSigned) || + ((memPropMode == kPropUnsignedExtend) && !isSigned); + if (memPropMode == kPropOffset) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, memSize, base, *newOfst, 0, isSigned); + } else if (propExtend) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, memSize, base, *newOfst, amount, isSigned); + } else { + return nullptr; + } + return 
newMemOpnd; +} + +bool A64StrLdrProp::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) const { + auto *a64CgFunc = static_cast(cgFunc); + if ((newMemOpnd->GetOffsetImmediate() != nullptr) && + !a64CgFunc->IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { + return false; + } + auto newAmount = static_cast(newMemOpnd->ShiftAmount()); + if (!AArch64StoreLoadOpt::CheckNewAmount(insn, newAmount)) { + return false; + } + /* is ldp or stp, addrMode must be BOI */ + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { + return false; + } + return true; +} + +void AArch64Prop::PropPatternOpt() { + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); +} + +bool ExtendShiftPattern::IsSwapInsn(const Insn &insn) const { + MOperator op = insn.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_xiorrrr: + case MOP_wiorrrr: + return true; + default: + return false; + } +} + +void ExtendShiftPattern::SetExMOpType(const Insn &use) { + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xxwaddrrre: + case MOP_xaddrrrs: { + exMOpType = kExAdd; + break; + } + case MOP_waddrrr: + case MOP_wwwaddrrre: + case MOP_waddrrrs: { + exMOpType = kEwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xxwsubrrre: + case MOP_xsubrrrs: { + exMOpType = kExSub; + break; + } + case MOP_wsubrrr: + case MOP_wwwsubrrre: + case MOP_wsubrrrs: { + exMOpType = kEwSub; + break; + } + case MOP_xcmnrr: + case MOP_xwcmnrre: + case MOP_xcmnrrs: { + exMOpType = kExCmn; + break; + } + case MOP_wcmnrr: + case MOP_wwcmnrre: + case MOP_wcmnrrs: { + exMOpType = kEwCmn; + break; + } + case MOP_xcmprr: + case MOP_xwcmprre: + case MOP_xcmprrs: { + exMOpType = kExCmp; + break; + } + case MOP_wcmprr: + case MOP_wwcmprre: + case MOP_wcmprrs: { + exMOpType = kEwCmp; + break; + } + default: { + exMOpType = kExUndef; + } + } +} + +void ExtendShiftPattern::SetLsMOpType(const Insn &use) { + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xaddrrrs: { + lsMOpType = kLxAdd; + break; + } + case MOP_waddrrr: + case MOP_waddrrrs: { + lsMOpType = kLwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xsubrrrs: { + lsMOpType = kLxSub; + break; + } + case MOP_wsubrrr: + case MOP_wsubrrrs: { + lsMOpType = kLwSub; + break; + } + case MOP_xcmnrr: + case MOP_xcmnrrs: { + lsMOpType = kLxCmn; + break; + } + case MOP_wcmnrr: + case MOP_wcmnrrs: { + lsMOpType = kLwCmn; + break; + } + case MOP_xcmprr: + case MOP_xcmprrs: { + lsMOpType = kLxCmp; + break; + } + case MOP_wcmprr: + case MOP_wcmprrs: { + lsMOpType = kLwCmp; + break; + } + case MOP_xeorrrr: + case MOP_xeorrrrs: { + lsMOpType = kLxEor; + break; + } + case MOP_weorrrr: + case MOP_weorrrrs: { + lsMOpType = kLwEor; + break; + } + case MOP_xinegrr: + case MOP_xinegrrs: { + lsMOpType = kLxNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_winegrr: + case MOP_winegrrs: { + lsMOpType = kLwNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_xiorrrr: + case MOP_xiorrrrs: { + lsMOpType = kLxIor; + break; + } + case MOP_wiorrrr: + case MOP_wiorrrrs: { + lsMOpType = kLwIor; + break; + } + default: { + lsMOpType = kLsUndef; + } + } +} + +void ExtendShiftPattern::SelectExtendOrShift(const Insn &def) { + MOperator op = def.GetMachineOpcode(); + switch (op) { + case 
MOP_xsxtb32:
+        case MOP_xsxtb64: extendOp = ExtendShiftOperand::kSXTB;
+            break;
+        case MOP_xsxth32:
+        case MOP_xsxth64: extendOp = ExtendShiftOperand::kSXTH;
+            break;
+        case MOP_xsxtw64: extendOp = ExtendShiftOperand::kSXTW;
+            break;
+        case MOP_xuxtb32: extendOp = ExtendShiftOperand::kUXTB;
+            break;
+        case MOP_xuxth32: extendOp = ExtendShiftOperand::kUXTH;
+            break;
+        case MOP_xuxtw64: extendOp = ExtendShiftOperand::kUXTW;
+            break;
+        case MOP_wlslrri5:
+        case MOP_xlslrri6: shiftOp = BitShiftOperand::kLSL;
+            break;
+        case MOP_xlsrrri6:
+        case MOP_wlsrrri5: shiftOp = BitShiftOperand::kLSR;
+            break;
+        case MOP_xasrrri6:
+        case MOP_wasrrri5: shiftOp = BitShiftOperand::kASR;
+            break;
+        default: {
+            extendOp = ExtendShiftOperand::kUndef;
+            shiftOp = BitShiftOperand::kUndef;
+        }
+    }
+}
+
+/* Optimize ExtendShiftPattern:
+ * ===========================================================
+ *            nosuffix |  LSL  |  LSR  |  ASR  |    extrn    (def)
+ * nosuffix |    F     |  LSL  |  LSR  |  ASR  |    extrn    |
+ * LSL      |    F     |  LSL  |   F   |   F   |    extrn    |
+ * LSR      |    F     |   F   |  LSR  |   F   |      F      |
+ * ASR      |    F     |   F   |   F   |  ASR  |      F      |
+ * exten    |    F     |   F   |   F   |   F   | exten(self) |
+ * (use)
+ * ===========================================================
+ */
+constexpr uint32 kExtenAddShiftNum = 5;
+ExtendShiftPattern::SuffixType optTable[kExtenAddShiftNum][kExtenAddShiftNum] = {
+    { ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kLSL, ExtendShiftPattern::kLSR,
+      ExtendShiftPattern::kASR, ExtendShiftPattern::kExten },
+    { ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kLSL, ExtendShiftPattern::kNoSuffix,
+      ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kExten },
+    { ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kLSR,
+      ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix },
+    { ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix,
+      ExtendShiftPattern::kASR, ExtendShiftPattern::kNoSuffix },
+    { ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix,
+      ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kExten }
+};
+
+/* Check whether ExtendShiftPattern optimization can be performed. */
+ExtendShiftPattern::SuffixType ExtendShiftPattern::CheckOpType(const Operand &lastOpnd) const {
+    /* Assign values to useType and defType. 
*/
+    uint32 useType = ExtendShiftPattern::kNoSuffix;
+    uint32 defType = shiftOp;
+    if (extendOp != ExtendShiftOperand::kUndef) {
+        defType = ExtendShiftPattern::kExten;
+    }
+    if (lastOpnd.IsOpdShift()) {
+        const auto &lastShiftOpnd = static_cast<const BitShiftOperand&>(lastOpnd);
+        useType = lastShiftOpnd.GetShiftOp();
+    } else if (lastOpnd.IsOpdExtend()) {
+        const auto &lastExtendOpnd = static_cast<const ExtendShiftOperand&>(lastOpnd);
+        useType = ExtendShiftPattern::kExten;
+        /* both insns are exten: the result is exten only for the same extend kind */
+        if (useType == defType && extendOp != lastExtendOpnd.GetExtendOp()) {
+            return ExtendShiftPattern::kNoSuffix;
+        }
+    }
+    return optTable[useType][defType];
+}
+
+constexpr uint32 kExMopTypeSize = 9;
+constexpr uint32 kLsMopTypeSize = 15;
+
+MOperator exMopTable[kExMopTypeSize] = {
+    MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre,
+    MOP_xwcmnrre, MOP_wwcmnrre, MOP_xwcmprre, MOP_wwcmprre
+};
+MOperator lsMopTable[kLsMopTypeSize] = {
+    MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs,
+    MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs,
+    MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs
+};
+/* new Insn extenType:
+ * ==============================
+ * (useMop)   (defMop)   (newMop)
+ * | nosuffix |   all   |  all  |
+ * | exten    |   ex    |  ex   |
+ * | ls       |   ex    |  ls   |
+ * | asr      |  !asr   |   F   |
+ * | !asr     |   asr   |   F   |
+ * ==============================
+ */
+void ExtendShiftPattern::ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount) {
+    AArch64CGFunc &a64CGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    uint32 lastIdx = use.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = use.GetOperand(lastIdx);
+    ExtendShiftPattern::SuffixType optType = CheckOpType(lastOpnd);
+    Operand *shiftOpnd = nullptr;
+    if (optType == ExtendShiftPattern::kNoSuffix) {
+        return;
+    } else if (optType == ExtendShiftPattern::kExten) {
+        replaceOp = exMopTable[exMOpType];
+        if (amount > k4BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateExtendShiftOperand(extendOp, amount, static_cast<int32>(k64BitSize));
+    } else {
+        replaceOp = lsMopTable[lsMOpType];
+        if (amount >= k32BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateBitShiftOperand(shiftOp, amount, static_cast<int32>(k64BitSize));
+    }
+    if (replaceOp == MOP_undef) {
+        return;
+    }
+
+    Insn *replaceUseInsn = nullptr;
+    Operand &firstOpnd = use.GetOperand(kInsnFirstOpnd);
+    Operand *secondOpnd = &use.GetOperand(kInsnSecondOpnd);
+    if (replaceIdx == kInsnSecondOpnd) { /* replace neg insn */
+        secondOpnd = &def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, *shiftOpnd);
+    } else {
+        Operand &thirdOpnd = def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, thirdOpnd, *shiftOpnd);
+    }
+    use.GetBB()->ReplaceInsn(use, *replaceUseInsn);
+    if (PROP_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "=======ReplaceInsn :\n";
+        use.Dump();
+        LogInfo::MapleLogger() << "=======NewInsn :\n";
+        replaceUseInsn->Dump();
+    }
+    /* update ssa info */
+    optSsaInfo->ReplaceInsn(use, *replaceUseInsn);
+    newInsn = replaceUseInsn;
+    optSuccess = true;
+}
+
+/*
+ * pattern1:
+ * UXTB/UXTW X0, W1 <---- def x0
+ * .... 
<---- (X0 not used) + * AND/SUB/EOR X0, X1, X0 <---- use x0 + * ======> + * AND/SUB/EOR X0, X1, W1 UXTB/UXTW + * + * pattern2: + * LSL/LSR X0, X1, #8 + * ....(X0 not used) + * AND/SUB/EOR X0, X1, X0 + * ======> + * AND/SUB/EOR X0, X1, X1 LSL/LSR #8 + */ +void ExtendShiftPattern::Optimize(Insn &insn) { + uint32 amount = 0; + uint32 offset = 0; + uint32 lastIdx = insn.GetOperandSize() - k1BitSize; + Operand &lastOpnd = insn.GetOperand(lastIdx); + if (lastOpnd.IsOpdShift()) { + auto &lastShiftOpnd = static_cast(lastOpnd); + amount = lastShiftOpnd.GetShiftAmount(); + } else if (lastOpnd.IsOpdExtend()) { + auto &lastExtendOpnd = static_cast(lastOpnd); + amount = lastExtendOpnd.GetShiftAmount(); + } + if (shiftOp != BitShiftOperand::kUndef) { + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + offset = static_cast(immOpnd.GetValue()); + } + amount += offset; + + ReplaceUseInsn(insn, *defInsn, amount); +} + +void ExtendShiftPattern::DoExtendShiftOpt(Insn &insn) { + if (!CheckAllOpndCondition(insn)) { + return; + } + Optimize(*curInsn); + if (optSuccess) { + DoExtendShiftOpt(*newInsn); + } +} + +void ExtendShiftPattern::SwapOpnd(Insn &insn) { + Insn *swapInsn = &cgFunc.GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), + insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnThirdOpnd), + insn.GetOperand(kInsnSecondOpnd)); + insn.GetBB()->ReplaceInsn(insn, *swapInsn); + optSsaInfo->ReplaceInsn(insn, *swapInsn); + curInsn = swapInsn; + replaceIdx = kInsnThirdOpnd; +} + +bool ExtendShiftPattern::CheckAllOpndCondition(Insn &insn) { + Init(); + SetLsMOpType(insn); + SetExMOpType(insn); + curInsn = &insn; + if (IsSwapInsn(insn)) { + if (CheckCondition(insn)) { + return true; + } + Init(); + SetLsMOpType(insn); + SetExMOpType(insn); + replaceIdx = kInsnSecondOpnd; + if (CheckCondition(insn)) { + SwapOpnd(insn); + return true; + } + } else { + return CheckCondition(insn); + } + return false; +} + +/* check and set: + * exMOpType, lsMOpType, extendOp, shiftOp, defInsn + */ +bool ExtendShiftPattern::CheckCondition(Insn &insn) { + if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) { + return false; + } + auto ®Operand = static_cast(insn.GetOperand(replaceIdx)); + regno_t regNo = regOperand.GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(regNo); + defInsn = FindDefInsn(useVersion); + if (!defInsn || (useVersion->GetAllUseInsns().size() > 1)) { + return false; + } + SelectExtendOrShift(*defInsn); + /* defInsn must be shift or extend */ + if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) { + return false; + } + Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!"); + auto ®DefSrc = static_cast(defSrcOpnd); + if (regDefSrc.IsPhysicalRegister()) { + return false; + } + /* + * has Implict cvt + * + * avoid cases as following: + * lsr x2, x2, #8 + * ubfx w2, x2, #0, #32 lsr x2, x2, #8 + * eor w0, w0, w2 ===> eor w0, w0, x2 ==\=> eor w0, w0, w2, LSR #8 + * + * the truncation causes the wrong value by shift right + * shift left does not matter + */ + if (useVersion->HasImplicitCvt() && shiftOp != BitShiftOperand::kUndef) { + return false; + } + if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && + (defSrcOpnd.GetSize() > regOperand.GetSize())) { + return false; + } + regno_t defSrcRegNo = regDefSrc.GetRegisterNumber(); + /* check regDefSrc */ + VRegVersion *replaceUseV = optSsaInfo->FindSSAVersion(defSrcRegNo); + 
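The conditions checked above guard the rewrite sketched below: a single-use sign/zero-extend (or shift) definition is folded into the extended-register form of its arithmetic use. A minimal stand-alone model with invented toy types, not the MapleBE API:

```
// Toy model of the ExtendShiftPattern rewrite:
//   sxtw x1, w2 ; add x0, x3, x1   ==>   add x0, x3, w2, SXTW
#include <iostream>
#include <optional>
#include <string>

struct ExtDef { int dst; int src; std::string ext; };  // dst = ext(src)
struct AddUse { int dst; int lhs; int rhs; };          // dst = lhs + rhs

// Fold the extend into the add only when the extend feeds this add and its
// result has no other use (mirroring the single-use check in CheckCondition).
std::optional<std::string> FoldExtendIntoAdd(const AddUse &use, const ExtDef &def,
                                             bool defHasSingleUse) {
  if (!defHasSingleUse || use.rhs != def.dst) {
    return std::nullopt;
  }
  return "add x" + std::to_string(use.dst) + ", x" + std::to_string(use.lhs) +
         ", w" + std::to_string(def.src) + ", " + def.ext;
}

int main() {
  ExtDef def{/*dst=*/1, /*src=*/2, /*ext=*/"SXTW"};  // sxtw x1, w2
  AddUse use{/*dst=*/0, /*lhs=*/3, /*rhs=*/1};       // add x0, x3, x1
  if (auto insn = FoldExtendIntoAdd(use, def, /*defHasSingleUse=*/true)) {
    std::cout << *insn << '\n';                      // add x0, x3, w2, SXTW
  }
  return 0;
}
```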
CHECK_FATAL(replaceUseV != nullptr, "useVRegVersion must not be null based on ssa"); + if (replaceUseV->GetAllUseInsns().size() > 1) { + return false; + } + return true; +} + +void ExtendShiftPattern::Init() { + replaceOp = MOP_undef; + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + defInsn = nullptr; + newInsn = nullptr; + replaceIdx = kInsnThirdOpnd; + optSuccess = false; + exMOpType = kExUndef; + lsMOpType = kLsUndef; +} + +void ExtendShiftPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoExtendShiftOpt(*insn); + } + } +} + +void ExtendMovPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool ExtendMovPattern::CheckSrcReg(regno_t srcRegNo, uint32 validNum) { + InsnSet srcDefSet; + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(srcRegNo); + CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (defInfo == nullptr) { + return false; + } + Insn *insn = defInfo->GetInsn(); + srcDefSet.insert(insn); + /* reserve insn set for non ssa version. */ + for (auto defInsn : srcDefSet) { + CHECK_FATAL((defInsn != nullptr), "defInsn is null!"); + MOperator mOp = defInsn->GetMachineOpcode(); + switch (mOp) { + case MOP_wiorrri12: + case MOP_weorrri12: { + /* check immVal if mop is OR */ + ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + uint32 bitNum = static_cast(imm.GetValue()); + if ((bitNum >> validNum) != 0) { + return false; + } + } + case MOP_wandrri12: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + regno_t defSrcRegNo = defSrcRegOpnd.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo, validNum)) { + return false; + } + break; + } + case MOP_wandrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo1, validNum) && !CheckSrcReg(defSrcRegNo2, validNum)) { + return false; + } + break; + } + case MOP_wiorrrr: + case MOP_weorrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo1, validNum) || !CheckSrcReg(defSrcRegNo2, validNum)) { + return false; + } + break; + } + case MOP_wldrb: { + if (validNum != k8BitSize) { + return false; + } + break; + } + case MOP_wldrh: { + if (validNum != k16BitSize) { + return false; + } + break; + } + default: + return false; + } + } + return true; +} + +bool ExtendMovPattern::BitNotAffected(const Insn &insn, uint32 validNum) { + RegOperand &firstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + RegOperand &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + regno_t desRegNo = firstOpnd.GetRegisterNumber(); + regno_t 
srcRegNo = secondOpnd.GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(desRegNo); + CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (defInfo == nullptr) { + return false; + } + if (!CheckSrcReg(srcRegNo, validNum)) { + return false; + } + replaceMop = MOP_wmovrr; + return true; +} + +bool ExtendMovPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: return BitNotAffected(insn, k8BitSize); + case MOP_xuxth32: return BitNotAffected(insn, k16BitSize); + default: return false; + } +} + +/* No initialization required */ +void ExtendMovPattern::Init() { + replaceMop = MOP_undef; +} + +void ExtendMovPattern::Optimize(Insn &insn) { + insn.SetMOP(AArch64CG::kMd[replaceMop]); +} + +void CopyRegProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool CopyRegProp::IsValidCopyProp(const RegOperand &dstReg, const RegOperand &srcReg) const { + DEBUG_ASSERT(destVersion != nullptr, "find destVersion failed"); + DEBUG_ASSERT(srcVersion != nullptr, "find srcVersion failed"); + LiveInterval *dstll = nullptr; + LiveInterval *srcll = nullptr; + if (destVersion->GetOriginalRegNO() == srcVersion->GetOriginalRegNO()) { + return true; + } + regno_t dstRegNO = dstReg.GetRegisterNumber(); + regno_t srcRegNO = srcReg.GetRegisterNumber(); + for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = (useDUInfoIt.second)->GetInsn(); + if (useInsn == nullptr) { + continue; + } + + dstll = regll->GetLiveInterval(dstRegNO); + srcll = regll->GetLiveInterval(srcRegNO); + static_cast(regll)->CheckInterference(*dstll, *srcll); + BB *useBB = useInsn->GetBB(); + if (dstll->IsConflictWith(srcRegNO) && + /* support override value when the version is not transphi */ + (((useBB->IsInPhiDef(srcRegNO) || useBB->IsInPhiList(srcRegNO)) && useBB->HasCriticalEdge()) || + useBB->IsInPhiList(dstRegNO))) { + return false; + } + } + if (dstll && srcll) { + regll->CoalesceLiveIntervals(*dstll, *srcll); + } + return true; +} + +bool CopyRegProp::CheckCondition(Insn &insn) { + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xmovrr || mOp == MOP_wmovrr || mOp == MOP_xvmovs || mOp == MOP_xvmovd) { + Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(destOpnd.IsRegister() && srcOpnd.IsRegister(), "must be"); + auto &destReg = static_cast(destOpnd); + auto &srcReg = static_cast(srcOpnd); + if (srcReg.GetRegisterNumber() == RZR) { + insn.SetMOP(AArch64CG::kMd[mOp == MOP_xmovrr ? 
MOP_xmovri64 : MOP_wmovri32]); + insn.SetOperand(kInsnSecondOpnd, cgFunc.CreateImmOperand(PTY_u64, 0)); + } + if (destReg.IsSSAForm() && srcReg.IsSSAForm()) { + /* case for ExplicitExtendProp */ + if (destReg.GetSize() != srcReg.GetSize()) { + VaildateImplicitCvt(destReg, srcReg, insn); + return false; + } + if (destReg.GetValidBitsNum() >= srcReg.GetValidBitsNum()) { + destReg.SetValidBitsNum(srcReg.GetValidBitsNum()); + } else { + MapleVector &propInsns = optSsaInfo->GetSafePropInsns(); + if (std::find(propInsns.begin(), propInsns.end(), insn.GetId()) == propInsns.end()) { + CHECK_FATAL(false, "do not support explicit extract bit in mov"); + return false; + } + } + destVersion = optSsaInfo->FindSSAVersion(destReg.GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = optSsaInfo->FindSSAVersion(srcReg.GetRegisterNumber()); + DEBUG_ASSERT(srcVersion != nullptr, "find Version failed"); + if (!IsValidCopyProp(destReg, srcReg)) { + return false; + } + return true; + } else { + /* should be eliminated by ssa peep */ + } + } + } + return false; +} + +void CopyRegProp::Optimize(Insn &insn) { + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + if (cgFunc.IsExtendReg(destVersion->GetSSAvRegOpnd()->GetRegisterNumber())) { + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } +} + +void CopyRegProp::VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn) { + DEBUG_ASSERT(movInsn.GetMachineOpcode() == MOP_xmovrr || movInsn.GetMachineOpcode() == MOP_wmovrr, "NIY explicit CVT"); + if (destReg.GetSize() == k64BitSize && srcReg.GetSize() == k32BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xuxtw64]); + } else if (destReg.GetSize() == k32BitSize && srcReg.GetSize() == k64BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xubfxrri6i6]); + movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, 0)); + movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, k32BitSize)); + } else { + CHECK_FATAL(false, " unknown explicit integer cvt, need implement in ssa prop "); + } + destReg.SetValidBitsNum(k32BitSize); +} + +void RedundantPhiProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + for (auto phiIt : bb->GetPhiInsns()) { + Init(); + if (!CheckCondition(*phiIt.second)) { + continue; + } + Optimize(*phiIt.second); + } + } +} + +void RedundantPhiProp::Optimize(Insn &insn) { + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); +} + +bool RedundantPhiProp::CheckCondition(Insn &insn) { + DEBUG_ASSERT(insn.IsPhi(), "must be phi insn here"); + auto &phiOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (phiOpnd.IsRedundancy()) { + auto &phiDestReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + destVersion = optSsaInfo->FindSSAVersion(phiDestReg.GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + uint32 srcRegNO = phiOpnd.GetOperands().begin()->second->GetRegisterNumber(); + srcVersion = optSsaInfo->FindSSAVersion(srcRegNO); + DEBUG_ASSERT(srcVersion != nullptr, "find Version failed"); + return true; + } + return false; +} + +bool ValidBitNumberProp::CheckCondition(Insn &insn) { + /* extend to all shift pattern in future */ + RegOperand *destOpnd = nullptr; + RegOperand *srcOpnd = nullptr; + if (insn.GetMachineOpcode() == MOP_xuxtw64) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + } + if (insn.GetMachineOpcode() == MOP_xubfxrri6i6) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = 
&static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &lsb = static_cast(insn.GetOperand(kInsnThirdOpnd)); + auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((lsb.GetValue() != 0) || (width.GetValue() != k32BitSize)) { + return false; + } + } + if (destOpnd != nullptr && destOpnd->IsSSAForm() && + srcOpnd != nullptr && srcOpnd->IsSSAForm()) { + destVersion = optSsaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = optSsaInfo->FindSSAVersion(srcOpnd->GetRegisterNumber()); + DEBUG_ASSERT(srcVersion != nullptr, "find Version failed"); + if (destVersion->HasImplicitCvt()) { + return false; + } + for (auto destUseIt : destVersion->GetAllUseInsns()) { + Insn *useInsn = destUseIt.second->GetInsn(); + if (useInsn->GetMachineOpcode() == MOP_xuxtw64) { + return false; + } + /* if srcOpnd upper 32 bits are valid, it can not prop to mop_x */ + if (srcOpnd->GetSize() == k64BitSize && destOpnd->GetSize() == k64BitSize) { + const auto *useMD = useInsn->GetDesc(); + for (auto opndUseIt : destUseIt.second->GetOperands()) { + const OpndDesc *useProp = useMD->opndMD[opndUseIt.first]; + if (useProp->GetSize() == k64BitSize) { + return false; + } + } + } + } + srcVersion->SetImplicitCvt(); + return true; + } + return false; +} + +void ValidBitNumberProp::Optimize(Insn &insn) { + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); +} + +void ValidBitNumberProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +void FpSpConstProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool FpSpConstProp::CheckCondition(Insn &insn) { + std::set defRegs = insn.GetDefRegs(); + auto &a64CGFunc = static_cast(cgFunc); + if (defRegs.size() <= 1) { + if (insn.ScanReg(RSP)) { + fpSpBase = &a64CGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + /* not safe due to varied sp in alloca */ + if (cgFunc.HasVLAOrAlloca()) { + return false; + } + } + if (insn.ScanReg(RFP)) { + DEBUG_ASSERT(fpSpBase == nullptr, " unexpect for both sp fp using "); + fpSpBase = &a64CGFunc.GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + } + if (fpSpBase == nullptr) { + return false; + } + if (insn.GetMachineOpcode() == MOP_xaddrri12) { + aT = kAArch64Add; + if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + return true; + } + } else if (insn.GetMachineOpcode() == MOP_xsubrri12) { + aT = kAArch64Sub; + if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + return true; + } + } + } + return false; +} + +bool FpSpConstProp::GetValidSSAInfo(Operand &opnd) { + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsSSAForm()) { + replaced = optSsaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + DEBUG_ASSERT(replaced != nullptr, "find ssa version failed in FpSpConstProp"); + return true; + } + } + return false; +} + +int64 FpSpConstProp::ArithmeticFold(int64 valInUse, ArithmeticType useAT) const { + int64 valInDef = shiftOpnd->GetValue(); + int64 returnVal = 0; + CHECK_FATAL(aT == 
kAArch64Add || aT == kAArch64Sub, "unsupport sp/fp arthimetic in aarch64"); + if (useAT == aT) { + returnVal = valInUse + valInDef; + } else { + returnVal = valInUse - valInDef; + } + return returnVal; +} + +void FpSpConstProp::PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn) { + MOperator useMop = useInsn.GetMachineOpcode(); + if (useInsn.IsAtomic()) { + return; + } + if (useInsn.IsStore() || useInsn.IsLoad()) { + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndIt = useDUInfo.GetOperands().begin(); + if (useOpndIt->first == kInsnSecondOpnd || useOpndIt->first == kInsnThirdOpnd) { + DEBUG_ASSERT(useOpndIt->second == 1, "multiple use in memory opnd"); + auto *a64memOpnd = static_cast(useInsn.GetMemOpnd()); + if (a64memOpnd->IsIntactIndexed() && a64memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { + auto *ofstOpnd = static_cast(a64memOpnd->GetOffsetImmediate()); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); + int64 newVal = ArithmeticFold(ofstOpnd->GetValue(), kAArch64Add); + auto *newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(static_cast(newVal), + k64BitSize); + if (ofstOpnd->GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { + newOfstImm->SetVary(kUnAdjustVary); + } + auto *newMem = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, a64memOpnd->GetSize(), *fpSpBase, + nullptr, newOfstImm, nullptr); + if (static_cast(cgFunc).IsOperandImmValid(useMop, newMem, useOpndIt->first)) { + useInsn.SetMemOpnd(newMem); + useDUInfo.DecreaseDU(useOpndIt->first); + replaced->CheckDeadUse(useInsn); + } + } + } + } else { + /* + * case : store stack location on stack + * add x1, sp, #8 + * ... + * store x1 [x1, #16] + * not prop , not benefit to live range yet + */ + return; + } + } +} + +void FpSpConstProp::PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, ArithmeticType curAT) { + if (useDUInfo.GetOperands().size() == 1) { + auto &a64cgFunc = static_cast(cgFunc); + MOperator useMop = useInsn.GetMachineOpcode(); + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); + auto &curVal = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + ImmOperand &newVal = a64cgFunc.CreateImmOperand(ArithmeticFold(curVal.GetValue(), curAT), + curVal.GetSize(), false); + if (newVal.GetValue() < 0) { + newVal.Negate(); + useMop = A64ConstProp::GetReversalMOP(useMop); + } + if (curVal.GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { + newVal.SetVary(kUnAdjustVary); + } + if (static_cast(cgFunc).IsOperandImmValid(useMop, &newVal, kInsnThirdOpnd)) { + Insn &newInsn = + cgFunc.GetInsnBuilder()->BuildInsn(useMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); + useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); + optSsaInfo->ReplaceInsn(useInsn, newInsn); + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +void FpSpConstProp::PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator oriMop) { + if (useDUInfo.GetOperands().size() == 1) { + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); + auto &newVal = *static_cast(shiftOpnd->Clone(*cgFunc.GetMemoryPool())); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(oriMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); + useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); + optSsaInfo->ReplaceInsn(useInsn, newInsn); + } else { + CHECK_FATAL(false, 
"NYI"); + } +} + +void FpSpConstProp::Optimize(Insn &insn) { + for (auto &useInsnInfo : replaced->GetAllUseInsns()) { + Insn *useInsn = useInsnInfo.second->GetInsn(); + MOperator useMop = useInsn->GetMachineOpcode(); + PropInMem(*useInsnInfo.second, *useInsn); + switch (useMop) { + case MOP_xmovrr: + case MOP_wmovrr: + PropInCopy(*useInsnInfo.second, *useInsn, insn.GetMachineOpcode()); + break; + case MOP_xaddrri12: + PropInArith(*useInsnInfo.second, *useInsn, kAArch64Add); + break; + case MOP_xsubrri12: + PropInArith(*useInsnInfo.second, *useInsn, kAArch64Sub); + break; + default: + break; + } + } +} + +bool A64PregCopyPattern::DFSFindValidDefInsns(Insn *curDefInsn, RegOperand *lastPhiDef, + std::unordered_map &visited) { + if (curDefInsn == nullptr) { + return false; + } + /* + * avoid the case as following: + * R113 and R117 define each other. + * [BB5] ---------------------------- + * phi: R113, (R111<4>, R117<9>) | + * / \ | + * / \ | + * [BB6] ---- [BB7] | + * add R116, R113, #4 phi: R117, (R113<5>, R116<6>) | + * / \ | + * / \ | + * [BB8] [BB28] | + * / | + * / | + * [BB9] ------ [BB5] | + * mov R1, R117 -------------------------- + * + * but the cases as following is right: + * (1) + * [BB124] + * add R339, R336, #345 -------- is found twice + * / \ + * / \ + * / [BB125] + * \ / + * \ / + * [BB56] + * phi: R370, (R339<124>, R339<125>) + * | + * | + * [BB61] + * mov R0, R370 + * (2) + * [BB17] + * phi: R242, (R241<14>, R218<53>) ------- is found twice + * / \ + * / \ + * / [BB26] [BB32] + * \ \ / + * \ [BB27] + * \ phi: R273, (R242<26>, R320<32>) + * [BB25] / + * \ [BB42] + * \ / + * [BB43] + * phi: R321, (R242<25>, R273<42>) + * | + * [BB47] + * mov R0, R321 + */ + if (visited[curDefInsn->GetId()] && curDefInsn->IsPhi() && lastPhiDef != nullptr) { + auto &curPhiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &curPhiListIt : curPhiOpnd.GetOperands()) { + auto &curUseOpnd = static_cast(*curPhiListIt.second); + if (&curUseOpnd == lastPhiDef) { + return false; + } + } + } + if (visited[curDefInsn->GetId()]) { + return true; + } + visited[curDefInsn->GetId()] = true; + if (!curDefInsn->IsPhi()) { + CHECK_FATAL(curDefInsn->IsMachineInstruction(), "expect valid insn"); + (void)validDefInsns.emplace_back(curDefInsn); + return true; + } + auto &phiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + lastPhiDef = &static_cast(curDefInsn->GetOperand(kInsnFirstOpnd)); + if (!DFSFindValidDefInsns(defInsn, lastPhiDef, visited)) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckMultiUsePoints(const Insn *defInsn) const { + Operand &dstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + /* use: (phi) or (mov preg) */ + for (auto &useInfoIt : defVersion->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + CHECK_FATAL(useInsn, "get useInsn failed"); + if (!useInsn->IsPhi() && useInsn->GetMachineOpcode() != MOP_wmovrr && useInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + if ((useInsn->GetMachineOpcode() 
== MOP_wmovrr || useInsn->GetMachineOpcode() == MOP_xmovrr) && + !static_cast(useInsn->GetOperand(kInsnFirstOpnd)).IsPhysicalRegister()) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckPhiCaseCondition(Insn &curInsn, Insn &defInsn) { + std::unordered_map visited; + RegOperand *lastPhiDef = (defInsn.IsPhi() ? &static_cast(defInsn.GetOperand(kInsnFirstOpnd)) : nullptr); + if (!DFSFindValidDefInsns(&defInsn, lastPhiDef, visited)) { + return false; + } + if (!CheckValidDefInsn(validDefInsns[0])) { + return false; + } + MOperator defMop = validDefInsns[0]->GetMachineOpcode(); + uint32 defOpndNum = validDefInsns[0]->GetOperandSize(); + for (size_t i = 1; i < validDefInsns.size(); ++i) { + if (defMop != validDefInsns[i]->GetMachineOpcode()) { + return false; + } + if (!CheckMultiUsePoints(validDefInsns[i])) { + return false; + } + for (uint32 idx = 0; idx < defOpndNum; ++idx) { + if (validDefInsns[0]->OpndIsDef(idx) && validDefInsns[i]->OpndIsDef(idx)) { + continue; + } + Operand &opnd1 = validDefInsns[0]->GetOperand(idx); + Operand &opnd2 = validDefInsns[i]->GetOperand(idx); + if (!opnd1.Equals(opnd2) && differIdx == -1) { + differIdx = static_cast(idx); + if (!validDefInsns[0]->GetOperand(static_cast(differIdx)).IsRegister() || + !validDefInsns[i]->GetOperand(static_cast(differIdx)).IsRegister()) { + return false; + } + auto &differOpnd1 = static_cast(validDefInsns[0]->GetOperand(static_cast(differIdx))); + auto &differOpnd2 = static_cast(validDefInsns[1]->GetOperand(static_cast(differIdx))); + /* avoid cc reg */ + if (!differOpnd1.IsOfIntClass() || !differOpnd2.IsOfIntClass() || + differOpnd1.IsPhysicalRegister() || differOpnd2.IsPhysicalRegister()) { + return false; + } + VRegVersion *differVersion1 = optSsaInfo->FindSSAVersion(differOpnd1.GetRegisterNumber()); + VRegVersion *differVersion2 = optSsaInfo->FindSSAVersion(differOpnd2.GetRegisterNumber()); + if (!differVersion1 || !differVersion2) { + return false; + } + if (differVersion1->GetOriginalRegNO() != differVersion2->GetOriginalRegNO()) { + return false; + } + differOrigNO = differVersion1->GetOriginalRegNO(); + } else if (!opnd1.Equals(opnd2) && idx != differIdx) { + return false; + } + } + if (differIdx <= 0) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckUselessDefInsn(const Insn *defInsn) const { + Operand &dstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + if (defVersion->GetAllUseInsns().size() == 1) { + return true; + } + /* + * avoid the case as following + * In a loop: + * [BB43] + * phi: R356, (R345<42>, R377<63>) + * / \ + * / \ + * [BB44] \ + * add R377, R356, #1 / + * mov R1, R377 / + * bl / + * \ / + * \ / + * [BB63] + */ + for (auto &useInfoIt : defVersion->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + CHECK_FATAL(useInsn, "get useInsn failed"); + if (useInsn->IsPhi()) { + auto &phiDefOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + continue; + } + Operand &opnd = defInsn->GetOperand(i); + if (opnd.IsRegister() && static_cast(opnd).GetRegisterNumber() == phiDefOpnd.GetRegisterNumber()) { + return false; + } + } + } + } + return true; +} + +bool 
A64PregCopyPattern::CheckValidDefInsn(const Insn *defInsn) { + const auto *md = defInsn->GetDesc(); + CHECK_FATAL(md != nullptr, "expect valid AArch64MD"); + /* this pattern applies to all basicOps */ + if (md->IsMove() || md->IsStore() || md->IsLoad() || md->IsLoadStorePair() || md->IsCall() || + md->IsDMB() || md->IsVectorOp() || md->IsCondDef() || md->IsCondBranch() || md->IsUnCondBranch()) { + return false; + } + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = defInsn->GetOperand(i); + if (!opnd.IsRegister() && !opnd.IsImmediate() && !opnd.IsOpdShift() && !opnd.IsOpdExtend()) { + return false; + } + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (cgFunc.IsSPOrFP(regOpnd) || regOpnd.IsPhysicalRegister() || + (!regOpnd.IsOfIntClass() && !regOpnd.IsOfFloatOrSIMDClass())) { + return false; + } + } + } + return true; +} + +bool A64PregCopyPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_xmovrr && curMop != MOP_wmovrr) { + return false; + } + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (!dstOpnd.IsPhysicalRegister()) { + return false; + } + regno_t useRegNO = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useRegNO); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + Operand &defDstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + /* avoid inline-asm */ + if (!defDstOpnd.IsRegister()) { + return false; + } + if (!CheckMultiUsePoints(defInsn)) { + return false; + } + if (defInsn->IsPhi()) { + isCrossPhi = true; + firstPhiInsn = defInsn; + return CheckPhiCaseCondition(insn, *defInsn); + } else { + if (!CheckValidDefInsn(defInsn)) { + return false; + } + if (!CheckUselessDefInsn(defInsn)) { + return false; + } + (void)validDefInsns.emplace_back(defInsn); + } + return true; +} + +Insn &A64PregCopyPattern::CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn) { + CHECK_FATAL(!newPhiList.empty(), "empty newPhiList"); + RegOperand *differOrigOpnd = cgFunc.GetVirtualRegisterOperand(differOrigNO); + CHECK_FATAL(differOrigOpnd != nullptr, "get original opnd default"); + PhiOperand &phiList = optSsaInfo->CreatePhiOperand(); + for (auto &it : newPhiList) { + phiList.InsertOpnd(it.first, *it.second); + } + Insn &phiInsn = cgFunc.GetCG()->BuildPhiInsn(*differOrigOpnd, phiList); + optSsaInfo->CreateNewInsnSSAInfo(phiInsn); + BB *bb = curInsn->GetBB(); + (void)bb->InsertInsnBefore(*curInsn, phiInsn); + /* */ + bb->AddPhiInsn(static_cast(phiInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(), phiInsn); + return phiInsn; +} + +/* + * Check whether the required phi is available, do not insert phi repeatedly. + */ +RegOperand *A64PregCopyPattern::CheckAndGetExistPhiDef(Insn &phiInsn, std::vector &validDifferRegNOs) const { + MapleMap &phiInsns = phiInsn.GetBB()->GetPhiInsns(); + for (auto &phiIt : phiInsns) { + auto &def = static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(def.GetRegisterNumber()); + /* + * if the phi of the change point has been created (according to original regNO), return the phiDefOpnd. + * But, there is a problem: the phiDefOpnd of the same original regNO is not the required phi. 
+ * For example (in parentheses is the original regNO):
+ *   add R110(R80), R106(R80), #1     add R122(R80), R118(R80), #1
+ *                  \                      /
+ *                   \                    /
+ *             (1) phi: R123(R80), [R110, R122]
+ *                 mov R0, R123
+ * It will return R123 of phi(1) because the differOrigNO is 80, but that is not what we want;
+ * we need to create a new phi(2): R140(R80), [R106, R118].
+ * So we need to check whether all phiOpnds have the correct ssaRegNO.
+ */
+    if (defVersion->GetOriginalRegNO() == differOrigNO) {
+      auto &phiOpnd = static_cast<PhiOperand&>(phiIt.second->GetOperand(kInsnSecondOpnd));
+      if (phiOpnd.GetOperands().size() == validDifferRegNOs.size()) {
+        bool exist = true;
+        for (auto &phiListIt : phiOpnd.GetOperands()) {
+          if (std::find(validDifferRegNOs.begin(), validDifferRegNOs.end(),
+              static_cast<RegOperand*>(phiListIt.second)->GetRegisterNumber()) == validDifferRegNOs.end()) {
+            exist = false;
+            break;
+          }
+        }
+        if (exist) {
+          return &static_cast<RegOperand&>(phiIt.second->GetOperand(kInsnFirstOpnd));
+        }
+      }
+    }
+  }
+  return nullptr;
+}
+
+RegOperand &A64PregCopyPattern::DFSBuildPhiInsn(Insn *curInsn, std::unordered_map<uint32, RegOperand*> &visited) {
+  CHECK_FATAL(curInsn, "curInsn must not be null");
+  if (visited[curInsn->GetId()] != nullptr) {
+    return *visited[curInsn->GetId()];
+  }
+  if (!curInsn->IsPhi()) {
+    return static_cast<RegOperand&>(curInsn->GetOperand(static_cast<uint32>(differIdx)));
+  }
+  std::unordered_map<uint32, RegOperand*> differPhiList;
+  std::vector<regno_t> validDifferRegNOs;
+  auto &phiOpnd = static_cast<PhiOperand&>(curInsn->GetOperand(kInsnSecondOpnd));
+  for (auto &phiListIt : phiOpnd.GetOperands()) {
+    auto &useOpnd = static_cast<RegOperand&>(*phiListIt.second);
+    VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber());
+    Insn *defInsn = FindDefInsn(useVersion);
+    CHECK_FATAL(defInsn != nullptr, "get defInsn failed");
+    RegOperand &phiDefOpnd = DFSBuildPhiInsn(defInsn, visited);
+    (void)differPhiList.emplace(phiListIt.first, &phiDefOpnd);
+    (void)validDifferRegNOs.emplace_back(phiDefOpnd.GetRegisterNumber());
+  }
+  /*
+   * The phi in the control flow may already exist.
+   * For example:
+   *         [BB26]                     [BB45]
+   *   add R191, R103, R187       add R166, R103, R164
+   *              \                    /
+   *               \                  /
+   *                    [BB27]
+   *   phi: R192, (R191<26>, R166<45>)  ------ curInsn
+   *   phi: R194, (R187<26>, R164<45>)  ------ the phi which we need already exists
+   *       /       validDifferRegNOs : [187, 164]
+   *      /
+   *   [BB28]                     [BB46]
+   *   add R215, R103, R211          /
+   *              \                 /
+   *               \               /
+   *                    [BB29]
+   *   phi: R216, (R215<28>, R192<46>)
+   *   phi: R218, (R211<28>, R194<46>)  ------ the phi which we need already exists
+   *   mov R0, R216    validDifferRegNOs : [211, 194]
+   */
+  RegOperand *existPhiDef = CheckAndGetExistPhiDef(*curInsn, validDifferRegNOs);
+  if (existPhiDef == nullptr) {
+    Insn &phiInsn = CreateNewPhiInsn(differPhiList, curInsn);
+    visited[curInsn->GetId()] = &static_cast<RegOperand&>(phiInsn.GetOperand(kInsnFirstOpnd));
+    existPhiDef = &static_cast<RegOperand&>(phiInsn.GetOperand(kInsnFirstOpnd));
+  }
+  return *existPhiDef;
+}
+
+void A64PregCopyPattern::Optimize(Insn &insn) {
+  Insn *defInsn = *validDefInsns.begin();
+  MOperator newMop = defInsn->GetMachineOpcode();
+  Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd);
+  Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, AArch64CG::kMd[newMop]);
+  uint32 opndNum = defInsn->GetOperandSize();
+  newInsn.ResizeOpnds(opndNum);
+  if (!isCrossPhi) {
+    for (uint32 i = 0; i < opndNum; ++i) {
+      if (defInsn->OpndIsDef(i)) {
+        newInsn.SetOperand(i, dstOpnd);
+      } else {
+        newInsn.SetOperand(i, defInsn->GetOperand(i));
+      }
+    }
+  } else {
+    std::vector<regno_t> validDifferRegNOs;
+    for (Insn *vdInsn : validDefInsns) {
+      auto &vdOpnd = static_cast<RegOperand&>(vdInsn->GetOperand(static_cast<uint32>(differIdx)));
+      (void)validDifferRegNOs.emplace_back(vdOpnd.GetRegisterNumber());
+    }
+    RegOperand *differPhiDefOpnd = CheckAndGetExistPhiDef(*firstPhiInsn, validDifferRegNOs);
+    if (differPhiDefOpnd == nullptr) {
+      std::unordered_map<uint32, RegOperand*> visited;
+      differPhiDefOpnd = &DFSBuildPhiInsn(firstPhiInsn, visited);
+    }
+    CHECK_FATAL(differPhiDefOpnd, "get differPhiDefOpnd failed");
+    for (uint32 i = 0; i < opndNum; ++i) {
+      if (defInsn->OpndIsDef(i)) {
+        newInsn.SetOperand(i, dstOpnd);
+      } else if (i == static_cast<uint32>(differIdx)) {
+        newInsn.SetOperand(i, *differPhiDefOpnd);
+      } else {
+        newInsn.SetOperand(i, defInsn->GetOperand(i));
+      }
+    }
+  }
+  insn.GetBB()->ReplaceInsn(insn, newInsn);
+  /* update ssa info */
+  optSsaInfo->ReplaceInsn(insn, newInsn);
+
+  if (PROP_DUMP) {
+    LogInfo::MapleLogger() << ">>>>>>> In A64PregCopyPattern : <<<<<<<\n";
+    LogInfo::MapleLogger() << "======= ReplaceInsn :\n";
+    insn.Dump();
+    LogInfo::MapleLogger() << "======= NewInsn :\n";
+    newInsn.Dump();
+  }
+}
+
+void A64PregCopyPattern::Run() {
+  FOR_ALL_BB(bb, &cgFunc) {
+    FOR_BB_INSNS(insn, bb) {
+      if (!insn->IsMachineInstruction()) {
+        continue;
+      }
+      Init();
+      if (!CheckCondition(*insn)) {
+        continue;
+      }
+      Optimize(*insn);
+    }
+  }
+  validDefInsns.clear();
+  validDefInsns.shrink_to_fit();
+}
+
+void A64ReplaceRegOpndVisitor::Visit(RegOperand *v) {
+  (void)v;
+  insn->SetOperand(idx, *newReg);
+}
+void A64ReplaceRegOpndVisitor::Visit(MemOperand *a64memOpnd) {
+  bool changed = false;
+  CHECK_FATAL(a64memOpnd->IsIntactIndexed(), "NYI post/pre index model");
+  StackMemPool tempMemPool(memPoolCtrler, "temp mempool for A64ReplaceRegOpndVisitor");
+  auto *cpyMem = a64memOpnd->Clone(tempMemPool);
+  if (cpyMem->GetBaseRegister() != nullptr &&
+      cpyMem->GetBaseRegister()->GetRegisterNumber() == oldReg->GetRegisterNumber()) {
+    cpyMem->SetBaseRegister(*static_cast<RegOperand*>(newReg));
+    changed = true;
+  }
+  if (cpyMem->GetIndexRegister() != nullptr &&
+      cpyMem->GetIndexRegister()->GetRegisterNumber()
== oldReg->GetRegisterNumber()) { + CHECK_FATAL(!changed, "base reg is equal to index reg"); + cpyMem->SetIndexRegister(*newReg); + changed = true; + } + if (changed) { + insn->SetMemOpnd(&static_cast(cgFunc)->GetOrCreateMemOpnd(*cpyMem)); + } +} +void A64ReplaceRegOpndVisitor::Visit(ListOperand *v) { + for (auto &it : v->GetOperands()) { + if (it->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + it = newReg; + } + } +} +void A64ReplaceRegOpndVisitor::Visit(PhiOperand *v) { + for (auto &it : v->GetOperands()) { + if (it.second->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + it.second = newReg; + } + } + auto &phiDest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + if (phiDest.GetValidBitsNum() > v->GetLeastCommonValidBit()) { + phiDest.SetValidBitsNum(v->GetLeastCommonValidBit()); + } +} +} + diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f86e2fb962686b6ced0eaf9432a3f3052c67ddaa --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp @@ -0,0 +1,534 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "loop.h" +#include "aarch64_ra_opt.h" + +namespace maplebe { +using namespace std; +bool RaX0Opt::PropagateX0CanReplace(Operand *opnd, regno_t replaceReg) const { + if (opnd != nullptr) { + RegOperand *regopnd = static_cast(opnd); + regno_t regCandidate = regopnd->GetRegisterNumber(); + if (regCandidate == replaceReg) { + return true; + } + } + return false; +} + +/* + * Replace replace_reg with rename_reg. + * return true if there is a redefinition that needs to terminate the propagation. + */ +bool RaX0Opt::PropagateRenameReg(Insn *nInsn, const X0OptInfo &optVal) const { + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + const InsnDesc *md = nInsn->GetDesc(); + int32 lastOpndId = static_cast(nInsn->GetOperandSize() - 1); + for (int32_t i = lastOpndId; i >= 0; i--) { + Operand &opnd = nInsn->GetOperand(static_cast(i)); + + if (opnd.IsList()) { + /* call parameters */ + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memopnd = static_cast(opnd); + if (PropagateX0CanReplace(memopnd.GetBaseRegister(), optVal.GetReplaceReg())) { + RegOperand *renameOpnd = static_cast(optVal.GetRenameOpnd()); + memopnd.SetBaseRegister(*renameOpnd); + } + if (PropagateX0CanReplace(memopnd.GetIndexRegister(), optVal.GetReplaceReg())) { + RegOperand *renameOpnd = static_cast(optVal.GetRenameOpnd()); + memopnd.SetIndexRegister(*renameOpnd); + } + } else if (opnd.IsRegister()) { + bool isdef = (md->GetOpndDes(i))->IsRegDef(); + RegOperand ®opnd = static_cast(opnd); + regno_t regCandidate = regopnd.GetRegisterNumber(); + if (isdef) { + /* Continue if both replace_reg & rename_reg are not redefined. 
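+ * A hedged illustration (hypothetical vregs, not taken from a real dump):
+ *   mov R101, R100       // rename_reg = R101, replace_reg = R100
+ *   add R102, R100, #4   // use of R100: rewritten below to use R101
+ *   mov R100, x2         // R100 redefined -> return true, propagation stops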
+         */
+        if (regCandidate == optVal.GetReplaceReg() || regCandidate == renameReg) {
+          return true;
+        }
+      } else {
+        if (regCandidate == optVal.GetReplaceReg()) {
+          nInsn->SetOperand(static_cast<uint32>(i), *optVal.GetRenameOpnd());
+        }
+      }
+    }
+  }
+  return false; /* false == no redefinition */
+}
+
+/* Propagate x0 from a call return value to a def of x0.
+ * This eliminates some local reloads under high register pressure, since
+ * the use has been replaced by x0.
+ */
+bool RaX0Opt::PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) const {
+  if (insn->GetMachineOpcode() != MOP_xmovrr && insn->GetMachineOpcode() != MOP_wmovrr) {
+    return false;
+  }
+  RegOperand &movSrc = static_cast<RegOperand&>(insn->GetOperand(1));
+  if (movSrc.GetRegisterNumber() != R0) {
+    return false;
+  }
+
+  optVal.SetMovSrc(&movSrc);
+  return true;
+}
+
+bool RaX0Opt::PropagateX0DetectRedefine(const InsnDesc *md, const Insn *ninsn, const X0OptInfo &optVal,
+                                        uint32 index) const {
+  bool isdef = (md->GetOpndDes(static_cast<size_t>(index)))->IsRegDef();
+  if (isdef) {
+    RegOperand &opnd = static_cast<RegOperand&>(ninsn->GetOperand(index));
+    if (opnd.GetRegisterNumber() == optVal.GetReplaceReg()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool RaX0Opt::PropagateX0Optimize(const BB *bb, const Insn *insn, X0OptInfo &optVal) {
+  bool redefined = false;
+  for (Insn *ninsn = insn->GetNext(); (ninsn != nullptr) && ninsn != bb->GetLastInsn()->GetNext();
+       ninsn = ninsn->GetNext()) {
+    if (!ninsn->IsMachineInstruction()) {
+      continue;
+    }
+
+    if (ninsn->IsCall()) {
+      break;
+    }
+
+    /* Will continue as long as the reg being replaced is not redefined.
+     * Does not need to check for x0 redefinition. The mov instruction src
+     * being replaced already defines x0 and will terminate this loop.
+     */
+    const InsnDesc *md = ninsn->GetDesc();
+    for (uint32 i = 0; i < ninsn->GetDefRegs().size(); i++) {
+      redefined = PropagateX0DetectRedefine(md, ninsn, optVal, i);
+      if (redefined) {
+        break;
+      }
+    }
+    if (redefined) {
+      break;
+    }
+
+    /* Look for a move whose src is the register equivalent to x0. */
+    if (ninsn->GetMachineOpcode() != MOP_xmovrr && ninsn->GetMachineOpcode() != MOP_wmovrr) {
+      continue;
+    }
+
+    Operand *src = &ninsn->GetOperand(1);
+    RegOperand *srcreg = static_cast<RegOperand*>(src);
+    if (srcreg->GetRegisterNumber() != optVal.GetReplaceReg()) {
+      continue;
+    }
+
+    /* Setup for the next optimization pattern. */
+    Operand *dst = &ninsn->GetOperand(0);
+    RegOperand *dstreg = static_cast<RegOperand*>(dst);
+    if (dstreg->GetRegisterNumber() != R0) {
+      /* This is to set up for further propagation later.
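+ * Sketch of the shape being recorded (hypothetical vregs):
+ *   mov R100, x0     // earlier: replace_reg = R100
+ *   mov R105, R100   // here: dst != x0, so this insn is remembered as the
+ *                    // rename candidate (rename_reg = R105) for the
+ *                    // cross-bb pass below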
*/ + if (srcreg->GetRegisterNumber() == optVal.GetReplaceReg()) { + if (optVal.GetRenameInsn() != nullptr) { + redefined = true; + break; + } else { + optVal.SetRenameInsn(ninsn); + optVal.SetRenameOpnd(dst); + optVal.SetRenameReg(dstreg->GetRegisterNumber()); + } + } + continue; + } + + if (redefined) { + break; + } + + /* x0 = x0 */ + ninsn->SetOperand(1, *optVal.GetMovSrc()); + break; + } + + return redefined; +} + +bool RaX0Opt::PropagateX0ForCurrBb(BB *bb, const X0OptInfo &optVal) { + bool redefined = false; + for (Insn *ninsn = optVal.GetRenameInsn()->GetNext(); (ninsn != nullptr) && ninsn != bb->GetLastInsn()->GetNext(); + ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + redefined = PropagateRenameReg(ninsn, optVal); + if (redefined) { + break; + } + } + if (!redefined) { + auto it = bb->GetLiveOutRegNO().find(optVal.GetReplaceReg()); + if (it != bb->GetLiveOutRegNO().end()) { + bb->EraseLiveOutRegNO(it); + } + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + bb->InsertLiveOutRegNO(renameReg); + } + return redefined; +} + +void RaX0Opt::PropagateX0ForNextBb(BB *nextBb, const X0OptInfo &optVal) { + bool redefined = false; + for (Insn *ninsn = nextBb->GetFirstInsn(); ninsn != nextBb->GetLastInsn()->GetNext(); ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + redefined = PropagateRenameReg(ninsn, optVal); + if (redefined) { + break; + } + } + if (!redefined) { + auto it = nextBb->GetLiveOutRegNO().find(optVal.GetReplaceReg()); + if (it != nextBb->GetLiveOutRegNO().end()) { + nextBb->EraseLiveOutRegNO(it); + } + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + nextBb->InsertLiveOutRegNO(renameReg); + } +} + +/* + * Perform optimization. + * First propagate x0 in a bb. + * Second propagation see comment in function. + */ +void RaX0Opt::PropagateX0() { + FOR_ALL_BB(bb, cgFunc) { + X0OptInfo optVal; + + Insn *insn = bb->GetFirstInsn(); + while ((insn != nullptr) && !insn->IsMachineInstruction()) { + insn = insn->GetNext(); + continue; + } + if (insn == nullptr) { + continue; + } + if (!PropagateX0DetectX0(insn, optVal)) { + continue; + } + + /* At this point the 1st insn is a mov from x0. */ + RegOperand &movDst = static_cast(insn->GetOperand(0)); + optVal.SetReplaceReg(movDst.GetRegisterNumber()); + optVal.ResetRenameInsn(); + bool redefined = PropagateX0Optimize(bb, insn, optVal); + if (redefined || (optVal.GetRenameInsn() == nullptr)) { + continue; + } + + /* Next pattern to help LSRA. Short cross bb live interval. + * Straight line code. Convert reg2 into bb local. + * bb1 + * mov reg2 <- x0 => mov reg2 <- x0 + * mov reg1 <- reg2 mov reg1 <- reg2 + * call call + * bb2 : livein< reg1 reg2 > + * use reg2 use reg1 + * .... + * reg2 not liveout + * + * Can allocate caller register for reg2. 
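+ *
+ * The guards below keep this sound: bb must have nextBb as its single
+ * successor, nextBb must have bb as its single predecessor, replace_reg and
+ * rename_reg must both be live-out of bb, and replace_reg must not be
+ * live-out of nextBb.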
+ * + * Further propagation of very short live interval cross bb reg + */ + if (optVal.GetRenameReg() < kMaxRegNum) { /* dont propagate physical reg */ + continue; + } + BB *nextBb = bb->GetNext(); + if (nextBb == nullptr) { + break; + } + if (bb->GetSuccs().size() != 1 || nextBb->GetPreds().size() != 1) { + continue; + } + if (bb->GetSuccs().front() != nextBb || nextBb->GetPreds().front() != bb) { + continue; + } + if (bb->GetLiveOutRegNO().find(optVal.GetReplaceReg()) == bb->GetLiveOutRegNO().end() || + bb->GetLiveOutRegNO().find(optVal.GetRenameReg()) == bb->GetLiveOutRegNO().end() || + nextBb->GetLiveOutRegNO().find(optVal.GetReplaceReg()) != nextBb->GetLiveOutRegNO().end()) { + continue; + } + /* Replace replace_reg by rename_reg. */ + redefined = PropagateX0ForCurrBb(bb, optVal); + if (redefined) { + continue; + } + PropagateX0ForNextBb(nextBb, optVal); + } +} + +void VregRename::PrintRenameInfo(regno_t regno) const { + VregRenameInfo *info = (regno <= maxRegnoSeen) ? renameInfo[regno] : nullptr; + if (info == nullptr || (info->numDefs == 0 && info->numUses == 0)) { + return; + } + LogInfo::MapleLogger() << "reg: " << regno; + if (info->firstBBLevelSeen) { + LogInfo::MapleLogger() << " fromLevel " << info->firstBBLevelSeen->GetInternalFlag2(); + } + if (info->lastBBLevelSeen) { + LogInfo::MapleLogger() << " toLevel " << info->lastBBLevelSeen->GetInternalFlag2(); + } + if (info->numDefs) { + LogInfo::MapleLogger() << " defs " << info->numDefs; + } + if (info->numUses) { + LogInfo::MapleLogger() << " uses " << info->numUses; + } + if (info->numDefs) { + LogInfo::MapleLogger() << " innerDefs " << info->numInnerDefs; + } + if (info->numUses) { + LogInfo::MapleLogger() << " innerUses " << info->numInnerUses; + } + LogInfo::MapleLogger() << "\n"; +} + +void VregRename::PrintAllRenameInfo() const { + for (uint32 regno = 0; regno < cgFunc->GetMaxRegNum(); ++regno) { + PrintRenameInfo(regno); + } +} + +bool VregRename::IsProfitableToRename(const VregRenameInfo *info) const{ + if ((info->numInnerDefs == 0) && (info->numUses != info->numInnerUses)) { + return true; + } + return false; +} + +void VregRename::RenameProfitableVreg(RegOperand *ropnd, const CGFuncLoops *loop) { + regno_t vreg = ropnd->GetRegisterNumber(); + VregRenameInfo *info = (vreg <= maxRegnoSeen) ? renameInfo[vreg] : nullptr; + if ((info == nullptr) || loop->GetMultiEntries().size() || (!IsProfitableToRename(info))) { + return; + } + + uint32 size = (ropnd->GetSize() == k64BitSize) ? k8ByteSize : k4ByteSize; + regno_t newRegno = cgFunc->NewVReg(ropnd->GetRegisterType(), size); + RegOperand *renameVreg = &cgFunc->CreateVirtualRegisterOperand(newRegno); + + const BB *header = loop->GetHeader(); + for (auto pred : header->GetPreds()) { + if (find(loop->GetBackedge().begin(), loop->GetBackedge().end(), pred) != loop->GetBackedge().end()) { + continue; + } + MOperator mOp = (ropnd->GetRegisterType() == kRegTyInt) ? + ((size == k8BitSize) ? MOP_xmovrr : MOP_wmovrr) : + ((size == k8BitSize) ? 
MOP_xvmovd : MOP_xvmovs); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *renameVreg, *ropnd); + Insn *last = pred->GetLastInsn(); + if (last) { + if (last->IsBranch()) { + last->GetBB()->InsertInsnBefore(*last, newInsn); + } else { + last->GetBB()->InsertInsnAfter(*last, newInsn); + } + } else { + pred->AppendInsn(newInsn); + } + } + + for (auto bb : loop->GetLoopMembers()) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + MemOperand *newMemOpnd = nullptr; + if (base != nullptr && base->IsVirtualRegister() && base->GetRegisterNumber() == vreg) { + newMemOpnd = static_cast(memopnd->Clone(*cgFunc->GetMemoryPool())); + newMemOpnd->SetBaseRegister(*renameVreg); + insn->SetOperand(i, *newMemOpnd); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister() && offset->GetRegisterNumber() == vreg) { + if (newMemOpnd == nullptr) { + newMemOpnd = static_cast(memopnd->Clone(*cgFunc->GetMemoryPool())); + } + newMemOpnd->SetIndexRegister(*renameVreg); + insn->SetOperand(i, *newMemOpnd); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() == vreg) { + insn->SetOperand(i, *renameVreg); + } + } + } + } +} + +void VregRename::RenameFindLoopVregs(const CGFuncLoops *loop) { + for (auto *bb : loop->GetLoopMembers()) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + RenameProfitableVreg(base, loop); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + RenameProfitableVreg(offset, loop); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() != ccRegno) { + RenameProfitableVreg(static_cast(opnd), loop); + } + } + } + } +} + +/* Only the bb level is important, not the bb itself. 
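+ * ("level" here is BB::GetInternalFlag2(); firstBBLevelSeen and
+ * lastBBLevelSeen below simply track the smallest and largest level at
+ * which the vreg occurs.)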
+ * So if multiple bbs have the same level, only one bb represents the level + */ +void VregRename::UpdateVregInfo(regno_t vreg, BB *bb, bool isInner, bool isDef) { + VregRenameInfo *info = renameInfo[vreg]; + if (info == nullptr) { + info = memPool->New(); + renameInfo[vreg] = info; + if (vreg > maxRegnoSeen) { + maxRegnoSeen = vreg; + } + } + if (isDef) { + info->numDefs++; + if (isInner) { + info->numInnerDefs++; + } + } else { + info->numUses++; + if (isInner) { + info->numInnerUses++; + } + } + if (info->firstBBLevelSeen) { + if (info->firstBBLevelSeen->GetInternalFlag2() > bb->GetInternalFlag2()) { + info->firstBBLevelSeen = bb; + } + } else { + info->firstBBLevelSeen = bb; + } + if (info->lastBBLevelSeen) { + if (info->lastBBLevelSeen->GetInternalFlag2() < bb->GetInternalFlag2()) { + info->lastBBLevelSeen = bb; + } + } else { + info->lastBBLevelSeen = bb; + } +} + +void VregRename::RenameGetFuncVregInfo() { + FOR_ALL_BB(bb, cgFunc) { + bool isInner = bb->GetLoop() ? bb->GetLoop()->GetInnerLoops().empty() : false; + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + regno_t vreg = base->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, false); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + regno_t vreg = offset->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, false); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() != ccRegno) { + bool isdef = (md->opndMD[i])->IsRegDef(); + regno_t vreg = static_cast(opnd)->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, isdef); + } + } + } + } +} + +void VregRename::RenameFindVregsToRename(const CGFuncLoops *loop) { + if (loop->GetInnerLoops().empty()) { + RenameFindLoopVregs(loop); + return; + } + for (auto inner : loop->GetInnerLoops()) { + RenameFindVregsToRename(inner); + } +} + + +void VregRename::VregLongLiveRename() { + if (cgFunc->GetLoops().size() == 0) { + return; + } + RenameGetFuncVregInfo(); + for (const auto *lp : cgFunc->GetLoops()) { + RenameFindVregsToRename(lp); + } +} + +void AArch64RaOpt::Run() { + RaX0Opt x0Opt(cgFunc); + x0Opt.PropagateX0(); + + if (cgFunc->GetMirModule().GetSrcLang() == kSrcLangC && CGOptions::DoVregRename()) { + /* loop detection considers EH bb. That is not handled. So C only for now. 
*/ + LoopFinder *lf = memPool->New(*cgFunc, *memPool); + lf->FormLoopHierarchy(); + VregRename rename(cgFunc, memPool); + Bfs localBfs(*cgFunc, *memPool); + rename.bfs = &localBfs; + rename.bfs->ComputeBlockOrder(); + rename.VregLongLiveRename(); + cgFunc->ClearLoopInfo(); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..baa2b61277ead6d8de6481aa6752cfdad467a9d1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -0,0 +1,1242 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_reaching.h" +#include "aarch64_cg.h" +namespace maplebe { +/* MCC_ClearLocalStackRef clear 1 stack slot, and MCC_DecRefResetPair clear 2 stack slot, + * the stack positins cleared are recorded in callInsn->clearStackOffset + */ +constexpr short kFirstClearMemIndex = 0; +constexpr short kSecondClearMemIndex = 1; + +/* insert pseudo insn for parameters definition */ +void AArch64ReachingDefinition::InitStartGen() { + BB *bb = cgFunc->GetFirstBB(); + + /* Parameters should be define first. */ + AArch64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo pLoc; + for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); ++i) { + MIRType *type = cgFunc->GetFunction().GetNthParamType(i); + (void)parmLocator.LocateNextParm(*type, pLoc, i == 0, &cgFunc->GetFunction()); + if (pLoc.reg0 == 0) { + /* If is a large frame, parameter addressing mode is based vreg:Vra. */ + continue; + } + + uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex()); + if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) { + /* For C structure passing in one or two registers. */ + symSize = k8ByteSize; + } + RegType regType = (pLoc.reg0 < V0) ? kRegTyInt : kRegTyFloat; + uint32 srcBitSize = ((symSize < k4ByteSize) ? 
k4ByteSize : symSize) * kBitsPerByte; + + MOperator mOp; + if (regType == kRegTyInt) { + if (srcBitSize <= k32BitSize) { + mOp = MOP_pseudo_param_def_w; + } else { + mOp = MOP_pseudo_param_def_x; + } + } else { + if (srcBitSize <= k32BitSize) { + mOp = MOP_pseudo_param_def_s; + } else { + mOp = MOP_pseudo_param_def_d; + } + } + + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg0), srcBitSize, regType); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd); + bb->InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + if (pLoc.reg1) { + RegOperand ®Opnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg1), srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd1); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + if (pLoc.reg2) { + RegOperand ®Opnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg2), srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd2); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + if (pLoc.reg3) { + RegOperand ®Opnd3 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg3), srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd3); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + + { + /* + * define memory address since store param may be transfered to stp and which with the short offset range. + * we can not get the correct definition before RA. + * example: + * add x8, sp, #712 + * stp x0, x1, [x8] // store param: _this Reg40_R313644 + * stp x2, x3, [x8,#16] // store param: Reg41_R333743 Reg42_R333622 + * stp x4, x5, [x8,#32] // store param: Reg43_R401297 Reg44_R313834 + * str x7, [x8,#48] // store param: Reg46_R401297 + */ + MIRSymbol *sym = cgFunc->GetFunction().GetFormal(i); + if (!sym->IsPreg()) { + MIRSymbol *firstSym = cgFunc->GetFunction().GetFormal(i); + const AArch64SymbolAlloc *firstSymLoc = + static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(firstSym->GetStIndex())); + int32 stOffset = cgFunc->GetBaseOffset(*firstSymLoc); + MIRType *firstType = cgFunc->GetFunction().GetNthParamType(i); + uint32 firstSymSize = cgFunc->GetBecommon().GetTypeSize(firstType->GetTypeIndex()); + uint32 firstStackSize = firstSymSize < k4ByteSize ? k4ByteSize : firstSymSize; + + MemOperand *memOpnd = aarchCGFunc->CreateStackMemOpnd(RFP, stOffset, firstStackSize * kBitsPerByte); + MOperator mopTemp = firstStackSize <= k4ByteSize ? MOP_pseudo_param_store_w : MOP_pseudo_param_store_x; + Insn &pseudoInsnTemp = cgFunc->GetInsnBuilder()->BuildInsn(mopTemp, *memOpnd); + bb->InsertInsnBegin(pseudoInsnTemp); + pseudoInsns.emplace_back(&pseudoInsnTemp); + } + } + } + + /* if function has "bl MCC_InitializeLocalStackRef", should define corresponding memory. 
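+ * Rough shape (offsets illustrative only):
+ *   bl MCC_InitializeLocalStackRef      // clears GetRefCount() stack slots
+ *   => one MOP_pseudo_ref_init_x is prepended per 64-bit slot, at
+ *      beginOffset, beginOffset + 8, ...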
*/ + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + + for (uint32 i = 0; i < a64CGFunc->GetRefCount(); ++i) { + MemOperand *memOpnd = a64CGFunc->CreateStackMemOpnd( + RFP, static_cast(a64CGFunc->GetBeginOffset() + i * k8BitSize), k64BitSize); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ref_init_x, *memOpnd); + + bb->InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + } +} + +/* insert pseudoInsns for ehBB, R0 and R1 are defined in pseudoInsns */ +void AArch64ReachingDefinition::InitEhDefine(BB &bb) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + /* Insert MOP_pseudo_eh_def_x R1. */ + RegOperand ®Opnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regOpnd1); + bb.InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + + /* insert MOP_pseudo_eh_def_x R0. */ + RegOperand ®Opnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &newPseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regOpnd2); + bb.InsertInsnBegin(newPseudoInsn); + pseudoInsns.emplace_back(&newPseudoInsn); +} + +/* insert pseudoInsns for return value R0/V0 */ +void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { + AArch64reg regNO = static_cast(cgFunc)->GetReturnRegisterNumber(); + if (regNO == kInvalidRegNO) { + return; + } + + if (regNO == R0) { + RegOperand ®Opnd = + static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyInt); + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, regOpnd); + bb.AppendInsn(retInsn); + pseudoInsns.emplace_back(&retInsn); + } else if (regNO == V0) { + RegOperand ®Opnd = + static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyFloat); + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, regOpnd); + bb.AppendInsn(retInsn); + pseudoInsns.emplace_back(&retInsn); + } +} + +void AArch64ReachingDefinition::AddRetPseudoInsns() { + uint32 exitBBSize = cgFunc->GetExitBBsVec().size(); + if (exitBBSize == 0) { + if (cgFunc->GetLastBB()->GetPrev()->GetFirstStmt() == cgFunc->GetCleanupLabel() && + cgFunc->GetLastBB()->GetPrev()->GetPrev()) { + AddRetPseudoInsn(*cgFunc->GetLastBB()->GetPrev()->GetPrev()); + } else { + AddRetPseudoInsn(*cgFunc->GetLastBB()->GetPrev()); + } + } else { + for (uint32 i = 0; i < exitBBSize; ++i) { + AddRetPseudoInsn(*cgFunc->GetExitBB(i)); + } + } +} + +void AArch64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) { + for (auto reg : static_cast(insn.GetOperand(index)).GetOperands()) { + regGen[bb.GetId()]->SetBit(static_cast(reg)->GetRegisterNumber()); + } +} + +void AArch64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) { + for (auto reg : static_cast(insn.GetOperand(index)).GetOperands()) { + regUse[bb.GetId()]->SetBit(static_cast(reg)->GetRegisterNumber()); + } +} + +/* all caller saved register are modified by call insn */ +void AArch64ReachingDefinition::GenAllCallerSavedRegs(BB &bb, Insn &insn) { + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + for (auto i : callerSaveRegs) { + regGen[bb.GetId()]->SetBit(i); + } + } else { + for (uint32 i = R0; i <= V31; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + regGen[bb.GetId()]->SetBit(i); + } + } + } +} + +/* reg killed killed by call insn */ +bool 
AArch64ReachingDefinition::IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const { + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + return callerSaveRegs.find(regNO) != callerSaveRegs.end(); + } else { + return AArch64Abi::IsCallerSaveReg(static_cast(regNO)); + } +} + +bool AArch64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &startInsn, + const Insn &endInsn, regno_t regNO) const { + DEBUG_ASSERT(startInsn.GetBB() == endInsn.GetBB(), "two insns must be in same bb"); + if (CGOptions::DoIPARA()) { + for (const Insn *insn = &startInsn; insn != endInsn.GetNext(); insn = insn->GetNext()) { + if (insn->IsMachineInstruction() && insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + return true; + } + } + return false; + } else { + return HasCallBetweenInsnInSameBB(startInsn, endInsn); + } +} +/* + * find definition for register between startInsn and endInsn. + * startInsn and endInsn is not in same BB + * make sure that in path between startBB and endBB there is no redefine. + */ +std::vector AArch64ReachingDefinition::FindRegDefBetweenInsnGlobal( + uint32 regNO, Insn *startInsn, Insn *endInsn) const { + DEBUG_ASSERT(startInsn->GetBB() != endInsn->GetBB(), "call FindRegDefBetweenInsn please"); + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + /* check startBB */ + BB *startBB = startInsn->GetBB(); + std::vector startBBdefInsnVec = FindRegDefBetweenInsn(regNO, startInsn->GetNext(), startBB->GetLastInsn()); + if (startBBdefInsnVec.size() == 1) { + defInsnVec.emplace_back(*startBBdefInsnVec.begin()); + } + if (startBBdefInsnVec.size() > 1 || + (startBBdefInsnVec.empty() && regOut[startBB->GetId()]->TestBit(regNO))) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + if (IsCallerSavedReg(regNO) && startInsn->GetNext() != nullptr && + KilledByCallBetweenInsnInSameBB(*startInsn->GetNext(), *startBB->GetLastInsn(), regNO)) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + /* check endBB */ + BB *endBB = endInsn->GetBB(); + std::vector endBBdefInsnVec = FindRegDefBetweenInsn(regNO, endBB->GetFirstInsn(), endInsn->GetPrev()); + if (endBBdefInsnVec.size() == 1) { + defInsnVec.emplace_back(*endBBdefInsnVec.begin()); + } + if (endBBdefInsnVec.size() > 1 || (endBBdefInsnVec.empty() && regIn[endBB->GetId()]->TestBit(regNO))) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + if (IsCallerSavedReg(regNO) && endInsn->GetPrev() != nullptr && + KilledByCallBetweenInsnInSameBB(*endBB->GetFirstInsn(), *endInsn->GetPrev(), regNO)) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + InsnSet defInsnSet; + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[endBB->GetId()] = kNormalVisited; + visitedBB[startBB->GetId()] = kNormalVisited; + std::list pathStatus; + if (DFSFindRegInfoBetweenBB(*startBB, *endBB, regNO, visitedBB, pathStatus, kDumpRegIn)) { + defInsnVec.emplace_back(endInsn); + } + return defInsnVec; +} + +static bool IsRegInAsmList(Insn *insn, uint32 index, uint32 regNO, InsnSet &insnSet) { + for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { + if (static_cast(reg)->GetRegisterNumber() == regNO) { + insnSet.insert(insn); + return true; + } + } + return false; +} + +void AArch64ReachingDefinition::FindRegDefInBB(uint32 regNO, BB &bb, 
InsnSet &defInsnSet) const { + if (!regGen[bb.GetId()]->TestBit(regNO)) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + if (IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, defInsnSet)) { + continue; + } + IsRegInAsmList(insn, kAsmClobberListOpnd, regNO, defInsnSet); + continue; + } + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + (void)defInsnSet.insert(insn); + continue; + } + if (insn->IsRegDefined(regNO)) { + (void)defInsnSet.insert(insn); + } + } +} + +/* check whether call insn changed the stack status or not. */ +bool AArch64ReachingDefinition::CallInsnClearDesignateStackRef(const Insn &callInsn, int64 offset) const { + return offset == callInsn.GetClearStackOffset(kFirstClearMemIndex) || + offset == callInsn.GetClearStackOffset(kSecondClearMemIndex); +} + +/* + * find definition for stack memory operand between startInsn and endInsn. + * startInsn and endInsn must be in same BB and startInsn and endInsn are included + * special case: + * MCC_ClearLocalStackRef clear designate stack position, the designate stack position is thought defined + * for example: + * add x0, x29, #24 + * bl MCC_ClearLocalStackRef + */ +std::vector AArch64ReachingDefinition::FindMemDefBetweenInsn( + uint32 offset, const Insn *startInsn, Insn *endInsn) const { + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + DEBUG_ASSERT(endInsn->GetId() >= startInsn->GetId(), "two insns must be in a same BB"); + if (!memGen[startInsn->GetBB()->GetId()]->TestBit(offset / kMemZoomSize)) { + return defInsnVec; + } + + for (Insn *insn = endInsn; insn != nullptr && insn != startInsn->GetPrev(); insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + if (insn->IsAsmModMem()) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + continue; + } + + if (insn->IsCall()) { + if (CallInsnClearDesignateStackRef(*insn, offset)) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + continue; + } + + if (!(insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode()))) { + continue; + } + + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || index != nullptr) { + break; + } + + if (!insn->IsSpillInsn() && cgFunc->IsAfterRegAlloc()) { + break; + } + + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if ((offset == memOffset) || + (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode()))) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + } + } + } + return defInsnVec; +} + +void AArch64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const { + if (!memGen[bb.GetId()]->TestBit(offset / kMemZoomSize)) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_asm) { + if (insn->IsAsmModMem()) { + 
(void)defInsnSet.insert(insn); + } + continue; + } + if (CallInsnClearDesignateStackRef(*insn, offset)) { + (void)defInsnSet.insert(insn); + } + continue; + } + + if (!(insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode()))) { + continue; + } + + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || index != nullptr) { + break; + } + + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (offset == memOffset) { + (void)defInsnSet.insert(insn); + break; + } + if (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + (void)defInsnSet.insert(insn); + break; + } + } + } + } +} + +/* + * find defininition for register Iteratively. + * input: + * startBB: find definnition starting from startBB + * regNO: the No of register to be find + * visitedBB: record these visited BB + * defInsnSet: insn defining register is saved in this set + */ +void AArch64ReachingDefinition::DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, + std::vector &visitedBB, InsnSet &defInsnSet) const { + std::vector defInsnVec; + for (auto predBB : startBB.GetPreds()) { + if (visitedBB[predBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[predBB->GetId()] = kNormalVisited; + if (regGen[predBB->GetId()]->TestBit(regNO) || (regNO == kRFLAG && predBB->HasCall())) { + defInsnVec.clear(); + defInsnVec = FindRegDefBetweenInsn(regNO, predBB->GetFirstInsn(), predBB->GetLastInsn()); + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + } else if (regIn[predBB->GetId()]->TestBit(regNO)) { + DFSFindDefForRegOpnd(*predBB, regNO, visitedBB, defInsnSet); + } + } + + for (auto predEhBB : startBB.GetEhPreds()) { + if (visitedBB[predEhBB->GetId()] == kEHVisited) { + continue; + } + visitedBB[predEhBB->GetId()] = kEHVisited; + if (regGen[predEhBB->GetId()]->TestBit(regNO) || (regNO == kRFLAG && predEhBB->HasCall())) { + FindRegDefInBB(regNO, *predEhBB, defInsnSet); + } + + if (regIn[predEhBB->GetId()]->TestBit(regNO)) { + DFSFindDefForRegOpnd(*predEhBB, regNO, visitedBB, defInsnSet); + } + } +} + +/* + * find defininition for stack memory iteratively. 
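+ * (the same DFS over preds and EH-preds as DFSFindDefForRegOpnd above,
+ * keyed by offset / kMemZoomSize instead of regNO)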
+ * input: + * startBB: find definnition starting from startBB + * offset: the offset of memory to be find + * visitedBB: record these visited BB + * defInsnSet: insn defining register is saved in this set + */ +void AArch64ReachingDefinition::DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, + std::vector &visitedBB, InsnSet &defInsnSet) const { + std::vector defInsnVec; + for (auto predBB : startBB.GetPreds()) { + if (visitedBB[predBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[predBB->GetId()] = kNormalVisited; + if (memGen[predBB->GetId()]->TestBit(offset / kMemZoomSize)) { + defInsnVec.clear(); + defInsnVec = FindMemDefBetweenInsn(offset, predBB->GetFirstInsn(), predBB->GetLastInsn()); + DEBUG_ASSERT(!defInsnVec.empty(), "opnd must be defined in this bb"); + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + } else if (memIn[predBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindDefForMemOpnd(*predBB, offset, visitedBB, defInsnSet); + } + } + + for (auto predEhBB : startBB.GetEhPreds()) { + if (visitedBB[predEhBB->GetId()] == kEHVisited) { + continue; + } + visitedBB[predEhBB->GetId()] = kEHVisited; + if (memGen[predEhBB->GetId()]->TestBit(offset / kMemZoomSize)) { + FindMemDefInBB(offset, *predEhBB, defInsnSet); + } + + if (memIn[predEhBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindDefForMemOpnd(*predEhBB, offset, visitedBB, defInsnSet); + } + } +} + +/* + * find defininition for register. + * input: + * insn: the insn in which register is used + * indexOrRegNO: the index of register in insn or the No of register to be find + * isRegNO: if indexOrRegNO is index, this argument is false, else is true + * return: + * the set of definition insns for register + */ +InsnSet AArch64ReachingDefinition::FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const { + uint32 regNO = indexOrRegNO; + if (!isRegNO) { + Operand &opnd = insn.GetOperand(indexOrRegNO); + auto ®Opnd = static_cast(opnd); + regNO = regOpnd.GetRegisterNumber(); + } + + std::vector defInsnVec; + if (regGen[insn.GetBB()->GetId()]->TestBit(regNO)) { + defInsnVec = FindRegDefBetweenInsn(regNO, insn.GetBB()->GetFirstInsn(), insn.GetPrev()); + } + InsnSet defInsnSet; + if (!defInsnVec.empty()) { + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + return defInsnSet; + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + if (insn.GetBB()->IsCleanup()) { + DFSFindDefForRegOpnd(*insn.GetBB(), regNO, visitedBB, defInsnSet); + if (defInsnSet.empty()) { + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + if (regGen[bb->GetId()]->TestBit(regNO)) { + FindRegDefInBB(regNO, *bb, defInsnSet); + } + } + } + } else { + DFSFindDefForRegOpnd(*insn.GetBB(), regNO, visitedBB, defInsnSet); + } + return defInsnSet; +} + +bool AArch64ReachingDefinition::FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, + BB* movBB) const { + if (startInsn == nullptr || endInsn == nullptr) { + return false; + } + if (startInsn->GetBB() == endInsn->GetBB()) { + if (startInsn->GetNextMachineInsn() == endInsn) { + return false; + } else { + return FindRegUsingBetweenInsn(regNO, startInsn->GetNextMachineInsn(), endInsn->GetPreviousMachineInsn()); + } + } else { + /* check Start BB */ + BB* startBB = startInsn->GetBB(); + if (FindRegUsingBetweenInsn(regNO, startInsn->GetNextMachineInsn(), startBB->GetLastInsn())) { + return true; + } + /* check End BB */ + BB *endBB = endInsn->GetBB(); + if (FindRegUsingBetweenInsn(regNO, endBB->GetFirstInsn(), 
endInsn->GetPreviousMachineInsn())) { + return true; + } + /* Global : startBB cannot dominate BB which it doesn't dominate before */ + if (startBB == movBB) { + return false; /* it will not change dominate */ + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[movBB->GetId()] = kNormalVisited; + visitedBB[startBB->GetId()] = kNormalVisited; + if (DFSFindRegDomianBetweenBB(*startBB, regNO, visitedBB)) { + return true; + } + } + return false; +} + +bool AArch64ReachingDefinition::HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) const { + CHECK_FATAL((startInsn.GetBB() != endInsn.GetBB()), "Is same BB!"); + /* check Start BB */ + BB* startBB = startInsn.GetBB(); + auto startInsnSet = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startBB->GetLastInsn()); + if (!startInsnSet.empty()) { + return true; + } + /* check End BB */ + BB *endBB = endInsn.GetBB(); + auto endInsnSet = FindRegDefBetweenInsn(regNO, endBB->GetFirstInsn(), endInsn.GetPrev()); + if (!endInsnSet.empty()) { + return true; + } + if (!startBB->GetSuccs().empty()) { + for (auto *succ : startBB->GetSuccs()) { + if (succ == endBB) { + return (!startInsnSet.empty() && !endInsnSet.empty()); + } + } + } + /* check bb Between start and end */ + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[startBB->GetId()] = kNormalVisited; + visitedBB[endBB->GetId()] = kNormalVisited; + return DFSFindRegDefBetweenBB(*startBB, *endBB, regNO, visitedBB); +} + +bool AArch64ReachingDefinition::DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const { + if (&startBB == &endBB) { + return false; + } + for (auto succBB : startBB.GetSuccs()) { + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + if (regGen[succBB->GetId()]->TestBit(regNO)) { + return true; + } + if (DFSFindRegDefBetweenBB(*succBB, endBB, regNO, visitedBB)) { + return true; + } + } + return false; +} + +bool AArch64ReachingDefinition::DFSFindRegDomianBetweenBB(const BB startBB, uint32 regNO, + std::vector &visitedBB) const { + for (auto succBB : startBB.GetSuccs()) { + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + if (regIn[succBB->GetId()]->TestBit(regNO)) { + return true; + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (DFSFindRegDomianBetweenBB(*succBB, regNO, visitedBB)) { + return true; + } + } + CHECK_FATAL(startBB.GetEhSuccs().empty(), "C Module have no eh"); + return false; +} + +bool AArch64ReachingDefinition::DFSFindRegInfoBetweenBB(const BB startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB, + std::list &pathStatus, DumpType infoType) const { + for (auto succBB : startBB.GetSuccs()) { + if (succBB == &endBB) { + for (auto status : pathStatus) { + if (!status) { + return true; + } + } + continue; + } + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + /* path is no clean check regInfo */ + bool isPathClean = true; + switch (infoType) { + case kDumpRegUse: { + isPathClean = !regUse[succBB->GetId()]->TestBit(regNO); + break; + } + case kDumpRegGen: { + isPathClean = !regGen[succBB->GetId()]->TestBit(regNO); + break; + } + case kDumpRegIn: { + isPathClean = !(regIn[succBB->GetId()]->TestBit(regNO) || regGen[succBB->GetId()]->TestBit(regNO)); + break; + } + default: + CHECK_FATAL(false, "NIY"); + } + pathStatus.emplace_back(isPathClean); + if 
(DFSFindRegInfoBetweenBB(*succBB, endBB, regNO, visitedBB, pathStatus, infoType)) { + return true; + } + pathStatus.pop_back(); + } + CHECK_FATAL(startBB.GetEhSuccs().empty(), "C Module have no eh"); + return false; +} + +bool AArch64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const { + if (startInsn == nullptr || endInsn == nullptr) { + return false; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_asm) { + InsnSet Temp; + if (IsRegInAsmList(insn, kAsmInputListOpnd, regNO, Temp) || + IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, Temp)) { + return true; + } + continue; + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + continue; + } + + auto *regProp = md->opndMD[i]; + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + return true; + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + return true; + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + return true; + } + } + } + return false; +} + +/* + * find insn using register between startInsn and endInsn. 
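+ * Returns true ("findFinish") once the register is redefined or killed by a
+ * call, telling the caller that the search cannot extend past that point.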
+ * startInsn and endInsn must be in same BB and startInsn and endInsn are included + */ +bool AArch64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, + InsnSet ®UseInsnSet) const { + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_asm) { + IsRegInAsmList(insn, kAsmInputListOpnd, regNO, regUseInsnSet); + if (IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, regUseInsnSet)) { + break; + } + continue; + } + /* if insn is call and regNO is caller-saved register, then regNO will not be used later */ + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + findFinish = true; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + (void)regUseInsnSet.insert(insn); + } + } + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + if (baseOpnd != nullptr && + (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && + baseOpnd->GetRegisterNumber() == regNO) { + findFinish = true; + } + } + + auto *regProp = md->opndMD[i]; + if (regProp->IsDef() && opnd.IsRegister() && + (static_cast(opnd).GetRegisterNumber() == regNO)) { + findFinish = true; + } + + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } + + if (findFinish) { + break; + } + } + return findFinish; +} + +/* + * find insn using stack memory operand between startInsn and endInsn. 
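+ * Stack slots are identified by their frame offset; a store to the slot (or a clear-stack call) terminates the scan.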
+ * startInsn and endInsn must be in same BB and startInsn and endInsn are included + */ +bool AArch64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &memUseInsnSet) const { + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + DEBUG_ASSERT(endInsn->GetId() >= startInsn->GetId(), "end ID must be greater than or equal to start ID"); + + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_asm) { + return true; + } + if (CallInsnClearDesignateStackRef(*insn, offset)) { + return true; + } + continue; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (!opnd.IsMemoryAccessOperand()) { + continue; + } + + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base == nullptr || !IsFrameReg(*base)) { + continue; + } + + DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "offset must not be Register for frame MemOperand"); + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetValue(); + + if (insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { + if (memOffset == offset) { + findFinish = true; + continue; + } + if (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + findFinish = true; + continue; + } + } + + if (!md->opndMD[i]->IsUse()) { + continue; + } + + if (offset == memOffset) { + (void)memUseInsnSet.insert(insn); + } else if (insn->IsLoadPair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + (void)memUseInsnSet.insert(insn); + } + } + + if (findFinish) { + break; + } + } + return findFinish; +} + +/* find all definition for stack memory operand insn.opnd[index] */ +InsnSet AArch64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset) const { + InsnSet defInsnSet; + int64 memOffSet = 0; + if (!isOffset) { + Operand &opnd = insn.GetOperand(indexOrOffset); + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *indexReg = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || indexReg) { + return defInsnSet; + } + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + memOffSet = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + } else { + memOffSet = indexOrOffset; + } + std::vector defInsnVec; + if (memGen[insn.GetBB()->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + defInsnVec = FindMemDefBetweenInsn(memOffSet, insn.GetBB()->GetFirstInsn(), insn.GetPrev()); + } + + if (!defInsnVec.empty()) { + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + return defInsnSet; + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + if (insn.GetBB()->IsCleanup()) { + DFSFindDefForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, defInsnSet); + if (defInsnSet.empty()) { + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + + if 
(memGen[bb->GetId()]->TestBit(static_cast<uint32>(memOffSet / kMemZoomSize))) {
+                    FindMemDefInBB(memOffSet, *bb, defInsnSet);
+                }
+            }
+        }
+    } else {
+        DFSFindDefForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, defInsnSet);
+    }
+
+    return defInsnSet;
+}
+
+/*
+ * find all insns using stack memory operand insn.opnd[index]
+ * secondMem is used to represent the second stack memory operand in a store-pair insn
+ */
+InsnSet AArch64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const {
+    Operand &opnd = insn.GetOperand(index);
+    DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand");
+    auto &memOpnd = static_cast<MemOperand&>(opnd);
+    RegOperand *base = memOpnd.GetBaseRegister();
+
+    InsnSet useInsnSet;
+    if (base == nullptr || !IsFrameReg(*base)) {
+        return useInsnSet;
+    }
+
+    DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "IndexRegister must be nullptr");
+    DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be an immediate value");
+    int64 memOffSet = memOpnd.GetOffsetImmediate()->GetOffsetValue();
+    if (secondMem) {
+        DEBUG_ASSERT(insn.IsStorePair(), "second MemOperand can only be defined in stp insn");
+        memOffSet += GetEachMemSizeOfPair(insn.GetMachineOpcode());
+    }
+    /* memOperand may be redefined in current BB */
+    bool findFinish = FindMemUseBetweenInsn(memOffSet, insn.GetNext(), insn.GetBB()->GetLastInsn(), useInsnSet);
+    std::vector<bool> visitedBB(kMaxBBNum, false);
+    if (findFinish || !memOut[insn.GetBB()->GetId()]->TestBit(static_cast<uint32>(memOffSet / kMemZoomSize))) {
+        if (insn.GetBB()->GetEhSuccs().size() != 0) {
+            DFSFindUseForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, useInsnSet, true);
+        }
+    } else {
+        DFSFindUseForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, useInsnSet, false);
+    }
+    if (!insn.GetBB()->IsCleanup() && firstCleanUpBB) {
+        if (memUse[firstCleanUpBB->GetId()]->TestBit(static_cast<uint32>(memOffSet / kMemZoomSize))) {
+            findFinish = FindMemUseBetweenInsn(memOffSet, firstCleanUpBB->GetFirstInsn(),
+                                               firstCleanUpBB->GetLastInsn(), useInsnSet);
+            if (findFinish || !memOut[firstCleanUpBB->GetId()]->TestBit(static_cast<uint32>(memOffSet / kMemZoomSize))) {
+                return useInsnSet;
+            }
+        }
+        DFSFindUseForMemOpnd(*firstCleanUpBB, memOffSet, visitedBB, useInsnSet, false);
+    }
+    return useInsnSet;
+}
+
+/*
+ * initialize bb.gen and bb.use
+ * if this is not the first computation, bb.gen and bb.use must be cleared first
+ */
+void AArch64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) {
+    if (!firstTime && (mode & kRDRegAnalysis)) {
+        regGen[bb.GetId()]->ResetAllBit();
+        regUse[bb.GetId()]->ResetAllBit();
+    }
+    if (!firstTime && (mode & kRDMemAnalysis)) {
+        memGen[bb.GetId()]->ResetAllBit();
+        memUse[bb.GetId()]->ResetAllBit();
+    }
+
+    if (bb.IsEmpty()) {
+        return;
+    }
+
+    FOR_BB_INSNS(insn, (&bb)) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+
+        if (insn->GetMachineOpcode() == MOP_asm) {
+            GenAllAsmDefRegs(bb, *insn, kAsmOutputListOpnd);
+            GenAllAsmDefRegs(bb, *insn, kAsmClobberListOpnd);
+            GenAllAsmUseRegs(bb, *insn, kAsmInputListOpnd);
+            continue;
+        }
+        if (insn->IsCall() || insn->IsTailCall()) {
+            GenAllCallerSavedRegs(bb, *insn);
+            InitMemInfoForClearStackCall(*insn);
+        }
+
+        const InsnDesc *md = insn->GetDesc();
+        uint32 opndNum = insn->GetOperandSize();
+        for (uint32 i = 0; i < opndNum; ++i) {
+            Operand &opnd = insn->GetOperand(i);
+            auto *regProp = md->opndMD[i];
+            if (opnd.IsList() && (mode & kRDRegAnalysis)) {
+                DEBUG_ASSERT(regProp->IsUse(), "ListOperand is used in insn");
+                InitInfoForListOpnd(bb, opnd);
+            } else if
(opnd.IsMemoryAccessOperand()) { + InitInfoForMemOperand(*insn, opnd, regProp->IsDef()); + } else if (opnd.IsConditionCode() && (mode & kRDRegAnalysis)) { + DEBUG_ASSERT(regProp->IsUse(), "condition code is used in insn"); + InitInfoForConditionCode(bb); + } else if (opnd.IsRegister() && (mode & kRDRegAnalysis)) { + InitInfoForRegOpnd(bb, opnd, regProp->IsDef()); + } + } + } +} + +void AArch64ReachingDefinition::InitMemInfoForClearStackCall(Insn &callInsn) { + if (!(mode & kRDMemAnalysis) || !callInsn.IsClearDesignateStackCall()) { + return; + } + int64 firstOffset = callInsn.GetClearStackOffset(kFirstClearMemIndex); + constexpr int64 defaultValOfClearMemOffset = -1; + if (firstOffset != defaultValOfClearMemOffset) { + memGen[callInsn.GetBB()->GetId()]->SetBit(firstOffset / kMemZoomSize); + } + int64 secondOffset = callInsn.GetClearStackOffset(kSecondClearMemIndex); + if (secondOffset != defaultValOfClearMemOffset) { + memGen[callInsn.GetBB()->GetId()]->SetBit(static_cast(secondOffset / kMemZoomSize)); + } +} + +void AArch64ReachingDefinition::InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef) { + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr) { + return; + } + if ((mode & kRDMemAnalysis) && IsFrameReg(*base)) { + if (index != nullptr) { + SetAnalysisMode(kRDRegAnalysis); + return; + } + CHECK_FATAL(index == nullptr, "Existing [x29 + index] Memory Address"); + DEBUG_ASSERT(memOpnd.GetOffsetImmediate(), "offset must be a immediate value"); + int64 offsetVal = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if ((offsetVal % kMemZoomSize) != 0) { + SetAnalysisMode(kRDRegAnalysis); + } + + if (!isDef) { + memUse[insn.GetBB()->GetId()]->SetBit(offsetVal / kMemZoomSize); + if (insn.IsLoadPair()) { + int64 nextMemOffset = offsetVal + GetEachMemSizeOfPair(insn.GetMachineOpcode()); + memUse[insn.GetBB()->GetId()]->SetBit(nextMemOffset / kMemZoomSize); + } + } else if (isDef) { + memGen[insn.GetBB()->GetId()]->SetBit(offsetVal / kMemZoomSize); + if (insn.IsStorePair()) { + int64 nextMemOffset = offsetVal + GetEachMemSizeOfPair(insn.GetMachineOpcode()); + memGen[insn.GetBB()->GetId()]->SetBit(nextMemOffset / kMemZoomSize); + } + } + } + + if ((mode & kRDRegAnalysis) != 0) { + regUse[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); + if (index != nullptr) { + regUse[insn.GetBB()->GetId()]->SetBit(index->GetRegisterNumber()); + } + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + /* Base operand has changed. 
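A pre/post-indexed access writes the updated address back to the base register, so the base is also recorded as a def.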
*/ + regGen[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); + } + } +} + +void AArch64ReachingDefinition::InitInfoForListOpnd(const BB &bb, Operand &opnd) { + ListOperand *listOpnd = static_cast(&opnd); + for (auto listElem : listOpnd->GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "used Operand in call insn must be Register"); + regUse[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } +} + +void AArch64ReachingDefinition::InitInfoForConditionCode(const BB &bb) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + regUse[bb.GetId()]->SetBit(rflagReg.GetRegisterNumber()); +} + +void AArch64ReachingDefinition::InitInfoForRegOpnd(const BB &bb, Operand &opnd, bool isDef) { + RegOperand *regOpnd = static_cast(&opnd); + if (!isDef) { + regUse[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } else { + regGen[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } +} + +int32 AArch64ReachingDefinition::GetStackSize() const { + const int sizeofFplr = kDivide2 * kIntregBytelen; + return static_cast(static_cast(cgFunc->GetMemlayout())->RealStackFrameSize() + sizeofFplr); +} + +bool AArch64ReachingDefinition::IsCallerSavedReg(uint32 regNO) const { + return AArch64Abi::IsCallerSaveReg(static_cast(regNO)); +} + +int64 AArch64ReachingDefinition::GetEachMemSizeOfPair(MOperator opCode) const { + switch (opCode) { + case MOP_wstp: + case MOP_sstp: + case MOP_wstlxp: + case MOP_wldp: + case MOP_xldpsw: + case MOP_sldp: + case MOP_wldaxp: + return kWordByteNum; + case MOP_xstp: + case MOP_dstp: + case MOP_xstlxp: + case MOP_xldp: + case MOP_dldp: + case MOP_xldaxp: + return kDoubleWordByteNum; + default: + return 0; + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b60a447676facdaab0a30c77b42e0ed5be0cde1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp @@ -0,0 +1,427 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_reg_coalesce.h" +#include "cg.h" +#include "cg_option.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +namespace maplebe { + +#define REGCOAL_DUMP CG_DEBUG_FUNC(*cgFunc) + +bool AArch64LiveIntervalAnalysis::IsUnconcernedReg(const RegOperand ®Opnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + if (regOpnd.GetRegisterNumber() == RZR) { + return true; + } + if (!regOpnd.IsVirtualRegister()) { + return true; + } + return false; +} + +LiveInterval *AArch64LiveIntervalAnalysis::GetOrCreateLiveInterval(regno_t regNO) { + LiveInterval *lr = GetLiveInterval(regNO); + if (lr == nullptr) { + lr = memPool->New(alloc); + vregIntervals[regNO] = lr; + lr->SetRegNO(regNO); + } + return lr; +} + +void AArch64LiveIntervalAnalysis::UpdateCallInfo() { + for (auto vregNO : vregLive) { + LiveInterval *lr = GetLiveInterval(vregNO); + if (lr == nullptr) { + return; + } + lr->IncNumCall(); + } +} + +void AArch64LiveIntervalAnalysis::SetupLiveIntervalByOp(Operand &op, Insn &insn, bool isDef) { + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regOpnd)) { + return; + } + LiveInterval *lr = GetOrCreateLiveInterval(regNO); + uint32 point = isDef ? insn.GetId() : (insn.GetId() - 1); + lr->AddRange(insn.GetBB()->GetId(), point, vregLive.find(regNO) != vregLive.end()); + if (lr->GetRegType() == kRegTyUndef) { + lr->SetRegType(regOpnd.GetRegisterType()); + } + if (candidates.find(regNO) != candidates.end()) { + lr->AddRefPoint(&insn, isDef); + } + if (isDef) { + vregLive.erase(regNO); + } else { + vregLive.insert(regNO); + } +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachDefOperand(Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveIntervalByOp(*static_cast(opnd), insn, true); + } + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (!memOpnd.IsIntactIndexed()) { + SetupLiveIntervalByOp(opnd, insn, true); + } + } + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + SetupLiveIntervalByOp(opnd, insn, true); + } +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachUseOperand(Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && i == kAsmInputListOpnd) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveIntervalByOp(*static_cast(opnd), insn, false); + } + continue; + } + if (md->GetOpndDes(i)->IsRegDef() && !md->GetOpndDes(i)->IsRegUse()) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + SetupLiveIntervalByOp(*op, insn, false); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = 
memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveIntervalByOp(*base, insn, false); + } + if (offset != nullptr) { + SetupLiveIntervalByOp(*offset, insn, false); + } + } else if (opnd.IsPhi()) { + auto &phiOpnd = static_cast(opnd); + for (auto opIt : phiOpnd.GetOperands()) { + SetupLiveIntervalByOp(*opIt.second, insn, false); + } + } else { + SetupLiveIntervalByOp(opnd, insn, false); + } + } +} + +/* handle live range for bb->live_out */ +void AArch64LiveIntervalAnalysis::SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint) { + --currPoint; + + if (liveOut >= kAllRegNum) { + (void)vregLive.insert(liveOut); + LiveInterval *lr = GetOrCreateLiveInterval(liveOut); + if (lr == nullptr) { + return; + } + lr->AddRange(bb.GetId(), currPoint, false); + return; + } +} + +void AArch64LiveIntervalAnalysis::CollectCandidate() { + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (IsRegistersCopy(*insn)) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (regDest.GetRegisterNumber() == regSrc.GetRegisterNumber()) { + continue; + } + if (regDest.IsVirtualRegister()) { + candidates.insert(regDest.GetRegisterNumber()); + } + if (regSrc.IsVirtualRegister()) { + candidates.insert(regSrc.GetRegisterNumber()); + } + } + } + } +} + +bool AArch64LiveIntervalAnalysis::IsRegistersCopy(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xmovrr || mOp == MOP_wmovrr || mOp == MOP_xvmovs || mOp == MOP_xvmovd) { + return true; + } + return false; +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervals() { + /* colloct refpoints and build interfere only for cands. 
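Only registers that appear in a copy (mov vreg, vreg) become candidates, which keeps the interference checks cheap.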
*/ + CollectCandidate(); + + uint32 currPoint = static_cast(cgFunc->GetTotalNumberOfInstructions()) + + static_cast(bfs->sortedBBs.size()); + /* distinguish use/def */ + CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); + currPoint = currPoint << 2; + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + vregLive.clear(); + for (auto liveOut : bb->GetLiveOutRegNO()) { + SetupLiveIntervalInLiveOut(liveOut, *bb, currPoint); + } + --currPoint; + + if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { + UpdateCallInfo(); + } + + FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { + if (!runAnalysis) { + insn->SetId(currPoint); + } + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + --currPoint; + if (ninsn != nullptr && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(); + } + continue; + } + + ComputeLiveIntervalsForEachDefOperand(*insn); + ComputeLiveIntervalsForEachUseOperand(*insn); + + if (ninsn != nullptr && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(); + } + + /* distinguish use/def */ + currPoint -= 2; + } + for (auto lin : bb->GetLiveInRegNO()) { + if (lin >= kAllRegNum) { + LiveInterval *li = GetLiveInterval(lin); + if (li != nullptr) { + li->AddRange(bb->GetId(), currPoint, currPoint); + } + } + } + /* move one more step for each BB */ + --currPoint; + } + + if (REGCOAL_DUMP) { + LogInfo::MapleLogger() << "\nAfter ComputeLiveIntervals\n"; + Dump(); + } +} + +void AArch64LiveIntervalAnalysis::CheckInterference(LiveInterval &li1, LiveInterval &li2) const { + auto ranges1 = li1.GetRanges(); + auto ranges2 = li2.GetRanges(); + bool conflict = false; + for (auto range : ranges1) { + auto bbid = range.first; + auto posVec1 = range.second; + auto it = ranges2.find(bbid); + if (it == ranges2.end()) { + continue; + } else { + /* check overlap */ + auto posVec2 = it->second; + for (auto pos1 : posVec1) { + for (auto pos2 : posVec2) { + if (!((pos1.first < pos2.first && pos1.second < pos2.first) || + (pos2.first < pos1.second && pos2.second < pos1.first))) { + conflict = true; + break; + } + } + } + } + } + if (conflict) { + li1.AddConflict(li2.GetRegNO()); + li2.AddConflict(li1.GetRegNO()); + } + return; +} + +/* replace regDest with regSrc. 
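All recorded def and use points of regDest are rewritten to regSrc, and the two live intervals are then merged.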
*/ +void AArch64LiveIntervalAnalysis::CoalesceRegPair(RegOperand ®Dest, RegOperand ®Src) { + LiveInterval *lrDest = GetLiveInterval(regDest.GetRegisterNumber()); + if (lrDest == nullptr) { + return; + } + LiveInterval *lrSrc = GetLiveInterval(regSrc.GetRegisterNumber()); + /* replace dest with src */ + if (regDest.GetSize() != regSrc.GetSize()) { + CHECK_FATAL(cgFunc->IsExtendReg(regDest.GetRegisterNumber()) || + cgFunc->IsExtendReg(regSrc.GetRegisterNumber()), "expect equal size in reg coalesce"); + cgFunc->InsertExtendSet(regSrc.GetRegisterNumber()); + } + + regno_t destNO = regDest.GetRegisterNumber(); + /* replace all refPoints */ + for (auto insn : lrDest->GetDefPoint()) { + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); + } + for (auto insn : lrDest->GetUsePoint()) { + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); + } + + DEBUG_ASSERT(lrDest && lrSrc, "get live interval failed"); + CoalesceLiveIntervals(*lrDest, *lrSrc); +} + +void AArch64LiveIntervalAnalysis::CollectMoveForEachBB(BB &bb, std::vector &movInsns) const { + FOR_BB_INSNS_SAFE(insn, &bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (IsRegistersCopy(*insn)) { + auto ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { + continue; + } + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + continue; + } + movInsns.emplace_back(insn); + } + } +} + +void AArch64LiveIntervalAnalysis::CoalesceMoves(std::vector &movInsns, bool phiOnly) { + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + bool changed = false; + do { + changed = false; + for (auto insn : movInsns) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + continue; + } + if (!insn->IsPhiMovInsn() && phiOnly) { + continue; + } + if (a64CGFunc->IsRegRematCand(regDest) != a64CGFunc->IsRegRematCand(regSrc)) { + if (insn->IsPhiMovInsn()) { + a64CGFunc->ClearRegRematInfo(regDest); + a64CGFunc->ClearRegRematInfo(regSrc); + } else { + continue; + } + } + if (a64CGFunc->IsRegRematCand(regDest) && a64CGFunc->IsRegRematCand(regSrc) && + !a64CGFunc->IsRegSameRematInfo(regDest, regSrc)) { + if (insn->IsPhiMovInsn()) { + a64CGFunc->ClearRegRematInfo(regDest); + a64CGFunc->ClearRegRematInfo(regSrc); + } else { + continue; + } + } + LiveInterval *li1 = GetLiveInterval(regDest.GetRegisterNumber()); + LiveInterval *li2 = GetLiveInterval(regSrc.GetRegisterNumber()); + if (li1 == nullptr || li2 == nullptr) { + return; + } + CheckInterference(*li1, *li2); + if (!li1->IsConflictWith(regSrc.GetRegisterNumber()) || + (li1->GetDefPoint().size() == 1 && li2->GetDefPoint().size() == 1)) { + if (REGCOAL_DUMP) { + LogInfo::MapleLogger() << "try to coalesce: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + CoalesceRegPair(regDest, regSrc); + changed = true; + } else { + if (insn->IsPhiMovInsn() && phiOnly && REGCOAL_DUMP) { + LogInfo::MapleLogger() << "fail to coalesce: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + } + } + } while (changed); +} + +void AArch64LiveIntervalAnalysis::CoalesceRegisters() { + std::vector movInsns; + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + if (REGCOAL_DUMP) { + cgFunc->DumpCFGToDot("regcoal-"); + LogInfo::MapleLogger() << 
"handle function: " << a64CGFunc->GetFunction().GetName() << std::endl; + } + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + if (!bb->GetCritical()) { + continue; + } + CollectMoveForEachBB(*bb, movInsns); + } + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + if (bb->GetCritical()) { + continue; + } + CollectMoveForEachBB(*bb, movInsns); + } + + /* handle phi move first. */ + CoalesceMoves(movInsns, true); + + /* clean up dead mov */ + a64CGFunc->CleanupDeadMov(REGCOAL_DUMP); +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e02e58c99b92e55f567f5996e6eb505d50eb5f45 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp @@ -0,0 +1,156 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +void AArch64RegInfo::Init() { + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + /* when yieldpoint is enabled, x19 is reserved. */ + if (IsYieldPointReg(regNO)) { + continue; + } + if (regNO == R29 && !GetCurrFunction()->UseFP()) { + AddToAllRegs(regNO); + continue; + } + if (!AArch64Abi::IsAvailableReg(static_cast(regNO))) { + continue; + } + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + AddToIntRegs(regNO); + } else { + AddToFpRegs(regNO); + } + AddToAllRegs(regNO); + } + return; +} + +void AArch64RegInfo::Fini() { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + a64CGFunc->AddtoCalleeSaved(RFP); + a64CGFunc->AddtoCalleeSaved(RLR); + a64CGFunc->NoteFPLRAddedToCalleeSavedList(); +} + +void AArch64RegInfo::SaveCalleeSavedReg(MapleSet savedRegs) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + for (auto reg: savedRegs) { + a64CGFunc->AddtoCalleeSaved(static_cast(reg)); + } +} + +bool AArch64RegInfo::IsSpecialReg(regno_t regno) const { + AArch64reg reg = static_cast(regno); + if ((reg == RLR) || (reg == RSP)) { + return true; + } + + /* when yieldpoint is enabled, the dedicated register can not be allocated. */ + if (IsYieldPointReg(reg)) { + return true; + } + + return false; +} +bool AArch64RegInfo::IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) { + return AArch64Abi::IsSpillRegInRA(static_cast(regNO), has3RegOpnd); +} +bool AArch64RegInfo::IsCalleeSavedReg(regno_t regno) const { + return AArch64Abi::IsCalleeSavedReg(static_cast(regno)); +} +bool AArch64RegInfo::IsYieldPointReg(regno_t regno) const { + /* when yieldpoint is enabled, x19 is reserved. 
*/ + if (GetCurrFunction()->GetCG()->GenYieldPoint()) { + return (static_cast(regno) == RYP); + } + return false; +} +bool AArch64RegInfo::IsUnconcernedReg(regno_t regNO) const { + /* RFP = 32, RLR = 31, RSP = 33, RZR = 34, ccReg */ + if ((regNO >= RLR && regNO <= RZR) || regNO == RFP) { + return true; + } + + /* when yieldpoint is enabled, the RYP(x19) can not be used. */ + if (IsYieldPointReg(regNO)) { + return true; + } + return false; +} + +bool AArch64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { + return true; + } + return IsUnconcernedReg(regNO); +} + +RegOperand *AArch64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, maplebe::RegType kind, uint32 flag) { + AArch64CGFunc *aarch64CgFunc = static_cast(GetCurrFunction()); + return &aarch64CgFunc->GetOrCreatePhysicalRegisterOperand(static_cast(regNO), size, kind, flag); +} + +ListOperand* AArch64RegInfo::CreateListOperand() { + AArch64CGFunc *aarch64CgFunc = static_cast(GetCurrFunction()); + return (aarch64CgFunc->CreateListOpnd(*aarch64CgFunc->GetFuncScopeAllocator())); +} + +Insn *AArch64RegInfo::BuildMovInstruction(Operand &opnd0, Operand &opnd1) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + MOperator mop = a64CGFunc->PickMovInsn(static_cast(opnd0), + static_cast(opnd1)); + Insn *newInsn = &a64CGFunc->GetInsnBuilder()->BuildInsn(mop, opnd0, opnd1); + return newInsn; +} + +Insn *AArch64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return &a64CGFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickStInsn(regSize, stype), phyOpnd, memOpnd); +} + +Insn *AArch64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return &a64CGFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickLdInsn(regSize, stype), phyOpnd, memOpnd); +} + +Insn *AArch64RegInfo::BuildCommentInsn(const std::string &comment) { + return &(static_cast(GetCurrFunction())->CreateCommentInsn("split around loop begin")); +} + +MemOperand *AArch64RegInfo::GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->GetOrCreatSpillMem(vrNum); +} +MemOperand *AArch64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->AdjustMemOperandIfOffsetOutOfRange(memOpnd, static_cast(vrNum), isDest, insn, + static_cast(regNum), isOutOfRange); +} +void AArch64RegInfo::FreeSpillRegMem(regno_t vrNum) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->FreeSpillRegMem(vrNum); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3852ffec710966852bfaeb282026e522f72cee15 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp @@ -0,0 +1,850 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_regsaves.h" +#include "aarch64_cg.h" +#include "aarch64_live.h" +#include "aarch64_cg.h" +#include "aarch64_proepilog.h" +#include "cg_dominance.h" +#include "cg_ssa_pre.h" +#include "cg_ssu_pre.h" + +namespace maplebe { + +#define RS_DUMP GetEnabledDebug() +#define RS_EXTRA (RS_DUMP && true) +#define mLog LogInfo::MapleLogger() +#define threshold 8 +#define ONE_REG_AT_A_TIME 0 + +using BBId = uint32; + +void AArch64RegSavesOpt::InitData() { + calleeBitsDef = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retDef = memset_s(calleeBitsDef, cgFunc->NumBBs() * sizeof(CalleeBitsType), + 0, cgFunc->NumBBs() * sizeof(CalleeBitsType)); + calleeBitsUse = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retUse = memset_s(calleeBitsUse, cgFunc->NumBBs() * sizeof(CalleeBitsType), + 0, cgFunc->NumBBs() * sizeof(CalleeBitsType)); + CHECK_FATAL(retDef == EOK && retUse == EOK, "memset_s of calleesBits failed"); + + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + const MapleVector &sp = aarchCGFunc->GetCalleeSavedRegs(); + if (!sp.empty()) { + if (std::find(sp.begin(), sp.end(), RFP) != sp.end()) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(RFP); + } + if (std::find(sp.begin(), sp.end(), RLR) != sp.end()) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(RLR); + } + } + + for (auto bb : bfs->sortedBBs) { + SetId2bb(bb); + } +} + + +void AArch64RegSavesOpt::CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse) { + if (!opnd.IsRegister()) { + return; + } + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (!AArch64Abi::IsCalleeSavedReg(static_cast(regNO)) || + (regNO >= R29 && regNO <= R31)) { + return; /* check only callee-save registers */ + } + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return; + } + if (isDef) { + /* First def */ + if (!IsCalleeBitSet(GetCalleeBitsDef(), bb.GetId(), regNO)) { + SetCalleeBit(GetCalleeBitsDef(), bb.GetId(), regNO); + } + } + if (isUse) { + /* Last use */ + SetCalleeBit(GetCalleeBitsUse(), bb.GetId(), regNO); + } +} + +void AArch64RegSavesOpt::GenerateReturnBBDefUse(const BB &bb) { + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + if (IsPrimitiveFloat(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpnd, false, true); + } else if (IsPrimitiveInteger(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpnd, false, true); + } +} + +void AArch64RegSavesOpt::ProcessAsmListOpnd(const BB &bb, Operand &opnd, uint32 idx) { + bool isDef = false; + bool isUse = false; + switch (idx) { + case kAsmOutputListOpnd: + case kAsmClobberListOpnd: { + isDef = true; + break; + } + case kAsmInputListOpnd: { + isUse = true; + break; + } + default: + return; + } 
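+    /* record every register in the asm operand list with the def/use kind decided above */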
+ ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, isUse); + } +} + +void AArch64RegSavesOpt::ProcessListOpnd(const BB &bb, Operand &opnd) { + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, false, true); + } +} + +void AArch64RegSavesOpt::ProcessMemOpnd(const BB &bb, Operand &opnd) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + CollectLiveInfo(bb, *base, !memOpnd.IsIntactIndexed(), true); + } + if (offset != nullptr) { + CollectLiveInfo(bb, *offset, false, true); + } +} + +void AArch64RegSavesOpt::ProcessCondOpnd(const BB &bb) { + Operand &rflag = cgFunc->GetOrCreateRflag(); + CollectLiveInfo(bb, rflag, false, true); +} + +/* Record in each local BB the 1st def and the last use of a callee-saved + register */ +void AArch64RegSavesOpt::GetLocalDefUse() { + for (auto bbp : bfs->sortedBBs) { + BB &bb = *bbp; + if (bb.GetKind() == BB::kBBReturn) { + GenerateReturnBBDefUse(bb); + } + if (bb.IsEmpty()) { + continue; + } + + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + bool isAsm = (insn->GetMachineOpcode() == MOP_asm); + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + auto *regProp = md->opndMD[i]; + bool isDef = regProp->IsRegDef(); + bool isUse = regProp->IsRegUse(); + if (opnd.IsList()) { + if (isAsm) { + ProcessAsmListOpnd(bb, opnd, i); + } else { + ProcessListOpnd(bb, opnd); + } + } else if (opnd.IsMemoryAccessOperand()) { + ProcessMemOpnd(bb, opnd); + } else if (opnd.IsConditionCode()) { + ProcessCondOpnd(bb); + } else { + CollectLiveInfo(bb, opnd, isDef, isUse); + } + } /* for all operands */ + } /* for all insns */ + } /* for all sortedBBs */ + + if (RS_DUMP) { + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + mLog << i << " : " << calleeBitsDef[i] << " " << calleeBitsUse[i] << "\n";; + } + } +} + +void AArch64RegSavesOpt::PrintBBs() const { + mLog << "RegSaves LiveIn/Out of BFS nodes:\n"; + for (auto *bb : bfs->sortedBBs) { + mLog << "< === > "; + mLog << bb->GetId(); + mLog << " pred:["; + for (auto *predBB : bb->GetPreds()) { + mLog << " " << predBB->GetId(); + } + mLog << "] succs:["; + for (auto *succBB : bb->GetSuccs()) { + mLog << " " << succBB->GetId(); + } + mLog << "]\n LiveIn of [" << bb->GetId() << "]: "; + for (auto liveIn: bb->GetLiveInRegNO()) { + mLog << liveIn << " "; + } + mLog << "\n LiveOut of [" << bb->GetId() << "]: "; + for (auto liveOut: bb->GetLiveOutRegNO()) { + mLog << liveOut << " "; + } + mLog << "\n"; + } +} + +/* 1st def MUST not have preceding save in dominator list. 
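Otherwise the register would be saved more than once along some path.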
Each dominator + block must not have livein or liveout of the register */ +int32 AArch64RegSavesOpt::CheckCriteria(BB *bb, regno_t reg) const { + /* Already a site to save */ + SavedRegInfo *sp = bbSavedRegs[bb->GetId()]; + if (sp != nullptr && sp->ContainSaveReg(reg)) { + return 1; + } + + /* This preceding block has livein OR liveout of reg */ + MapleSet &liveIn = bb->GetLiveInRegNO(); + MapleSet &liveOut = bb->GetLiveOutRegNO(); + if (liveIn.find(reg) != liveIn.end() || + liveOut.find(reg) != liveOut.end()) { + return 2; + } + + return 0; +} + +/* Return true if reg is already to be saved in its dominator list */ +bool AArch64RegSavesOpt::AlreadySavedInDominatorList(const BB *bb, regno_t reg) const { + BB *aBB = GetDomInfo()->GetDom(bb->GetId()); + + if (RS_DUMP) { + mLog << "Checking dom list starting " << bb->GetId() << " for saved R" << (reg - 1) << ":\n "; + } + while (!aBB->GetPreds().empty()) { /* can't go beyond prolog */ + if (RS_DUMP) { + mLog << aBB->GetId() << " "; + } + if (int t = CheckCriteria(aBB, reg)) { + if (RS_DUMP) { + if (t == 1) { + mLog << " --R" << (reg - 1) << " saved here, skip!\n"; + } else { + mLog << " --R" << (reg - 1) << " has livein/out, skip!\n"; + } + } + return true; /* previously saved, inspect next reg */ + } + aBB = GetDomInfo()->GetDom(aBB->GetId()); + } + return false; /* not previously saved, to save at bb */ +} + +/* Determine callee-save regs save locations and record them in bbSavedRegs. + Save is needed for a 1st def callee-save register at its dominator block + outside any loop. */ +void AArch64RegSavesOpt::DetermineCalleeSaveLocationsDoms() { + if (RS_DUMP) { + mLog << "Determining regsave sites using dom list for " << cgFunc->GetName() << ":\n"; + } + for (auto *bb : bfs->sortedBBs) { + if (RS_DUMP) { + mLog << "BB: " << bb->GetId() << "\n"; + } + CalleeBitsType c = GetBBCalleeBits(GetCalleeBitsDef(), bb->GetId()); + if (c == 0) { + continue; + } + CalleeBitsType mask = 1; + for (uint32 i = 0; i < static_cast(sizeof(CalleeBitsType) << 3); ++i) { + if ((c & mask) != 0) { + MapleSet &liveIn = bb->GetLiveInRegNO(); + regno_t reg = ReverseRegBitMap(i); + if (oneAtaTime && oneAtaTimeReg != reg) { + mask <<= 1; + continue; + } + if (liveIn.find(reg) == liveIn.end()) { /* not livein */ + BB *bbDom = bb; /* start from current BB */ + bool done = false; + while (bbDom->GetLoop() != nullptr) { + bbDom = GetDomInfo()->GetDom(bbDom->GetId()); + if (CheckCriteria(bbDom, reg)) { + done = true; + break; + } + DEBUG_ASSERT(bbDom, "Can't find dominator for save location"); + } + if (done) { + mask <<= 1; + continue; + } + + /* Check if a dominator of bbDom was already a location to save */ + if (AlreadySavedInDominatorList(bbDom, reg)) { + mask <<= 1; + continue; /* no need to save again, next reg */ + } + + /* Check if the newly found block is a dominator of block(s) in the current + to be saved list. If so, remove these blocks from bbSavedRegs */ + uint32 creg = i; + SavedBBInfo *sp = regSavedBBs[creg]; + if (sp == nullptr) { + regSavedBBs[creg] = memPool->New(alloc); + } else { + for (BB *sbb : sp->GetBBList()) { + for (BB *abb = sbb; !abb->GetPreds().empty();) { + if (abb->GetId() == bbDom->GetId()) { + /* Found! 
Don't plan to save in abb */ + sp->RemoveBB(sbb); + bbSavedRegs[sbb->GetId()]->RemoveSaveReg(reg); + if (RS_DUMP) { + mLog << " --R" << (reg - 1) << " save removed from BB" << sbb->GetId() << "\n"; + } + break; + } + abb = GetDomInfo()->GetDom(abb->GetId()); + } + } + } + regSavedBBs[creg]->InsertBB(bbDom); + + uint32 bid = bbDom->GetId(); + if (RS_DUMP) { + mLog << " --R" << (reg - 1); + mLog << " to save in " << bid << "\n"; + } + SavedRegInfo *ctx = GetbbSavedRegsEntry(bid); + if (!ctx->ContainSaveReg(reg)) { + ctx->InsertSaveReg(reg); + } + } + } + mask <<= 1; + CalleeBitsType t = c; + t >>= 1; + if (t == 0) { + break; /* short cut */ + } + } + } +} + +void AArch64RegSavesOpt::DetermineCalleeSaveLocationsPre() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MapleAllocator sprealloc(memPool); + if (RS_DUMP) { + mLog << "Determining regsave sites using ssa_pre for " << cgFunc->GetName() << ":\n"; + } + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + for (auto reg : callees) { + if (reg >= R29 && reg < V8) { + continue; /* save/restore in prologue, epilogue */ + } + if (oneAtaTime && oneAtaTimeReg != reg) { + continue; + } + + SsaPreWorkCand wkCand(&sprealloc); + for (uint32 bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { + /* Set the BB occurrences of this callee-saved register */ + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg) || + IsCalleeBitSet(GetCalleeBitsUse(), bid, reg)) { + (void)wkCand.occBBs.insert(bid); + } + } + DoSavePlacementOpt(cgFunc, GetDomInfo(), &wkCand); + if (wkCand.saveAtEntryBBs.empty()) { + /* something gone wrong, skip this reg */ + wkCand.saveAtProlog = true; + } + if (wkCand.saveAtProlog) { + /* Save cannot be applied, skip this reg and place save/restore + in prolog/epilog */ + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } + if (RS_DUMP) { + mLog << "Save R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; + } + continue; + } + if (!wkCand.saveAtEntryBBs.empty()) { + for (uint32 entBB : wkCand.saveAtEntryBBs) { + if (RS_DUMP) { + std::string r = reg <= R28 ? 
"r" : "v"; + mLog << "BB " << entBB << " save: " << r << (reg - 1) << "\n"; + } + GetbbSavedRegsEntry(entBB)->InsertSaveReg(reg); + } + } + } +} + +/* Determine calleesave regs restore locations by calling ssu-pre, + previous bbSavedRegs memory is cleared and restore locs recorded in it */ +bool AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MapleAllocator sprealloc(memPool); + if (RS_DUMP) { + mLog << "Determining Callee Restore Locations:\n"; + } + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + for (auto reg : callees) { + if (reg >= R29 && reg < V8) { + continue; /* save/restore in prologue, epilogue */ + } + if (oneAtaTime && oneAtaTimeReg != reg) { + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } + continue; + } + + SPreWorkCand wkCand(&sprealloc); + for (uint32 bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { + /* Set the saved BB locations of this callee-saved register */ + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr) { + if (sp->ContainSaveReg(reg)) { + (void)wkCand.saveBBs.insert(bid); + } + } + /* Set the BB occurrences of this callee-saved register */ + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg) || + IsCalleeBitSet(GetCalleeBitsUse(), bid, reg)) { + (void)wkCand.occBBs.insert(bid); + } + } + DoRestorePlacementOpt(cgFunc, GetPostDomInfo(), &wkCand); + if (wkCand.saveBBs.empty()) { + /* something gone wrong, skip this reg */ + wkCand.restoreAtEpilog = true; + } + /* splitted empty block for critical edge present, skip function */ + MapleSet rset = wkCand.restoreAtEntryBBs; + for (auto bbid : wkCand.restoreAtExitBBs) { + (void)rset.insert(bbid); + } + for (auto bbid : rset) { + BB *bb = GetId2bb(bbid); + if (bb->GetKind() == BB::kBBGoto && bb->NumInsn() == 1) { + aarchCGFunc->GetProEpilogSavedRegs().clear(); + const MapleVector &calleesNew = aarchCGFunc->GetCalleeSavedRegs(); + for (auto areg : calleesNew) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(areg); + } + return false; + } + } + if (wkCand.restoreAtEpilog) { + /* Restore cannot b3 applied, skip this reg and place save/restore + in prolog/epilog */ + for (size_t bid = 1; bid < bbSavedRegs.size(); ++bid) { + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr && !sp->GetSaveSet().empty()) { + if (sp->ContainSaveReg(reg)) { + sp->RemoveSaveReg(reg); + } + } + } + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } + if (RS_DUMP) { + mLog << "Restore R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; + } + continue; + } + if (!wkCand.restoreAtEntryBBs.empty() || !wkCand.restoreAtExitBBs.empty()) { + for (uint32 entBB : wkCand.restoreAtEntryBBs) { + if (RS_DUMP) { + std::string r = reg <= R28 ? 
"r" : "v"; + mLog << "BB " << entBB << " restore: " << r << (reg - 1) << "\n"; + } + GetbbSavedRegsEntry(entBB)->InsertEntryReg(reg); + } + for (uint32 exitBB : wkCand.restoreAtExitBBs) { + BB *bb = GetId2bb(exitBB); + if (bb->GetKind() == BB::kBBIgoto) { + CHECK_FATAL(false, "igoto detected"); + } + Insn *lastInsn = bb->GetLastInsn(); + if (lastInsn != nullptr && lastInsn->IsBranch() && (!lastInsn->GetOperand(0).IsRegister() || /* not a reg OR */ + (!AArch64Abi::IsCalleeSavedReg( /* reg but not cs */ + static_cast(static_cast(lastInsn->GetOperand(0)).GetRegisterNumber()))))) { + /* To insert in this block - 1 instr */ + SavedRegInfo *sp = GetbbSavedRegsEntry(exitBB); + sp->InsertExitReg(reg); + sp->insertAtLastMinusOne = true; + } else if (bb->GetSuccs().size() > 1) { + for (BB *sbb : bb->GetSuccs()) { + if (sbb->GetPreds().size() > 1) { + CHECK_FATAL(false, "critical edge detected"); + } + /* To insert at all succs */ + GetbbSavedRegsEntry(sbb->GetId())->InsertEntryReg(reg); + } + } else { + /* otherwise, BB_FT etc */ + GetbbSavedRegsEntry(exitBB)->InsertExitReg(reg); + } + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + mLog << "BB " << exitBB << " restore: " << r << (reg - 1) << "\n"; + } + } + } + } + return true; +} + +int32 AArch64RegSavesOpt::FindNextOffsetForCalleeSave() const { + int32 offset = static_cast( + static_cast(cgFunc->GetMemlayout())->RealStackFrameSize() - + (static_cast(cgFunc)->SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* FP/LR */) - + cgFunc->GetMemlayout()->SizeOfArgsToStackPass()); + + if (cgFunc->GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc->GetMemlayout()); + int saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + return offset; +} + +void AArch64RegSavesOpt::InsertCalleeSaveCode() { + uint32 bid = 0; + BB *saveBB = cgFunc->GetCurBB(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + if (RS_DUMP) { + mLog << "Inserting Save: \n"; + } + int32 offset = FindNextOffsetForCalleeSave(); + offset += static_cast((aarchCGFunc->GetProEpilogSavedRegs().size() - 2) << 3); // 2 for R29,RLR 3 for 8 bytes + for (BB *bb : bfs->sortedBBs) { + bid = bb->GetId(); + aarchCGFunc->SetSplitBaseOffset(0); + if (bbSavedRegs[bid] != nullptr && !bbSavedRegs[bid]->GetSaveSet().empty()) { + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + for (auto areg : bbSavedRegs[bid]->GetSaveSet()) { + AArch64reg reg = static_cast(areg); + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + std::string r = reg <= R28 ? 
"R" : "V"; + /* If reg not seen before, record offset and then update */ + if (regOffset.find(areg) == regOffset.end()) { + regOffset[areg] = static_cast(offset); + offset += static_cast(kIntregBytelen); + } + if (firstHalf == kRinvalid) { + /* 1st half in reg pair */ + firstHalf = reg; + if (RS_DUMP && reg > 0) { + mLog << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg]<< "\n"; + } + } else { + if (regOffset[reg] == (regOffset[firstHalf] + k8ByteSize)) { + /* firstHalf & reg consecutive, make regpair */ + AArch64GenProEpilog::AppendInstructionPushPair(*cgFunc, firstHalf, reg, regType, + static_cast(regOffset[firstHalf])); + } else if (regOffset[firstHalf] == (regOffset[reg] + k8ByteSize)) { + /* reg & firstHalf consecutive, make regpair */ + AArch64GenProEpilog::AppendInstructionPushPair(*cgFunc, reg, firstHalf, regType, + static_cast(regOffset[reg])); + } else { + /* regs cannot be paired */ + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, firstHalf, regType, + static_cast(regOffset[firstHalf])); + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, reg, regType, + static_cast(regOffset[reg])); + } + firstHalf = kRinvalid; + if (RS_DUMP) { + mLog << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg]<< "\n"; + } + } + } + + if (intRegFirstHalf != kRinvalid) { + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, + intRegFirstHalf, kRegTyInt, static_cast(regOffset[intRegFirstHalf])); + } + + if (fpRegFirstHalf != kRinvalid) { + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, + fpRegFirstHalf, kRegTyFloat, static_cast(regOffset[fpRegFirstHalf])); + } + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + } + cgFunc->SetCurBB(*saveBB); +} + +/* DFS to verify the save/restore are in pair(s) within a path */ +void AArch64RegSavesOpt::Verify(regno_t reg, BB *bb, std::set *visited, BBId *s, BBId *r) { + (void)visited->insert(bb); + BBId bid = bb->GetId(); + if (RS_EXTRA) { + mLog << bid << ","; /* path trace can be long */ + } + + if (bbSavedRegs[bid]) { + bool entryRestoreMet = false; + if (bbSavedRegs[bid]->ContainEntryReg(reg)) { + if (RS_EXTRA) { + mLog << "[^" << bid << "],"; // entry restore found + } + if (*s == 0) { + mLog << "Alert: nR@" << bid << " found w/o save\n"; + return; + } + /* complete s/xR found, continue */ + mLog << "(" << *s << "," << bid << ") "; + *r = bid; + entryRestoreMet = true; + } + if (bbSavedRegs[bid]->ContainSaveReg(reg)) { + if (RS_EXTRA) { + mLog << "[" << bid << "],"; // save found + } + if (*s != 0 && !entryRestoreMet) { + /* another save found before last save restored */ + mLog << "Alert: save@" << bid << " found after save@" << *s << "\n"; + return; + } + if (entryRestoreMet) { + *r = 0; + } + *s = bid; + } + if (bbSavedRegs[bid]->ContainExitReg(reg)) { + if (RS_EXTRA) { + mLog << "[" << bid << "$],"; // exit restore found + } + if (*s == 0) { + mLog << "Alert: xR@" << bid << " found w/o save\n"; + return; + } + /* complete s/xR found, continue */ + mLog << "(" << *s << "," << bid << ") "; + *r = bid; + } + } + + if (bb->GetSuccs().size() == 0) { + if (*s != 0 && *r == 0) { + mLog << "Alert: save@" << *s << " w/o restore reaches end"; + } + mLog << " " << bid << " ended>\n"; + *r = 0; + } + for (BB *sBB : bb->GetSuccs()) { + if (visited->count(sBB) == 0) { + Verify(reg, sBB, visited, s, r); + } + } + if (*s == bid) { + /* clear only when returned from previous calls to the orig save site */ + /* clear savebid since all of its succs already visited */ + *s = 0; + } + if (*r 
== bid) { + /* clear restorebid if all of its preds already visited */ + bool clear = true; + for (BB *pBB : bb->GetPreds()) { + if (visited->count(pBB) == 0) { + clear = false; + break; + } + } + if (clear) { + *r = 0; + } + } +} + +void AArch64RegSavesOpt::InsertCalleeRestoreCode() { + uint32 bid = 0; + BB *saveBB = cgFunc->GetCurBB(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + if (RS_DUMP) { + mLog << "Inserting Restore: \n"; + } + int32 offset = FindNextOffsetForCalleeSave(); + for (BB *bb : bfs->sortedBBs) { + bid = bb->GetId(); + aarchCGFunc->SetSplitBaseOffset(0); + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr) { + if (sp->GetEntrySet().empty() && sp->GetExitSet().empty()) { + continue; + } + + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + for (auto areg : sp->GetEntrySet()) { + AArch64reg reg = static_cast(areg); + offset = static_cast(regOffset[areg]); + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + mLog << r << (reg - 1) << " entry restore in BB " << bid << " Saved Offset = " << offset << "\n"; + if (RS_EXTRA) { + mLog << " for save @BB [ "; + for (size_t b = 1; b < bbSavedRegs.size(); ++b) { + if (bbSavedRegs[b] != nullptr && bbSavedRegs[b]->ContainSaveReg(reg)) { + mLog << b << " "; + } + } + mLog << "]\n"; + } + } + + /* restore is always the same from saved offset */ + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64GenProEpilog::AppendInstructionPopSingle(*cgFunc, reg, regType, offset); + } + FOR_BB_INSNS(insn, aarchCGFunc->GetDummyBB()) { + insn->SetDoNotRemove(true); /* do not let ebo remove these restores */ + } + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + for (auto areg : sp->GetExitSet()) { + AArch64reg reg = static_cast(areg); + offset = static_cast(regOffset[areg]); + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + mLog << r << (reg - 1) << " exit restore in BB " << bid << " Offset = " << offset << "\n"; + mLog << " for save @BB [ "; + for (size_t b = 1; b < bbSavedRegs.size(); ++b) { + if (bbSavedRegs[b] != nullptr && bbSavedRegs[b]->ContainSaveReg(reg)) { + mLog << b << " "; + } + } + mLog << "]\n"; + } + + /* restore is always single from saved offset */ + RegType regType = AArch64isa::IsGPRegister(reg) ? 
kRegTyInt : kRegTyFloat; + AArch64GenProEpilog::AppendInstructionPopSingle(*cgFunc, reg, regType, offset); + } + FOR_BB_INSNS(insn, aarchCGFunc->GetDummyBB()) { + insn->SetDoNotRemove(true); + } + if (sp->insertAtLastMinusOne) { + bb->InsertAtEndMinus1(*aarchCGFunc->GetDummyBB()); + } else { + bb->InsertAtEnd(*aarchCGFunc->GetDummyBB()); + } + } + } + cgFunc->SetCurBB(*saveBB); +} + +/* Callee-save registers save/restore placement optimization */ +void AArch64RegSavesOpt::Run() { + // DotGenerator::GenerateDot("SR", *cgFunc, cgFunc->GetMirModule(), true, cgFunc->GetName()); + if (Globals::GetInstance()->GetOptimLevel() <= CGOptions::kLevel1) { + return; + } + +#if ONE_REG_AT_A_TIME + /* only do reg placement on the following register, others in pro/epilog */ + oneAtaTime = true; + oneAtaTimeReg = R25; +#endif + + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); + if (RS_DUMP) { + mLog << "##Calleeregs Placement for: " << cgFunc->GetName() << "\n"; + PrintBBs(); + } + +#ifdef REDUCE_COMPLEXITY + CGOptions::EnableRegSavesOpt(); + for (auto bb : bfs->sortedBBs) { + if (bb->GetSuccs().size() > threshold) { + CGOptions::DisableRegSavesOpt(); + return; + } + } +#endif + + /* Determined 1st def and last use of all callee-saved registers used + for all BBs */ + InitData(); + GetLocalDefUse(); + + /* Determine save sites at dominators of 1st def with no live-in and + not within loop */ + if (CGOptions::UseSsaPreSave()) { + DetermineCalleeSaveLocationsPre(); + } else { + DetermineCalleeSaveLocationsDoms(); + } + + /* Determine restore sites */ + if (!DetermineCalleeRestoreLocations()) { + return; + } + +#ifdef VERIFY + /* Verify saves/restores are in pair */ + if (RS_DUMP) { + std::vector rlist = { R19, R20, R21, R22, R23, R24, R25, R26, R27, R28 }; + for (auto reg : rlist) { + mLog << "Verify calleeregs_placement data for R" << (reg - 1) << ":\n"; + std::set visited; + uint32 saveBid = 0; + uint32 restoreBid = 0; + Verify(reg, cgFunc->GetFirstBB(), &visited, &saveBid, &restoreBid); + mLog << "\nVerify Done\n"; + } + } +#endif + + /* Generate callee save instrs at found sites */ + InsertCalleeSaveCode(); + + /* Generate callee restores at found sites */ + InsertCalleeRestoreCode(); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..004bb059a2405688ea2913542d92211f93ab5e2e --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp @@ -0,0 +1,1509 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_schedule.h" +#include +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "aarch64_dependence.h" +#include "pressure.h" + +/* + * This phase is Instruction Scheduling. + * There is a local list scheduling, it is scheduling in basic block. 
+ * The entry is AArch64Schedule::ListScheduling, which traverses all basic blocks,
+ * and for each basic block:
+ * 1. build a dependence graph;
+ * 2. combine clinit pairs and str&ldr pairs;
+ * 3. reorder instructions.
+ */
+namespace maplebe {
+namespace {
+constexpr uint32 kClinitAdvanceCycle = 10;
+constexpr uint32 kAdrpLdrAdvanceCycle = 2;
+constexpr uint32 kClinitTailAdvanceCycle = 4;
+constexpr uint32 kSecondToLastNode = 2;
+}
+
+uint32 AArch64Schedule::maxUnitIndex = 0;
+/* reserve two registers for special purposes */
+int AArch64Schedule::intRegPressureThreshold = static_cast<int>(R27 - R0);
+int AArch64Schedule::fpRegPressureThreshold = static_cast<int>(V30 - V0);
+int AArch64Schedule::intCalleeSaveThresholdBase = static_cast<int>(R29 - R19);
+int AArch64Schedule::intCalleeSaveThresholdEnhance = static_cast<int>(R30 - R19);
+int AArch64Schedule::fpCalleeSaveThreshold = static_cast<int>(R16 - R8);
+/* Init schedule's data structures. */
+void AArch64Schedule::Init() {
+  readyList.clear();
+  nodeSize = nodes.size();
+  lastSeparatorIndex = 0;
+  mad->ReleaseAllUnits();
+  DepNode *node = nodes[0];
+
+  DEBUG_ASSERT(node->GetType() == kNodeTypeSeparator, "CG internal error, the first node should be a separator node.");
+
+  if (CGOptions::IsDruteForceSched() || CGOptions::IsSimulateSched()) {
+    for (auto nodeTemp : nodes) {
+      nodeTemp->SetVisit(0);
+      nodeTemp->SetState(kNormal);
+      nodeTemp->SetSchedCycle(0);
+      nodeTemp->SetEStart(0);
+      nodeTemp->SetLStart(0);
+    }
+  }
+
+  readyList.emplace_back(node);
+  node->SetState(kReady);
+
+  /* Init validPredsSize and validSuccsSize. */
+  for (auto nodeTemp : nodes) {
+    nodeTemp->SetValidPredsSize(nodeTemp->GetPreds().size());
+    nodeTemp->SetValidSuccsSize(nodeTemp->GetSuccs().size());
+  }
+}
+
+/*
+ * An insn that can be combined must meet these conditions:
+ * 1. it is a str/ldr insn;
+ * 2. its address mode is kAddrModeBOi, [baseReg, offset];
+ * 3. the register operand size equals the memory operand size;
+ * 4. if USE_32BIT_REF is defined, the register operand size should be 4 bytes;
+ * 5. for stp/ldp, the imm should be within -512 and 504 (64-bit), or -256 and 252 (32-bit);
+ * 6. the imm of a pair instr for 8/4-byte registers must be a multiple of 8/4.
+ * If the insn can be combined, return true.
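As a rough standalone sketch of conditions 5 and 6, assuming the same strict bounds the pass checks (the constant and function names below are illustrative stand-ins for kStpLdpImm64LowerBound and friends, not the pass's API):

```
#include <cstdint>

// Bounds mirrored from the pass's checks (condition 5): offsets at or beyond
// these values are rejected for the scaled ldp/stp immediate form.
constexpr int32_t kPairImm64Lower = -512;
constexpr int32_t kPairImm64Upper = 504;
constexpr int32_t kPairImm32Lower = -256;
constexpr int32_t kPairImm32Upper = 252;

// True if `offset` may feed an ldp/stp of `size`-byte registers: within the
// scaled immediate range and a multiple of the access size (condition 6).
bool OffsetLegalForPair(int32_t offset, uint32_t size) {
  if (size == 8) {
    if (offset <= kPairImm64Lower || offset >= kPairImm64Upper) {
      return false;
    }
  } else if (size == 4) {
    if (offset <= kPairImm32Lower || offset >= kPairImm32Upper) {
      return false;
    }
  } else {
    return false;  // only 4- and 8-byte accesses are paired here
  }
  return offset % static_cast<int32_t>(size) == 0;
}
```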
+ */ +bool AArch64Schedule::CanCombine(const Insn &insn) const { + MOperator opCode = insn.GetMachineOpcode(); + if ((opCode != MOP_xldr) && (opCode != MOP_wldr) && (opCode != MOP_dldr) && (opCode != MOP_sldr) && + (opCode != MOP_xstr) && (opCode != MOP_wstr) && (opCode != MOP_dstr) && (opCode != MOP_sstr)) { + return false; + } + + DEBUG_ASSERT(insn.GetOperand(1).IsMemoryAccessOperand(), "expects mem operands"); + auto &memOpnd = static_cast(insn.GetOperand(1)); + MemOperand::AArch64AddressingMode addrMode = memOpnd.GetAddrMode(); + if ((addrMode != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + return false; + } + + auto ®Opnd = static_cast(insn.GetOperand(0)); + if (regOpnd.GetSize() != memOpnd.GetSize()) { + return false; + } + + uint32 size = regOpnd.GetSize() >> kLog2BitsPerByte; +#ifdef USE_32BIT_REF + if (insn.IsAccessRefField() && (size > (kIntregBytelen >> 1))) { + return false; + } +#endif /* USE_32BIT_REF */ + + OfstOperand *offset = memOpnd.GetOffsetImmediate(); + if (offset == nullptr) { + return false; + } + int32 offsetValue = static_cast(offset->GetOffsetValue()); + if (size == kIntregBytelen) { /* 64 bit */ + if ((offsetValue <= kStpLdpImm64LowerBound) || (offsetValue >= kStpLdpImm64UpperBound)) { + return false; + } + } else if (size == (kIntregBytelen >> 1)) { /* 32 bit */ + if ((offsetValue <= kStpLdpImm32LowerBound) || (offsetValue >= kStpLdpImm32UpperBound)) { + return false; + } + } + + /* pair instr for 8/4 byte registers must have multiple of 8/4 for imm */ + if ((static_cast(offsetValue) % size) != 0) { + return false; + } + return true; +} + +/* After building dependence graph, combine str&ldr pairs. */ +void AArch64Schedule::MemoryAccessPairOpt() { + Init(); + std::vector memList; + + while ((!readyList.empty()) || !memList.empty()) { + DepNode *readNode = nullptr; + if (!readyList.empty()) { + readNode = readyList[0]; + readyList.erase(readyList.begin()); + } else { + if (memList[0]->GetType() != kNodeTypeEmpty) { + FindAndCombineMemoryAccessPair(memList); + } + readNode = memList[0]; + memList.erase(memList.begin()); + } + + /* schedule readNode */ + CHECK_FATAL(readNode != nullptr, "readNode is null in MemoryAccessPairOpt"); + readNode->SetState(kScheduled); + + /* add readNode's succs to readyList or memList. */ + for (auto succLink : readNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DescreaseValidPredsSize(); + if (succNode.GetValidPredsSize() == 0) { + DEBUG_ASSERT(succNode.GetState() == kNormal, "schedule state should be kNormal"); + succNode.SetState(kReady); + DEBUG_ASSERT(succNode.GetInsn() != nullptr, "insn can't be nullptr!"); + if (CanCombine(*succNode.GetInsn())) { + memList.emplace_back(&succNode); + } else { + readyList.emplace_back(&succNode); + } + } + } + } + + for (auto node : nodes) { + node->SetVisit(0); + node->SetState(kNormal); + } +} + +/* Find and combine correct MemoryAccessPair for memList[0]. 
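The pairing test at the heart of this search reduces to: same opcode, same base register and access size, offsets exactly one slot apart, and (for loads) distinct destination registers. A minimal sketch over a hypothetical flattened record, not the pass's real Insn/MemOperand API:

```
#include <cstdint>
#include <cstdlib>

// Hypothetical flattened view of a str/ldr candidate; field names are
// illustrative only.
struct MemAccess {
  bool isLoad;
  uint32_t baseReg;   // base register number
  uint32_t dataReg;   // loaded/stored register number
  uint32_t size;      // access size in bytes (4 or 8)
  int32_t offset;     // immediate offset from the base
};

// Two accesses can fold into one ldp/stp when they hit adjacent slots off the
// same base; ldp additionally may not load the same destination twice.
bool Adjacent(const MemAccess &a, const MemAccess &b) {
  if (a.isLoad != b.isLoad || a.baseReg != b.baseReg || a.size != b.size) {
    return false;
  }
  if (a.isLoad && a.dataReg == b.dataReg) {
    return false;  // per the ARMv8 manual, ldp cannot reuse a destination
  }
  return static_cast<uint32_t>(std::abs(b.offset - a.offset)) == a.size;
}
```

The real pass also requires both operands to have the same register type before combining, which the flattened record above glosses over.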
*/ +void AArch64Schedule::FindAndCombineMemoryAccessPair(const std::vector &memList) { + DEBUG_ASSERT(!memList.empty(), "memList should not be empty"); + CHECK_FATAL(memList[0]->GetInsn() != nullptr, "memList[0]'s insn should not be nullptr"); + MemOperand *currMemOpnd = static_cast(memList[0]->GetInsn()->GetMemOpnd()); + DEBUG_ASSERT(currMemOpnd != nullptr, "opnd should not be nullptr"); + DEBUG_ASSERT(currMemOpnd->IsMemoryAccessOperand(), "opnd should be memOpnd"); + int32 currOffsetVal = static_cast(currMemOpnd->GetOffsetImmediate()->GetOffsetValue()); + MOperator currMop = memList[0]->GetInsn()->GetMachineOpcode(); + /* find a depNode to combine with memList[0], and break; */ + for (auto it = std::next(memList.begin(), 1); it != memList.end(); ++it) { + DEBUG_ASSERT((*it)->GetInsn() != nullptr, "null ptr check"); + + if (currMop == (*it)->GetInsn()->GetMachineOpcode()) { + MemOperand *nextMemOpnd = static_cast((*it)->GetInsn()->GetMemOpnd()); + CHECK_FATAL(nextMemOpnd != nullptr, "opnd should not be nullptr"); + CHECK_FATAL(nextMemOpnd->IsMemoryAccessOperand(), "opnd should be MemOperand"); + int32 nextOffsetVal = static_cast(nextMemOpnd->GetOffsetImmediate()->GetOffsetValue()); + uint32 size = currMemOpnd->GetSize() >> kLog2BitsPerByte; + if ((nextMemOpnd->GetBaseRegister() == currMemOpnd->GetBaseRegister()) && + (nextMemOpnd->GetSize() == currMemOpnd->GetSize()) && + (static_cast(abs(nextOffsetVal - currOffsetVal)) == size)) { + /* + * In ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile + * LDP on page K1-6125 declare that ldp can't use same reg + */ + if (((currMop == MOP_xldr) || (currMop == MOP_sldr) || (currMop == MOP_dldr) || (currMop == MOP_wldr)) && + &(memList[0]->GetInsn()->GetOperand(0)) == &((*it)->GetInsn()->GetOperand(0))) { + continue; + } + if (static_cast((*it)->GetInsn()->GetOperand(0)).GetRegisterType() != + static_cast(memList[0]->GetInsn()->GetOperand(0)).GetRegisterType()) { + continue; + } + + if (LIST_SCHED_DUMP_REF) { + LogInfo::MapleLogger() << "Combine insn: " << "\n"; + memList[0]->GetInsn()->Dump(); + (*it)->GetInsn()->Dump(); + } + depAnalysis->CombineMemoryAccessPair(*memList[0], **it, nextOffsetVal > currOffsetVal); + if (LIST_SCHED_DUMP_REF) { + LogInfo::MapleLogger() << "To: " << "\n"; + memList[0]->GetInsn()->Dump(); + } + break; + } + } + } +} + +/* combine clinit pairs. */ +void AArch64Schedule::ClinitPairOpt() { + for (auto it = nodes.begin(); it != nodes.end(); ++it) { + auto nextIt = std::next(it, 1); + if (nextIt == nodes.end()) { + return; + } + + if ((*it)->GetInsn()->GetMachineOpcode() == MOP_adrp_ldr) { + if ((*nextIt)->GetInsn()->GetMachineOpcode() == MOP_clinit_tail) { + depAnalysis->CombineClinit(**it, **(nextIt), false); + } else if ((*nextIt)->GetType() == kNodeTypeSeparator) { + nextIt = std::next(nextIt, 1); + if (nextIt == nodes.end()) { + return; + } + if ((*nextIt)->GetInsn()->GetMachineOpcode() == MOP_clinit_tail) { + /* Do something. */ + depAnalysis->CombineClinit(**it, **(nextIt), true); + } + } + } + } +} + +/* Return the next node's index who is kNodeTypeSeparator. */ +uint32 AArch64Schedule::GetNextSepIndex() const { + return ((lastSeparatorIndex + kMaxDependenceNum) < nodeSize) ? (lastSeparatorIndex + kMaxDependenceNum) + : (nodes.size() - 1); +} + +/* Do register pressure schduling. 
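One way to read pressure-aware selection is as a greedy net-effect rule: among ready nodes, prefer the one that releases more live registers than it defines. A toy sketch under that assumption (Candidate and its fields are hypothetical, not the RegPressureSchedule API):

```
#include <cstddef>
#include <vector>

// Hypothetical ready-list entry: how many live registers the node's last-use
// operands would release, and how many new values it would define.
struct Candidate {
  int releases;
  int defines;
};

// Pick the candidate with the best net effect on the live set.
std::size_t PickUnderPressure(const std::vector<Candidate> &ready) {
  std::size_t best = 0;
  for (std::size_t i = 1; i < ready.size(); ++i) {
    int netBest = ready[best].releases - ready[best].defines;
    int netCur = ready[i].releases - ready[i].defines;
    if (netCur > netBest) {
      best = i;
    }
  }
  return best;
}
```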
*/ +void AArch64Schedule::RegPressureScheduling(BB &bb, MapleVector &nodes) { + RegPressureSchedule *regSchedule = memPool.New(cgFunc, alloc); + /* + * Get physical register amount currently + * undef, Int Reg, Float Reg, Flag Reg + */ + const std::vector kRegNumVec = { 0, V0, (kMaxRegNum - V0) + 1, 1 }; + regSchedule->InitBBInfo(bb, memPool, nodes); + regSchedule->BuildPhyRegInfo(kRegNumVec); + regSchedule->DoScheduling(nodes); +} + +/* + * Compute earliest start of the node, + * return value : the maximum estart. + */ +uint32 AArch64Schedule::ComputeEstart(uint32 cycle) { + std::vector readyNodes; + uint32 maxIndex = GetNextSepIndex(); + + if (CGOptions::IsDebugSched()) { + /* Check validPredsSize. */ + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + int32 schedNum = 0; + for (const auto *predLink : node->GetPreds()) { + if (predLink->GetFrom().GetState() == kScheduled) { + ++schedNum; + } + } + DEBUG_ASSERT((node->GetPreds().size() - schedNum) == node->GetValidPredsSize(), "validPredsSize error."); + } + } + + DEBUG_ASSERT(nodes[maxIndex]->GetType() == kNodeTypeSeparator, + "CG internal error, nodes[maxIndex] should be a separator node."); + + (void)readyNodes.insert(readyNodes.begin(), readyList.begin(), readyList.end()); + + uint32 maxEstart = cycle; + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + node->SetVisit(0); + } + + for (auto *node : readyNodes) { + DEBUG_ASSERT(node->GetState() == kReady, "CG internal error, all nodes in ready list should be ready."); + if (node->GetEStart() < cycle) { + node->SetEStart(cycle); + } + } + + while (!readyNodes.empty()) { + DepNode *node = readyNodes.front(); + readyNodes.erase(readyNodes.begin()); + + for (const auto *succLink : node->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + if (succNode.GetType() == kNodeTypeSeparator) { + continue; + } + + if (succNode.GetEStart() < (node->GetEStart() + succLink->GetLatency())) { + succNode.SetEStart(node->GetEStart() + succLink->GetLatency()); + } + maxEstart = (maxEstart < succNode.GetEStart() ? succNode.GetEStart() : maxEstart); + succNode.IncreaseVisit(); + if ((succNode.GetVisit() >= succNode.GetValidPredsSize()) && (succNode.GetType() != kNodeTypeSeparator)) { + readyNodes.emplace_back(&succNode); + } + DEBUG_ASSERT(succNode.GetVisit() <= succNode.GetValidPredsSize(), "CG internal error."); + } + } + + return maxEstart; +} + +/* Compute latest start of the node. 
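ComputeEstart is a forward longest-path relaxation over the dependence DAG: estart(succ) is raised to estart(pred) + latency for every edge. A compact sketch on a plain edge list, assuming the edges arrive in topological order:

```
#include <cstdint>
#include <vector>

struct Edge {
  int from;
  int to;
  uint32_t latency;
};

// Forward relaxation of earliest-start times over n nodes; `edges` must be
// topologically sorted so each relaxation sees a final estart for `from`.
std::vector<uint32_t> EStarts(int n, const std::vector<Edge> &edges) {
  std::vector<uint32_t> estart(static_cast<std::size_t>(n), 0);
  for (const Edge &e : edges) {
    if (estart[e.to] < estart[e.from] + e.latency) {
      estart[e.to] = estart[e.from] + e.latency;
    }
  }
  return estart;
}
```

ComputeLstart then runs the mirror-image backward pass seeded with the maximum estart, so lstart minus estart gives each node's scheduling slack.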
*/ +void AArch64Schedule::ComputeLstart(uint32 maxEstart) { + /* std::vector is better than std::queue in run time */ + std::vector readyNodes; + uint32 maxIndex = GetNextSepIndex(); + + DEBUG_ASSERT(nodes[maxIndex]->GetType() == kNodeTypeSeparator, + "CG internal error, nodes[maxIndex] should be a separator node."); + + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + node->SetLStart(maxEstart); + node->SetVisit(0); + } + + readyNodes.emplace_back(nodes[maxIndex]); + while (!readyNodes.empty()) { + DepNode *node = readyNodes.front(); + readyNodes.erase(readyNodes.begin()); + for (const auto *predLink : node->GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + if (predNode.GetState() == kScheduled) { + continue; + } + + if (predNode.GetLStart() > (node->GetLStart() - predLink->GetLatency())) { + predNode.SetLStart(node->GetLStart() - predLink->GetLatency()); + } + predNode.IncreaseVisit(); + if ((predNode.GetVisit() >= predNode.GetValidSuccsSize()) && (predNode.GetType() != kNodeTypeSeparator)) { + readyNodes.emplace_back(&predNode); + } + + DEBUG_ASSERT(predNode.GetVisit() <= predNode.GetValidSuccsSize(), "CG internal error."); + } + } +} + +/* Compute earliest start and latest start of the node that is in readyList and not be scheduled. */ +void AArch64Schedule::UpdateELStartsOnCycle(uint32 cycle) { + ComputeLstart(ComputeEstart(cycle)); +} + +/* + * If all unit of this node need when it be scheduling is free, this node can be scheduled, + * Return true. + */ +bool DepNode::CanBeScheduled() const { + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + if (!unit->IsFree(i)) { + return false; + } + } + } + return true; +} + +/* Mark those unit that this node need occupy unit when it is being scheduled. */ +void DepNode::OccupyUnits() { + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + unit->Occupy(*insn, i); + } + } +} + +/* Get unit kind of this node's units[0]. */ +uint32 DepNode::GetUnitKind() const { + uint32 retValue = 0; + if ((units == nullptr) || (units[0] == nullptr)) { + return retValue; + } + + switch (units[0]->GetUnitId()) { + case kUnitIdSlotD: + retValue |= kUnitKindSlot0; + break; + case kUnitIdAgen: + case kUnitIdSlotSAgen: + retValue |= kUnitKindAgen; + break; + case kUnitIdSlotDAgen: + retValue |= kUnitKindAgen; + retValue |= kUnitKindSlot0; + break; + case kUnitIdHazard: + case kUnitIdSlotSHazard: + retValue |= kUnitKindHazard; + break; + case kUnitIdCrypto: + retValue |= kUnitKindCrypto; + break; + case kUnitIdMul: + case kUnitIdSlotSMul: + retValue |= kUnitKindMul; + break; + case kUnitIdDiv: + retValue |= kUnitKindDiv; + break; + case kUnitIdBranch: + case kUnitIdSlotSBranch: + retValue |= kUnitKindBranch; + break; + case kUnitIdStAgu: + retValue |= kUnitKindStAgu; + break; + case kUnitIdLdAgu: + retValue |= kUnitKindLdAgu; + break; + case kUnitIdFpAluS: + case kUnitIdFpAluD: + retValue |= kUnitKindFpAlu; + break; + case kUnitIdFpMulS: + case kUnitIdFpMulD: + retValue |= kUnitKindFpMul; + break; + case kUnitIdFpDivS: + case kUnitIdFpDivD: + retValue |= kUnitKindFpDiv; + break; + case kUnitIdSlot0LdAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindLdAgu; + break; + case kUnitIdSlot0StAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindStAgu; + break; + default: + break; + } + + return retValue; +} + +/* Count unit kinds to an array. Each element of the array indicates the unit kind number of a node set. 
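GetUnitKind packs the unit classes a node occupies into a bitmask, which CountUnitKind and IfUseUnitKind then walk with __builtin_ffs. The bit-walking idiom in isolation:

```
#include <cstdint>

// Walk the set bits of a unit-kind mask with __builtin_ffs, which returns the
// 1-based index of the lowest set bit (0 when the mask is empty); this is the
// same idiom CountUnitKind uses, with kinds indexed from 1.
void CountKinds(uint32_t unitKind, uint32_t counts[], uint32_t nKinds) {
  int index = __builtin_ffs(static_cast<int>(unitKind));
  while (index != 0) {
    if (static_cast<uint32_t>(index) < nKinds) {
      ++counts[index];
    }
    unitKind &= ~(1u << (index - 1));  // clear the bit just counted
    index = __builtin_ffs(static_cast<int>(unitKind));
  }
}
```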
*/ +void AArch64Schedule::CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const { + (void)arraySize; + DEBUG_ASSERT(arraySize >= kUnitKindLast, "CG internal error. unit kind number is not correct."); + uint32 unitKind = depNode.GetUnitKind(); + int32 index = static_cast(__builtin_ffs(unitKind)); + while (index) { + DEBUG_ASSERT(index < kUnitKindLast, "CG internal error. index error."); + ++array[index]; + unitKind &= ~(1u << (index - 1u)); + index = __builtin_ffs(unitKind); + } +} + +/* Check if a node use a specific unit kind. */ +bool AArch64Schedule::IfUseUnitKind(const DepNode &depNode, uint32 index) { + uint32 unitKind = depNode.GetUnitKind(); + int32 idx = static_cast(__builtin_ffs(unitKind)); + while (idx) { + DEBUG_ASSERT(index < kUnitKindLast, "CG internal error. index error."); + if (idx == static_cast(index)) { + return true; + } + unitKind &= ~(1u << (idx - 1u)); + idx = __builtin_ffs(unitKind); + } + + return false; +} + +/* A sample schedule according dependence graph only, to verify correctness of dependence graph. */ +void AArch64Schedule::RandomTest() { + Init(); + nodes.clear(); + + while (!readyList.empty()) { + DepNode *currNode = readyList.back(); + currNode->SetState(kScheduled); + readyList.pop_back(); + nodes.emplace_back(currNode); + + for (auto succLink : currNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + bool ready = true; + for (auto predLink : succNode.GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + if (predNode.GetState() != kScheduled) { + ready = false; + break; + } + } + + if (ready) { + DEBUG_ASSERT(succNode.GetState() == kNormal, "succNode must be kNormal"); + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + } + } + } +} + +/* Remove target from readyList. */ +void AArch64Schedule::EraseNodeFromReadyList(const DepNode &target) { + EraseNodeFromNodeList(target, readyList); +} + +/* Remove target from nodeList. */ +void AArch64Schedule::EraseNodeFromNodeList(const DepNode &target, MapleVector &nodeList) { + for (auto it = nodeList.begin(); it != nodeList.end(); ++it) { + if ((*it) == &target) { + nodeList.erase(it); + return; + } + } + + DEBUG_ASSERT(false, "CG internal error, erase node fail."); +} + +/* Dump all node of availableReadyList schedule information in current cycle. */ +void AArch64Schedule::DumpDebugInfo(const ScheduleProcessInfo &scheduleInfo) { + LogInfo::MapleLogger() << "Current cycle[ " << scheduleInfo.GetCurrCycle() << " ], Available in readyList is : \n"; + for (auto node : scheduleInfo.GetAvailableReadyList()) { + LogInfo::MapleLogger() << "NodeIndex[ " << node->GetIndex() + << " ], Estart[ " << node->GetEStart() << " ], Lstart[ "; + LogInfo::MapleLogger() << node->GetLStart() << " ], slot[ "; + LogInfo::MapleLogger() << + (node->GetReservation() == nullptr ? "SlotNone" : node->GetReservation()->GetSlotName()) << " ], "; + LogInfo::MapleLogger() << "succNodeNum[ " << node->GetSuccs().size() << " ], "; + node->GetInsn()->Dump(); + LogInfo::MapleLogger() << '\n'; + } +} + +/* + * Select a node from availableReadyList according to some heuristic rules, then: + * 1. change targetNode's schedule information; + * 2. try to add successors of targetNode to readyList; + * 3. update unscheduled node set, when targetNode is last kNodeTypeSeparator; + * 4. update AdvanceCycle. 
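The heuristic chain that SelectNode applies through CompareDepNode can be summarized as one ordered tie-break: smaller lstart first, then use of the most-demanded unit kind, then slot0, then more successors, then original program order. A sketch with a hypothetical Key record (the register-pressure checks are omitted):

```
#include <cstddef>

// Hypothetical per-node scheduling keys mirroring the comparator's order.
struct Key {
  unsigned lstart;       // less slack first
  bool usesMaxUnit;      // uses the unit kind most ready nodes want
  int slot;              // lower is better: slot0 before slot1
  std::size_t succCount; // more successors first
  unsigned insnId;       // original order as the final tie-break
};

bool Better(const Key &a, const Key &b) {
  if (a.lstart != b.lstart) return a.lstart < b.lstart;
  if (a.usesMaxUnit != b.usesMaxUnit) return a.usesMaxUnit;
  if (a.slot != b.slot) return a.slot < b.slot;
  if (a.succCount != b.succCount) return a.succCount > b.succCount;
  return a.insnId < b.insnId;
}
```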
+ */ +void AArch64Schedule::SelectNode(AArch64ScheduleProcessInfo &scheduleInfo) { + auto &availableReadyList = scheduleInfo.GetAvailableReadyList(); + auto it = availableReadyList.begin(); + DepNode *targetNode = *it; + if (availableReadyList.size() > 1) { + CalculateMaxUnitKindCount(scheduleInfo); + if (GetConsiderRegPressure()) { + UpdateReleaseRegInfo(scheduleInfo); + } + ++it; + for (; it != availableReadyList.end(); ++it) { + if (CompareDepNode(**it, *targetNode, scheduleInfo)) { + targetNode = *it; + } + } + } + /* The priority of free-reg node is higher than pipeline */ + while (!targetNode->CanBeScheduled()) { + scheduleInfo.IncCurrCycle(); + mad->AdvanceCycle(); + } + if (GetConsiderRegPressure() && !scheduleInfo.IsFirstSeparator()) { + UpdateLiveRegSet(scheduleInfo, *targetNode); + } + /* push target node into scheduled nodes and turn it into kScheduled state */ + scheduleInfo.PushElemIntoScheduledNodes(targetNode); + + EraseNodeFromReadyList(*targetNode); + + if (CGOptions::IsDebugSched()) { + LogInfo::MapleLogger() << "TargetNode : "; + targetNode->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } + + /* Update readyList. */ + UpdateReadyList(*targetNode, readyList, true); + + if (targetNode->GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex and calculate those depNodes's estart and lstart + * between current separator node and new Separator node. + */ + if (!scheduleInfo.IsFirstSeparator()) { + lastSeparatorIndex += kMaxDependenceNum; + UpdateELStartsOnCycle(scheduleInfo.GetCurrCycle()); + } else { + scheduleInfo.ResetIsFirstSeparator(); + } + } + + UpdateAdvanceCycle(scheduleInfo, *targetNode); +} + +void AArch64Schedule::UpdateAdvanceCycle(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &targetNode) { + switch (targetNode.GetInsn()->GetLatencyType()) { + case kLtClinit: + scheduleInfo.SetAdvanceCycle(kClinitAdvanceCycle); + break; + case kLtAdrpLdr: + scheduleInfo.SetAdvanceCycle(kAdrpLdrAdvanceCycle); + break; + case kLtClinitTail: + scheduleInfo.SetAdvanceCycle(kClinitTailAdvanceCycle); + break; + default: + break; + } + + if ((scheduleInfo.GetAdvanceCycle() == 0) && mad->IsFullIssued()) { + if (targetNode.GetEStart() > scheduleInfo.GetCurrCycle()) { + scheduleInfo.SetAdvanceCycle(1 + targetNode.GetEStart() - scheduleInfo.GetCurrCycle()); + } else { + scheduleInfo.SetAdvanceCycle(1); + } + } +} + +/* + * Advance mad's cycle until info's advanceCycle equal zero, + * and then clear info's availableReadyList. + */ +void AArch64Schedule::UpdateScheduleProcessInfo(AArch64ScheduleProcessInfo &info) { + while (info.GetAdvanceCycle() > 0) { + info.IncCurrCycle(); + mad->AdvanceCycle(); + info.DecAdvanceCycle(); + } + info.ClearAvailableReadyList(); +} + +/* + * Forward traversal readyList, if a node in readyList can be Schedule, add it to availableReadyList. + * Return true, if availableReadyList is not empty. + */ +bool AArch64Schedule::CheckSchedulable(AArch64ScheduleProcessInfo &info) const { + for (auto node : readyList) { + if (GetConsiderRegPressure()) { + info.PushElemIntoAvailableReadyList(node); + } else { + if (node->CanBeScheduled() && node->GetEStart() <= info.GetCurrCycle()) { + info.PushElemIntoAvailableReadyList(node); + } + } + } + return info.AvailableReadyListIsEmpty() ? 
false : true; +} + +/* + * Calculate estimated machine cycle count for an input node series + */ +int AArch64Schedule::CalSeriesCycles(const MapleVector &nodes) { + int currentCycle = 0; + /* after an instruction is issued, the minimum cycle count for the next instruction is 1 */ + int instructionBaseCycleCount = 1; + std::map scheduledCycleMap; + for (auto node : nodes) { + int latencyCycle = 0; + /* calculate the latest begin time of this node based on its predecessor's issue time and latency */ + for (auto pred : node->GetPreds()) { + DepNode &from = pred->GetFrom(); + int latency = static_cast(pred->GetLatency()); + int fromCycle = scheduledCycleMap[&from]; + if (fromCycle + latency > latencyCycle) { + latencyCycle = fromCycle + latency; + } + } + /* the issue time of this node is the max value between the next cycle and latest begin time */ + if (currentCycle + instructionBaseCycleCount >= latencyCycle) { + currentCycle = currentCycle + instructionBaseCycleCount; + } else { + currentCycle = latencyCycle; + } + /* record this node's issue cycle */ + scheduledCycleMap[node] = currentCycle; + } + return currentCycle; +} + +/* After building dependence graph, schedule insns. */ +uint32 AArch64Schedule::DoSchedule() { + AArch64ScheduleProcessInfo scheduleInfo(nodeSize); + Init(); + UpdateELStartsOnCycle(scheduleInfo.GetCurrCycle()); + InitLiveRegSet(scheduleInfo); + while (!readyList.empty()) { + UpdateScheduleProcessInfo(scheduleInfo); + /* Check if schedulable */ + if (!CheckSchedulable(scheduleInfo)) { + /* Advance cycle. */ + scheduleInfo.SetAdvanceCycle(1); + continue; + } + + if (scheduleInfo.GetLastUpdateCycle() < scheduleInfo.GetCurrCycle()) { + scheduleInfo.SetLastUpdateCycle(scheduleInfo.GetCurrCycle()); + } + + if (CGOptions::IsDebugSched()) { + DumpDebugInfo(scheduleInfo); + } + + /* Select a node to scheduling */ + SelectNode(scheduleInfo); + } + + DEBUG_ASSERT(scheduleInfo.SizeOfScheduledNodes() == nodes.size(), "CG internal error, Not all nodes scheduled."); + + nodes.clear(); + (void)nodes.insert(nodes.begin(), scheduleInfo.GetScheduledNodes().begin(), scheduleInfo.GetScheduledNodes().end()); + /* the second to last node is the true last node, because the last is kNodeTypeSeparator node */ + DEBUG_ASSERT(nodes.size() - 2 >= 0, "size of nodes should be greater than or equal 2"); + return (nodes[nodes.size() - 2]->GetSchedCycle()); +} + +struct RegisterInfoUnit { + RegisterInfoUnit() : intRegNum(0), fpRegNum(0), ccRegNum(0) {} + uint32 intRegNum = 0; + uint32 fpRegNum = 0; + uint32 ccRegNum = 0; +}; + +RegisterInfoUnit GetDepNodeDefType(const DepNode &depNode, CGFunc &f) { + RegisterInfoUnit rIU; + for (auto defRegNO : depNode.GetDefRegnos()) { + RegType defRegTy = AArch64ScheduleProcessInfo::GetRegisterType(f, defRegNO); + if (defRegTy == kRegTyInt) { + rIU.intRegNum++; + } else if (defRegTy == kRegTyFloat) { + rIU.fpRegNum++; + } else if (defRegTy == kRegTyCc) { + rIU.ccRegNum++; + DEBUG_ASSERT(rIU.ccRegNum <= 1, "spill cc reg?"); + } else { + CHECK_FATAL(false, "NIY aarch64 register type"); + } + } + /* call node will not increase reg def pressure */ + if (depNode.GetInsn() != nullptr && depNode.GetInsn()->IsCall()) { + rIU.intRegNum = 0; + rIU.fpRegNum = 0; + } + return rIU; +} + +AArch64Schedule::CSRResult AArch64Schedule::DoCSR(DepNode &node1, DepNode &node2, + AArch64ScheduleProcessInfo &scheduleInfo) const { + RegisterInfoUnit defRIU1 = GetDepNodeDefType(node1, cgFunc); + RegisterInfoUnit defRIU2 = GetDepNodeDefType(node2, cgFunc); + /* do not increase callee save 
pressure before call */ + if (static_cast(scheduleInfo.SizeOfCalleeSaveLiveRegister(true)) >= intCalleeSaveThreshold) { + if (defRIU1.intRegNum > 0 && defRIU2.intRegNum > 0) { + CSRResult csrInfo = ScheduleCrossCall(node1, node2); + if ((csrInfo == kNode1 && defRIU1.intRegNum >= scheduleInfo.GetFreeIntRegs(node1)) || + (csrInfo == kNode2 && defRIU2.intRegNum >= scheduleInfo.GetFreeIntRegs(node2))) { + return csrInfo; + } + } + } + if (static_cast(scheduleInfo.SizeOfCalleeSaveLiveRegister(false)) >= fpCalleeSaveThreshold) { + if (defRIU1.fpRegNum > 0 && defRIU2.fpRegNum > 0) { + CSRResult csrInfo = ScheduleCrossCall(node1, node2); + if ((csrInfo == kNode1 && defRIU1.fpRegNum >= scheduleInfo.GetFreeFpRegs(node1)) || + (csrInfo == kNode2 && defRIU2.fpRegNum >= scheduleInfo.GetFreeFpRegs(node2))) { + return csrInfo; + } + } + } + auto FindFreeRegNode = [&](bool isInt)->CSRResult { + auto freeRegNodes = isInt ? scheduleInfo.GetFreeIntRegNodeSet() : scheduleInfo.GetFreeFpRegNodeSet(); + if (freeRegNodes.find(&node1) != freeRegNodes.end() && freeRegNodes.find(&node2) == freeRegNodes.end()) { + return kNode1; + } + if (freeRegNodes.find(&node1) == freeRegNodes.end() && freeRegNodes.find(&node2) != freeRegNodes.end()) { + return kNode2; + } + return kDoCSP; + }; + if (static_cast(scheduleInfo.SizeOfIntLiveRegSet()) >= intRegPressureThreshold) { + if (FindFreeRegNode(true) != kDoCSP) { + return FindFreeRegNode(true); + } + } + if (static_cast(scheduleInfo.SizeOfFpLiveRegSet()) >= fpRegPressureThreshold) { + if (FindFreeRegNode(false) != kDoCSP) { + return FindFreeRegNode(false); + } + } + + bool canDoCSPFurther = false; + if (static_cast(scheduleInfo.SizeOfIntLiveRegSet()) >= intRegPressureThreshold) { + if (defRIU1.intRegNum != defRIU2.intRegNum) { + return defRIU1.intRegNum < defRIU2.intRegNum ? kNode1 : kNode2; + } else { + canDoCSPFurther = defRIU1.intRegNum == 0; + } + } + if (static_cast(scheduleInfo.SizeOfFpLiveRegSet()) >= fpRegPressureThreshold) { + if (defRIU1.fpRegNum != defRIU2.fpRegNum) { + return defRIU1.fpRegNum < defRIU2.fpRegNum ? kNode1 : kNode2; + } else { + canDoCSPFurther = (defRIU1.fpRegNum == 0 && canDoCSPFurther); + } + } + /* if both nodes are going to increase reg pressure, do not do CSP further */ + return canDoCSPFurther ? kDoCSP : (node1.GetInsn()->GetId() < node2.GetInsn()->GetId() ? kNode1 : kNode2); +} + +AArch64Schedule::CSRResult AArch64Schedule::ScheduleCrossCall(const DepNode &node1, const DepNode &node2) const { + uint32 node1ID = node1.GetInsn()->GetId(); + uint32 node2ID = node2.GetInsn()->GetId(); + bool order = node1ID < node2ID; /* true -- node1 before node2 false -- node1 after node2 */ + Insn *beginInsn = order ? node1.GetInsn() : node2.GetInsn(); + uint32 finialId = order ? node2ID : node1ID; + for (Insn *checkInsn = beginInsn; (checkInsn != nullptr && checkInsn->GetId() <= finialId); + checkInsn = checkInsn->GetNextMachineInsn()) { + if (checkInsn->IsCall()) { + return order ? 
kNode1 : kNode2; + } + } + return kDoCSP; +}; + +/* + * Comparing priorities of node1 and node2 according to some heuristic rules + * return true if node1's priority is higher + * crp -- consider reg pressure + */ +bool AArch64Schedule::CompareDepNode(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const { + /* + * strategy CSR -- code schedule for register pressure + * if pressure is above the threshold, select the node which can reduce register pressure + */ + if (GetConsiderRegPressure()) { + switch (DoCSR(node1, node2, scheduleInfo)) { + case kNode1: + return true; + case kNode2: + return false; + default: + break; + } + } + /* strategy CSP -- code schedule for CPU pipeline */ + /* less LStart first */ + if (node1.GetLStart() != node2.GetLStart()) { + return node1.GetLStart() < node2.GetLStart(); + } + + /* max unit kind use */ + bool use1 = IfUseUnitKind(node1, maxUnitIndex); + bool use2 = IfUseUnitKind(node2, maxUnitIndex); + if (use1 != use2) { + return use1; + } + + /* slot0 first */ + SlotType slotType1 = node1.GetReservation()->GetSlot(); + SlotType slotType2 = node2.GetReservation()->GetSlot(); + if (slotType1 == kSlots) { + slotType1 = kSlot0; + } + if (slotType2 == kSlots) { + slotType2 = kSlot0; + } + if (slotType1 != slotType2) { + return slotType1 < slotType2; + } + + /* more succNodes fisrt */ + if (node1.GetSuccs().size() != node2.GetSuccs().size()) { + return node1.GetSuccs().size() > node2.GetSuccs().size(); + } + + /* default order */ + return node1.GetInsn()->GetId() < node2.GetInsn()->GetId(); +} + +/* + * Calculate number of every unit that used by avaliableReadyList's nodes and save the max in maxUnitIndex + */ +void AArch64Schedule::CalculateMaxUnitKindCount(ScheduleProcessInfo &scheduleInfo) { + uint32 unitKindCount[kUnitKindLast] = { 0 }; + for (auto node : scheduleInfo.GetAvailableReadyList()) { + CountUnitKind(*node, unitKindCount, kUnitKindLast); + } + + uint32 maxCount = 0; + maxUnitIndex = 0; + for (size_t i = 1; i < kUnitKindLast; ++i) { + if (maxCount < unitKindCount[i]) { + maxCount = unitKindCount[i]; + maxUnitIndex = i; + } + } +} + +/* + * Update the release reg node set + * When node in this set is scheduled, register pressure can be reduced + */ +void AArch64Schedule::UpdateReleaseRegInfo(AArch64ScheduleProcessInfo &scheduleInfo) { + auto &availableReadyList = scheduleInfo.GetAvailableReadyList(); + scheduleInfo.ClearALLFreeRegNodeSet(); + /* Traverse availableReadyList and add those can reduce register pressure to release reg node set */ + for (auto node : availableReadyList) { + std::set freeRegNO = CanFreeRegister(*node); + if (!freeRegNO.empty()) { + scheduleInfo.VaryFreeRegSet(cgFunc, freeRegNO, *node); + } + } +} + +/* + * return registers which an instruction can release after being scheduled + */ +std::set AArch64Schedule::CanFreeRegister(const DepNode &node) const { + std::set freeRegSet; + for (auto reg : node.GetUseRegnos()) { + if (RegPressureSchedule::IsLastUse(node, reg)) { + freeRegSet.emplace(reg); + } + } + return freeRegSet; +} + +/* + * After an instruction is scheduled, update live reg set + */ +void AArch64Schedule::UpdateLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode& node) { + /* dealing with def reg, add def reg into the live reg set */ + size_t i = 1; + for (auto &defReg : node.GetDefRegnos()) { + if (scheduleInfo.FindIntLiveReg(defReg) == 0 && scheduleInfo.FindFpLiveReg(defReg) == 0) { + scheduleInfo.VaryLiveRegSet(cgFunc, defReg, true); + } + /* delete dead def reg from live 
reg set because its live range is only 1 cycle */ + if (node.GetRegDefs(i) == nullptr && liveOutRegNo.find(defReg) == liveOutRegNo.end()) { + scheduleInfo.VaryLiveRegSet(cgFunc, defReg, false); + } + ++i; + } + /* dealing with use reg, delete use reg from live reg set if this instruction is last use of it */ + for (auto &useReg : node.GetUseRegnos()) { + if (RegPressureSchedule::IsLastUse(node, useReg)) { + if ((scheduleInfo.FindIntLiveReg(useReg) != 0 || scheduleInfo.FindFpLiveReg(useReg) != 0) && + liveOutRegNo.find(useReg) == liveOutRegNo.end()) { + scheduleInfo.VaryLiveRegSet(cgFunc, useReg, false); + } + } + } +} + +/* + * Initialize the live reg set based on the live in reg information + */ +void AArch64Schedule::InitLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo) { + if (GetConsiderRegPressure()) { + for (auto reg : liveInRegNo) { + scheduleInfo.VaryLiveRegSet(cgFunc, reg, true); + } + } +} + +/* + * A simulated schedule: + * scheduling instruction in original order to calculate original execute cycles. + */ +uint32 AArch64Schedule::SimulateOnly() { + uint32 currCycle = 0; + uint32 advanceCycle = 0; + Init(); + + for (uint32 i = 0; i < nodes.size();) { + while (advanceCycle > 0) { + ++currCycle; + mad->AdvanceCycle(); + --advanceCycle; + } + + DepNode *targetNode = nodes[i]; + if ((currCycle >= targetNode->GetEStart()) && targetNode->CanBeScheduled()) { + targetNode->SetSimulateCycle(currCycle); + targetNode->OccupyUnits(); + + /* Update estart. */ + for (auto succLink : targetNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + uint32 eStart = currCycle + succLink->GetLatency(); + if (succNode.GetEStart() < eStart) { + succNode.SetEStart(eStart); + } + } + + if (CGOptions::IsDebugSched()) { + LogInfo::MapleLogger() << "[Simulate] TargetNode : "; + targetNode->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } + + switch (targetNode->GetInsn()->GetLatencyType()) { + case kLtClinit: + advanceCycle = kClinitAdvanceCycle; + break; + case kLtAdrpLdr: + advanceCycle = kAdrpLdrAdvanceCycle; + break; + case kLtClinitTail: + advanceCycle = kClinitTailAdvanceCycle; + break; + default: + break; + } + + ++i; + } else { + advanceCycle = 1; + } + } + /* the second to last node is the true last node, because the last is kNodeTypeSeparator nod */ + DEBUG_ASSERT(nodes.size() - kSecondToLastNode >= 0, "size of nodes should be greater than or equal 2"); + return (nodes[nodes.size() - kSecondToLastNode]->GetSimulateCycle()); +} + +/* Restore dependence graph to normal CGIR. */ +void AArch64Schedule::FinalizeScheduling(BB &bb, const DepAnalysis &depAnalysis) { + bb.ClearInsns(); + + const Insn *prevLocInsn = (bb.GetPrev() != nullptr ? bb.GetPrev()->GetLastLoc() : nullptr); + for (auto node : nodes) { + /* Append comments first. */ + for (auto comment : node->GetComments()) { + if (comment->GetPrev() != nullptr && comment->GetPrev()->IsDbgInsn()) { + bb.AppendInsn(*comment->GetPrev()); + } + bb.AppendInsn(*comment); + } + /* Append insn. */ + if (!node->GetClinitInsns().empty()) { + for (auto clinit : node->GetClinitInsns()) { + bb.AppendInsn(*clinit); + } + } else if (node->GetType() == kNodeTypeNormal) { + if (node->GetInsn()->GetPrev() != nullptr && node->GetInsn()->GetPrev()->IsDbgInsn()) { + bb.AppendInsn(*node->GetInsn()->GetPrev()); + } + bb.AppendInsn(*node->GetInsn()); + } + + /* Append cfi instructions. 
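The core of SimulateOnly is an in-order issue loop: each insn issues at the later of the next free cycle and its earliest start. A stripped-down sketch that ignores pipeline units and the clinit/adrp-ldr advance cycles:

```
#include <cstddef>
#include <cstdint>
#include <vector>

// In-order issue simulation: `estart` holds each node's earliest-start cycle,
// consistent with dependence latencies; the result is the cycle at which the
// final node issues. At least one cycle separates consecutive issues.
uint32_t SimulateInOrder(const std::vector<uint32_t> &estart) {
  uint32_t cycle = 0;
  for (std::size_t i = 0; i < estart.size(); ++i) {
    uint32_t issueAt = cycle < estart[i] ? estart[i] : cycle;
    cycle = issueAt + 1;
  }
  return cycle == 0 ? 0 : cycle - 1;  // cycle of the final issue
}
```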
*/ + for (auto cfi : node->GetCfiInsns()) { + bb.AppendInsn(*cfi); + } + } + bb.SetLastLoc(prevLocInsn); + + for (auto lastComment : depAnalysis.GetLastComments()) { + bb.AppendInsn(*lastComment); + } +} + +/* For every node of nodes, update it's bruteForceSchedCycle. */ +void AArch64Schedule::UpdateBruteForceSchedCycle() { + for (auto node : nodes) { + node->SetBruteForceSchedCycle(node->GetSchedCycle()); + } +} + +/* Recursively schedule all of the possible node. */ +void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVector &readyList, uint32 currCycle, + MapleVector &scheduledNodes, uint32 &maxCycleCount, + MapleVector &optimizedScheduledNodes) { + /* Save states. */ + constexpr int32 unitSize = 31; + DEBUG_ASSERT(unitSize == mad->GetAllUnitsSize(), "CG internal error."); + std::vector occupyTable; + occupyTable.resize(unitSize, 0); + mad->SaveStates(occupyTable, unitSize); + + /* Schedule targetNode first. */ + targetNode.SetState(kScheduled); + targetNode.SetSchedCycle(currCycle); + scheduledNodes.emplace_back(&targetNode); + + MapleVector tempList = readyList; + EraseNodeFromNodeList(targetNode, tempList); + targetNode.OccupyUnits(); + + /* Update readyList. */ + UpdateReadyList(targetNode, tempList, true); + + if (targetNode.GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex. */ + lastSeparatorIndex += kMaxDependenceNum; + } + + if (tempList.empty()) { + DEBUG_ASSERT(scheduledNodes.size() == nodes.size(), "CG internal error, Not all nodes scheduled."); + if (currCycle < maxCycleCount) { + maxCycleCount = currCycle; + UpdateBruteForceSchedCycle(); + optimizedScheduledNodes = scheduledNodes; + } + } else { + uint32 advanceCycle = 0; + switch (targetNode.GetInsn()->GetLatencyType()) { + case kLtClinit: + advanceCycle = kClinitAdvanceCycle; + break; + case kLtAdrpLdr: + advanceCycle = kAdrpLdrAdvanceCycle; + break; + case kLtClinitTail: + advanceCycle = kClinitTailAdvanceCycle; + break; + default: + break; + } + + do { + std::vector availableReadyList; + std::vector tempAvailableList; + while (advanceCycle > 0) { + ++currCycle; + mad->AdvanceCycle(); + --advanceCycle; + } + /* Check EStart. */ + for (auto node : tempList) { + if (node->GetEStart() <= currCycle) { + tempAvailableList.emplace_back(node); + } + } + + if (tempAvailableList.empty()) { + /* Advance cycle. */ + advanceCycle = 1; + continue; + } + + /* Check if schedulable */ + for (auto node : tempAvailableList) { + if (node->CanBeScheduled()) { + availableReadyList.emplace_back(node); + } + } + + if (availableReadyList.empty()) { + /* Advance cycle. */ + advanceCycle = 1; + continue; + } + + for (auto node : availableReadyList) { + IterateBruteForce(*node, tempList, currCycle, scheduledNodes, maxCycleCount, optimizedScheduledNodes); + } + + break; + } while (true); + } + + /* + * Recover states. + * Restore targetNode first. + */ + targetNode.SetState(kReady); + targetNode.SetSchedCycle(0); + scheduledNodes.pop_back(); + mad->RestoreStates(occupyTable, unitSize); + + /* Update readyList. */ + for (auto succLink : targetNode.GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.IncreaseValidPredsSize(); + succNode.SetState(kNormal); + } + + if (targetNode.GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex. 
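IterateBruteForce is classic backtracking: schedule one ready node, recurse, then restore the saved state. A skeleton that keeps only the ready set as state and omits releasing newly-ready successors (which the real pass does via UpdateReadyList):

```
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Enumerate every issue order of the ready set, tracking the best (smallest)
// finishing cycle. Exponential, which is why the pass only attempts this
// below a node-count cap.
void Search(std::vector<int> &ready, std::vector<int> &order,
            uint32_t cycle, uint32_t &best) {
  if (ready.empty()) {
    best = std::min(best, cycle);
    return;
  }
  for (std::size_t i = 0; i < ready.size(); ++i) {
    int node = ready[i];
    ready.erase(ready.begin() + static_cast<long>(i));  // schedule node
    order.push_back(node);
    Search(ready, order, cycle + 1, best);
    order.pop_back();                                   // restore state
    ready.insert(ready.begin() + static_cast<long>(i), node);
  }
}
```

Seeding `best` with UINT32_MAX mirrors maxCycleCount's initial 0xFFFFFFFF.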
*/ + lastSeparatorIndex -= kMaxDependenceNum; + } +} + +/* + * Brute force schedule: + * Finding all possibile schedule list of current bb, and calculate every list's execute cycles, + * return the optimal schedule list and it's cycles. + */ +uint32 AArch64Schedule::DoBruteForceSchedule() { + MapleVector scheduledNodes(alloc.Adapter()); + MapleVector optimizedScheduledNodes(alloc.Adapter()); + + uint32 currCycle = 0; + uint32 maxCycleCount = 0xFFFFFFFF; + Init(); + + /* Schedule First separator. */ + DepNode *targetNode = readyList.front(); + targetNode->SetState(kScheduled); + targetNode->SetSchedCycle(currCycle); + scheduledNodes.emplace_back(targetNode); + readyList.clear(); + + /* Update readyList. */ + UpdateReadyList(*targetNode, readyList, false); + + DEBUG_ASSERT(targetNode->GetType() == kNodeTypeSeparator, "The first node should be separator node."); + DEBUG_ASSERT(!readyList.empty(), "readyList should not be empty."); + + for (auto targetNodeTemp : readyList) { + IterateBruteForce(*targetNodeTemp, readyList, currCycle, scheduledNodes, maxCycleCount, optimizedScheduledNodes); + } + + nodes = optimizedScheduledNodes; + return maxCycleCount; +} + +/* + * Update ready list after the targetNode has been scheduled. + * For every targetNode's successor, if it's all predecessors have been scheduled, + * add it to ready list and update it's information (like state, estart). + */ +void AArch64Schedule::UpdateReadyList(DepNode &targetNode, MapleVector &readyList, bool updateEStart) { + for (auto succLink : targetNode.GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DescreaseValidPredsSize(); + if (succNode.GetValidPredsSize() == 0) { + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + + /* Set eStart. */ + if (updateEStart) { + uint32 maxEstart = 0; + for (auto predLink : succNode.GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + uint32 eStart = predNode.GetSchedCycle() + predLink->GetLatency(); + maxEstart = (maxEstart < eStart ? eStart : maxEstart); + } + succNode.SetEStart(maxEstart); + } + } + } +} + +/* For every node of nodes, dump it's Depdence information. */ +void AArch64Schedule::DumpDepGraph(const MapleVector &nodes) const { + for (auto node : nodes) { + depAnalysis->DumpDepNode(*node); + LogInfo::MapleLogger() << "---------- preds ----------" << "\n"; + for (auto pred : node->GetPreds()) { + depAnalysis->DumpDepLink(*pred, &(pred->GetFrom())); + } + LogInfo::MapleLogger() << "---------- succs ----------" << "\n"; + for (auto succ : node->GetSuccs()) { + depAnalysis->DumpDepLink(*succ, &(succ->GetTo())); + } + LogInfo::MapleLogger() << "---------------------------" << "\n"; + } +} + +/* For every node of nodes, dump it's schedule time according simulate type and instruction information. */ +void AArch64Schedule::DumpScheduleResult(const MapleVector &nodes, SimulateType type) const { + for (auto node : nodes) { + LogInfo::MapleLogger() << "cycle[ "; + switch (type) { + case kListSchedule: + LogInfo::MapleLogger() << node->GetSchedCycle(); + break; + case kBruteForce: + LogInfo::MapleLogger() << node->GetBruteForceSchedCycle(); + break; + case kSimulateOnly: + LogInfo::MapleLogger() << node->GetSimulateCycle(); + break; + } + LogInfo::MapleLogger() << " ] "; + node->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } +} + +/* Print bb's dependence dot graph information to a file. 
*/ +void AArch64Schedule::GenerateDot(const BB &bb, const MapleVector &nodes) const { + std::streambuf *coutBuf = std::cout.rdbuf(); /* keep original cout buffer */ + std::ofstream dgFile; + std::streambuf *buf = dgFile.rdbuf(); + std::cout.rdbuf(buf); + + /* construct the file name */ + std::string fileName; + fileName.append(phaseName); + fileName.append("_"); + fileName.append(cgFunc.GetName()); + fileName.append("_BB"); + auto str = std::to_string(bb.GetId()); + fileName.append(str); + fileName.append("_dep_graph.dot"); + + dgFile.open(fileName.c_str(), std::ios::trunc); + if (!dgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failure.\n"; + return; + } + dgFile << "digraph {\n"; + for (auto node : nodes) { + for (auto succ : node->GetSuccs()) { + dgFile << "insn" << node->GetInsn() << " -> " << "insn" << succ->GetTo().GetInsn(); + dgFile << " ["; + if (succ->GetDepType() == kDependenceTypeTrue) { + dgFile << "color=red,"; + } + dgFile << "label= \"" << succ->GetLatency() << "\""; + dgFile << "];\n"; + } + } + + for (auto node : nodes) { + MOperator mOp = node->GetInsn()->GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + dgFile << "insn" << node->GetInsn() << "["; + dgFile << "shape=box,label= \" " << node->GetInsn()->GetId() << ":\n"; + dgFile << "{ "; + dgFile << md->name << "\n"; + dgFile << "}\"];\n"; + } + dgFile << "}\n"; + dgFile.flush(); + dgFile.close(); + std::cout.rdbuf(coutBuf); +} + +RegType AArch64ScheduleProcessInfo::GetRegisterType(CGFunc &f, regno_t regNO) { + if (AArch64isa::IsPhysicalRegister(regNO)) { + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + return kRegTyInt; + } else if (AArch64isa::IsFPSIMDRegister(static_cast(regNO))) { + return kRegTyFloat; + } else { + CHECK_FATAL(false, "unknown physical reg"); + } + } else { + RegOperand *curRegOpnd = f.GetVirtualRegisterOperand(regNO); + DEBUG_ASSERT(curRegOpnd != nullptr, "register which is not physical and virtual"); + return curRegOpnd->GetRegisterType(); + } +} + +void AArch64ScheduleProcessInfo::VaryLiveRegSet(CGFunc &f, regno_t regNO, bool isInc) { + RegType registerTy = GetRegisterType(f, regNO); + if (registerTy == kRegTyInt || registerTy == kRegTyVary) { + isInc ? IncIntLiveRegSet(regNO) : DecIntLiveRegSet(regNO); + } else if (registerTy == kRegTyFloat) { + isInc ? 
IncFpLiveRegSet(regNO) : DecFpLiveRegSet(regNO); + } + /* consider other type register */ +} + +void AArch64ScheduleProcessInfo::VaryFreeRegSet(CGFunc &f, std::set regNOs, DepNode &node) { + for (auto regNO : regNOs) { + RegType registerTy = GetRegisterType(f, regNO); + if (registerTy == kRegTyInt || registerTy == kRegTyVary /* memory base register must be int */) { + IncFreeIntRegNode(node); + } else if (registerTy == kRegTyFloat) { + IncFreeFpRegNode(node); + } else if (registerTy == kRegTyCc) { + /* do not count CC reg */ + return; + } else { + /* consider other type register */ + CHECK_FATAL(false, "do not support this type of register"); + } + } +} + +/* Do brute force scheduling and dump scheduling information */ +void AArch64Schedule::BruteForceScheduling(const BB &bb) { + LogInfo::MapleLogger() << "\n\n$$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "\n BB id = " << bb.GetId() << "; nodes.size = " << nodes.size() << "\n"; + + constexpr uint32 maxBruteForceNum = 50; + if (nodes.size() < maxBruteForceNum) { + GenerateDot(bb, nodes); + uint32 maxBruteForceCycle = DoBruteForceSchedule(); + MapleVector bruteNodes = nodes; + uint32 maxSchedCycle = DoSchedule(); + if (maxBruteForceCycle < maxSchedCycle) { + LogInfo::MapleLogger() << "maxBruteForceCycle = " << maxBruteForceCycle << "; maxSchedCycle = "; + LogInfo::MapleLogger() << maxSchedCycle << "\n"; + LogInfo::MapleLogger() << "\n ## Dump dependence graph ## " << "\n"; + DumpDepGraph(nodes); + LogInfo::MapleLogger() << "\n ** Dump bruteForce scheduling result." << "\n"; + DumpScheduleResult(bruteNodes, kBruteForce); + LogInfo::MapleLogger() << "\n ^^ Dump list scheduling result." << "\n"; + DumpScheduleResult(nodes, kListSchedule); + } + } else { + LogInfo::MapleLogger() << "Skip BruteForce scheduling." << "\n"; + DoSchedule(); + } +} + +/* Do simulate scheduling and dump scheduling information */ +void AArch64Schedule::SimulateScheduling(const BB &bb) { + uint32 originCycle = SimulateOnly(); + MapleVector oldNodes = nodes; + uint32 schedCycle = DoSchedule(); + if (originCycle < schedCycle) { + LogInfo::MapleLogger() << "Worse cycle [ " << (schedCycle - originCycle) << " ]; "; + LogInfo::MapleLogger() << "originCycle = " << originCycle << "; schedCycle = "; + LogInfo::MapleLogger() << schedCycle << "; nodes.size = " << nodes.size(); + LogInfo::MapleLogger() << "; $$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "; BB id = " << bb.GetId() << "\n"; + LogInfo::MapleLogger() << "\n ** Dump original result." << "\n"; + DumpScheduleResult(oldNodes, kSimulateOnly); + LogInfo::MapleLogger() << "\n ^^ Dump list scheduling result." << "\n"; + DumpScheduleResult(nodes, kListSchedule); + } else if (originCycle > schedCycle) { + LogInfo::MapleLogger() << "Advance cycle [ " << (originCycle - schedCycle) << " ]; "; + LogInfo::MapleLogger() << "originCycle = " << originCycle << "; schedCycle = "; + LogInfo::MapleLogger() << schedCycle << "; nodes.size = " << nodes.size(); + LogInfo::MapleLogger() << "; $$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "; BB id = " << bb.GetId() << "\n"; + } else { + LogInfo::MapleLogger() << "Equal cycle [ 0 ]; originCycle = " << originCycle; + LogInfo::MapleLogger() << " ], ignore. nodes.size = " << nodes.size() << "\n"; + } +} + +/* + * A local list scheduling. + * Schedule insns in basic blocks. 
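UpdateLiveRegSet boils down to set maintenance at issue time: a node's defs enter the live set, and its last-use operands leave it unless they are live out of the block. A minimal sketch of that rule (ignoring the one-cycle dead-def special case):

```
#include <set>
#include <vector>

using Reg = unsigned;

// Apply one issued node's effect on the live register set.
void OnIssue(std::set<Reg> &live, const std::vector<Reg> &defs,
             const std::vector<Reg> &lastUses, const std::set<Reg> &liveOut) {
  for (Reg d : defs) {
    live.insert(d);  // a def starts a live range
  }
  for (Reg u : lastUses) {
    if (liveOut.count(u) == 0) {
      live.erase(u);  // a last use ends one, unless live out of the BB
    }
  }
}
```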
+ */ +void AArch64Schedule::ListScheduling(bool beforeRA) { + InitIDAndLoc(); + + mad = Globals::GetInstance()->GetMAD(); + if (beforeRA) { + RegPressure::SetMaxRegClassNum(kRegisterLast); + } + depAnalysis = memPool.New(cgFunc, memPool, *mad, beforeRA); + + FOR_ALL_BB(bb, &cgFunc) { + depAnalysis->Run(*bb, nodes); + + if (LIST_SCHED_DUMP_REF) { + GenerateDot(*bb, nodes); + DumpDepGraph(nodes); + } + if (beforeRA) { + liveInRegNo = bb->GetLiveInRegNO(); + liveOutRegNo = bb->GetLiveOutRegNO(); + if (bb->GetKind() != BB::kBBReturn) { + SetConsiderRegPressure(); + DoSchedule(); + } else { + RegPressureScheduling(*bb, nodes); + } + } else { + ClinitPairOpt(); + MemoryAccessPairOpt(); + if (CGOptions::IsDruteForceSched()) { + BruteForceScheduling(*bb); + } else if (CGOptions::IsSimulateSched()) { + SimulateScheduling(*bb); + } else { + DoSchedule(); + } + } + + FinalizeScheduling(*bb, *depAnalysis); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9c90ed46bdc8477759b32dabbd60502211fc974a --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp @@ -0,0 +1,378 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_ssa.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64CGSSAInfo::RenameInsn(Insn &insn) { + auto opndNum = static_cast(insn.GetOperandSize()); + const InsnDesc *md = insn.GetDesc(); + if (md->IsPhi()) { + return; + } + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + auto *opndProp = (md->opndMD[static_cast(i)]); + A64SSAOperandRenameVisitor renameVisitor(*this, insn, *opndProp, i); + opnd.Accept(renameVisitor); + } +} + +MemOperand *AArch64CGSSAInfo::CreateMemOperand(MemOperand &memOpnd, bool isOnSSA) { + return isOnSSA ? 
memOpnd.Clone(*memPool) : + &static_cast(cgFunc)->GetOrCreateMemOpnd(memOpnd); +} + +RegOperand *AArch64CGSSAInfo::GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) { + if (vRegOpnd.IsVirtualRegister()) { + DEBUG_ASSERT(!vRegOpnd.IsSSAForm(), "Unexpect ssa operand"); + if (isDef) { + VRegVersion *newVersion = CreateNewVersion(vRegOpnd, curInsn, idx); + CHECK_FATAL(newVersion != nullptr, "get ssa version failed"); + return newVersion->GetSSAvRegOpnd(); + } else { + VRegVersion *curVersion = GetVersion(vRegOpnd); + if (curVersion == nullptr) { + curVersion = RenamedOperandSpecialCase(vRegOpnd, curInsn, idx); + } + curVersion->AddUseInsn(*this, curInsn, idx); + return curVersion->GetSSAvRegOpnd(); + } + } + DEBUG_ASSERT(false, "Get Renamed operand failed"); + return nullptr; +} + +VRegVersion *AArch64CGSSAInfo::RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx) { + LogInfo::MapleLogger() << "WARNING: " << vRegOpnd.GetRegisterNumber() << " has no def info in function : " + << cgFunc->GetName() << " !\n"; + /* occupy operand for no def vreg */ + if (!IncreaseSSAOperand(vRegOpnd.GetRegisterNumber(), nullptr)) { + DEBUG_ASSERT(GetAllSSAOperands().find(vRegOpnd.GetRegisterNumber()) != GetAllSSAOperands().end(), "should find"); + AddNoDefVReg(vRegOpnd.GetRegisterNumber()); + } + VRegVersion *version = CreateNewVersion(vRegOpnd, curInsn, idx); + version->SetDefInsn(nullptr, kDefByNo); + return version; +} + +RegOperand *AArch64CGSSAInfo::CreateSSAOperand(RegOperand &virtualOpnd) { + regno_t ssaRegNO = static_cast(GetAllSSAOperands().size()) + SSARegNObase; + while (GetAllSSAOperands().count(ssaRegNO)) { + ssaRegNO++; + SSARegNObase++; + } + RegOperand *newVreg = memPool->New(ssaRegNO, + virtualOpnd.GetSize(), virtualOpnd.GetRegisterType()); + newVreg->SetValidBitsNum(virtualOpnd.GetValidBitsNum()); + newVreg->SetOpndSSAForm(); + return newVreg; +} + +void AArch64CGSSAInfo::ReplaceInsn(Insn &oriInsn, Insn &newInsn) { + A64OpndSSAUpdateVsitor ssaUpdator(*this); + auto UpdateInsnSSAInfo = [&ssaUpdator](Insn &curInsn, bool isDelete) { + const InsnDesc *md = curInsn.GetDesc(); + for (uint32 i = 0; i < curInsn.GetOperandSize(); ++i) { + Operand &opnd = curInsn.GetOperand(i); + auto *opndProp = md->opndMD[i]; + if (isDelete) { + ssaUpdator.MarkDecrease(); + } else { + ssaUpdator.MarkIncrease(); + } + ssaUpdator.SetInsnOpndInfo(curInsn, *opndProp, i); + opnd.Accept(ssaUpdator); + } + }; + UpdateInsnSSAInfo(oriInsn, true); + newInsn.SetId(oriInsn.GetId()); + UpdateInsnSSAInfo(newInsn, false); + CHECK_FATAL(!ssaUpdator.HasDeleteDef(), "delete def point in replace insn, please check"); +} + +/* do not break binding between input and output operands in asm */ +void AArch64CGSSAInfo::CheckAsmDUbinding(Insn &insn, const VRegVersion *toBeReplaced, VRegVersion *newVersion) { + if (insn.GetMachineOpcode() == MOP_asm) { + for (auto &opndIt : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { + if (opndIt->IsSSAForm()) { + VRegVersion *defVersion = FindSSAVersion(opndIt->GetRegisterNumber()); + if (defVersion && defVersion->GetOriginalRegNO() == toBeReplaced->GetOriginalRegNO()) { + insn.AddRegBinding(defVersion->GetOriginalRegNO(), newVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } + } + } + } +} + +void AArch64CGSSAInfo::ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) { + MapleUnorderedMap &useList = toBeReplaced->GetAllUseInsns(); + for (auto it = useList.begin(); it != useList.end();) { + Insn *useInsn = 
it->second->GetInsn(); + CheckAsmDUbinding(*useInsn, toBeReplaced, newVersion); + for (auto &opndIt : it->second->GetOperands()) { + Operand &opnd = useInsn->GetOperand(opndIt.first); + A64ReplaceRegOpndVisitor replaceRegOpndVisitor( + *cgFunc, *useInsn, opndIt.first, *toBeReplaced->GetSSAvRegOpnd(), *newVersion->GetSSAvRegOpnd()); + opnd.Accept(replaceRegOpndVisitor); + newVersion->AddUseInsn(*this, *useInsn, opndIt.first); + it->second->ClearDU(opndIt.first); + } + it = useList.erase(it); + } +} + +void AArch64CGSSAInfo::CreateNewInsnSSAInfo(Insn &newInsn) { + uint32 opndNum = newInsn.GetOperandSize(); + MarkInsnsInSSA(newInsn); + for (uint32 i = 0; i < opndNum; i++) { + Operand &opnd = newInsn.GetOperand(i); + auto *opndProp = newInsn.GetDesc()->opndMD[i]; + if (opndProp->IsDef() && opndProp->IsUse()) { + CHECK_FATAL(false, "do not support both def and use"); + } + if (opndProp->IsDef()) { + CHECK_FATAL(opnd.IsRegister(), "defOpnd must be reg"); + auto &defRegOpnd = static_cast(opnd); + regno_t defRegNO = defRegOpnd.GetRegisterNumber(); + uint32 defVIdx = IncreaseVregCount(defRegNO); + RegOperand *defSSAOpnd = CreateSSAOperand(defRegOpnd); + newInsn.SetOperand(i, *defSSAOpnd); + auto *defVersion = memPool->New(ssaAlloc, *defSSAOpnd, defVIdx, defRegNO); + auto *defInfo = CreateDUInsnInfo(&newInsn, i); + defVersion->SetDefInsn(defInfo, kDefByInsn); + if (!IncreaseSSAOperand(defSSAOpnd->GetRegisterNumber(), defVersion)) { + CHECK_FATAL(false, "insert ssa operand failed"); + } + } else if (opndProp->IsUse()) { + A64OpndSSAUpdateVsitor ssaUpdator(*this); + ssaUpdator.MarkIncrease(); + ssaUpdator.SetInsnOpndInfo(newInsn, *opndProp, i); + opnd.Accept(ssaUpdator); + } + } +} + +void AArch64CGSSAInfo::DumpInsnInSSAForm(const Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = insn.GetDesc(); + DEBUG_ASSERT(md != nullptr, "md should not be nullptr"); + + LogInfo::MapleLogger() << "< " << insn.GetId() << " > "; + LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; + + for (uint32 i = 0; i < insn.GetOperandSize(); ++i) { + Operand &opnd = insn.GetOperand(i); + LogInfo::MapleLogger() << " (opnd" << i << ": "; + A64SSAOperandDumpVisitor a64OpVisitor(GetAllSSAOperands()); + opnd.Accept(a64OpVisitor); + if (!a64OpVisitor.HasDumped()) { + opnd.Dump(); + LogInfo::MapleLogger() << ")"; + } + } + if (insn.IsVectorOp()) { + auto &vInsn = static_cast(insn); + if (vInsn.GetNumOfRegSpec() != 0) { + LogInfo::MapleLogger() << " (vecSpec: " << vInsn.GetNumOfRegSpec() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void A64SSAOperandRenameVisitor::Visit(RegOperand *v) { + if (v->IsVirtualRegister()) { + if (opndDes->IsRegDef() && opndDes->IsRegUse()) { /* both def use */ + insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*v, false, *insn, idx)); + (void)ssaInfo->GetRenamedOperand(*v, true, *insn, idx); + } else { + insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*v, opndDes->IsRegDef(), *insn, idx)); + } + } +} + +void A64SSAOperandRenameVisitor::Visit(MemOperand *a64MemOpnd) { + RegOperand *base = a64MemOpnd->GetBaseRegister(); + RegOperand *index = a64MemOpnd->GetIndexRegister(); + bool needCopy = (base != nullptr && base->IsVirtualRegister()) || (index != nullptr && index->IsVirtualRegister()); + if (needCopy) { + MemOperand *cpyMem = ssaInfo->CreateMemOperand(*a64MemOpnd, true); + if (base != nullptr && base->IsVirtualRegister()) { + bool isDef = !a64MemOpnd->IsIntactIndexed(); + cpyMem->SetBaseRegister(*ssaInfo->GetRenamedOperand(*base, isDef, *insn, idx)); + 
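The renaming these visitors drive follows the standard SSA rule: every def mints a fresh version of the virtual register, and every use binds to the newest one, as GetRenamedOperand does through VRegVersion objects. A toy version map under that assumption:

```
#include <cstdint>
#include <unordered_map>

// Minimal SSA renaming core; version 0 stands for "no def seen yet", the
// case the pass handles via RenamedOperandSpecialCase.
struct SSARenamer {
  std::unordered_map<uint32_t, uint32_t> curVersion;  // vreg -> version

  uint32_t OnDef(uint32_t vreg) {
    return ++curVersion[vreg];  // each def creates a new version
  }

  uint32_t OnUse(uint32_t vreg) const {
    auto it = curVersion.find(vreg);
    return it == curVersion.end() ? 0 : it->second;
  }
};
```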
}
+    if (index != nullptr && index->IsVirtualRegister()) {
+      cpyMem->SetIndexRegister(*ssaInfo->GetRenamedOperand(*index, false, *insn, idx));
+    }
+    insn->SetMemOpnd(ssaInfo->CreateMemOperand(*cpyMem, false));
+  }
+}
+
+void A64SSAOperandRenameVisitor::Visit(ListOperand *v) {
+  bool isAsm = insn->GetMachineOpcode() == MOP_asm;
+  /* record the original list order */
+  std::list<RegOperand*> tempList;
+  auto &opndList = v->GetOperands();
+  while (!opndList.empty()) {
+    auto *op = opndList.front();
+    opndList.pop_front();
+
+    if (op->IsSSAForm() || !op->IsVirtualRegister()) {
+      tempList.push_back(op);
+      continue;
+    }
+
+    bool isDef = isAsm && (idx == kAsmClobberListOpnd || idx == kAsmOutputListOpnd);
+    RegOperand *renameOpnd = ssaInfo->GetRenamedOperand(*op, isDef, *insn, idx);
+    tempList.push_back(renameOpnd);
+  }
+  DEBUG_ASSERT(v->GetOperands().empty(), "need to clean list");
+  v->GetOperands().assign(tempList.begin(), tempList.end());
+}
+
+void A64OpndSSAUpdateVsitor::Visit(RegOperand *regOpnd) {
+  if (regOpnd->IsSSAForm()) {
+    if (opndDes->IsRegDef() && opndDes->IsRegUse()) {
+      UpdateRegUse(regOpnd->GetRegisterNumber());
+      UpdateRegDef(regOpnd->GetRegisterNumber());
+    } else {
+      if (opndDes->IsRegDef()) {
+        UpdateRegDef(regOpnd->GetRegisterNumber());
+      } else if (opndDes->IsRegUse()) {
+        UpdateRegUse(regOpnd->GetRegisterNumber());
+      } else if (IsPhi()) {
+        UpdateRegUse(regOpnd->GetRegisterNumber());
+      } else {
+        DEBUG_ASSERT(false, "invalid opnd");
+      }
+    }
+  }
+}
+
+void A64OpndSSAUpdateVsitor::Visit(maplebe::MemOperand *a64MemOpnd) {
+  RegOperand *base = a64MemOpnd->GetBaseRegister();
+  RegOperand *index = a64MemOpnd->GetIndexRegister();
+  if (base != nullptr && base->IsSSAForm()) {
+    if (a64MemOpnd->IsIntactIndexed()) {
+      UpdateRegUse(base->GetRegisterNumber());
+    } else {
+      UpdateRegDef(base->GetRegisterNumber());
+    }
+  }
+  if (index != nullptr && index->IsSSAForm()) {
+    UpdateRegUse(index->GetRegisterNumber());
+  }
+}
+
+void A64OpndSSAUpdateVsitor::Visit(PhiOperand *phiOpnd) {
+  SetPhi(true);
+  for (auto phiListIt = phiOpnd->GetOperands().begin(); phiListIt != phiOpnd->GetOperands().end(); ++phiListIt) {
+    Visit(phiListIt->second);
+  }
+  SetPhi(false);
+}
+
+void A64OpndSSAUpdateVsitor::Visit(ListOperand *v) {
+  /* do not handle asm here, so there is no list def */
+  if (insn->GetMachineOpcode() == MOP_asm) {
+    DEBUG_ASSERT(false, "do not support asm yet");
+    return;
+  }
+  for (auto *op : v->GetOperands()) {
+    if (op->IsSSAForm()) {
+      UpdateRegUse(op->GetRegisterNumber());
+    }
+  }
+}
+
+void A64OpndSSAUpdateVsitor::UpdateRegUse(uint32 ssaIdx) {
+  VRegVersion *curVersion = ssaInfo->FindSSAVersion(ssaIdx);
+  if (isDecrease) {
+    curVersion->RemoveUseInsn(*insn, idx);
+  } else {
+    curVersion->AddUseInsn(*ssaInfo, *insn, idx);
+  }
+}
+
+void A64OpndSSAUpdateVsitor::UpdateRegDef(uint32 ssaIdx) {
+  VRegVersion *curVersion = ssaInfo->FindSSAVersion(ssaIdx);
+  if (isDecrease) {
+    deletedDef.emplace(ssaIdx);
+    curVersion->MarkDeleted();
+  } else {
+    if (deletedDef.count(ssaIdx)) {
+      deletedDef.erase(ssaIdx);
+      curVersion->MarkRecovery();
+    } else {
+      CHECK_FATAL(false, "do not support new defines during SSA updating");
+    }
+    DEBUG_ASSERT(!insn->IsPhi(), "do not support yet");
+    curVersion->SetDefInsn(ssaInfo->CreateDUInsnInfo(insn, idx), kDefByInsn);
+  }
+}
+
+void A64SSAOperandDumpVisitor::Visit(RegOperand *a64RegOpnd) {
+  DEBUG_ASSERT(!a64RegOpnd->IsConditionCode(), "both condi and reg");
+  if (a64RegOpnd->IsSSAForm()) {
+    std::array<std::string, kRegTyLast> prims = { "U", "R", "V", "C", "X", "Vra" };
+    std::array<std::string, kRegTyLast> classes = { "[U]",
"[I]", "[F]", "[CC]", "[X87]", "[Vra]" }; + CHECK_FATAL(a64RegOpnd->IsVirtualRegister() && a64RegOpnd->IsSSAForm(), "only dump ssa opnd here"); + RegType regType = a64RegOpnd->GetRegisterType(); + DEBUG_ASSERT(regType < kRegTyLast, "unexpected regType"); + auto ssaVit = allSSAOperands.find(a64RegOpnd->GetRegisterNumber()); + CHECK_FATAL(ssaVit != allSSAOperands.end(), "find ssa version failed"); + LogInfo::MapleLogger() << "ssa_reg:" << prims[regType] << ssaVit->second->GetOriginalRegNO() << "_" + << ssaVit->second->GetVersionIdx() << " class: " << classes[regType] << " validBitNum: [" + << static_cast(a64RegOpnd->GetValidBitsNum()) << "]"; + LogInfo::MapleLogger() << ")"; + SetHasDumped(); + } +} + +void A64SSAOperandDumpVisitor::Visit(ListOperand *v) { + for (auto regOpnd : v->GetOperands()) { + if (regOpnd->IsSSAForm()) { + Visit(regOpnd); + continue; + } + } +} + +void A64SSAOperandDumpVisitor::Visit(MemOperand *a64MemOpnd) { + if (a64MemOpnd->GetBaseRegister() != nullptr && a64MemOpnd->GetBaseRegister()->IsSSAForm()) { + LogInfo::MapleLogger() << "Mem: "; + Visit(a64MemOpnd->GetBaseRegister()); + if (a64MemOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { + LogInfo::MapleLogger() << "offset:"; + a64MemOpnd->GetOffsetOperand()->Dump(); + } + } + if (a64MemOpnd->GetIndexRegister() != nullptr && a64MemOpnd->GetIndexRegister()->IsSSAForm() ) { + DEBUG_ASSERT(a64MemOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX, "mem mode false"); + LogInfo::MapleLogger() << "offset:"; + Visit(a64MemOpnd->GetIndexRegister()); + } +} + +void A64SSAOperandDumpVisitor::Visit(PhiOperand *phi) { + for (auto phiListIt = phi->GetOperands().begin(); phiListIt != phi->GetOperands().end();) { + Visit(phiListIt->second); + LogInfo::MapleLogger() << " fBB<" << phiListIt->first << ">"; + LogInfo::MapleLogger() << (++phiListIt == phi->GetOperands().end() ? ")" : ", "); + } +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de80abd6cd3cd2c2fc930316a52521854eb7d152 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp @@ -0,0 +1,1077 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_strldr.h" +#include "aarch64_reaching.h" +#include "aarch64_cgfunc.h" +#include "common_utils.h" + +namespace maplebe { +using namespace maple; + +static MOperator SelectMovMop(bool isFloatOrSIMD, bool is64Bit) { + return isFloatOrSIMD ? (is64Bit ? MOP_xvmovd : MOP_xvmovs) + : (is64Bit ? MOP_xmovrr : MOP_wmovrr); +} + +void AArch64StoreLoadOpt::Run() { + DoStoreLoadOpt(); +} + +/* + * Transfer: store x100, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STR_LIVE: + * store x100, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, x100 + * OPT_VERSION_STR_DIE: + * store x100, [MEM] + * mov x9000(new reg), x100 + * ... // May exist branches. 
if x100 dead here. + * mov x200, x9000 + * Params: + * strInsn: indicate store insn. + * strSrcIdx: index of source register operand of store insn. (x100 in this example) + * memSeq: represent first memOpreand or second memOperand + * memUseInsnSet: insns using memOperand + */ +void AArch64StoreLoadOpt::DoLoadToMoveTransfer(Insn &strInsn, short strSrcIdx, + short memSeq, const InsnSet &memUseInsnSet) { + /* stp instruction need two registers, str only need one register */ + DEBUG_ASSERT(strSrcIdx < kDivide2, "CG internal error."); + /* Find x100's definition insn. */ + InsnSet regDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(strInsn, strSrcIdx); + DEBUG_ASSERT(!regDefInsnSet.empty(), "RegOperand is used before defined"); + if (regDefInsnSet.size() != 1) { + return; + } + std::map InsnState; + for (auto *ldrInsn : memUseInsnSet) { + InsnState[ldrInsn] = true; + } + for (auto *ldrInsn : memUseInsnSet) { + if (!ldrInsn->IsLoad() || (ldrInsn->GetDefRegs().size() > 1) || ldrInsn->GetBB()->IsCleanup()) { + continue; + } + + if (HasMemBarrier(*ldrInsn, strInsn)) { + continue; + } + + /* ldr x200, [mem], mem index is 1, x200 index is 0 */ + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*ldrInsn, kInsnSecondOpnd); + DEBUG_ASSERT(!memDefInsnSet.empty(), "load insn should have definitions."); + /* If load has multiple definition, continue. */ + if (memDefInsnSet.size() > 1) { + InsnState[ldrInsn] = false; + continue; + } + + Operand &resOpnd = ldrInsn->GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = strInsn.GetOperand(static_cast(strSrcIdx)); + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + return; + } + + auto &resRegOpnd = static_cast(resOpnd); + auto &srcRegOpnd = static_cast(srcOpnd); + if (resRegOpnd.GetRegisterType() != srcRegOpnd.GetRegisterType()) { + continue; + } + + /* Check if use operand of store is live at load insn. 
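+     * If it is still live there, the load can be rewritten directly as a move
+     * from the stored register (live version); if it dies before the load, the
+     * value is first parked in a fresh vreg right after the store and the load
+     * becomes a move from that vreg (die version).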
*/ + if (cgFunc.GetRD()->RegIsLiveBetweenInsn(srcRegOpnd.GetRegisterNumber(), strInsn, *ldrInsn)) { + GenerateMoveLiveInsn(resRegOpnd, srcRegOpnd, *ldrInsn, strInsn, memSeq); + InsnState[ldrInsn] = false; + } else if (!cgFunc.IsAfterRegAlloc()) { + GenerateMoveDeadInsn(resRegOpnd, srcRegOpnd, *ldrInsn, strInsn, memSeq); + } + + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "Do store-load optimization 1: str version"; + LogInfo::MapleLogger() << cgFunc.GetName() << '\n'; + LogInfo::MapleLogger() << "Store insn: "; + strInsn.Dump(); + LogInfo::MapleLogger() << "Load insn: "; + ldrInsn->Dump(); + } + } + auto it = memUseInsnSet.begin(); + ++it; + for (; it != memUseInsnSet.end(); ++it) { + Insn *curInsn = *it; + if (InsnState[curInsn] == false) { + continue; + } + if (!curInsn->IsLoad() || (curInsn->GetDefRegs().size() > 1) || curInsn->GetBB()->IsCleanup()) { + continue; + } + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*curInsn, kInsnSecondOpnd); + DEBUG_ASSERT(!memDefInsnSet.empty(), "load insn should have definitions."); + if (memDefInsnSet.size() > 1) { + continue; + } + auto prevIt = it; + do { + --prevIt; + Insn *prevInsn = *prevIt; + if (InsnState[prevInsn] == false) { + continue; + } + if (prevInsn->GetBB() != curInsn->GetBB()) { + break; + } + if (!prevInsn->IsLoad() || (prevInsn->GetDefRegs().size() > 1) || prevInsn->GetBB()->IsCleanup()) { + continue; + } + InsnSet memoryDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*curInsn, kInsnSecondOpnd); + DEBUG_ASSERT(!memoryDefInsnSet.empty(), "load insn should have definitions."); + if (memoryDefInsnSet.size() > 1) { + break; + } + Operand &resOpnd = curInsn->GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = prevInsn->GetOperand(kInsnFirstOpnd); + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + continue; + } + + auto &resRegOpnd = static_cast(resOpnd); + auto &srcRegOpnd = static_cast(srcOpnd); + if (resRegOpnd.GetRegisterType() != srcRegOpnd.GetRegisterType()) { + continue; + } + /* Check if use operand of store is live at load insn. */ + if (cgFunc.GetRD()->FindRegDefBetweenInsn(srcRegOpnd.GetRegisterNumber(), + prevInsn->GetNext(), curInsn->GetPrev()).empty()) { + GenerateMoveLiveInsn(resRegOpnd, srcRegOpnd, *curInsn, *prevInsn, memSeq); + InsnState[curInsn] = false; + } + break; + } while (prevIt != memUseInsnSet.begin()); + } +} + +void AArch64StoreLoadOpt::GenerateMoveLiveInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq) { + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn *movInsn = nullptr; + if (str2MovMap[&strInsn][memSeq] != nullptr && !cgFunc.IsAfterRegAlloc()) { + Insn *movInsnOfStr = str2MovMap[&strInsn][memSeq]; + auto &vregOpnd = static_cast(movInsnOfStr->GetOperand(kInsnFirstOpnd)); + movInsn = &cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, vregOpnd); + } else { + movInsn = &cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, srcRegOpnd); + } + if (&resRegOpnd == &srcRegOpnd && cgFunc.IsAfterRegAlloc()) { + ldrInsn.GetBB()->RemoveInsn(ldrInsn); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); + return; + } + movInsn->SetId(ldrInsn.GetId()); + ldrInsn.GetBB()->ReplaceInsn(ldrInsn, *movInsn); + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "replace ldrInsn:\n"; + ldrInsn.Dump(); + LogInfo::MapleLogger() << "with movInsn:\n"; + movInsn->Dump(); + } + /* Add comment. 
*/ + MapleString newComment = ldrInsn.GetComment(); + if (strInsn.IsStorePair()) { + newComment += "; stp-load live version."; + } else { + newComment += "; str-load live version."; + } + movInsn->SetComment(newComment); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); +} + +void AArch64StoreLoadOpt::GenerateMoveDeadInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq) { + Insn *newMovInsn = nullptr; + RegOperand *vregOpnd = nullptr; + + if (str2MovMap[&strInsn][memSeq] == nullptr) { + RegType regTy = srcRegOpnd.IsOfFloatOrSIMDClass() ? kRegTyFloat : kRegTyInt; + regno_t vRegNO = + cgFunc.NewVReg(regTy, srcRegOpnd.GetSize() <= k32BitSize ? k4ByteSize : k8ByteSize); + /* generate a new vreg, check if the size of DataInfo is big enough */ + if (vRegNO >= cgFunc.GetRD()->GetRegSize(*strInsn.GetBB())) { + cgFunc.GetRD()->EnlargeRegCapacity(vRegNO); + } + vregOpnd = &cgFunc.CreateVirtualRegisterOperand(vRegNO); + MOperator newMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + newMovInsn = &cgFunc.GetInsnBuilder()->BuildInsn(newMop, *vregOpnd, srcRegOpnd); + newMovInsn->SetId(strInsn.GetId() + memSeq + 1); + strInsn.GetBB()->InsertInsnAfter(strInsn, *newMovInsn); + str2MovMap[&strInsn][memSeq] = newMovInsn; + /* update DataInfo */ + cgFunc.GetRD()->UpdateInOut(*strInsn.GetBB(), true); + } else { + newMovInsn = str2MovMap[&strInsn][memSeq]; + vregOpnd = &static_cast(newMovInsn->GetOperand(kInsnFirstOpnd)); + } + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn &movInsn = cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, *vregOpnd); + movInsn.SetId(ldrInsn.GetId()); + ldrInsn.GetBB()->ReplaceInsn(ldrInsn, movInsn); + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "replace ldrInsn:\n"; + ldrInsn.Dump(); + LogInfo::MapleLogger() << "with movInsn:\n"; + movInsn.Dump(); + } + + /* Add comment. */ + MapleString newComment = ldrInsn.GetComment(); + if (strInsn.IsStorePair()) { + newComment += "; stp-load die version."; + } else { + newComment += "; str-load die version."; + } + movInsn.SetComment(newComment); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); +} + +bool AArch64StoreLoadOpt::HasMemBarrier(const Insn &ldrInsn, const Insn &strInsn) const { + if (!cgFunc.GetMirModule().IsCModule()) { + return false; + } + const Insn *currInsn = strInsn.GetNext(); + while (currInsn != &ldrInsn) { + if (currInsn == nullptr) { + return false; + } + if (currInsn->IsMachineInstruction() && currInsn->IsCall()) { + return true; + } + currInsn = currInsn->GetNext(); + } + return false; +} + +/* + * Transfer: store wzr, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STP_ZERO / OPT_VERSION_STR_ZERO: + * store wzr, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, wzr + * + * Params: + * stInsn: indicate store insn. + * strSrcIdx: index of source register operand of store insn. (wzr in this example) + * memUseInsnSet: insns using memOperand + */ +void AArch64StoreLoadOpt::DoLoadZeroToMoveTransfer(const Insn &strInsn, short strSrcIdx, + const InsnSet &memUseInsnSet) const { + /* comment for strInsn should be only added once */ + for (auto *ldrInsn : memUseInsnSet) { + /* Currently we don't support useInsn is ldp insn. 
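+     * A load-pair defines two registers, which the single-move rewrite below
+     * cannot express, so such uses are skipped.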
*/ + if (!ldrInsn->IsLoad() || ldrInsn->GetDefRegs().size() > 1) { + continue; + } + if (HasMemBarrier(*ldrInsn, strInsn)) { + continue; + } + /* ldr reg, [mem], the index of [mem] is 1 */ + InsnSet defInsnForUseInsns = cgFunc.GetRD()->FindDefForMemOpnd(*ldrInsn, 1); + /* If load has multiple definition, continue. */ + if (defInsnForUseInsns.size() > 1) { + continue; + } + + auto &resOpnd = ldrInsn->GetOperand(0); + auto &srcOpnd = strInsn.GetOperand(static_cast(strSrcIdx)); + + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + return; + } + RegOperand &resRegOpnd = static_cast(resOpnd); + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn &movInsn = cgFunc.GetInsnBuilder()->BuildInsn(movMop, resOpnd, srcOpnd); + movInsn.SetId(ldrInsn->GetId()); + ldrInsn->GetBB()->ReplaceInsn(*ldrInsn, movInsn); + + /* Add comment. */ + MapleString newComment = ldrInsn->GetComment(); + newComment += ", str-load zero version"; + movInsn.SetComment(newComment); + } +} + +bool AArch64StoreLoadOpt::CheckStoreOpCode(MOperator opCode) const { + switch (opCode) { + case MOP_wstr: + case MOP_xstr: + case MOP_sstr: + case MOP_dstr: + case MOP_wstp: + case MOP_xstp: + case MOP_sstp: + case MOP_dstp: + case MOP_wstrb: + case MOP_wstrh: + return true; + default: + return false; + } +} + +void AArch64StoreLoadOpt::MemPropInit() { + propMode = kUndef; + amount = 0; + removeDefInsn = false; +} + +bool AArch64StoreLoadOpt::CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, + regno_t replaceRegNo) { + if (replaceRegDefSet.empty()) { + return true; + } + if (defInsn.GetBB() == currInsn.GetBB()) { + /* check replace reg def between defInsn and currInsn */ + Insn *tmpInsn = defInsn.GetNext(); + while (tmpInsn != nullptr && tmpInsn != &currInsn) { + if (replaceRegDefSet.find(tmpInsn) != replaceRegDefSet.end()) { + return false; + } + tmpInsn = tmpInsn->GetNext(); + } + } else { + regno_t defRegno = static_cast(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (defRegno == replaceRegNo) { + uint32 defLoopId = 0; + uint32 curLoopId = 0; + if (defInsn.GetBB()->GetLoop()) { + defLoopId = defInsn.GetBB()->GetLoop()->GetHeader()->GetId(); + } + if (currInsn.GetBB()->GetLoop()) { + curLoopId = currInsn.GetBB()->GetLoop()->GetHeader()->GetId(); + } + if (defLoopId != curLoopId) { + return false; + } + } + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + if (a64RD->HasRegDefBetweenInsnGlobal(replaceRegNo, defInsn, currInsn)) { + return false; + } + } + + if (replaceRegDefSet.size() == 1 && *replaceRegDefSet.begin() == &defInsn) { + /* lsl x1, x1, #3 <-----should be removed after replace MemOperand of ldrInsn. 
+ * ldr x0, [x0,x1] <-----should be single useInsn for x1 + */ + InsnSet newRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(defInsn, replaceRegNo, true); + if (newRegUseSet.size() != k1BitSize) { + return false; + } + removeDefInsn = true; + } + return true; +} + +bool AArch64StoreLoadOpt::CheckDefInsn(Insn &defInsn, Insn &currInsn) { + if (defInsn.GetOperandSize() < k2ByteSize) { + return false; + } + for (uint32 i = kInsnSecondOpnd; i < defInsn.GetOperandSize(); i++) { + Operand &opnd = defInsn.GetOperand(i); + if (defInsn.IsMove() && opnd.IsRegister() && !cgFunc.IsSPOrFP(static_cast(opnd))) { + return false; + } + if (opnd.IsRegister()) { + RegOperand &a64OpndTmp = static_cast(opnd); + regno_t replaceRegNo = a64OpndTmp.GetRegisterNumber(); + InsnSet newRegDefSet = cgFunc.GetRD()->FindDefForRegOpnd(currInsn, replaceRegNo, true); + if (!CheckReplaceReg(defInsn, currInsn, newRegDefSet, replaceRegNo)) { + return false; + } + } + } + return true; +} + +bool AArch64StoreLoadOpt::CheckNewAmount(const Insn &insn, uint32 newAmount) { + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_wstrb: + case MOP_wldrsb: + case MOP_xldrsb: + case MOP_wldrb: { + return newAmount == 0; + } + case MOP_wstrh: + case MOP_wldrsh: + case MOP_xldrsh: + case MOP_wldrh: { + return (newAmount == 0) || (newAmount == k1BitSize); + } + case MOP_wstr: + case MOP_sstr: + case MOP_wldr: + case MOP_sldr: + case MOP_xldrsw: { + return (newAmount == 0) || (newAmount == k2BitSize); + } + case MOP_qstr: + case MOP_qldr: { + return (newAmount == 0) || (newAmount == k4BitSize); + } + default: { + return (newAmount == 0) || (newAmount == k3ByteSize); + } + } +} + +bool AArch64StoreLoadOpt::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) { + AArch64CGFunc &a64CgFunc = static_cast(cgFunc); + if ((newMemOpnd->GetOffsetImmediate() != nullptr) && + !a64CgFunc.IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { + return false; + } + auto newAmount = newMemOpnd->ShiftAmount(); + if (!CheckNewAmount(insn, newAmount)) { + return false; + } + /* is ldp or stp, addrMode must be BOI */ + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { + return false; + } + return true; +} + +MemOperand *AArch64StoreLoadOpt::SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned) { + MemOperand *newMemOpnd = nullptr; + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + /* defInsn is extend, currMemOpnd is same extend or shift */ + bool propExtend = (propMode == kPropShift) || ((propMode == kPropSignedExtend) && isSigned) || + ((propMode == kPropUnsignedExtend) && !isSigned); + if (propMode == kPropOffset) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, 0, isSigned); + } else if (propExtend) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, amount, isSigned); + } else { + return nullptr; + } + return newMemOpnd; +} + +MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace, + Operand *oldOffset, int64 defVal) { + if (propMode != kPropBase) { + return nullptr; + } + OfstOperand *newOfstImm = nullptr; + if (oldOffset == nullptr) { + newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(static_cast(defVal), k32BitSize); + } else { + auto *ofstOpnd = static_cast(oldOffset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is 
null"); + newOfstImm = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); + } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + return static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, + replace, nullptr, newOfstImm, nullptr); +} + +/* + * limit to adjacent bb to avoid ra spill. + */ +bool AArch64StoreLoadOpt::IsAdjacentBB(Insn &defInsn, Insn &curInsn) const { + if (defInsn.GetBB() == curInsn.GetBB()) { + return true; + } + for (auto *bb : defInsn.GetBB()->GetSuccs()) { + if (bb == curInsn.GetBB()) { + return true; + } + if (bb->IsSoloGoto()) { + BB *tragetBB = CGCFG::GetTargetSuc(*bb); + if (tragetBB == curInsn.GetBB()) { + return true; + } + } + } + return false; +} + +/* + * currAddrMode | defMop | propMode | replaceAddrMode + * ============================================================================= + * boi | addrri | base | boi, update imm(offset) + * | addrrr | base | imm(offset) == 0(nullptr) ? borx : NA + * | subrri | base | boi, update imm(offset) + * | subrrr | base | NA + * | adrpl12 | base | imm(offset) == 0(nullptr) ? literal : NA + * | movrr | base | boi + * | movri | base | NA + * | extend/lsl | base | NA + * ============================================================================= + * borx | addrri | offset | NA + * (noextend) | addrrr | offset | NA + * | subrri | offset | NA + * | subrrr | offset | NA + * | adrpl12 | offset | NA + * | movrr | offset | borx + * | movri | offset | bori + * | extend/lsl | offset | borx(with extend) + * ============================================================================= + * borx | addrri | extend | NA + * (extend) | addrrr | extend | NA + * | subrri | extend | NA + * | subrrr | extend | NA + * | adrpl12 | extend | NA + * | movrr | extend | borx + * | movri | extend | NA + * | extend/lsl | extend | borx(with extend) + * ============================================================================= + */ +MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, + RegOperand &base, Operand *offset) { + MemOperand *newMemOpnd = nullptr; + MOperator opCode = defInsn.GetMachineOpcode(); + RegOperand *replace = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + switch (opCode) { + case MOP_xsubrri12: + case MOP_wsubrri12: { + if (!IsAdjacentBB(defInsn, curInsn)) { + break; + } + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = -(immOpnd.GetValue()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + break; + } + case MOP_xaddrri12: + case MOP_waddrri12: { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = immOpnd.GetValue(); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_dadd: + case MOP_sadd: { + if (propMode == kPropBase) { + ImmOperand *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, *replace, newOffset, nullptr, nullptr); + } + break; + } + case MOP_xadrpl12: { + if (propMode == kPropBase) { + ImmOperand *ofstOpnd = static_cast(offset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); + int64 val = ofstOpnd->GetValue(); + StImmOperand *offset1 = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + 
CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); + val += offset1->GetOffset(); + OfstOperand *newOfsetOpnd = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(val), k32BitSize); + CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); + const MIRSymbol *addr = offset1->GetSymbol(); + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && addr->IsReadOnly()) { + break; + } + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeLo12Li, k64BitSize, *replace, nullptr, newOfsetOpnd, addr); + } + break; + } + case MOP_xmovrr: + case MOP_wmovrr: { + if (propMode == kPropBase) { + OfstOperand *offsetTmp = static_cast(offset); + CHECK_FATAL(offsetTmp != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, *replace, nullptr, offsetTmp, nullptr); + } else if (propMode == kPropOffset) { /* if newOffset is SP, swap base and newOffset */ + if (cgFunc.IsSPOrFP(*replace)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, *replace, &base, nullptr, nullptr); + } else { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, replace, nullptr, nullptr); + } + } else if (propMode == kPropSignedExtend) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount, true); + } else { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount); + } + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + if (propMode == kPropOffset) { + ImmOperand *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(imm->GetValue()), k32BitSize); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, base, nullptr, newOffset, nullptr); + } + break; + } + case MOP_xlslrri6: + case MOP_wlslrri5: { + ImmOperand *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + uint32 shift = static_cast(imm->GetValue()); + if (propMode == kPropOffset) { + if ((shift < k4ByteSize) && (shift >= 0)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift); + } + } else if (propMode == kPropShift) { + shift += amount; + if ((shift < k4ByteSize) && (shift >= 0)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift); + } + } + break; + } + case MOP_xsxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, base, true); + break; + } + case MOP_xuxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, base, false); + break; + } + default: + break; + } + return newMemOpnd; +} + +bool AArch64StoreLoadOpt::ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset) { + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + CHECK_FATAL((a64RD != nullptr), "check a64RD!"); + InsnSet regDefSet = a64RD->FindDefForRegOpnd(insn, regNo, true); + if (regDefSet.size() != k1BitSize) { + return false; + } + Insn *regDefInsn = *regDefSet.begin(); + if (!CheckDefInsn(*regDefInsn, insn)) { + return false; + } + MemOperand *newMemOpnd = SelectReplaceMem(*regDefInsn, insn, base, 
offset); + if (newMemOpnd == nullptr) { + return false; + } + + /* check new memOpnd */ + if (newMemOpnd->GetBaseRegister() != nullptr) { + InsnSet regDefSetForNewBase = + a64RD->FindDefForRegOpnd(insn, newMemOpnd->GetBaseRegister()->GetRegisterNumber(), true); + if (regDefSetForNewBase.size() != k1BitSize) { + return false; + } + } + if (newMemOpnd->GetIndexRegister() != nullptr) { + InsnSet regDefSetForNewIndex = + a64RD->FindDefForRegOpnd(insn, newMemOpnd->GetIndexRegister()->GetRegisterNumber(), true); + if (regDefSetForNewIndex.size() != k1BitSize) { + return false; + } + } + + uint32 opndIdx; + if (insn.IsLoadPair() || insn.IsStorePair()) { + if (newMemOpnd->GetOffsetImmediate() == nullptr) { + return false; + } + opndIdx = kInsnThirdOpnd; + } else { + opndIdx = kInsnSecondOpnd; + } + if (!CheckNewMemOffset(insn, newMemOpnd, opndIdx)) { + return false; + } + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "replace insn:" << std::endl; + insn.Dump(); + } + insn.SetOperand(opndIdx, *newMemOpnd); + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "new insn:" << std::endl; + insn.Dump(); + } + if (removeDefInsn) { + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "remove insn:" << std::endl; + regDefInsn->Dump(); + } + regDefInsn->GetBB()->RemoveInsn(*regDefInsn); + } + cgFunc.GetRD()->InitGenUse(*regDefInsn->GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); + return true; +} + +bool AArch64StoreLoadOpt::CanDoMemProp(const Insn *insn) { + if (!cgFunc.GetMirModule().IsCModule()) { + return false; + } + if (!insn->IsMachineInstruction()) { + return false; + } + if (insn->GetMachineOpcode() == MOP_qstr) { + return false; + } + + if (insn->IsLoad() || insn->IsStore()) { + if (insn->IsAtomic()) { + return false; + } + // It is not desired to propagate on 128bit reg with immediate offset + // which may cause linker to issue misalignment error + if (insn->IsAtomic() || insn->GetOperand(0).GetSize() == k128BitSize) { + return false; + } + MemOperand *currMemOpnd = static_cast(insn->GetMemOpnd()); + return currMemOpnd != nullptr; + } + return false; +} + +void AArch64StoreLoadOpt::SelectPropMode(const MemOperand &currMemOpnd) { + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); + switch (currAddrMode) { + case MemOperand::kAddrModeBOi: { + if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { + propMode = kPropBase; + } + break; + } + case MemOperand::kAddrModeBOrX: { + propMode = kPropOffset; + amount = currMemOpnd.ShiftAmount(); + if (currMemOpnd.GetExtendAsString() == "LSL") { + if (amount != 0) { + propMode = kPropShift; + } + break; + } else if (currMemOpnd.SignedExtend()) { + propMode = kPropSignedExtend; + } else if (currMemOpnd.UnsignedExtend()) { + propMode = kPropUnsignedExtend; + } + break; + } + default: + propMode = kUndef; + } +} + +/* + * Optimize: store x100, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STP_LIVE / OPT_VERSION_STR_LIVE: + * store x100, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, x100 + * OPT_VERSION_STP_DIE / OPT_VERSION_STR_DIE: + * store x100, [MEM] + * mov x9000(new reg), x100 + * ... // May exist branches. if x100 dead here. + * mov x200, x9000 + * + * Note: x100 may be wzr/xzr registers. 
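+ *       Stores of wzr/xzr are handled separately by DoLoadZeroToMoveTransfer,
+ *       which rewrites the loads as moves from the zero register.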
+ */ +void AArch64StoreLoadOpt::DoStoreLoadOpt() { + AArch64CGFunc &a64CgFunc = static_cast(cgFunc); + if (a64CgFunc.IsIntrnCallForC()) { + return; + } + FOR_ALL_BB(bb, &a64CgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, next) { + MOperator mOp = insn->GetMachineOpcode(); + if (CanDoMemProp(insn)) { + MemProp(*insn); + } + if (a64CgFunc.GetMirModule().IsCModule() && cgFunc.GetRD()->OnlyAnalysisReg()) { + continue; + } + if (!insn->IsMachineInstruction() || !insn->IsStore() || !CheckStoreOpCode(mOp) || + (a64CgFunc.GetMirModule().IsCModule() && !a64CgFunc.IsAfterRegAlloc()) || + (!a64CgFunc.GetMirModule().IsCModule() && a64CgFunc.IsAfterRegAlloc())) { + continue; + } + if (insn->IsStorePair()) { + ProcessStrPair(*insn); + continue; + } + ProcessStr(*insn); + } + } +} + +/* + * PropBase: + * add/sub x1, x2, #immVal1 + * ...(no def of x2) + * ldr/str x0, [x1, #immVal2] + * ======> + * add/sub x1, x2, #immVal1 + * ... + * ldr/str x0, [x2, #(immVal1 + immVal2)/#(-immVal1 + immVal2)] + * + * PropOffset: + * sxtw x2, w2 + * lsl x1, x2, #1~3 + * ...(no def of x2) + * ldr/str x0, [x0, x1] + * ======> + * sxtw x2, w2 + * lsl x1, x2, #1~3 + * ... + * ldr/str x0, [x0, w2, sxtw 1~3] + */ +void AArch64StoreLoadOpt::MemProp(Insn &insn) { + MemPropInit(); + MemOperand *currMemOpnd = static_cast(insn.GetMemOpnd()); + SelectPropMode(*currMemOpnd); + RegOperand *base = currMemOpnd->GetBaseRegister(); + Operand *offset = currMemOpnd->GetOffset(); + bool memReplaced = false; + + if (propMode == kUndef) { + return; + } else if (propMode == kPropBase) { + ImmOperand *immOffset = static_cast(offset); + CHECK_FATAL(immOffset != nullptr, "immOffset is nullptr!"); + regno_t baseRegNo = base->GetRegisterNumber(); + memReplaced = ReplaceMemOpnd(insn, baseRegNo, *base, immOffset); + } else { + RegOperand *regOffset = static_cast(offset); + if (regOffset == nullptr) { + return; + } + regno_t offsetRegNo = regOffset->GetRegisterNumber(); + memReplaced = ReplaceMemOpnd(insn, offsetRegNo, *base, regOffset); + } + + /* if prop success, find more prop chance */ + if (memReplaced) { + MemProp(insn); + } +} + +/* + * Assume stack(FP) will not be varied out of pro/epi log + * PreIndex: + * add/sub x1, x1 #immVal1 + * ...(no def/use of x1) + * ldr/str x0, [x1] + * ======> + * ldr/str x0, [x1, #immVal1]! 
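+ *              (the add/sub is folded into the access and removed)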
+ * + * PostIndex: + * ldr/str x0, [x1] + * ...(no def/use of x1) + * add/sub x1, x1, #immVal1 + * ======> + * ldr/str x0, [x1], #immVal1 + */ +void AArch64StoreLoadOpt::StrLdrIndexModeOpt(Insn &currInsn) { + auto *curMemopnd = static_cast(currInsn.GetMemOpnd()); + DEBUG_ASSERT(curMemopnd != nullptr, " get memopnd failed"); + /* one instruction cannot define one register twice */ + if (!CanDoIndexOpt(*curMemopnd) || currInsn.IsRegDefined(curMemopnd->GetBaseRegister()->GetRegisterNumber())) { + return; + } + MemOperand *newMemopnd = SelectIndexOptMode(currInsn, *curMemopnd); + if (newMemopnd != nullptr) { + currInsn.SetMemOpnd(newMemopnd); + } +} + +bool AArch64StoreLoadOpt::CanDoIndexOpt(const MemOperand &MemOpnd) { + if (MemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !MemOpnd.IsIntactIndexed()) { + return false; + } + DEBUG_ASSERT(MemOpnd.GetOffsetImmediate() != nullptr, " kAddrModeBOi memopnd have no offset imm"); + if (!MemOpnd.GetOffsetImmediate()->IsImmOffset()) { + return false; + } + if (cgFunc.IsSPOrFP(*MemOpnd.GetBaseRegister())) { + return false; + } + OfstOperand *a64Ofst = MemOpnd.GetOffsetImmediate(); + if (a64Ofst == nullptr) { + return false; + } + return a64Ofst->GetValue() == 0; +} + +int64 AArch64StoreLoadOpt::GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno_t baseRegNO, uint32 memOpndSize) { + bool subMode = defInsn.GetMachineOpcode() == MOP_wsubrri12 || defInsn.GetMachineOpcode() == MOP_xsubrri12; + bool addMode = defInsn.GetMachineOpcode() == MOP_waddrri12 || defInsn.GetMachineOpcode() == MOP_xaddrri12; + if (addMode || subMode) { + DEBUG_ASSERT(static_cast(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == baseRegNO, + "check def opnd"); + auto &srcOpnd = static_cast(defInsn.GetOperand(kInsnSecondOpnd)); + if (srcOpnd.GetRegisterNumber() == baseRegNO && defInsn.GetBB() == insn.GetBB()) { + int64 offsetVal = static_cast(defInsn.GetOperand(kInsnThirdOpnd)).GetValue(); + if (!MemOperand::IsSIMMOffsetOutOfRange(offsetVal, memOpndSize == k64BitSize, insn.IsLoadStorePair())) { + return subMode ? 
-offsetVal : offsetVal;
+      }
+    }
+  }
+  return kMaxPimm8; /* simm max value cannot exceed pimm max value */
+}
+
+MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd) {
+  AArch64ReachingDefinition *a64RD = static_cast<AArch64ReachingDefinition*>(cgFunc.GetRD());
+  DEBUG_ASSERT((a64RD != nullptr), "check a64RD!");
+  regno_t baseRegisterNO = curMemOpnd.GetBaseRegister()->GetRegisterNumber();
+  auto &a64cgFunc = static_cast<AArch64CGFunc&>(cgFunc);
+  /* pre index */
+  InsnSet regDefSet = a64RD->FindDefForRegOpnd(insn, baseRegisterNO, true);
+  if (regDefSet.size() == k1BitSize) {
+    Insn *defInsn = *regDefSet.begin();
+    int64 defOffset = GetOffsetForNewIndex(*defInsn, insn, baseRegisterNO, curMemOpnd.GetSize());
+    if (defOffset < kMaxPimm8) {
+      InsnSet tempCheck;
+      (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, defInsn->GetNext(), insn.GetPrev(), tempCheck);
+      if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
+        auto &newMem =
+            a64cgFunc.CreateMemOpnd(*curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
+        DEBUG_ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
+        newMem.SetIndexOpt(MemOperand::kPreIndex);
+        insn.GetBB()->RemoveInsn(*defInsn);
+        return &newMem;
+      }
+    }
+  }
+  /* post index */
+  std::vector<Insn*> refDefVec = a64RD->FindRegDefBetweenInsn(baseRegisterNO, &insn, insn.GetBB()->GetLastInsn(), true);
+  if (!refDefVec.empty()) {
+    Insn *defInsn = refDefVec.back();
+    int64 defOffset = GetOffsetForNewIndex(*defInsn, insn, baseRegisterNO, curMemOpnd.GetSize());
+    if (defOffset < kMaxPimm8) {
+      InsnSet tempCheck;
+      (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, insn.GetNext(), defInsn->GetPrev(), tempCheck);
+      if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
+        auto &newMem = a64cgFunc.CreateMemOpnd(
+            *curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
+        DEBUG_ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
+        newMem.SetIndexOpt(MemOperand::kPostIndex);
+        insn.GetBB()->RemoveInsn(*defInsn);
+        return &newMem;
+      }
+    }
+  }
+  return nullptr;
+}
+
+void AArch64StoreLoadOpt::ProcessStrPair(Insn &insn) {
+  const short memIndex = 2;
+  short regIndex = 0;
+  Operand &opnd = insn.GetOperand(memIndex);
+  auto &memOpnd = static_cast<MemOperand&>(opnd);
+  RegOperand *base = memOpnd.GetBaseRegister();
+  if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base))) {
+    return;
+  }
+  if (cgFunc.IsAfterRegAlloc() && !insn.IsSpillInsn()) {
+    return;
+  }
+  DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "frame MemOperand must not be exist register index");
+  InsnSet memUseInsnSet;
+  for (int i = 0; i != kMaxMovNum; ++i) {
+    memUseInsnSet.clear();
+    if (i == 0) {
+      regIndex = 0;
+      memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex);
+    } else {
+      regIndex = 1;
+      memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex, true);
+    }
+    if (memUseInsnSet.empty()) {
+      return;
+    }
+    auto &regOpnd = static_cast<RegOperand&>(insn.GetOperand(static_cast<uint32>(regIndex)));
+    if (regOpnd.GetRegisterNumber() == RZR) {
+      DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet);
+    } else {
+      DoLoadToMoveTransfer(insn, regIndex, i, memUseInsnSet);
+    }
+  }
+}
+
+void AArch64StoreLoadOpt::ProcessStr(Insn &insn) {
+  /* str x100, [mem], mem index is 1, x100 index is 0; */
+  const short memIndex = 1;
+  const short regIndex = 0;
+  Operand &opnd = insn.GetOperand(memIndex);
+  auto &memOpnd = static_cast<MemOperand&>(opnd);
+  RegOperand *base = memOpnd.GetBaseRegister();
+  if ((base == nullptr) ||
!(cgFunc.GetRD()->IsFrameReg(*base))) { + return; + } + + if (cgFunc.IsAfterRegAlloc() && !insn.IsSpillInsn()) { + return; + } + DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "frame MemOperand must not be exist register index"); + + InsnSet memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex); + if (memUseInsnSet.empty()) { + return; + } + + auto *regOpnd = static_cast(&insn.GetOperand(regIndex)); + CHECK_NULL_FATAL(regOpnd); + if (regOpnd->GetRegisterNumber() == RZR) { + DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet); + } else { + DoLoadToMoveTransfer(insn, regIndex, 0, memUseInsnSet); + } + if (cgFunc.IsAfterRegAlloc() && insn.IsSpillInsn()) { + InsnSet newmemUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex); + if (newmemUseInsnSet.empty()) { + insn.GetBB()->RemoveInsn(insn); + } + } +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f2589d4de120802803925c565c1f69edf9e2cd6f --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_utils.h" +#include "cg_option.h" + +namespace maplebe { + +MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, + MOperator newLoadMop) { + MemPool &memPool = *cgFunc.GetMemoryPool(); + auto *memOp = static_cast(loadIns.GetMemOpnd()); + MOperator loadMop = loadIns.GetMachineOpcode(); + + DEBUG_ASSERT(loadIns.IsLoad() && AArch64CG::kMd[newLoadMop].IsLoad(), + "ins and Mop must be load"); + + MemOperand *newMemOp = memOp; + + uint32 memSize = AArch64CG::kMd[loadMop].GetOperandSize(); + uint32 newMemSize = AArch64CG::kMd[newLoadMop].GetOperandSize(); + + if (newMemSize == memSize) { + // if sizes are the same just return old memory operand + return newMemOp; + } + + newMemOp = memOp->Clone(memPool); + newMemOp->SetSize(newMemSize); + + if (!CGOptions::IsBigEndian()) { + return newMemOp; + } + + // for big-endian it's necessary to adjust offset if it's present + if (memOp->GetAddrMode() != MemOperand::kAddrModeBOi || + newMemSize > memSize) { + // currently, it's possible to adjust an offset only for immediate offset + // operand if new size is less than the original one + return nullptr; + } + + auto *newOffOp = static_cast( + memOp->GetOffsetImmediate()->Clone(memPool)); + + newOffOp->AdjustOffset(static_cast((memSize - newMemSize) >> kLog2BitsPerByte)); + newMemOp->SetOffsetOperand(*newOffOp); + + DEBUG_ASSERT(memOp->IsOffsetMisaligned(memSize) || + !newMemOp->IsOffsetMisaligned(newMemSize), + "New offset value is misaligned!"); + + return newMemOp; +} + +} // namespace maplebe diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9101fd7cd812f74e79109472c27bc9d3aa922c1d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp @@ -0,0 +1,566 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_validbit_opt.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + case MOP_wandrri12: + case MOP_xandrri13: { + Optimize(bb, insn); + break; + } + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Optimize(bb, insn); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + Optimize(bb, insn); + break; + } + case MOP_bge: + case MOP_blt: { + Optimize(bb, insn); + break; + } + default: + break; + } +} + +void AArch64ValidBitOpt::SetValidBits(Insn &insn) { + MOperator mop = insn.GetMachineOpcode(); + switch (mop) { + case MOP_wcsetrc: + case MOP_xcsetrc: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(k1BitSize); + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(srcOpnd.IsIntImmediate(), "must be ImmOperand"); + auto &immOpnd = static_cast(srcOpnd); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize())); + break; + } + case MOP_xmovrr: + case MOP_wmovrr: { + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (!srcOpnd.IsVirtualRegister()) { + break; + } + if (srcOpnd.GetRegisterNumber() == RZR) { + srcOpnd.SetValidBitsNum(k1BitSize); + } + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (!(dstOpnd.GetSize() == k64BitSize && srcOpnd.GetSize() == k32BitSize) && + !(dstOpnd.GetSize() == k32BitSize && srcOpnd.GetSize() == k64BitSize)) { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum()); + } + break; + } + case MOP_wlsrrri5: + case MOP_xlsrrri6: + case MOP_wasrrri5: + case MOP_xasrrri6: { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + uint32 shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((static_cast(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) { + dstOpnd.SetValidBitsNum(k1BitSize); + } else { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits); + } + break; + } + case MOP_wlslrri5: + case MOP_xlslrri6: { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + uint32 shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 newVB = ((srcOpnd.GetValidBitsNum() + shiftBits) > srcOpnd.GetSize()) ? + srcOpnd.GetSize() : (srcOpnd.GetValidBitsNum() + shiftBits); + dstOpnd.SetValidBitsNum(newVB); + } + case MOP_xuxtb32: + case MOP_xuxth32: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 srcVB = srcOpnd.GetValidBitsNum(); + uint32 newVB = dstOpnd.GetValidBitsNum(); + newVB = (mop == MOP_xuxtb32) ? ((srcVB < k8BitSize) ? srcVB : k8BitSize) : newVB; + newVB = (mop == MOP_xuxth32) ? ((srcVB < k16BitSize) ? srcVB : k16BitSize) : newVB; + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wldrb: + case MOP_wldrh: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 newVB = (mop == MOP_wldrb) ? 
k8BitSize : k16BitSize; + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wandrrr: + case MOP_xandrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wandrri12: + case MOP_xandrri13: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wiorrrr: + case MOP_xiorrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wiorrri12: + case MOP_xiorrri13: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + default: + break; + } +} + +bool AArch64ValidBitOpt::SetPhiValidBits(Insn &insn) { + Operand &defOpnd = insn.GetOperand(kInsnFirstOpnd); + DEBUG_ASSERT(defOpnd.IsRegister(), "expect register"); + auto &defRegOpnd = static_cast(defOpnd); + Operand &phiOpnd = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(phiOpnd.IsPhi(), "expect phiList"); + auto &phiList = static_cast(phiOpnd); + int32 maxVB = -1; + for (auto phiOpndIt : phiList.GetOperands()) { + if (phiOpndIt.second != nullptr) { + maxVB = (maxVB < static_cast(phiOpndIt.second->GetValidBitsNum())) ? 
+ static_cast(phiOpndIt.second->GetValidBitsNum()) : maxVB; + } + } + if (maxVB >= static_cast(k0BitSize) && static_cast(maxVB) != defRegOpnd.GetValidBitsNum()) { + defRegOpnd.SetValidBitsNum(static_cast(maxVB)); + return true; + } + return false; +} + +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +bool AndValidBitPattern::CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const { + if ((__builtin_ffs(static_cast(andImm)) - 1 == shiftImm) && + ((andImm >> shiftImm) == ((1 << (andImmVB - shiftImm)) -1))) { + return true; + } + return false; +} + +bool AndValidBitPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_wandrri12) { + newMop = MOP_wmovrr; + } else if (mOp == MOP_xandrri13) { + newMop = MOP_xmovrr; + } + if (newMop == MOP_undef) { + return false; + } + CHECK_FATAL(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "must be register!"); + CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "must be register!"); + CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsImmediate(), "must be imm!"); + desReg = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + srcReg = static_cast(&insn.GetOperand(kInsnSecondOpnd)); + auto &andImm = static_cast(insn.GetOperand(kInsnThirdOpnd)); + int64 immVal = andImm.GetValue(); + uint32 validBit = srcReg->GetValidBitsNum(); + if (validBit == k8BitSize && immVal == 0xFF) { + return true; + } else if (validBit == k16BitSize && immVal == 0xFFFF) { + return true; + } + /* and R287[32], R286[64], #255 */ + if ((desReg->GetSize() < srcReg->GetSize()) && (srcReg->GetValidBitsNum() > desReg->GetSize())) { + return false; + } + InsnSet useInsns = GetAllUseInsn(*desReg); + if (useInsns.size() == 1) { + Insn *useInsn = *useInsns.begin(); + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop != MOP_wasrrri5 && useMop != MOP_xasrrri6 && useMop != MOP_wlsrrri5 && useMop != MOP_xlsrrri6) { + return false; + } + Operand &shiftOpnd = useInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(shiftOpnd.IsImmediate(), "must be immediate"); + int64 shiftImm = static_cast(shiftOpnd).GetValue(); + uint32 andImmVB = ValidBitOpt::GetImmValidBit(andImm.GetValue(), desReg->GetSize()); + if ((srcReg->GetValidBitsNum() == andImmVB) && CheckImmValidBit(andImm.GetValue(), andImmVB, shiftImm)) { + return true; + } + } + return false; +} + +void AndValidBitPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *desReg, *srcReg); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + if (desReg->GetSize() < srcReg->GetSize()) { + ssaInfo->InsertSafePropInsn(newInsn.GetId()); + } + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ExtValidBitPattern::CheckCondition(Insn &insn) { + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + case MOP_xuxth32: { + CHECK_FATAL(dstOpnd.IsRegister(), "must be register"); + CHECK_FATAL(srcOpnd.IsRegister(), "must be register"); + if (static_cast(dstOpnd).GetValidBitsNum() != + static_cast(srcOpnd).GetValidBitsNum()) { + return false; + } + newMop = MOP_wmovrr; + break; + } + 
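+    /* ubfx/sbfx from bit 0 whose width covers all valid bits of the source
+     * (for sbfx, the whole register) is just a register copy */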
case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Operand &immOpnd1 = insn.GetOperand(kInsnThirdOpnd); + Operand &immOpnd2 = insn.GetOperand(kInsnFourthOpnd); + CHECK_FATAL(immOpnd1.IsImmediate(), "must be immediate"); + CHECK_FATAL(immOpnd2.IsImmediate(), "must be immediate"); + int64 lsb = static_cast(immOpnd1).GetValue(); + int64 width = static_cast(immOpnd2).GetValue(); + if (lsb != 0 || static_cast(srcOpnd).GetValidBitsNum() > width) { + return false; + } + if ((mOp == MOP_wsbfxrri5i5 || mOp == MOP_xsbfxrri6i6) && width != static_cast(srcOpnd).GetSize()) { + return false; + } + if (mOp == MOP_wubfxrri5i5 || mOp == MOP_wsbfxrri5i5) { + newMop = MOP_wmovrr; + } else if (mOp == MOP_xubfxrri6i6 || mOp == MOP_xsbfxrri6i6) { + newMop = MOP_xmovrr; + } + break; + } + default: + return false; + } + newDstOpnd = &static_cast(dstOpnd); + newSrcOpnd = &static_cast(srcOpnd); + return true; +} + +void ExtValidBitPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + case MOP_xuxth32: { + insn.SetMOP(AArch64CG::kMd[newMop]); + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *newDstOpnd, *newSrcOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &insn, &newInsn); + } + } + default: + return; + } +} + +bool CmpCsetVBPattern::IsContinuousCmpCset(const Insn &curInsn) { + auto &csetDstReg = static_cast(curInsn.GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(csetDstReg.IsSSAForm(), "dstOpnd must be ssa form"); + VRegVersion *dstVersion = ssaInfo->FindSSAVersion(csetDstReg.GetRegisterNumber()); + DEBUG_ASSERT(dstVersion != nullptr, "find vRegVersion failed"); + for (auto useDUInfoIt : dstVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = useDUInfoIt.second->GetInsn(); + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop == MOP_wcmpri || useMop == MOP_xcmpri) { + auto &ccDstReg = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(ccDstReg.IsSSAForm(), "dstOpnd must be ssa form"); + VRegVersion *ccDstVersion = ssaInfo->FindSSAVersion(ccDstReg.GetRegisterNumber()); + DEBUG_ASSERT(ccDstVersion != nullptr, "find vRegVersion failed"); + for (auto ccUseDUInfoIt : ccDstVersion->GetAllUseInsns()) { + if (ccUseDUInfoIt.second == nullptr) { + continue; + } + Insn *ccUseInsn = ccUseDUInfoIt.second->GetInsn(); + if (ccUseInsn == nullptr) { + continue; + } + MOperator ccUseMop = ccUseInsn->GetMachineOpcode(); + if (ccUseMop == MOP_wcsetrc || ccUseMop == MOP_xcsetrc) { + return true; + } + } + } + } + return false; +} + +bool CmpCsetVBPattern::OpndDefByOneValidBit(const Insn &defInsn) { + if (defInsn.IsPhi()) { + return (static_cast(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k1BitSize) || + (static_cast(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k0BitSize); + } + MOperator defMop = defInsn.GetMachineOpcode(); + switch (defMop) { + case MOP_wcsetrc: + case MOP_xcsetrc: + return true; + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &defOpnd = defInsn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(defOpnd.IsIntImmediate(), 
"expects ImmOperand"); + auto &defConst = static_cast(defOpnd); + int64 defConstValue = defConst.GetValue(); + return (defConstValue == 0 || defConstValue == 1); + } + case MOP_xmovrr: + case MOP_wmovrr: + return IsZeroRegister(defInsn.GetOperand(kInsnSecondOpnd)); + case MOP_wlsrrri5: + case MOP_xlsrrri6: { + Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand"); + auto &opndImm = static_cast(opnd2); + int64 shiftBits = opndImm.GetValue(); + return ((defMop == MOP_wlsrrri5 && shiftBits == (k32BitSize - 1)) || + (defMop == MOP_xlsrrri6 && shiftBits == (k64BitSize - 1))); + } + default: + return false; + } +} + +bool CmpCsetVBPattern::CheckCondition(Insn &csetInsn) { + MOperator curMop = csetInsn.GetMachineOpcode(); + if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) { + return false; + } + /* combine [continuous cmp & cset] first, to eliminate more insns */ + if (IsContinuousCmpCset(csetInsn)) { + return false; + } + RegOperand &ccReg = static_cast(csetInsn.GetOperand(kInsnThirdOpnd)); + regno_t ccRegNo = ccReg.GetRegisterNumber(); + cmpInsn = GetDefInsn(ccReg); + CHECK_NULL_FATAL(cmpInsn); + MOperator mop = cmpInsn->GetMachineOpcode(); + if ((mop != MOP_wcmpri) && (mop != MOP_xcmpri)) { + return false; + } + VRegVersion *ccRegVersion = ssaInfo->FindSSAVersion(ccRegNo); + if (ccRegVersion->GetAllUseInsns().size() > k1BitSize) { + return false; + } + Operand &cmpSecondOpnd = cmpInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(cmpSecondOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &cmpConst = static_cast(cmpSecondOpnd); + cmpConstVal = cmpConst.GetValue(); + /* get ImmOperand, must be 0 or 1 */ + if ((cmpConstVal != 0) && (cmpConstVal != k1BitSize)) { + return false; + } + Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(cmpFirstOpnd.IsRegister(), "cmpFirstOpnd must be register!"); + RegOperand &cmpReg = static_cast(cmpFirstOpnd); + Insn *defInsn = GetDefInsn(cmpReg); + if (defInsn == nullptr) { + return false; + } + if (defInsn->GetMachineOpcode() == MOP_wmovrr || defInsn->GetMachineOpcode() == MOP_xmovrr) { + auto &srcOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + if (!srcOpnd.IsVirtualRegister()) { + return false; + } + } + return ((cmpReg.GetValidBitsNum() == k1BitSize) || (cmpReg.GetValidBitsNum() == k0BitSize) || + OpndDefByOneValidBit(*defInsn)); +} + +void CmpCsetVBPattern::Run(BB &bb, Insn &csetInsn) { + if (!CheckCondition(csetInsn)) { + return; + } + Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd); + Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); + auto &cond = static_cast(csetInsn.GetOperand(kInsnSecondOpnd)); + Insn *newInsn = nullptr; + + /* cmpFirstOpnd == 1 */ + if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) { + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr; + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd); + } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) { + /* cmpFirstOpnd == 0 */ + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? 
+void CmpCsetVBPattern::Run(BB &bb, Insn &csetInsn) {
+    if (!CheckCondition(csetInsn)) {
+        return;
+    }
+    Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd);
+    Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd);
+    auto &cond = static_cast<CondOperand&>(csetInsn.GetOperand(kInsnSecondOpnd));
+    Insn *newInsn = nullptr;
+
+    /* cmpFirstOpnd == 1 */
+    if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) {
+        MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr;
+        newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd);
+    } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) {
+        /* cmpFirstOpnd == 0 */
+        MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12;
+        ImmOperand &one = static_cast<AArch64CGFunc*>(cgFunc)->CreateImmOperand(1, k8BitSize, false);
+        newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd, one);
+    }
+    if (newInsn == nullptr) {
+        return;
+    }
+    bb.ReplaceInsn(csetInsn, *newInsn);
+    ssaInfo->ReplaceInsn(csetInsn, *newInsn);
+    if (CG_VALIDBIT_OPT_DUMP && (newInsn != nullptr)) {
+        std::vector<Insn*> prevInsns;
+        prevInsns.emplace_back(cmpInsn);
+        prevInsns.emplace_back(&csetInsn);
+        DumpAfterPattern(prevInsns, newInsn, nullptr);
+    }
+}
+
+void CmpBranchesPattern::SelectNewMop(MOperator mop) {
+    switch (mop) {
+        case MOP_bge: {
+            newMop = is64Bit ? MOP_xtbnz : MOP_wtbnz;
+            break;
+        }
+        case MOP_blt: {
+            newMop = is64Bit ? MOP_xtbz : MOP_wtbz;
+            break;
+        }
+        default:
+            break;
+    }
+}
+
+bool CmpBranchesPattern::CheckCondition(Insn &insn) {
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_bge && curMop != MOP_blt) {
+        return false;
+    }
+    auto &ccReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    prevCmpInsn = GetDefInsn(ccReg);
+    if (prevCmpInsn == nullptr) {
+        return false;
+    }
+    MOperator cmpMop = prevCmpInsn->GetMachineOpcode();
+    if (cmpMop != MOP_wcmpri && cmpMop != MOP_xcmpri) {
+        return false;
+    }
+    is64Bit = (cmpMop == MOP_xcmpri);
+    auto &cmpUseOpnd = static_cast<RegOperand&>(prevCmpInsn->GetOperand(kInsnSecondOpnd));
+    auto &cmpImmOpnd = static_cast<ImmOperand&>(prevCmpInsn->GetOperand(kInsnThirdOpnd));
+    int64 cmpImmVal = cmpImmOpnd.GetValue();
+    newImmVal = ValidBitOpt::GetLogValueAtBase2(cmpImmVal);
+    if (newImmVal < 0 || cmpUseOpnd.GetValidBitsNum() != (newImmVal + 1)) {
+        return false;
+    }
+    SelectNewMop(curMop);
+    if (newMop == MOP_undef) {
+        return false;
+    }
+    return true;
+}
+
+void CmpBranchesPattern::Run(BB &bb, Insn &insn) {
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    auto &labelOpnd = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    ImmOperand &newImmOpnd = aarFunc->CreateImmOperand(newImmVal, k8BitSize, false);
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevCmpInsn->GetOperand(kInsnSecondOpnd),
+                                                        newImmOpnd, labelOpnd);
+    bb.ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    ssaInfo->ReplaceInsn(insn, newInsn);
+    /* dump pattern info */
+    if (CG_VALIDBIT_OPT_DUMP) {
+        std::vector<Insn*> prevs;
+        prevs.emplace_back(prevCmpInsn);
+        DumpAfterPattern(prevs, &insn, &newInsn);
+    }
+}
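+
+/*
+ * Example of the CmpBranchesPattern rewrite above (names hypothetical): with
+ * w1 known to have exactly 4 valid bits, i.e. w1 in [0, 15],
+ *   cmp w1, #8; bge .L1  =>  tbnz w1, #3, .L1   // w1 >= 8 iff bit 3 is set
+ *   cmp w1, #8; blt .L1  =>  tbz  w1, #3, .L1
+ * GetLogValueAtBase2 supplies the bit index (3); CheckCondition verifies that
+ * the immediate is a power of two and that no higher bit of w1 can be set.
+ */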
+} /* namespace maplebe */
+
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3ec8a1b1533fc9f34f869abbd2ee8b153c72a425
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_yieldpoint.h"
+#include "aarch64_cgfunc.h"
+
+namespace maplebe {
+using namespace maple;
+
+void AArch64YieldPointInsertion::Run() {
+    InsertYieldPoint();
+}
+
+void AArch64YieldPointInsertion::InsertYieldPoint() {
+    AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    std::string refQueueName = "Ljava_2Flang_2Fref_2FReference_3B_7C_3Cinit_3E_7C_"
+        "28Ljava_2Flang_2FObject_3BLjava_2Flang_2Fref_2FReferenceQueue_3B_29V";
+    if (!CGOptions::IsGCOnly() && (aarchCGFunc->GetName() == refQueueName)) {
+        /* skip inserting a yieldpoint in the Reference constructor, to avoid an rc verify issue */
+        DEBUG_ASSERT(aarchCGFunc->GetYieldPointInsn() != nullptr, "the entry yield point has been inserted");
+        aarchCGFunc->GetYieldPointInsn()->GetBB()->RemoveInsn(*aarchCGFunc->GetYieldPointInsn());
+        return;
+    }
+
+    /*
+     * do not insert yieldpoints in a function that has not saved X30 to the stack,
+     * because X30 is clobbered when the yieldpoint is taken.
+     */
+    if (!aarchCGFunc->GetHasProEpilogue()) {
+        DEBUG_ASSERT(aarchCGFunc->GetYieldPointInsn() != nullptr, "the entry yield point has been inserted");
+        aarchCGFunc->GetYieldPointInsn()->GetBB()->RemoveInsn(*aarchCGFunc->GetYieldPointInsn());
+        return;
+    }
+    /* skip if no GetFirstbb(). */
+    if (aarchCGFunc->GetFirstBB() == nullptr) {
+        return;
+    }
+    /*
+     * The yield point in the entry of the GetFunction() is inserted just after the initialization
+     * of localrefvars in HandleRCCall.
+     * for BBs after firstbb.
+     */
+    for (BB *bb = aarchCGFunc->GetFirstBB()->GetNext(); bb != nullptr; bb = bb->GetNext()) {
+        /* insert a yieldpoint at beginning if BB is BackEdgeDest. */
+        if (bb->IsBackEdgeDest()) {
+            aarchCGFunc->GetDummyBB()->ClearInsns();
+            aarchCGFunc->GenerateYieldpoint(*aarchCGFunc->GetDummyBB());
+            bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB());
+        }
+    }
+}
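+
+/*
+ * Illustrative effect of InsertYieldPoint above (block names hypothetical):
+ * for a loop BB1 -> BB2 -> BB1 where BB1 is the back-edge destination, a
+ * yieldpoint is placed at the beginning of BB1, so every iteration passes a
+ * point at which the runtime can interrupt the thread; straight-line
+ * functions keep only the entry yieldpoint generated earlier.
+ */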
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/aarch64/mpl_atomic.cpp b/ecmascript/mapleall/maple_be/src/cg/aarch64/mpl_atomic.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1b966afc91d26c2dface288fc3910de50e8ac66d
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/aarch64/mpl_atomic.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "mpl_atomic.h"
+#include <array>
+#include "mpl_logging.h"
+
+namespace maple {
+namespace {
+constexpr int32 kMaxSizeOfTab = 6;
+};
+MemOrd MemOrdFromU32(uint32 val) {
+    /* 6 is the size of tab below. 2 is memory_order_consume, it is Disabled. */
+    CHECK_FATAL(val <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", val);
+    CHECK_FATAL(val != 2, "Illegal number for MemOrd: %u", val);
+    static std::array<MemOrd, kMaxSizeOfTab + 1> tab = {
+        MemOrd::kNotAtomic,
+        MemOrd::memory_order_relaxed,
+        /*
+         * memory_order_consume Disabled. Its semantics is debatable.
+         * We don't support it now, but reserve the number. Use memory_order_acquire instead.
+         */
+        MemOrd::memory_order_acquire, /* padding entry */
+        MemOrd::memory_order_acquire,
+        MemOrd::memory_order_release,
+        MemOrd::memory_order_acq_rel,
+        MemOrd::memory_order_seq_cst,
+    };
+    return tab[val];
+}
+
+bool MemOrdIsAcquire(MemOrd ord) {
+    static std::array<bool, kMaxSizeOfTab + 1> tab = {
+        false, /* kNotAtomic */
+        false, /* memory_order_relaxed */
+        true,  /* memory_order_consume */
+        true,  /* memory_order_acquire */
+        false, /* memory_order_release */
+        true,  /* memory_order_acq_rel */
+        true,  /* memory_order_seq_cst */
+    };
+    uint32 tabIndex = static_cast<uint32>(ord);
+    CHECK_FATAL(tabIndex <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", tabIndex);
+    return tab[tabIndex];
+}
+
+bool MemOrdIsRelease(MemOrd ord) {
+    static std::array<bool, kMaxSizeOfTab + 1> tab = {
+        false, /* kNotAtomic */
+        false, /* memory_order_relaxed */
+        false, /* memory_order_consume */
+        false, /* memory_order_acquire */
+        true,  /* memory_order_release */
+        true,  /* memory_order_acq_rel */
+        true,  /* memory_order_seq_cst */
+    };
+    uint32 tabIndex = static_cast<uint32>(ord);
+    CHECK_FATAL(tabIndex <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", tabIndex);
+    return tab[tabIndex];
+}
+} /* namespace maple */
diff --git a/ecmascript/mapleall/maple_be/src/cg/alignment.cpp b/ecmascript/mapleall/maple_be/src/cg/alignment.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b6b70938436cee4d9ebbce2cab3cf6ddfdde86e5
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/alignment.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "alignment.h" +#include "optimize_common.h" +#include "cgfunc.h" +#include "cg.h" +#include "cg_option.h" + +namespace maplebe { +#define ALIGN_ANALYZE_DUMP_NEWPW CG_DEBUG_FUNC(func) + +void AlignAnalysis::AnalysisAlignment() { + FindLoopHeader(); + FindJumpTarget(); + ComputeLoopAlign(); + ComputeJumpAlign(); + if (CGOptions::DoCondBrAlign()) { + ComputeCondBranchAlign(); + } +} + +void AlignAnalysis::Dump() { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n********* alignment for " << funcSt->GetName() << " *********\n"; + LogInfo::MapleLogger() << "------ jumpTargetBBs: " << jumpTargetBBs.size() << " total ------\n"; + for (auto *jumpLabel : jumpTargetBBs) { + LogInfo::MapleLogger() << " === BB_" << jumpLabel->GetId() << " (" << std::hex << jumpLabel << ")" + << std::dec << " <" << jumpLabel->GetKindName(); + if (jumpLabel->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << jumpLabel->GetLabIdx() << "]> ===\n"; + } + if (!jumpLabel->GetPreds().empty()) { + LogInfo::MapleLogger() << "\tpreds: [ "; + for (auto *pred : jumpLabel->GetPreds()) { + LogInfo::MapleLogger() << "BB_" << pred->GetId(); + if (pred->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "GetLabIdx() << ">"; + } + LogInfo::MapleLogger() << " (" << std::hex << pred << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (jumpLabel->GetPrev() != nullptr) { + LogInfo::MapleLogger() << "\tprev: [ "; + LogInfo::MapleLogger() << "BB_" << jumpLabel->GetPrev()->GetId(); + if (jumpLabel->GetPrev()->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "GetLabIdx() << ">"; + } + LogInfo::MapleLogger() << " (" << std::hex << jumpLabel->GetPrev() << ") " << std::dec << " "; + LogInfo::MapleLogger() << "]\n"; + } + FOR_BB_INSNS_CONST(insn, jumpLabel) { + insn->Dump(); + } + } + LogInfo::MapleLogger() << "\n------ loopHeaderBBs: " << loopHeaderBBs.size() << " total ------\n"; + for (auto *loopHeader : loopHeaderBBs) { + LogInfo::MapleLogger() << " === BB_" << loopHeader->GetId() << " (" << std::hex << loopHeader << ")" + << std::dec << " <" << loopHeader->GetKindName(); + if (loopHeader->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << loopHeader->GetLabIdx() << "]> ===\n"; + } + LogInfo::MapleLogger() << "\tLoop Level: " << loopHeader->GetLoop()->GetLoopLevel() << "\n"; + FOR_BB_INSNS_CONST(insn, loopHeader) { + insn->Dump(); + } + } + LogInfo::MapleLogger() << "\n------ alignInfos: " << alignInfos.size() << " total ------\n"; + MapleUnorderedMap::iterator iter; + for (iter = alignInfos.begin(); iter != alignInfos.end(); ++iter) { + BB *bb = iter->first; + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " (" << std::hex << bb << ")" + << std::dec << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() << "]> ===\n"; + } + LogInfo::MapleLogger() << "\talignPower: " << iter->second << "\n"; + } +} + +bool CgAlignAnalysis::PhaseRun(maplebe::CGFunc &func) { + if (ALIGN_ANALYZE_DUMP_NEWPW) { + DotGenerator::GenerateDot("alignanalysis", func, func.GetMirModule(), true, func.GetName()); + } + MemPool *alignMemPool = GetPhaseMemPool(); + AlignAnalysis *alignAnalysis = func.GetCG()->CreateAlignAnalysis(*alignMemPool, 
func); + + CHECK_FATAL(alignAnalysis != nullptr, "AlignAnalysis instance create failure"); + alignAnalysis->AnalysisAlignment(); + if (ALIGN_ANALYZE_DUMP_NEWPW) { + alignAnalysis->Dump(); + } + return true; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/args.cpp b/ecmascript/mapleall/maple_be/src/cg/args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b647ec327c22840814b195dfabf34b1e651c8ac1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/args.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "args.h" +#include "cg.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; +bool CgMoveRegArgs::PhaseRun(maplebe::CGFunc &f) { + MemPool *memPool = GetPhaseMemPool(); + MoveRegArgs *movRegArgs = nullptr; + movRegArgs = f.GetCG()->CreateMoveRegArgs(*memPool, f); + movRegArgs->Run(); + return true; +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cfgo.cpp b/ecmascript/mapleall/maple_be/src/cg/cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..99a3bbe11f1c7cb2773ad586c93b79e7e5c9968d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cfgo.cpp @@ -0,0 +1,851 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cfgo.h" +#include "cgbb.h" +#include "cg.h" +#include "mpl_logging.h" + +/* + * This phase traverses all basic block of cgFunc and finds special + * basic block patterns, like continuous fallthrough basic block, continuous + * uncondition jump basic block, unreachable basic block and empty basic block, + * then do basic mergering, basic block placement transformations, + * unnecessary jumps elimination, and remove unreachable or empty basic block. + * This optimization is done on control flow graph basis. + */ +namespace maplebe { +using namespace maple; + +#define CFGO_DUMP_NEWPM CG_DEBUG_FUNC(f) + +/* return true if to is put after from and there is no real insns between from and to, */ +bool ChainingPattern::NoInsnBetween(const BB &from, const BB &to) const { + const BB *bb = nullptr; + for (bb = from.GetNext(); bb != nullptr && bb != &to && bb != cgFunc->GetLastBB(); bb = bb->GetNext()) { + if (!bb->IsEmptyOrCommentOnly() || bb->IsUnreachable() || bb->GetKind() != BB::kBBFallthru) { + return false; + } + } + return (bb == &to); +} + +/* return true if insns in bb1 and bb2 are the same except the last goto insn. 
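+ * For example (hypothetical blocks), the bodies of
+ *   BB3: add w0, w0, #1; b .L_exit
+ *   BB7: add w0, w0, #1; b .L_other
+ * compare equal in DoSameThing: machine instructions and operands match
+ * pairwise, and only the trailing branch is allowed to differ.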
*/ +bool ChainingPattern::DoSameThing(const BB &bb1, const Insn &last1, const BB &bb2, const Insn &last2) const { + const Insn *insn1 = bb1.GetFirstInsn(); + const Insn *insn2 = bb2.GetFirstInsn(); + while (insn1 != nullptr && insn1 != last1.GetNext() && insn2 != nullptr && insn2 != last2.GetNext()) { + if (!insn1->IsMachineInstruction()) { + insn1 = insn1->GetNext(); + continue; + } + if (!insn2->IsMachineInstruction()) { + insn2 = insn2->GetNext(); + continue; + } + if (insn1->GetMachineOpcode() != insn2->GetMachineOpcode()) { + return false; + } + uint32 opndNum = insn1->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &op1 = insn1->GetOperand(i); + Operand &op2 = insn2->GetOperand(i); + if (&op1 == &op2) { + continue; + } + if (!op1.Equals(op2)) { + return false; + } + } + insn1 = insn1->GetNext(); + insn2 = insn2->GetNext(); + } + return (insn1 == last1.GetNext() && insn2 == last2.GetNext()); +} + +/* + * BB2 can be merged into BB1, if + * 1. BB1's kind is fallthrough; + * 2. BB2 has only one predecessor which is BB1 and BB2 is not the lastbb + * 3. BB2 is neither catch BB nor switch case BB + */ +bool ChainingPattern::MergeFallthuBB(BB &curBB) { + BB *sucBB = curBB.GetNext(); + if (sucBB == nullptr || + IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) || + !cgFunc->GetTheCFG()->CanMerge(curBB, *sucBB)) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + if (sucBB == cgFunc->GetLastBB()) { + cgFunc->SetLastBB(curBB); + } + cgFunc->GetTheCFG()->MergeBB(curBB, *sucBB, *cgFunc); + keepPosition = true; + return true; +} + +bool ChainingPattern::MergeGotoBB(BB &curBB, BB &sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->MergeBB(curBB, sucBB, *cgFunc); + keepPosition = true; + return true; +} + +bool ChainingPattern::MoveSuccBBAsCurBBNext(BB &curBB, BB &sucBB) { + /* + * without the judge below, there is + * Assembler Error: CFI state restore without previous remember + */ + if (sucBB.GetHasCfi() || (sucBB.GetFirstInsn() != nullptr && sucBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + /* put sucBB as curBB's next. 
*/ + DEBUG_ASSERT(sucBB.GetPrev() != nullptr, "the target of current goto BB will not be the first bb"); + sucBB.GetPrev()->SetNext(sucBB.GetNext()); + if (sucBB.GetNext() != nullptr) { + sucBB.GetNext()->SetPrev(sucBB.GetPrev()); + } + sucBB.SetNext(curBB.GetNext()); + DEBUG_ASSERT(curBB.GetNext() != nullptr, "current goto BB will not be the last bb"); + curBB.GetNext()->SetPrev(&sucBB); + sucBB.SetPrev(&curBB); + curBB.SetNext(&sucBB); + curBB.RemoveInsn(*curBB.GetLastInsn()); + curBB.SetKind(BB::kBBFallthru); + return true; +} + +bool ChainingPattern::RemoveGotoInsn(BB &curBB, BB &sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + if (&sucBB != curBB.GetNext()) { + DEBUG_ASSERT(curBB.GetNext() != nullptr, "nullptr check"); + curBB.RemoveSuccs(sucBB); + curBB.PushBackSuccs(*curBB.GetNext()); + curBB.GetNext()->PushBackPreds(curBB); + sucBB.RemovePreds(curBB); + } + curBB.RemoveInsn(*curBB.GetLastInsn()); + curBB.SetKind(BB::kBBFallthru); + return true; +} + +bool ChainingPattern::ClearCurBBAndResetTargetBB(BB &curBB, BB &sucBB) { + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Insn *brInsn = nullptr; + for (brInsn = curBB.GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()){ + break; + } + } + DEBUG_ASSERT(brInsn != nullptr, "goto BB has no branch"); + BB *newTarget = sucBB.GetPrev(); + DEBUG_ASSERT(newTarget != nullptr, "get prev bb failed in ChainingPattern::ClearCurBBAndResetTargetBB"); + Insn *last1 = newTarget->GetLastInsn(); + if (newTarget->GetKind() == BB::kBBGoto) { + Insn *br = nullptr; + for (br = newTarget->GetLastInsn(); br != newTarget->GetFirstInsn()->GetPrev(); br = br->GetPrev()) { + if (br->IsUnCondBranch()){ + break; + } + } + DEBUG_ASSERT(br != nullptr, "goto BB has no branch"); + last1 = br->GetPrev(); + } + if (last1 == nullptr || !DoSameThing(*newTarget, *last1, curBB, *brInsn->GetPrev())) { + return false; + } + + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + + LabelIdx tgtLabIdx = newTarget->GetLabIdx(); + if (newTarget->GetLabIdx() == MIRLabelTable::GetDummyLabel()) { + tgtLabIdx = cgFunc->CreateLabel(); + newTarget->AddLabel(tgtLabIdx); + } + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(tgtLabIdx); + brInsn->SetOperand(0, brTarget); + curBB.RemoveInsnSequence(*curBB.GetFirstInsn(), *brInsn->GetPrev()); + + curBB.RemoveFromSuccessorList(sucBB); + curBB.PushBackSuccs(*newTarget); + sucBB.RemoveFromPredecessorList(curBB); + newTarget->PushBackPreds(curBB); + + sucBB.GetPrev()->SetUnreachable(false); + keepPosition = true; + return true; +} + +/* + * Following optimizations are performed: + * 1. Basic block merging + * 2. unnecessary jumps elimination + * 3. Remove duplicates Basic block. + */ +bool ChainingPattern::Optimize(BB &curBB) { + if (curBB.GetKind() == BB::kBBFallthru) { + return MergeFallthuBB(curBB); + } + + if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) { + Insn* last = curBB.GetLastInsn(); + if (last->IsTailCall()) { + return false; + } + + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + /* + * BB2 can be merged into BB1, if + * 1. BB1 ends with a goto; + * 2. BB2 has only one predecessor which is BB1 + * 3. BB2 is of goto kind. Otherwise, the original fall through will be broken + * 4. 
BB2 is neither catch BB nor switch case BB + */ + if (sucBB == nullptr || curBB.GetEhSuccs().size() != sucBB->GetEhSuccs().size()) { + return false; + } + if (!curBB.GetEhSuccs().empty() && (curBB.GetEhSuccs().front() != sucBB->GetEhSuccs().front())) { + return false; + } + if (sucBB->GetKind() == BB::kBBGoto && + !IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) && + cgFunc->GetTheCFG()->CanMerge(curBB, *sucBB)) { + return MergeGotoBB(curBB, *sucBB); + } else if (sucBB != &curBB && + curBB.GetNext() != sucBB && + sucBB != cgFunc->GetLastBB() && + !sucBB->IsPredecessor(*sucBB->GetPrev()) && + !(sucBB->GetNext() != nullptr && sucBB->GetNext()->IsPredecessor(*sucBB)) && + !IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) && + sucBB->GetEhSuccs().empty() && + sucBB->GetKind() != BB::kBBThrow) { + return MoveSuccBBAsCurBBNext(curBB, *sucBB); + } + /* + * Last goto instruction can be removed, if: + * 1. The goto target is physically the next one to current BB. + */ + else if (sucBB == curBB.GetNext() || + (NoInsnBetween(curBB, *sucBB) && !IsLabelInLSDAOrSwitchTable(curBB.GetNext()->GetLabIdx()))) { + return RemoveGotoInsn(curBB, *sucBB); + } + /* + * Clear curBB and target it to sucBB->GetPrev() + * if sucBB->GetPrev() and curBB's insns are the same. + * + * curBB: curBB: + * insn_x0 b prevbb + * b sucBB ... + * ... ==> prevbb: + * prevbb: insn_x0 + * insn_x0 sucBB: + * sucBB: + */ + else if (sucBB != curBB.GetNext() && + !curBB.IsSoloGoto() && + !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx()) && + sucBB->GetKind() == BB::kBBReturn && + sucBB->GetPreds().size() > 1 && + sucBB->GetPrev() != nullptr && + sucBB->IsPredecessor(*sucBB->GetPrev()) && + (sucBB->GetPrev()->GetKind() == BB::kBBFallthru || sucBB->GetPrev()->GetKind() == BB::kBBGoto)) { + return ClearCurBBAndResetTargetBB(curBB, *sucBB); + } + } + return false; +} + +/* + * curBB: curBB: + * insn_x0 insn_x0 + * b targetBB b BB + * ... ==> ... + * targetBB: targetBB: + * b BB b BB + * ... ... + * BB: BB: + * *------------------------------ + * curBB: curBB: + * insn_x0 insn_x0 + * cond_br brBB cond_br BB + * ... ... + * brBB: ==> brBB: + * b BB b BB + * ... ... + * BB: BB: + * + * conditions: + * 1. 
only goto and comment in brBB; + */ +bool SequentialJumpPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + return false; + } + if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) { + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + CHECK_FATAL(sucBB != nullptr, "sucBB is null in SequentialJumpPattern::Optimize"); + BB *tragetBB = CGCFG::GetTargetSuc(*sucBB); + if ((sucBB != &curBB) && sucBB->IsSoloGoto() && tragetBB != nullptr && tragetBB != sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB); + SkipSucBB(curBB, *sucBB); + return true; + } + } else if (curBB.GetKind() == BB::kBBIf) { + for (BB *sucBB : curBB.GetSuccs()) { + BB *tragetBB = CGCFG::GetTargetSuc(*sucBB); + if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && + tragetBB != nullptr && tragetBB != sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB); + SkipSucBB(curBB, *sucBB); + return true; + } + } + } else if (curBB.GetKind() == BB::kBBRangeGoto) { + bool changed = false; + for (BB *sucBB : curBB.GetSuccs()) { + if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && + cgFunc->GetTheCFG()->GetTargetSuc(*sucBB) != nullptr) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + UpdateSwitchSucc(curBB, *sucBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*sucBB, *cgFunc); + changed = true; + } + } + return changed; + } + return false; +} + +void SequentialJumpPattern::UpdateSwitchSucc(BB &curBB, BB &sucBB) { + BB *gotoTarget = cgFunc->GetTheCFG()->GetTargetSuc(sucBB); + CHECK_FATAL(gotoTarget != nullptr, "gotoTarget is null in SequentialJumpPattern::UpdateSwitchSucc"); + const MapleVector &labelVec = curBB.GetRangeGotoLabelVec(); + bool isPred = false; + for (auto label: labelVec) { + if (label == gotoTarget->GetLabIdx()) { + isPred = true; + break; + } + } + for (size_t i = 0; i < labelVec.size(); ++i) { + if (labelVec[i] == sucBB.GetLabIdx()) { + curBB.SetRangeGotoLabel(i, gotoTarget->GetLabIdx()); + } + } + cgFunc->UpdateEmitSt(curBB, sucBB.GetLabIdx(), gotoTarget->GetLabIdx()); + + /* connect curBB, gotoTarget */ + for (auto it = gotoTarget->GetPredsBegin(); it != gotoTarget->GetPredsEnd(); ++it) { + if (*it == &sucBB) { + auto origIt = it; + if (isPred) { + break; + } + if (origIt != gotoTarget->GetPredsBegin()) { + --origIt; + gotoTarget->InsertPred(origIt, curBB); + } else { + gotoTarget->PushFrontPreds(curBB); + } + break; + } + } + for (auto it = curBB.GetSuccsBegin(); it != curBB.GetSuccsEnd(); ++it) { + if (*it == &sucBB) { + auto origIt = it; + curBB.EraseSuccs(it); + if (isPred) { + break; + } + if (origIt != curBB.GetSuccsBegin()) { + --origIt; + curBB.InsertSucc(origIt, *gotoTarget); + } else { + curBB.PushFrontSuccs(*gotoTarget); + } + break; + } + } + /* cut curBB -> sucBB */ + for (auto it = sucBB.GetPredsBegin(); it != sucBB.GetPredsEnd(); ++it) { + if (*it == &curBB) { + sucBB.ErasePreds(it); + } + } + for (auto it = curBB.GetSuccsBegin(); it != curBB.GetSuccsEnd(); ++it) { + if (*it == &sucBB) { + curBB.EraseSuccs(it); + } + } +} + +/* + * preCond: + * sucBB is one of curBB's successor. 
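+ * For instance (labels hypothetical): curBB ends with "b .L2" and sucBB is
+ * the solo-goto block ".L2: b .L3". The caller has already retargeted
+ * curBB's jump to .L3, so this helper only rewires the CFG edges:
+ * curBB -> sucBB becomes curBB -> (block labelled .L3).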
+ * + * Change curBB's successor to sucBB's successor + */ +void SequentialJumpPattern::SkipSucBB(BB &curBB, BB &sucBB) { + BB *gotoTarget = cgFunc->GetTheCFG()->GetTargetSuc(sucBB); + CHECK_FATAL(gotoTarget != nullptr, "gotoTarget is null in SequentialJumpPattern::SkipSucBB"); + curBB.RemoveSuccs(sucBB); + curBB.PushBackSuccs(*gotoTarget); + sucBB.RemovePreds(curBB); + gotoTarget->PushBackPreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(sucBB, *cgFunc); +} + +/* + * Found pattern + * curBB: curBB: + * ... ==> ... + * cond_br brBB cond1_br ftBB + * ftBB: brBB: + * bl throwfunc ... + * brBB: retBB: + * ... ... + * retBB: ftBB: + * ... bl throwfunc + */ +void FlipBRPattern::RelocateThrowBB(BB &curBB) { + BB *ftBB = curBB.GetNext(); + CHECK_FATAL(ftBB != nullptr, "ifBB has a fall through BB"); + CGCFG *theCFG = cgFunc->GetTheCFG(); + CHECK_FATAL(theCFG != nullptr, "nullptr check"); + BB *retBB = theCFG->FindLastRetBB(); + CHECK_FATAL(retBB != nullptr, "must have a return BB"); + if (ftBB->GetKind() != BB::kBBThrow || !ftBB->GetEhSuccs().empty() || + IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) || !retBB->GetEhSuccs().empty()) { + return; + } + BB *brBB = theCFG->GetTargetSuc(curBB); + if (brBB != ftBB->GetNext()) { + return; + } + + EHFunc *ehFunc = cgFunc->GetEHFunc(); + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + const MapleVector &callsiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callsiteTable.size(); ++i) { + LSDACallSite *lsdaCallsite = callsiteTable[i]; + BB *endTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetEndOffset()->GetLabelIdx()); + BB *startTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetStartOffset()->GetLabelIdx()); + if (retBB->GetId() >= startTry->GetId() && retBB->GetId() <= endTry->GetId()) { + if (retBB->GetNext()->GetId() < startTry->GetId() || retBB->GetNext()->GetId() > endTry->GetId() || + curBB.GetId() < startTry->GetId() || curBB.GetId() > endTry->GetId()) { + return; + } + } else { + if ((retBB->GetNext()->GetId() >= startTry->GetId() && retBB->GetNext()->GetId() <= endTry->GetId()) || + (curBB.GetId() >= startTry->GetId() && curBB.GetId() <= endTry->GetId())) { + return; + } + } + } + } + /* get branch insn of curBB */ + Insn *curBBBranchInsn = theCFG->FindLastCondBrInsn(curBB); + CHECK_FATAL(curBBBranchInsn != nullptr, "curBB(it is a kBBif) has no branch"); + + /* Reverse the branch */ + uint32 targetIdx = GetJumpTargetIdx(*curBBBranchInsn); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(*ftBB); + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + + /* move ftBB after retBB */ + curBB.SetNext(brBB); + brBB->SetPrev(&curBB); + + retBB->GetNext()->SetPrev(ftBB); + ftBB->SetNext(retBB->GetNext()); + ftBB->SetPrev(retBB); + retBB->SetNext(ftBB); +} + +/* + * 1. relocate goto BB + * Found pattern (1) ftBB->GetPreds().size() == 1 + * curBB: curBB: cond1_br target + * ... ==> brBB: + * cond_br brBB ... + * ftBB: targetBB: (ftBB,targetBB) + * goto target (2) ftBB->GetPreds().size() > 1 + * brBB: curBB : cond1_br ftBB + * ... brBB: + * targetBB ... + * ftBB + * targetBB + * + * 2. 
relocate throw BB in RelocateThrowBB() + */ +bool FlipBRPattern::Optimize(BB &curBB) { + if (curBB.GetKind() == BB::kBBIf && !curBB.IsEmpty()) { + BB *ftBB = curBB.GetNext(); + DEBUG_ASSERT(ftBB != nullptr, "ftBB is null in FlipBRPattern::Optimize"); + BB *brBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + DEBUG_ASSERT(brBB != nullptr, "brBB is null in FlipBRPattern::Optimize"); + /* Check if it can be optimized */ + if (ftBB->GetKind() == BB::kBBGoto && ftBB->GetNext() == brBB) { + if (!ftBB->GetEhSuccs().empty()) { + return false; + } + Insn *curBBBranchInsn = nullptr; + for (curBBBranchInsn = curBB.GetLastInsn(); curBBBranchInsn != nullptr; + curBBBranchInsn = curBBBranchInsn->GetPrev()) { + if (curBBBranchInsn->IsBranch()) { + break; + } + } + DEBUG_ASSERT(curBBBranchInsn != nullptr, "FlipBRPattern: curBB has no branch"); + Insn *brInsn = nullptr; + for (brInsn = ftBB->GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()) { + break; + } + } + DEBUG_ASSERT(brInsn != nullptr, "FlipBRPattern: ftBB has no branch"); + + /* Reverse the branch */ + uint32 targetIdx = GetJumpTargetIdx(*curBBBranchInsn); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + if (mOp == 0) { + return false; + } + auto it = ftBB->GetSuccsBegin(); + BB *tgtBB = *it; + if (ftBB->GetPreds().size() == 1 && + (ftBB->IsSoloGoto() || + (!IsLabelInLSDAOrSwitchTable(tgtBB->GetLabIdx()) && + cgFunc->GetTheCFG()->CanMerge(*ftBB, *tgtBB)))) { + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + Operand &brTarget = brInsn->GetOperand(GetJumpTargetIdx(*brInsn)); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + /* Insert ftBB's insn at the beginning of tgtBB. */ + if (!ftBB->IsSoloGoto()) { + ftBB->RemoveInsn(*brInsn); + tgtBB->InsertAtBeginning(*ftBB); + } + /* Patch pred and succ lists */ + ftBB->EraseSuccs(it); + ftBB->PushBackSuccs(*brBB); + it = curBB.GetSuccsBegin(); + CHECK_FATAL(*it != nullptr, "nullptr check"); + if (*it == brBB) { + curBB.EraseSuccs(it); + curBB.PushBackSuccs(*tgtBB); + } else { + ++it; + curBB.EraseSuccs(it); + curBB.PushFrontSuccs(*tgtBB); + } + for (it = tgtBB->GetPredsBegin(); it != tgtBB->GetPredsEnd(); ++it) { + if (*it == ftBB) { + tgtBB->ErasePreds(it); + break; + } + } + tgtBB->PushBackPreds(curBB); + for (it = brBB->GetPredsBegin(); it != brBB->GetPredsEnd(); ++it) { + if (*it == &curBB) { + brBB->ErasePreds(it); + break; + } + } + brBB->PushFrontPreds(*ftBB); + /* Remove instructions from ftBB so curBB falls thru to brBB */ + ftBB->SetFirstInsn(nullptr); + ftBB->SetLastInsn(nullptr); + ftBB->SetKind(BB::kBBFallthru); + } else if (!IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) && + !tgtBB->IsPredecessor(*tgtBB->GetPrev())) { + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + LabelIdx tgtLabIdx = ftBB->GetLabIdx(); + if (ftBB->GetLabIdx() == MIRLabelTable::GetDummyLabel()) { + tgtLabIdx = cgFunc->CreateLabel(); + ftBB->AddLabel(tgtLabIdx); + } + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(tgtLabIdx); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + curBB.SetNext(brBB); + brBB->SetPrev(&curBB); + ftBB->SetPrev(tgtBB->GetPrev()); + tgtBB->GetPrev()->SetNext(ftBB); + ftBB->SetNext(tgtBB); + tgtBB->SetPrev(ftBB); + + ftBB->RemoveInsn(*brInsn); + ftBB->SetKind(BB::kBBFallthru); + } + } else { + RelocateThrowBB(curBB); + } + } + return false; +} + +/* remove a basic block that contains nothing */ +bool EmptyBBPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + return false; + } + 
/* Empty bb but do not have cleanup label. */ + if (curBB.GetPrev() != nullptr && curBB.GetFirstStmt() != cgFunc->GetCleanupLabel() && + curBB.GetFirstInsn() == nullptr && curBB.GetLastInsn() == nullptr && &curBB != cgFunc->GetLastBB() && + curBB.GetKind() != BB::kBBReturn && !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx())) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + if (sucBB == nullptr || sucBB->GetFirstStmt() == cgFunc->GetCleanupLabel()) { + return false; + } + cgFunc->GetTheCFG()->RemoveBB(curBB); + /* removeBB may do nothing. since no need to repeat, always ret false here. */ + return false; + } + return false; +} + +/* + * remove unreachable BB + * condition: + * 1. unreachable BB can't have cfi instruction when postcfgo. + */ +bool UnreachBBPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + /* if curBB in exitbbsvec,return false. */ + if (cgFunc->IsExitBB(curBB)) { + curBB.SetUnreachable(false); + return false; + } + + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* if curBB InLSDA ,replace curBB's label with nextReachableBB before remove it. */ + if (ehFunc != nullptr && ehFunc->NeedFullLSDA() && + cgFunc->GetTheCFG()->InLSDA(curBB.GetLabIdx(), *ehFunc)) { + /* find nextReachableBB */ + BB *nextReachableBB = nullptr; + for (BB *bb = &curBB; bb != nullptr; bb = bb->GetNext()) { + if (!bb->IsUnreachable()) { + nextReachableBB = bb; + break; + } + } + CHECK_FATAL(nextReachableBB != nullptr, "nextReachableBB not be nullptr"); + if (nextReachableBB->GetLabIdx() == 0) { + LabelIdx labIdx = cgFunc->CreateLabel(); + nextReachableBB->AddLabel(labIdx); + cgFunc->SetLab2BBMap(labIdx, *nextReachableBB); + } + + ehFunc->GetLSDACallSiteTable()->UpdateCallSite(curBB, *nextReachableBB); + } + + if (curBB.GetSuccs().empty() && curBB.GetEhSuccs().empty()) { + return false; + } + + if (curBB.GetPrev() != nullptr) { + curBB.GetPrev()->SetNext(curBB.GetNext()); + } + if (curBB.GetNext() != nullptr) { + curBB.GetNext()->SetPrev(curBB.GetPrev()); + } + + /* flush after remove; */ + for (BB *bb : curBB.GetSuccs()) { + bb->RemovePreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*bb, *cgFunc); + } + for (BB *bb : curBB.GetEhSuccs()) { + bb->RemoveEhPreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*bb, *cgFunc); + } + curBB.ClearSuccs(); + curBB.ClearEhSuccs(); + return true; + } + return false; +} + +/* BB_pred1: BB_pred1: + * b curBB insn_x0 + * ... b BB2 + * BB_pred2: ==> ... + * b curBB BB_pred2: + * ... insn_x0 + * curBB: b BB2 + * insn_x0 ... + * b BB2 curBB: + * insn_x0 + * b BB2 + * condition: + * 1. The number of instruct in curBB + * is less than THRESHOLD; + * 2. curBB can't have cfi instruction when postcfgo. 
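+ * The transformation trades code size for one fewer jump on every goto
+ * predecessor, so it is additionally disabled by CGOptions::IsNoDupBB() and
+ * CGOptions::OptimizeForSize(); kThreshold bounds how many instructions may
+ * be copied into each predecessor.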
+ */ +bool DuplicateBBPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + return false; + } + if (CGOptions::IsNoDupBB() || CGOptions::OptimizeForSize()) { + return false; + } + + /* curBB can't be in try block */ + if (curBB.GetKind() != BB::kBBGoto || IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx()) || + !curBB.GetEhSuccs().empty()) { + return false; + } + +#if TARGARM32 + FOR_BB_INSNS(insn, (&curBB)) { + if (insn->IsPCLoad() || insn->IsClinit()) { + return false; + } + } +#endif + /* It is possible curBB jump to itself */ + uint32 numPreds = curBB.NumPreds(); + for (BB *bb : curBB.GetPreds()) { + if (bb == &curBB) { + numPreds--; + } + } + + if (numPreds > 1 && cgFunc->GetTheCFG()->GetTargetSuc(curBB) != nullptr && + cgFunc->GetTheCFG()->GetTargetSuc(curBB)->NumPreds() > 1) { + std::vector candidates; + for (BB *bb : curBB.GetPreds()) { + if (bb->GetKind() == BB::kBBGoto && bb->GetNext() != &curBB && bb != &curBB && !bb->IsEmpty()) { + candidates.emplace_back(bb); + } + } + if (candidates.empty()) { + return false; + } + if (curBB.NumInsn() <= kThreshold) { + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + bool changed = false; + for (BB *bb : candidates) { + if (curBB.GetEhSuccs().size() != bb->GetEhSuccs().size()) { + continue; + } + if (!curBB.GetEhSuccs().empty() && (curBB.GetEhSuccs().front() != bb->GetEhSuccs().front())) { + continue; + } + bb->RemoveInsn(*bb->GetLastInsn()); + FOR_BB_INSNS(insn, (&curBB)) { + Insn *clonedInsn = cgFunc->GetTheCFG()->CloneInsn(*insn); + clonedInsn->SetPrev(nullptr); + clonedInsn->SetNext(nullptr); + clonedInsn->SetBB(nullptr); + bb->AppendInsn(*clonedInsn); + } + bb->RemoveSuccs(curBB); + for (BB *item : curBB.GetSuccs()) { + bb->PushBackSuccs(*item); + item->PushBackPreds(*bb); + } + curBB.RemovePreds(*bb); + changed = true; + } + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(curBB, *cgFunc); + return changed; + } + } + return false; +} + +/* === new pm === */ +bool CgCfgo::PhaseRun(maplebe::CGFunc &f) { + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-cfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("after-cfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCfgo, cfgo) + +bool CgPostCfgo::PhaseRun(maplebe::CGFunc &f) { + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-postcfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("after-postcfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostCfgo, postcfgo) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cfi.cpp b/ecmascript/mapleall/maple_be/src/cg/cfi.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..62f73aa5ce7285885192d0251d3c937e6072600b --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cfi.cpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cfi.h" +#include "emit.h" + +namespace cfi { +using maplebe::Operand; +using maplebe::MOperator; +using maplebe::CG; +using maplebe::Emitter; +using maplebe::OpndDesc; + +struct CfiDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store cfi instruction's operand type */ + std::array opndTypes; +}; + +static CfiDescr cfiDescrTable[kOpCfiLast + 1] = { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) \ + { ".cfi_" #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \ + { "." #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#include "cfi.def" +#undef CFI_DEFINE +#undef ARM_DIRECTIVES_DEFINE + { ".cfi_undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } } +}; + +void CfiInsn::Dump() const { + MOperator mOp = GetMachineOpcode(); + CfiDescr &cfiDescr = cfiDescrTable[mOp]; + LogInfo::MapleLogger() << "CFI " << cfiDescr.name; + for (uint32 i = 0; i < static_cast(cfiDescr.opndCount); ++i) { + LogInfo::MapleLogger() << (i == 0 ? 
" : " : " "); + Operand &curOperand = GetOperand(i); + curOperand.Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +#if DEBUG +void CfiInsn::Check() const { + CfiDescr &cfiDescr = cfiDescrTable[GetMachineOpcode()]; + /* cfi instruction's 3rd /4th/5th operand must be null */ + for (uint32 i = 0; i < static_cast(cfiDescr.opndCount); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != cfiDescr.opndTypes[i]) { + CHECK_FATAL(false, "incorrect operand in cfi insn"); + } + } +} +#endif + +void RegOperand::Dump() const { + LogInfo::MapleLogger() << "reg: " << regNO << "[ size: " << GetSize() << "] "; +} + +void ImmOperand::Dump() const { + LogInfo::MapleLogger() << "imm: " << val << "[ size: " << GetSize() << "] "; +} + +void StrOperand::Dump() const { + LogInfo::MapleLogger() << str; +} + +void LabelOperand::Dump() const { + LogInfo::MapleLogger() << "label:" << labelIndex; +} +void CFIOpndEmitVisitor::Visit(RegOperand *v) { + emitter.Emit(v->GetRegisterNO()); +} +void CFIOpndEmitVisitor::Visit(ImmOperand *v) { + emitter.Emit(v->GetValue()); +} +void CFIOpndEmitVisitor::Visit(SymbolOperand *v) { + CHECK_FATAL(false, "NIY"); +} +void CFIOpndEmitVisitor::Visit(StrOperand *v) { + emitter.Emit(v->GetStr()); +} +void CFIOpndEmitVisitor::Visit(LabelOperand *v) { + if (emitter.GetCG()->GetMIRModule()->IsCModule()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + const char *idx = strdup(std::to_string(pIdx).c_str()); + emitter.Emit(".label.").Emit(idx).Emit("__").Emit(v->GetIabelIdx()); + } else { + emitter.Emit(".label.").Emit(v->GetParentFunc()).Emit(v->GetIabelIdx()); + } +} +} /* namespace cfi */ + +namespace maplebe { +bool CgGenCfi::PhaseRun(maplebe::CGFunc &f) { + f.GenerateCfiPrologEpilog(); + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgGenCfi, gencfi) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cg.cpp b/ecmascript/mapleall/maple_be/src/cg/cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c729b1f6ec988a92eb35659b16b2ab5b0b729c73 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg.cpp @@ -0,0 +1,294 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "emit.h" + +namespace maplebe { +using namespace maple; + +#define JAVALANG (mirModule->IsJavaModule()) + +void Globals::SetTarget(CG &target) { + cg = ⌖ +} +const CG *Globals::GetTarget() const { + DEBUG_ASSERT(cg, " set target info please "); + return cg; +} + +CGFunc *CG::currentCGFunction = nullptr; +std::map> CG::funcWrapLabels; + +CG::~CG() { + if (emitter != nullptr) { + emitter->CloseOutput(); + } + delete memPool; + memPool = nullptr; + mirModule = nullptr; + emitter = nullptr; + currentCGFunction = nullptr; + instrumentationFunction = nullptr; + dbgTraceEnter = nullptr; + dbgTraceExit = nullptr; + dbgFuncProfile = nullptr; +} +/* This function intends to be a more general form of GenFieldOffsetmap. 
*/ +void CG::GenExtraTypeMetadata(const std::string &classListFileName, const std::string &outputBaseName) { + const std::string &cMacroDefSuffix = ".macros.def"; + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + std::vector classesToGenerate; + + if (classListFileName.empty()) { + /* + * Class list not specified. Visit all classes. + */ + std::set visited; + + for (const auto &tyId : mirModule->GetClassList()) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyId); + if ((mirType->GetKind() != kTypeClass) && (mirType->GetKind() != kTypeClassIncomplete)) { + continue; /* Skip non-class. Too paranoid. We just enumerated classlist_! */ + } + MIRClassType *classType = static_cast(mirType); + const std::string &name = classType->GetName(); + + if (visited.find(name) != visited.end()) { + continue; /* Skip duplicated class definitions. */ + } + + (void)visited.insert(name); + classesToGenerate.emplace_back(classType); + } + } else { + /* Visit listed classes. */ + std::ifstream inFile(classListFileName); + CHECK_FATAL(inFile.is_open(), "Failed to open file: %s", classListFileName.c_str()); + std::string str; + + /* check each class name first and expose all unknown classes */ + while (inFile >> str) { + MIRType *type = GlobalTables::GetTypeTable().GetOrCreateClassType(str, *mirModule); + MIRClassType *classType = static_cast(type); + if (classType == nullptr) { + LogInfo::MapleLogger() << " >>>>>>>> unknown class: " << str.c_str() << "\n"; + return; + } + + classesToGenerate.emplace_back(classType); + } + } + + if (cgOption.GenDef()) { + const std::string &outputFileName = outputBaseName + cMacroDefSuffix; + FILE *outputFile = fopen(outputFileName.c_str(), "w"); + if (outputFile == nullptr) { + FATAL(kLncFatal, "open file failed in CG::GenExtraTypeMetadata"); + } + + for (auto classType : classesToGenerate) { + beCommon->GenObjSize(*classType, *outputFile); + beCommon->GenFieldOffsetMap(*classType, *outputFile); + } + fclose(outputFile); + } + + if (cgOption.GenGctib()) { + maple::LogInfo::MapleLogger(kLlErr) << "--gen-gctib-file option not implemented"; + } +} + +void CG::GenPrimordialObjectList(const std::string &outputBaseName) { + const std::string &kPrimorListSuffix = ".primordials.txt"; + if (!cgOption.GenPrimorList()) { + return; + } + + const std::string &outputFileName = outputBaseName + kPrimorListSuffix; + FILE *outputFile = fopen(outputFileName.c_str(), "w"); + if (outputFile == nullptr) { + FATAL(kLncFatal, "open file failed in CG::GenPrimordialObjectList"); + } + + for (StIdx stIdx : mirModule->GetSymbolSet()) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + DEBUG_ASSERT(symbol != nullptr, "get symbol from st idx failed"); + if (symbol->IsPrimordialObject()) { + const std::string &name = symbol->GetName(); + fprintf(outputFile, "%s\n", name.c_str()); + } + } + + fclose(outputFile); +} + +void CG::AddStackGuardvar() { + MIRSymbol *chkGuard = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + chkGuard->SetNameStrIdx(std::string("__stack_chk_guard")); + chkGuard->SetStorageClass(kScExtern); + chkGuard->SetSKind(kStVar); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().size() > PTY_u64, "out of vector range"); + chkGuard->SetTyIdx(GlobalTables::GetTypeTable().GetTypeTable()[PTY_u64]->GetTypeIndex()); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkGuard); + + MIRSymbol *chkFunc = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + 
chkFunc->SetNameStrIdx(std::string("__stack_chk_fail")); + chkFunc->SetStorageClass(kScText); + chkFunc->SetSKind(kStFunc); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkFunc); +} + +void CG::SetInstrumentationFunction(const std::string &name) { + instrumentationFunction = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + instrumentationFunction->SetNameStrIdx(std::string("__").append(name).append("__")); + instrumentationFunction->SetStorageClass(kScText); + instrumentationFunction->SetSKind(kStFunc); +} + +#define DBG_TRACE_ENTER MplDtEnter +#define DBG_TRACE_EXIT MplDtExit +#define XSTR(s) str(s) +#define str(s) #s + +void CG::DefineDebugTraceFunctions() { + dbgTraceEnter = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgTraceEnter->SetNameStrIdx(std::string("__" XSTR(DBG_TRACE_ENTER) "__")); + dbgTraceEnter->SetStorageClass(kScText); + dbgTraceEnter->SetSKind(kStFunc); + + dbgTraceExit = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgTraceExit->SetNameStrIdx(std::string("__" XSTR(DBG_TRACE_EXIT) "__")); + dbgTraceExit->SetStorageClass(kScText); + dbgTraceExit->SetSKind(kStFunc); + + dbgFuncProfile = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgFuncProfile->SetNameStrIdx(std::string("__" XSTR(MplFuncProfile) "__")); + dbgFuncProfile->SetStorageClass(kScText); + dbgFuncProfile->SetSKind(kStFunc); +} + +/* + * Add the fields of curStructType to the result. Used to handle recursive + * structures. + */ +static void AppendReferenceOffsets64(const BECommon &beCommon, MIRStructType &curStructType, int64 &curOffset, + std::vector &result) { + /* + * We are going to reimplement BECommon::GetFieldOffset so that we can do + * this in one pass through all fields. + * + * The tricky part is to make sure the object layout described here is + * compatible with the rest of the system. This implies that we need + * something like a "Maple ABI" documented for each platform. 
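+ *
+ * Offset arithmetic used below, on a hypothetical field: with curOffset == 6
+ * and a field of alignment 4 and size 8, myOffset == RoundUp(6, 4) == 8 and
+ * nextOffset == 16; only pointer-typed fields record myOffset in the result.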
+ */ + if (curStructType.GetKind() == kTypeClass) { + MIRClassType &curClassTy = static_cast(curStructType); + auto maybeParent = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curClassTy.GetParentTyIdx()); + if (maybeParent != nullptr) { + if (maybeParent->GetKind() == kTypeClass) { + auto parentClassType = static_cast(maybeParent); + AppendReferenceOffsets64(beCommon, *parentClassType, curOffset, result); + } else { + LogInfo::MapleLogger() << "WARNING:: generating objmap for incomplete class\n"; + } + } + } + + for (const auto &fieldPair : curStructType.GetFields()) { + auto fieldNameIdx = fieldPair.first; + auto fieldTypeIdx = fieldPair.second.first; + + auto &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldNameIdx); + auto fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTypeIdx); + auto &fieldTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldType->GetNameStrIdx()); + auto fieldTypeKind = fieldType->GetKind(); + + auto fieldSize = beCommon.GetTypeSize(fieldTypeIdx); + auto fieldAlign = beCommon.GetTypeAlign(fieldTypeIdx); + int64 myOffset = static_cast(RoundUp(curOffset, fieldAlign)); + int64 nextOffset = myOffset + fieldSize; + + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " field: " << fieldName << "\n"; + LogInfo::MapleLogger() << " type: " << fieldTypeIdx << ": " << fieldTypeName << "\n"; + LogInfo::MapleLogger() << " type kind: " << fieldTypeKind << "\n"; + LogInfo::MapleLogger() << " size: " << fieldSize << "\n"; /* int64 */ + LogInfo::MapleLogger() << " align: " << static_cast(fieldAlign) << "\n"; /* int8_t */ + LogInfo::MapleLogger() << " field offset:" << myOffset << "\n"; /* int64 */ + } + + if (fieldTypeKind == kTypePointer) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " ** Is a pointer field.\n"; + } + result.emplace_back(myOffset); + } + + if ((fieldTypeKind == kTypeArray) || (fieldTypeKind == kTypeStruct) || (fieldTypeKind == kTypeClass) || + (fieldTypeKind == kTypeInterface)) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " ** ERROR: We are not expecting nested aggregate type. "; + LogInfo::MapleLogger() << "All Java classes are flat -- no nested structs. "; + LogInfo::MapleLogger() << "Please extend me if we are going to work with non-java languages.\n"; + } + } + + curOffset = nextOffset; + } +} + +/* Return a list of offsets of reference fields. */ +std::vector CG::GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType) { + std::vector result; + /* java class layout has already been done in previous phase. 
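+ * For class types the cached JClassLayout is therefore read directly and the
+ * offsets of fields marked IsRef() are collected; other aggregates (except
+ * interfaces, which hold no reference fields) fall back to the recursive
+ * AppendReferenceOffsets64 walk above.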
*/ + if (structType.GetKind() == kTypeClass) { + for (auto fieldInfo : beCommon.GetJClassLayout(static_cast(structType))) { + if (fieldInfo.IsRef()) { + result.emplace_back(static_cast(fieldInfo.GetOffset())); + } + } + } else if (structType.GetKind() != kTypeInterface) { /* interface doesn't have reference fields */ + int64 curOffset = 0; + AppendReferenceOffsets64(beCommon, structType, curOffset, result); + } + + return result; +} + +const std::string CG::ExtractFuncName(const std::string &str) { + /* 3: length of "_7C" */ + size_t offset = 3; + size_t pos1 = str.find("_7C"); + if (pos1 == std::string::npos) { + return str; + } + size_t pos2 = str.find("_7C", pos1 + offset); + if (pos2 == std::string::npos) { + return str; + } + std::string funcName = str.substr(pos1 + offset, pos2 - pos1 - offset); + /* avoid funcName like __LINE__ and __FILE__ which will be resolved by assembler */ + if (funcName.find("__") != std::string::npos) { + return str; + } + if (funcName == "_3Cinit_3E") { + return "init"; + } + if (funcName == "_3Cclinit_3E") { + return "clinit"; + } + return funcName; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_cfg.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_cfg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0d2733d55d9060c1df985ed35d44f421fb595117 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -0,0 +1,897 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "cg_cfg.h"
+#if TARGAARCH64
+#include "aarch64_insn.h"
+#elif TARGRISCV64
+#include "riscv64_insn.h"
+#endif
+#if TARGARM32
+#include "arm32_insn.h"
+#endif
+#include "cg_option.h"
+#include "mpl_logging.h"
+#if TARGX86_64
+#include "x64_cgfunc.h"
+#include "cg.h"
+#endif
+#include <cstdlib> /* for abs() in CheckCFGFreq */
+
+namespace {
+using namespace maplebe;
+bool CanBBThrow(const BB &bb) {
+    FOR_BB_INSNS_CONST(insn, &bb) {
+        if (insn->IsTargetInsn() && insn->CanThrow()) {
+            return true;
+        }
+    }
+    return false;
+}
+}
+
+namespace maplebe {
+void CGCFG::BuildCFG() {
+    /*
+     * Second Pass:
+     * Link preds/succs in the BBs
+     */
+    BB *firstBB = cgFunc->GetFirstBB();
+    for (BB *curBB = firstBB; curBB != nullptr; curBB = curBB->GetNext()) {
+        BB::BBKind kind = curBB->GetKind();
+        switch (kind) {
+            case BB::kBBIntrinsic:
+                /*
+                 * An intrinsic BB appends a MOP_wcbnz instruction at the end; see
+                 * AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode *intrinsiccallNode) for details
+                 */
+                if (!curBB->GetLastInsn()->IsBranch()) {
+                    break;
+                }
+                /* else fall through */
+                [[clang::fallthrough]];
+            case BB::kBBIf: {
+                BB *fallthruBB = curBB->GetNext();
+                curBB->PushBackSuccs(*fallthruBB);
+                fallthruBB->PushBackPreds(*curBB);
+                Insn *branchInsn = curBB->GetLastMachineInsn();
+                CHECK_FATAL(branchInsn != nullptr, "machine instruction must exist in ifBB");
+                DEBUG_ASSERT(branchInsn->IsCondBranch(), "must be a conditional branch generated from an intrinsic");
+                /* Assume the last non-null operand is the branch target */
+                int lastOpndIndex = static_cast<int>(curBB->GetLastInsn()->GetOperandSize()) - 1;
+                DEBUG_ASSERT(lastOpndIndex > -1, "lastOpndIndex must be greater than -1");
+                Operand &lastOpnd = branchInsn->GetOperand(static_cast<uint32>(lastOpndIndex));
+                DEBUG_ASSERT(lastOpnd.IsLabelOpnd(), "label operand must exist in branch insn");
+                auto &labelOpnd = static_cast<LabelOperand&>(lastOpnd);
+                BB *brToBB = cgFunc->GetBBFromLab2BBMap(labelOpnd.GetLabelIndex());
+                if (fallthruBB->GetId() != brToBB->GetId()) {
+                    curBB->PushBackSuccs(*brToBB);
+                    brToBB->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            case BB::kBBGoto: {
+                Insn *insn = curBB->GetLastMachineInsn();
+                CHECK_FATAL(insn != nullptr, "machine insn must exist in gotoBB");
+                DEBUG_ASSERT(insn->IsUnCondBranch(), "insn must be an unconditional branch insn");
+                LabelIdx labelIdx = static_cast<LabelOperand&>(insn->GetOperand(0)).GetLabelIndex();
+                BB *gotoBB = cgFunc->GetBBFromLab2BBMap(labelIdx);
+                CHECK_FATAL(gotoBB != nullptr, "gotoBB is null");
+                curBB->PushBackSuccs(*gotoBB);
+                gotoBB->PushBackPreds(*curBB);
+                break;
+            }
+            case BB::kBBIgoto: {
+                for (auto lidx : CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetLabelTab()->GetAddrTakenLabels()) {
+                    BB *igotobb = cgFunc->GetBBFromLab2BBMap(lidx);
+                    CHECK_FATAL(igotobb, "igotobb is null");
+                    curBB->PushBackSuccs(*igotobb);
+                    igotobb->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            case BB::kBBRangeGoto: {
+                std::set<BB*> bbs;
+                for (auto labelIdx : curBB->GetRangeGotoLabelVec()) {
+                    BB *gotoBB = cgFunc->GetBBFromLab2BBMap(labelIdx);
+                    bbs.insert(gotoBB);
+                }
+                for (auto gotoBB : bbs) {
+                    curBB->PushBackSuccs(*gotoBB);
+                    gotoBB->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            case BB::kBBThrow:
+                break;
+            case BB::kBBFallthru: {
+                BB *fallthruBB = curBB->GetNext();
+                if (fallthruBB != nullptr) {
+                    curBB->PushBackSuccs(*fallthruBB);
+                    fallthruBB->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            default:
+                break;
+        } /* end switch */
+
+        EHFunc *ehFunc = cgFunc->GetEHFunc();
+        /* Check exception table.
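+           Each LSDACallSite entry describes one try-range via a pair of start/end
+           labels (csLength) plus an optional landing pad (csLandingPad).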
If curBB is in a try block, add catch BB to its succs */
+        if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) {
+            /* Determine if an insn in bb can actually throw */
+            if (CanBBThrow(*curBB)) {
+                const MapleVector<LSDACallSite*> &callsiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable();
+                for (size_t i = 0; i < callsiteTable.size(); ++i) {
+                    LSDACallSite *lsdaCallsite = callsiteTable[i];
+                    BB *endTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetEndOffset()->GetLabelIdx());
+                    BB *startTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetStartOffset()->GetLabelIdx());
+                    if (curBB->GetId() >= startTry->GetId() && curBB->GetId() <= endTry->GetId() &&
+                        lsdaCallsite->csLandingPad.GetEndOffset() != nullptr) {
+                        BB *landingPad = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLandingPad.GetEndOffset()->GetLabelIdx());
+                        curBB->PushBackEhSuccs(*landingPad);
+                        landingPad->PushBackEhPreds(*curBB);
+                    }
+                }
+            }
+        }
+    }
+}
+
+void CGCFG::CheckCFG() {
+    FOR_ALL_BB(bb, cgFunc) {
+        for (BB *sucBB : bb->GetSuccs()) {
+            bool found = false;
+            for (BB *sucPred : sucBB->GetPreds()) {
+                if (sucPred == bb) {
+                    if (found == false) {
+                        found = true;
+                    } else {
+                        LogInfo::MapleLogger() << "dup pred " << sucPred->GetId() << " for sucBB " << sucBB->GetId() << "\n";
+                    }
+                }
+            }
+            if (found == false) {
+                LogInfo::MapleLogger() << "no pred for sucBB " << sucBB->GetId() << " for BB " << bb->GetId() << "\n";
+            }
+        }
+    }
+    FOR_ALL_BB(bb, cgFunc) {
+        for (BB *predBB : bb->GetPreds()) {
+            bool found = false;
+            for (BB *predSucc : predBB->GetSuccs()) {
+                if (predSucc == bb) {
+                    if (found == false) {
+                        found = true;
+                    } else {
+                        LogInfo::MapleLogger() << "dup succ " << predSucc->GetId() << " for predBB " << predBB->GetId() << "\n";
+                    }
+                }
+            }
+            if (found == false) {
+                LogInfo::MapleLogger() << "no succ for predBB " << predBB->GetId() << " for BB " << bb->GetId() << "\n";
+            }
+        }
+    }
+}
+
+void CGCFG::CheckCFGFreq() {
+    auto verifyBBFreq = [this](const BB *bb, uint32 succFreq) {
+        uint32 res = bb->GetFrequency();
+        if ((res != 0 && abs(static_cast<int>(res) - static_cast<int>(succFreq)) / res > 1.0) ||
+            (res == 0 && res != succFreq)) {
+            // BBs with multiple preds and multiple succs are not included in this check
+            if (bb->GetSuccs().size() > 1 && bb->GetPreds().size() > 1) {
+                return;
+            }
+            LogInfo::MapleLogger() << cgFunc->GetName() << " curBB: " << bb->GetId() << " freq: "
+                                   << bb->GetFrequency() << std::endl;
+            CHECK_FATAL(false, "VerifyFreq failure: inconsistent BB frequency!");
+        }
+    };
+    FOR_ALL_BB(bb, cgFunc) {
+        if (bb->IsUnreachable() || bb->IsCleanup()) {
+            continue;
+        }
+        uint32 res = 0;
+        if (bb->GetSuccs().size() > 1) {
+            for (auto *succBB : bb->GetSuccs()) {
+                res += succBB->GetFrequency();
+                if (succBB->GetPreds().size() > 1) {
+                    LogInfo::MapleLogger() << cgFunc->GetName() << " critical edges: curBB: " << bb->GetId() << std::endl;
+                    CHECK_FATAL(false, "The CFG has critical edges!");
+                }
+            }
+            verifyBBFreq(bb, res);
+        } else if (bb->GetSuccs().size() == 1) {
+            auto *succBB = bb->GetSuccs().front();
+            if (succBB->GetPreds().size() == 1) {
+                verifyBBFreq(bb, succBB->GetFrequency());
+            } else if (succBB->GetPreds().size() > 1) {
+                for (auto *pred : succBB->GetPreds()) {
+                    res += pred->GetFrequency();
+                }
+                verifyBBFreq(succBB, res);
+            }
+        }
+    }
+}
+
+InsnVisitor *CGCFG::insnVisitor;
+
+void CGCFG::InitInsnVisitor(CGFunc &func) {
+    insnVisitor = func.NewInsnModifier();
+}
+
+Insn *CGCFG::CloneInsn(Insn &originalInsn) {
+    cgFunc->IncTotalNumberOfInstructions();
+    return insnVisitor->CloneInsn(originalInsn);
+}
+
+RegOperand *CGCFG::CreateVregFromReg(const RegOperand &pReg) {
+    return
insnVisitor->CreateVregFromReg(pReg);
+}
+
+/*
+ * Return true if:
+ *   mergee has only one predecessor, which is merger, or
+ *   all of mergee's other predecessors are comment-only BBs and merger is a solo-goto BB.
+ * mergee cannot have a cfi instruction when running post-cfgo.
+ */
+bool CGCFG::BBJudge(const BB &first, const BB &second) const {
+    if (first.GetKind() == BB::kBBReturn || second.GetKind() == BB::kBBReturn) {
+        return false;
+    }
+    if (&first == &second) {
+        return false;
+    }
+    if (second.GetPreds().size() == 1 && second.GetPreds().front() == &first) {
+        return true;
+    }
+    for (BB *bb : second.GetPreds()) {
+        if (bb != &first && !AreCommentAllPreds(*bb)) {
+            return false;
+        }
+    }
+    return first.IsSoloGoto();
+}
+
+/*
+ * Check if a given BB mergee can be merged into BB merger.
+ * Returns true if:
+ *   1. mergee has only one predecessor, which is merger, or all of mergee's
+ *      other predecessors are comment-only BBs.
+ *   2. merger has only one successor, which is mergee.
+ *   3. mergee cannot have a cfi instruction when running post-cfgo.
+ */
+bool CGCFG::CanMerge(const BB &merger, const BB &mergee) const {
+    if (!BBJudge(merger, mergee)) {
+        return false;
+    }
+    if (mergee.GetFirstInsn() != nullptr && mergee.GetFirstInsn()->IsCfiInsn()) {
+        return false;
+    }
+    return (merger.GetSuccs().size() == 1) && (merger.GetSuccs().front() == &mergee);
+}
+
+/* Check if the given BB contains only comments and all its predecessors (recursively) contain only comments */
+bool CGCFG::AreCommentAllPreds(const BB &bb) {
+    if (!bb.IsCommentBB()) {
+        return false;
+    }
+    for (BB *pred : bb.GetPreds()) {
+        if (!AreCommentAllPreds(*pred)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/* Merge mergee into merger. */
+void CGCFG::MergeBB(BB &merger, BB &mergee, CGFunc &func) {
+    MergeBB(merger, mergee);
+    if (mergee.GetKind() == BB::kBBReturn) {
+        for (size_t i = 0; i < func.ExitBBsVecSize(); ++i) {
+            if (func.GetExitBB(i) == &mergee) {
+                func.EraseExitBBsVec(func.GetExitBBsVec().begin() + i);
+            }
+        }
+        func.PushBackExitBBsVec(merger);
+    }
+    if (mergee.GetKind() == BB::kBBRangeGoto) {
+        func.AddEmitSt(merger.GetId(), *func.GetEmitSt(mergee.GetId()));
+        func.DeleteEmitSt(mergee.GetId());
+    }
+}
+
+void CGCFG::MergeBB(BB &merger, BB &mergee) {
+    if (merger.GetKind() == BB::kBBGoto) {
+        if (!merger.GetLastInsn()->IsBranch()) {
+            CHECK_FATAL(false, "unexpected insn kind");
+        }
+        merger.RemoveInsn(*merger.GetLastInsn());
+    }
+    merger.AppendBBInsns(mergee);
+    if (mergee.GetPrev() != nullptr) {
+        mergee.GetPrev()->SetNext(mergee.GetNext());
+    }
+    if (mergee.GetNext() != nullptr) {
+        mergee.GetNext()->SetPrev(mergee.GetPrev());
+    }
+    merger.RemoveSuccs(mergee);
+    if (!merger.GetEhSuccs().empty()) {
+#if DEBUG
+        for (BB *bb : merger.GetEhSuccs()) {
+            DEBUG_ASSERT((bb != &mergee), "CGCFG::MergeBB: Merging of EH bb");
+        }
+#endif
+    }
+    if (!mergee.GetEhSuccs().empty()) {
+        for (BB *bb : mergee.GetEhSuccs()) {
+            bb->RemoveEhPreds(mergee);
+            bb->PushBackEhPreds(merger);
+            merger.PushBackEhSuccs(*bb);
+        }
+    }
+    for (BB *bb : mergee.GetSuccs()) {
+        bb->RemovePreds(mergee);
+        bb->PushBackPreds(merger);
+        merger.PushBackSuccs(*bb);
+    }
+    merger.SetKind(mergee.GetKind());
+    mergee.SetNext(nullptr);
+    mergee.SetPrev(nullptr);
+    mergee.ClearPreds();
+    mergee.ClearSuccs();
+    mergee.ClearEhPreds();
+    mergee.ClearEhSuccs();
+    mergee.SetFirstInsn(nullptr);
+    mergee.SetLastInsn(nullptr);
+}
+
+/*
+ * Find all reachable BBs by DFS in cgfunc and mark their unreachable field false;
+ * all remaining BBs are then unreachable.
+ */
+void CGCFG::FindAndMarkUnreachable(CGFunc &func) {
+    BB *firstBB = func.GetFirstBB();
+    std::stack<BB*> toBeAnalyzedBBs;
+    toBeAnalyzedBBs.push(firstBB);
+    std::unordered_set<uint32> instackBBs;
+
+    BB *bb = firstBB;
+    /* initially set every bb's unreachable flag to true */
+    while (bb != nullptr) {
+        /* Check if bb is the first or the last BB of the function */
+        if (bb->GetFirstStmt() == func.GetCleanupLabel() || InSwitchTable(bb->GetLabIdx(), func) ||
+            bb == func.GetFirstBB() || bb == func.GetLastBB()) {
+            toBeAnalyzedBBs.push(bb);
+        } else if (bb->IsLabelTaken() == false) {
+            bb->SetUnreachable(true);
+        }
+        bb = bb->GetNext();
+    }
+
+    /* do a dfs to see which bbs are reachable */
+    while (!toBeAnalyzedBBs.empty()) {
+        bb = toBeAnalyzedBBs.top();
+        toBeAnalyzedBBs.pop();
+        (void)instackBBs.insert(bb->GetId());
+
+        bb->SetUnreachable(false);
+
+        for (BB *succBB : bb->GetSuccs()) {
+            if (instackBBs.count(succBB->GetId()) == 0) {
+                toBeAnalyzedBBs.push(succBB);
+                (void)instackBBs.insert(succBB->GetId());
+            }
+        }
+        for (BB *succBB : bb->GetEhSuccs()) {
+            if (instackBBs.count(succBB->GetId()) == 0) {
+                toBeAnalyzedBBs.push(succBB);
+                (void)instackBBs.insert(succBB->GetId());
+            }
+        }
+    }
+}
+
+/*
+ * Theoretically, every time you remove from a bb's preds, you should consider invoking this method.
+ *
+ * @param bb
+ * @param func
+ */
+void CGCFG::FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) const {
+    /* Check if bb is the first or the last BB of the function */
+    bool isFirstBBInfunc = (&bb == func.GetFirstBB());
+    bool isLastBBInfunc = (&bb == func.GetLastBB());
+    if (bb.GetFirstStmt() == func.GetCleanupLabel() || InSwitchTable(bb.GetLabIdx(), func) || isFirstBBInfunc ||
+        isLastBBInfunc) {
+        return;
+    }
+    std::stack<BB*> toBeAnalyzedBBs;
+    toBeAnalyzedBBs.push(&bb);
+    std::set<uint32> instackBBs;
+    BB *it = nullptr;
+    while (!toBeAnalyzedBBs.empty()) {
+        it = toBeAnalyzedBBs.top();
+        (void)instackBBs.insert(it->GetId());
+        toBeAnalyzedBBs.pop();
+        /* Check if bb is the first or the last BB of the function */
+        isFirstBBInfunc = (it == func.GetFirstBB());
+        isLastBBInfunc = (it == func.GetLastBB());
+        bool needFlush = !isFirstBBInfunc && !isLastBBInfunc &&
+                         it->GetFirstStmt() != func.GetCleanupLabel() &&
+                         (it->GetPreds().empty() || (it->GetPreds().size() == 1 && it->GetPreds().front() == it)) &&
+                         it->GetEhPreds().empty() &&
+                         !InSwitchTable(it->GetLabIdx(), *cgFunc) &&
+                         !cgFunc->IsExitBB(*it) &&
+                         (it->IsLabelTaken() == false);
+        if (!needFlush) {
+            continue;
+        }
+        it->SetUnreachable(true);
+        it->SetFirstInsn(nullptr);
+        it->SetLastInsn(nullptr);
+        for (BB *succ : it->GetSuccs()) {
+            if (instackBBs.count(succ->GetId()) == 0) {
+                toBeAnalyzedBBs.push(succ);
+                (void)instackBBs.insert(succ->GetId());
+            }
+            succ->RemovePreds(*it);
+            succ->RemoveEhPreds(*it);
+        }
+        it->ClearSuccs();
+        for (BB *succ : it->GetEhSuccs()) {
+            if (instackBBs.count(succ->GetId()) == 0) {
+                toBeAnalyzedBBs.push(succ);
+                (void)instackBBs.insert(succ->GetId());
+            }
+            succ->RemoveEhPreds(*it);
+            succ->RemovePreds(*it);
+        }
+        it->ClearEhSuccs();
+    }
+}
+
+void CGCFG::RemoveBB(BB &curBB, bool isGotoIf) {
+    BB *sucBB = CGCFG::GetTargetSuc(curBB, false, isGotoIf);
+    if (sucBB != nullptr) {
+        sucBB->RemovePreds(curBB);
+    }
+    BB *fallthruSuc = nullptr;
+    if (isGotoIf) {
+        for (BB *succ : curBB.GetSuccs()) {
+            if (succ == sucBB) {
+                continue;
+            }
+            fallthruSuc = succ;
+            break;
+        }
+
+        DEBUG_ASSERT(fallthruSuc == curBB.GetNext(), "fallthru succ should be its next bb.");
+        if (fallthruSuc != nullptr) {
+            fallthruSuc->RemovePreds(curBB);
+ } + } + + for (BB *preBB : curBB.GetPreds()) { + if (preBB->GetKind() == BB::kBBIgoto) { + return; + } + /* + * If curBB is the target of its predecessor, change + * the jump target. + */ + if (&curBB == GetTargetSuc(*preBB, true, isGotoIf)) { + LabelIdx targetLabel; + if (curBB.GetNext()->GetLabIdx() == 0) { + targetLabel = insnVisitor->GetCGFunc()->CreateLabel(); + curBB.GetNext()->SetLabIdx(targetLabel); + } else { + targetLabel = curBB.GetNext()->GetLabIdx(); + } + insnVisitor->ModifyJumpTarget(targetLabel, *preBB); + } + if (fallthruSuc != nullptr && !fallthruSuc->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*fallthruSuc); + fallthruSuc->PushBackPreds(*preBB); + } + if (sucBB != nullptr && !sucBB->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*sucBB); + sucBB->PushBackPreds(*preBB); + } + preBB->RemoveSuccs(curBB); + } + for (BB *ehSucc : curBB.GetEhSuccs()) { + ehSucc->RemoveEhPreds(curBB); + } + for (BB *ehPred : curBB.GetEhPreds()) { + ehPred->RemoveEhSuccs(curBB); + } + curBB.GetNext()->RemovePreds(curBB); + curBB.GetPrev()->SetNext(curBB.GetNext()); + curBB.GetNext()->SetPrev(curBB.GetPrev()); + cgFunc->ClearBBInVec(curBB.GetId()); + /* remove callsite */ + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* only java try has ehFunc->GetLSDACallSiteTable */ + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + ehFunc->GetLSDACallSiteTable()->RemoveCallSite(curBB); + } +} + +void CGCFG::RetargetJump(BB &srcBB, BB &targetBB) { + insnVisitor->ModifyJumpTarget(srcBB, targetBB); +} + +BB *CGCFG::GetTargetSuc(BB &curBB, bool branchOnly, bool isGotoIf) { + switch (curBB.GetKind()) { + case BB::kBBGoto: + case BB::kBBIntrinsic: + case BB::kBBIf: { + const Insn* origLastInsn = curBB.GetLastMachineInsn(); + if (isGotoIf && (curBB.GetPrev() != nullptr) && + (curBB.GetKind() == BB::kBBGoto || curBB.GetKind() == BB::kBBIf) && + (curBB.GetPrev()->GetKind() == BB::kBBGoto || curBB.GetPrev()->GetKind() == BB::kBBIf)) { + origLastInsn = curBB.GetPrev()->GetLastMachineInsn(); + } + LabelIdx label = insnVisitor->GetJumpLabel(*origLastInsn); + for (BB *bb : curBB.GetSuccs()) { + if (bb->GetLabIdx() == label) { + return bb; + } + } + break; + } + case BB::kBBIgoto: { + for (Insn *insn = curBB.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { +#if TARGAARCH64 + if (insn->GetMachineOpcode() == MOP_adrp_label) { + int64 label = static_cast(insn->GetOperand(1)).GetValue(); + for (BB *bb : curBB.GetSuccs()) { + if (bb->GetLabIdx() == static_cast(label)) { + return bb; + } + } + } +#endif + } + /* can also be a MOP_xbr. */ + return nullptr; + } + case BB::kBBFallthru: { + return (branchOnly ? 
nullptr : curBB.GetNext()); + } + case BB::kBBThrow: + return nullptr; + default: + return nullptr; + } + return nullptr; +} + +bool CGCFG::InLSDA(LabelIdx label, const EHFunc &ehFunc) { + if (!label || ehFunc.GetLSDACallSiteTable() == nullptr) { + return false; + } + if (label == ehFunc.GetLSDACallSiteTable()->GetCSTable().GetEndOffset()->GetLabelIdx() || + label == ehFunc.GetLSDACallSiteTable()->GetCSTable().GetStartOffset()->GetLabelIdx()) { + return true; + } + return ehFunc.GetLSDACallSiteTable()->InCallSiteTable(label); +} + +bool CGCFG::InSwitchTable(LabelIdx label, const CGFunc &func) { + if (!label) { + return false; + } + return func.InSwitchTable(label); +} + +bool CGCFG::IsCompareAndBranchInsn(const Insn &insn) const { + return insnVisitor->IsCompareAndBranchInsn(insn); +} + +bool CGCFG::IsAddOrSubInsn(const Insn &insn) const { + return insnVisitor->IsAddOrSubInsn(insn); +} + +Insn *CGCFG::FindLastCondBrInsn(BB &bb) const { + if (bb.GetKind() != BB::kBBIf) { + return nullptr; + } + FOR_BB_INSNS_REV(insn, (&bb)) { + if (insn->IsBranch()) { + return insn; + } + } + return nullptr; +} + +void CGCFG::MarkLabelTakenBB() { + if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return; + } + for (BB *bb = cgFunc->GetFirstBB(); bb != nullptr; bb = bb->GetNext()) { + if (cgFunc->GetFunction().GetLabelTab()->GetAddrTakenLabels().find(bb->GetLabIdx()) != + cgFunc->GetFunction().GetLabelTab()->GetAddrTakenLabels().end()) { + cgFunc->SetHasTakenLabel(); + bb->SetLabelTaken(); + } + } +} + +/* + * analyse the CFG to find the BBs that are not reachable from function entries + * and delete them + */ +void CGCFG::UnreachCodeAnalysis() { + if (cgFunc->GetMirModule().GetSrcLang() == kSrcLangC && + (cgFunc->HasTakenLabel() || + (cgFunc->GetEHFunc() && cgFunc->GetEHFunc()->GetLSDAHeader()))) { + return; + } + /* + * Find all reachable BBs by dfs in cgfunc and mark their field false, + * then all other bbs should be unreachable. + */ + BB *firstBB = cgFunc->GetFirstBB(); + std::forward_list toBeAnalyzedBBs; + toBeAnalyzedBBs.push_front(firstBB); + std::set unreachBBs; + + BB *bb = firstBB; + /* set all bb's unreacable to true */ + while (bb != nullptr) { + /* Check if bb is the first or the last BB of the function */ + if (bb->GetFirstStmt() == cgFunc->GetCleanupLabel() || InSwitchTable(bb->GetLabIdx(), *cgFunc) || + bb == cgFunc->GetFirstBB() || bb == cgFunc->GetLastBB() || bb->GetKind() == BB::kBBReturn) { + toBeAnalyzedBBs.push_front(bb); + } else { + (void)unreachBBs.insert(bb); + } + if (bb->IsLabelTaken() == false) { + bb->SetUnreachable(true); + } + bb = bb->GetNext(); + } + + /* do a dfs to see which bbs are reachable */ + while (!toBeAnalyzedBBs.empty()) { + bb = toBeAnalyzedBBs.front(); + toBeAnalyzedBBs.pop_front(); + if (!bb->IsUnreachable()) { + continue; + } + bb->SetUnreachable(false); + for (BB *succBB : bb->GetSuccs()) { + toBeAnalyzedBBs.push_front(succBB); + unreachBBs.erase(succBB); + } + for (BB *succBB : bb->GetEhSuccs()) { + toBeAnalyzedBBs.push_front(succBB); + unreachBBs.erase(succBB); + } + } + /* Don't remove unreach code if withDwarf is enabled. 
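+       (removing blocks here could leave the emitted DWARF referencing labels
+       that no longer exist).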
*/
+    if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) {
+        return;
+    }
+    /* remove unreachable bb */
+    std::set<BB*>::iterator it;
+    for (it = unreachBBs.begin(); it != unreachBBs.end(); it++) {
+        BB *unreachBB = *it;
+        DEBUG_ASSERT(unreachBB != nullptr, "unreachBB must not be nullptr");
+        if (cgFunc->IsExitBB(*unreachBB)) {
+            unreachBB->SetUnreachable(false);
+        }
+        EHFunc *ehFunc = cgFunc->GetEHFunc();
+        /* if unreachBB is in the LSDA, replace its label with nextReachableBB's before removing it. */
+        if (ehFunc != nullptr && ehFunc->NeedFullLSDA() &&
+            cgFunc->GetTheCFG()->InLSDA(unreachBB->GetLabIdx(), *ehFunc)) {
+            /* find next reachable BB */
+            BB *nextReachableBB = nullptr;
+            for (BB *curBB = unreachBB; curBB != nullptr; curBB = curBB->GetNext()) {
+                if (!curBB->IsUnreachable()) {
+                    nextReachableBB = curBB;
+                    break;
+                }
+            }
+            CHECK_FATAL(nextReachableBB != nullptr, "nextReachableBB must not be nullptr");
+            if (nextReachableBB->GetLabIdx() == 0) {
+                LabelIdx labelIdx = cgFunc->CreateLabel();
+                nextReachableBB->AddLabel(labelIdx);
+                cgFunc->SetLab2BBMap(labelIdx, *nextReachableBB);
+            }
+
+            ehFunc->GetLSDACallSiteTable()->UpdateCallSite(*unreachBB, *nextReachableBB);
+        }
+
+        unreachBB->GetPrev()->SetNext(unreachBB->GetNext());
+        unreachBB->GetNext()->SetPrev(unreachBB->GetPrev());
+
+        for (BB *sucBB : unreachBB->GetSuccs()) {
+            sucBB->RemovePreds(*unreachBB);
+        }
+        for (BB *ehSucBB : unreachBB->GetEhSuccs()) {
+            ehSucBB->RemoveEhPreds(*unreachBB);
+        }
+
+        unreachBB->ClearSuccs();
+        unreachBB->ClearEhSuccs();
+
+        /* Clear insns in GOT Map. */
+        cgFunc->ClearUnreachableGotInfos(*unreachBB);
+        cgFunc->ClearUnreachableConstInfos(*unreachBB);
+    }
+}
+
+void CGCFG::FindWillExitBBs(BB *bb, std::set<BB*> *visitedBBs) {
+    if (visitedBBs->count(bb) != 0) {
+        return;
+    }
+    visitedBBs->insert(bb);
+    for (BB *predbb : bb->GetPreds()) {
+        FindWillExitBBs(predbb, visitedBBs);
+    }
+}
+
+/*
+ * analyse the CFG to find the BBs that will not reach any function exit; these
+ * are BBs inside infinite loops; mark their wontExit flag and create
+ * artificial edges from them to commonExitBB
+ */
+void CGCFG::WontExitAnalysis() {
+    std::set<BB*> visitedBBs;
+    FindWillExitBBs(cgFunc->GetCommonExitBB(), &visitedBBs);
+    BB *bb = cgFunc->GetFirstBB();
+    while (bb != nullptr) {
+        if (visitedBBs.count(bb) == 0) {
+            bb->SetWontExit(true);
+            if (bb->GetKind() == BB::kBBGoto || bb->GetKind() == BB::kBBThrow) {
+                // make this bb a predecessor of commonExitBB
+                cgFunc->GetCommonExitBB()->PushBackPreds(*bb);
+            }
+        }
+        bb = bb->GetNext();
+    }
+}
+
+BB *CGCFG::FindLastRetBB() {
+    FOR_ALL_BB_REV(bb, cgFunc) {
+        if (bb->GetKind() == BB::kBBReturn) {
+            return bb;
+        }
+    }
+    return nullptr;
+}
+
+void CGCFG::UpdatePredsSuccsAfterSplit(BB &pred, BB &succ, BB &newBB) {
+    /* connect newBB -> succ */
+    for (auto it = succ.GetPredsBegin(); it != succ.GetPredsEnd(); ++it) {
+        if (*it == &pred) {
+            auto origIt = it;
+            succ.ErasePreds(it);
+            if (origIt != succ.GetPredsBegin()) {
+                --origIt;
+                succ.InsertPred(origIt, newBB);
+            } else {
+                succ.PushFrontPreds(newBB);
+            }
+            break;
+        }
+    }
+    newBB.PushBackSuccs(succ);
+
+    /* connect pred -> newBB */
+    for (auto it = pred.GetSuccsBegin(); it != pred.GetSuccsEnd(); ++it) {
+        if (*it == &succ) {
+            auto origIt = it;
+            pred.EraseSuccs(it);
+            if (origIt != pred.GetSuccsBegin()) {
+                --origIt;
+                pred.InsertSucc(origIt, newBB);
+            } else {
+                pred.PushFrontSuccs(newBB);
+            }
+            break;
+        }
+    }
+    newBB.PushBackPreds(pred);
+
+    /* maintain eh info */
+    for (auto it = pred.GetEhSuccs().begin(); it != pred.GetEhSuccs().end(); ++it) {
+ newBB.PushBackEhSuccs(**it); + } + for (auto it = pred.GetEhPredsBegin(); it != pred.GetEhPredsEnd(); ++it) { + newBB.PushBackEhPreds(**it); + } + + /* update phi */ + for (auto phiInsnIt : succ.GetPhiInsns()) { + auto &phiList = static_cast(phiInsnIt.second->GetOperand(kInsnSecondOpnd)); + for (auto phiOpndIt : phiList.GetOperands()) { + uint32 fBBId = phiOpndIt.first; + DEBUG_ASSERT(fBBId != 0, "GetFromBBID = 0"); + BB *predBB = cgFunc->GetBBFromID(fBBId); + if (predBB == &pred) { + phiList.UpdateOpnd(fBBId, newBB.GetId(), *phiOpndIt.second); + break; + } + } + } +} + +#if TARGAARCH64 +void CGCFG::BreakCriticalEdge(BB &pred, BB &succ) { + LabelIdx newLblIdx = cgFunc->CreateLabel(); + BB *newBB = cgFunc->CreateNewBB(newLblIdx, false, BB::kBBGoto, pred.GetFrequency()); + newBB->SetCritical(true); + bool isFallThru = pred.GetNext() == ≻ + /* set prev, next */ + if (isFallThru) { + BB *origNext = pred.GetNext(); + origNext->SetPrev(newBB); + newBB->SetNext(origNext); + pred.SetNext(newBB); + newBB->SetPrev(&pred); + newBB->SetKind(BB::kBBFallthru); + } else { + BB *exitBB = cgFunc->GetExitBBsVec().size() == 0 ? nullptr : cgFunc->GetExitBB(0); + if (exitBB == nullptr) { + cgFunc->GetLastBB()->AppendBB(*newBB); + cgFunc->SetLastBB(*newBB); + } else { + exitBB->AppendBB(*newBB); + } + newBB->AppendInsn( + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xuncond, cgFunc->GetOrCreateLabelOperand(succ.GetLabIdx()))); + } + + /* update offset if succ is goto target */ + if (pred.GetKind() == BB::kBBIf) { + Insn *brInsn = FindLastCondBrInsn(pred); + DEBUG_ASSERT(brInsn != nullptr, "null ptr check"); + LabelOperand &brTarget = static_cast(brInsn->GetOperand(AArch64isa::GetJumpTargetIdx(*brInsn))); + if (brTarget.GetLabelIndex() == succ.GetLabIdx()) { + brInsn->SetOperand(AArch64isa::GetJumpTargetIdx(*brInsn), cgFunc->GetOrCreateLabelOperand(newLblIdx)); + } + } else if (pred.GetKind() == BB::kBBRangeGoto) { + const MapleVector &labelVec = pred.GetRangeGotoLabelVec(); + for (size_t i = 0; i < labelVec.size(); ++i) { + if (labelVec[i] == succ.GetLabIdx()) { + /* single edge for multi jump target, so have to replace all. */ + pred.SetRangeGotoLabel(i, newLblIdx); + } + } + cgFunc->UpdateEmitSt(pred, succ.GetLabIdx(), newLblIdx); + } else { + DEBUG_ASSERT(0, "unexpeced bb kind in BreakCriticalEdge"); + } + + /* update pred, succ */ + UpdatePredsSuccsAfterSplit(pred, succ, *newBB); +} +#endif + +bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) { + CGCFG *cfg = f.GetMemoryPool()->New(f); + f.SetTheCFG(cfg); + /* build control flow graph */ + f.GetTheCFG()->BuildCFG(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgHandleCFG, handlecfg) + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_critical_edge.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_critical_edge.cpp new file mode 100644 index 0000000000000000000000000000000000000000..63a023e29458931bdf26357a5c3faa2823ae0657 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_critical_edge.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg.h" +#include "cg_critical_edge.h" +#include "cg_ssa.h" + +namespace maplebe { +void CriticalEdge::SplitCriticalEdges() { + for (auto it = criticalEdges.begin(); it != criticalEdges.end(); ++it) { + cgFunc->GetTheCFG()->BreakCriticalEdge(*((*it).first), *((*it).second)); + } +} + +void CriticalEdge::CollectCriticalEdges() { + constexpr int multiPredsNum = 2; + FOR_ALL_BB(bb, cgFunc) { + const auto &preds = bb->GetPreds(); + if (preds.size() < multiPredsNum) { + continue; + } + // current BB is a merge + for (BB *pred : preds) { + if (pred->GetKind() == BB::kBBGoto || pred->GetKind() == BB::kBBIgoto) { + continue; + } + if (pred->GetSuccs().size() > 1) { + // pred has more than one succ + criticalEdges.push_back(std::make_pair(pred, bb)); + } + } + } +} + +bool CgCriticalEdge::PhaseRun(maplebe::CGFunc &f) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && f.NumBBs() < kBBLimit) { + MemPool *memPool = GetPhaseMemPool(); + CriticalEdge *split = memPool->New(f, *memPool); + f.GetTheCFG()->InitInsnVisitor(f); + split->CollectCriticalEdges(); + split->SplitCriticalEdges(); + } + return false; +} + +void CgCriticalEdge::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCriticalEdge, cgsplitcriticaledge) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_dce.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_dce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1642aac5ccded508f943d06aa7c0ef31b9f9c95f --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_dce.cpp @@ -0,0 +1,45 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ +#include "cg_dce.h" +#include "cg.h" +namespace maplebe { +void CGDce::DoDce() { + bool tryDceAgain = false; + do { + tryDceAgain = false; + for (auto &ssaIt : GetSSAInfo()->GetAllSSAOperands()) { + if (ssaIt.second != nullptr && !ssaIt.second->IsDeleted()) { + if (RemoveUnuseDef(*ssaIt.second)) { + tryDceAgain = true; + } + } + } + } while (tryDceAgain); +} + +bool CgDce::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CGDce *cgDce = f.GetCG()->CreateCGDce(*GetPhaseMemPool(),f, *ssaInfo); + cgDce->DoDce(); + return false; +} + +void CgDce::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgDce, cgdeadcodeelimination) +} + diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_dominance.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_dominance.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31212fcc6ea590225e10bb9970f74152881de730 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_dominance.cpp @@ -0,0 +1,488 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_dominance.h" +#include +#include "cg_option.h" +#include "cgfunc.h" + +/* + * This phase build dominance + */ +namespace maplebe { +constexpr uint32 kBBVectorInitialSize = 2; +void DomAnalysis::PostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap) { + DEBUG_ASSERT(bb.GetId() < visitedMap.size(), "index out of range in Dominance::PostOrderWalk"); + if (visitedMap[bb.GetId()]) { + return; + } + visitedMap[bb.GetId()] = true; + for (const BB *suc : bb.GetSuccs()) { + PostOrderWalk(*suc, pid, visitedMap); + } + DEBUG_ASSERT(bb.GetId() < postOrderIDVec.size(), "index out of range in Dominance::PostOrderWalk"); + postOrderIDVec[bb.GetId()] = pid++; +} + +void DomAnalysis::GenPostOrderID() { + DEBUG_ASSERT(!bbVec.empty(), "size to be allocated is 0"); + MapleVector visitedMap(bbVec.size() + 1, false, cgFunc.GetFuncScopeAllocator()->Adapter()); + int32 postOrderID = 0; + PostOrderWalk(commonEntryBB, postOrderID, visitedMap); + // initialize reversePostOrder + int32 maxPostOrderID = postOrderID - 1; + reversePostOrder.resize(static_cast(maxPostOrderID + 1)); + for (size_t i = 0; i < postOrderIDVec.size(); ++i) { + int32 postOrderNo = postOrderIDVec[i]; + if (postOrderNo == -1) { + continue; + } + reversePostOrder[static_cast(maxPostOrderID - postOrderNo)] = bbVec[i]; + } +} + +BB *DomAnalysis::Intersect(BB &bb1, const BB &bb2) { + auto *ptrBB1 = &bb1; + auto *ptrBB2 = &bb2; + while (ptrBB1 != ptrBB2) { + while (postOrderIDVec[ptrBB1->GetId()] < postOrderIDVec[ptrBB2->GetId()]) { + ptrBB1 = GetDom(ptrBB1->GetId()); + } + while (postOrderIDVec[ptrBB2->GetId()] < postOrderIDVec[ptrBB1->GetId()]) { + ptrBB2 = GetDom(ptrBB2->GetId()); + } + } + return ptrBB1; +} + +bool DominanceBase::CommonEntryBBIsPred(const BB &bb) const { + for (const BB *suc : commonEntryBB.GetSuccs()) { + if (suc == &bb) { + return true; + } + } + return false; +} 
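+
+// A small worked illustration of the fixed-point iteration implemented in
+// ComputeDominance() below (BB numbers are hypothetical, not from any real CFG):
+//
+//      0 (commonEntryBB)
+//      |
+//      1
+//     / \
+//    2   3
+//     \ /
+//      4
+//
+// Walking in reverse postorder: doms[1] = 0; doms[2] = doms[3] = 1; for BB 4,
+// Intersect(2, 3) climbs the doms[] chain by postorder id until both walkers
+// meet at BB 1, so doms[4] = 1. A second pass changes nothing, so the loop
+// terminates with the immediate-dominator tree 0 -> 1 -> {2, 3, 4}.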
+ +// Figure 3 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void DomAnalysis::ComputeDominance() { + SetDom(commonEntryBB.GetId(), &commonEntryBB); + bool changed; + do { + changed = false; + for (size_t i = 1; i < reversePostOrder.size(); ++i) { + BB *bb = reversePostOrder[i]; + if (bb == nullptr) { + continue; + } + BB *pre = nullptr; + auto it = bb->GetPredsBegin(); + if (CommonEntryBBIsPred(*bb) || bb->GetPreds().empty()) { + pre = &commonEntryBB; + } else { + pre = *it; + } + ++it; + while ((GetDom(pre->GetId()) == nullptr || pre == bb) && it != bb->GetPredsEnd()) { + pre = *it; + ++it; + } + BB *newIDom = pre; + for (; it != bb->GetPredsEnd(); ++it) { + pre = *it; + if (GetDom(pre->GetId()) != nullptr && pre != bb) { + newIDom = Intersect(*pre, *newIDom); + } + } + if (GetDom(bb->GetId()) != newIDom) { + SetDom(bb->GetId(), newIDom); + changed = true; + } + } + } while (changed); +} + +// Figure 5 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void DomAnalysis::ComputeDomFrontiers() { + for (const BB *bb : bbVec) { + if (bb == nullptr || bb == &commonExitBB) { + continue; + } + if (bb->GetPreds().size() < kBBVectorInitialSize) { + continue; + } + for (BB *pre : bb->GetPreds()) { + BB *runner = pre; + while (runner != nullptr && runner != GetDom(bb->GetId()) && runner != &commonEntryBB) { + if (!HasDomFrontier(runner->GetId(), bb->GetId())) { + domFrontier[runner->GetId()].push_back(bb->GetId()); + } + runner = GetDom(runner->GetId()); + } + } + } + // check entry bb's predBB, such as : + // bb1 is commonEntryBB, bb2 is entryBB, bb2 is domFrontier of bb3 and bb7. + // 1 + // | + // 2 <- + // / | + // 3 | + // / \ | + // 4 7--- + // / \ ^ + // | | | + // 5-->6-- + for (BB *succ : commonEntryBB.GetSuccs()) { + if (succ->GetPreds().size() != 1) { // Only deal with one pred bb. 
+ continue; + } + for (BB *pre : succ->GetPreds()) { + BB *runner = pre; + while (runner != GetDom(succ->GetId()) && runner != &commonEntryBB && runner != succ) { + if (!HasDomFrontier(runner->GetId(), succ->GetId())) { + domFrontier[runner->GetId()].push_back(succ->GetId()); + } + runner = GetDom(runner->GetId()); + } + } + } +} + +void DomAnalysis::ComputeDomChildren() { + for (auto *bb : reversePostOrder) { + if (bb == nullptr || GetDom(bb->GetId()) == nullptr) { + continue; + } + BB *parent = GetDom(bb->GetId()); + if (parent == bb) { + continue; + } + domChildren[parent->GetId()].push_back(bb->GetId()); + } +} + +// bbidMarker indicates that the iterDomFrontier results for bbid < bbidMarker +// have been computed +void DomAnalysis::GetIterDomFrontier(const BB *bb, MapleSet *dfset, uint32 bbidMarker, + std::vector &visitedMap) { + if (visitedMap[bb->GetId()]) { + return; + } + visitedMap[bb->GetId()] = true; + for (uint32 frontierbbid : domFrontier[bb->GetId()]) { + (void)dfset->insert(frontierbbid); + if (frontierbbid < bbidMarker) { // union with its computed result + dfset->insert(iterDomFrontier[frontierbbid].begin(), iterDomFrontier[frontierbbid].end()); + } else { // recursive call + BB *frontierbb = bbVec[frontierbbid]; + GetIterDomFrontier(frontierbb, dfset, bbidMarker, visitedMap); + } + } +} + +void DomAnalysis::ComputeIterDomFrontiers() { + for (BB *bb : bbVec) { + if (bb == nullptr || bb == &commonExitBB) { + continue; + } + std::vector visitedMap(bbVec.size(), false); + GetIterDomFrontier(bb, &iterDomFrontier[bb->GetId()], bb->GetId(), visitedMap); + } +} + + +uint32 DomAnalysis::ComputeDtPreorder(const BB &bb, uint32 &num) { + DEBUG_ASSERT(num < dtPreOrder.size(), "index out of range in Dominance::ComputeDtPreorder"); + dtPreOrder[num] = bb.GetId(); + dtDfn[bb.GetId()] = num; + uint32 maxDtDfnOut = num; + ++num; + + for (uint32 k : domChildren[bb.GetId()]) { + maxDtDfnOut = ComputeDtPreorder(*bbVec[k], num); + } + + dtDfnOut[bb.GetId()] = maxDtDfnOut; + return maxDtDfnOut; +} + +// true if b1 dominates b2 +bool DomAnalysis::Dominate(const BB &bb1, const BB &bb2) { + return dtDfn[bb1.GetId()] <= dtDfn[bb2.GetId()] && dtDfnOut[bb1.GetId()] >= dtDfnOut[bb2.GetId()]; +} + +void DomAnalysis::Compute() { + GenPostOrderID(); + ComputeDominance(); + ComputeDomFrontiers(); + ComputeDomChildren(); + ComputeIterDomFrontiers(); + uint32 num = 0; + (void)ComputeDtPreorder(*cgFunc.GetFirstBB(), num); + GetDtPreOrder().resize(num); +} + +void DomAnalysis::Dump() { + for (BB *bb : reversePostOrder) { + LogInfo::MapleLogger() << "postorder no " << postOrderIDVec[bb->GetId()]; + LogInfo::MapleLogger() << " is bb:" << bb->GetId(); + LogInfo::MapleLogger() << " im_dom is bb:" << GetDom(bb->GetId())->GetId(); + LogInfo::MapleLogger() << " domfrontier: ["; + for (uint32 id : domFrontier[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "] domchildren: ["; + for (uint32 id : domChildren[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + LogInfo::MapleLogger() << "\npreorder traversal of dominator tree:"; + for (uint32 id : dtPreOrder) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "\n\n"; +} + +/* ================= for PostDominance ================= */ +void PostDomAnalysis::PdomPostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap) { + DEBUG_ASSERT(bb.GetId() < visitedMap.size(), "index out of range in Dominance::PdomPostOrderWalk"); + if (bbVec[bb.GetId()] == nullptr) { + 
return; + } + if (visitedMap[bb.GetId()]) { + return; + } + visitedMap[bb.GetId()] = true; + for (BB *pre : bb.GetPreds()) { + PdomPostOrderWalk(*pre, pid, visitedMap); + } + DEBUG_ASSERT(bb.GetId() < pdomPostOrderIDVec.size(), "index out of range in Dominance::PdomPostOrderWalk"); + pdomPostOrderIDVec[bb.GetId()] = pid++; +} + +void PostDomAnalysis::PdomGenPostOrderID() { + DEBUG_ASSERT(!bbVec.empty(), "call calloc failed in Dominance::PdomGenPostOrderID"); + MapleVector visitedMap(bbVec.size(), false, cgFunc.GetFuncScopeAllocator()->Adapter()); + int32 postOrderID = 0; + PdomPostOrderWalk(commonExitBB, postOrderID, visitedMap); + // initialize pdomReversePostOrder + int32 maxPostOrderID = postOrderID - 1; + pdomReversePostOrder.resize(static_cast(maxPostOrderID + 1)); + for (size_t i = 0; i < pdomPostOrderIDVec.size(); ++i) { + int32 postOrderNo = pdomPostOrderIDVec[i]; + if (postOrderNo == -1) { + continue; + } + pdomReversePostOrder[static_cast(maxPostOrderID - postOrderNo)] = bbVec[i]; + } +} + +BB *PostDomAnalysis::PdomIntersect(BB &bb1, const BB &bb2) { + auto *ptrBB1 = &bb1; + auto *ptrBB2 = &bb2; + while (ptrBB1 != ptrBB2) { + while (pdomPostOrderIDVec[ptrBB1->GetId()] < pdomPostOrderIDVec[ptrBB2->GetId()]) { + ptrBB1 = GetPdom(ptrBB1->GetId()); + } + while (pdomPostOrderIDVec[ptrBB2->GetId()] < pdomPostOrderIDVec[ptrBB1->GetId()]) { + ptrBB2 = GetPdom(ptrBB2->GetId()); + } + } + return ptrBB1; +} + +// Figure 3 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void PostDomAnalysis::ComputePostDominance() { + SetPdom(commonExitBB.GetId(), &commonExitBB); + bool changed = false; + do { + changed = false; + for (size_t i = 1; i < pdomReversePostOrder.size(); ++i) { + BB *bb = pdomReversePostOrder[i]; + BB *suc = nullptr; + auto it = bb->GetSuccsBegin(); + if (cgFunc.IsExitBB(*bb) || bb->GetSuccs().empty() || + (bb->IsWontExit() && bb->GetKind() == BB::kBBGoto)) { + suc = &commonExitBB; + } else { + suc = *it; + } + ++it; + while ((GetPdom(suc->GetId()) == nullptr || suc == bb) && it != bb->GetSuccsEnd()) { + suc = *it; + ++it; + } + if (GetPdom(suc->GetId()) == nullptr) { + suc = &commonExitBB; + } + BB *newIDom = suc; + for (; it != bb->GetSuccsEnd(); ++it) { + suc = *it; + if (GetPdom(suc->GetId()) != nullptr && suc != bb) { + newIDom = PdomIntersect(*suc, *newIDom); + } + } + if (GetPdom(bb->GetId()) != newIDom) { + SetPdom(bb->GetId(), newIDom); + DEBUG_ASSERT(GetPdom(newIDom->GetId()) != nullptr, "null ptr check"); + changed = true; + } + } + } while (changed); +} + +// Figure 5 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. 
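+// Post-dominance frontier, informally: pdomFrontier[n] holds the BBs m such that
+// n post-dominates some successor of m but does not strictly post-dominate m
+// itself. In the diamond sketched above ComputeDominance(), the pdomFrontier of
+// BB 2 and of BB 3 is {1}: each post-dominates one successor edge out of BB 1,
+// but neither post-dominates BB 1 itself.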
+void PostDomAnalysis::ComputePdomFrontiers() { + for (const BB *bb : bbVec) { + if (bb == nullptr || bb == &commonEntryBB) { + continue; + } + if (bb->GetSuccs().size() < kBBVectorInitialSize) { + continue; + } + for (BB *suc : bb->GetSuccs()) { + BB *runner = suc; + while (runner != GetPdom(bb->GetId()) && runner != &commonEntryBB) { + if (!HasPdomFrontier(runner->GetId(), bb->GetId())) { + pdomFrontier[runner->GetId()].push_back(bb->GetId()); + } + DEBUG_ASSERT(GetPdom(runner->GetId()) != nullptr, "ComputePdomFrontiers: pdoms[] is nullptr"); + runner = GetPdom(runner->GetId()); + } + } + } +} + +void PostDomAnalysis::ComputePdomChildren() { + for (const BB *bb : bbVec) { + if (bb == nullptr || GetPdom(bb->GetId()) == nullptr) { + continue; + } + const BB *parent = GetPdom(bb->GetId()); + if (parent == bb) { + continue; + } + pdomChildren[parent->GetId()].push_back(bb->GetId()); + } +} + +// bbidMarker indicates that the iterPdomFrontier results for bbid < bbidMarker +// have been computed +void PostDomAnalysis::GetIterPdomFrontier(const BB *bb, MapleSet *dfset, uint32 bbidMarker, + std::vector &visitedMap) { + if (visitedMap[bb->GetId()]) { + return; + } + visitedMap[bb->GetId()] = true; + for (uint32 frontierbbid : pdomFrontier[bb->GetId()]) { + (void)dfset->insert(frontierbbid); + if (frontierbbid < bbidMarker) { // union with its computed result + dfset->insert(iterPdomFrontier[frontierbbid].begin(), iterPdomFrontier[frontierbbid].end()); + } else { // recursive call + BB *frontierbb = bbVec[frontierbbid]; + GetIterPdomFrontier(frontierbb, dfset, bbidMarker, visitedMap); + } + } +} + +void PostDomAnalysis::ComputeIterPdomFrontiers() { + for (BB *bb : bbVec) { + if (bb == nullptr || bb == &commonEntryBB) { + continue; + } + std::vector visitedMap(bbVec.size(), false); + GetIterPdomFrontier(bb, &iterPdomFrontier[bb->GetId()], bb->GetId(), visitedMap); + } +} + +uint32 PostDomAnalysis::ComputePdtPreorder(const BB &bb, uint32 &num) { + DEBUG_ASSERT(num < pdtPreOrder.size(), "index out of range in Dominance::ComputePdtPreOrder"); + pdtPreOrder[num] = bb.GetId(); + pdtDfn[bb.GetId()] = num; + uint32 maxDtDfnOut = num; + ++num; + + for (uint32 k : pdomChildren[bb.GetId()]) { + maxDtDfnOut = ComputePdtPreorder(*bbVec[k], num); + } + + pdtDfnOut[bb.GetId()] = maxDtDfnOut; + return maxDtDfnOut; +} + +// true if b1 postdominates b2 +bool PostDomAnalysis::PostDominate(const BB &bb1, const BB &bb2) { + return pdtDfn[bb1.GetId()] <= pdtDfn[bb2.GetId()] && pdtDfnOut[bb1.GetId()] >= pdtDfnOut[bb2.GetId()]; +} + +void PostDomAnalysis::Dump() { + for (BB *bb : pdomReversePostOrder) { + LogInfo::MapleLogger() << "pdom_postorder no " << pdomPostOrderIDVec[bb->GetId()]; + LogInfo::MapleLogger() << " is bb:" << bb->GetId(); + LogInfo::MapleLogger() << " im_pdom is bb:" << GetPdom(bb->GetId())->GetId(); + LogInfo::MapleLogger() << " pdomfrontier: ["; + for (uint32 id : pdomFrontier[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "] pdomchildren: ["; + for (uint32 id : pdomChildren[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "preorder traversal of post-dominator tree:"; + for (uint32 id : pdtPreOrder) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "\n\n"; +} + +void PostDomAnalysis::Compute() { + PdomGenPostOrderID(); + ComputePostDominance(); + ComputePdomFrontiers(); + ComputePdomChildren(); + ComputeIterPdomFrontiers(); + 
uint32 num = 0; + (void)ComputePdtPreorder(GetCommonExitBB(), num); + ResizePdtPreOrder(num); +} + +bool CgDomAnalysis::PhaseRun(maplebe::CGFunc &f) { + MemPool *domMemPool = GetPhaseMemPool(); + domAnalysis = domMemPool->New(f, *domMemPool, *domMemPool, f.GetAllBBs(), + *f.GetFirstBB(), *f.GetCommonExitBB()); + domAnalysis->Compute(); + if (CG_DEBUG_FUNC(f)) { + domAnalysis->Dump(); + } + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgDomAnalysis, domanalysis) + +bool CgPostDomAnalysis::PhaseRun(maplebe::CGFunc &f) { + MemPool *pdomMemPool = GetPhaseMemPool(); + pdomAnalysis = pdomMemPool->New(f, *pdomMemPool, *pdomMemPool, f.GetAllBBs(), + *f.GetFirstBB(), *f.GetCommonExitBB()); + pdomAnalysis->Compute(); + if (CG_DEBUG_FUNC(f)) { + pdomAnalysis->Dump(); + } + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgPostDomAnalysis, pdomanalysis) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_irbuilder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f1796a3f14a63a019853075a0a3a8841194cbe3f --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_irbuilder.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "cg_irbuilder.h" +#include "isa.h" +#include "cg.h" +#include "cfi.h" +#include "dbg.h" + +namespace maplebe { +Insn &InsnBuilder::BuildInsn(MOperator opCode, const InsnDesc &idesc) { + auto *newInsn = mp->New(*mp, opCode); + newInsn->SetInsnDescrption(idesc); + IncreaseInsnNum(); + return *newInsn; +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0); +} +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1); +} +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2); +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3); +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + Insn &nI = BuildInsn(opCode, tMd); + return nI.AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3).AddOpndChain(o4); +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, std::vector &opnds) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + Insn &nI = BuildInsn(opCode, tMd); + for (auto *opnd : opnds) { + nI.AddOperand(*opnd); + } + return nI; +} + +Insn &InsnBuilder::BuildCfiInsn(MOperator opCode) { + auto *nI = mp->New(*mp, opCode); + IncreaseInsnNum(); + return *nI; +} +Insn &InsnBuilder::BuildDbgInsn(MOperator opCode) { + auto *nI = mp->New(*mp, opCode); + IncreaseInsnNum(); + return *nI; +} + +VectorInsn &InsnBuilder::BuildVectorInsn(MOperator opCode, const InsnDesc &idesc) { + auto *newInsn = mp->New(*mp, opCode); + newInsn->SetInsnDescrption(idesc); + IncreaseInsnNum(); + return *newInsn; +} + +ImmOperand &OperandBuilder::CreateImm(uint32 size, int64 value, MemPool *mp) { + return mp ? *mp->New(value, size, false) : *alloc.New(value, size, false); +} + +ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp) { + return mp ? *mp->New(symbol, offset, relocs, false) : + *alloc.New(symbol, offset, relocs, false); +} + +MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) { + return mp ? *mp->New(size) : *alloc.New(size); +} + +MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size) { + MemOperand *memOprand = &CreateMem(size); + memOprand->SetBaseRegister(baseOpnd); + memOprand->SetOffsetOperand(CreateImm(baseOpnd.GetSize(), offset)); + return *memOprand; +} + +RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) { + virtualRegNum++; + regno_t vRegNO = baseVirtualRegNO + virtualRegNum; + return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); +} + +RegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp) { + return mp ? 
*mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); +} + +RegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp) { + return mp ? *mp->New(pRegNO, size, type) : *alloc.New(pRegNO, size, type); +} + +ListOperand &OperandBuilder::CreateList(MemPool *mp) { + return mp ? *mp->New(alloc) : *alloc.New(alloc); +} + +FuncNameOperand &OperandBuilder::CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp){ + return mp ? *mp->New(symbol) : *alloc.New(symbol); +} + +LabelOperand &OperandBuilder::CreateLabel(const char *parent, LabelIdx idx, MemPool *mp){ + return mp ? *mp->New(parent, idx) : *alloc.New(parent, idx); +} + +CommentOperand &OperandBuilder::CreateComment(const std::string &s, MemPool *mp) { + return mp ? *mp->New(s, *mp) : *alloc.New(s, *mp); +} + +CommentOperand &OperandBuilder::CreateComment(const MapleString &s, MemPool *mp) { + return mp ? *mp->New(s.c_str(), *mp) : *alloc.New(s.c_str(), *mp); +} + +} diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_occur.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_occur.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ec87c882bec82988ef7386abc1c5e603c0d39786 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_occur.cpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_occur.h" +#include "cg_pre.h" + +/* The methods associated with the data structures that represent occurrences and work candidates for PRE */ +namespace maplebe { +/* return if this occur dominate occ */ +bool CgOccur::IsDominate(DomAnalysis &dom, CgOccur &occ) { + return dom.Dominate(*GetBB(), *occ.GetBB()); +} + +/* compute bucket index for the work candidate in workCandHashTable */ +uint32 PreWorkCandHashTable::ComputeWorkCandHashIndex(const Operand &opnd) { + uint32 hashIdx = static_cast(reinterpret_cast(&opnd) >> k4ByteSize); + return hashIdx % workCandHashLength; +} + +uint32 PreWorkCandHashTable::ComputeStmtWorkCandHashIndex(const Insn &insn) { + uint32 hIdx = (static_cast(insn.GetMachineOpcode())) << k3ByteSize; + return hIdx % workCandHashLength; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_option.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_option.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a88c20bf52eb8e6006c6c8e8a3f5667599e2c70c --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_option.cpp @@ -0,0 +1,1228 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ +#include "cg_option.h" +#include +#include +#include "cg_options.h" +#include "driver_options.h" +#include "mpl_logging.h" +#include "parser_opt.h" +#include "mir_parser.h" +#include "string_utils.h" +#include "triple.h" + +namespace maplebe { +using namespace maple; + +const std::string kMplcgVersion = ""; + +bool CGOptions::timePhases = false; +std::string CGOptions::targetArch = ""; +std::unordered_set CGOptions::dumpPhases = {}; +std::unordered_set CGOptions::skipPhases = {}; +std::unordered_map> CGOptions::cyclePatternMap = {}; +std::string CGOptions::skipFrom = ""; +std::string CGOptions::skipAfter = ""; +std::string CGOptions::dumpFunc = "*"; +std::string CGOptions::globalVarProfile = ""; +std::string CGOptions::profileData = ""; +std::string CGOptions::profileFuncData = ""; +std::string CGOptions::profileClassData = ""; +#ifdef TARGARM32 +std::string CGOptions::duplicateAsmFile = ""; +#else +std::string CGOptions::duplicateAsmFile = "maple/mrt/codetricks/arch/arm64/duplicateFunc.s"; +#endif +Range CGOptions::range = Range(); +std::string CGOptions::fastFuncsAsmFile = ""; +Range CGOptions::spillRanges = Range(); +uint8 CGOptions::fastAllocMode = 0; /* 0: fast, 1: spill all */ +bool CGOptions::fastAlloc = false; +uint64 CGOptions::lsraBBOptSize = 150000; +uint64 CGOptions::lsraInsnOptSize = 200000; +uint64 CGOptions::overlapNum = 28; +uint8 CGOptions::rematLevel = 2; +bool CGOptions::optForSize = false; +bool CGOptions::enableHotColdSplit = false; +uint32 CGOptions::alignMinBBSize = 16; +uint32 CGOptions::alignMaxBBSize = 96; +uint32 CGOptions::loopAlignPow = 4; +uint32 CGOptions::jumpAlignPow = 5; +uint32 CGOptions::funcAlignPow = 5; +#if TARGAARCH64 || TARGRISCV64 +bool CGOptions::useBarriersForVolatile = false; +#else +bool CGOptions::useBarriersForVolatile = true; +#endif +bool CGOptions::exclusiveEH = false; +bool CGOptions::doEBO = false; +bool CGOptions::doCGSSA = false; +bool CGOptions::doIPARA = true; +bool CGOptions::doCFGO = false; +bool CGOptions::doICO = false; +bool CGOptions::doStoreLoadOpt = false; +bool CGOptions::doGlobalOpt = false; +bool CGOptions::doVregRename = false; +bool CGOptions::doMultiPassColorRA = true; +bool CGOptions::doPrePeephole = false; +bool CGOptions::doPeephole = false; +bool CGOptions::doRetMerge = false; +bool CGOptions::doSchedule = false; +bool CGOptions::doWriteRefFieldOpt = false; +bool CGOptions::dumpOptimizeCommonLog = false; +bool CGOptions::checkArrayStore = false; +bool CGOptions::doPIC = false; +bool CGOptions::noDupBB = false; +bool CGOptions::noCalleeCFI = true; +bool CGOptions::emitCyclePattern = false; +bool CGOptions::insertYieldPoint = false; +bool CGOptions::mapleLinker = false; +bool CGOptions::printFunction = false; +bool CGOptions::nativeOpt = false; +bool CGOptions::lazyBinding = false; +bool CGOptions::hotFix = false; +bool CGOptions::debugSched = false; +bool CGOptions::bruteForceSched = false; +bool CGOptions::simulateSched = false; +CGOptions::ABIType CGOptions::abiType = kABIHard; +CGOptions::EmitFileType CGOptions::emitFileType = kAsm; +bool CGOptions::genLongCalls = false; +bool CGOptions::functionSections = false; +bool CGOptions::useFramePointer = false; +bool CGOptions::gcOnly = false; +bool CGOptions::quiet = false; +bool CGOptions::doPatchLongBranch = false; +bool CGOptions::doPreSchedule = false; +bool CGOptions::emitBlockMarker = true; +bool CGOptions::inRange = false; +bool CGOptions::doPreLSRAOpt = false; +bool CGOptions::doLocalRefSpill = false; +bool 
+
+CGOptions &CGOptions::GetInstance() {
+  static CGOptions instance;
+  return instance;
+}
+
+void CGOptions::DecideMplcgRealLevel(bool isDebug) {
+  if (opts::cg::o0) {
+    if (isDebug) {
+      LogInfo::MapleLogger() << "Real Mplcg level: O0\n";
+    }
+    EnableO0();
+  }
+
+  if (opts::cg::o1) {
+    if (isDebug) {
+      LogInfo::MapleLogger() << "Real Mplcg level: O1\n";
+    }
+    EnableO1();
+  }
+
+  if (opts::cg::o2 || opts::cg::os) {
+    if (opts::cg::os) {
+      optForSize = true;
+    }
+    if (isDebug) {
+      std::string oLog = opts::cg::os ? "Os" : "O2";
+      LogInfo::MapleLogger() << "Real Mplcg level: " << oLog << "\n";
+    }
+    EnableO2();
+  }
+
+  if (opts::cg::olitecg) {
+    if (isDebug) {
+      LogInfo::MapleLogger() << "Real Mplcg level: LiteCG\n";
+    }
+    EnableLiteCG();
+  }
+}
+
+bool CGOptions::SolveOptions(bool isDebug) {
+  DecideMplcgRealLevel(isDebug);
+
+  for (const auto &opt : cgCategory.GetEnabledOptions()) {
+    std::string printOpt;
+    if (isDebug) {
+      for (const auto &val : opt->GetRawValues()) {
+        printOpt += opt->GetName() + " " + val + " ";
+      }
+      LogInfo::MapleLogger() << "cg options: " << printOpt << '\n';
+    }
+  }
+
+  if (opts::cg::quiet.IsEnabledByUser()) {
+    SetQuiet(true);
+  }
+
+  if (opts::verbose.IsEnabledByUser()) {
+    SetQuiet(false);
+  }
+
+  if (opts::cg::pie.IsEnabledByUser()) {
+    opts::cg::pie ?
SetOption(CGOptions::kGenPie) : ClearOption(CGOptions::kGenPie); + } + + if (opts::cg::fpic.IsEnabledByUser()) { + if (opts::cg::fpic) { + EnablePIC(); + SetOption(CGOptions::kGenPic); + } else { + DisablePIC(); + ClearOption(CGOptions::kGenPic); + } + } + + if (opts::cg::verboseAsm.IsEnabledByUser()) { + opts::cg::verboseAsm ? SetOption(CGOptions::kVerboseAsm) : ClearOption(CGOptions::kVerboseAsm); + } + + if (opts::cg::verboseCg.IsEnabledByUser()) { + opts::cg::verboseCg ? SetOption(CGOptions::kVerboseCG) : ClearOption(CGOptions::kVerboseCG); + } + + if (opts::cg::maplelinker.IsEnabledByUser()) { + opts::cg::maplelinker ? EnableMapleLinker() : DisableMapleLinker(); + } + + if (opts::cg::fastAlloc.IsEnabledByUser()) { + EnableFastAlloc(); + SetFastAllocMode(opts::cg::fastAlloc); + } + + if (opts::cg::useBarriersForVolatile.IsEnabledByUser()) { + opts::cg::useBarriersForVolatile ? EnableBarriersForVolatile() : DisableBarriersForVolatile(); + } + + if (opts::cg::spillRange.IsEnabledByUser()) { + SetRange(opts::cg::spillRange, "--pill-range", GetSpillRanges()); + } + + if (opts::cg::range.IsEnabledByUser()) { + SetRange(opts::cg::range, "--range", GetRange()); + } + + if (opts::cg::timePhases.IsEnabledByUser()) { + opts::cg::timePhases ? EnableTimePhases() : DisableTimePhases(); + } + + if (opts::cg::dumpFunc.IsEnabledByUser()) { + SetDumpFunc(opts::cg::dumpFunc); + } + + if (opts::cg::duplicateAsmList.IsEnabledByUser()) { + SetDuplicateAsmFile(opts::cg::duplicateAsmList); + } + + if (opts::cg::duplicateAsmList2.IsEnabledByUser()) { + SetFastFuncsAsmFile(opts::cg::duplicateAsmList2); + } + + if (opts::cg::insertCall.IsEnabledByUser()) { + SetOption(kGenInsertCall); + SetInstrumentationFunction(opts::cg::insertCall); + SetInsertCall(true); + } + + if (opts::cg::stackProtectorStrong.IsEnabledByUser()) { + SetOption(kUseStackProtectorStrong); + } + + if (opts::cg::stackProtectorAll.IsEnabledByUser()) { + SetOption(kUseStackProtectorAll); + } + + if (opts::cg::debug.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gdwarf.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithDwarf); + SetParserOption(kWithDbgInfo); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + ClearOption(kWithMpl); + } + + if (opts::cg::gmixedsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + } + + if (opts::cg::gmixedasm.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + SetOption(kWithAsm); + } + + if (opts::cg::profile.IsEnabledByUser()) { + SetOption(kWithProfileCode); + SetParserOption(kWithProfileInfo); + } + + if (opts::cg::withRaLinearScan.IsEnabledByUser()) { + SetOption(kDoLinearScanRegAlloc); + ClearOption(kDoColorRegAlloc); + } + + if (opts::cg::withRaGraphColor.IsEnabledByUser()) { + SetOption(kDoColorRegAlloc); + ClearOption(kDoLinearScanRegAlloc); + } + + if (opts::cg::printFunc.IsEnabledByUser()) { + opts::cg::printFunc ? 
EnablePrintFunction() : DisablePrintFunction(); + } + + if (opts::cg::addDebugTrace.IsEnabledByUser()) { + SetOption(kAddDebugTrace); + } + + if (opts::cg::addFuncProfile.IsEnabledByUser()) { + SetOption(kAddFuncProfile); + } + + if (opts::cg::suppressFileinfo.IsEnabledByUser()) { + SetOption(kSuppressFileInfo); + } + + if (opts::cg::patchLongBranch.IsEnabledByUser()) { + SetOption(kPatchLongBranch); + } + + if (opts::cg::constFold.IsEnabledByUser()) { + opts::cg::constFold ? SetOption(kConstFold) : ClearOption(kConstFold); + } + + if (opts::cg::dumpCfg.IsEnabledByUser()) { + SetOption(kDumpCFG); + } + + if (opts::cg::classListFile.IsEnabledByUser()) { + SetClassListFile(opts::cg::classListFile); + } + + if (opts::cg::genCMacroDef.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kCMacroDef, opts::cg::genCMacroDef); + } + + if (opts::cg::genGctibFile.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGctib, opts::cg::genGctibFile); + } + + if (opts::cg::yieldpoint.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenYieldPoint, opts::cg::yieldpoint); + } + + if (opts::cg::localRc.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenLocalRc, opts::cg::localRc); + } + + if (opts::cg::ehExclusiveList.IsEnabledByUser()) { + SetEHExclusiveFile(opts::cg::ehExclusiveList); + EnableExclusiveEH(); + ParseExclusiveFunc(opts::cg::ehExclusiveList); + } + + if (opts::cg::cyclePatternList.IsEnabledByUser()) { + SetCyclePatternFile(opts::cg::cyclePatternList); + EnableEmitCyclePattern(); + ParseCyclePattern(opts::cg::cyclePatternList); + } + + if (opts::cg::cg.IsEnabledByUser()) { + SetRunCGFlag(opts::cg::cg); + opts::cg::cg ? SetOption(CGOptions::kDoCg) : ClearOption(CGOptions::kDoCg); + } + + if (opts::cg::objmap.IsEnabledByUser()) { + SetGenerateObjectMap(opts::cg::objmap); + } + + if (opts::cg::replaceAsm.IsEnabledByUser()) { + opts::cg::replaceAsm ? EnableReplaceASM() : DisableReplaceASM(); + } + + if (opts::cg::generalRegOnly.IsEnabledByUser()) { + opts::cg::generalRegOnly ? EnableGeneralRegOnly() : DisableGeneralRegOnly(); + } + + if (opts::cg::lazyBinding.IsEnabledByUser()) { + opts::cg::lazyBinding ? EnableLazyBinding() : DisableLazyBinding(); + } + + if (opts::cg::hotFix.IsEnabledByUser()) { + opts::cg::hotFix ? EnableHotFix() : DisableHotFix(); + } + + if (opts::cg::soeCheck.IsEnabledByUser()) { + SetOption(CGOptions::kSoeCheckInsert); + } + + if (opts::cg::checkArraystore.IsEnabledByUser()) { + opts::cg::checkArraystore ? EnableCheckArrayStore() : DisableCheckArrayStore(); + } + + if (opts::cg::ebo.IsEnabledByUser()) { + opts::cg::ebo ? EnableEBO() : DisableEBO(); + } + + if (opts::cg::cfgo.IsEnabledByUser()) { + opts::cg::cfgo ? EnableCFGO() : DisableCFGO(); + } + + if (opts::cg::ico.IsEnabledByUser()) { + opts::cg::ico ? EnableICO() : DisableICO(); + } + + if (opts::cg::storeloadopt.IsEnabledByUser()) { + opts::cg::storeloadopt ? EnableStoreLoadOpt() : DisableStoreLoadOpt(); + } + + if (opts::cg::globalopt.IsEnabledByUser()) { + opts::cg::globalopt ? EnableGlobalOpt() : DisableGlobalOpt(); + } + + if (opts::cg::hotcoldsplit.IsEnabledByUser()) { + opts::cg::hotcoldsplit ? EnableHotColdSplit() : DisableHotColdSplit(); + } + + if (opts::cg::prelsra.IsEnabledByUser()) { + opts::cg::prelsra ? EnablePreLSRAOpt() : DisablePreLSRAOpt(); + } + + if (opts::cg::lsraLvarspill.IsEnabledByUser()) { + opts::cg::lsraLvarspill ? 
EnableLocalRefSpill() : DisableLocalRefSpill(); + } + + if (opts::cg::lsraOptcallee.IsEnabledByUser()) { + opts::cg::lsraOptcallee ? EnableCalleeToSpill() : DisableCalleeToSpill(); + } + + if (opts::cg::prepeep.IsEnabledByUser()) { + opts::cg::prepeep ? EnablePrePeephole() : DisablePrePeephole(); + } + + if (opts::cg::peep.IsEnabledByUser()) { + opts::cg::peep ? EnablePeephole() : DisablePeephole(); + } + + if (opts::cg::retMerge.IsEnabledByUser()) { + opts::cg::retMerge ? EnableRetMerge() : DisableRetMerge(); + } + + if (opts::cg::preschedule.IsEnabledByUser()) { + opts::cg::preschedule ? EnablePreSchedule() : DisablePreSchedule(); + } + + if (opts::cg::schedule.IsEnabledByUser()) { + opts::cg::schedule ? EnableSchedule() : DisableSchedule(); + } + + if (opts::cg::vregRename.IsEnabledByUser()) { + opts::cg::vregRename ? EnableVregRename() : DisableVregRename(); + } + + if (opts::cg::fullcolor.IsEnabledByUser()) { + opts::cg::fullcolor ? EnableMultiPassColorRA() : DisableMultiPassColorRA(); + } + + if (opts::cg::writefieldopt.IsEnabledByUser()) { + opts::cg::writefieldopt ? EnableWriteRefFieldOpt() : DisableWriteRefFieldOpt(); + } + + if (opts::cg::dumpOlog.IsEnabledByUser()) { + opts::cg::dumpOlog ? EnableDumpOptimizeCommonLog() : DisableDumpOptimizeCommonLog(); + } + + if (opts::cg::nativeopt.IsEnabledByUser()) { + DisableNativeOpt(); + } + + if (opts::cg::dupBb.IsEnabledByUser()) { + opts::cg::dupBb ? DisableNoDupBB() : EnableNoDupBB(); + } + + if (opts::cg::calleeCfi.IsEnabledByUser()) { + opts::cg::calleeCfi ? DisableNoCalleeCFI() : EnableNoCalleeCFI(); + } + + if (opts::cg::proepilogue.IsEnabledByUser()) { + opts::cg::proepilogue ? SetOption(CGOptions::kProEpilogueOpt) + : ClearOption(CGOptions::kProEpilogueOpt); + } + + if (opts::cg::tailcall.IsEnabledByUser()) { + opts::cg::tailcall ? SetOption(CGOptions::kTailCallOpt) + : ClearOption(CGOptions::kTailCallOpt); + } + + if (opts::cg::calleeregsPlacement.IsEnabledByUser()) { + opts::cg::calleeregsPlacement ? EnableRegSavesOpt() : DisableRegSavesOpt(); + } + + if (opts::cg::ssapreSave.IsEnabledByUser()) { + opts::cg::ssapreSave ? EnableSsaPreSave() : DisableSsaPreSave(); + } + + if (opts::cg::ssupreRestore.IsEnabledByUser()) { + opts::cg::ssupreRestore ? EnableSsuPreRestore() : DisableSsuPreRestore(); + } + + if (opts::cg::lsraBb.IsEnabledByUser()) { + SetLSRABBOptSize(opts::cg::lsraBb); + } + + if (opts::cg::lsraInsn.IsEnabledByUser()) { + SetLSRAInsnOptSize(opts::cg::lsraInsn); + } + + if (opts::cg::lsraOverlap.IsEnabledByUser()) { + SetOverlapNum(opts::cg::lsraOverlap); + } + + if (opts::cg::remat.IsEnabledByUser()) { + SetRematLevel(opts::cg::remat); + } + + if (opts::cg::dumpPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::dumpPhases, GetDumpPhases()); + } + + if (opts::cg::target.IsEnabledByUser()) { + SetTargetMachine(opts::cg::target); + } + + if (opts::cg::skipPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::skipPhases, GetSkipPhases()); + } + + if (opts::cg::skipFrom.IsEnabledByUser()) { + SetSkipFrom(opts::cg::skipFrom); + } + + if (opts::cg::skipAfter.IsEnabledByUser()) { + SetSkipAfter(opts::cg::skipAfter); + } + + if (opts::cg::debugSchedule.IsEnabledByUser()) { + opts::cg::debugSchedule ? EnableDebugSched() : DisableDebugSched(); + } + + if (opts::cg::bruteforceSchedule.IsEnabledByUser()) { + opts::cg::bruteforceSchedule ? EnableDruteForceSched() : DisableDruteForceSched(); + } + + if (opts::cg::simulateSchedule.IsEnabledByUser()) { + opts::cg::simulateSchedule ? 
EnableSimulateSched() : DisableSimulateSched(); + } + + if (opts::profile.IsEnabledByUser()) { + SetProfileData(opts::profile); + } + + if (opts::cg::floatAbi.IsEnabledByUser()) { + SetABIType(opts::cg::floatAbi); + } + + if (opts::cg::filetype.IsEnabledByUser()) { + SetEmitFileType(opts::cg::filetype); + } + + if (opts::cg::longCalls.IsEnabledByUser()) { + opts::cg::longCalls ? EnableLongCalls() : DisableLongCalls(); + } + + if (opts::cg::functionSections.IsEnabledByUser()) { + opts::cg::functionSections ? EnableFunctionSections() : DisableFunctionSections(); + } + + if (opts::cg::omitFramePointer.IsEnabledByUser()) { + opts::cg::omitFramePointer ? DisableFramePointer() : EnableFramePointer(); + } + + if (opts::gconly.IsEnabledByUser()) { + opts::gconly ? EnableGCOnly() : DisableGCOnly(); + } + + if (opts::cg::fastMath.IsEnabledByUser()) { + opts::cg::fastMath ? EnableFastMath() : DisableFastMath(); + } + + if (opts::cg::alignAnalysis.IsEnabledByUser()) { + opts::cg::alignAnalysis ? EnableAlignAnalysis() : DisableAlignAnalysis(); + } + + if (opts::cg::condbrAlign.IsEnabledByUser()) { + opts::cg::condbrAlign ? EnableCondBrAlign() : DisableCondBrAlign(); + } + + /* big endian can be set with several options: --target, -Be. + * Triple takes to account all these options and allows to detect big endian with IsBigEndian() interface */ + Triple::GetTriple().IsBigEndian() ? EnableBigEndianInCG() : DisableBigEndianInCG(); + (maple::Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) ? EnableArm64ilp32() : DisableArm64ilp32(); + + if (opts::cg::cgSsa.IsEnabledByUser()) { + opts::cg::cgSsa ? EnableCGSSA() : DisableCGSSA(); + } + + if (opts::cg::common.IsEnabledByUser()) { + opts::cg::common ? EnableCommon() : DisableCommon(); + } + + if (opts::cg::alignMinBbSize.IsEnabledByUser()) { + SetAlignMinBBSize(opts::cg::alignMinBbSize); + } + + if (opts::cg::alignMaxBbSize.IsEnabledByUser()) { + SetAlignMaxBBSize(opts::cg::alignMaxBbSize); + } + + if (opts::cg::loopAlignPow.IsEnabledByUser()) { + SetLoopAlignPow(opts::cg::loopAlignPow); + } + + if (opts::cg::jumpAlignPow.IsEnabledByUser()) { + SetJumpAlignPow(opts::cg::jumpAlignPow); + } + + if (opts::cg::funcAlignPow.IsEnabledByUser()) { + SetFuncAlignPow(opts::cg::funcAlignPow); + } + + /* override some options when loc, dwarf is generated */ + if (WithLoc()) { + DisableSchedule(); + SetOption(kWithSrc); + } + if (WithDwarf()) { + DisableEBO(); + DisableCFGO(); + DisableICO(); + DisableSchedule(); + SetOption(kDebugFriendly); + SetOption(kWithSrc); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + return true; +} + +void CGOptions::ParseExclusiveFunc(const std::string &fileName) { + std::ifstream file(fileName); + if (!file.is_open()) { + ERR(kLncErr, "%s open failed!", fileName.c_str()); + return; + } + std::string content; + while (file >> content) { + ehExclusiveFunctionName.push_back(content); + } +} + +void CGOptions::ParseCyclePattern(const std::string &fileName) { + std::ifstream file(fileName); + if (!file.is_open()) { + ERR(kLncErr, "%s open failed!", fileName.c_str()); + return; + } + std::string content; + std::string classStr("class: "); + while (getline(file, content)) { + if (content.compare(0, classStr.length(), classStr) == 0) { + std::vector classPatternContent; + std::string patternContent; + while (getline(file, patternContent)) { + if (patternContent.length() == 0) { + break; + } + classPatternContent.push_back(patternContent); + } + std::string className = content.substr(classStr.length()); + 
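+      /* A pattern block runs from a "class: <name>" header to the next empty
+         line; it is stored keyed by class name. */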
+      CGOptions::cyclePatternMap[className] = std::move(classPatternContent);
+    }
+  }
+}
+
+void CGOptions::SetRange(const std::string &str, const std::string &cmd, Range &subRange) {
+  const std::string &tmpStr = str;
+  size_t comma = tmpStr.find_first_of(",", 0);
+  subRange.enable = true;
+
+  if (comma != std::string::npos) {
+    subRange.begin = std::stoul(tmpStr.substr(0, comma), nullptr);
+    subRange.end = std::stoul(tmpStr.substr(comma + 1), nullptr);
+  }
+  CHECK_FATAL(subRange.begin < subRange.end, "invalid values for %s=%lu,%lu", cmd.c_str(), subRange.begin,
+              subRange.end);
+}
+
+/* Set default options according to different languages. */
+void CGOptions::SetDefaultOptions(const maple::MIRModule &mod) {
+  if (mod.IsJavaModule()) {
+    generateFlag = generateFlag | kGenYieldPoint | kGenLocalRc | kGrootList | kPrimorList;
+  }
+  insertYieldPoint = GenYieldPoint();
+}
+
+void CGOptions::EnableO0() {
+  optimizeLevel = kLevel0;
+  doEBO = false;
+  doCGSSA = false;
+  doCFGO = false;
+  doICO = false;
+  doPrePeephole = false;
+  doPeephole = false;
+  doStoreLoadOpt = false;
+  doGlobalOpt = false;
+  doPreLSRAOpt = false;
+  doLocalRefSpill = false;
+  doCalleeToSpill = false;
+  doPreSchedule = false;
+  doSchedule = false;
+  doRegSavesOpt = false;
+  useSsaPreSave = false;
+  useSsuPreRestore = false;
+  doWriteRefFieldOpt = false;
+  doAlignAnalysis = false;
+  doCondBrAlign = false;
+
+  if (maple::Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) {
+    ClearOption(kUseStackProtectorStrong);
+    ClearOption(kUseStackProtectorAll);
+  } else {
+    SetOption(kUseStackProtectorStrong);
+    SetOption(kUseStackProtectorAll);
+  }
+
+  ClearOption(kConstFold);
+  ClearOption(kProEpilogueOpt);
+  ClearOption(kTailCallOpt);
+}
+
+void CGOptions::EnableO1() {
+  optimizeLevel = kLevel1;
+  doPreLSRAOpt = true;
+  doCalleeToSpill = true;
+  SetOption(kConstFold);
+  SetOption(kProEpilogueOpt);
+  SetOption(kTailCallOpt);
+  ClearOption(kUseStackProtectorStrong);
+  ClearOption(kUseStackProtectorAll);
+}
+
+void CGOptions::EnableO2() {
+  optimizeLevel = kLevel2;
+  doEBO = true;
+  doCGSSA = true;
+  doCFGO = true;
+  doICO = true;
+  doPrePeephole = true;
+  doPeephole = true;
+  doStoreLoadOpt = true;
+  doGlobalOpt = true;
+  doPreSchedule = true;
+  doSchedule = true;
+  doAlignAnalysis = true;
+  doCondBrAlign = true;
+  SetOption(kConstFold);
+  ClearOption(kUseStackProtectorStrong);
+  ClearOption(kUseStackProtectorAll);
+#if TARGARM32
+  doPreLSRAOpt = false;
+  doLocalRefSpill = false;
+  doCalleeToSpill = false;
+  doWriteRefFieldOpt = false;
+  ClearOption(kProEpilogueOpt);
+  ClearOption(kTailCallOpt);
+#else
+  doPreLSRAOpt = true;
+  doLocalRefSpill = true;
+  doCalleeToSpill = true;
+  doRegSavesOpt = false;
+  useSsaPreSave = false;
+  useSsuPreRestore = true;
+  doWriteRefFieldOpt = true;
+  SetOption(kProEpilogueOpt);
+  SetOption(kTailCallOpt);
+#endif
+}
+
+void CGOptions::EnableLiteCG() {
+  optimizeLevel = kLevelLiteCG;
+  doEBO = false;
+  doCGSSA = false;
+  doCFGO = false;
+  doICO = false;
+  doPrePeephole = false;
+  doPeephole = false;
+  doStoreLoadOpt = false;
+  doGlobalOpt = false;
+  doPreLSRAOpt = false;
+  doLocalRefSpill = false;
+  doCalleeToSpill = false;
+  doPreSchedule = false;
+  doSchedule = false;
+  doRegSavesOpt = false;
+  useSsaPreSave = false;
+  useSsuPreRestore = false;
+  doWriteRefFieldOpt = false;
+  doAlignAnalysis = false;
+  doCondBrAlign = false;
+
+  ClearOption(kUseStackProtectorStrong);
+  ClearOption(kUseStackProtectorAll);
+  ClearOption(kConstFold);
+  ClearOption(kProEpilogueOpt);
+  ClearOption(kTailCallOpt);
+}
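+
+/*
+ * Preset summary: O0 and LiteCG turn every backend optimization off (and
+ * LiteCG never enables stack protection); O1 adds only pre-LSRA cleanup and
+ * callee-save spilling; O2/Os enable the full set, reduced on TARGARM32.
+ */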
+
+void CGOptions::SetTargetMachine(const std::string &str) {
+  if (str == "aarch64") {
+    targetArch = "aarch64";
+  } else if (str == "x86_64") {
+    targetArch = "x86_64";
+  } else {
+    CHECK_FATAL(false, "unknown target. not implement yet");
+  }
+}
+
+void CGOptions::SplitPhases(const std::string &str, std::unordered_set<std::string> &set) {
+  const std::string &tmpStr{ str };
+  if ((tmpStr.compare("*") == 0) || (tmpStr.compare("cgir") == 0)) {
+    (void)set.insert(tmpStr);
+    return;
+  }
+  StringUtils::Split(tmpStr, set, ',');
+}
+
+bool CGOptions::DumpPhase(const std::string &phase) {
+  return (IS_STR_IN_SET(dumpPhases, "*") || IS_STR_IN_SET(dumpPhases, "cgir") || IS_STR_IN_SET(dumpPhases, phase));
+}
+
+/* dump everything (dumpFunc == "*") or only the function whose name matches dumpFunc */
+bool CGOptions::FuncFilter(const std::string &name) {
+  return dumpFunc == "*" || dumpFunc == name;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_options.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_options.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..563b5b9e932069f87bd730f8a693cc2794614dc6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/cg_options.cpp
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +#include "driver_options.h" + +#include +#include +#include + +namespace opts::cg { + +maplecl::Option pie({"-fPIE", "--pie", "-pie"}, + " --pie \tGenerate position-independent executable\n" + " --no-pie\n", + {cgCategory, driverCategory, ldCategory}, + maplecl::DisableWith("--no-pie")); + +maplecl::Option fpic({"-fPIC", "--fpic", "-fpic"}, + " --fpic \tGenerate position-independent shared library\n" + " --no-fpic\n", + {cgCategory, driverCategory, ldCategory}, + maplecl::DisableWith("--no-fpic")); + +maplecl::Option verboseAsm({"--verbose-asm"}, + " --verbose-asm \tAdd comments to asm output\n" + " --no-verbose-asm\n", + {cgCategory}, + maplecl::DisableWith("--no-verbose-asm")); + +maplecl::Option verboseCg({"--verbose-cg"}, + " --verbose-cg \tAdd comments to cg output\n" + " --no-verbose-cg\n", + {cgCategory}, + maplecl::DisableWith("--no-verbose-cg")); + +maplecl::Option maplelinker({"--maplelinker"}, + " --maplelinker \tGenerate the MapleLinker .s format\n" + " --no-maplelinker\n", + {cgCategory}, + maplecl::DisableWith("--no-maplelinker")); + +maplecl::Option quiet({"--quiet"}, + " --quiet \tBe quiet (don't output debug messages)\n" + " --no-quiet\n", + {cgCategory}, + maplecl::DisableWith("--no-quiet")); + +maplecl::Option cg({"--cg"}, + " --cg \tGenerate the output .s file\n" + " --no-cg\n", + {cgCategory}, + maplecl::DisableWith("--no-cg")); + +maplecl::Option replaceAsm({"--replaceasm"}, + " --replaceasm \tReplace the the assembly code\n" + " --no-replaceasm\n", + {cgCategory}, + maplecl::DisableWith("--no-replaceasm")); + +maplecl::Option generalRegOnly({"--general-reg-only"}, + " --general-reg-only \tdisable floating-point or Advanced SIMD registers\n" + " --no-general-reg-only\n", + {cgCategory}, + maplecl::DisableWith("--no-general-reg-only")); + +maplecl::Option lazyBinding({"--lazy-binding"}, + " --lazy-binding \tBind class symbols lazily[default off]\n", + {cgCategory}, + maplecl::DisableWith("--no-lazy-binding")); + +maplecl::Option hotFix({"--hot-fix"}, + " --hot-fix \tOpen for App hot fix[default off]\n" + " --no-hot-fix\n", + {cgCategory}, + maplecl::DisableWith("--no-hot-fix")); + +maplecl::Option ebo({"--ebo"}, + " --ebo \tPerform Extend block optimization\n" + " --no-ebo\n", + {cgCategory}, + maplecl::DisableWith("--no-ebo")); + +maplecl::Option cfgo({"--cfgo"}, + " --cfgo \tPerform control flow optimization\n" + " --no-cfgo\n", + {cgCategory}, + maplecl::DisableWith("--no-cfgo")); + +maplecl::Option ico({"--ico"}, + " --ico \tPerform if-conversion optimization\n" + " --no-ico\n", + {cgCategory}, + maplecl::DisableWith("--no-ico")); + +maplecl::Option storeloadopt({"--storeloadopt"}, + " --storeloadopt \tPerform global store-load optimization\n" + " --no-storeloadopt\n", + {cgCategory}, + maplecl::DisableWith("--no-storeloadopt")); + +maplecl::Option globalopt({"--globalopt"}, + " --globalopt \tPerform global optimization\n" + " --no-globalopt\n", + {cgCategory}, + maplecl::DisableWith("--no-globalopt")); + +maplecl::Option hotcoldsplit({"--hotcoldsplit"}, + " --hotcoldsplit \tPerform HotColdSplit optimization\n" + " --no-hotcoldsplit\n", + {cgCategory}, + maplecl::DisableWith("--no-hotcoldsplit")); + +maplecl::Option prelsra({"--prelsra"}, + " --prelsra \tPerform live interval simplification in LSRA\n" + " --no-prelsra\n", + {cgCategory}, + maplecl::DisableWith("--no-prelsra")); + +maplecl::Option lsraLvarspill({"--lsra-lvarspill"}, + " --lsra-lvarspill" + " \tPerform LSRA spill using local ref var stack locations\n" + " --no-lsra-lvarspill\n", + 
{cgCategory}, + maplecl::DisableWith("--no-lsra-lvarspill")); + +maplecl::Option lsraOptcallee({"--lsra-optcallee"}, + " --lsra-optcallee \tSpill callee if only one def to use\n" + " --no-lsra-optcallee\n", + {cgCategory}, + maplecl::DisableWith("--no-lsra-optcallee")); + +maplecl::Option calleeregsPlacement({"--calleeregs-placement"}, + " --calleeregs-placement \tOptimize placement of callee-save registers\n" + " --no-calleeregs-placement\n", + {cgCategory}, + maplecl::DisableWith("--no-calleeregs-placement")); + +maplecl::Option ssapreSave({"--ssapre-save"}, + " --ssapre-save \tUse ssapre algorithm to save callee-save registers\n" + " --no-ssapre-save\n", + {cgCategory}, + maplecl::DisableWith("--no-ssapre-save")); + +maplecl::Option ssupreRestore({"--ssupre-restore"}, + " --ssupre-restore" + " \tUse ssupre algorithm to restore callee-save registers\n" + " --no-ssupre-restore\n", + {cgCategory}, + maplecl::DisableWith("--no-ssupre-restore")); + +maplecl::Option prepeep({"--prepeep"}, + " --prepeep \tPerform peephole optimization before RA\n" + " --no-prepeep\n", + {cgCategory}, + maplecl::DisableWith("--no-prepeep")); + +maplecl::Option peep({"--peep"}, + " --peep \tPerform peephole optimization after RA\n" + " --no-peep\n", + {cgCategory}, + maplecl::DisableWith("--no-peep")); + +maplecl::Option preschedule({"--preschedule"}, + " --preschedule \tPerform prescheduling\n" + " --no-preschedule\n", + {cgCategory}, + maplecl::DisableWith("--no-preschedule")); + +maplecl::Option schedule({"--schedule"}, + " --schedule \tPerform scheduling\n" + " --no-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-schedule")); + +maplecl::Option retMerge({"--ret-merge"}, + " --ret-merge \tMerge return bb into a single destination\n" + " --no-ret-merge \tallows for multiple return bb\n", + {cgCategory}, + maplecl::DisableWith("--no-ret-merge")); + +maplecl::Option vregRename({"--vreg-rename"}, + " --vreg-rename" + " \tPerform rename of long live range around loops in coloring RA\n" + " --no-vreg-rename\n", + {cgCategory}, + maplecl::DisableWith("--no-vreg-rename")); + +maplecl::Option fullcolor({"--fullcolor"}, + " --fullcolor \tPerform multi-pass coloring RA\n" + " --no-fullcolor\n", + {cgCategory}, + maplecl::DisableWith("--no-fullcolor")); + +maplecl::Option writefieldopt({"--writefieldopt"}, + " --writefieldopt \tPerform WriteRefFieldOpt\n" + " --no-writefieldopt\n", + {cgCategory}, + maplecl::DisableWith("--no-writefieldopt")); + +maplecl::Option dumpOlog({"--dump-olog"}, + " --dump-olog \tDump CFGO and ICO debug information\n" + " --no-dump-olog\n", + {cgCategory}, + maplecl::DisableWith("--no-dump-olog")); + +maplecl::Option nativeopt({"--nativeopt"}, + " --nativeopt \tEnable native opt\n" + " --no-nativeopt\n", + {cgCategory}, + maplecl::DisableWith("--no-nativeopt")); + +maplecl::Option objmap({"--objmap"}, + " --objmap" + " \tCreate object maps (GCTIBs) inside the main output (.s) file\n" + " --no-objmap\n", + {cgCategory}, + maplecl::DisableWith("--no-objmap")); + +maplecl::Option yieldpoint({"--yieldpoint"}, + " --yieldpoint \tGenerate yieldpoints [default]\n" + " --no-yieldpoint\n", + {cgCategory}, + maplecl::DisableWith("--no-yieldpoint")); + +maplecl::Option proepilogue({"--proepilogue"}, + " --proepilogue \tDo tail call optimization and" + " eliminate unnecessary prologue and epilogue.\n" + " --no-proepilogue\n", + {cgCategory}, + maplecl::DisableWith("--no-proepilogue")); + +maplecl::Option localRc({"--local-rc"}, + " --local-rc \tHandle Local Stack RC [default]\n" + " 
--no-local-rc\n", + {cgCategory}, + maplecl::DisableWith("--no-local-rc")); + +maplecl::Option insertCall({"--insert-call"}, + " --insert-call=name \tInsert a call to the named function\n", + {cgCategory}); + +maplecl::Option addDebugTrace({"--add-debug-trace"}, + " --add-debug-trace" + " \tInstrument the output .s file to print call traces at runtime\n", + {cgCategory}); + +maplecl::Option addFuncProfile({"--add-func-profile"}, + " --add-func-profile" + " \tInstrument the output .s file to record func at runtime\n", + {cgCategory}); + +maplecl::Option classListFile({"--class-list-file"}, + " --class-list-file" + " \tSet the class list file for the following generation options,\n" + " \tif not given, " + "generate for all visible classes\n" + " \t--class-list-file=class_list_file\n", + {cgCategory}); + +maplecl::Option genCMacroDef({"--gen-c-macro-def"}, + " --gen-c-macro-def" + " \tGenerate a .def file that contains extra type metadata, including the\n" + " \tclass instance sizes and field offsets (default)\n" + " --no-gen-c-macro-def\n", + {cgCategory}, + maplecl::DisableWith("--no-gen-c-macro-def")); + +maplecl::Option genGctibFile({"--gen-gctib-file"}, + " --gen-gctib-file" + " \tGenerate a separate .s file for GCTIBs. Usually used together with\n" + " \t--no-objmap (not implemented yet)\n" + " --no-gen-gctib-file\n", + {cgCategory}, + maplecl::DisableWith("--no-gen-gctib-file")); + +maplecl::Option stackProtectorStrong({"--stack-protector-strong", "-fstack-protector", "-fstack-protector-strong"}, + " --stack-protector-strong \tadd stack guard for some function \n" + " --no-stack-protector-strong \n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-stack-protector-strong", "-fno-stack-protector"})); + +maplecl::Option stackProtectorAll({"--stack-protector-all"}, + " --stack-protector-all \tadd stack guard for all functions \n" + " --no-stack-protector-all\n", + {cgCategory}, + maplecl::DisableWith("--no-stack-protector-all")); + +maplecl::Option debug({"-g", "--g"}, + " -g \tGenerate debug information\n", + {cgCategory}); + +maplecl::Option gdwarf({"--gdwarf"}, + " --gdwarf \tGenerate dwarf infomation\n", + {cgCategory}); + +maplecl::Option gsrc({"--gsrc"}, + " --gsrc \tUse original source file instead of mpl file for debugging\n", + {cgCategory}); + +maplecl::Option gmixedsrc({"--gmixedsrc"}, + " --gmixedsrc" + " \tUse both original source file and mpl file for debugging\n", + {cgCategory}); + +maplecl::Option gmixedasm({"--gmixedasm"}, + " --gmixedasm" + " \tComment out both original source file and mpl file for debugging\n", + {cgCategory}); + +maplecl::Option profile({"--p", "-p"}, + " -p \tGenerate profiling infomation\n", + {cgCategory}); + +maplecl::Option withRaLinearScan({"--with-ra-linear-scan"}, + " --with-ra-linear-scan \tDo linear-scan register allocation\n", + {cgCategory}); + +maplecl::Option withRaGraphColor({"--with-ra-graph-color"}, + " --with-ra-graph-color \tDo coloring-based register allocation\n", + {cgCategory}); + +maplecl::Option patchLongBranch({"--patch-long-branch"}, + " --patch-long-branch" + " \tEnable patching long distance branch with jumping pad\n", + {cgCategory}); + +maplecl::Option constFold({"--const-fold"}, + " --const-fold \tEnable constant folding\n" + " --no-const-fold\n", + {cgCategory}, + maplecl::DisableWith("--no-const-fold")); + +maplecl::Option ehExclusiveList({"--eh-exclusive-list"}, + " --eh-exclusive-list \tFor generating gold files in unit testing\n" + " \t--eh-exclusive-list=list_file\n", + {cgCategory}); + 
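+// The five preset options below (-O0/-O1/-O2/-Os/-Olitecg) select the backend
+// optimization level; CGOptions::DecideMplcgRealLevel() in cg_option.cpp maps
+// them onto the EnableO0/EnableO1/EnableO2/EnableLiteCG presets.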
+maplecl::Option o0({"-O0", "--O0"}, + " -O0 \tNo optimization.\n", + {cgCategory}); + +maplecl::Option o1({"-O1", "--O1"}, + " -O1 \tDo some optimization.\n", + {cgCategory}); + +maplecl::Option o2({"-O2", "--O2"}, + " -O2 \tDo some optimization.\n", + {cgCategory}); + +maplecl::Option os({"-Os", "--Os"}, + " -Os \tOptimize for size, based on O2.\n", + {cgCategory}); + +maplecl::Option olitecg({"-Olitecg", "--Olitecg"}, + " -Olitecg \tOptimize for litecg.\n", + {cgCategory}); + +maplecl::Option lsraBb({"--lsra-bb"}, + " --lsra-bb=NUM" + " \tSwitch to spill mode if number of bb in function exceeds NUM\n", + {cgCategory}); + +maplecl::Option lsraInsn({"--lsra-insn"}, + " --lsra-insn=NUM" + " \tSwitch to spill mode if number of instructons in function exceeds NUM\n", + {cgCategory}); + +maplecl::Option lsraOverlap({"--lsra-overlap"}, + " --lsra-overlap=NUM \toverlap NUM to decide pre spill in lsra\n", + {cgCategory}); + +maplecl::Option remat({"--remat"}, + " --remat \tEnable rematerialization during register allocation\n" + " \t 0: no rematerialization (default)\n" + " \t >= 1: rematerialize constants\n" + " \t >= 2: rematerialize addresses\n" + " \t >= 3: rematerialize local dreads\n" + " \t >= 4: rematerialize global dreads\n", + {cgCategory}); + +maplecl::Option suppressFileinfo({"--suppress-fileinfo"}, + " --suppress-fileinfo \tFor generating gold files in unit testing\n", + {cgCategory}); + +maplecl::Option dumpCfg({"--dump-cfg"}, + " --dump-cfg\n", + {cgCategory}); + +maplecl::Option target({"--target"}, + " --target=TARGETMACHINE \t generate code for TARGETMACHINE\n", + {cgCategory}, + maplecl::optionalValue); + +maplecl::Option dumpPhases({"--dump-phases"}, + " --dump-phases=PHASENAME,..." + " \tEnable debug trace for specified phases in the comma separated list\n", + {cgCategory}); + +maplecl::Option skipPhases({"--skip-phases"}, + " --skip-phases=PHASENAME,..." 
+ " \tSkip the phases specified in the comma separated list\n", + {cgCategory}); + +maplecl::Option skipFrom({"--skip-from"}, + " --skip-from=PHASENAME \tSkip the rest phases from PHASENAME(included)\n", + {cgCategory}); + +maplecl::Option skipAfter({"--skip-after"}, + " --skip-after=PHASENAME \tSkip the rest phases after PHASENAME(excluded)\n", + {cgCategory}); + +maplecl::Option dumpFunc({"--dump-func"}, + " --dump-func=FUNCNAME" + " \tDump/trace only for functions whose names contain FUNCNAME as substring\n" + " \t(can only specify once)\n", + {cgCategory}); + +maplecl::Option timePhases({"--time-phases"}, + " --time-phases \tCollect compilation time stats for each phase\n" + " --no-time-phases \tDon't Collect compilation time stats for each phase\n", + {cgCategory}, + maplecl::DisableWith("--no-time-phases")); + +maplecl::Option useBarriersForVolatile({"--use-barriers-for-volatile"}, + " --use-barriers-for-volatile \tOptimize volatile load/str\n" + " --no-use-barriers-for-volatile\n", + {cgCategory}, + maplecl::DisableWith("--no-use-barriers-for-volatile")); + +maplecl::Option range({"--range"}, + " --range=NUM0,NUM1 \tOptimize only functions in the range [NUM0, NUM1]\n", + {cgCategory}); + +maplecl::Option fastAlloc({"--fast-alloc"}, + " --fast-alloc=[0/1] \tO2 RA fast mode, set to 1 to spill all registers\n", + {cgCategory}); + +maplecl::Option spillRange({"--spill_range"}, + " --spill_range=NUM0,NUM1 \tO2 RA spill registers in the range [NUM0, NUM1]\n", + {cgCategory}); + +maplecl::Option dupBb({"--dup-bb"}, + " --dup-bb \tAllow cfg optimizer to duplicate bb\n" + " --no-dup-bb \tDon't allow cfg optimizer to duplicate bb\n", + {cgCategory}, + maplecl::DisableWith("--no-dup-bb")); + +maplecl::Option calleeCfi({"--callee-cfi"}, + " --callee-cfi \tcallee cfi message will be generated\n" + " --no-callee-cfi \tcallee cfi message will not be generated\n", + {cgCategory}, + maplecl::DisableWith("--no-callee-cfi")); + +maplecl::Option printFunc({"--print-func"}, + " --print-func\n" + " --no-print-func\n", + {cgCategory}, + maplecl::DisableWith("--no-print-func")); + +maplecl::Option cyclePatternList({"--cycle-pattern-list"}, + " --cycle-pattern-list \tFor generating cycle pattern meta\n" + " \t--cycle-pattern-list=list_file\n", + {cgCategory}); + +maplecl::Option duplicateAsmList({"--duplicate_asm_list"}, + " --duplicate_asm_list \tDuplicate asm functions to delete plt call\n" + " \t--duplicate_asm_list=list_file\n", + {cgCategory}); + +maplecl::Option duplicateAsmList2({"--duplicate_asm_list2"}, + " --duplicate_asm_list2" + " \tDuplicate more asm functions to delete plt call\n" + " \t--duplicate_asm_list2=list_file\n", + {cgCategory}); + +maplecl::Option blockMarker({"--block-marker"}, + " --block-marker" + " \tEmit block marker symbols in emitted assembly files\n", + {cgCategory}); + +maplecl::Option soeCheck({"--soe-check"}, + " --soe-check \tInsert a soe check instruction[default off]\n", + {cgCategory}); + +maplecl::Option checkArraystore({"--check-arraystore"}, + " --check-arraystore \tcheck arraystore exception[default off]\n" + " --no-check-arraystore\n", + {cgCategory}, + maplecl::DisableWith("--no-check-arraystore")); + +maplecl::Option debugSchedule({"--debug-schedule"}, + " --debug-schedule \tdump scheduling information\n" + " --no-debug-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-debug-schedule")); + +maplecl::Option bruteforceSchedule({"--bruteforce-schedule"}, + " --bruteforce-schedule \tdo brute force schedule\n" + " --no-bruteforce-schedule\n", + {cgCategory}, 
+ maplecl::DisableWith("--no-bruteforce-schedule")); + +maplecl::Option simulateSchedule({"--simulate-schedule"}, + " --simulate-schedule \tdo simulate schedule\n" + " --no-simulate-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-simulate-schedule")); + +maplecl::Option crossLoc({"--cross-loc"}, + " --cross-loc \tcross loc insn schedule\n" + " --no-cross-loc\n", + {cgCategory}, + maplecl::DisableWith("--no-cross-loc")); + +maplecl::Option floatAbi({"--float-abi"}, + " --float-abi=name \tPrint the abi type.\n" + " \tname=hard: abi-hard (Default)\n" + " \tname=soft: abi-soft\n" + " \tname=softfp: abi-softfp\n", + {cgCategory}); + +maplecl::Option filetype({"--filetype"}, + " --filetype=name \tChoose a file type.\n" + " \tname=asm: Emit an assembly file (Default)\n" + " \tname=obj: Emit an object file\n" + " \tname=null: not support yet\n", + {cgCategory}); + +maplecl::Option longCalls({"--long-calls"}, + " --long-calls \tgenerate long call\n" + " --no-long-calls\n", + {cgCategory}, + maplecl::DisableWith("--no-long-calls")); + +maplecl::Option functionSections({"--function-sections"}, + " --function-sections \t \n" + " --no-function-sections\n", + {cgCategory}, + maplecl::DisableWith("--no-function-sections")); + +maplecl::Option omitFramePointer({"--omit-frame-pointer", "-fomit-frame-pointer"}, + " --omit-frame-pointer \t do not use frame pointer \n" + " --no-omit-frame-pointer\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-omit-frame-pointer", "-fno-omit-frame-pointer"})); + +maplecl::Option fastMath({"--fast-math"}, + " --fast-math \tPerform fast math\n" + " --no-fast-math\n", + {cgCategory}, + maplecl::DisableWith("--no-fast-math")); + +maplecl::Option tailcall({"--tailcall"}, + " --tailcall \tDo tail call optimization\n" + " --no-tailcall\n", + {cgCategory}, + maplecl::DisableWith("--no-tailcall")); + +maplecl::Option alignAnalysis({"--align-analysis"}, + " --align-analysis \tPerform alignanalysis\n" + " --no-align-analysis\n", + {cgCategory}, + maplecl::DisableWith("--no-align-analysis")); + +maplecl::Option cgSsa({"--cg-ssa"}, + " --cg-ssa \tPerform cg ssa\n" + " --no-cg-ssa\n", + {cgCategory}, + maplecl::DisableWith("--no-cg-ssa")); + +maplecl::Option common({"--common", "-fcommon"}, + " --common \t \n" + " --no-common\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-common", "-fno-common"})); + +maplecl::Option condbrAlign({"--condbr-align"}, + " --condbr-align \tPerform condbr align\n" + " --no-condbr-align\n", + {cgCategory}, + maplecl::DisableWith("--no-condbr-align")); + +maplecl::Option alignMinBbSize({"--align-min-bb-size"}, + " --align-min-bb-size=NUM" + " \tO2 Minimum bb size for alignment unit:byte\n", + {cgCategory}); + +maplecl::Option alignMaxBbSize({"--align-max-bb-size"}, + " --align-max-bb-size=NUM" + " \tO2 Maximum bb size for alignment unit:byte\n", + {cgCategory}); + +maplecl::Option loopAlignPow({"--loop-align-pow"}, + " --loop-align-pow=NUM \tO2 loop bb align pow (NUM == 0, no loop-align)\n", + {cgCategory}); + +maplecl::Option jumpAlignPow({"--jump-align-pow"}, + " --jump-align-pow=NUM \tO2 jump bb align pow (NUM == 0, no jump-align)\n", + {cgCategory}); + +maplecl::Option funcAlignPow({"--func-align-pow"}, + " --func-align-pow=NUM \tO2 func bb align pow (NUM == 0, no func-align)\n", + {cgCategory}); + +} diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_phasemanager.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_phasemanager.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..9becc9942f8ee034195cde14fd087ae0a5279802 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_phasemanager.cpp @@ -0,0 +1,579 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_phasemanager.h" +#include +#include +#include "cg_option.h" +#include "args.h" +#include "label_creation.h" +#include "isel.h" +#include "offset_adjust.h" +#include "alignment.h" +#include "yieldpoint.h" +#include "emit.h" +#include "reg_alloc.h" +#if TARGAARCH64 +#include "aarch64_emitter.h" +#include "aarch64_cg.h" +#elif TARGRISCV64 +#include "riscv64_emitter.h" +#elif TARGX86_64 +#include "x64_cg.h" +#include "x64_emitter.h" +#include "string_utils.h" +#endif + +namespace maplebe { +#define JAVALANG (module.IsJavaModule()) +#define CLANG (module.GetSrcLang() == kSrcLangC) + +#define RELEASE(pointer) \ + do { \ + if (pointer != nullptr) { \ + delete pointer; \ + pointer = nullptr; \ + } \ + } while (0) + +namespace { + +void DumpMIRFunc(MIRFunction &func, const char *msg, bool printAlways = false, const char* extraMsg = nullptr) { + bool dumpAll = (CGOptions::GetDumpPhases().find("*") != CGOptions::GetDumpPhases().end()); + bool dumpFunc = CGOptions::FuncFilter(func.GetName()); + + if (printAlways || (dumpAll && dumpFunc)) { + LogInfo::MapleLogger() << msg << '\n'; + func.Dump(); + + if (extraMsg) { + LogInfo::MapleLogger() << extraMsg << '\n'; + } + } +} + +} /* anonymous namespace */ + +void CgFuncPM::GenerateOutPutFile(MIRModule &m) { + CHECK_FATAL(cg != nullptr, "cg is null"); + CHECK_FATAL(cg->GetEmitter(), "emitter is null"); +#if TARGX86_64 + assembler::Assembler &assm = static_cast(*cg->GetEmitter()).GetAssembler(); + if (!cgOptions->SuppressFileInfo()) { + assm.InitialFileInfo(m.GetInputFileName()); + } + // TODO: Dwarf info + if (cgOptions->WithDwarf()) { + assm.EmitDIHeader(); + } +#else + if (CGOptions::GetEmitFileType() == CGOptions::kAsm) { + if (!cgOptions->SuppressFileInfo()) { + cg->GetEmitter()->EmitFileInfo(m.GetInputFileName()); + } + if (cgOptions->WithDwarf()) { + cg->GetEmitter()->EmitDIHeader(); + } + } +#endif + InitProfile(m); +} + +bool CgFuncPM::FuncLevelRun(CGFunc &cgFunc, AnalysisDataManager &serialADM) { + bool changed = false; + for (size_t i = 0; i < phasesSequence.size(); ++i) { + SolveSkipFrom(CGOptions::GetSkipFromPhase(), i); + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Run MplCG " << (curPhase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << curPhase->PhaseName() << " ]---\n"; + } + if (curPhase->IsAnalysis()) { + changed |= RunAnalysisPhase, CGFunc>(*curPhase, serialADM, cgFunc); + } else { + changed |= RunTransformPhase, CGFunc>(*curPhase, serialADM, cgFunc); + DumpFuncCGIR(cgFunc, curPhase->PhaseName()); + } + SolveSkipAfter(CGOptions::GetSkipAfterPhase(), i); + } + return changed; +} + +void CgFuncPM::PostOutPut(MIRModule &m) { +#if TARGX86_64 + X64Emitter *x64Emitter = static_cast(cg->GetEmitter()); + assembler::Assembler &assm = x64Emitter->GetAssembler(); + if (cgOptions->WithDwarf()) { + assm.EmitDIFooter(); + } + x64Emitter->EmitGlobalVariable(*cg); + x64Emitter->EmitDebugInfo(*cg); + assm.FinalizeFileInfo(); + assm.CloseOutput(); +#else +if (CGOptions::GetEmitFileType() == CGOptions::kAsm) { + cg->GetEmitter()->EmitHugeSoRoutines(true); + if (cgOptions->WithDwarf()) { + cg->GetEmitter()->EmitDIFooter(); + } + /* Emit global info */ + EmitGlobalInfo(m); + } else { + cg->GetEmitter()->Finish(); + cg->GetEmitter()->CloseOutput(); + } +#endif +} + +void MarkUsedStaticSymbol(const StIdx &symbolIdx); +std::map visitedSym; + +void CollectStaticSymbolInVar(MIRConst *mirConst) { + if (mirConst->GetKind() == kConstAddrof) { + auto *addrSymbol = static_cast(mirConst); + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(addrSymbol->GetSymbolIndex().Idx(), true); + if (sym != nullptr) { + MarkUsedStaticSymbol(sym->GetStIdx()); + } + } else if (mirConst->GetKind() == kConstAggConst) { + auto &constVec = static_cast(mirConst)->GetConstVec(); + for (auto &cst : constVec) { + CollectStaticSymbolInVar(cst); + } + } +} + +void MarkUsedStaticSymbol(const StIdx &symbolIdx) { + if (!symbolIdx.IsGlobal()) { + return; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx.Idx(), true); + if (symbol == nullptr) { + return; + } + if (visitedSym[symbolIdx]) { + return; + } else { + visitedSym[symbolIdx] = true; + } + symbol->ResetIsDeleted(); + if (symbol->IsConst()) { + auto *konst = symbol->GetKonst(); + CollectStaticSymbolInVar(konst); + } +} + +void RecursiveMarkUsedStaticSymbol(const BaseNode *baseNode) { + if (baseNode == nullptr) { + return; + } + Opcode op = baseNode->GetOpCode(); + switch (op) { + case OP_block: { + const BlockNode *blk = static_cast(baseNode); + for (auto &stmt : blk->GetStmtNodes()) { + RecursiveMarkUsedStaticSymbol(&stmt); + } + break; + } + case OP_dassign: { + const DassignNode *dassignNode = static_cast(baseNode); + MarkUsedStaticSymbol(dassignNode->GetStIdx()); + break; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: { + const AddrofNode *dreadNode = static_cast(baseNode); + MarkUsedStaticSymbol(dreadNode->GetStIdx()); + break; + } + default: { + break; + } + } + for (size_t i = 0; i < baseNode->NumOpnds(); ++i) { + RecursiveMarkUsedStaticSymbol(baseNode->Opnd(i)); + } +} + +void CollectStaticSymbolInFunction(MIRFunction &func) { + RecursiveMarkUsedStaticSymbol(func.GetBody()); +} + +void CgFuncPM::SweepUnusedStaticSymbol(MIRModule &m) { + if (!m.IsCModule()) { + return; + } + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast(i)); + if (mirSymbol != nullptr && (mirSymbol->GetSKind() == kStVar || mirSymbol->GetSKind() == kStConst) && + (mirSymbol->GetStorageClass() == kScFstatic || mirSymbol->GetStorageClass() == kScPstatic)) { + mirSymbol->SetIsDeleted(); + } + } + + 
visitedSym.clear(); + /* scan all funtions */ + std::vector &funcTable = GlobalTables::GetFunctionTable().GetFuncTable(); + /* don't optimize this loop to iterator or range-base loop + * because AddCallGraphNode(mirFunc) will change GlobalTables::GetFunctionTable().GetFuncTable() + */ + for (size_t index = 0; index < funcTable.size(); ++index) { + MIRFunction *mirFunc = funcTable.at(index); + if (mirFunc == nullptr || mirFunc->GetBody() == nullptr) { + continue; + } + m.SetCurFunction(mirFunc); + CollectStaticSymbolInFunction(*mirFunc); + /* scan function symbol declaration + * find addrof static const */ + MIRSymbolTable *funcSymTab = mirFunc->GetSymTab(); + if (funcSymTab) { + size_t localSymSize = funcSymTab->GetSymbolTableSize(); + for (uint32 i = 0; i < localSymSize; ++i) { + MIRSymbol *st = funcSymTab->GetSymbolFromStIdx(i); + if (st && st->IsConst()) { + MIRConst *mirConst = st->GetKonst(); + CollectStaticSymbolInVar(mirConst); + } + } + } + } + /* scan global symbol declaration + * find addrof static const */ + auto &symbolSet = m.GetSymbolSet(); + for (auto sit = symbolSet.begin(); sit != symbolSet.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(sit->Idx(), true); + if (s->IsConst()) { + MIRConst *mirConst = s->GetKonst(); + CollectStaticSymbolInVar(mirConst); + } + } +} + +/* =================== new phase manager =================== */ +#ifdef RA_PERF_ANALYSIS +extern void printLSRATime(); +extern void printRATime(); +#endif + +bool CgFuncPM::PhaseRun(MIRModule &m) { + CreateCGAndBeCommon(m); + bool changed = false; + /* reserve static symbol for debugging */ + if (!cgOptions->WithDwarf()) { + SweepUnusedStaticSymbol(m); + } + if (cgOptions->IsRunCG()) { + GenerateOutPutFile(m); + + /* Run the cg optimizations phases */ + PrepareLower(m); + + uint32 countFuncId = 0; + unsigned long rangeNum = 0; + + auto userDefinedOptLevel = cgOptions->GetOptimizeLevel(); + cg->EnrollTargetPhases(this); + + auto admMempool = AllocateMemPoolInPhaseManager("cg phase manager's analysis data manager mempool"); + auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); + for (auto it = m.GetFunctionList().begin(); it != m.GetFunctionList().end(); ++it) { + DEBUG_ASSERT(serialADM->CheckAnalysisInfoEmpty(), "clean adm before function run"); + MIRFunction *mirFunc = *it; + if (mirFunc->GetBody() == nullptr) { + continue; + } + if (userDefinedOptLevel == CGOptions::kLevel2 && m.HasPartO2List()) { + if (m.IsInPartO2List(mirFunc->GetNameStrIdx())) { + cgOptions->EnableO2(); + } else { + cgOptions->EnableO0(); + } + ClearAllPhases(); + cg->EnrollTargetPhases(this); + cg->UpdateCGOptions(*cgOptions); + Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel()); + } + if (!IsQuiet()) { + LogInfo::MapleLogger() << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>> Optimizing Function < " << mirFunc->GetName() + << " id=" << mirFunc->GetPuidxOrigin() << " >---\n"; + } + /* LowerIR. 
*/ + m.SetCurFunction(mirFunc); + if (cg->DoConstFold()) { + DumpMIRFunc(*mirFunc, "************* before ConstantFold **************"); + ConstantFold cf(m); + (void)cf.Simplify(mirFunc->GetBody()); + } + + if (m.GetFlavor() != MIRFlavor::kFlavorLmbc) { + DoFuncCGLower(m, *mirFunc); + } + /* create CGFunc */ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(mirFunc->GetStIdx().Idx()); + auto funcMp = std::make_unique(memPoolCtrler, funcSt->GetName()); + auto stackMp = std::make_unique(funcMp->GetCtrler(), ""); + MapleAllocator funcScopeAllocator(funcMp.get()); + mirFunc->SetPuidxOrigin(++countFuncId); + CGFunc *cgFunc = cg->CreateCGFunc(m, *mirFunc, *beCommon, *funcMp, *stackMp, funcScopeAllocator, countFuncId); + CHECK_FATAL(cgFunc != nullptr, "Create CG Function failed in cg_phase_manager"); + CG::SetCurCGFunc(*cgFunc); + + if (cgOptions->WithDwarf()) { + cgFunc->SetDebugInfo(m.GetDbgInfo()); + } + /* Run the cg optimizations phases. */ + if (CGOptions::UseRange() && rangeNum >= CGOptions::GetRangeBegin() && rangeNum <= CGOptions::GetRangeEnd()) { + CGOptions::EnableInRange(); + } + changed = FuncLevelRun(*cgFunc, *serialADM); + /* Delete mempool. */ + mirFunc->ReleaseCodeMemory(); + ++rangeNum; + CGOptions::DisableInRange(); + } + PostOutPut(m); +#ifdef RA_PERF_ANALYSIS + if (cgOptions->IsEnableTimePhases()) { + printLSRATime(); + printRATime(); + } +#endif + } else { + LogInfo::MapleLogger(kLlErr) << "Skipped generating .s because -no-cg is given" << '\n'; + } + RELEASE(cg); + RELEASE(beCommon); + return changed; +} + +void CgFuncPM::DumpFuncCGIR(const CGFunc &f, const std::string &phaseName) const { + if (CGOptions::DumpPhase(phaseName) && CGOptions::FuncFilter(f.GetName())) { + LogInfo::MapleLogger() << "\n******** CG IR After " << phaseName << ": *********\n"; + f.DumpCGIR(); + } +} + +void CgFuncPM::EmitGlobalInfo(MIRModule &m) const { + EmitDuplicatedAsmFunc(m); + EmitFastFuncs(m); + if (cgOptions->IsGenerateObjectMap()) { + cg->GenerateObjectMaps(*beCommon); + } + cg->GetEmitter()->EmitGlobalVariable(); + EmitDebugInfo(m); + cg->GetEmitter()->CloseOutput(); +} + +void CgFuncPM::InitProfile(MIRModule &m) const { + if (!CGOptions::IsProfileDataEmpty()) { + uint32 dexNameIdx = m.GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + const std::string &dexName = GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(dexNameIdx)); + bool deCompressSucc = m.GetProfile().DeCompress(CGOptions::GetProfileData(), dexName); + if (!deCompressSucc) { + LogInfo::MapleLogger() << "WARN: DeCompress() " << CGOptions::GetProfileData() << "failed in mplcg()\n"; + } + } +} + +void CgFuncPM::CreateCGAndBeCommon(MIRModule &m) { + DEBUG_ASSERT(cgOptions != nullptr, "New cg phase manager running FAILED :: cgOptions unset"); + auto outputFileName = m.GetOutputFileName(); +#if TARGAARCH64 || TARGRISCV64 + cg = new AArch64CG(m, *cgOptions, cgOptions->GetEHExclusiveFunctionNameVec(), CGOptions::GetCyclePatternMap()); + if (CGOptions::GetEmitFileType() == CGOptions::kAsm) { + cg->SetEmitter(*m.GetMemPool()->New(*cg, outputFileName)); + } else { + outputFileName = outputFileName.replace(outputFileName.length() - 1, 1, 1, 'o'); + cg->SetEmitter(*m.GetMemPool()->New(*cg, outputFileName)); + } +#elif TARGARM32 + cg = new Arm32CG(m, *cgOptions, cgOptions->GetEHExclusiveFunctionNameVec(), CGOptions::GetCyclePatternMap()); + cg->SetEmitter(*m.GetMemPool()->New(*cg, outputFileName)); +#elif TARGX86_64 + cg = new X64CG(m, *cgOptions); + if 
(CGOptions::GetEmitFileType() == CGOptions::kAsm) { + assembler::Assembler *assembler = new assembler::AsmAssembler(outputFileName); + cg->SetEmitter(*m.GetMemPool()->New(*cg, *assembler)); + } else { + outputFileName = outputFileName.replace(outputFileName.length()-1, 1, 1, 'o'); + assembler::Assembler *assembler = new assembler::ElfAssembler(outputFileName, cgOptions->GetEmitMemoryManager()); + cg->SetEmitter(*m.GetMemPool()->New(*cg, *assembler)); + } +#else +#error "unknown platform" +#endif + + + /* + * Must be done before creating any BECommon instances. + * + * BECommon, when constructed, will calculate the type, size and align of all types. As a side effect, it will also + * lower ptr and ref types into a64. That will drop the information of what a ptr or ref points to. + * + * All metadata generation passes which depend on the pointed-to type must be done here. + */ + cg->GenPrimordialObjectList(m.GetBaseName()); + /* We initialize a couple of BECommon's tables using the size information of GlobalTables.type_table_. + * So, BECommon must be allocated after all the parsing is done and user-defined types are all acounted. + */ + beCommon = new BECommon(m); + Globals::GetInstance()->SetBECommon(*beCommon); + Globals::GetInstance()->SetTarget(*cg); + + /* If a metadata generation pass depends on object layout it must be done after creating BECommon. */ + cg->GenExtraTypeMetadata(cgOptions->GetClassListFile(), m.GetBaseName()); + + if (cg->NeedInsertInstrumentationFunction()) { + CHECK_FATAL(cgOptions->IsInsertCall(), "handling of --insert-call is not correct"); + cg->SetInstrumentationFunction(cgOptions->GetInstrumentationFunction()); + } +#if TARGAARCH64 + if (!m.IsCModule()) { + CGOptions::EnableFramePointer(); + } +#endif +} + +void CgFuncPM::PrepareLower(MIRModule &m) { + mirLower = GetManagerMemPool()->New(m, nullptr); + mirLower->Init(); + cgLower = GetManagerMemPool()->New(m, + *beCommon, cg->GenerateExceptionHandlingCode(), cg->GenerateVerboseCG()); + cgLower->RegisterBuiltIns(); + if (m.IsJavaModule()) { + cgLower->InitArrayClassCacheTableIndex(); + } + cgLower->RegisterExternalLibraryFunctions(); + cgLower->SetCheckLoadStore(CGOptions::IsCheckArrayStore()); + if (cg->IsStackProtectorStrong() || cg->IsStackProtectorAll() || m.HasPartO2List()) { + cg->AddStackGuardvar(); + } +} + +void CgFuncPM::DoFuncCGLower(const MIRModule &m, MIRFunction &mirFunc) { + if (m.GetFlavor() <= kFeProduced) { + mirLower->SetLowerCG(); + mirLower->SetMirFunc(&mirFunc); + + DumpMIRFunc(mirFunc, "************* before MIRLowerer **************"); + mirLower->LowerFunc(mirFunc); + } + + bool isNotQuiet = !CGOptions::IsQuiet(); + DumpMIRFunc(mirFunc, "************* before CGLowerer **************", isNotQuiet); + + cgLower->LowerFunc(mirFunc); + + DumpMIRFunc(mirFunc, "************* after CGLowerer **************", isNotQuiet, + "************* end CGLowerer **************"); +} + +void CgFuncPM::EmitDuplicatedAsmFunc(MIRModule &m) const { + if (CGOptions::IsDuplicateAsmFileEmpty()) { + return; + } + + std::ifstream duplicateAsmFileFD(CGOptions::GetDuplicateAsmFile()); + + if (!duplicateAsmFileFD.is_open()) { + duplicateAsmFileFD.close(); + ERR(kLncErr, " %s open failed!", CGOptions::GetDuplicateAsmFile().c_str()); + return; + } + std::string contend; + bool onlyForFramework = false; + bool isFramework = IsFramework(m); + + while (getline(duplicateAsmFileFD, contend)) { + if (!contend.compare("#Libframework_start")) { + onlyForFramework = true; + } + + if (!contend.compare("#Libframework_end")) { + 
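+            // Lines between the #Libframework_start and #Libframework_end
+            // markers are copied only when the module is the framework
+            // library (see the onlyForFramework/isFramework check below).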
onlyForFramework = false; + } + + if (onlyForFramework && !isFramework) { + continue; + } + + (void)cg->GetEmitter()->Emit(contend + "\n"); + } + duplicateAsmFileFD.close(); +} + +void CgFuncPM::EmitFastFuncs(const MIRModule &m) const { + if (CGOptions::IsFastFuncsAsmFileEmpty() || !(m.IsJavaModule())) { + return; + } + + struct stat buffer; + if (stat(CGOptions::GetFastFuncsAsmFile().c_str(), &buffer) != 0) { + return; + } + + std::ifstream fastFuncsAsmFileFD(CGOptions::GetFastFuncsAsmFile()); + if (fastFuncsAsmFileFD.is_open()) { + std::string contend; + (void)cg->GetEmitter()->Emit("#define ENABLE_LOCAL_FAST_FUNCS 1\n"); + + while (getline(fastFuncsAsmFileFD, contend)) { + (void)cg->GetEmitter()->Emit(contend + "\n"); + } + } + fastFuncsAsmFileFD.close(); +} + +void CgFuncPM::EmitDebugInfo(const MIRModule &m) const { + if (!cgOptions->WithDwarf()) { + return; + } + cg->GetEmitter()->SetupDBGInfo(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIHeaderFileInfo(); + cg->GetEmitter()->EmitDIDebugInfoSection(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIDebugAbbrevSection(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIDebugARangesSection(); + cg->GetEmitter()->EmitDIDebugRangesSection(); + cg->GetEmitter()->EmitDIDebugLineSection(); + cg->GetEmitter()->EmitDIDebugStrSection(); +} + +bool CgFuncPM::IsFramework(MIRModule &m) const { + auto &funcList = m.GetFunctionList(); + for (auto it = funcList.begin(); it != funcList.end(); ++it) { + MIRFunction *mirFunc = *it; + DEBUG_ASSERT(mirFunc != nullptr, "nullptr check"); + if (mirFunc->GetBody() != nullptr && + mirFunc->GetName() == "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V") { + return true; + } + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFuncPM, cgFuncPhaseManager) +/* register codegen common phases */ +MAPLE_TRANSFORM_PHASE_REGISTER(CgLayoutFrame, layoutstackframe) +MAPLE_TRANSFORM_PHASE_REGISTER(CgCreateLabel, createstartendlabel) +MAPLE_TRANSFORM_PHASE_REGISTER(InstructionSelector, instructionselector) +MAPLE_TRANSFORM_PHASE_REGISTER(CgMoveRegArgs, moveargs) +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegAlloc, regalloc) +MAPLE_TRANSFORM_PHASE_REGISTER(CgAlignAnalysis, alignanalysis) +MAPLE_TRANSFORM_PHASE_REGISTER(CgFrameFinalize, framefinalize) +MAPLE_TRANSFORM_PHASE_REGISTER(CgYieldPointInsertion, yieldpoint) +MAPLE_TRANSFORM_PHASE_REGISTER(CgGenProEpiLog, generateproepilog) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_phi_elimination.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_phi_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..441dfe893083e6cd0e1430be71a88315c52f255c --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_phi_elimination.cpp @@ -0,0 +1,118 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "cg_phi_elimination.h"
+#include "cg.h"
+#include "cgbb.h"
+
+namespace maplebe {
+void PhiEliminate::TranslateTSSAToCSSA() {
+    FOR_ALL_BB(bb, cgFunc) {
+        eliminatedBB.emplace(bb->GetId());
+        for (auto phiInsnIt : bb->GetPhiInsns()) {
+            /* Method I: create a temp move for the phi-node */
+            auto &destReg = static_cast<RegOperand&>(phiInsnIt.second->GetOperand(kInsnFirstOpnd));
+            RegOperand &tempMovDest = cgFunc->GetOrCreateVirtualRegisterOperand(CreateTempRegForCSSA(destReg));
+            auto &phiList = static_cast<PhiOperand&>(phiInsnIt.second->GetOperand(kInsnSecondOpnd));
+            for (auto phiOpndIt : phiList.GetOperands()) {
+                uint32 fBBId = phiOpndIt.first;
+                DEBUG_ASSERT(fBBId != 0, "GetFromBBID = 0");
+#if DEBUG
+                bool find = false;
+                for (auto predBB : bb->GetPreds()) {
+                    if (predBB->GetId() == fBBId) {
+                        find = true;
+                    }
+                }
+                CHECK_FATAL(find, "cannot find pred BB for phi-node");
+#endif
+                PlaceMovInPredBB(fBBId, CreateMov(tempMovDest, *(phiOpndIt.second)));
+            }
+            Insn &movInsn = CreateMov(destReg, tempMovDest);
+            bb->ReplaceInsn(*phiInsnIt.second, movInsn);
+        }
+    }
+
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            CHECK_FATAL(eliminatedBB.count(bb->GetId()), "still have phi");
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            ReCreateRegOperand(*insn);
+        }
+        bb->GetPhiInsns().clear();
+    }
+    UpdateRematInfo();
+    cgFunc->SetSSAvRegCount(0);
+}
+
+void PhiEliminate::UpdateRematInfo() {
+    if (CGOptions::GetRematLevel() > 0) {
+        cgFunc->UpdateAllRegisterVregMapping(remateInfoAfterSSA);
+    }
+}
+
+void PhiEliminate::PlaceMovInPredBB(uint32 predBBId, Insn &movInsn) {
+    BB *predBB = cgFunc->GetBBFromID(predBBId);
+    DEBUG_ASSERT(movInsn.GetOperand(kInsnSecondOpnd).IsRegister(), "unexpected operand");
+    if (predBB->GetKind() == BB::kBBFallthru) {
+        predBB->AppendInsn(movInsn);
+    } else {
+        AppendMovAfterLastVregDef(*predBB, movInsn);
+    }
+}
+
+regno_t PhiEliminate::GetAndIncreaseTempRegNO() {
+    while (GetSSAInfo()->GetAllSSAOperands().count(tempRegNO)) {
+        tempRegNO++;
+    }
+    regno_t ori = tempRegNO;
+    tempRegNO++;
+    return ori;
+}
+
+RegOperand *PhiEliminate::MakeRoomForNoDefVreg(RegOperand &conflictReg) {
+    regno_t conflictVregNO = conflictReg.GetRegisterNumber();
+    auto rVregIt = replaceVreg.find(conflictVregNO);
+    if (rVregIt != replaceVreg.end()) {
+        return rVregIt->second;
+    } else {
+        RegOperand *regForRecreate = &CreateTempRegForCSSA(conflictReg);
+        (void)replaceVreg.emplace(std::pair<regno_t, RegOperand*>(conflictVregNO, regForRecreate));
+        return regForRecreate;
+    }
+}
+
+void PhiEliminate::RecordRematInfo(regno_t vRegNO, PregIdx pIdx) {
+    if (remateInfoAfterSSA.count(vRegNO)) {
+        if (remateInfoAfterSSA[vRegNO] != pIdx) {
+            remateInfoAfterSSA.erase(vRegNO);
+        }
+    } else {
+        (void)remateInfoAfterSSA.emplace(std::pair<regno_t, PregIdx>(vRegNO, pIdx));
+    }
+}
+
+bool CgPhiElimination::PhaseRun(maplebe::CGFunc &f) {
+    CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f);
+    PhiEliminate *pe = f.GetCG()->CreatePhiElimintor(*GetPhaseMemPool(), f, *ssaInfo);
+    pe->TranslateTSSAToCSSA();
+    return false;
+}
+void CgPhiElimination::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+    aDep.AddRequired<CgSSAConstruct>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgPhiElimination, cgphielimination)
+}
diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_pre.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_pre.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ad2d8287763553ee0ada74f80cd2226cd5452b32
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/cg_pre.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights
reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_pre.h" +#include "cg_dominance.h" +#include "aarch64_cg.h" + +namespace maplebe { +/* Implement PRE in cgir */ +void CGPre::ResetDS(CgPhiOcc *phiOcc) { + if (!phiOcc->IsDownSafe()) { + return; + } + + phiOcc->SetIsDownSafe(false); + for (auto *phiOpnd : phiOcc->GetPhiOpnds()) { + auto *defOcc = phiOpnd->GetDef(); + if (defOcc != nullptr && defOcc->GetOccType() == kOccPhiocc) { + ResetDS(static_cast(defOcc)); + } + } +} + +void CGPre::ComputeDS() { + for (auto phiIt = phiOccs.rbegin(); phiIt != phiOccs.rend(); ++phiIt) { + auto *phiOcc = *phiIt; + if (phiOcc->IsDownSafe()) { + continue; + } + for (auto *phiOpnd : phiOcc->GetPhiOpnds()) { + if (phiOpnd->HasRealUse()) { + continue; + } + auto *defOcc = phiOpnd->GetDef(); + if (defOcc != nullptr && defOcc->GetOccType() == kOccPhiocc) { + ResetDS(static_cast(defOcc)); + } + } + } +} + +/* based on ssapre->workCand's realOccs and dfPhiDfns (which now privides all + the inserted phis), create the phi and phiOpnd occ nodes; link them all up in + order of dt_preorder in ssapre->allOccs; the phi occ nodes are in addition + provided in order of dt_preorder in ssapre->phiOccs */ +void CGPre::CreateSortedOccs() { + // merge varPhiDfns to dfPhiDfns + dfPhiDfns.insert(varPhiDfns.begin(), varPhiDfns.end()); + + auto comparator = [this](const CgPhiOpndOcc *occA, const CgPhiOpndOcc *occB) -> bool { + return dom->GetDtDfnItem(occA->GetBB()->GetId()) < dom->GetDtDfnItem(occB->GetBB()->GetId()); + }; + + std::vector phiOpnds; + for (auto dfn : dfPhiDfns) { + uint32 bbId = dom->GetDtPreOrderItem(dfn); + BB *bb = GetBB(bbId); + auto *phiOcc = perCandMemPool->New(*bb, workCand->GetTheOperand(), perCandAllocator); + phiOccs.push_back(phiOcc); + + for (BB *pred : bb->GetPreds()) { + auto phiOpnd = perCandMemPool->New(pred, workCand->GetTheOperand(), phiOcc); + phiOpnds.push_back(phiOpnd); + phiOcc->AddPhiOpnd(*phiOpnd); + phiOpnd->SetPhiOcc(*phiOcc); + } + } + std::sort(phiOpnds.begin(), phiOpnds.end(), comparator); + + auto realOccIt = workCand->GetRealOccs().begin(); + auto exitOccIt = exitOccs.begin(); + auto phiIt = phiOccs.begin(); + auto phiOpndIt = phiOpnds.begin(); + + CgOccur *nextRealOcc = nullptr; + if (realOccIt != workCand->GetRealOccs().end()) { + nextRealOcc = *realOccIt; + } + + CgOccur *nextExitOcc = nullptr; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } + + CgPhiOcc *nextPhiOcc = nullptr; + if (phiIt != phiOccs.end()) { + nextPhiOcc = *phiIt; + } + + CgPhiOpndOcc *nextPhiOpndOcc = nullptr; + if (phiOpndIt != phiOpnds.end()) { + nextPhiOpndOcc = *phiOpndIt; + } + + CgOccur *pickedOcc; // the next picked occ in order of preorder traveral of dominator tree + do { + pickedOcc = nullptr; + // the 4 kinds of occ must be checked in this order, so it will be right + // if more than 1 has the same dfn + if (nextPhiOcc != nullptr) { + pickedOcc = nextPhiOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || + dom->GetDtDfnItem(nextRealOcc->GetBB()->GetId()) < + 
dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextExitOcc != nullptr && (pickedOcc == nullptr || + dom->GetDtDfnItem(nextExitOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextExitOcc; + } + if (nextPhiOpndOcc != nullptr && (pickedOcc == nullptr || + dom->GetDtDfnItem(nextPhiOpndOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextPhiOpndOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->GetOccType()) { + case kOccReal: + case kOccUse: + case kOccDef: + case kOccStore: + case kOccMembar: { + ++realOccIt; + if (realOccIt != workCand->GetRealOccs().end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kOccExit: { + ++exitOccIt; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } else { + nextExitOcc = nullptr; + } + break; + } + case kOccPhiocc: { + ++phiIt; + if (phiIt != phiOccs.end()) { + nextPhiOcc = *phiIt; + } else { + nextPhiOcc = nullptr; + } + break; + } + case kOccPhiopnd: { + ++phiOpndIt; + if (phiOpndIt != phiOpnds.end()) { + nextPhiOpndOcc = *phiOpndIt; + } else { + nextPhiOpndOcc = nullptr; + } + break; + } + default: + DEBUG_ASSERT(false, "CreateSortedOccs: unexpected occty"); + break; + } + } + } while (pickedOcc != nullptr); +} + +CgOccur *CGPre::CreateRealOcc(Insn &insn, Operand &opnd, OccType occType) { + uint64 hashIdx = PreWorkCandHashTable::ComputeWorkCandHashIndex(opnd); + PreWorkCand *wkCand = preWorkCandHashTable.GetWorkcandFromIndex(hashIdx); + while (wkCand != nullptr) { + Operand *currOpnd = wkCand->GetTheOperand(); + DEBUG_ASSERT(currOpnd != nullptr, "CreateRealOcc: found workcand with theMeExpr as nullptr"); + if (currOpnd == &opnd) { + break; + } + wkCand = static_cast(wkCand->GetNext()); + } + + CgOccur *newOcc = nullptr; + switch (occType) { + case kOccDef: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + case kOccStore: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + case kOccUse: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + default: + CHECK_FATAL(false, "unsupported occur type"); + break; + } + + if (wkCand != nullptr) { + wkCand->AddRealOccAsLast(*newOcc, GetPUIdx()); + return newOcc; + } + + // workcand not yet created; create a new one and add to worklist + wkCand = ssaPreMemPool->New(ssaPreAllocator, &opnd, GetPUIdx()); + workList.push_back(wkCand); + wkCand->AddRealOccAsLast(*newOcc, GetPUIdx()); + // add to bucket at workcandHashTable[hashIdx] + wkCand->SetNext(*preWorkCandHashTable.GetWorkcandFromIndex(hashIdx)); + preWorkCandHashTable.SetWorkCandAt(hashIdx, *wkCand); + return newOcc; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_prop.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_prop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f78b64d5f55a70e39a67bc481208bdaa0e506f11 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_prop.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "loop.h" +#include "cg_prop.h" + +namespace maplebe { +void CGProp::DoCopyProp() { + CopyProp(); + cgDce->DoDce(); +} + +void CGProp::DoTargetProp() { + DoCopyProp(); + /* instruction level opt */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + TargetProp(*insn); + } + } + /* pattern level opt */ + if (CGOptions::GetInstance().GetOptimizeLevel() == CGOptions::kLevel2) { + PropPatternOpt(); + } +} + +Insn *PropOptimizePattern::FindDefInsn(const VRegVersion *useVersion) { + if (!useVersion) { + return nullptr; + } + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (!defInfo) { + return nullptr; + } + return defInfo->GetInsn(); +} + +bool CgCopyProp::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CGProp *cgProp = f.GetCG()->CreateCGProp(*GetPhaseMemPool(),f, *ssaInfo, *ll); + cgProp->DoCopyProp(); + ll->ClearBFS(); + return false; +} +void CgCopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCopyProp, cgcopyprop) + +bool CgTargetProp::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CGProp *cgProp = f.GetCG()->CreateCGProp(*GetPhaseMemPool(),f, *ssaInfo, *ll); + cgProp->DoTargetProp(); + ll->ClearBFS(); + return false; +} +void CgTargetProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgTargetProp, cgtargetprop) +} diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_ssa.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_ssa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e5739408a5e0e3bb17a15b6dd1d3692befb98867 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -0,0 +1,347 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "cg_ssa.h"
+#include "cg.h"
+
+#include "optimize_common.h"
+
+namespace maplebe {
+uint32 CGSSAInfo::SSARegNObase = 100;
+void CGSSAInfo::ConstructSSA() {
+    InsertPhiInsn();
+    /* Rename variables */
+    RenameVariablesForBB(domInfo->GetCommonEntryBB().GetId());
+#if DEBUG
+    /* Check phiListOpnd, must be ssaForm */
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsPhi()) {
+                continue;
+            }
+            Operand &phiListOpnd = insn->GetOperand(kInsnSecondOpnd);
+            CHECK_FATAL(phiListOpnd.IsPhi(), "unexpected phi operand");
+            MapleMap<uint32, RegOperand*> &phiList = static_cast<PhiOperand&>(phiListOpnd).GetOperands();
+            for (auto &phiOpndIt : phiList) {
+                if (!phiOpndIt.second->IsSSAForm()) {
+                    CHECK_FATAL(false, "phiOperand is not ssaForm!");
+                }
+            }
+        }
+    }
+#endif
+    cgFunc->SetSSAvRegCount(static_cast<uint32>(GetAllSSAOperands().size()) + SSARegNObase + 1);
+    /* save reversePostOrder of bbs to rectify validbit */
+    SetReversePostOrder();
+}
+
+void CGSSAInfo::MarkInsnsInSSA(Insn &insn) {
+    CHECK_FATAL(insn.GetId() == 0, "insn is not clean !!"); /* change to assert */
+    insnCount += 2;
+    insn.SetId(static_cast<uint32>(insnCount));
+}
+
+void CGSSAInfo::InsertPhiInsn() {
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            std::set<uint32> defRegNOs = insn->GetDefRegs();
+            for (auto vRegNO : defRegNOs) {
+                RegOperand *virtualOpnd = cgFunc->GetVirtualRegisterOperand(vRegNO);
+                if (virtualOpnd != nullptr) {
+                    PrunedPhiInsertion(*bb, *virtualOpnd);
+                }
+            }
+        }
+    }
+}
+
+void CGSSAInfo::PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd) {
+    regno_t vRegNO = virtualOpnd.GetRegisterNumber();
+    MapleVector<uint32> frontiers = domInfo->GetDomFrontier(bb.GetId());
+    for (auto i : frontiers) {
+        BB *phiBB = cgFunc->GetBBFromID(i);
+        CHECK_FATAL(phiBB != nullptr, "get phiBB failed, change to DEBUG_ASSERT");
+        if (phiBB->HasPhiInsn(vRegNO)) {
+            continue;
+        }
+        if (phiBB->GetLiveIn()->TestBit(vRegNO)) {
+            CG *codeGen = cgFunc->GetCG();
+            PhiOperand &phiList = codeGen->CreatePhiOperand(*memPool, ssaAlloc);
+            /* do not insert phi opnd when inserting the phi insn? */
+            for (auto prevBB : phiBB->GetPreds()) {
+                if (prevBB->GetLiveOut()->TestBit(vRegNO)) {
+                    auto *paraOpnd = static_cast<RegOperand*>(virtualOpnd.Clone(*tempMp));
+                    phiList.InsertOpnd(prevBB->GetId(), *paraOpnd);
+                } else {
+                    CHECK_FATAL(false, "multiple BB in");
+                }
+            }
+            Insn &phiInsn = codeGen->BuildPhiInsn(virtualOpnd, phiList);
+            MarkInsnsInSSA(phiInsn);
+            bool insertSuccess = false;
+            FOR_BB_INSNS(insn, phiBB) {
+                if (insn->IsMachineInstruction()) {
+                    (void)phiBB->InsertInsnBefore(*insn, phiInsn);
+                    insertSuccess = true;
+                    break;
+                }
+            }
+            if (!insertSuccess) {
+                phiBB->InsertInsnBegin(phiInsn);
+            }
+            phiBB->AddPhiInsn(vRegNO, phiInsn);
+            PrunedPhiInsertion(*phiBB, virtualOpnd);
+        }
+    }
+}
+
+void CGSSAInfo::RenameVariablesForBB(uint32 bbID) {
+    RenameBB(*cgFunc->GetBBFromID(bbID)); /* rename first BB */
+    const auto &domChildren = domInfo->GetDomChildren(bbID);
+    for (const auto &child : domChildren) {
+        RenameBB(*cgFunc->GetBBFromID(child));
+    }
+}
+
+void CGSSAInfo::RenameBB(BB &bb) {
+    if (IsBBRenamed(bb.GetId())) {
+        return;
+    }
+    AddRenamedBB(bb.GetId());
+    /* record version stack size */
+    size_t tempSize = vRegStk.empty() ? allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 :
+                                        vRegStk.rbegin()->first + 1;
+    std::vector<int64> oriStackSize(tempSize, -1);
+    for (auto it : vRegStk) {
+        DEBUG_ASSERT(it.first < oriStackSize.size(), "out of range");
+        oriStackSize[it.first] = static_cast<int64>(it.second.size());
+    }
+    RenamePhi(bb);
+    FOR_BB_INSNS(insn, &bb) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        MarkInsnsInSSA(*insn);
+        RenameInsn(*insn);
+    }
+    RenameSuccPhiUse(bb);
+    RenameVariablesForBB(bb.GetId());
+    /* stack pop up */
+    for (auto &it : vRegStk) {
+        if (it.first < oriStackSize.size() && oriStackSize[it.first] >= 0) {
+            while (static_cast<int64>(it.second.size()) > oriStackSize[static_cast<size_t>(it.first)]) {
+                DEBUG_ASSERT(!it.second.empty(), "empty stack");
+                it.second.pop();
+            }
+        }
+    }
+}
+
+void CGSSAInfo::RenamePhi(BB &bb) {
+    for (auto phiInsnIt : bb.GetPhiInsns()) {
+        Insn *phiInsn = phiInsnIt.second;
+        CHECK_FATAL(phiInsn != nullptr, "get phi insn failed");
+        auto *phiDefOpnd = static_cast<RegOperand*>(&phiInsn->GetOperand(kInsnFirstOpnd));
+        VRegVersion *newVst = CreateNewVersion(*phiDefOpnd, *phiInsn, kInsnFirstOpnd, true);
+        phiInsn->SetOperand(kInsnFirstOpnd, *newVst->GetSSAvRegOpnd());
+    }
+}
+
+void CGSSAInfo::RenameSuccPhiUse(const BB &bb) {
+    for (auto *sucBB : bb.GetSuccs()) {
+        for (auto phiInsnIt : sucBB->GetPhiInsns()) {
+            Insn *phiInsn = phiInsnIt.second;
+            CHECK_FATAL(phiInsn != nullptr, "get phi insn failed");
+            Operand *phiListOpnd = &phiInsn->GetOperand(kInsnSecondOpnd);
+            CHECK_FATAL(phiListOpnd->IsPhi(), "unexpected phi operand");
+            MapleMap<uint32, RegOperand*> &phiList = static_cast<PhiOperand*>(phiListOpnd)->GetOperands();
+            DEBUG_ASSERT(phiList.size() <= sucBB->GetPreds().size(), "unexpected phiList size, need check");
+            for (auto phiOpndIt = phiList.begin(); phiOpndIt != phiList.end(); ++phiOpndIt) {
+                if (phiOpndIt->first == bb.GetId()) {
+                    RegOperand *renamedOpnd = GetRenamedOperand(*(phiOpndIt->second), false, *phiInsn, kInsnSecondOpnd);
+                    phiList[phiOpndIt->first] = renamedOpnd;
+                }
+            }
+        }
+    }
+}
+
+uint32 CGSSAInfo::IncreaseVregCount(regno_t vRegNO) {
+    if (!vRegDefCount.count(vRegNO)) {
+        vRegDefCount.emplace(vRegNO, 0);
+    } else {
+        vRegDefCount[vRegNO]++;
+    }
+    return vRegDefCount[vRegNO];
+}
+
+bool CGSSAInfo::IncreaseSSAOperand(regno_t vRegNO, VRegVersion *vst) {
+    if (allSSAOperands.count(vRegNO)) {
+        return false;
+    }
+    allSSAOperands.emplace(vRegNO, vst);
+    return true;
+}
+
+VRegVersion *CGSSAInfo::CreateNewVersion(RegOperand &virtualOpnd, Insn &defInsn, uint32 idx, bool isDefByPhi) {
+    regno_t vRegNO = virtualOpnd.GetRegisterNumber();
+    uint32 versionIdx = IncreaseVregCount(vRegNO);
+    RegOperand *ssaOpnd = CreateSSAOperand(virtualOpnd);
+    auto *newVst = memPool->New<VRegVersion>(ssaAlloc, *ssaOpnd, versionIdx, vRegNO);
+    auto *defInfo = CreateDUInsnInfo(&defInsn, idx);
+    newVst->SetDefInsn(defInfo, isDefByPhi ? kDefByPhi : kDefByInsn);
+    if (!IncreaseSSAOperand(ssaOpnd->GetRegisterNumber(), newVst)) {
+        CHECK_FATAL(false, "insert ssa operand failed");
+    }
+    auto it = vRegStk.find(vRegNO);
+    if (it == vRegStk.end()) {
+        MapleStack<VRegVersion*> vRegVersionStack(ssaAlloc.Adapter());
+        auto ret = vRegStk.emplace(std::pair<regno_t, MapleStack<VRegVersion*>>(vRegNO, vRegVersionStack));
+        CHECK_FATAL(ret.second, "insert failed");
+        it = ret.first;
+    }
+    it->second.push(newVst);
+    return newVst;
+}
+
+VRegVersion *CGSSAInfo::GetVersion(const RegOperand &virtualOpnd) {
+    regno_t vRegNO = virtualOpnd.GetRegisterNumber();
+    auto vRegIt = vRegStk.find(vRegNO);
+    return vRegIt != vRegStk.end() ?
vRegIt->second.top() : nullptr; +} + +VRegVersion *CGSSAInfo::FindSSAVersion(regno_t ssaRegNO) { + auto it = allSSAOperands.find(ssaRegNO); + return it != allSSAOperands.end() ? it->second : nullptr; +} + +PhiOperand &CGSSAInfo::CreatePhiOperand() { + return cgFunc->GetCG()->CreatePhiOperand(*memPool, ssaAlloc); +} + +void CGSSAInfo::SetReversePostOrder() { + MapleVector &reverse = domInfo->GetReversePostOrder(); + for (auto *bb : reverse) { + if (bb != nullptr) { + reversePostOrder.emplace_back(bb->GetId()); + } + } +} + +void CGSSAInfo::DumpFuncCGIRinSSAForm() const { + LogInfo::MapleLogger() << "\n****** SSA CGIR for " << cgFunc->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, cgFunc) { + LogInfo::MapleLogger() << "=== BB " << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx(); + LogInfo::MapleLogger() << " ==> @" << cgFunc->GetFunction().GetLabelName(bb->GetLabIdx()) << "]"; + } + + LogInfo::MapleLogger() << "> <" << bb->GetId() << "> "; + if (bb->IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (bb->IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb->GetFirstStmt() == cgFunc->GetCleanupLabel()) { + LogInfo::MapleLogger() << "cleanup "; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << "succs: "; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (!bb->GetEhSuccs().empty()) { + LogInfo::MapleLogger() << "eh_succs: "; + for (auto *ehSuccBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "===\n"; + LogInfo::MapleLogger() << "frequency:" << bb->GetFrequency() << "\n"; + + FOR_BB_INSNS_CONST(insn, bb) { + if (insn->IsCfiInsn() && insn->IsDbgInsn()) { + insn->Dump(); + } else { + DumpInsnInSSAForm(*insn); + } + } + } +} + +void VRegVersion::AddUseInsn(CGSSAInfo &ssaInfo, Insn &useInsn, uint32 idx) { + DEBUG_ASSERT(useInsn.GetId() > 0, "insn should be marked during ssa"); + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + if (useInsnIt != useInsnInfos.end()) { + useInsnIt->second->IncreaseDU(idx); + } else { + useInsnInfos.insert(std::make_pair(useInsn.GetId(), ssaInfo.CreateDUInsnInfo(&useInsn, idx))); + } +} + +void VRegVersion::RemoveUseInsn(const Insn &useInsn, uint32 idx) { + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + DEBUG_ASSERT(useInsnIt != useInsnInfos.end(), "use Insn not found"); + useInsnIt->second->DecreaseDU(idx); + if (useInsnIt->second->HasNoDU()) { + useInsnInfos.erase(useInsnIt); + } +} + +void VRegVersion::CheckDeadUse(const Insn &useInsn) { + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + DEBUG_ASSERT(useInsnIt != useInsnInfos.end(), "use Insn not found"); + if (useInsnIt->second->HasNoDU()) { + useInsnInfos.erase(useInsnIt); + } +} + +void CgSSAConstruct::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} + +bool CgSSAConstruct::PhaseRun(maplebe::CGFunc &f) { + if (CG_DEBUG_FUNC(f)) { + DotGenerator::GenerateDot("beforessa", f, f.GetMirModule(), true); + } + MemPool *ssaMemPool = GetPhaseMemPool(); + MemPool *ssaTempMp = ApplyTempMemPool(); + DomAnalysis *domInfo = nullptr; + domInfo = GET_ANALYSIS(CgDomAnalysis, f); + LiveAnalysis *liveInfo = nullptr; + liveInfo = GET_ANALYSIS(CgLiveAnalysis, f); + ssaInfo = f.GetCG()->CreateCGSSAInfo(*ssaMemPool, f, *domInfo, *ssaTempMp); + 
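+    // ConstructSSA (implemented above) is the classic two-step build:
+    // InsertPhiInsn places pruned phis on the iterated dominance frontier of
+    // every defining BB (guarded by liveness), then RenameVariablesForBB walks
+    // the dominator tree in preorder, giving each def a fresh VRegVersion and
+    // rewriting uses. Illustrative shape of the result (not from this file):
+    //     [pred1] R100_v1 = ...      [pred2] R100_v2 = ...
+    //              \                    /
+    //        [merge] R10x = phi(pred1: R100_v1, pred2: R100_v2)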
ssaInfo->ConstructSSA(); + + if (CG_DEBUG_FUNC(f)) { + LogInfo::MapleLogger() << "******** CG IR After ssaconstruct in ssaForm: *********" << "\n"; + ssaInfo->DumpFuncCGIRinSSAForm(); + } + if (liveInfo != nullptr) { + liveInfo->ClearInOutDataInfo(); + } + /* due to change of register number */ + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLiveAnalysis::id); + return true; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgSSAConstruct, cgssaconstruct) /* both transform & analysis */ +} diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_ssa_pre.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_ssa_pre.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4bf8fbabdb70215eb0e9104c095d6b45ba652c70 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_ssa_pre.cpp @@ -0,0 +1,602 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cgfunc.h" +#include "loop.h" +#include "cg_ssa_pre.h" + +namespace maplebe { + +// ================ Step 6: Code Motion ================ +void SSAPre::CodeMotion() { + // pass 1 only doing insertion + for (Occ *occ : allOccs) { + if (occ->occTy != kAOccPhiOpnd) { + continue; + } + PhiOpndOcc *phiOpndOcc = static_cast(occ); + if (phiOpndOcc->insertHere) { + DEBUG_ASSERT(phiOpndOcc->cgbb->GetLoop() == nullptr, "cg_ssapre: save inserted inside loop"); + workCand->saveAtEntryBBs.insert(phiOpndOcc->cgbb->GetId()); + } + } + // pass 2 only doing deletion + for (Occ *occ : realOccs) { + if (occ->occTy != kAOccReal) { + continue; + } + RealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + DEBUG_ASSERT(realOcc->cgbb->GetLoop() == nullptr, "cg_ssapre: save in place inside loop"); + workCand->saveAtEntryBBs.insert(realOcc->cgbb->GetId()); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ output _______" << '\n'; + LogInfo::MapleLogger() << " saveAtEntryBBs: ["; + for (uint32 id : workCand->saveAtEntryBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n\n"; + } +} + +// ================ Step 5: Finalize ================ +// for setting RealOcc's redundant flag and PhiOpndOcc's insertHere flag +void SSAPre::Finalize() { + std::vector availDefVec(classCount + 1, nullptr); + // preorder traversal of dominator tree + for (Occ *occ : allOccs) { + size_t classId = static_cast(occ->classId); + switch (occ->occTy) { + case kAOccPhi: { + PhiOcc *phiOcc = static_cast(occ); + if (phiOcc->WillBeAvail()) { + availDefVec[classId] = phiOcc; + } + break; + } + case kAOccReal: { + RealOcc *realOcc = static_cast(occ); + if (availDefVec[classId] == nullptr || !availDefVec[classId]->IsDominate(dom, occ)) { + realOcc->redundant = false; + availDefVec[classId] = realOcc; + } else { + realOcc->redundant = true; + } + break; + } + case kAOccPhiOpnd: { + PhiOpndOcc *phiOpndOcc = static_cast(occ); + const PhiOcc *phiOcc = phiOpndOcc->defPhiOcc; + if (phiOcc->WillBeAvail()) { + if (phiOpndOcc->def == nullptr || (!phiOpndOcc->hasRealUse && + phiOpndOcc->def->occTy == kAOccPhi && + 
!static_cast(phiOpndOcc->def)->WillBeAvail())) { + // insert a store + if (phiOpndOcc->cgbb->GetSuccs().size() != 1) { // critical edge + workCand->saveAtProlog = true; + break; + } + phiOpndOcc->insertHere = true; + } else { + phiOpndOcc->def = availDefVec[classId]; + } + } + break; + } + case kAOccExit: + break; + default: + DEBUG_ASSERT(false, "Finalize: unexpected occ type"); + break; + } + if (workCand->saveAtProlog) { + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after finalize _______" << '\n'; + if (workCand->saveAtProlog) { + LogInfo::MapleLogger() << "Giving up because of insertion at critical edge" << '\n'; + return; + } + for (Occ *occ : allOccs) { + if (occ->occTy == kAOccReal) { + RealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + occ->Dump(); + LogInfo::MapleLogger() << " non-redundant" << '\n'; + } + } else if (occ->occTy == kAOccPhiOpnd) { + PhiOpndOcc *phiOpndOcc = static_cast(occ); + if (phiOpndOcc->insertHere) { + occ->Dump(); + LogInfo::MapleLogger() << " insertHere" << '\n'; + } + } + } + } +} + +// ================ Step 4: WillBeAvail Computation ================ + +void SSAPre::ResetCanBeAvail(PhiOcc *phi) const { + phi->isCanBeAvail = false; + // the following loop finds phi's uses and reset them + for (PhiOcc *phiOcc : phiOccs) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def == phi) { + if (!phiOpndOcc->hasRealUse && !phiOcc->isDownsafe && phiOcc->isCanBeAvail) { + ResetCanBeAvail(phiOcc); + } + } + } + } +} + +void SSAPre::ComputeCanBeAvail() const { + for (PhiOcc *phiOcc : phiOccs) { + if (!phiOcc->isDownsafe && phiOcc->isCanBeAvail) { + bool existNullUse = false; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def == nullptr) { + existNullUse = true; + break; + } + } + if (existNullUse) { + ResetCanBeAvail(phiOcc); + } + } + } +} + +void SSAPre::ResetLater(PhiOcc *phi) const { + phi->isLater = false; + // the following loop finds phi's uses and reset them + for (PhiOcc *phiOcc : phiOccs) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def == phi) { + if (phiOcc->isLater) { + ResetLater(phiOcc); + } + } + } + } +} + +void SSAPre::ComputeLater() const { + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->isLater = phiOcc->isCanBeAvail; + } + for (PhiOcc *phiOcc : phiOccs) { + if (phiOcc->isLater) { + bool existNonNullUse = false; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->hasRealUse) { + existNonNullUse = true; + break; + } + } + if (existNonNullUse || phiOcc->speculativeDownsafe) { + ResetLater(phiOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after later computation _______" << '\n'; + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->Dump(); + if (phiOcc->isCanBeAvail) { + LogInfo::MapleLogger() << " canbeAvail"; + } + if (phiOcc->isLater) { + LogInfo::MapleLogger() << " later"; + } + if (phiOcc->isCanBeAvail && !phiOcc->isLater) { + LogInfo::MapleLogger() << " will be Avail"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 3: Downsafe Computation ================ +void SSAPre::ResetDownsafe(const PhiOpndOcc *phiOpnd) const { + if (phiOpnd->hasRealUse) { + return; + } + Occ *defOcc = phiOpnd->def; + if (defOcc == nullptr || defOcc->occTy != kAOccPhi) { + return; + } + PhiOcc *defPhiOcc = static_cast(defOcc); + if (defPhiOcc->speculativeDownsafe) { + return; + } + if 
(!defPhiOcc->isDownsafe) { + return; + } + defPhiOcc->isDownsafe = false; + for (PhiOpndOcc *phiOpndOcc : defPhiOcc->phiOpnds) { + ResetDownsafe(phiOpndOcc); + } +} + +void SSAPre::ComputeDownsafe() const { + for (PhiOcc *phiOcc : phiOccs) { + if (!phiOcc->isDownsafe) { + // propagate not-Downsafe backward along use-def edges + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + ResetDownsafe(phiOpndOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after downsafe computation _______" << '\n'; + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->Dump(); + if (phiOcc->speculativeDownsafe) { + LogInfo::MapleLogger() << " spec_downsafe /"; + } + if (phiOcc->isDownsafe) { + LogInfo::MapleLogger() << " downsafe"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 2: rename ================ +static void PropagateSpeculativeDownsafe(PhiOcc *phiOcc) { + if (phiOcc->speculativeDownsafe) { + return; + } + phiOcc->isDownsafe = true; + phiOcc->speculativeDownsafe = true; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def->occTy == kAOccPhi) { + PhiOcc *nextPhiOcc = static_cast(phiOpndOcc->def); + if (nextPhiOcc->cgbb->GetLoop() != nullptr) { + PropagateSpeculativeDownsafe(nextPhiOcc); + } + } + } +} + +void SSAPre::Rename() { + std::stack occStack; + classCount = 0; + // iterate thru the occurrences in order of preorder traversal of dominator + // tree + for (Occ *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsDominate(dom, occ)) { + occStack.pop(); + } + switch (occ->occTy) { + case kAOccExit: + if (!occStack.empty()) { + Occ *topOcc = occStack.top(); + if (topOcc->occTy == kAOccPhi) { + PhiOcc *phiTopOcc = static_cast(topOcc); + if (!phiTopOcc->speculativeDownsafe) { + phiTopOcc->isDownsafe = false; + } + } + } + break; + case kAOccPhi: + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + case kAOccReal: { + if (occStack.empty()) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + Occ *topOcc = occStack.top(); + occ->classId = topOcc->classId; + if (topOcc->occTy == kAOccPhi) { + occStack.push(occ); + if (occ->cgbb->GetLoop() != nullptr) { + static_cast(topOcc)->isDownsafe = true; + static_cast(topOcc)->speculativeDownsafe = true; + } + } + break; + } + case kAOccPhiOpnd: { + if (occStack.empty()) { + // leave classId as 0 + break; + } + Occ *topOcc = occStack.top(); + occ->def = topOcc; + occ->classId = topOcc->classId; + if (topOcc->occTy == kAOccReal) { + static_cast(occ)->hasRealUse = true; + } + break; + } + default: + DEBUG_ASSERT(false, "Rename: unexpected type of occurrence"); + break; + } + } + // loop thru phiOccs to propagate speculativeDownsafe + for (PhiOcc *phiOcc : phiOccs) { + if (phiOcc->speculativeDownsafe) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def->occTy == kAOccPhi) { + PhiOcc *nextPhiOcc = static_cast(phiOpndOcc->def); + if (nextPhiOcc->cgbb->GetLoop() != nullptr) { + PropagateSpeculativeDownsafe(nextPhiOcc); + } + } + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after rename _______" << '\n'; + for (Occ *occ : allOccs) { + occ->Dump(); + if (occ->occTy == kAOccPhi) { + PhiOcc *phiOcc = static_cast(occ); + if (phiOcc->speculativeDownsafe) { + LogInfo::MapleLogger() << " spec_downsafe /"; + } + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 1: insert phis ================ + +// 
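+// A phi occ is needed exactly where paths holding real occurrences merge,
+// i.e. on the iterated dominance frontier of each real occurrence's BB.
+// Illustrative example (not from this module): real occs in bb1 and bb2 that
+// both branch into bb3 give DF(bb1) = DF(bb2) = {bb3}, so a phi occ is
+// placed at bb3.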
+// form phi occ based on the real occ in workCand->realOccs; result is
+// stored in phiDfns
+void SSAPre::FormPhis() {
+    for (Occ *occ : realOccs) {
+        GetIterDomFrontier(occ->cgbb, &phiDfns);
+    }
+}
+
+// form allOccs inclusive of real, phi, phiOpnd, exit occurrences;
+// form phiOccs containing only the phis
+void SSAPre::CreateSortedOccs() {
+    // form phiOpnd occs based on the preds of the phi occs; result is
+    // stored in phiOpndDfns
+    std::multiset<uint32> phiOpndDfns;
+    for (uint32 dfn : phiDfns) {
+        const BBId bbId = dom->GetDtPreOrderItem(dfn);
+        BB *cgbb = cgFunc->GetAllBBs()[bbId];
+        for (BB *pred : cgbb->GetPreds()) {
+            (void)phiOpndDfns.insert(dom->GetDtDfnItem(pred->GetId()));
+        }
+    }
+    std::unordered_map<uint32, std::forward_list<PhiOpndOcc*>> bb2PhiOpndMap;
+    MapleVector<Occ*>::iterator realOccIt = realOccs.begin();
+    MapleVector<ExitOcc*>::iterator exitOccIt = exitOccs.begin();
+    MapleSet<uint32>::iterator phiDfnIt = phiDfns.begin();
+    std::multiset<uint32>::iterator phiOpndDfnIt = phiOpndDfns.begin();
+    Occ *nextRealOcc = nullptr;
+    if (realOccIt != realOccs.end()) {
+        nextRealOcc = *realOccIt;
+    }
+    ExitOcc *nextExitOcc = nullptr;
+    if (exitOccIt != exitOccs.end()) {
+        nextExitOcc = *exitOccIt;
+    }
+    PhiOcc *nextPhiOcc = nullptr;
+    if (phiDfnIt != phiDfns.end()) {
+        nextPhiOcc = preMp->New<PhiOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiDfnIt)), preAllocator);
+    }
+    PhiOpndOcc *nextPhiOpndOcc = nullptr;
+    if (phiOpndDfnIt != phiOpndDfns.end()) {
+        nextPhiOpndOcc = preMp->New<PhiOpndOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiOpndDfnIt)));
+        auto it = bb2PhiOpndMap.find(dom->GetDtPreOrderItem(*phiOpndDfnIt));
+        if (it == bb2PhiOpndMap.end()) {
+            std::forward_list<PhiOpndOcc*> newlist = { nextPhiOpndOcc };
+            bb2PhiOpndMap[dom->GetDtPreOrderItem(*phiOpndDfnIt)] = newlist;
+        } else {
+            it->second.push_front(nextPhiOpndOcc);
+        }
+    }
+    Occ *pickedOcc = nullptr;  // the next picked occ in order of preorder traversal of dominator tree
+    do {
+        pickedOcc = nullptr;
+        if (nextPhiOcc != nullptr) {
+            pickedOcc = nextPhiOcc;
+        }
+        if (nextRealOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextRealOcc->cgbb->GetId()) <
+                                       dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) {
+            pickedOcc = nextRealOcc;
+        }
+        if (nextPhiOpndOcc != nullptr &&
+            (pickedOcc == nullptr || *phiOpndDfnIt < dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) {
+            pickedOcc = nextPhiOpndOcc;
+        }
+        if (nextExitOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextExitOcc->cgbb->GetId()) <
+                                       dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) {
+            pickedOcc = nextExitOcc;
+        }
+        if (pickedOcc != nullptr) {
+            allOccs.push_back(pickedOcc);
+            switch (pickedOcc->occTy) {
+                case kAOccReal: {
+                    // get the next real occ
+                    CHECK_FATAL(realOccIt != realOccs.end(), "iterator check");
+                    ++realOccIt;
+                    if (realOccIt != realOccs.end()) {
+                        nextRealOcc = *realOccIt;
+                    } else {
+                        nextRealOcc = nullptr;
+                    }
+                    break;
+                }
+                case kAOccExit: {
+                    CHECK_FATAL(exitOccIt != exitOccs.end(), "iterator check");
+                    ++exitOccIt;
+                    if (exitOccIt != exitOccs.end()) {
+                        nextExitOcc = *exitOccIt;
+                    } else {
+                        nextExitOcc = nullptr;
+                    }
+                    break;
+                }
+                case kAOccPhi: {
+                    phiOccs.push_back(static_cast<PhiOcc*>(pickedOcc));
+                    CHECK_FATAL(phiDfnIt != phiDfns.end(), "iterator check");
+                    ++phiDfnIt;
+                    if (phiDfnIt != phiDfns.end()) {
+                        nextPhiOcc = preMp->New<PhiOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiDfnIt)), preAllocator);
+                    } else {
+                        nextPhiOcc = nullptr;
+                    }
+                    break;
+                }
+                case kAOccPhiOpnd: {
+                    CHECK_FATAL(phiOpndDfnIt != phiOpndDfns.end(), "iterator check");
+                    ++phiOpndDfnIt;
+                    if (phiOpndDfnIt != phiOpndDfns.end()) {
+                        nextPhiOpndOcc = preMp->New<PhiOpndOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiOpndDfnIt)));
+                        auto it = bb2PhiOpndMap.find(dom->GetDtPreOrderItem(*phiOpndDfnIt));
+                        if (it == bb2PhiOpndMap.end()) {
+                            std::forward_list<PhiOpndOcc*> newlist = { nextPhiOpndOcc };
+                            bb2PhiOpndMap[dom->GetDtPreOrderItem(*phiOpndDfnIt)] = newlist;
+                        } else {
+                            it->second.push_front(nextPhiOpndOcc);
+                        }
+                    } else {
+                        nextPhiOpndOcc = nullptr;
+                    }
+                    break;
+                }
+                default:
+                    DEBUG_ASSERT(false, "CreateSortedOccs: unexpected occTy");
+                    break;
+            }
+        }
+    } while (pickedOcc != nullptr);
+    // initialize phiOpnd vector in each PhiOcc node and defPhiOcc in each PhiOpndOcc
+    for (PhiOcc *phiOcc : phiOccs) {
+        for (BB *pred : phiOcc->cgbb->GetPreds()) {
+            PhiOpndOcc *phiOpndOcc = bb2PhiOpndMap[pred->GetId()].front();
+            phiOcc->phiOpnds.push_back(phiOpndOcc);
+            phiOpndOcc->defPhiOcc = phiOcc;
+            bb2PhiOpndMap[pred->GetId()].pop_front();
+        }
+    }
+    if (enabledDebug) {
+        LogInfo::MapleLogger() << " _______ after phi insertion _______" << '\n';
+        for (Occ *occ : allOccs) {
+            occ->Dump();
+            LogInfo::MapleLogger() << '\n';
+        }
+    }
+}
+
+// ================ Step 0: Preparations ================
+
+void SSAPre::PropagateNotAnt(BB *bb, std::set<BB*> *visitedBBs) {
+    if (visitedBBs->count(bb) != 0) {
+        return;
+    }
+    visitedBBs->insert(bb);
+    if (workCand->occBBs.count(bb->GetId()) != 0) {
+        return;
+    }
+    fullyAntBBs[bb->GetId()] = false;
+    for (BB *predbb : bb->GetPreds()) {
+        PropagateNotAnt(predbb, visitedBBs);
+    }
+}
+
+void SSAPre::FormRealsNExits() {
+    std::set<BB*> visitedBBs;
+    if (asEarlyAsPossible) {
+        for (BB *cgbb : cgFunc->GetExitBBsVec()) {
+            if (!cgbb->IsUnreachable()) {
+                PropagateNotAnt(cgbb, &visitedBBs);
+            }
+        }
+    }
+
+    for (uint32 i = 0; i < dom->GetDtPreOrderSize(); i++) {
+        BBId bbid = dom->GetDtPreOrderItem(i);
+        BB *cgbb = cgFunc->GetAllBBs()[bbid];
+        if (asEarlyAsPossible) {
+            if (fullyAntBBs[cgbb->GetId()]) {
+                RealOcc *realOcc = preMp->New<RealOcc>(cgbb);
+                realOccs.push_back(realOcc);
+            }
+        } else {
+            if (workCand->occBBs.count(cgbb->GetId()) != 0) {
+                RealOcc *realOcc = preMp->New<RealOcc>(cgbb);
+                realOccs.push_back(realOcc);
+            }
+        }
+        if (!cgbb->IsUnreachable() && (cgbb->NumSuccs() == 0 || cgbb->GetKind() == BB::kBBReturn)) {
+            ExitOcc *exitOcc = preMp->New<ExitOcc>(cgbb);
+            exitOccs.push_back(exitOcc);
+        }
+    }
+    if (enabledDebug) {
+        LogInfo::MapleLogger() << "Placement Optimization for callee-save saves" << '\n';
+        LogInfo::MapleLogger() << "-----------------------------------------------" << '\n';
+        LogInfo::MapleLogger() << " _______ input _______" << '\n';
+        LogInfo::MapleLogger() << " occBBs: [";
+        for (uint32 id : workCand->occBBs) {
+            LogInfo::MapleLogger() << id << " ";
+        }
+        LogInfo::MapleLogger() << "]\n";
+    }
+}
+
+void SSAPre::ApplySSAPre() {
+    FormRealsNExits();
+    // #1 insert phis; results in allOccs and phiOccs
+    FormPhis();  // result put in the set phi_bbs
+    CreateSortedOccs();
+    // #2 rename
+    Rename();
+    if (!phiOccs.empty()) {
+        // #3 DownSafety
+        ComputeDownsafe();
+        // #4 CanBeAvail
+        ComputeCanBeAvail();
+        ComputeLater();
+    }
+    // #5 Finalize
+    Finalize();
+    if (!workCand->saveAtProlog) {
+        // #6 Code Motion
+        CodeMotion();
+    }
+}
+
+void DoSavePlacementOpt(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand) {
+    MemPool *tempMP = memPoolCtrler.NewMemPool("cg_ssa_pre", true);
+    SSAPre cgssapre(f, dom, tempMP, workCand, false /*asEarlyAsPossible*/, false /*enabledDebug*/);
+
+    cgssapre.ApplySSAPre();
+
+    memPoolCtrler.DeleteMemPool(tempMP);
+}
+
+}  // namespace maplebe
diff --git a/ecmascript/mapleall/maple_be/src/cg/cg_ssu_pre.cpp
b/ecmascript/mapleall/maple_be/src/cg/cg_ssu_pre.cpp new file mode 100644 index 0000000000000000000000000000000000000000..387325dfe545986eb4b206a9048b0c2ce705e46d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_ssu_pre.cpp @@ -0,0 +1,603 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cgfunc.h" +#include "cg_ssu_pre.h" + +namespace maplebe { + +// ================ Step 6: Code Motion ================ +void SSUPre::CodeMotion() { + // pass 1 only donig insertion + for (SOcc *occ : allOccs) { + if (occ->occTy != kSOccLambdaRes) { + continue; + } + SLambdaResOcc *lambdaResOcc = static_cast(occ); + if (lambdaResOcc->insertHere) { + workCand->restoreAtEntryBBs.insert(lambdaResOcc->cgbb->GetId()); + } + } + // pass 2 only doing deletion + for (SOcc *occ : realOccs) { + if (occ->occTy != kSOccReal) { + continue; + } + SRealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + if (realOcc->cgbb->IsWontExit()) { + workCand->restoreAtEpilog = true; + break; + } + workCand->restoreAtExitBBs.insert(realOcc->cgbb->GetId()); + } + } + if (enabledDebug) { + if (workCand->restoreAtEpilog) { + LogInfo::MapleLogger() << "Giving up because of restore inside infinite loop" << '\n'; + return; + } + LogInfo::MapleLogger() << " _______ output _______" << '\n'; + LogInfo::MapleLogger() << " restoreAtEntryBBs: ["; + for (uint32 id : workCand->restoreAtEntryBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n restoreAtExitBBs: ["; + for (uint32 id : workCand->restoreAtExitBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n\n"; + } +} + +// ================ Step 5: Finalize ================ +// for setting SRealOcc's redundant flag and SLambdaResOcc's insertHere flag +void SSUPre::Finalize() { + std::vector anticipatedDefVec(classCount + 1, nullptr); + // preorder traversal of post-dominator tree + for (SOcc *occ : allOccs) { + size_t classId = static_cast(occ->classId); + switch (occ->occTy) { + case kSOccLambda: { + auto *lambdaOcc = static_cast(occ); + if (lambdaOcc->WillBeAnt()) { + anticipatedDefVec[classId] = lambdaOcc; + } + break; + } + case kSOccReal: { + auto *realOcc = static_cast(occ); + if (anticipatedDefVec[classId] == nullptr || !anticipatedDefVec[classId]->IsPostDominate(pdom, occ)) { + realOcc->redundant = false; + anticipatedDefVec[classId] = realOcc; + } else { + realOcc->redundant = true; + } + break; + } + case kSOccLambdaRes: { + auto *lambdaResOcc = static_cast(occ); + const SLambdaOcc *lambdaOcc = lambdaResOcc->useLambdaOcc; + if (lambdaOcc->WillBeAnt()) { + if (lambdaResOcc->use == nullptr || (!lambdaResOcc->hasRealUse && + lambdaResOcc->use->occTy == kSOccLambda && + !static_cast(lambdaResOcc->use)->WillBeAnt())) { + // insert a store + if (lambdaResOcc->cgbb->GetPreds().size() != 1) { // critical edge + workCand->restoreAtEpilog = true; + break; + } + lambdaResOcc->insertHere = true; + } else { + lambdaResOcc->use = anticipatedDefVec[classId]; + } + } + break; + } + case 
kSOccEntry: + case kSOccKill: + break; + default: + DEBUG_ASSERT(false, "Finalize: unexpected occ type"); + break; + } + if (workCand->restoreAtEpilog) { + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after finalize _______" << '\n'; + if (workCand->restoreAtEpilog) { + LogInfo::MapleLogger() << "Giving up because of insertion at critical edge" << '\n'; + return; + } + for (SOcc *occ : allOccs) { + if (occ->occTy == kSOccReal) { + SRealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + occ->Dump(); + LogInfo::MapleLogger() << " non-redundant" << '\n'; + } + } else if (occ->occTy == kSOccLambdaRes) { + SLambdaResOcc *lambdaResOcc = static_cast(occ); + if (lambdaResOcc->insertHere) { + occ->Dump(); + LogInfo::MapleLogger() << " insertHere" << '\n'; + } + } + } + } +} + +// ================ Step 4: WillBeAnt Computation ================ + +void SSUPre::ResetCanBeAnt(SLambdaOcc *lambda) const { + lambda->isCanBeAnt = false; + // the following loop finds lambda's defs and reset them + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->use == lambda) { + if (!lambdaResOcc->hasRealUse && !lambdaOcc->isUpsafe && lambdaOcc->isCanBeAnt) { + ResetCanBeAnt(lambdaOcc); + } + } + } + } +} + +void SSUPre::ComputeCanBeAnt() const { + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (!lambdaOcc->isUpsafe && lambdaOcc->isCanBeAnt) { + bool existNullUse = false; + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use == nullptr) { + existNullUse = true; + break; + } + } + if (existNullUse) { + ResetCanBeAnt(lambdaOcc); + } + } + } +} + +void SSUPre::ResetEarlier(SLambdaOcc *lambda) const { + lambda->isEarlier = false; + // the following loop finds lambda's defs and reset them + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->use == lambda) { + if (lambdaOcc->isEarlier) { + ResetEarlier(lambdaOcc); + } + } + } + } +} + +void SSUPre::ComputeEarlier() const { + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->isEarlier = lambdaOcc->isCanBeAnt; + } + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (lambdaOcc->isEarlier) { + bool existNonNullUse = false; + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->hasRealUse) { + existNonNullUse = true; + break; + } + } + if (existNonNullUse) { + ResetEarlier(lambdaOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after earlier computation _______" << '\n'; + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->Dump(); + if (lambdaOcc->isCanBeAnt) { + LogInfo::MapleLogger() << " canbeant"; + } + if (lambdaOcc->isEarlier) { + LogInfo::MapleLogger() << " earlier"; + } + if (lambdaOcc->isCanBeAnt && !lambdaOcc->isEarlier) { + LogInfo::MapleLogger() << " will be ant"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 3: Upsafe Computation ================ +void SSUPre::ResetUpsafe(const SLambdaResOcc *lambdaRes) const { + if (lambdaRes->hasRealUse) { + return; + } + SOcc *useOcc = lambdaRes->use; + if (useOcc == nullptr || useOcc->occTy != kSOccLambda) { + return; + } + auto *useLambdaOcc = static_cast(useOcc); + if (!useLambdaOcc->isUpsafe) { + return; + } + useLambdaOcc->isUpsafe = false; + for (SLambdaResOcc *lambdaResOcc : useLambdaOcc->lambdaRes) { + 
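+        // Not-upsafe spreads transitively: every lambda reached through a
+        // result without a real use is reset as well, until a real use or an
+        // already-reset lambda terminates the recursion.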
ResetUpsafe(lambdaResOcc); + } +} + +void SSUPre::ComputeUpsafe() const { + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (!lambdaOcc->isUpsafe) { + // propagate not-upsafe forward along def-use edges + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + ResetUpsafe(lambdaResOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after upsafe computation _______" << '\n'; + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->Dump(); + if (lambdaOcc->isUpsafe) { + LogInfo::MapleLogger() << " upsafe"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 2: rename ================ +void SSUPre::Rename() { + std::stack occStack; + classCount = 0; + // iterate thru the occurrences in order of preorder traversal of + // post-dominator tree + for (SOcc *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsPostDominate(pdom, occ)) { + occStack.pop(); + } + switch (occ->occTy) { + case kSOccKill: + if (!occStack.empty()) { + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccLambda) { + static_cast(topOcc)->isUpsafe = false; + } + } + occStack.push(occ); + break; + case kSOccEntry: + if (!occStack.empty()) { + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccLambda) { + static_cast(topOcc)->isUpsafe = false; + } + } + break; + case kSOccLambda: + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + case kSOccReal: { + if (occStack.empty()) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccKill) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + DEBUG_ASSERT(topOcc->occTy == kSOccLambda || topOcc->occTy == kSOccReal, + "Rename: unexpected top-of-stack occ"); + occ->classId = topOcc->classId; + if (topOcc->occTy == kSOccLambda) { + occStack.push(occ); + } + break; + } + case kSOccLambdaRes: { + if (occStack.empty()) { + // leave classId as 0 + break; + } + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccKill) { + // leave classId as 0 + break; + } + DEBUG_ASSERT(topOcc->occTy == kSOccLambda || topOcc->occTy == kSOccReal, + "Rename: unexpected top-of-stack occ"); + occ->use = topOcc; + occ->classId = topOcc->classId; + if (topOcc->occTy == kSOccReal) { + static_cast(occ)->hasRealUse = true; + } + break; + } + default: + DEBUG_ASSERT(false, "Rename: unexpected type of occurrence"); + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after rename _______" << '\n'; + for (SOcc *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 1: insert lambdas ================ + +// form lambda occ based on the real occ in workCand->realOccs; result is +// stored in lambdaDfns +void SSUPre::FormLambdas() { + for (SOcc *occ : realOccs) { + if (occ->occTy == kSOccKill) { + continue; + } + GetIterPdomFrontier(occ->cgbb, &lambdaDfns); + } +} + +// form allOccs inclusive of real, kill, lambda, lambdaRes, entry occurrences; +// form lambdaOccs containing only the lambdas +void SSUPre::CreateSortedOccs() { + // form lambdaRes occs based on the succs of the lambda occs; result is + // stored in lambdaResDfns + std::multiset lambdaResDfns; + for (uint32 dfn : lambdaDfns) { + const BBId bbId = pdom->GetPdtPreOrderItem(dfn); + BB *cgbb = cgFunc->GetAllBBs()[bbId]; + for (BB *succ : cgbb->GetSuccs()) { + (void)lambdaResDfns.insert(pdom->GetPdtDfnItem(succ->GetId())); + } + } + 
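// Editor's note: the loop below merges four streams of occurrences (real/kill, entry,
// lambda, lambdaRes), each already ordered by preorder DFN in the post-dominator tree,
// by repeatedly picking the stream whose head has the smallest DFN. A minimal standalone
// sketch of that merge pattern, with hypothetical types, guarded out of the build:
#if 0
#include <cstdint>
#include <vector>
struct Occ {
    uint32_t dfn;  // preorder number in the post-dominator tree
};
// Merge two DFN-sorted streams into one DFN-sorted stream.
static std::vector<Occ> MergeByDfn(const std::vector<Occ> &a, const std::vector<Occ> &b)
{
    std::vector<Occ> out;
    size_t i = 0;
    size_t j = 0;
    while (i < a.size() || j < b.size()) {
        // An exhausted stream never wins; otherwise take the smaller head.
        bool takeA = (j == b.size()) || (i < a.size() && a[i].dfn <= b[j].dfn);
        out.push_back(takeA ? a[i++] : b[j++]);
    }
    return out;
}
#endif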
std::unordered_map> bb2LambdaResMap; + MapleVector::iterator realOccIt = realOccs.begin(); + MapleVector::iterator entryOccIt = entryOccs.begin(); + MapleSet::iterator lambdaDfnIt = lambdaDfns.begin(); + MapleSet::iterator lambdaResDfnIt = lambdaResDfns.begin(); + SOcc *nextRealOcc = nullptr; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } + SEntryOcc *nextEntryOcc = nullptr; + if (entryOccIt != entryOccs.end()) { + nextEntryOcc = *entryOccIt; + } + SLambdaOcc *nextLambdaOcc = nullptr; + if (lambdaDfnIt != lambdaDfns.end()) { + nextLambdaOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaDfnIt)), spreAllocator); + } + SLambdaResOcc *nextLambdaResOcc = nullptr; + if (lambdaResDfnIt != lambdaResDfns.end()) { + nextLambdaResOcc = spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaResDfnIt))); + auto it = bb2LambdaResMap.find(pdom->GetPdtPreOrderItem(*lambdaResDfnIt)); + if (it == bb2LambdaResMap.end()) { + std::forward_list newlist = { nextLambdaResOcc }; + bb2LambdaResMap[pdom->GetPdtPreOrderItem(*lambdaResDfnIt)] = newlist; + } else { + it->second.push_front(nextLambdaResOcc); + } + } + SOcc *pickedOcc = nullptr; // the next picked occ in order of preorder traversal of post-dominator tree + do { + pickedOcc = nullptr; + if (nextLambdaOcc != nullptr) { + pickedOcc = nextLambdaOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || pdom->GetPdtDfnItem(nextRealOcc->cgbb->GetId()) < + pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextLambdaResOcc != nullptr && + (pickedOcc == nullptr || *lambdaResDfnIt < pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextLambdaResOcc; + } + if (nextEntryOcc != nullptr && (pickedOcc == nullptr || pdom->GetPdtDfnItem(nextEntryOcc->cgbb->GetId()) < + pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextEntryOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->occTy) { + case kSOccReal: + case kSOccKill: { + // get the next real/kill occ + CHECK_FATAL(realOccIt != realOccs.end(), "iterator check"); + ++realOccIt; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kSOccEntry: { + CHECK_FATAL(entryOccIt != entryOccs.end(), "iterator check"); + ++entryOccIt; + if (entryOccIt != entryOccs.end()) { + nextEntryOcc = *entryOccIt; + } else { + nextEntryOcc = nullptr; + } + break; + } + case kSOccLambda: { + lambdaOccs.push_back(static_cast(pickedOcc)); + CHECK_FATAL(lambdaDfnIt != lambdaDfns.end(), "iterator check"); + ++lambdaDfnIt; + if (lambdaDfnIt != lambdaDfns.end()) { + nextLambdaOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaDfnIt)), spreAllocator); + } else { + nextLambdaOcc = nullptr; + } + break; + } + case kSOccLambdaRes: { + CHECK_FATAL(lambdaResDfnIt != lambdaResDfns.end(), "iterator check"); + ++lambdaResDfnIt; + if (lambdaResDfnIt != lambdaResDfns.end()) { + nextLambdaResOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaResDfnIt))); + auto it = bb2LambdaResMap.find(pdom->GetPdtPreOrderItem(*lambdaResDfnIt)); + if (it == bb2LambdaResMap.end()) { + std::forward_list newlist = { nextLambdaResOcc }; + bb2LambdaResMap[pdom->GetPdtPreOrderItem(*lambdaResDfnIt)] = newlist; + } else { + it->second.push_front(nextLambdaResOcc); + } + } else { + nextLambdaResOcc = nullptr; + } + break; + } + default: + DEBUG_ASSERT(false, "CreateSortedOccs: unexpected occTy"); + 
break; + } + } + } while (pickedOcc != nullptr); + // initialize lambdaRes vector in each SLambdaOcc node and useLambdaOcc in each SLambdaResOcc + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (BB *succ : lambdaOcc->cgbb->GetSuccs()) { + SLambdaResOcc *lambdaResOcc = bb2LambdaResMap[succ->GetId()].front(); + lambdaOcc->lambdaRes.push_back(lambdaResOcc); + lambdaResOcc->useLambdaOcc = lambdaOcc; + bb2LambdaResMap[succ->GetId()].pop_front(); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after lambda insertion _______" << '\n'; + for (SOcc *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 0: Preparations ================ + +void SSUPre::PropagateNotAvail(BB *bb, std::set *visitedBBs) { + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + if (workCand->occBBs.count(bb->GetId()) != 0 || + workCand->saveBBs.count(bb->GetId()) != 0) { + return; + } + fullyAvailBBs[bb->GetId()] = false; + for (BB *succbb : bb->GetSuccs()) { + PropagateNotAvail(succbb, visitedBBs); + } +} + +void SSUPre::FormReals() { + if (!asLateAsPossible) { + for (uint32 i = 0; i < pdom->GetPdtPreOrderSize(); i++) { + BBId bbid = pdom->GetPdtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (workCand->saveBBs.count(cgbb->GetId()) != 0) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + SKillOcc *killOcc = spreMp->New(cgbb); + realOccs.push_back(killOcc); + } else if (workCand->occBBs.count(cgbb->GetId()) != 0) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + } + } + } else { + std::set visitedBBs; + fullyAvailBBs[cgFunc->GetCommonExitBB()->GetId()] = false; + PropagateNotAvail(cgFunc->GetFirstBB(), &visitedBBs); + for (uint32 i = 0; i < pdom->GetPdtPreOrderSize(); i++) { + BBId bbid = pdom->GetPdtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (fullyAvailBBs[cgbb->GetId()]) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + if (workCand->saveBBs.count(cgbb->GetId()) != 0) { + SKillOcc *killOcc = spreMp->New(cgbb); + realOccs.push_back(killOcc); + } + } + } + } + + if (enabledDebug) { + LogInfo::MapleLogger() << "Placement Optimization for callee-save restores" << '\n'; + LogInfo::MapleLogger() << "-----------------------------------------------" << '\n'; + LogInfo::MapleLogger() << " _______ input _______" << '\n'; + LogInfo::MapleLogger() << " occBBs: ["; + for (uint32 id : workCand->occBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n saveBBs: ["; + for (uint32 id : workCand->saveBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } +} + +void SSUPre::ApplySSUPre() { + FormReals(); + // #1 insert lambdas; results in allOccs and lambdaOccs + FormLambdas(); // result put in the set lambda_bbs + CreateSortedOccs(); + // #2 rename + Rename(); + if (!lambdaOccs.empty()) { + // #3 UpSafety + ComputeUpsafe(); + // #4 CanBeAnt + ComputeCanBeAnt(); + ComputeEarlier(); + } + // #5 Finalize + Finalize(); + if (!workCand->restoreAtEpilog) { + // #6 Code Motion + CodeMotion(); + } +} + +void DoRestorePlacementOpt(CGFunc *f, PostDomAnalysis *pdom, SPreWorkCand *workCand) { + MemPool *tempMP = memPoolCtrler.NewMemPool("cg_ssu_pre", true); + SSUPre cgssupre(f, pdom, tempMP, workCand, true/*asLateAsPossible*/, false/*enabledDebug*/); + + cgssupre.ApplySSUPre(); + + memPoolCtrler.DeleteMemPool(tempMP); +} + +} // namespace maplebe diff --git 
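// Editor's note: a hedged usage sketch for DoRestorePlacementOpt above. The field names
// come from this file; how cgFunc and pdomInfo are obtained is assumed context rather
// than a documented API, and the sketch itself is not part of the patch.
#if 0
void PlaceCalleeSaveRestores(CGFunc &cgFunc, PostDomAnalysis &pdomInfo, SPreWorkCand &cand)
{
    // Inputs: cand.occBBs  = blocks that use the callee-saved value,
    //         cand.saveBBs = blocks where the value is (re)saved, i.e. killed.
    DoRestorePlacementOpt(&cgFunc, &pdomInfo, &cand);
    if (cand.restoreAtEpilog) {
        // The optimization gave up (restore inside an infinite loop, or an
        // insertion on a critical edge); fall back to restoring in the epilog.
        return;
    }
    // Outputs: place restores at the entries of cand.restoreAtEntryBBs and at
    //          the exits of cand.restoreAtExitBBs.
}
#endif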
a/ecmascript/mapleall/maple_be/src/cg/cg_validbit_opt.cpp b/ecmascript/mapleall/maple_be/src/cg/cg_validbit_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b026001346ab3ccef91a5aa4154a7665c1cffb8d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cg_validbit_opt.cpp @@ -0,0 +1,166 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_validbit_opt.h" +#include "mempool.h" +#include "aarch64_validbit_opt.h" + +namespace maplebe { +Insn *ValidBitPattern::GetDefInsn(const RegOperand &useReg) { + if (!useReg.IsSSAForm()) { + return nullptr; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + DEBUG_ASSERT(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + CHECK_FATAL(!useVersion->IsDeleted(), "deleted version"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + return defInfo == nullptr ? nullptr : defInfo->GetInsn(); +} + +InsnSet ValidBitPattern::GetAllUseInsn(const RegOperand &defReg) { + InsnSet allUseInsn; + if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { + VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); + CHECK_FATAL(defVersion != nullptr, "useVRegVersion must not be null based on ssa"); + for (auto insnInfo : defVersion->GetAllUseInsns()) { + Insn *currInsn = insnInfo.second->GetInsn(); + allUseInsn.emplace(currInsn); + } + } + return allUseInsn; +} + +void ValidBitPattern::DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (!prevInsns.empty()) { + if ((replacedInsn == nullptr) && (newInsn == nullptr)) { + LogInfo::MapleLogger() << "======= RemoveInsns : {\n"; + } else { + LogInfo::MapleLogger() << "======= PrevInsns : {\n"; + } + for (auto *prevInsn : prevInsns) { + if (prevInsn != nullptr) { + LogInfo::MapleLogger() << "[primal form] "; + prevInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*prevInsn); + } + } + } + LogInfo::MapleLogger() << "}\n"; + } + if (replacedInsn != nullptr) { + LogInfo::MapleLogger() << "======= OldInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + replacedInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*replacedInsn); + } + } + if (newInsn != nullptr) { + LogInfo::MapleLogger() << "======= NewInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + newInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*newInsn); + } + } +} + +void ValidBitOpt::RectifyValidBitNum() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + SetValidBits(*insn); + } + } + bool iterate; + /* Use inverse postorder to converge with minimal iterations */ + do { + iterate = false; + MapleVector reversePostOrder = 
ssaInfo->GetReversePostOrder(); + for (uint32 bbId : reversePostOrder) { + BB *bb = cgFunc->GetBBFromID(bbId); + FOR_BB_INSNS(insn, bb) { + if (!insn->IsPhi()) { + continue; + } + bool change = SetPhiValidBits(*insn); + if (change) { + /* if vb changes once, iterate. */ + iterate = true; + } + } + } + } while (iterate); +} + +void ValidBitOpt::RecoverValidBitNum() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + continue; + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->OpndIsDef(i) && insn->GetOperand(i).IsRegister()) { + auto &dstOpnd = static_cast(insn->GetOperand(i)); + dstOpnd.SetValidBitsNum(dstOpnd.GetSize()); + } + } + } + } +} + +void ValidBitOpt::Run() { + /* + * Set validbit of regOpnd before optimization + */ + RectifyValidBitNum(); + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoOpt(*bb, *insn); + } + } + /* + * Recover validbit of regOpnd after optimization + */ + RecoverValidBitNum(); +} + +bool CgValidBitOpt::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CHECK_FATAL(ssaInfo != nullptr, "Get ssaInfo failed"); + auto *vbOpt = f.GetCG()->CreateValidBitOpt(*GetPhaseMemPool(), f, *ssaInfo); + CHECK_FATAL(vbOpt != nullptr, "vbOpt instance create failed"); + vbOpt->Run(); + return true; +} + +void CgValidBitOpt::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgValidBitOpt, cgvalidbitopt) +} /* namespace maplebe */ + diff --git a/ecmascript/mapleall/maple_be/src/cg/cgbb.cpp b/ecmascript/mapleall/maple_be/src/cg/cgbb.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4d477e8107dcd35ecadf9b3be5080470f2b46251 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cgbb.cpp @@ -0,0 +1,553 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cgbb.h" +#include "cgfunc.h" +#include "loop.h" + +namespace maplebe { +constexpr uint32 kCondBrNum = 2; +constexpr uint32 kSwitchCaseNum = 5; + +const std::string BB::bbNames[BB::kBBLast] = { + "BB_ft", + "BB_if", + "BB_goto", + "BB_igoto", + "BB_ret", + "BB_intrinsic", + "BB_rangegoto", + "BB_throw" +}; + +Insn *BB::InsertInsnBefore(Insn &existing, Insn &newInsn) { + Insn *pre = existing.GetPrev(); + newInsn.SetPrev(pre); + newInsn.SetNext(&existing); + existing.SetPrev(&newInsn); + if (pre != nullptr) { + pre->SetNext(&newInsn); + } + if (&existing == firstInsn) { + firstInsn = &newInsn; + } + newInsn.SetBB(this); + return &newInsn; +} + +Insn *BB::InsertInsnAfter(Insn &existing, Insn &newInsn) { + newInsn.SetPrev(&existing); + newInsn.SetNext(existing.GetNext()); + existing.SetNext(&newInsn); + if (&existing == lastInsn) { + lastInsn = &newInsn; + } else if (newInsn.GetNext()) { + newInsn.GetNext()->SetPrev(&newInsn); + } + newInsn.SetBB(this); + internalFlag1++; + return &newInsn; +} + +void BB::ReplaceInsn(Insn &insn, Insn &newInsn) { + if (insn.IsAccessRefField()) { + newInsn.MarkAsAccessRefField(true); + } + if (insn.GetDoNotRemove()) { + newInsn.SetDoNotRemove(true); + } + newInsn.SetPrev(insn.GetPrev()); + newInsn.SetNext(insn.GetNext()); + if (&insn == lastInsn) { + lastInsn = &newInsn; + } else if (newInsn.GetNext() != nullptr) { + newInsn.GetNext()->SetPrev(&newInsn); + } + if (firstInsn == &insn) { + firstInsn = &newInsn; + } else if (newInsn.GetPrev() != nullptr) { + newInsn.GetPrev()->SetNext(&newInsn); + } + newInsn.SetComment(insn.GetComment()); + newInsn.SetBB(this); +} + +void BB::RemoveInsn(Insn &insn) { + if ((firstInsn == &insn) && (lastInsn == &insn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = insn.GetNext(); + } else if (lastInsn == &insn) { + lastInsn = insn.GetPrev(); + } + /* remove insn from lir list */ + Insn *prevInsn = insn.GetPrev(); + Insn *nextInsn = insn.GetNext(); + if (prevInsn != nullptr) { + prevInsn->SetNext(nextInsn); + } + if (nextInsn != nullptr) { + nextInsn->SetPrev(prevInsn); + } +} + +void BB::RemoveInsnPair(Insn &insn, const Insn &nextInsn) { + DEBUG_ASSERT(insn.GetNext() == &nextInsn, "next_insn is supposed to follow insn"); + DEBUG_ASSERT(nextInsn.GetPrev() == &insn, "next_insn is supposed to follow insn"); + if ((firstInsn == &insn) && (lastInsn == &nextInsn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = nextInsn.GetNext(); + } else if (lastInsn == &nextInsn) { + lastInsn = insn.GetPrev(); + } + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(nextInsn.GetNext()); + } + if (nextInsn.GetNext() != nullptr) { + nextInsn.GetNext()->SetPrev(insn.GetPrev()); + } +} + +/* Remove insns in this bb from insn1 to insn2. 
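 * Editor's note: the removal routines above and below all maintain the same
 * doubly-linked invariants. A standalone sketch of the underlying splice, with a
 * hypothetical Node type standing in for Insn (not part of the patch):
 */
#if 0
struct Node {
    Node *prev = nullptr;
    Node *next = nullptr;
};
// Unlink the inclusive range [first, last] from its list, fixing both neighbours.
static void UnlinkRange(Node &first, Node &last)
{
    if (first.prev != nullptr) {
        first.prev->next = last.next;
    }
    if (last.next != nullptr) {
        last.next->prev = first.prev;
    }
    first.prev = nullptr;
    last.next = nullptr;
}
#endif
/* (editor sketch ends; the original comment resumes)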
*/ +void BB::RemoveInsnSequence(Insn &insn, const Insn &nextInsn) { + DEBUG_ASSERT(insn.GetBB() == this, "remove insn sequence in one bb"); + DEBUG_ASSERT(nextInsn.GetBB() == this, "remove insn sequence in one bb"); + if ((firstInsn == &insn) && (lastInsn == &nextInsn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = nextInsn.GetNext(); + } else if (lastInsn == &nextInsn) { + lastInsn = insn.GetPrev(); + } + + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(nextInsn.GetNext()); + } + if (nextInsn.GetNext() != nullptr) { + nextInsn.GetNext()->SetPrev(insn.GetPrev()); + } +} + +/* append all insns from bb into this bb */ +void BB::AppendBBInsns(BB &bb) { + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + if (firstInsn != nullptr) { + FOR_BB_INSNS(i, &bb) { + i->SetBB(this); + } + } + return; + } + if ((bb.firstInsn == nullptr) || (bb.lastInsn == nullptr)) { + return; + } + FOR_BB_INSNS_SAFE(insn, &bb, nextInsn) { + AppendInsn(*insn); + } +} + +/* prepend all insns from bb into this bb */ +void BB::InsertAtBeginning(BB &bb) { + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + bb.lastInsn->SetNext(firstInsn); + firstInsn->SetPrev(bb.lastInsn); + firstInsn = bb.firstInsn; + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* append all insns from bb into this bb */ +void BB::InsertAtEnd(BB &bb) { + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + bb.firstInsn->SetPrev(lastInsn); + lastInsn->SetNext(bb.firstInsn); + lastInsn = bb.lastInsn; + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* Insert all insns from bb into this bb before the last instr */ +void BB::InsertAtEndMinus1(BB &bb) { + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + if (NumInsn() == 1) { + InsertAtBeginning(bb); + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + /* Add between prevLast and lastInsn */ + Insn *prevLast = lastInsn->GetPrev(); + bb.firstInsn->SetPrev(prevLast); + prevLast->SetNext(bb.firstInsn); + lastInsn->SetPrev(bb.lastInsn); + bb.lastInsn->SetNext(lastInsn); + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* Number of instructions excluding DbgInsn and comments */ +int32 BB::NumInsn() const { + int32 bbSize = 0; + FOR_BB_INSNS_CONST(i, this) { + if (i->IsImmaterialInsn() || i->IsDbgInsn()) { + continue; + } + ++bbSize; + } + return bbSize; +} + +bool BB::IsInPhiList(regno_t regNO) { + for (auto phiInsnIt : phiInsnList) { + Insn *phiInsn = phiInsnIt.second; + if (phiInsn == nullptr) { + continue; + } + auto &phiListOpnd = static_cast(phiInsn->GetOperand(kInsnSecondOpnd)); + for (auto phiListIt : phiListOpnd.GetOperands()) { + RegOperand *phiUseOpnd = phiListIt.second; + if (phiUseOpnd == nullptr) { + continue; + } + if (phiUseOpnd->GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +bool BB::IsInPhiDef(regno_t regNO) { + for (auto phiInsnIt : phiInsnList) { + Insn *phiInsn = phiInsnIt.second; + if (phiInsn == nullptr) { + continue; + } + auto &phiDefOpnd = static_cast(phiInsn->GetOperand(kInsnFirstOpnd)); + if 
(phiDefOpnd.GetRegisterNumber() == regNO) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool BB::HasCriticalEdge() {
+    constexpr int minPredsNum = 2;
+    if (preds.size() < minPredsNum) {
+        return false;
+    }
+    for (BB *pred : preds) {
+        if (pred->GetKind() == BB::kBBGoto || pred->GetKind() == BB::kBBIgoto) {
+            continue;
+        }
+        if (pred->GetSuccs().size() > 1) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void BB::Dump() const {
+    LogInfo::MapleLogger() << "=== BB " << this << " <" << GetKindName();
+    if (labIdx) {
+        LogInfo::MapleLogger() << "[labeled with " << labIdx << "]";
+        if (labelTaken) {
+            LogInfo::MapleLogger() << " taken";
+        }
+    }
+    LogInfo::MapleLogger() << "> <" << id << "> ";
+    if (isCleanup) {
+        LogInfo::MapleLogger() << "[is_cleanup] ";
+    }
+    if (unreachable) {
+        LogInfo::MapleLogger() << "[unreachable] ";
+    }
+    LogInfo::MapleLogger() << "frequency:" << frequency << "===\n";
+
+    Insn *insn = firstInsn;
+    while (insn != nullptr) {
+        insn->Dump();
+        insn = insn->GetNext();
+    }
+}
+
+bool BB::IsCommentBB() const {
+    if (GetKind() != kBBFallthru) {
+        return false;
+    }
+    FOR_BB_INSNS_CONST(insn, this) {
+        if (insn->IsMachineInstruction()) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/* return true if bb has no real insns. */
+bool BB::IsEmptyOrCommentOnly() const {
+    return (IsEmpty() || IsCommentBB());
+}
+
+bool BB::IsSoloGoto() const {
+    if (GetKind() != kBBGoto) {
+        return false;
+    }
+    if (GetHasCfi()) {
+        return false;
+    }
+    FOR_BB_INSNS_CONST(insn, this) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        return (insn->IsUnCondBranch());
+    }
+    return false;
+}
+
+BB *BB::GetValidPrev() {
+    BB *pre = GetPrev();
+    while (pre != nullptr && (pre->IsEmptyOrCommentOnly() || pre->IsUnreachable())) {
+        pre = pre->GetPrev();
+    }
+    return pre;
+}
+
+bool Bfs::AllPredBBVisited(const BB &bb, long &level) const {
+    bool isAllPredsVisited = true;
+    for (const auto *predBB : bb.GetPreds()) {
+        /* See if pred bb is a loop back edge */
+        bool isBackEdge = false;
+        for (const auto *loopBB : predBB->GetLoopSuccs()) {
+            if (loopBB == &bb) {
+                isBackEdge = true;
+                break;
+            }
+        }
+        if (!isBackEdge && !visitedBBs[predBB->GetId()]) {
+            isAllPredsVisited = false;
+            break;
+        }
+        level = std::max(level, predBB->GetInternalFlag2());
+    }
+    for (const auto *predEhBB : bb.GetEhPreds()) {
+        bool isBackEdge = false;
+        for (const auto *loopBB : predEhBB->GetLoopSuccs()) {
+            if (loopBB == &bb) {
+                isBackEdge = true;
+                break;
+            }
+        }
+        if (!isBackEdge && !visitedBBs[predEhBB->GetId()]) {
+            isAllPredsVisited = false;
+            break;
+        }
+        level = std::max(level, predEhBB->GetInternalFlag2());
+    }
+    return isAllPredsVisited;
+}
+
+/*
+ * During live interval construction, a chain of bbs in which each bb has only one
+ * predecessor and/or one successor forms a straight line. Such a chain can be
+ * treated as a single large bb when computing live intervals. This prevents the
+ * live intervals of registers from being extended unnecessarily by bbs interleaved
+ * from other paths.
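 * A minimal, editor-added sketch of that chaining idea follows; it reuses the BB
 * accessors visible in this file but is illustrative only and excluded from the build.
 */
#if 0
// Follow the unique successor while it has a unique predecessor, collecting the
// blocks of one straight-line region.
static std::vector<BB*> CollectStraightLineChain(BB &start)
{
    std::vector<BB*> chain{&start};
    BB *cur = &start;
    while (cur->GetSuccs().size() == 1) {
        BB *next = cur->GetSuccs().front();
        if (next->GetPreds().size() != 1) {
            break;  // next merges several paths, so the straight line ends here
        }
        chain.push_back(next);
        cur = next;
    }
    return chain;
}
#endif
/* (editor sketch ends; the original comment resumes)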
+ */ +BB *Bfs::MarkStraightLineBBInBFS(BB *bb) { + while (true) { + if ((bb->GetSuccs().size() != 1) || !bb->GetEhSuccs().empty()) { + break; + } + BB *sbb = bb->GetSuccs().front(); + if (visitedBBs[sbb->GetId()]) { + break; + } + if ((sbb->GetPreds().size() != 1) || !sbb->GetEhPreds().empty()) { + break; + } + sortedBBs.push_back(sbb); + visitedBBs[sbb->GetId()] = true; + sbb->SetInternalFlag2(bb->GetInternalFlag2() + 1); + bb = sbb; + } + return bb; +} + +BB *Bfs::SearchForStraightLineBBs(BB &bb) { + if ((bb.GetSuccs().size() != kCondBrNum) || bb.GetEhSuccs().empty()) { + return &bb; + } + BB *sbb1 = bb.GetSuccs().front(); + BB *sbb2 = bb.GetSuccs().back(); + size_t predSz1 = sbb1->GetPreds().size(); + size_t predSz2 = sbb2->GetPreds().size(); + BB *candidateBB = nullptr; + if ((predSz1 == 1) && (predSz2 > kSwitchCaseNum)) { + candidateBB = sbb1; + } else if ((predSz2 == 1) && (predSz1 > kSwitchCaseNum)) { + candidateBB = sbb2; + } else { + return &bb; + } + DEBUG_ASSERT(candidateBB->GetId() < visitedBBs.size(), "index out of range in RA::SearchForStraightLineBBs"); + if (visitedBBs[candidateBB->GetId()]) { + return &bb; + } + if (!candidateBB->GetEhPreds().empty()) { + return &bb; + } + if (candidateBB->GetSuccs().size() != 1) { + return &bb; + } + + sortedBBs.push_back(candidateBB); + visitedBBs[candidateBB->GetId()] = true; + return MarkStraightLineBBInBFS(candidateBB); +} + +void Bfs::BFS(BB &curBB) { + std::queue workList; + workList.push(&curBB); + DEBUG_ASSERT(curBB.GetId() < cgfunc->NumBBs(), "RA::BFS visitedBBs overflow"); + DEBUG_ASSERT(curBB.GetId() < visitedBBs.size(), "index out of range in RA::BFS"); + visitedBBs[curBB.GetId()] = true; + do { + BB *bb = workList.front(); + sortedBBs.push_back(bb); + DEBUG_ASSERT(bb->GetId() < cgfunc->NumBBs(), "RA::BFS visitedBBs overflow"); + visitedBBs[bb->GetId()] = true; + workList.pop(); + /* Look for straight line bb */ + bb = MarkStraightLineBBInBFS(bb); + /* Look for an 'if' followed by some straight-line bb */ + bb = SearchForStraightLineBBs(*bb); + for (auto *ibb : bb->GetSuccs()) { + /* See if there are unvisited predecessor */ + if (visitedBBs[ibb->GetId()]) { + continue; + } + long prevLevel = 0; + if (AllPredBBVisited(*ibb, prevLevel)) { + ibb->SetInternalFlag2(prevLevel + 1); + workList.push(ibb); + DEBUG_ASSERT(ibb->GetId() < cgfunc->NumBBs(), "GCRA::BFS visitedBBs overflow"); + visitedBBs[ibb->GetId()] = true; + } + } + } while (!workList.empty()); +} + +void Bfs::ComputeBlockOrder() { + visitedBBs.clear(); + sortedBBs.clear(); + visitedBBs.resize(cgfunc->NumBBs()); + for (uint32 i = 0; i < cgfunc->NumBBs(); ++i) { + visitedBBs[i] = false; + } + BB *cleanupBB = nullptr; + FOR_ALL_BB(bb, cgfunc) { + bb->SetInternalFlag1(0); + bb->SetInternalFlag2(1); + if (bb->GetFirstStmt() == cgfunc->GetCleanupLabel()) { + cleanupBB = bb; + } + } + for (BB *bb = cleanupBB; bb != nullptr; bb = bb->GetNext()) { + bb->SetInternalFlag1(1); + } + + bool changed; + size_t sortedCnt = 0; + bool done = false; + do { + changed = false; + FOR_ALL_BB(bb, cgfunc) { + if (bb->GetInternalFlag1() == 1) { + continue; + } + if (visitedBBs[bb->GetId()]) { + continue; + } + changed = true; + long prevLevel = 0; + if (AllPredBBVisited(*bb, prevLevel)) { + bb->SetInternalFlag2(prevLevel + 1); + BFS(*bb); + } + } + /* Make sure there is no infinite loop. 
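 * Editor's note: below, sortedCnt remembers the count from the previous pass; a pass
 * that sorts no new blocks is tolerated once and reported as an error the second time.
 * A condensed, editor-added sketch of that progress-or-stop pattern (hypothetical names):
 */
#if 0
size_t prevSorted = sorted.size();
bool sawStuckPass = false;
while (WorkRemains()) {            // analogous to `changed` above
    RunOnePass();                  // may append to `sorted`
    if (sorted.size() == prevSorted) {
        if (sawStuckPass) {
            ReportStuckOrdering();  // second pass with no progress: flag it loudly
        }
        sawStuckPass = true;       // first stuck pass is allowed
    }
    prevSorted = sorted.size();
}
#endif
/* (editor sketch ends; the original comment resumes)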
*/ + if (sortedCnt == sortedBBs.size()) { + if (!done) { + done = true; + } else { + LogInfo::MapleLogger() << "Error: RA BFS loop " << sortedCnt << " in func " << cgfunc->GetName() << "\n"; + } + } + sortedCnt = sortedBBs.size(); + } while (changed); + + for (BB *bb = cleanupBB; bb != nullptr; bb = bb->GetNext()) { + sortedBBs.push_back(bb); + } +} + +void CgBBSort::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.SetPreservedAll(); +} + +bool CgBBSort::PhaseRun(CGFunc &f) { + MemPool *memPool = GetPhaseMemPool(); + bfs = memPool->New(f, *memPool); + CHECK_FATAL(bfs != nullptr, "NIY, ptr null check."); + bfs->ComputeBlockOrder(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgBBSort, bbsort) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/cgfunc.cpp b/ecmascript/mapleall/maple_be/src/cg/cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3ee477cc7e367c0e848e0e4302797a6b79b22a88 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/cgfunc.cpp @@ -0,0 +1,2270 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cgfunc.h" +#if DEBUG +#include +#endif +#include "cg.h" +#include "insn.h" +#include "loop.h" +#include "mir_builder.h" +#include "factory.h" +#include "debug_info.h" +#include "optimize_common.h" + +namespace maplebe { +using namespace maple; + +#define JAVALANG (GetMirModule().IsJavaModule()) + +Operand *HandleDread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &dreadNode = static_cast(expr); + return cgFunc.SelectDread(parent, dreadNode); +} + +Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + auto ®ReadNode = static_cast(expr); + if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { + return &cgFunc.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); + } + return cgFunc.SelectRegread(regReadNode); +} + +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &constValNode = static_cast(expr); + MIRConst *mirConst = constValNode.GetConstVal(); + DEBUG_ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); + if (mirConst->GetKind() == kConstInt) { + auto *mirIntConst = safe_cast(mirConst); + return cgFunc.SelectIntConst(*mirIntConst); + } else if (mirConst->GetKind() == kConstFloatConst) { + auto *mirFloatConst = safe_cast(mirConst); + return cgFunc.SelectFloatConst(*mirFloatConst, parent); + } else if (mirConst->GetKind() == kConstDoubleConst) { + auto *mirDoubleConst = safe_cast(mirConst); + return cgFunc.SelectDoubleConst(*mirDoubleConst, parent); + } else { + CHECK_FATAL(false, "NYI"); + } + return nullptr; +} + +Operand *HandleConstStr(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + auto &constStrNode = static_cast(expr); +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::IsArm64ilp32()) { + return 
cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)))); + } else { + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); + } +#else + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a32))); +#endif +} + +Operand *HandleConstStr16(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + auto &constStr16Node = static_cast(expr); +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::IsArm64ilp32()) { + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)))); + } else { + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); + } +#else + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a32))); +#endif +} + +Operand *HandleAdd(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && expr.Opnd(0)->GetOpCode() == OP_mul && + !IsPrimitiveVector(expr.GetPrimType()) && + !IsPrimitiveFloat(expr.GetPrimType()) && expr.Opnd(0)->Opnd(0)->GetOpCode() != OP_constval && + expr.Opnd(0)->Opnd(1)->GetOpCode() != OP_constval) { + return cgFunc.SelectMadd(static_cast(expr), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(0)->Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(0)->Opnd(1)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } else if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && expr.Opnd(1)->GetOpCode() == OP_mul && + !IsPrimitiveVector(expr.GetPrimType()) && + !IsPrimitiveFloat(expr.GetPrimType()) && expr.Opnd(1)->Opnd(0)->GetOpCode() != OP_constval && + expr.Opnd(1)->Opnd(1)->GetOpCode() != OP_constval) { + return cgFunc.SelectMadd(static_cast(expr), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(1)->Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(1)->Opnd(1)), + *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); + } else { + return cgFunc.SelectAdd(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } +} + +Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return &cgFunc.SelectCGArrayElemAdd(static_cast(expr), parent); +} + +BaseNode *IsConstantInVectorFromScalar(BaseNode *expr) { + if (expr->op != OP_intrinsicop) { + return nullptr; + } + IntrinsicopNode *intrn = static_cast(expr); + switch (intrn->GetIntrinsic()) { + case INTRN_vector_from_scalar_v8u8: case INTRN_vector_from_scalar_v8i8: + case INTRN_vector_from_scalar_v4u16: case INTRN_vector_from_scalar_v4i16: + case INTRN_vector_from_scalar_v2u32: case INTRN_vector_from_scalar_v2i32: + case INTRN_vector_from_scalar_v1u64: case INTRN_vector_from_scalar_v1i64: + case INTRN_vector_from_scalar_v16u8: case INTRN_vector_from_scalar_v16i8: + case INTRN_vector_from_scalar_v8u16: case INTRN_vector_from_scalar_v8i16: + case INTRN_vector_from_scalar_v4u32: case INTRN_vector_from_scalar_v4i32: + case INTRN_vector_from_scalar_v2u64: case INTRN_vector_from_scalar_v2i64: { + if (intrn->Opnd(0) != nullptr && intrn->Opnd(0)->op == 
OP_constval) { + return intrn->Opnd(0); + } + break; + } + default: + break; + } + return nullptr; +} + +Operand *HandleShift(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + BaseNode *cExpr = IsConstantInVectorFromScalar(expr.Opnd(1)); + if (cExpr == nullptr) { + return cgFunc.SelectShift(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } else { + return cgFunc.SelectShift(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(1), *cExpr), parent); + } +} + +Operand *HandleRor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMpy(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectMpy(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleDiv(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectDiv(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleRem(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRem(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofNode = static_cast(expr); + return cgFunc.SelectAddrof(addrofNode, parent, false); +} + +Operand *HandleAddrofoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofoffNode = static_cast(expr); + return cgFunc.SelectAddrofoff(addrofoffNode, parent); +} + +Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addroffuncNode = static_cast(expr); + return &cgFunc.SelectAddrofFunc(addroffuncNode, parent); +} + +Operand *HandleAddrofLabel(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofLabelNode = static_cast(expr); + return &cgFunc.SelectAddrofLabel(addrofLabelNode, parent); +} + +Operand *HandleIread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIread(parent, ireadNode); +} + +Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadoff(parent, ireadNode); +} + +Operand *HandleIreadfpoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadfpoff(parent, ireadNode); +} + +Operand *HandleSub(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectSub(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBand(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBand(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBior(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBior(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBxor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBxor(static_cast(expr), 
*cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectAbs(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleBnot(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBnot(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleExtractBits(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + ExtractbitsNode &node = static_cast(expr); + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + if (!CGOptions::IsBigEndian() && (bitSize == k8BitSize || bitSize == k16BitSize) && + GetPrimTypeBitSize(node.GetPrimType()) != k64BitSize && + (bitOffset == 0 || bitOffset == k8BitSize || bitOffset == k16BitSize || bitOffset == k24BitSize) && + expr.Opnd(0)->GetOpCode() == OP_iread && node.GetOpCode() == OP_extractbits) { + return cgFunc.SelectRegularBitFieldLoad(node, parent); + } + return cgFunc.SelectExtractbits(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleDepositBits(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectDepositBits(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleLnot(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectLnot(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleLand(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectLand(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleLor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + if (parent.IsCondBr()) { + return cgFunc.SelectLor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent, true); + } else { + return cgFunc.SelectLor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } +} + +Operand *HandleMin(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectMin(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMax(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectMax(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleNeg(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectNeg(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleRecip(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRecip(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleSqrt(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectSqrt(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleCeil(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectCeil(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleFloor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectFloor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), 
parent); +} + +Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectRetype(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleCvt(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectCvt(parent, static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleRound(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRound(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectTrunc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +static bool HasCompare(const BaseNode *expr) { + if (kOpcodeInfo.IsCompare(expr->GetOpCode())) { + return true; + } + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + if (HasCompare(expr->Opnd(i))) { + return true; + } + } + return false; +} + +Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + /* 0,1,2 represent the first opnd and the second opnd and the third opnd of expr */ + bool hasCompare = false; + if (HasCompare(expr.Opnd(1)) || HasCompare(expr.Opnd(2))) { + hasCompare = true; + } + Operand &trueOpnd = *cgFunc.HandleExpr(expr, *expr.Opnd(1)); + Operand &falseOpnd = *cgFunc.HandleExpr(expr, *expr.Opnd(2)); + Operand *cond = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + return cgFunc.SelectSelect(static_cast(expr), *cond, trueOpnd, falseOpnd, parent, hasCompare); +} + +Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + // fix opnd type before select insn + PrimType targetPtyp = parent.GetPrimType(); + if (kOpcodeInfo.IsCompare(parent.GetOpCode())) { + targetPtyp = static_cast(parent).GetOpndType(); + } else if (kOpcodeInfo.IsTypeCvt(parent.GetOpCode())) { + targetPtyp = static_cast(parent).FromType(); + } + if (IsPrimitiveInteger(targetPtyp) && targetPtyp != expr.GetPrimType()) { + expr.SetPrimType(targetPtyp); + } + return cgFunc.SelectCmpOp(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectAlloca(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectMalloc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleGCMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectGCMalloc(static_cast(expr)); +} + +Operand *HandleJarrayMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectJarrayMalloc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +/* Neon intrinsic handling */ +Operand *HandleVectorAddLong(const BaseNode &expr, CGFunc &cgFunc, bool isLow) { + Operand *o1 = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(expr, *expr.Opnd(1)); + return cgFunc.SelectVectorAddLong(expr.GetPrimType(), o1, o2, expr.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorAddWiden(const BaseNode &expr, CGFunc &cgFunc, bool isLow) { + Operand *o1 = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(expr, *expr.Opnd(1)); + return cgFunc.SelectVectorAddWiden(o1, expr.Opnd(0)->GetPrimType(), o2, 
expr.Opnd(1)->GetPrimType(), isLow); +} + +Operand *HandleVectorFromScalar(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + return cgFunc.SelectVectorFromScalar(intrnNode.GetPrimType(), cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)), + intrnNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleVectorAbsSubL(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + return cgFunc.SelectVectorAbsSubL(intrnNode.GetPrimType(), opnd1, opnd2, intrnNode.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorMerge(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand2 */ + BaseNode *index = intrnNode.Opnd(2); /* index operand */ + int32 iNum = 0; + if (index->GetOpCode() == OP_constval) { + MIRConst *mirConst = static_cast(index)->GetConstVal(); + iNum = static_cast(safe_cast(mirConst)->GetExtValue()); + PrimType ty = intrnNode.Opnd(0)->GetPrimType(); + if (!IsPrimitiveVector(ty)) { + iNum = 0; + } else { + iNum *= GetPrimTypeSize(ty) / GetVecLanes(ty); /* 64x2: 0-1 -> 0-8 */ + } + } else { /* 32x4: 0-3 -> 0-12 */ + CHECK_FATAL(0, "VectorMerge does not have const index"); + } + return cgFunc.SelectVectorMerge(intrnNode.GetPrimType(), opnd1, opnd2, iNum); +} + +Operand *HandleVectorGetHigh(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorDup(rType, opnd1, false); +} + +Operand *HandleVectorGetLow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorDup(rType, opnd1, true); +} + +Operand *HandleVectorGetElement(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + PrimType o1Type = intrnNode.Opnd(0)->GetPrimType(); + Operand *opndLane = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); + int32 laneNum = -1; + if (opndLane->IsConstImmediate()) { + MIRConst *mirConst = static_cast(intrnNode.Opnd(1))->GetConstVal(); + laneNum = static_cast(safe_cast(mirConst)->GetExtValue()); + } else { + CHECK_FATAL(0, "VectorGetElement does not have lane const"); + } + return cgFunc.SelectVectorGetElement(intrnNode.GetPrimType(), opnd1, o1Type, laneNum); +} + +Operand *HandleVectorPairwiseAdd(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *src = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector src operand */ + PrimType sType = intrnNode.Opnd(0)->GetPrimType(); + return cgFunc.SelectVectorPairwiseAdd(intrnNode.GetPrimType(), src, sType); +} + +Operand *HandleVectorPairwiseAdalp(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + BaseNode *arg1 = intrnNode.Opnd(0); + BaseNode *arg2 = intrnNode.Opnd(1); + Operand *src1 = cgFunc.HandleExpr(intrnNode, *arg1); /* vector src operand 1 */ + Operand *src2 = cgFunc.HandleExpr(intrnNode, *arg2); /* vector src operand 2 */ + return cgFunc.SelectVectorPairwiseAdalp(src1, arg1->GetPrimType(), src2, arg2->GetPrimType()); +} + +Operand 
*HandleVectorSetElement(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + BaseNode *arg0 = intrnNode.Opnd(0); /* uint32_t operand */ + Operand *opnd0 = cgFunc.HandleExpr(intrnNode, *arg0); + PrimType aType = arg0->GetPrimType(); + + BaseNode *arg1 = intrnNode.Opnd(1); /* vector operand == result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *arg1); + PrimType vType = arg1->GetPrimType(); + + BaseNode *arg2 = intrnNode.Opnd(2); /* lane const operand */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *arg2); + int32 laneNum = -1; + if (opnd2->IsConstImmediate()) { + MIRConst *mirConst = static_cast(arg2)->GetConstVal(); + laneNum = static_cast(safe_cast(mirConst)->GetExtValue()); + } else { + CHECK_FATAL(0, "VectorSetElement does not have lane const"); + } + return cgFunc.SelectVectorSetElement(opnd0, aType, opnd1, vType, laneNum); +} + +Operand *HandleVectorReverse(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, uint32 size) { + BaseNode *argExpr = intrnNode.Opnd(0); /* src operand */ + Operand *src = cgFunc.HandleExpr(intrnNode, *argExpr); + MIRType *type = intrnNode.GetIntrinDesc().GetReturnType(); + DEBUG_ASSERT(type != nullptr, "null ptr check"); + auto revVecType = type->GetPrimType(); + return cgFunc.SelectVectorReverse(revVecType, src, revVecType, size); +} + +Operand *HandleVectorShiftNarrow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* vector result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* shift const */ + if (!opnd2->IsConstImmediate()) { + CHECK_FATAL(0, "VectorShiftNarrow does not have shift const"); + } + return cgFunc.SelectVectorShiftRNarrow(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), opnd2, isLow); +} + +Operand *HandleVectorSubWiden(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow, bool isWide) { + PrimType resType = intrnNode.GetPrimType(); /* uint32_t result */ + Operand *o1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); + return cgFunc.SelectVectorSubWiden(resType, o1, intrnNode.Opnd(0)->GetPrimType(), + o2, intrnNode.Opnd(1)->GetPrimType(), isLow, isWide); +} + +Operand *HandleVectorSum(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType resType = intrnNode.GetPrimType(); /* uint32_t result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorSum(resType, opnd1, intrnNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleVectorTableLookup(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + return cgFunc.SelectVectorTableLookup(rType, opnd1, opnd2); +} + +Operand *HandleVectorMadd(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + Operand *opnd3 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(2)); /* vector operand 3 */ + PrimType oTyp1 = intrnNode.Opnd(0)->GetPrimType(); + PrimType oTyp2 = intrnNode.Opnd(1)->GetPrimType(); + PrimType oTyp3 = intrnNode.Opnd(2)->GetPrimType(); + return 
cgFunc.SelectVectorMadd(opnd1, oTyp1, opnd2, oTyp2, opnd3, oTyp3); +} + +Operand *HandleVectorMull(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + PrimType oTyp1 = intrnNode.Opnd(0)->GetPrimType(); + PrimType oTyp2 = intrnNode.Opnd(1)->GetPrimType(); + return cgFunc.SelectVectorMull(rType, opnd1, oTyp1, opnd2, oTyp2, isLow); +} + +Operand *HandleVectorNarrow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector opnd 1 */ + if (isLow) { + return cgFunc.SelectVectorNarrow(rType, opnd1, intrnNode.Opnd(0)->GetPrimType()); + } else { + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector opnd 2 */ + return cgFunc.SelectVectorNarrow2(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), opnd2, + intrnNode.Opnd(1)->GetPrimType()); + } +} + +Operand *HandleVectorWiden(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector opnd 1 */ + return cgFunc.SelectVectorWiden(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &intrinsicopNode = static_cast(expr); + switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_MPL_READ_OVTABLE_ENTRY_LAZY: { + Operand *srcOpnd = cgFunc.HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); + return cgFunc.SelectLazyLoad(*srcOpnd, intrinsicopNode.GetPrimType()); + } + case INTRN_MPL_READ_STATIC_OFFSET_TAB: { + auto addrOfNode = static_cast(intrinsicopNode.Opnd(0)); + MIRSymbol *st = cgFunc.GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrOfNode->GetStIdx()); + auto constNode = static_cast(intrinsicopNode.Opnd(1)); + CHECK_FATAL(constNode != nullptr, "null ptr check"); + auto mirIntConst = static_cast(constNode->GetConstVal()); + return cgFunc.SelectLazyLoadStatic(*st, mirIntConst->GetExtValue(), intrinsicopNode.GetPrimType()); + } + case INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY: { + auto addrOfNode = static_cast(intrinsicopNode.Opnd(0)); + MIRSymbol *st = cgFunc.GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrOfNode->GetStIdx()); + auto constNode = static_cast(intrinsicopNode.Opnd(1)); + CHECK_FATAL(constNode != nullptr, "null ptr check"); + auto mirIntConst = static_cast(constNode->GetConstVal()); + return cgFunc.SelectLoadArrayClassCache(*st, mirIntConst->GetExtValue(), intrinsicopNode.GetPrimType()); + } + // double + case INTRN_C_sin: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sin"); + case INTRN_C_sinh: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinh"); + case INTRN_C_asin: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "asin"); + case INTRN_C_cos: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cos"); + case INTRN_C_cosh: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cosh"); + case INTRN_C_acos: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "acos"); + case INTRN_C_atan: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "atan"); + case INTRN_C_exp: + 
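// Editor's note (refers to the __sync_* cases further below): judging from the case
// labels, the final bool passed to SelectCSyncFetch appears to distinguish the GCC
// builtins that return the old value (fetch-and-op) from those that return the new
// one (op-and-fetch). In std::atomic terms, as an editor-added illustration only:
#if 0
#include <atomic>
int FetchAndAdd(std::atomic<int> &a, int v)
{
    return a.fetch_add(v);      // like __sync_fetch_and_add: yields the value before the add
}
int AddAndFetch(std::atomic<int> &a, int v)
{
    return a.fetch_add(v) + v;  // like __sync_add_and_fetch: yields the value after the add
}
#endif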
return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "exp"); + case INTRN_C_log: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log"); + case INTRN_C_log10: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log10"); + // float + case INTRN_C_sinf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinf"); + case INTRN_C_sinhf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinhf"); + case INTRN_C_asinf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "asinf"); + case INTRN_C_cosf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cosf"); + case INTRN_C_coshf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "coshf"); + case INTRN_C_acosf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "acosf"); + case INTRN_C_atanf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "atanf"); + case INTRN_C_expf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "expf"); + case INTRN_C_logf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "logf"); + case INTRN_C_log10f: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log10f"); + // int + case INTRN_C_ffs: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "ffs"); + // libc mem* and str* functions as intrinsicops + case INTRN_C_memcmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "memcmp"); + case INTRN_C_strlen: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_u64, "strlen"); + case INTRN_C_strcmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "strcmp"); + case INTRN_C_strncmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "strncmp"); + case INTRN_C_strchr: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_a64, "strchr"); + case INTRN_C_strrchr: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_a64, "strrchr"); + + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + return cgFunc.SelectBswap(intrinsicopNode, *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); + + + case INTRN_C_clz32: + case INTRN_C_clz64: + return cgFunc.SelectCclz(intrinsicopNode); + case INTRN_C_ctz32: + case INTRN_C_ctz64: + return cgFunc.SelectCctz(intrinsicopNode); + case INTRN_C_popcount32: + case INTRN_C_popcount64: + return cgFunc.SelectCpopcount(intrinsicopNode); + case INTRN_C_parity32: + case INTRN_C_parity64: + return cgFunc.SelectCparity(intrinsicopNode); + case INTRN_C_clrsb32: + case INTRN_C_clrsb64: + return cgFunc.SelectCclrsb(intrinsicopNode); + case INTRN_C_isaligned: + return cgFunc.SelectCisaligned(intrinsicopNode); + case INTRN_C_alignup: + return cgFunc.SelectCalignup(intrinsicopNode); + case INTRN_C_aligndown: + return cgFunc.SelectCaligndown(intrinsicopNode); + case INTRN_C___sync_add_and_fetch_1: + case INTRN_C___sync_add_and_fetch_2: + case INTRN_C___sync_add_and_fetch_4: + case INTRN_C___sync_add_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_add, false); + case INTRN_C___sync_sub_and_fetch_1: + case INTRN_C___sync_sub_and_fetch_2: + case INTRN_C___sync_sub_and_fetch_4: + case INTRN_C___sync_sub_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_sub, false); + case INTRN_C___sync_fetch_and_add_1: + case INTRN_C___sync_fetch_and_add_2: + case INTRN_C___sync_fetch_and_add_4: + case INTRN_C___sync_fetch_and_add_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_add, true); + case 
INTRN_C___sync_fetch_and_sub_1: + case INTRN_C___sync_fetch_and_sub_2: + case INTRN_C___sync_fetch_and_sub_4: + case INTRN_C___sync_fetch_and_sub_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_sub, true); + case INTRN_C___sync_bool_compare_and_swap_1: + case INTRN_C___sync_bool_compare_and_swap_2: + case INTRN_C___sync_bool_compare_and_swap_4: + case INTRN_C___sync_bool_compare_and_swap_8: + return cgFunc.SelectCSyncBoolCmpSwap(intrinsicopNode); + case INTRN_C___sync_val_compare_and_swap_1: + case INTRN_C___sync_val_compare_and_swap_2: + case INTRN_C___sync_val_compare_and_swap_4: + case INTRN_C___sync_val_compare_and_swap_8: + return cgFunc.SelectCSyncValCmpSwap(intrinsicopNode); + case INTRN_C___sync_lock_test_and_set_1: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i8); + case INTRN_C___sync_lock_test_and_set_2: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i16); + case INTRN_C___sync_lock_test_and_set_4: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i32); + case INTRN_C___sync_lock_test_and_set_8: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i64); + case INTRN_C___sync_fetch_and_and_1: + case INTRN_C___sync_fetch_and_and_2: + case INTRN_C___sync_fetch_and_and_4: + case INTRN_C___sync_fetch_and_and_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_band, true); + case INTRN_C___sync_and_and_fetch_1: + case INTRN_C___sync_and_and_fetch_2: + case INTRN_C___sync_and_and_fetch_4: + case INTRN_C___sync_and_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_band, false); + case INTRN_C___sync_fetch_and_or_1: + case INTRN_C___sync_fetch_and_or_2: + case INTRN_C___sync_fetch_and_or_4: + case INTRN_C___sync_fetch_and_or_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bior, true); + case INTRN_C___sync_or_and_fetch_1: + case INTRN_C___sync_or_and_fetch_2: + case INTRN_C___sync_or_and_fetch_4: + case INTRN_C___sync_or_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bior, false); + case INTRN_C___sync_fetch_and_xor_1: + case INTRN_C___sync_fetch_and_xor_2: + case INTRN_C___sync_fetch_and_xor_4: + case INTRN_C___sync_fetch_and_xor_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bxor, true); + case INTRN_C___sync_xor_and_fetch_1: + case INTRN_C___sync_xor_and_fetch_2: + case INTRN_C___sync_xor_and_fetch_4: + case INTRN_C___sync_xor_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bxor, false); + case INTRN_C___sync_synchronize: + return cgFunc.SelectCSyncSynchronize(intrinsicopNode); + case INTRN_C___atomic_load_n: + return cgFunc.SelectCAtomicLoadN(intrinsicopNode); + case INTRN_C___atomic_exchange_n: + return cgFunc.SelectCAtomicExchangeN(intrinsicopNode); + case INTRN_C__builtin_return_address: + case INTRN_C__builtin_extract_return_addr: + return cgFunc.SelectCReturnAddress(intrinsicopNode); + + case INTRN_vector_abs_v8i8: case INTRN_vector_abs_v4i16: + case INTRN_vector_abs_v2i32: case INTRN_vector_abs_v1i64: + case INTRN_vector_abs_v16i8: case INTRN_vector_abs_v8i16: + case INTRN_vector_abs_v4i32: case INTRN_vector_abs_v2i64: + return HandleAbs(parent, intrinsicopNode, cgFunc); + + case INTRN_vector_addl_low_v8i8: case INTRN_vector_addl_low_v8u8: + case INTRN_vector_addl_low_v4i16: case INTRN_vector_addl_low_v4u16: + case INTRN_vector_addl_low_v2i32: case INTRN_vector_addl_low_v2u32: + return HandleVectorAddLong(intrinsicopNode, cgFunc, true); + + case INTRN_vector_addl_high_v8i8: case INTRN_vector_addl_high_v8u8: + case INTRN_vector_addl_high_v4i16: case 
INTRN_vector_addl_high_v4u16: + case INTRN_vector_addl_high_v2i32: case INTRN_vector_addl_high_v2u32: + return HandleVectorAddLong(intrinsicopNode, cgFunc, false); + + case INTRN_vector_addw_low_v8i8: case INTRN_vector_addw_low_v8u8: + case INTRN_vector_addw_low_v4i16: case INTRN_vector_addw_low_v4u16: + case INTRN_vector_addw_low_v2i32: case INTRN_vector_addw_low_v2u32: + return HandleVectorAddWiden(intrinsicopNode, cgFunc, true); + + case INTRN_vector_addw_high_v8i8: case INTRN_vector_addw_high_v8u8: + case INTRN_vector_addw_high_v4i16: case INTRN_vector_addw_high_v4u16: + case INTRN_vector_addw_high_v2i32: case INTRN_vector_addw_high_v2u32: + return HandleVectorAddWiden(intrinsicopNode, cgFunc, false); + + case INTRN_vector_sum_v8u8: case INTRN_vector_sum_v8i8: + case INTRN_vector_sum_v4u16: case INTRN_vector_sum_v4i16: + case INTRN_vector_sum_v2u32: case INTRN_vector_sum_v2i32: + case INTRN_vector_sum_v16u8: case INTRN_vector_sum_v16i8: + case INTRN_vector_sum_v8u16: case INTRN_vector_sum_v8i16: + case INTRN_vector_sum_v4u32: case INTRN_vector_sum_v4i32: + case INTRN_vector_sum_v2u64: case INTRN_vector_sum_v2i64: + return HandleVectorSum(intrinsicopNode, cgFunc); + + case INTRN_vector_from_scalar_v8u8: case INTRN_vector_from_scalar_v8i8: + case INTRN_vector_from_scalar_v4u16: case INTRN_vector_from_scalar_v4i16: + case INTRN_vector_from_scalar_v2u32: case INTRN_vector_from_scalar_v2i32: + case INTRN_vector_from_scalar_v1u64: case INTRN_vector_from_scalar_v1i64: + case INTRN_vector_from_scalar_v16u8: case INTRN_vector_from_scalar_v16i8: + case INTRN_vector_from_scalar_v8u16: case INTRN_vector_from_scalar_v8i16: + case INTRN_vector_from_scalar_v4u32: case INTRN_vector_from_scalar_v4i32: + case INTRN_vector_from_scalar_v2u64: case INTRN_vector_from_scalar_v2i64: + return HandleVectorFromScalar(intrinsicopNode, cgFunc); + + case INTRN_vector_labssub_low_v8u8: case INTRN_vector_labssub_low_v8i8: + case INTRN_vector_labssub_low_v4u16: case INTRN_vector_labssub_low_v4i16: + case INTRN_vector_labssub_low_v2u32: case INTRN_vector_labssub_low_v2i32: + return HandleVectorAbsSubL(intrinsicopNode, cgFunc, true); + + case INTRN_vector_labssub_high_v8u8: case INTRN_vector_labssub_high_v8i8: + case INTRN_vector_labssub_high_v4u16: case INTRN_vector_labssub_high_v4i16: + case INTRN_vector_labssub_high_v2u32: case INTRN_vector_labssub_high_v2i32: + return HandleVectorAbsSubL(intrinsicopNode, cgFunc, false); + + case INTRN_vector_merge_v8u8: case INTRN_vector_merge_v8i8: + case INTRN_vector_merge_v4u16: case INTRN_vector_merge_v4i16: + case INTRN_vector_merge_v2u32: case INTRN_vector_merge_v2i32: + case INTRN_vector_merge_v1u64: case INTRN_vector_merge_v1i64: + case INTRN_vector_merge_v16u8: case INTRN_vector_merge_v16i8: + case INTRN_vector_merge_v8u16: case INTRN_vector_merge_v8i16: + case INTRN_vector_merge_v4u32: case INTRN_vector_merge_v4i32: + case INTRN_vector_merge_v2u64: case INTRN_vector_merge_v2i64: + return HandleVectorMerge(intrinsicopNode, cgFunc); + + case INTRN_vector_set_element_v8u8: case INTRN_vector_set_element_v8i8: + case INTRN_vector_set_element_v4u16: case INTRN_vector_set_element_v4i16: + case INTRN_vector_set_element_v2u32: case INTRN_vector_set_element_v2i32: + case INTRN_vector_set_element_v1u64: case INTRN_vector_set_element_v1i64: + case INTRN_vector_set_element_v16u8: case INTRN_vector_set_element_v16i8: + case INTRN_vector_set_element_v8u16: case INTRN_vector_set_element_v8i16: + case INTRN_vector_set_element_v4u32: case INTRN_vector_set_element_v4i32: + case 
INTRN_vector_set_element_v2u64: case INTRN_vector_set_element_v2i64: + return HandleVectorSetElement(intrinsicopNode, cgFunc); + + case INTRN_vector_get_high_v16u8: case INTRN_vector_get_high_v16i8: + case INTRN_vector_get_high_v8u16: case INTRN_vector_get_high_v8i16: + case INTRN_vector_get_high_v4u32: case INTRN_vector_get_high_v4i32: + case INTRN_vector_get_high_v2u64: case INTRN_vector_get_high_v2i64: + return HandleVectorGetHigh(intrinsicopNode, cgFunc); + + case INTRN_vector_get_low_v16u8: case INTRN_vector_get_low_v16i8: + case INTRN_vector_get_low_v8u16: case INTRN_vector_get_low_v8i16: + case INTRN_vector_get_low_v4u32: case INTRN_vector_get_low_v4i32: + case INTRN_vector_get_low_v2u64: case INTRN_vector_get_low_v2i64: + return HandleVectorGetLow(intrinsicopNode, cgFunc); + + case INTRN_vector_get_element_v8u8: case INTRN_vector_get_element_v8i8: + case INTRN_vector_get_element_v4u16: case INTRN_vector_get_element_v4i16: + case INTRN_vector_get_element_v2u32: case INTRN_vector_get_element_v2i32: + case INTRN_vector_get_element_v1u64: case INTRN_vector_get_element_v1i64: + case INTRN_vector_get_element_v16u8: case INTRN_vector_get_element_v16i8: + case INTRN_vector_get_element_v8u16: case INTRN_vector_get_element_v8i16: + case INTRN_vector_get_element_v4u32: case INTRN_vector_get_element_v4i32: + case INTRN_vector_get_element_v2u64: case INTRN_vector_get_element_v2i64: + return HandleVectorGetElement(intrinsicopNode, cgFunc); + + case INTRN_vector_pairwise_adalp_v8i8: case INTRN_vector_pairwise_adalp_v4i16: + case INTRN_vector_pairwise_adalp_v2i32: case INTRN_vector_pairwise_adalp_v8u8: + case INTRN_vector_pairwise_adalp_v4u16: case INTRN_vector_pairwise_adalp_v2u32: + case INTRN_vector_pairwise_adalp_v16i8: case INTRN_vector_pairwise_adalp_v8i16: + case INTRN_vector_pairwise_adalp_v4i32: case INTRN_vector_pairwise_adalp_v16u8: + case INTRN_vector_pairwise_adalp_v8u16: case INTRN_vector_pairwise_adalp_v4u32: + return HandleVectorPairwiseAdalp(intrinsicopNode, cgFunc); + + case INTRN_vector_pairwise_add_v8u8: case INTRN_vector_pairwise_add_v8i8: + case INTRN_vector_pairwise_add_v4u16: case INTRN_vector_pairwise_add_v4i16: + case INTRN_vector_pairwise_add_v2u32: case INTRN_vector_pairwise_add_v2i32: + case INTRN_vector_pairwise_add_v16u8: case INTRN_vector_pairwise_add_v16i8: + case INTRN_vector_pairwise_add_v8u16: case INTRN_vector_pairwise_add_v8i16: + case INTRN_vector_pairwise_add_v4u32: case INTRN_vector_pairwise_add_v4i32: + return HandleVectorPairwiseAdd(intrinsicopNode, cgFunc); + + case INTRN_vector_madd_v8u8: case INTRN_vector_madd_v8i8: + case INTRN_vector_madd_v4u16: case INTRN_vector_madd_v4i16: + case INTRN_vector_madd_v2u32: case INTRN_vector_madd_v2i32: + return HandleVectorMadd(intrinsicopNode, cgFunc); + + case INTRN_vector_mull_low_v8u8: case INTRN_vector_mull_low_v8i8: + case INTRN_vector_mull_low_v4u16: case INTRN_vector_mull_low_v4i16: + case INTRN_vector_mull_low_v2u32: case INTRN_vector_mull_low_v2i32: + return HandleVectorMull(intrinsicopNode, cgFunc, true); + + case INTRN_vector_mull_high_v8u8: case INTRN_vector_mull_high_v8i8: + case INTRN_vector_mull_high_v4u16: case INTRN_vector_mull_high_v4i16: + case INTRN_vector_mull_high_v2u32: case INTRN_vector_mull_high_v2i32: + return HandleVectorMull(intrinsicopNode, cgFunc, false); + + case INTRN_vector_narrow_low_v8u16: case INTRN_vector_narrow_low_v8i16: + case INTRN_vector_narrow_low_v4u32: case INTRN_vector_narrow_low_v4i32: + case INTRN_vector_narrow_low_v2u64: case INTRN_vector_narrow_low_v2i64: + return 
HandleVectorNarrow(intrinsicopNode, cgFunc, true); + + case INTRN_vector_narrow_high_v8u16: case INTRN_vector_narrow_high_v8i16: + case INTRN_vector_narrow_high_v4u32: case INTRN_vector_narrow_high_v4i32: + case INTRN_vector_narrow_high_v2u64: case INTRN_vector_narrow_high_v2i64: + return HandleVectorNarrow(intrinsicopNode, cgFunc, false); + + case INTRN_vector_reverse_v8u8: case INTRN_vector_reverse_v8i8: + case INTRN_vector_reverse_v4u16: case INTRN_vector_reverse_v4i16: + case INTRN_vector_reverse_v16u8: case INTRN_vector_reverse_v16i8: + case INTRN_vector_reverse_v8u16: case INTRN_vector_reverse_v8i16: + return HandleVectorReverse(intrinsicopNode, cgFunc, k32BitSize); + + case INTRN_vector_reverse16_v16u8: case INTRN_vector_reverse16_v16i8: + case INTRN_vector_reverse16_v8u8: case INTRN_vector_reverse16_v8i8: + return HandleVectorReverse(intrinsicopNode, cgFunc, k16BitSize); + + case INTRN_vector_reverse64_v16u8: case INTRN_vector_reverse64_v16i8: + case INTRN_vector_reverse64_v8u8: case INTRN_vector_reverse64_v8i8: + case INTRN_vector_reverse64_v8u16: case INTRN_vector_reverse64_v8i16: + case INTRN_vector_reverse64_v4u16: case INTRN_vector_reverse64_v4i16: + case INTRN_vector_reverse64_v4u32: case INTRN_vector_reverse64_v4i32: + case INTRN_vector_reverse64_v2u32: case INTRN_vector_reverse64_v2i32: + return HandleVectorReverse(intrinsicopNode, cgFunc, k64BitSize); + + case INTRN_vector_shr_narrow_low_v8u16: case INTRN_vector_shr_narrow_low_v8i16: + case INTRN_vector_shr_narrow_low_v4u32: case INTRN_vector_shr_narrow_low_v4i32: + case INTRN_vector_shr_narrow_low_v2u64: case INTRN_vector_shr_narrow_low_v2i64: + return HandleVectorShiftNarrow(intrinsicopNode, cgFunc, true); + + case INTRN_vector_subl_low_v8i8: case INTRN_vector_subl_low_v8u8: + case INTRN_vector_subl_low_v4i16: case INTRN_vector_subl_low_v4u16: + case INTRN_vector_subl_low_v2i32: case INTRN_vector_subl_low_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, true, false); + + case INTRN_vector_subl_high_v8i8: case INTRN_vector_subl_high_v8u8: + case INTRN_vector_subl_high_v4i16: case INTRN_vector_subl_high_v4u16: + case INTRN_vector_subl_high_v2i32: case INTRN_vector_subl_high_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, false, false); + + case INTRN_vector_subw_low_v8i8: case INTRN_vector_subw_low_v8u8: + case INTRN_vector_subw_low_v4i16: case INTRN_vector_subw_low_v4u16: + case INTRN_vector_subw_low_v2i32: case INTRN_vector_subw_low_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, true, true); + + case INTRN_vector_subw_high_v8i8: case INTRN_vector_subw_high_v8u8: + case INTRN_vector_subw_high_v4i16: case INTRN_vector_subw_high_v4u16: + case INTRN_vector_subw_high_v2i32: case INTRN_vector_subw_high_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, false, true); + + case INTRN_vector_table_lookup_v8u8: case INTRN_vector_table_lookup_v8i8: + case INTRN_vector_table_lookup_v16u8: case INTRN_vector_table_lookup_v16i8: + return HandleVectorTableLookup(intrinsicopNode, cgFunc); + + case INTRN_vector_widen_low_v8u8: case INTRN_vector_widen_low_v8i8: + case INTRN_vector_widen_low_v4u16: case INTRN_vector_widen_low_v4i16: + case INTRN_vector_widen_low_v2u32: case INTRN_vector_widen_low_v2i32: + return HandleVectorWiden(intrinsicopNode, cgFunc, true); + + case INTRN_vector_widen_high_v8u8: case INTRN_vector_widen_high_v8i8: + case INTRN_vector_widen_high_v4u16: case INTRN_vector_widen_high_v4i16: + case INTRN_vector_widen_high_v2u32: case INTRN_vector_widen_high_v2i32: + return 
HandleVectorWiden(intrinsicopNode, cgFunc, false);
+
+        default:
+            DEBUG_ASSERT(false, "Should not reach here.");
+            return nullptr;
+    }
+}
+
+using HandleExprFactory = FunctionFactory<Opcode, maplebe::Operand*, const BaseNode&, BaseNode&, maplebe::CGFunc&>;
+void InitHandleExprFactory() {
+    RegisterFactoryFunction<HandleExprFactory>(OP_dread, HandleDread);
+    RegisterFactoryFunction<HandleExprFactory>(OP_regread, HandleRegread);
+    RegisterFactoryFunction<HandleExprFactory>(OP_constval, HandleConstVal);
+    RegisterFactoryFunction<HandleExprFactory>(OP_conststr, HandleConstStr);
+    RegisterFactoryFunction<HandleExprFactory>(OP_conststr16, HandleConstStr16);
+    RegisterFactoryFunction<HandleExprFactory>(OP_add, HandleAdd);
+    RegisterFactoryFunction<HandleExprFactory>(OP_CG_array_elem_add, HandleCGArrayElemAdd);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ashr, HandleShift);
+    RegisterFactoryFunction<HandleExprFactory>(OP_lshr, HandleShift);
+    RegisterFactoryFunction<HandleExprFactory>(OP_shl, HandleShift);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ror, HandleRor);
+    RegisterFactoryFunction<HandleExprFactory>(OP_mul, HandleMpy);
+    RegisterFactoryFunction<HandleExprFactory>(OP_div, HandleDiv);
+    RegisterFactoryFunction<HandleExprFactory>(OP_rem, HandleRem);
+    RegisterFactoryFunction<HandleExprFactory>(OP_addrof, HandleAddrof);
+    RegisterFactoryFunction<HandleExprFactory>(OP_addrofoff, HandleAddrofoff);
+    RegisterFactoryFunction<HandleExprFactory>(OP_addroffunc, HandleAddroffunc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_addroflabel, HandleAddrofLabel);
+    RegisterFactoryFunction<HandleExprFactory>(OP_iread, HandleIread);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ireadoff, HandleIreadoff);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ireadfpoff, HandleIreadfpoff);
+    RegisterFactoryFunction<HandleExprFactory>(OP_sub, HandleSub);
+    RegisterFactoryFunction<HandleExprFactory>(OP_band, HandleBand);
+    RegisterFactoryFunction<HandleExprFactory>(OP_bior, HandleBior);
+    RegisterFactoryFunction<HandleExprFactory>(OP_bxor, HandleBxor);
+    RegisterFactoryFunction<HandleExprFactory>(OP_abs, HandleAbs);
+    RegisterFactoryFunction<HandleExprFactory>(OP_bnot, HandleBnot);
+    RegisterFactoryFunction<HandleExprFactory>(OP_sext, HandleExtractBits);
+    RegisterFactoryFunction<HandleExprFactory>(OP_zext, HandleExtractBits);
+    RegisterFactoryFunction<HandleExprFactory>(OP_extractbits, HandleExtractBits);
+    RegisterFactoryFunction<HandleExprFactory>(OP_depositbits, HandleDepositBits);
+    RegisterFactoryFunction<HandleExprFactory>(OP_lnot, HandleLnot);
+    RegisterFactoryFunction<HandleExprFactory>(OP_land, HandleLand);
+    RegisterFactoryFunction<HandleExprFactory>(OP_lior, HandleLor);
+    RegisterFactoryFunction<HandleExprFactory>(OP_min, HandleMin);
+    RegisterFactoryFunction<HandleExprFactory>(OP_max, HandleMax);
+    RegisterFactoryFunction<HandleExprFactory>(OP_neg, HandleNeg);
+    RegisterFactoryFunction<HandleExprFactory>(OP_recip, HandleRecip);
+    RegisterFactoryFunction<HandleExprFactory>(OP_sqrt, HandleSqrt);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ceil, HandleCeil);
+    RegisterFactoryFunction<HandleExprFactory>(OP_floor, HandleFloor);
+    RegisterFactoryFunction<HandleExprFactory>(OP_retype, HandleRetype);
+    RegisterFactoryFunction<HandleExprFactory>(OP_cvt, HandleCvt);
+    RegisterFactoryFunction<HandleExprFactory>(OP_round, HandleRound);
+    RegisterFactoryFunction<HandleExprFactory>(OP_trunc, HandleTrunc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_select, HandleSelect);
+    RegisterFactoryFunction<HandleExprFactory>(OP_le, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ge, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_gt, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_lt, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_ne, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_eq, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_cmp, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_cmpl, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_cmpg, HandleCmp);
+    RegisterFactoryFunction<HandleExprFactory>(OP_alloca, HandleAlloca);
+    RegisterFactoryFunction<HandleExprFactory>(OP_malloc, HandleMalloc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_gcmalloc, HandleGCMalloc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_gcpermalloc, HandleGCMalloc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_gcmallocjarray, HandleJarrayMalloc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_gcpermallocjarray, HandleJarrayMalloc);
+    RegisterFactoryFunction<HandleExprFactory>(OP_intrinsicop, HandleIntrinOp);
+}
+
+void HandleLabel(StmtNode &stmt, CGFunc &cgFunc) {
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_label, "error");
+    auto &label = static_cast<LabelNode&>(stmt);
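+    /* each label opens a fresh BB; the label-to-BB map recorded below lets branch selection resolve targets */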
+    BB *newBB = cgFunc.StartNewBBImpl(false, label);
+    newBB->AddLabel(label.GetLabelIdx());
+    if (newBB->GetId() == 1) {
+        newBB->SetFrequency(kFreqBase);
+    }
+    cgFunc.SetLab2BBMap(newBB->GetLabIdx(), *newBB);
+    cgFunc.SetCurBB(*newBB);
+}
+
+void HandleGoto(StmtNode &stmt, CGFunc &cgFunc) {
+    cgFunc.UpdateFrequency(stmt);
+    auto &gotoNode = static_cast<GotoNode&>(stmt);
+    cgFunc.SetCurBBKind(BB::kBBGoto);
+    cgFunc.SelectGoto(gotoNode);
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(gotoNode));
+    DEBUG_ASSERT(&stmt == &gotoNode, "stmt must be same as gotoNode");
+
+    if ((gotoNode.GetNext() != nullptr) && (gotoNode.GetNext()->GetOpCode() != OP_label)) {
+        DEBUG_ASSERT(cgFunc.GetCurBB()->GetPrev()->GetLastStmt() == &stmt, "check the relation between BB and stmt");
+    }
+}
+
+void HandleIgoto(StmtNode &stmt, CGFunc &cgFunc) {
+    auto &igotoNode = static_cast<UnaryStmtNode&>(stmt);
+    Operand *targetOpnd = cgFunc.HandleExpr(stmt, *igotoNode.Opnd(0));
+    cgFunc.SelectIgoto(targetOpnd);
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(igotoNode));
+}
+
+void HandleCondbr(StmtNode &stmt, CGFunc &cgFunc) {
+    cgFunc.UpdateFrequency(stmt);
+    auto &condGotoNode = static_cast<CondGotoNode&>(stmt);
+    BaseNode *condNode = condGotoNode.Opnd(0);
+    DEBUG_ASSERT(condNode != nullptr, "expect first operand of cond br");
+    Opcode condOp = condGotoNode.GetOpCode();
+    if (condNode->GetOpCode() == OP_constval) {
+        auto *constValNode = static_cast<ConstvalNode*>(condNode);
+        if ((constValNode->GetConstVal()->IsZero() && (OP_brfalse == condOp)) ||
+            (!constValNode->GetConstVal()->IsZero() && (OP_brtrue == condOp))) {
+            auto *gotoStmt = cgFunc.GetMemoryPool()->New<GotoNode>(OP_goto);
+            gotoStmt->SetOffset(condGotoNode.GetOffset());
+            HandleGoto(*gotoStmt, cgFunc);
+            auto *labelStmt = cgFunc.GetMemoryPool()->New<LabelNode>();
+            labelStmt->SetLabelIdx(cgFunc.CreateLabel());
+            HandleLabel(*labelStmt, cgFunc);
+        }
+        return;
+    }
+    cgFunc.SetCurBBKind(BB::kBBIf);
+    /* if condNode is not a cmp node, cmp it with zero.
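       The synthesized zero operand keeps SelectCondGoto on its uniform two-operand compare form.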
*/ + if (!kOpcodeInfo.IsCompare(condNode->GetOpCode())) { + Operand *opnd0 = cgFunc.HandleExpr(condGotoNode, *condNode); + PrimType primType = condNode->GetPrimType(); + Operand *zeroOpnd = nullptr; + if (IsPrimitiveInteger(primType)) { + zeroOpnd = &cgFunc.CreateImmOperand(primType, 0); + } else { + DEBUG_ASSERT(((PTY_f32 == primType) || (PTY_f64 == primType)), "we don't support half-precision FP operands yet"); + zeroOpnd = &cgFunc.CreateImmOperand(primType, 0); + } + cgFunc.SelectCondGoto(condGotoNode, *opnd0, *zeroOpnd); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); + return; + } + /* + * Special case: + * bgt (cmp (op0, op1), 0) ==> + * bgt (op0, op1) + * but skip the case cmp(op0, 0) + */ + BaseNode *op0 = condNode->Opnd(0); + DEBUG_ASSERT(op0 != nullptr, "get first opnd of a condNode failed"); + BaseNode *op1 = condNode->Opnd(1); + DEBUG_ASSERT(op1 != nullptr, "get second opnd of a condNode failed"); + if ((op0->GetOpCode() == OP_cmp) && (op1->GetOpCode() == OP_constval)) { + auto *constValNode = static_cast(op1); + MIRConst *mirConst = constValNode->GetConstVal(); + auto *cmpNode = static_cast(op0); + bool skip = false; + if (cmpNode->Opnd(1)->GetOpCode() == OP_constval) { + auto *constVal = static_cast(cmpNode->Opnd(1))->GetConstVal(); + if (constVal->IsZero()) { + skip = true; + } + } + if (!skip && mirConst->IsZero()) { + cgFunc.SelectCondSpecialCase1(condGotoNode, *op0); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); + return; + } + } + /* + * Special case: + * brfalse(ge (cmpg (op0, op1), 0) ==> + * fcmp op1, op2 + * blo + */ + if ((condGotoNode.GetOpCode() == OP_brfalse) && (condNode->GetOpCode() == OP_ge) && + (op0->GetOpCode() == OP_cmpg) && (op1->GetOpCode() == OP_constval)) { + auto *constValNode = static_cast(op1); + MIRConst *mirConst = constValNode->GetConstVal(); + if (mirConst->IsZero()) { + cgFunc.SelectCondSpecialCase2(condGotoNode, *op0); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); + return; + } + } + Operand *opnd0 = cgFunc.HandleExpr(*condNode, *condNode->Opnd(0)); + Operand *opnd1 = cgFunc.HandleExpr(*condNode, *condNode->Opnd(1)); + cgFunc.SelectCondGoto(condGotoNode, *opnd0, *opnd1); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); +} + +void HandleReturn(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &retNode = static_cast(stmt); + cgFunc.HandleRetCleanup(retNode); + DEBUG_ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + Operand *opnd = nullptr; + if (retNode.NumOpnds() != 0) { + if (!cgFunc.GetFunction().StructReturnedInRegs()) { + opnd = cgFunc.HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc.SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } + } + cgFunc.SelectReturn(opnd); + cgFunc.SetCurBBKind(BB::kBBReturn); + cgFunc.SetCurBB(*cgFunc.StartNewBB(retNode)); +} + +void HandleCall(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &callNode = static_cast(stmt); + cgFunc.SelectCall(callNode); + if (cgFunc.GetCurBB()->GetKind() != BB::kBBFallthru) { + cgFunc.SetCurBB(*cgFunc.StartNewBB(callNode)); + } + + StmtNode *prevStmt = stmt.GetPrev(); + if (prevStmt == nullptr || prevStmt->GetOpCode() != OP_catch) { + return; + } + if ((stmt.GetNext() != nullptr) && (stmt.GetNext()->GetOpCode() == OP_label)) { + cgFunc.SetCurBB(*cgFunc.StartNewBBImpl(true, stmt)); + } + cgFunc.HandleCatch(); +} + +void HandleICall(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &icallNode = static_cast(stmt); + cgFunc.GetCurBB()->SetHasCall(); + Operand 
*opnd0 = cgFunc.HandleExpr(stmt, *icallNode.GetNopndAt(0));
+    cgFunc.SelectIcall(icallNode, *opnd0);
+    if (cgFunc.GetCurBB()->GetKind() != BB::kBBFallthru) {
+        cgFunc.SetCurBB(*cgFunc.StartNewBB(icallNode));
+    }
+}
+
+void HandleIntrinCall(StmtNode &stmt, CGFunc &cgFunc) {
+    auto &call = static_cast<IntrinsiccallNode&>(stmt);
+    cgFunc.SelectIntrinCall(call);
+}
+
+void HandleDassign(StmtNode &stmt, CGFunc &cgFunc) {
+    auto &dassignNode = static_cast<DassignNode&>(stmt);
+    DEBUG_ASSERT(dassignNode.GetOpCode() == OP_dassign, "expect dassign");
+    BaseNode *rhs = dassignNode.GetRHS();
+    DEBUG_ASSERT(rhs != nullptr, "get rhs of dassignNode failed");
+    if (rhs->GetOpCode() == OP_malloc || rhs->GetOpCode() == OP_alloca) {
+        UnaryStmtNode &uNode = static_cast<UnaryStmtNode&>(stmt);
+        Operand *opnd0 = cgFunc.HandleExpr(dassignNode, *(uNode.Opnd()));
+        cgFunc.SelectDassign(dassignNode, *opnd0);
+        return;
+    } else if (rhs->GetPrimType() == PTY_agg) {
+        cgFunc.SelectAggDassign(dassignNode);
+        return;
+    }
+    bool isSaveRetvalToLocal = false;
+    if (rhs->GetOpCode() == OP_regread) {
+        isSaveRetvalToLocal = (static_cast<RegreadNode*>(rhs)->GetRegIdx() == -kSregRetval0);
+    }
+    Operand *opnd0 = cgFunc.HandleExpr(dassignNode, *rhs);
+    cgFunc.SelectDassign(dassignNode, *opnd0);
+    if (isSaveRetvalToLocal) {
+        cgFunc.GetCurBB()->GetLastInsn()->MarkAsSaveRetValToLocal();
+    }
+}
+
+void HandleDassignoff(StmtNode &stmt, CGFunc &cgFunc) {
+    auto &dassignoffNode = static_cast<DassignoffNode&>(stmt);
+    BaseNode *rhs = dassignoffNode.GetRHS();
+    CHECK_FATAL(rhs->GetOpCode() == OP_constval, "dassignoffNode without constval");
+    Operand *opnd0 = cgFunc.HandleExpr(dassignoffNode, *rhs);
+    cgFunc.SelectDassignoff(dassignoffNode, *opnd0);
+}
+
+void HandleRegassign(StmtNode &stmt, CGFunc &cgFunc) {
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_regassign, "expect regAssign");
+    auto &regAssignNode = static_cast<RegassignNode&>(stmt);
+    bool isSaveRetvalToLocal = false;
+    BaseNode *operand = regAssignNode.Opnd(0);
+    DEBUG_ASSERT(operand != nullptr, "get operand of regassignNode failed");
+    if (operand->GetOpCode() == OP_regread) {
+        isSaveRetvalToLocal = (static_cast<RegreadNode*>(operand)->GetRegIdx() == -kSregRetval0);
+    }
+    Operand *opnd0 = cgFunc.HandleExpr(regAssignNode, *operand);
+    cgFunc.SelectRegassign(regAssignNode, *opnd0);
+    if (isSaveRetvalToLocal) {
+        cgFunc.GetCurBB()->GetLastInsn()->MarkAsSaveRetValToLocal();
+    }
+}
+
+void HandleIassign(StmtNode &stmt, CGFunc &cgFunc) {
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassign, "expect stmt");
+    auto &iassignNode = static_cast<IassignNode&>(stmt);
+    if ((iassignNode.GetRHS() != nullptr) && iassignNode.GetRHS()->GetPrimType() != PTY_agg) {
+        cgFunc.SelectIassign(iassignNode);
+    } else {
+        BaseNode *addrNode = iassignNode.Opnd(0);
+        if (addrNode == nullptr) {
+            return;
+        }
+        cgFunc.SelectAggIassign(iassignNode, *cgFunc.HandleExpr(stmt, *addrNode));
+    }
+}
+
+void HandleIassignoff(StmtNode &stmt, CGFunc &cgFunc) {
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassignoff, "expect iassignoff");
+    auto &iassignoffNode = static_cast<IassignoffNode&>(stmt);
+    cgFunc.SelectIassignoff(iassignoffNode);
+}
+
+void HandleIassignfpoff(StmtNode &stmt, CGFunc &cgFunc) {
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassignfpoff, "expect iassignfpoff");
+    auto &iassignfpoffNode = static_cast<IassignFPoffNode&>(stmt);
+    cgFunc.SelectIassignfpoff(iassignfpoffNode, *cgFunc.HandleExpr(stmt, *stmt.Opnd(0)));
+}
+
+void HandleIassignspoff(StmtNode &stmt, CGFunc &cgFunc) {
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassignspoff, "expect iassignspoff");
+    auto &baseNode = static_cast<IassignFPoffNode&>(stmt);  /* same as FP */
+    BaseNode *rhs = baseNode.GetRHS();
+    DEBUG_ASSERT(rhs != nullptr, "get rhs 
of iassignspoffNode failed"); + Operand *opnd0 = cgFunc.HandleExpr(baseNode, *rhs); + cgFunc.SelectIassignspoff(baseNode.GetPrimType(), baseNode.GetOffset(), *opnd0); +} + +void HandleBlkassignoff(StmtNode &stmt, CGFunc &cgFunc) { + DEBUG_ASSERT(stmt.GetOpCode() == OP_blkassignoff, "expect blkassignoff"); + auto &baseNode = static_cast(stmt); + Operand *src = cgFunc.HandleExpr(baseNode, *baseNode.Opnd(1)); + cgFunc.SelectBlkassignoff(baseNode, src); +} + +void HandleEval(const StmtNode &stmt, CGFunc &cgFunc) { + (void)cgFunc.HandleExpr(stmt, *static_cast(stmt).Opnd(0)); +} + +void HandleRangeGoto(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &rangeGotoNode = static_cast(stmt); + cgFunc.SetCurBBKind(BB::kBBRangeGoto); + cgFunc.SelectRangeGoto(rangeGotoNode, *cgFunc.HandleExpr(rangeGotoNode, *rangeGotoNode.Opnd(0))); + cgFunc.SetCurBB(*cgFunc.StartNewBB(rangeGotoNode)); +} + +void HandleMembar(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.SelectMembar(stmt); + if (stmt.GetOpCode() != OP_membarrelease) { + return; + } +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::UseBarriersForVolatile()) { + return; + } +#endif + StmtNode *secondStmt = stmt.GetRealNext(); + if (secondStmt == nullptr || + ((secondStmt->GetOpCode() != OP_iassign) && (secondStmt->GetOpCode() != OP_dassign))) { + return; + } + StmtNode *thirdStmt = secondStmt->GetRealNext(); + if (thirdStmt == nullptr || thirdStmt->GetOpCode() != OP_membarstoreload) { + return; + } + cgFunc.SetVolStore(true); + cgFunc.SetVolReleaseInsn(cgFunc.GetCurBB()->GetLastInsn()); +} + +void HandleComment(StmtNode &stmt, CGFunc &cgFunc) { + if (cgFunc.GetCG()->GenerateVerboseAsm() || cgFunc.GetCG()->GenerateVerboseCG()) { + cgFunc.SelectComment(static_cast(stmt)); + } +} + +void HandleCatchOp(const StmtNode &stmt, const CGFunc &cgFunc) { + (void)stmt; + (void)cgFunc; + DEBUG_ASSERT(stmt.GetNext()->GetOpCode() == OP_call, "The next statement of OP_catch should be OP_call."); +} + +void HandleAssertNull(StmtNode &stmt, CGFunc &cgFunc) { + auto &cgAssertNode = static_cast(stmt); + cgFunc.SelectAssertNull(cgAssertNode); +} + +void HandleAbort(const StmtNode &stmt, CGFunc &cgFunc) { + (void)stmt; + cgFunc.SelectAbort(); +} + +void HandleAsm(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.SelectAsm(static_cast(stmt)); +} + +using HandleStmtFactory = FunctionFactory; +void InitHandleStmtFactory() { + RegisterFactoryFunction(OP_label, HandleLabel); + RegisterFactoryFunction(OP_goto, HandleGoto); + RegisterFactoryFunction(OP_igoto, HandleIgoto); + RegisterFactoryFunction(OP_brfalse, HandleCondbr); + RegisterFactoryFunction(OP_brtrue, HandleCondbr); + RegisterFactoryFunction(OP_return, HandleReturn); + RegisterFactoryFunction(OP_call, HandleCall); + RegisterFactoryFunction(OP_icall, HandleICall); + RegisterFactoryFunction(OP_icallproto, HandleICall); + RegisterFactoryFunction(OP_intrinsiccall, HandleIntrinCall); + RegisterFactoryFunction(OP_intrinsiccallassigned, HandleIntrinCall); + RegisterFactoryFunction(OP_intrinsiccallwithtype, HandleIntrinCall); + RegisterFactoryFunction(OP_intrinsiccallwithtypeassigned, HandleIntrinCall); + RegisterFactoryFunction(OP_dassign, HandleDassign); + RegisterFactoryFunction(OP_dassignoff, HandleDassignoff); + RegisterFactoryFunction(OP_regassign, HandleRegassign); + RegisterFactoryFunction(OP_iassign, HandleIassign); + RegisterFactoryFunction(OP_iassignoff, HandleIassignoff); + RegisterFactoryFunction(OP_iassignfpoff, HandleIassignfpoff); + RegisterFactoryFunction(OP_iassignspoff, HandleIassignspoff); + 
RegisterFactoryFunction(OP_blkassignoff, HandleBlkassignoff); + RegisterFactoryFunction(OP_eval, HandleEval); + RegisterFactoryFunction(OP_rangegoto, HandleRangeGoto); + RegisterFactoryFunction(OP_membarrelease, HandleMembar); + RegisterFactoryFunction(OP_membaracquire, HandleMembar); + RegisterFactoryFunction(OP_membarstoreload, HandleMembar); + RegisterFactoryFunction(OP_membarstorestore, HandleMembar); + RegisterFactoryFunction(OP_comment, HandleComment); + RegisterFactoryFunction(OP_catch, HandleCatchOp); + RegisterFactoryFunction(OP_abort, HandleAbort); + RegisterFactoryFunction(OP_assertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_callassertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_assignassertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_returnassertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_asm, HandleAsm); +} + +CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId) + : vRegTable(allocator.Adapter()), + bbVec(allocator.Adapter()), + vRegOperandTable(allocator.Adapter()), + pRegSpillMemOperands(allocator.Adapter()), + spillRegMemOperands(allocator.Adapter()), + reuseSpillLocMem(allocator.Adapter()), + labelMap(std::less(), allocator.Adapter()), + vregsToPregsMap(std::less(), allocator.Adapter()), + hasVLAOrAlloca(mirFunc.HasVlaOrAlloca()), + dbgCallFrameLocations(allocator.Adapter()), + cg(&cg), + mirModule(mod), + memPool(&memPool), + stackMp(stackMp), + func(mirFunc), + exitBBVec(allocator.Adapter()), + extendSet(allocator.Adapter()), + lab2BBMap(allocator.Adapter()), + beCommon(beCommon), + funcScopeAllocator(&allocator), + emitStVec(allocator.Adapter()), + switchLabelCnt(allocator.Adapter()), +#if TARGARM32 + sortedBBs(allocator.Adapter()), + lrVec(allocator.Adapter()), +#endif /* TARGARM32 */ + loops(allocator.Adapter()), + lmbcParamVec(allocator.Adapter()), + shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." 
+ std::to_string(funcId), &memPool) {
+    mirModule.SetCurFunction(&func);
+    dummyBB = CreateNewBB();
+    vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size();
+    firstNonPregVRegNO = vRegCount;
+    /* the initial maximum register count is vRegCount increased by 1024 */
+    maxRegCount = vRegCount + 1024;
+
+    insnBuilder = memPool.New<InsnBuilder>(memPool);
+    opndBuilder = memPool.New<OperandBuilder>(memPool, func.GetPregTab()->Size());
+
+    vRegTable.resize(maxRegCount);
+    /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */
+    DEBUG_ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr");
+    for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) {
+        PrimType primType = func.GetPregTab()->PregFromPregIdx(i)->GetPrimType();
+        uint32 byteLen = GetPrimTypeSize(primType);
+        if (byteLen < k4ByteSize) {
+            byteLen = k4ByteSize;
+        }
+        if (primType == PTY_u128 || primType == PTY_i128) {
+            byteLen = k8ByteSize;
+        }
+        new (&GetVirtualRegNodeFromPseudoRegIdx(i)) VirtualRegNode(GetRegTyFromPrimTy(primType), byteLen);
+    }
+    firstCGGenLabelIdx = func.GetLabelTab()->GetLabelTableSize();
+    lSymSize = 0;
+    if (func.GetSymTab()) {
+        lSymSize = func.GetSymTab()->GetSymbolTableSize();
+    }
+}
+
+CGFunc::~CGFunc() {
+    mirModule.SetCurFunction(nullptr);
+}
+
+Operand *CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) {
+    auto function = CreateProductFunction<HandleExprFactory>(expr.GetOpCode());
+    CHECK_FATAL(function != nullptr, "unsupported opCode in HandleExpr()");
+    return function(parent, expr, *this);
+}
+
+StmtNode *CGFunc::HandleFirstStmt() {
+    BlockNode *block = func.GetBody();
+
+    DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction");
+    StmtNode *stmt = block->GetFirst();
+    if (stmt == nullptr) {
+        return nullptr;
+    }
+    bool withFreqInfo = func.HasFreqMap() && !func.GetLastFreqMap().empty();
+    if (withFreqInfo) {
+        frequency = kFreqBase;
+    }
+    DEBUG_ASSERT(stmt->GetOpCode() == OP_label, "The first statement should be a label");
+    HandleLabel(*stmt, *this);
+    firstBB = curBB;
+    stmt = stmt->GetNext();
+    if (stmt == nullptr) {
+        return nullptr;
+    }
+    curBB = StartNewBBImpl(false, *stmt);
+    curBB->SetFrequency(frequency);
+    if (JAVALANG) {
+        HandleRCCall(true);
+    }
+    return stmt;
+}
+
+bool CGFunc::CheckSkipMembarOp(const StmtNode &stmt) {
+    StmtNode *nextStmt = stmt.GetRealNext();
+    if (nextStmt == nullptr) {
+        return false;
+    }
+
+    Opcode opCode = stmt.GetOpCode();
+    if (((opCode == OP_membaracquire) || (opCode == OP_membarrelease)) && (nextStmt->GetOpCode() == stmt.GetOpCode())) {
+        return true;
+    }
+    if ((opCode == OP_membarstorestore) && (nextStmt->GetOpCode() == OP_membarrelease)) {
+        return true;
+    }
+    if ((opCode == OP_membarstorestore) && func.IsConstructor() && MemBarOpt(stmt)) {
+        return true;
+    }
+#if TARGAARCH64 || TARGRISCV64
+    if ((!CGOptions::UseBarriersForVolatile()) && (nextStmt->GetOpCode() == OP_membaracquire)) {
+        isVolLoad = true;
+    }
+#endif /* TARGAARCH64 */
+    return false;
+}
+
+void CGFunc::GenerateLoc(StmtNode *stmt, unsigned &lastSrcLoc, unsigned &lastMplLoc) {
+    /* insert Insn for .loc before cg for the stmt */
+    if (cg->GetCGOptions().WithLoc() && stmt->op != OP_label && stmt->op != OP_comment) {
+        /* if original src file location info is available for this stmt,
+         * use it and skip mpl file location info for this stmt
+         */
+        bool hasLoc = false;
+        unsigned newSrcLoc = cg->GetCGOptions().WithSrc() ? stmt->GetSrcPos().LineNum() : 0;
+        if (newSrcLoc != 0 && newSrcLoc != lastSrcLoc) {
+            /* .loc for original src file */
+            unsigned fileid = stmt->GetSrcPos().FileNum();
+            Operand *o0 = CreateDbgImmOperand(fileid);
+            Operand *o1 = CreateDbgImmOperand(newSrcLoc);
+            Insn &loc = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1);
+            curBB->AppendInsn(loc);
+            lastSrcLoc = newSrcLoc;
+            hasLoc = true;
+        }
+        /* .loc for mpl file, skip if already has .loc from src for this stmt */
+        unsigned newMplLoc = cg->GetCGOptions().WithMpl() ? stmt->GetSrcPos().MplLineNum() : 0;
+        if (newMplLoc != 0 && newMplLoc != lastMplLoc && !hasLoc) {
+            unsigned fileid = 1;
+            Operand *o0 = CreateDbgImmOperand(fileid);
+            Operand *o1 = CreateDbgImmOperand(newMplLoc);
+            Insn &loc = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1);
+            curBB->AppendInsn(loc);
+            lastMplLoc = newMplLoc;
+        }
+    }
+}
+
+int32 CGFunc::GetFreqFromStmt(uint32 stmtId) {
+    int32 freq = GetFunction().GetFreqFromLastStmt(stmtId);
+    if (freq != -1) {
+        return freq;
+    }
+    return GetFunction().GetFreqFromFirstStmt(stmtId);
+}
+
+LmbcFormalParamInfo *CGFunc::GetLmbcFormalParamInfo(uint32 offset) {
+    MapleVector<LmbcFormalParamInfo*> &paramVec = GetLmbcParamVec();
+    for (auto *param : paramVec) {
+        uint32 paramOffset = param->GetOffset();
+        uint32 paramSize = param->GetSize();
+        if (paramOffset <= offset && offset < (paramOffset + paramSize)) {
+            return param;
+        }
+    }
+    return nullptr;
+}
+
+/*
+ * For formals of lmbc, the formal list is deleted if there is no
+ * passing of aggregate by value.
+ */
+void CGFunc::CreateLmbcFormalParamInfo() {
+    if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) {
+        return;
+    }
+    PrimType primType;
+    uint32 offset;
+    uint32 typeSize;
+    MIRFunction &lmbcFunc = GetFunction();
+    if (lmbcFunc.GetFormalCount() > 0) {
+        /* Whenever lmbc cannot delete call type info, the prototype is available */
+        uint32 stackOffset = 0;
+        for (size_t idx = 0; idx < lmbcFunc.GetFormalCount(); ++idx) {
+            MIRSymbol *sym = lmbcFunc.GetFormal(idx);
+            MIRType *type;
+            TyIdx tyIdx;
+            if (sym) {
+                tyIdx = lmbcFunc.GetFormalDefVec()[idx].formalTyIdx;
+                type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+            } else {
+                FormalDef vec = const_cast<MIRFunction*>(GetBecommon().GetMIRModule().CurFunction())->GetFormalDefAt(idx);
+                tyIdx = vec.formalTyIdx;
+                type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+            }
+            primType = type->GetPrimType();
+            offset = stackOffset;
+            typeSize = static_cast<uint32>(GetBecommon().GetTypeSize(tyIdx));
+            stackOffset += (typeSize + 7) & (-8);
+            LmbcFormalParamInfo *info = GetMemoryPool()->New<LmbcFormalParamInfo>(primType, offset, typeSize);
+            lmbcParamVec.push_back(info);
+            if (idx == 0 && lmbcFunc.IsFirstArgReturn()) {
+                info->SetIsReturn();
+            }
+            if (type->GetKind() == kTypeStruct) {
+                MIRStructType *structType = static_cast<MIRStructType*>(type);
+                info->SetType(structType);
+                uint32 fpSize;
+                uint32 numFpRegs = FloatParamRegRequired(structType, fpSize);
+                if (numFpRegs > 0) {
+                    info->SetIsPureFloat();
+                    info->SetNumRegs(numFpRegs);
+                    info->SetFpSize(fpSize);
+                }
+            }
+        }
+    } else {
+        /* No aggregate pass by value here */
+        for (StmtNode *stmt = lmbcFunc.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) {
+            if (stmt == nullptr) {
+                break;
+            }
+            if (stmt->GetOpCode() == OP_label) {
+                continue;
+            }
+            if (stmt->GetOpCode() != OP_regassign) {
+                break;
+            }
+            RegassignNode *regAssignNode = static_cast<RegassignNode*>(stmt);
+            BaseNode *operand = regAssignNode->Opnd(0);
+            if (operand->GetOpCode() != OP_ireadfpoff) {
+                break;
} + IreadFPoffNode *ireadNode = static_cast(operand); + primType = ireadNode->GetPrimType(); + if (ireadNode->GetOffset() < 0) { + continue; + } + offset = static_cast(ireadNode->GetOffset()); + typeSize = GetPrimTypeSize(primType); + CHECK_FATAL((offset % k8ByteSize) == 0, ""); /* scalar only, no struct for now */ + LmbcFormalParamInfo *info = GetMemoryPool()->New(primType, offset, typeSize); + lmbcParamVec.push_back(info); + } + } + std::sort(lmbcParamVec.begin(), lmbcParamVec.end(), + [] (const LmbcFormalParamInfo *x, const LmbcFormalParamInfo *y) + { return x->GetOffset() < y->GetOffset(); } + ); + + /* When a scalar param address is taken, its regassign is not in the 1st block */ + for (StmtNode *stmt = lmbcFunc.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt == nullptr) { + break; + } + if (stmt->GetOpCode() == OP_label) { + continue; + } + if (stmt->GetOpCode() != OP_regassign) { + break; + } + RegassignNode *regAssignNode = static_cast(stmt); + BaseNode *operand = regAssignNode->Opnd(0); + if (operand->GetOpCode() != OP_ireadfpoff) { + break; + } + IreadFPoffNode *ireadNode = static_cast(operand); + if (ireadNode->GetOffset() < 0) { + continue; + } + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(static_cast(ireadNode->GetOffset())); + ASSERT_NOT_NULL(info); + info->SetHasRegassign(); + } + + AssignLmbcFormalParams(); +} + +void CGFunc::GenerateInstruction() { + InitHandleExprFactory(); + InitHandleStmtFactory(); + StmtNode *secondStmt = HandleFirstStmt(); + + /* First Pass: Creates the doubly-linked list of BBs (next,prev) */ + volReleaseInsn = nullptr; + unsigned lastSrcLoc = 0; + unsigned lastMplLoc = 0; + std::set bbFreqSet; + for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for .loc before cg for the stmt */ + GenerateLoc(stmt, lastSrcLoc, lastMplLoc); + BB *tmpBB = curBB; + isVolLoad = false; + if (CheckSkipMembarOp(*stmt)) { + continue; + } + bool tempLoad = isVolLoad; + auto function = CreateProductFunction(stmt->GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); + function(*stmt, *this); + /* skip the membar acquire if it is just after the iread. ldr + membaraquire->ldar */ + if (tempLoad && !isVolLoad) { + stmt = stmt->GetNext(); + } + int32 freq = GetFreqFromStmt(stmt->GetStmtID()); + if (freq != -1) { + if (tmpBB != curBB) { + if (curBB->GetFirstInsn() == nullptr && curBB->GetLabIdx() == 0 && bbFreqSet.count(tmpBB->GetId()) == 0) { + tmpBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(tmpBB->GetId()); + } else if ((curBB->GetFirstInsn() != nullptr || curBB->GetLabIdx() != 0) && + bbFreqSet.count(curBB->GetId()) == 0) { + curBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(tmpBB->GetId()); + } + } else if (bbFreqSet.count(curBB->GetId()) == 0) { + curBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(curBB->GetId()); + } + } + + /* + * skip the membarstoreload if there is the pattern for volatile write( membarrelease + store + membarstoreload ) + * membarrelease + store + membarstoreload -> stlr + */ + if (volReleaseInsn != nullptr) { + if ((stmt->GetOpCode() != OP_membarrelease) && (stmt->GetOpCode() != OP_comment)) { + if (!isVolStore) { + /* remove the generated membar release insn. */ + curBB->RemoveInsn(*volReleaseInsn); + /* skip the membarstoreload. 
*/ + stmt = stmt->GetNext(); + } + volReleaseInsn = nullptr; + isVolStore = false; + } + } + if (curBB != tmpBB) { + lastSrcLoc = 0; + } + } + + /* Set lastbb's frequency */ + BlockNode *block = func.GetBody(); + DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + curBB->SetLastStmt(*block->GetLast()); + curBB->SetFrequency(frequency); + lastBB = curBB; + cleanupBB = lastBB->GetPrev(); + /* All stmts are handled */ + frequency = 0; +} + +LabelIdx CGFunc::CreateLabel() { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "Get func failed at CGFunc::CreateLabel"); + std::string funcName = funcSt->GetName(); + std::string labelStr = funcName.append(std::to_string(labelIdx++)); + return func.GetOrCreateLableIdxFromName(labelStr); +} + +MIRSymbol *CGFunc::GetRetRefSymbol(BaseNode &expr) { + Opcode opcode = expr.GetOpCode(); + if (opcode != OP_dread) { + return nullptr; + } + auto &retExpr = static_cast(expr); + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(retExpr.GetStIdx()); + DEBUG_ASSERT(symbol != nullptr, "get symbol in mirmodule failed"); + if (symbol->IsRefType()) { + MIRSymbol *sym = nullptr; + for (uint32 i = 0; i < func.GetFormalCount(); i++) { + sym = func.GetFormal(i); + if (sym == symbol) { + return nullptr; + } + } + return symbol; + } + return nullptr; +} + +void CGFunc::GenerateCfiPrologEpilog() { + if (GenCfi() == false) { + return; + } + Insn &ipoint = GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_startproc); + /* prolog */ + if (firstBB->GetFirstInsn() != nullptr) { + firstBB->InsertInsnBefore(*firstBB->GetFirstInsn(), ipoint); + } else { + firstBB->AppendInsn(ipoint); + } + +#if !defined(TARGARM32) + /* + * always generate ".cfi_personality 155, DW.ref.__mpl_personality_v0" for Java methods. + * we depend on this to tell whether it is a java method. + */ + if (mirModule.IsJavaModule() && func.IsJava()) { + Insn &personality = + GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_personality_symbol). + AddOpndChain(CreateCfiImmOperand(EHFunc::kTypeEncoding, k8BitSize)). + AddOpndChain(CreateCfiStrOperand("DW.ref.__mpl_personality_v0")); + firstBB->InsertInsnAfter(ipoint, personality); + } +#endif + + /* epilog */ + lastBB->AppendInsn(GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_endproc)); +} + +void CGFunc::TraverseAndClearCatchMark(BB &bb) { + /* has bb been visited */ + if (bb.GetInternalFlag3()) { + return; + } + bb.SetIsCatch(false); + bb.SetInternalFlag3(1); + for (auto succBB : bb.GetSuccs()) { + TraverseAndClearCatchMark(*succBB); + } +} + +/* + * Two types of successor edges, normal and eh. Any bb which is not + * reachable by a normal successor edge is considered to be in a + * catch block. + * Marking it as a catch block does not automatically make it into + * a catch block. Unreachables can be marked as such too. 
+ */
+void CGFunc::MarkCatchBBs() {
+    /* First, suspect all bb to be in catch */
+    FOR_ALL_BB(bb, this) {
+        bb->SetIsCatch(true);
+        bb->SetInternalFlag3(0);  /* mark as not visited */
+    }
+    /* Eliminate cleanup section from catch */
+    FOR_ALL_BB(bb, this) {
+        if (bb->GetFirstStmt() == cleanupLabel) {
+            bb->SetIsCatch(false);
+            DEBUG_ASSERT(bb->GetSuccs().size() <= 1, "MarkCatchBBs incorrect cleanup label");
+            BB *succ = nullptr;
+            if (!bb->GetSuccs().empty()) {
+                succ = bb->GetSuccs().front();
+            } else {
+                continue;
+            }
+            DEBUG_ASSERT(succ != nullptr, "Get front succsBB failed");
+            while (1) {
+                DEBUG_ASSERT(succ->GetSuccs().size() <= 1, "MarkCatchBBs incorrect cleanup label");
+                succ->SetIsCatch(false);
+                if (!succ->GetSuccs().empty()) {
+                    succ = succ->GetSuccs().front();
+                } else {
+                    break;
+                }
+            }
+        }
+    }
+    /* Unmark all normally reachable bb as NOT catch. */
+    TraverseAndClearCatchMark(*firstBB);
+}
+
+/*
+ * Mark CleanupEntryBB
+ * Note: Cleanup bbs and func body bbs are separated, no edges between them.
+ * No ehSuccs or eh_prevs between cleanup bbs.
+ */
+void CGFunc::MarkCleanupEntryBB() {
+    BB *cleanupEntry = nullptr;
+    FOR_ALL_BB(bb, this) {
+        bb->SetIsCleanup(0);      /* Use to mark cleanup bb */
+        bb->SetInternalFlag3(0);  /* Use to mark if visited. */
+        if (bb->GetFirstStmt() == this->cleanupLabel) {
+            cleanupEntry = bb;
+        }
+    }
+    /* If a function without cleanup bb, return. */
+    if (cleanupEntry == nullptr) {
+        return;
+    }
+    /* after merge bb, update cleanupBB. */
+    if (cleanupEntry->GetSuccs().empty()) {
+        this->cleanupBB = cleanupEntry;
+    }
+    SetCleanupLabel(*cleanupEntry);
+    DEBUG_ASSERT(cleanupEntry->GetEhSuccs().empty(), "CG internal error. Cleanup bb should not have ehSuccs.");
+#if DEBUG  /* Please don't remove me. */
+    /* Check if all of the cleanup bb is at bottom of the function. */
+    bool isCleanupArea = true;
+    if (!mirModule.IsCModule()) {
+        FOR_ALL_BB_REV(bb, this) {
+            if (isCleanupArea) {
+                DEBUG_ASSERT(bb->IsCleanup(), "CG internal error, cleanup BBs should be at the bottom of the function.");
+            } else {
+                DEBUG_ASSERT(!bb->IsCleanup(), "CG internal error, cleanup BBs should be at the bottom of the function.");
+            }
+
+            if (bb == cleanupEntry) {
+                isCleanupArea = false;
+            }
+        }
+    }
+#endif  /* DEBUG */
+    this->cleanupEntryBB = cleanupEntry;
+}
+
+/* Traverse from current bb's successor and set isCleanup true. */
+void CGFunc::SetCleanupLabel(BB &cleanupEntry) {
+    /* If bb has already been visited, return. */
+    if (cleanupEntry.GetInternalFlag3()) {
+        return;
+    }
+    cleanupEntry.SetInternalFlag3(1);
+    cleanupEntry.SetIsCleanup(1);
+    for (auto tmpBB : cleanupEntry.GetSuccs()) {
+        if (tmpBB->GetKind() != BB::kBBReturn) {
+            SetCleanupLabel(*tmpBB);
+        } else {
+            DEBUG_ASSERT(ExitbbNotInCleanupArea(cleanupEntry), "exitBB created in cleanupArea.");
+        }
+    }
+}
+
+bool CGFunc::ExitbbNotInCleanupArea(const BB &bb) const {
+    for (const BB *nextBB = bb.GetNext(); nextBB != nullptr; nextBB = nextBB->GetNext()) {
+        if (nextBB->GetKind() == BB::kBBReturn) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/*
+ * Do mem barrier optimization for constructor funcs as follows:
+ *     membarstorestore
+ *     write field of this_   ==>  write field of this_
+ *     membarrelease               membarrelease.
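+ *   (the storestore barrier is redundant here: every write it guards targets a field of this_,
+ *    so the release barrier that follows already orders them)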
+ */
+bool CGFunc::MemBarOpt(const StmtNode &membar) {
+    if (func.GetFormalCount() == 0) {
+        return false;
+    }
+    MIRSymbol *thisSym = func.GetFormal(0);
+    if (thisSym == nullptr) {
+        return false;
+    }
+    StmtNode *stmt = membar.GetNext();
+    for (; stmt != nullptr; stmt = stmt->GetNext()) {
+        BaseNode *base = nullptr;
+        if (stmt->GetOpCode() == OP_comment) {
+            continue;
+        } else if (stmt->GetOpCode() == OP_iassign) {
+            base = static_cast<IassignNode*>(stmt)->Opnd(0);
+        } else if (stmt->GetOpCode() == OP_call) {
+            auto *callNode = static_cast<CallNode*>(stmt);
+            MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx());
+            MIRSymbol *fsym = GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false);
+            DEBUG_ASSERT(fsym != nullptr, "null ptr check");
+            if (fsym->GetName() == "MCC_WriteRefFieldNoDec") {
+                base = callNode->Opnd(0);
+            }
+        }
+        if (base != nullptr) {
+            Opcode op = base->GetOpCode();
+            if (op == OP_regread && thisSym->IsPreg() &&
+                thisSym->GetPreg()->GetPregNo() == static_cast<RegreadNode*>(base)->GetRegIdx()) {
+                continue;
+            }
+            if ((op == OP_dread || op == OP_addrof) && !thisSym->IsPreg() &&
+                static_cast<AddrofNode*>(base)->GetStIdx() == thisSym->GetStIdx()) {
+                continue;
+            }
+        }
+        break;
+    }
+
+    CHECK_NULL_FATAL(stmt);
+    return stmt->GetOpCode() == OP_membarrelease;
+}
+
+void CGFunc::ProcessExitBBVec() {
+    if (exitBBVec.empty()) {
+        LabelIdx newLabelIdx = CreateLabel();
+        BB *retBB = CreateNewBB(newLabelIdx, cleanupBB->IsUnreachable(), BB::kBBReturn, cleanupBB->GetFrequency());
+        cleanupBB->PrependBB(*retBB);
+        exitBBVec.emplace_back(retBB);
+        return;
+    }
+    /* split an empty exitBB */
+    BB *bb = exitBBVec[0];
+    if (bb->NumInsn() > 0) {
+        BB *retBBPart = CreateNewBB(false, BB::kBBFallthru, bb->GetFrequency());
+        DEBUG_ASSERT(retBBPart != nullptr, "retBBPart should not be nullptr");
+        LabelIdx retBBPartLabelIdx = bb->GetLabIdx();
+        if (retBBPartLabelIdx != MIRLabelTable::GetDummyLabel()) {
+            retBBPart->AddLabel(retBBPartLabelIdx);
+            lab2BBMap[retBBPartLabelIdx] = retBBPart;
+        }
+        Insn *insn = bb->GetFirstInsn();
+        while (insn != nullptr) {
+            bb->RemoveInsn(*insn);
+            retBBPart->AppendInsn(*insn);
+            insn = bb->GetFirstInsn();
+        }
+        bb->PrependBB(*retBBPart);
+        LabelIdx newLabelIdx = CreateLabel();
+        bb->AddLabel(newLabelIdx);
+        lab2BBMap[newLabelIdx] = bb;
+    }
+}
+
+void CGFunc::AddCommonExitBB() {
+    uint32 i = 0;
+    /* test the bound before indexing into exitBBVec */
+    while (i < exitBBVec.size() && exitBBVec[i]->IsUnreachable()) {
+        i++;
+    }
+    DEBUG_ASSERT(i < exitBBVec.size(), "all exit BBs are unreachable");
+    // create fake commonExitBB
+    commonExitBB = CreateNewBB(true, BB::kBBFallthru, 0);
+    DEBUG_ASSERT(commonExitBB != nullptr, "cannot create fake commonExitBB");
+    for (BB *cgbb : exitBBVec) {
+        if (!cgbb->IsUnreachable()) {
+            commonExitBB->PushBackPreds(*cgbb);
+        }
+    }
+}
+
+void CGFunc::UpdateCallBBFrequency() {
+    if (!func.HasFreqMap() || func.GetLastFreqMap().empty()) {
+        return;
+    }
+    FOR_ALL_BB(bb, this) {
+        if (bb->GetKind() != BB::kBBFallthru || !bb->HasCall()) {
+            continue;
+        }
+        DEBUG_ASSERT(bb->GetSuccs().size() <= 1, "fallthru BB has only one successor.");
+        if (!bb->GetSuccs().empty()) {
+            bb->SetFrequency((*(bb->GetSuccsBegin()))->GetFrequency());
+        }
+    }
+}
+
+void CGFunc::HandleFunction() {
+    /* select instruction */
+    GenerateInstruction();
+    /* merge multi return */
+    if (!func.GetModule()->IsCModule() || CGOptions::DoRetMerge() || CGOptions::OptimizeForSize()) {
+        MergeReturn();
+    }
+    if (func.IsJava()) {
+        DEBUG_ASSERT(exitBBVec.size() <= 1, "there is more than one BB_return in func");
+    }
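+    /* normalize the exit BBs (create or split the return BB) before the CFG is built */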
ProcessExitBBVec(); + LmbcGenSaveSpForAlloca(); + + if (func.IsJava()) { + GenerateCleanupCodeForExtEpilog(*cleanupBB); + } else if (!func.GetModule()->IsCModule()) { + GenerateCleanupCode(*cleanupBB); + } + GenSaveMethodInfoCode(*firstBB); + /* build control flow graph */ + theCFG = memPool->New(*this); + theCFG->BuildCFG(); + AddCommonExitBB(); + if (mirModule.GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupEntryBB(); + DetermineReturnTypeofCall(); + theCFG->MarkLabelTakenBB(); + theCFG->UnreachCodeAnalysis(); + if (mirModule.GetSrcLang() == kSrcLangC) { + theCFG->WontExitAnalysis(); + } + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + ProcessLazyBinding(); + } + if (GetCG()->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (CGOptions::DoEnableHotColdSplit()) { + theCFG->CheckCFGFreq(); + } +} + +void CGFunc::AddDIESymbolLocation(const MIRSymbol *sym, SymbolAlloc *loc) { + DEBUG_ASSERT(debugInfo != nullptr, "debugInfo is null!"); + DEBUG_ASSERT(loc->GetMemSegment() != nullptr, "only support those variable that locate at stack now"); + DBGDie *sdie = debugInfo->GetLocalDie(&func, sym->GetNameStrIdx()); + if (sdie == nullptr) { + return; + } + + DBGExprLoc *exprloc = sdie->GetExprLoc(); + CHECK_FATAL(exprloc != nullptr, "exprloc is null in CGFunc::AddDIESymbolLocation"); + exprloc->SetSymLoc(loc); + + GetDbgCallFrameLocations().push_back(exprloc); +} + +void CGFunc::DumpCFG() const { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n****** CFG built by CG for " << funcSt->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, this) { + LogInfo::MapleLogger() << "=== BB ( " << std::hex << bb << std::dec << " ) <" << bb->GetKindName() << "> ===\n"; + LogInfo::MapleLogger() << "BB id:" << bb->GetId() << "\n"; + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << " pred [ "; + for (auto *pred : bb->GetPreds()) { + LogInfo::MapleLogger() << pred->GetId() << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << " succ [ "; + for (auto *succ : bb->GetSuccs()) { + LogInfo::MapleLogger() << succ->GetId() << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + const StmtNode *stmt = bb->GetFirstStmt(); + if (stmt != nullptr) { + bool done = false; + do { + done = stmt == bb->GetLastStmt(); + stmt->Dump(1); + LogInfo::MapleLogger() << "\n"; + stmt = stmt->GetNext(); + } while (!done); + } else { + LogInfo::MapleLogger() << "\n"; + } + } +} + +void CGFunc::DumpCGIR() const { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n****** CGIR for " << funcSt->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, this) { + if (bb->IsUnreachable()) { + continue; + } + LogInfo::MapleLogger() << "=== BB " << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx(); + LogInfo::MapleLogger() << " ==> @" << func.GetLabelName(bb->GetLabIdx()) << "]"; + } + + LogInfo::MapleLogger() << "> <" << bb->GetId() << "> "; + if (bb->GetLoop()) { + LogInfo::MapleLogger() << "[Loop level " << bb->GetLoop()->GetLoopLevel(); + LogInfo::MapleLogger() << ", head BB " << bb->GetLoop()->GetHeader()->GetId() << "]"; + } + if (bb->IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if 
(bb->IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb->GetFirstStmt() == cleanupLabel) { + LogInfo::MapleLogger() << "cleanup "; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << "succs: "; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << "preds: "; + for (auto *predBB : bb->GetPreds()) { + LogInfo::MapleLogger() << predBB->GetId() << " "; + } + } + if (!bb->GetEhSuccs().empty()) { + LogInfo::MapleLogger() << "eh_succs: "; + for (auto *ehSuccBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + if (!bb->GetEhPreds().empty()) { + LogInfo::MapleLogger() << "eh_preds: "; + for (auto *ehPredBB : bb->GetEhPreds()) { + LogInfo::MapleLogger() << ehPredBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "===\n"; + LogInfo::MapleLogger() << "frequency:" << bb->GetFrequency() << "\n"; + + FOR_BB_INSNS_CONST(insn, bb) { + insn->Dump(); + } + } +} + +void CGFunc::DumpLoop() const { + for (const auto *lp : loops) { + lp->PrintLoops(*lp); + } +} + +void CGFunc::ClearLoopInfo() { + loops.clear(); + loops.shrink_to_fit(); + FOR_ALL_BB(bb, this) { + bb->ClearLoopPreds(); + bb->ClearLoopSuccs(); + } +} + +void CGFunc::DumpCFGToDot(const std::string &fileNamePrefix) { + std::ofstream file(fileNamePrefix + GetName()); + file << "digraph {" << std::endl; + for (auto *bb : GetAllBBs()) { + if (bb == nullptr) { + continue; + } + auto &succs = bb->GetSuccs(); + if (succs.empty()) { + continue; + } + file << " " << bb->GetId() << "->{"; + for (auto *succ : succs) { + file << succ->GetId() << " "; + } + file << "};"; + } + file << "}" << std::endl; +} + +void CGFunc::PatchLongBranch() { + for (BB *bb = firstBB->GetNext(); bb != nullptr; bb = bb->GetNext()) { + bb->SetInternalFlag1(bb->GetInternalFlag1() + bb->GetPrev()->GetInternalFlag1()); + } + BB *next = nullptr; + for (BB *bb = firstBB; bb != nullptr; bb = next) { + next = bb->GetNext(); + if (bb->GetKind() != BB::kBBIf && bb->GetKind() != BB::kBBGoto) { + continue; + } + Insn *insn = bb->GetLastInsn(); + while (insn->IsImmaterialInsn()) { + insn = insn->GetPrev(); + } + BB *tbb = GetBBFromLab2BBMap(GetLabelInInsn(*insn)); + if ((tbb->GetInternalFlag1() - bb->GetInternalFlag1()) < MaxCondBranchDistance()) { + continue; + } + InsertJumpPad(insn); + } +} + +void CGFunc::UpdateAllRegisterVregMapping(MapleMap &newMap) { + vregsToPregsMap.clear(); + for (auto it : newMap) { + vregsToPregsMap[it.first] = it.second; + } +} + +bool CgHandleFunction::PhaseRun(maplebe::CGFunc &f) { + f.HandleFunction(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) + +bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { + if (f.GetCG()->GetCGOptions().WithDwarf()) { + f.DBGFixCallFrameLocationOffsets(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFixCFLocOsft, dbgfixcallframeoffsets) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/dbg.cpp b/ecmascript/mapleall/maple_be/src/cg/dbg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..07f02276a62671967a966ba828b72020e875e0cd --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/dbg.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dbg.h" +#include "emit.h" + +namespace mpldbg { +using maplebe::Operand; +using maplebe::MOperator; +using maplebe::CG; +using maplebe::Emitter; +using maplebe::OpndDesc; + +struct DbgDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store dbg instruction's operand type */ + std::array opndTypes; +}; + +static DbgDescr dbgDescrTable[kOpDbgLast + 1] = { +#define DBG_DEFINE(k, sub, n, o0, o1, o2) \ + { #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#include "dbg.def" +#undef DBG_DEFINE + { "undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } } +}; + +void DbgInsn::Dump() const { + MOperator mOp = GetMachineOpcode(); + DbgDescr &dbgDescr = dbgDescrTable[mOp]; + LogInfo::MapleLogger() << "DBG " << dbgDescr.name; + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + LogInfo::MapleLogger() << (i == 0 ? " : " : " "); + Operand &curOperand = GetOperand(i); + curOperand.Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +#if DEBUG +void DbgInsn::Check() const { + DbgDescr &dbgDescr = dbgDescrTable[GetMachineOpcode()]; + /* dbg instruction's 3rd /4th/5th operand must be null */ + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != dbgDescr.opndTypes[i]) { + CHECK_FATAL(false, "incorrect operand in debug insn"); + } + } +} +#endif + +uint32 DbgInsn::GetLoc() const { + if (mOp != OP_DBG_loc) { + return 0; + } + return static_cast(static_cast(opnds[0])->GetVal()); +} + +void ImmOperand::Dump() const { + LogInfo::MapleLogger() << " " << val; +} +void DBGOpndEmitVisitor::Visit(ImmOperand *v) { + emitter.Emit(v->GetVal()); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/ebo.cpp b/ecmascript/mapleall/maple_be/src/cg/ebo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..865f29ad10f75dd272285d3fea70a7013ef3c779 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/ebo.cpp @@ -0,0 +1,1306 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#if TARGAARCH64 +#include "aarch64_ebo.h" +#elif TARGRISCV64 +#include "riscv64_ebo.h" +#endif +#if TARGARM32 +#include "arm32_ebo.h" +#endif +#include "securec.h" + +#include "optimize_common.h" + +/* + * The Optimizations include forward propagation, common expression elimination, constant folding, + * dead code elimination and some target optimizations. The main entry of the optimization is run. + * When the Optimization level is less than O2, it can only perform in single block. 
At O2 it
+ * can work across a whole sequence of blocks.
+ */
+namespace maplebe {
+using namespace maple;
+
+#define EBO_DUMP CG_DEBUG_FUNC(*cgFunc)
+#define EBO_DUMP_NEWPM CG_DEBUG_FUNC(f)
+#define TRUE_OPND cgFunc->GetTrueOpnd()
+
+constexpr uint32 kEboOpndHashLength = 521;
+constexpr uint32 kEboMaxBBNums = 200;
+
+/* Return the opndInfo for the first mem operand of insn. */
+MemOpndInfo *Ebo::GetMemInfo(InsnInfo &insnInfo) {
+  Insn *insn = insnInfo.insn;
+  CHECK_FATAL(insn != nullptr, "insnInfo.insn is nullptr!");
+  CHECK_FATAL(insn->AccessMem(), "insn does not access memory!");
+  uint32 opndNum = insn->GetOperandSize();
+  if (insn->IsLoad()) {
+    for (uint32 i = 0; i < opndNum; ++i) {
+      if (insn->GetOperand(i).IsMemoryAccessOperand()) {
+        return static_cast<MemOpndInfo*>(insnInfo.origOpnd[i]);
+      }
+    }
+  } else if (insn->IsStore()) {
+    int32 resId = 0;
+    for (uint32 i = 0; i < opndNum; ++i) {
+      if (insn->OpndIsDef(i)) {
+        if (insn->GetOperand(i).IsMemoryAccessOperand()) {
+          return static_cast<MemOpndInfo*>(insnInfo.result[resId]);
+        } else {
+          resId++;
+        }
+      }
+    }
+  }
+  return nullptr;
+}
+
+void Ebo::EnlargeSpaceForLA(Insn &csetInsn) {
+  CHECK_FATAL(live != nullptr, "no live info!");
+  live->EnlargeSpaceForLiveAnalysis(*csetInsn.GetBB());
+}
+
+bool Ebo::IsFrameReg(Operand &opnd) const {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  RegOperand &reg = static_cast<RegOperand&>(opnd);
+  return cgFunc->IsFrameReg(reg);
+}
+
+Operand *Ebo::GetZeroOpnd(uint32 size) const {
+#if TARGAARCH64 || TARGRISCV64
+  return size > k64BitSize ? nullptr : &cgFunc->GetZeroOpnd(size);
+#else
+  return nullptr;
+#endif
+}
+
+bool Ebo::IsSaveReg(const Operand &opnd) const {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  const RegOperand &reg = static_cast<const RegOperand&>(opnd);
+  return cgFunc->IsSaveReg(reg, *cgFunc->GetFunction().GetReturnType(), cgFunc->GetBecommon());
+}
+
+bool Ebo::IsPhysicalReg(const Operand &opnd) const {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  const RegOperand &reg = static_cast<const RegOperand&>(opnd);
+  return reg.IsPhysicalRegister();
+}
+
+bool Ebo::HasAssignedReg(const Operand &opnd) const {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  const auto &reg = static_cast<const RegOperand&>(opnd);
+  return reg.IsVirtualRegister() ? (!IsInvalidReg(reg)) : true;
+}
+
+bool Ebo::IsOfSameClass(const Operand &op0, const Operand &op1) const {
+  if (!op0.IsRegister() || !op1.IsRegister()) {
+    return false;
+  }
+  const auto &reg0 = static_cast<const RegOperand&>(op0);
+  const auto &reg1 = static_cast<const RegOperand&>(op1);
+  return reg0.GetRegisterType() == reg1.GetRegisterType();
+}
+
+/* return true if opnd is available in bb. */
+bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo *info) {
+  if (info == nullptr) {
+    return false;
+  }
+  if (info->opnd == nullptr) {
+    return false;
+  }
+
+  Operand *op = info->opnd;
+  if (IsConstantImmOrReg(*op)) {
+    return true;
+  }
+
+  int32 hashVal = 0;
+  if (op->IsRegShift() || op->IsRegister()) {
+    hashVal = -1;
+  } else {
+    hashVal = info->hashVal;
+  }
+  if (GetOpndInfo(*op, hashVal) != info) {
+    return false;
+  }
+  /* global operands aren't supported at low levels of optimization.
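+     Below O2 the operand tables are torn down after every block, so an entry recorded in a different BB cannot be trusted here.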
+   */
+  if ((Globals::GetInstance()->GetOptimLevel() < CGOptions::kLevel2) && (&bb != info->bb)) {
+    return false;
+  }
+  if (beforeRegAlloc && IsPhysicalReg(*op)) {
+    return false;
+  }
+  return true;
+}
+
+bool Ebo::ForwardPropCheck(const Operand *opndReplace, const OpndInfo &opndInfo, const Operand &opnd, Insn &insn) {
+  if (opndReplace == nullptr) {
+    return false;
+  }
+  if ((opndInfo.replacementInfo != nullptr) && opndInfo.replacementInfo->redefined) {
+    return false;
+  }
+#if TARGARM32
+  /* for arm32, disable forward prop in strd insns. */
+  if (insn.GetMachineOpcode() == MOP_strd) {
+    return false;
+  }
+  if (opndInfo.mayReDef) {
+    return false;
+  }
+#endif
+  if (!(IsConstantImmOrReg(*opndReplace) ||
+        ((OpndAvailableInBB(*insn.GetBB(), opndInfo.replacementInfo) || RegistersIdentical(opnd, *opndReplace)) &&
+         (HasAssignedReg(opnd) == HasAssignedReg(*opndReplace))))) {
+    return false;
+  }
+  /* before RA, the replacement operand must not be a physical register */
+  return !beforeRegAlloc || !IsPhysicalReg(*opndReplace);
+}
+
+bool Ebo::RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd,
+                          const OpndInfo *tmpInfo) {
+  if (IsConstantImmOrReg(opnd)) {
+    return false;
+  }
+  if (!(!beforeRegAlloc || (HasAssignedReg(oldOpnd) == HasAssignedReg(*opndReplace)) || IsZeroRegister(opnd) ||
+        !insn.IsMove())) {
+    return false;
+  }
+  std::set<regno_t> defRegs = insn.GetDefRegs();
+  if (!(defRegs.empty() ||
+        ((opnd.IsRegister() && !defRegs.count(static_cast<const RegOperand&>(opnd).GetRegisterNumber())) ||
+         !beforeRegAlloc))) {
+    return false;
+  }
+  if (!(beforeRegAlloc || !IsFrameReg(oldOpnd))) {
+    return false;
+  }
+  if (insn.GetBothDefUseOpnd() != kInsnMaxOpnd) {
+    return false;
+  }
+  if (IsPseudoRet(insn)) {
+    return false;
+  }
+
+  return ((IsOfSameClass(oldOpnd, *opndReplace) && (oldOpnd.GetSize() <= opndReplace->GetSize())) ||
+          ((tmpInfo != nullptr) && IsMovToSIMDVmov(insn, *tmpInfo->insn)));
+}
+
+/* For a memory operand, the info is stored in a hash table; this function computes its hash value. */
+int32 Ebo::ComputeOpndHash(const Operand &opnd) const {
+  uint64 hashIdx = reinterpret_cast<uint64>(&opnd) >> k4ByteSize;
+  return static_cast<int32>(hashIdx % kEboOpndHashLength);
+}
+
+/* Store the operand information: in vRegInfo if it is a register, otherwise in the hash table. */
+void Ebo::SetOpndInfo(const Operand &opnd, OpndInfo *opndInfo, int32 hashVal) {
+  /* opnd is a Register or RegShift */
+  if (hashVal == -1) {
+    const RegOperand &reg = GetRegOperand(opnd);
+    vRegInfo[reg.GetRegisterNumber()] = opndInfo;
+    return;
+  }
+
+  CHECK_FATAL(static_cast<size_t>(static_cast<uint32>(hashVal)) < exprInfoTable.size(),
+              "SetOpndInfo hashVal out of range!");
+  opndInfo->hashVal = hashVal;
+  opndInfo->hashNext = exprInfoTable.at(hashVal);
+  exprInfoTable.at(hashVal) = opndInfo;
+}
+
+/* Change the info of opnd from opndInfo to newInfo.
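+   The old entry is unlinked from its hash chain (or from vRegInfo for a register) and newInfo, which may be null, is spliced into its place.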
+ */
+void Ebo::UpdateOpndInfo(const Operand &opnd, OpndInfo &opndInfo, OpndInfo *newInfo, int32 hashVal) {
+  if (hashVal == -1) {
+    const RegOperand &reg = GetRegOperand(opnd);
+    vRegInfo[reg.GetRegisterNumber()] = newInfo;
+    return;
+  }
+  DEBUG_ASSERT(hashVal < exprInfoTable.size(), "UpdateOpndInfo hashVal out of range!");
+  OpndInfo *info = exprInfoTable.at(hashVal);
+  if (newInfo != nullptr) {
+    newInfo->hashNext = opndInfo.hashNext;
+    opndInfo.hashNext = nullptr;
+    if (info == &opndInfo) {
+      exprInfoTable.at(hashVal) = newInfo;
+      return;
+    }
+    while (info != nullptr) {
+      if (info->hashNext == &opndInfo) {
+        info->hashNext = newInfo;
+        return;
+      }
+      info = info->hashNext;
+    }
+    return;
+  }
+  if (info == &opndInfo) {
+    exprInfoTable.at(hashVal) = opndInfo.hashNext;
+    return;
+  }
+  while (info != nullptr) {
+    if (info->hashNext == &opndInfo) {
+      info->hashNext = opndInfo.hashNext;
+      opndInfo.hashNext = nullptr;
+      return;
+    }
+    info = info->hashNext;
+  }
+}
+
+/* return true if op1 and op2 are equal */
+bool Ebo::OperandEqual(const Operand &op1, const Operand &op2) const {
+  if (&op1 == &op2) {
+    return true;
+  }
+  if (op1.GetKind() != op2.GetKind()) {
+    return false;
+  }
+  return OperandEqSpecial(op1, op2);
+}
+
+OpndInfo *Ebo::GetOpndInfo(const Operand &opnd, int32 hashVal) const {
+  if (hashVal < 0) {
+    const RegOperand &reg = GetRegOperand(opnd);
+    auto it = vRegInfo.find(reg.GetRegisterNumber());
+    return it != vRegInfo.end() ? it->second : nullptr;
+  }
+  /* do not look up a previous entry for a memory operand */
+  if (opnd.IsMemoryAccessOperand()) {
+    return nullptr;
+  }
+  DEBUG_ASSERT(hashVal < exprInfoTable.size(), "GetOpndInfo hashVal out of range!");
+  OpndInfo *info = exprInfoTable.at(hashVal);
+  while (info != nullptr) {
+    if (&opnd == info->opnd) {
+      return info;
+    }
+    info = info->hashNext;
+  }
+  return nullptr;
+}
+
+/* Create an opndInfo for opnd. */
+OpndInfo *Ebo::GetNewOpndInfo(BB &bb, Insn *insn, Operand &opnd, int32 hashVal) {
+  OpndInfo *opndInfo = nullptr;
+  if (opnd.IsMemoryAccessOperand()) {
+    opndInfo = eboMp->New<MemOpndInfo>(opnd);
+  } else {
+    opndInfo = eboMp->New<OpndInfo>(opnd);
+  }
+  /* Initialize the entry. */
+  opndInfo->hashVal = hashVal;
+  opndInfo->opnd = &opnd;
+  opndInfo->bb = &bb;
+  opndInfo->insn = insn;
+  opndInfo->prev = lastOpndInfo;
+  if (firstOpndInfo == nullptr) {
+    firstOpndInfo = opndInfo;
+  } else {
+    lastOpndInfo->next = opndInfo;
+  }
+  lastOpndInfo = opndInfo;
+  return opndInfo;
+}
+
+/* Update the use information for localOpnd because of its use insn currentInsn.
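+   On the first use a fresh entry is created and registered; in all cases the entry's reference count is incremented.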
+ */
+OpndInfo *Ebo::OperandInfoUse(BB &currentBB, Operand &localOpnd) {
+  if (!(localOpnd.IsRegister() || localOpnd.IsRegShift()) && !localOpnd.IsMemoryAccessOperand()) {
+    return nullptr;
+  }
+  int hashVal = 0;
+  /* only arm32 has RegShift operands */
+  if (localOpnd.IsRegister() || localOpnd.IsRegShift()) {
+    hashVal = -1;
+  } else {
+    hashVal = ComputeOpndHash(localOpnd);
+  }
+  OpndInfo *opndInfo = GetOpndInfo(localOpnd, hashVal);
+
+  if (opndInfo == nullptr) {
+    opndInfo = GetNewOpndInfo(currentBB, nullptr, localOpnd, hashVal);
+    SetOpndInfo(localOpnd, opndInfo, hashVal);
+  }
+  IncRef(*opndInfo);
+  return opndInfo;
+}
+
+/* return true if op0 is identical to op1 */
+bool Ebo::RegistersIdentical(const Operand &op0, const Operand &op1) const {
+  if (&op0 == &op1) {
+    return true;
+  }
+  if (!(op0.IsRegister() && op1.IsRegister())) {
+    return false;
+  }
+  const RegOperand &reg0 = static_cast<const RegOperand&>(op0);
+  const RegOperand &reg1 = static_cast<const RegOperand&>(op1);
+  return ((reg0.IsPhysicalRegister() || !IsInvalidReg(reg0)) &&
+          (reg1.IsPhysicalRegister() || !IsInvalidReg(reg1)) &&
+          (reg0.GetRegisterType() == reg1.GetRegisterType()) &&
+          (reg0.GetRegisterNumber() == reg1.GetRegisterNumber()));
+}
+
+InsnInfo *Ebo::GetNewInsnInfo(Insn &insn) {
+  InsnInfo *insnInfo = eboMp->New<InsnInfo>(*eboMp, insn);
+  insnInfo->prev = lastInsnInfo;
+  if (firstInsnInfo == nullptr) {
+    firstInsnInfo = insnInfo;
+  } else {
+    lastInsnInfo->next = insnInfo;
+  }
+  lastInsnInfo = insnInfo;
+  insnInfo->next = nullptr;
+  return insnInfo;
+}
+
+uint32 Ebo::ComputeHashVal(Insn &insn, const MapleVector<OpndInfo*> &opndInfos) const {
+  uint32 hashVal = 0;
+  if (insn.AccessMem()) {
+    hashVal = kEboDefaultMemHash;
+    if (insn.NoAlias()) {
+      hashVal = kEboNoAliasMemHash;
+    }
+    MemOperand *memOpnd = static_cast<MemOperand*>(insn.GetMemOpnd());
+    if (memOpnd != nullptr) {
+      Operand *baseReg = memOpnd->GetBaseRegister();
+      if ((baseReg != nullptr) && IsFrameReg(*baseReg)) {
+        hashVal = kEboSpillMemHash;
+      }
+    }
+  } else if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) {
+    hashVal = kEboCopyInsnHash;
+  } else {
+    uint32 opndNum = insn.GetOperandSize();
+    hashVal = insn.GetMachineOpcode();
+    for (uint32 i = 0; i < opndNum; ++i) {
+      hashVal += static_cast<uint32>(reinterpret_cast<uintptr_t>(opndInfos.at(i)));
+    }
+    hashVal = static_cast<uint32>(kEboReservedInsnHash + EBO_EXP_INSN_HASH(hashVal));
+  }
+  return hashVal;
+}
+
+/* compute the hash value of insn and record its InsnInfo */
+void Ebo::HashInsn(Insn &insn, const MapleVector<OpndInfo*> &origInfo, const MapleVector<OpndInfo*> &opndInfos) {
+  uint32 hashVal = ComputeHashVal(insn, opndInfos);
+  /* Create a new insnInfo entry and add the new insn to the hash table. */
+  InsnInfo *insnInfo = GetNewInsnInfo(insn);
+  insnInfo->bb = insn.GetBB();
+  insnInfo->insn = &insn;
+  insnInfo->hashIndex = hashVal;
+  insnInfo->same = insnInfoTable.at(hashVal);
+
+  if (!beforeRegAlloc) {
+    if ((insn.IsCall() || insn.IsTailCall() || insn.IsAsmInsn()) && !insn.GetIsThrow()) {
+      DefineCallerSaveRegisters(*insnInfo);
+    } else if (IsClinitCheck(insn)) {
+      DefineClinitSpecialRegisters(*insnInfo);
+    }
+  }
+  uint32 opndNum = insn.GetOperandSize();
+  for (uint32 i = 0; i < opndNum; ++i) {
+    /* Copy all the opndInfo entries for the operands. */
+    insnInfo->origOpnd.emplace_back(origInfo.at(i));
+    insnInfo->optimalOpnd.emplace_back(opndInfos.at(i));
+    /* Keep the result info.
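+       Every defined operand (including each member of a list operand) gets a result OpndInfo tied back to this insnInfo.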
*/ + if (!insn.OpndIsDef(i)) { + continue; + } + auto genOpndInfoDef = [this, insnInfo](Operand &op) { + OpndInfo *opndInfo = nullptr; + if ((&op != TRUE_OPND) && + ((op.IsRegister() && (&op) != GetZeroOpnd(op.GetSize())) || + (op.IsMemoryAccessOperand() && (static_cast(op)).GetBaseRegister() != nullptr))) { + opndInfo = OperandInfoDef(*insnInfo->bb, *insnInfo->insn, op); + opndInfo->insnInfo = insnInfo; + } + insnInfo->result.emplace_back(opndInfo); + }; + Operand &op = insn.GetOperand(i); + if (op.IsList() && !static_cast(op).GetOperands().empty()) { + for (auto operand : static_cast(op).GetOperands()) { + genOpndInfoDef(*operand); + } + } else { + genOpndInfoDef(op); + } + } + SetInsnInfo(hashVal, *insnInfo); +} + +/* do decref of orig_info, refCount will be set to 0 */ +void Ebo::RemoveUses(uint32 opndNum, const MapleVector &origInfo) { + OpndInfo *info = nullptr; + for (uint32 i = 0; i < opndNum; ++i) { + info = origInfo.at(i); + if (info != nullptr) { + DecRef(*info); + if (info->opnd->IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(info); + OpndInfo *baseInfo = memInfo->GetBaseInfo(); + OpndInfo *offsetInfo = memInfo->GetOffsetInfo(); + if (baseInfo != nullptr) { + DecRef(*baseInfo); + } + if (offsetInfo != nullptr) { + DecRef(*offsetInfo); + } + } + } + } +} + +OpndInfo *Ebo::BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex) { + auto *memOpnd = static_cast(&opnd); + Operand *base = memOpnd->GetBaseRegister(); + Operand *offset = memOpnd->GetOffset(); + OpndInfo *baseInfo = nullptr; + OpndInfo *offsetInfo = nullptr; + if (base != nullptr) { + if (!memOpnd->IsIntactIndexed()) { + baseInfo = OperandInfoUse(bb, *base); + baseInfo = OperandInfoDef(bb, insn, *base); + return baseInfo; + } else { + baseInfo = OperandInfoUse(bb, *base); + } + /* forward prop for base register. */ + if ((baseInfo != nullptr) && base->IsRegister()) { + auto *baseReg = static_cast(base); + Operand *replaceOpnd = baseInfo->replacementOpnd; + OpndInfo *replaceInfo = baseInfo->replacementInfo; + if ((replaceInfo != nullptr) && (replaceOpnd != nullptr) && !cgFunc->IsSPOrFP(*baseReg) && + (!beforeRegAlloc || (!IsPhysicalReg(*replaceOpnd) && !IsPhysicalReg(*base))) && + IsOfSameClass(*base, *replaceOpnd) && memOpnd->IsIntactIndexed() && + (base->GetSize() <= replaceOpnd->GetSize()) && + /* In case that replace opnd was redefined. 
*/ + !replaceInfo->redefined) { + MemOperand *newMem = static_cast(memOpnd->Clone(*cgFunc->GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "newMem is null in Ebo::BuildAllInfo(BB *bb)"); + newMem->SetBaseRegister(*static_cast(replaceOpnd)); + insn.SetOperand(opndIndex, *newMem); + DecRef(*baseInfo); + IncRef(*replaceInfo); + baseInfo = replaceInfo; + } + } + } + if ((offset != nullptr) && offset->IsRegister()) { + offsetInfo = OperandInfoUse(bb, *offset); + } + OpndInfo *opndInfo = OperandInfoUse(bb, insn.GetOperand(opndIndex)); + CHECK_FATAL(opndInfo != nullptr, "opndInfo should not be null ptr"); + MemOpndInfo *memInfo = static_cast(opndInfo); + if (baseInfo != nullptr) { + memInfo->SetBaseInfo(*baseInfo); + } + if (offsetInfo != nullptr) { + memInfo->SetOffsetInfo(*offsetInfo); + } + return memInfo; +} + +OpndInfo *Ebo::BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, + MapleVector &origInfos) { + if (opnd.IsList()) { + ListOperand *listOpnd = static_cast(&opnd); + for (auto op : listOpnd->GetOperands()) { + OperandInfoUse(bb, *op); + } + return nullptr; + } + DEBUG_ASSERT(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (opnd.IsConditionCode()) { + Operand &rFlag = cgFunc->GetOrCreateRflag(); + OperandInfoUse(bb, rFlag); + /* if operand is Opnd_cond, the orig_info store the info of rFlag. */ + OpndInfo *tempOpndInfo = GetOpndInfo(rFlag, -1); + origInfos.at(opndIndex) = tempOpndInfo; + return nullptr; + } + + if (!(opnd.IsRegister() || opnd.IsRegShift()) && !opnd.IsMemoryAccessOperand()) { + return nullptr; + } + + if (opnd.IsMemoryAccessOperand()) { + OpndInfo *memInfo = BuildMemOpndInfo(bb, insn, opnd, opndIndex); + CHECK_FATAL(memInfo != nullptr, "build memopnd info failed in Ebo::BuildAllInfo"); + origInfos.at(opndIndex) = memInfo; + return nullptr; + } + OpndInfo *opndInfo = OperandInfoUse(bb, opnd); + origInfos.at(opndIndex) = opndInfo; + return opndInfo; +} + +bool Ebo::ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, + OpndInfo *&opndInfo, MapleVector &origInfos) { + CHECK_FATAL(opnd != nullptr, "nullptr check"); + Operand *opndReplace = opndInfo->replacementOpnd; + /* Don't propagate physical registers before register allocation. */ + if (beforeRegAlloc && (opndReplace != nullptr) && (IsPhysicalReg(*opndReplace) || IsPhysicalReg(*opnd))) { + return false; + } + + /* forward propagation of constants */ + CHECK_FATAL(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (!ForwardPropCheck(opndReplace, *opndInfo, *opnd, insn)) { + return false; + } + Operand *oldOpnd = opnd; + opnd = opndInfo->replacementOpnd; + opndInfo = opndInfo->replacementInfo; + + /* constant prop. */ + if (opnd->IsIntImmediate() && oldOpnd->IsRegister()) { + if (DoConstProp(insn, opndIndex, *opnd)) { + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + } + } + /* move reg, wzr, store vreg, mem ==> store wzr, mem */ +#if TARGAARCH64 || TARGRISCV64 + if (IsZeroRegister(*opnd) && opndIndex == 0 && + (insn.GetMachineOpcode() == MOP_wstr || insn.GetMachineOpcode() == MOP_xstr)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + insn.SetOperand(opndIndex, *opnd); + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. 
*/ + origInfos.at(opndIndex) = opndInfo; + if (EBO_DUMP) { + insn.Dump(); + } + } +#endif + /* forward prop for registers. */ + if (!RegForwardCheck(insn, *opnd, opndReplace, *oldOpnd, origInfos.at(opndIndex))) { + return false; + } + /* Copies to and from the same register are not needed. */ + if (!beforeRegAlloc && Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn) && (opndIndex == kInsnSecondOpnd) && + RegistersIdentical(*opnd, insn.GetOperand(kInsnFirstOpnd))) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "===Remove the new insn because Copies to and from the same register. \n"; + } + return true; + } + if (static_cast(opnd)->GetRegisterNumber() == RSP) { + /* Disallow optimization with stack pointer */ + return false; + } + + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + DecRef(*origInfos.at(opndIndex)); + insn.SetOperand(opndIndex, *opnd); + + if (EBO_DUMP) { + insn.Dump(); + } + IncRef(*opndInfo); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + /* extend the live range of the replacement operand. */ + if ((opndInfo->bb != insn.GetBB()) && opnd->IsRegister()) { + MarkOpndLiveIntoBB(*opnd, *insn.GetBB(), *opndInfo->bb); + } + return false; +} + +/* + * this func do only one of the following optimization: + * 1. Remove DupInsns + * 2. SpecialSequence OPT + * 3. Remove Redundant "Load" + * 4. Constant Fold + */ +void Ebo::SimplifyInsn(Insn &insn, bool &insnReplaced, bool opndsConstant, + const MapleVector &opnds, const MapleVector &opndInfos, + const MapleVector &origInfos) { + if (insn.AccessMem()) { + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, origInfos); + } + return; + } + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, opndInfos); + } + return; + } + if (!insnReplaced && !insn.HasSideEffects()) { + uint32 opndNum = insn.GetOperandSize(); + if (opndsConstant && (opndNum > 1)) { + if (!insn.GetDefRegs().empty()) { + insnReplaced = Csel2Cset(insn, opnds); + } + } + if (insnReplaced) { + return; + } + if (opndNum >= 2) { + /* special case */ + if (!insn.GetDefRegs().empty() && ResIsNotDefAndUse(insn)) { + if ((opndNum == 3) && (insn.GetDefRegs().size() == 1) && + (((kInsnSecondOpnd < opnds.size()) && (opnds[kInsnSecondOpnd] != nullptr) && + IsConstantImmOrReg(*opnds[kInsnSecondOpnd])) || + ((kInsnThirdOpnd < opnds.size()) && (opnds[kInsnThirdOpnd] != nullptr) && + IsConstantImmOrReg(*opnds[kInsnThirdOpnd])))) { + insnReplaced = SimplifyConstOperand(insn, opnds, opndInfos); + } + } + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, origInfos); + } + } + } +} + +/* + * this func do: + * 1. delete DupInsn if SimplifyInsn failed. + * 2. buildInsnInfo if delete DupInsn failed(func HashInsn do this). + * 3. update replaceInfo. + */ +void Ebo::FindRedundantInsns(BB &bb, Insn *&insn, const Insn *prev, bool insnReplaced, + MapleVector &opnds, MapleVector &opndInfos, + const MapleVector &origInfos) { + CHECK_FATAL(insn != nullptr, "nullptr check"); + if (!insnReplaced) { + CHECK_FATAL(origInfos.size() != 0, "null ptr check"); + CHECK_FATAL(opndInfos.size() != 0, "null ptr check"); + HashInsn(*insn, origInfos, opndInfos); + /* Processing the result of the insn. 
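+     For an effective (non-fmov) copy, the source operand and its info are recorded as the result's replacement so later uses can be forwarded.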
*/ + if ((Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) || + !insn->GetDefRegs().empty()) && !insn->IsSpecialIntrinsic()) { + Operand *res = &insn->GetOperand(kInsnFirstOpnd); + if ((res != nullptr) && (res != TRUE_OPND) && (res != GetZeroOpnd(res->GetSize()))) { + CHECK_FATAL(lastInsnInfo != nullptr, "lastInsnInfo is null!"); + OpndInfo *opndInfo = lastInsnInfo->result[0]; + /* Don't propagate for fmov insns. */ + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) && (opndInfo != nullptr) && !IsFmov(*insn)) { + CHECK_FATAL(!opnds.empty(), "null container!"); + opndInfo->replacementOpnd = opnds[kInsnSecondOpnd]; + opndInfo->replacementInfo = opndInfos[kInsnSecondOpnd]; + } else if (insn->GetBothDefUseOpnd() != kInsnMaxOpnd && (opndInfo != nullptr)) { + opndInfo->replacementOpnd = nullptr; + opndInfo->replacementInfo = nullptr; + } + } + } + insn = insn->GetNext(); + } else { + uint32 opndNum = insn->GetOperandSize(); + RemoveUses(opndNum, origInfos); + /* If insn is replaced, reanalyze the new insn to have more opportunities. */ + insn = (prev == nullptr ? bb.GetFirstInsn() : prev->GetNext()); + } +} + +void Ebo::PreProcessSpecialInsn(Insn &insn) { + DefineReturnUseRegister(insn); + + if (insn.IsCall() || insn.IsClinit()) { + DefineCallUseSpecialRegister(insn); + } +} + +/* + * this func do : + * 1.build opereand info of bb; + * 2.do Forward propagation after regalloc; + * 3.simplify the insn,include Constant folding,redundant insns elimination. + */ +void Ebo::BuildAllInfo(BB &bb) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===Enter BuildOperandinfo of bb:" << bb.GetId() << "===\n"; + } + Insn *insn = bb.GetFirstInsn(); + while ((insn != nullptr) && (insn != bb.GetLastInsn()->GetNext())) { + if (!insn->IsTargetInsn()) { + insn = insn->GetNext(); + continue; + } + PreProcessSpecialInsn(*insn); + uint32 opndNum = insn->GetOperandSize(); + if (!insn->IsMachineInstruction () || opndNum == 0) { + insn = insn->GetNext(); + continue; + } + MapleVector opnds(eboAllocator.Adapter()); + MapleVector opndInfos(eboAllocator.Adapter()); + MapleVector origInfos(eboAllocator.Adapter()); + Insn *prev = insn->GetPrev(); + bool insnReplaced = false; + bool opndsConstant = true; + /* start : Process all the operands. */ + for (uint32 i = 0; i < opndNum; ++i) { + if (!insn->OpndIsUse(i)) { + opnds.emplace_back(nullptr); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); + continue; + } + Operand *opnd = &(insn->GetOperand(i)); + opnds.emplace_back(opnd); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); + if (IsConstantImmOrReg(*opnd)) { + continue; + } + OpndInfo *opndInfo = BuildOperandInfo(bb, *insn, *opnd, i, origInfos); + if (opndInfo == nullptr) { + continue; + } + + /* Don't do propagation for special intrinsic insn. */ + if (!insn->IsSpecialIntrinsic()) { + insnReplaced = ForwardPropagateOpnd(*insn, opnd, i, opndInfo, origInfos); + } + if (insnReplaced) { + continue; + } + opnds.at(i) = opnd; + opndInfos.at(i) = opndInfo; + if (!IsConstantImmOrReg(*opnd)) { + opndsConstant = false; + } + } /* End : Process all the operands. */ +#if TARGARM32 + Arm32Insn *currArm32Insn = static_cast(insn); + if (currArm32Insn->IsCondExecution()) { + Operand &rFlag = cgFunc->GetOrCreateRflag(); + OperandInfoUse(bb, rFlag); + } +#endif + + if (insnReplaced) { + RemoveUses(opndNum, origInfos); + Insn *temp = insn->GetNext(); + bb.RemoveInsn(*insn); + insn = temp; + continue; + } + + /* simplify the insn. 
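+       Constant folding and special-sequence matching are only attempted for non-intrinsic insns.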
*/ + if (!insn->IsSpecialIntrinsic()) { + SimplifyInsn(*insn, insnReplaced, opndsConstant, opnds, opndInfos, origInfos); + } + FindRedundantInsns(bb, insn, prev, insnReplaced, opnds, opndInfos, origInfos); + } +} + +/* Decrement the use counts for the actual operands of an insnInfo. */ +void Ebo::RemoveInsn(InsnInfo &info) { + Insn *insn = info.insn; + CHECK_FATAL(insn != nullptr, "get insn in info failed in Ebo::RemoveInsn"); + uint32 opndNum = insn->GetOperandSize(); + OpndInfo *opndInfo = nullptr; + for (uint32 i = 0; i < opndNum; i++) { + if (!insn->OpndIsUse(i)) { + continue; + } + opndInfo = info.origOpnd[i]; + if (opndInfo != nullptr) { + DecRef(*opndInfo); + Operand *opndTemp = opndInfo->opnd; + if (opndTemp == nullptr) { + continue; + } + if (opndTemp->IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(opndInfo); + OpndInfo *baseInfo = memInfo->GetBaseInfo(); + OpndInfo *offInfo = memInfo->GetOffsetInfo(); + if (baseInfo != nullptr) { + DecRef(*baseInfo); + } + if (offInfo != nullptr) { + DecRef(*offInfo); + } + } + } + } +#if TARGARM32 + Arm32CGFunc *a32CGFunc = static_cast(cgFunc); + auto &gotInfosMap = a32CGFunc->GetGotInfosMap(); + for (auto it = gotInfosMap.begin(); it != gotInfosMap.end();) { + if (it->first == insn) { + it = gotInfosMap.erase(it); + } else { + ++it; + } + } + auto &constInfosMap = a32CGFunc->GetConstInfosMap(); + for (auto it = constInfosMap.begin(); it != constInfosMap.end();) { + if (it->first == insn) { + it = constInfosMap.erase(it); + } else { + ++it; + } + } +#endif +} + +/* Mark opnd is live between def bb and into bb. */ +void Ebo::MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const { + if (live == nullptr) { + return; + } + if (&into == &def) { + return; + } + CHECK_FATAL(opnd.IsRegister(), "expect register here."); + const RegOperand ® = static_cast(opnd); + into.SetLiveInBit(reg.GetRegisterNumber()); + def.SetLiveOutBit(reg.GetRegisterNumber()); +} + +/* return insn information if has insnInfo,else,return lastInsnInfo */ +InsnInfo *Ebo::LocateInsnInfo(const OpndInfo &info) { + if (info.insn != nullptr) { + if (info.insnInfo != nullptr) { + return info.insnInfo; + } else { + InsnInfo *insnInfo = lastInsnInfo; + int32 limit = 50; + for (; (insnInfo != nullptr) && (limit != 0); insnInfo = insnInfo->prev, limit--) { + if (insnInfo->insn == info.insn) { + return insnInfo; + } + } + } + } + return nullptr; +} + +/* redundant insns elimination */ +void Ebo::RemoveUnusedInsns(BB &bb, bool normal) { + OpndInfo *opndInfo = nullptr; + Operand *opnd = nullptr; + + if (firstInsnInfo == nullptr) { + return; + } + + for (InsnInfo *insnInfo = lastInsnInfo; insnInfo != nullptr; insnInfo = insnInfo->prev) { + Insn *insn = insnInfo->insn; + if ((insn == nullptr) || (insn->GetBB() == nullptr)) { + continue; + } + /* stop looking for insn when it goes out of bb. 
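+       The insnInfo chain covers the whole extended block, so entries beyond this point belong to earlier blocks of the same EB.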
*/ + if (insn->GetBB() != &bb) { + break; + } + + uint32 resNum = insn->GetDefRegs().size(); + if (IsLastAndBranch(bb, *insn)) { + goto insn_is_needed; + } + + if (insn->IsClinit()) { + goto insn_is_needed; + } + + if ((resNum == 0) || IsGlobalNeeded(*insn) || insn->IsStore() || + IsDecoupleStaticOp(*insn) || insn->GetBothDefUseOpnd() != kInsnMaxOpnd) { + goto insn_is_needed; + } + + /* last insn of a 64x1 function is a float, 64x1 function may not be a float */ + if (cgFunc->GetFunction().GetAttr(FUNCATTR_oneelem_simd) && + insnInfo == lastInsnInfo) { + goto insn_is_needed; + } + + if (insn->GetMachineOpcode() == MOP_asm || insn->IsAtomic()) { + goto insn_is_needed; + } + + /* Check all result that can be removed. */ + for (uint32 i = 0; i < resNum; ++i) { + opndInfo = insnInfo->result[i]; + /* A couple of checks. */ + if (opndInfo == nullptr) { + continue; + } + if ((opndInfo->bb != &bb) || (opndInfo->insn == nullptr)) { + goto insn_is_needed; + } + opnd = opndInfo->opnd; + if (opnd == GetZeroOpnd(opnd->GetSize())) { + continue; + } +/* this part optimize some spacial case after RA. */ + if (!beforeRegAlloc && Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) && opndInfo && + insn->GetOperand(kInsnSecondOpnd).IsImmediate() && IsSameRedefine(bb, *insn, *opndInfo)) { + goto can_be_removed; + } + /* end special case optimize */ + if ((beforeRegAlloc && IsPhysicalReg(*opnd)) || (IsSaveReg(*opnd) && !opndInfo->redefinedInBB)) { + goto insn_is_needed; + } + /* Copies to and from the same register are not needed. */ + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn)) { + if (HasAssignedReg(*opnd) && HasAssignedReg(insn->GetOperand(kInsnSecondOpnd)) && + RegistersIdentical(*opnd, insn->GetOperand(kInsnSecondOpnd))) { + /* We may be able to get rid of the copy, but be sure that the operand is marked live into this block. */ + if ((insnInfo->origOpnd[kInsnSecondOpnd] != nullptr) && (&bb != insnInfo->origOpnd[kInsnSecondOpnd]->bb)) { + MarkOpndLiveIntoBB(*opnd, bb, *insnInfo->origOpnd[kInsnSecondOpnd]->bb); + } + /* propagate use count for this opnd to it's input operand. */ + if (opndInfo->same != nullptr) { + opndInfo->same->refCount += opndInfo->refCount; + } + + /* remove the copy causes the previous def to reach the end of the block. */ + if (!opndInfo->redefined && (opndInfo->same != nullptr)) { + opndInfo->same->redefined = false; + opndInfo->same->redefinedInBB = false; + } + goto can_be_removed; + } + } + /* there must bo no direct references to the operand. */ + if (!normal || (opndInfo->refCount != 0)) { + goto insn_is_needed; + } + /* + * When O1, the vreg who live out of bb should be recognized. + * The regs for clinit is also be marked to recognize it can't be deleted. so extend it to O2. 
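+     * Before RA, only BB-local vregs can be deleted safely; any other vreg may still be read by a later block.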
+ */ + if (opnd->IsRegister()) { + RegOperand *reg = static_cast(opnd); + if (beforeRegAlloc && !reg->IsBBLocalVReg()) { + goto insn_is_needed; + } + } + /* Volatile || sideeffect */ + if (opndInfo->insn->IsVolatile() || opndInfo->insn->HasSideEffects()) { + goto insn_is_needed; + } + + if (!opndInfo->redefinedInBB && LiveOutOfBB(*opnd, *opndInfo->bb)) { + goto insn_is_needed; + } + + if (opndInfo->redefinedInBB && opndInfo->redefinedInsn != nullptr && + opndInfo->redefinedInsn->GetBothDefUseOpnd() != kInsnMaxOpnd) { + goto insn_is_needed; + } + } + + if (!normal || insnInfo->mustNotBeRemoved || insn->GetDoNotRemove()) { + goto insn_is_needed; + } +can_be_removed: + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< ==== Remove Unused insn in bb:" << bb.GetId() << "====\n"; + insn->Dump(); + } + RemoveInsn(*insnInfo); + bb.RemoveInsn(*insn); + insnInfo->insn = nullptr; + insnInfo->bb = nullptr; + for (uint32 i = 0; i < resNum; i++) { + opndInfo = insnInfo->result[i]; + if (opndInfo == nullptr) { + continue; + } + if (opndInfo->redefined && (opndInfo->same != nullptr)) { + OpndInfo *next = opndInfo->same; + next->redefined = true; + if (opndInfo->redefinedInBB && (opndInfo->same->bb == &bb)) { + next->redefinedInBB = true; + } + } + if (!opndInfo->redefinedInBB && (opndInfo->same != nullptr) && (opndInfo->same->bb == &bb)) { + opndInfo->same->redefinedInBB = false; + } + if (!opndInfo->redefined && (opndInfo->same != nullptr)) { + opndInfo->same->redefined = false; + opndInfo->same->redefinedInBB = false; + } + } + optSuccess = true; + continue; +insn_is_needed: + if (!bb.GetEhSuccs().empty()) { + for (uint32 i = 0; i < resNum; i++) { + opndInfo = insnInfo->result[i]; + if ((opndInfo != nullptr) && (opndInfo->opnd != nullptr) && (opndInfo->same != nullptr)) { + UpdateNextInfo(*opndInfo); + } + } + } + + if (!bb.GetEhPreds().empty()) { + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + opndInfo = insnInfo->origOpnd[i]; + if ((opndInfo != nullptr) && (opndInfo->opnd != nullptr) && (opndInfo->same != nullptr)) { + UpdateNextInfo(*opndInfo); + } + if ((opndInfo != nullptr) && opndInfo->opnd && (&bb != opndInfo->bb) && opndInfo->opnd->IsRegister()) { + MarkOpndLiveIntoBB(*opndInfo->opnd, bb, *opndInfo->bb); + } + } + } + } /* end proccess insnInfo in currBB */ +} + +void Ebo::UpdateNextInfo(const OpndInfo &opndInfo) { + OpndInfo *nextInfo = opndInfo.same; + while (nextInfo != nullptr) { + if (nextInfo->insn != nullptr) { + InsnInfo *info = LocateInsnInfo(*nextInfo); + if (info != nullptr) { + info->mustNotBeRemoved = true; + } else { + /* + * Couldn't find the insnInfo entry. Make sure that the operand has + * a use count so that the defining insn will not be deleted. 
+ */ + nextInfo->refCount += opndInfo.refCount; + } + } + nextInfo = nextInfo->same; + } +} + +/* back up to last saved OpndInfo */ +void Ebo::BackupOpndInfoList(OpndInfo *saveLast) { + if (lastOpndInfo == saveLast) { + return; + } + OpndInfo *opndInfo = lastOpndInfo; + while (opndInfo != saveLast) { + int32 hashVal = 0; + if (opndInfo->opnd->IsRegister() || opndInfo->opnd->IsRegShift()) { + hashVal = -1; + } else { + hashVal = opndInfo->hashVal; + } + UpdateOpndInfo(*opndInfo->opnd, *opndInfo, opndInfo->same, hashVal); + opndInfo = opndInfo->prev; + } + if (saveLast != nullptr) { + saveLast->next = nullptr; + lastOpndInfo = saveLast; + } else { + firstOpndInfo = nullptr; + lastOpndInfo = nullptr; + } +} + +/* back up to last saved insn */ +void Ebo::BackupInsnInfoList(InsnInfo *saveLast) { + if (lastInsnInfo == saveLast) { + return; + } + InsnInfo *insnInfo = lastInsnInfo; + while (insnInfo != saveLast) { + SetInsnInfo(insnInfo->hashIndex, *(insnInfo->same)); + insnInfo = insnInfo->prev; + } + if (saveLast != nullptr) { + saveLast->next = nullptr; + lastInsnInfo = saveLast; + } else { + firstInsnInfo = nullptr; + lastInsnInfo = nullptr; + } +} + +/* add bb to eb ,and build operandinfo of bb */ +void Ebo::AddBB2EB(BB &bb) { + OpndInfo *saveLastOpndInfo = lastOpndInfo; + InsnInfo *saveLastInsnInfo = lastInsnInfo; + SetBBVisited(bb); + bbNum++; + BuildAllInfo(bb); + /* Stop adding BB to EB if the bbs in the current EB exceeds kEboMaxBBNums */ + if (bbNum < kEboMaxBBNums) { + for (auto *bbSucc : bb.GetSuccs()) { + if ((bbSucc->GetPreds().size() == 1) && IsNotVisited(*bbSucc)) { + AddBB2EB(*bbSucc); + } + } + } + + RemoveUnusedInsns(bb, true); + /* Remove information about Operand's and Insn's in this block. */ + BackupOpndInfoList(saveLastOpndInfo); + BackupInsnInfoList(saveLastInsnInfo); + bbNum--; +} + +/* Perform EBO */ +void Ebo::EboProcess() { + FOR_ALL_BB(bb, cgFunc) { + if (IsNotVisited(*bb)) { + bbNum = 0; + AddBB2EB(*bb); + } + } +} + +/* Perform EBO on O1 which the optimization can only be in a single block. */ +void Ebo::EboProcessSingleBB() { + FOR_ALL_BB(bb, cgFunc) { + OpndInfo *saveLastOpndInfo = lastOpndInfo; + InsnInfo *saveLastInsnInfo = lastInsnInfo; + BuildAllInfo(*bb); + RemoveUnusedInsns(*bb, true); + /* Remove information about Operand's and Insn's in this block. */ + BackupOpndInfoList(saveLastOpndInfo); + BackupInsnInfoList(saveLastInsnInfo); + } +} + +void Ebo::EboInit() { + visitedBBs.resize(cgFunc->NumBBs()); + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + visitedBBs[i] = false; + } + exprInfoTable.resize(kEboMaxOpndHash); + for (uint32 i = 0; i < kEboMaxOpndHash; ++i) { + exprInfoTable.at(i) = nullptr; + } + insnInfoTable.resize(kEboMaxInsnHash); + for (uint32 i = 0; i < kEboMaxInsnHash; ++i) { + insnInfoTable.at(i) = nullptr; + } + if (!beforeRegAlloc) { + BuildCallerSaveRegisters(); + } + optSuccess = false; +} + +/* perform EB optimizations right after instruction selection. */ +void Ebo::Run() { + EboInit(); + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) { + EboProcess(); + } else { + EboProcessSingleBB(); /* Perform SingleBB Optimization when -O1. 
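+       Each block's operand and insn tables are torn down immediately afterwards, so no state crosses block boundaries.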
*/ + } + if (optSuccess && cgFunc->GetMirModule().IsCModule()) { + Run(); + } +} + +/* === new pm === */ +bool CgEbo0::PhaseRun(maplebe::CGFunc &f) { + if (EBO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ebo0", f, f.GetMirModule()); + } + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + MemPool *eboMp = GetPhaseMemPool(); + Ebo *ebo = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ebo = eboMp->New(f, *eboMp, live, true, PhaseName()); +#endif +#if TARGARM32 + ebo = eboMp->New(f, *eboMp, live, true, "ebo0"); +#endif + ebo->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgEbo0::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgEbo0, ebo) + +bool CgEbo1::PhaseRun(maplebe::CGFunc &f) { + if (EBO_DUMP_NEWPM) { + DotGenerator::GenerateDot(PhaseName(), f, f.GetMirModule(), true); + } + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + MemPool *eboMp = GetPhaseMemPool(); + Ebo *ebo = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ebo = eboMp->New(f, *eboMp, live, true, PhaseName()); +#endif +#if TARGARM32 + ebo = eboMp->New(f, *eboMp, live, true, PhaseName()); +#endif + ebo->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgEbo1::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgEbo1, ebo1) + +bool CgPostEbo::PhaseRun(maplebe::CGFunc &f) { + if (EBO_DUMP_NEWPM) { + DotGenerator::GenerateDot(PhaseName(), f, f.GetMirModule()); + } + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + MemPool *eboMp = GetPhaseMemPool(); + Ebo *ebo = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ebo = eboMp->New(f, *eboMp, live, false, PhaseName()); +#endif +#if TARGARM32 + ebo = eboMp->New(f, *eboMp, live, false, PhaseName()); +#endif + ebo->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgPostEbo::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostEbo, postebo) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/eh_func.cpp b/ecmascript/mapleall/maple_be/src/cg/eh_func.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d54a2dfa2ad7b21e69f58f1fdb86c7a1f19a4c84 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/eh_func.cpp @@ -0,0 +1,737 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "eh_func.h" +#include "cgfunc.h" +#include "cg.h" +#include "mir_builder.h" +#include "switch_lowerer.h" + +namespace maplebe { +using namespace maple; + +void EHFunc::CollectEHInformation(std::vector> &catchVec) { + MIRFunction &mirFunc = cgFunc->GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + CHECK_FATAL(mirModule != nullptr, "mirModule is nullptr in CGFunc::BuildEHFunc"); + BlockNode *blkNode = mirFunc.GetBody(); + CHECK_FATAL(blkNode != nullptr, "current function body is nullptr in CGFunc::BuildEHFunc"); + EHTry *lastTry = nullptr; /* record last try */ + /* + * curTry: record the current try wrapping the current statement, + * reset to null when meet a endtry + */ + EHTry *curTry = nullptr; + StmtNode *nextStmt = nullptr; + + /* collect all try-catch blocks */ + for (StmtNode *stmt = blkNode->GetFirst(); stmt != nullptr; stmt = nextStmt) { + nextStmt = stmt->GetNext(); + Opcode op = stmt->GetOpCode(); + switch (op) { + case OP_try: { + TryNode *tryNode = static_cast(stmt); + EHTry *ehTry = cgFunc->GetMemoryPool()->New(*(cgFunc->GetFuncScopeAllocator()), *tryNode); + lastTry = ehTry; + curTry = ehTry; + AddTry(*ehTry); + break; + } + case OP_endtry: { + DEBUG_ASSERT(lastTry != nullptr, "lastTry is nullptr when current node is endtry"); + lastTry->SetEndtryNode(*stmt); + lastTry = nullptr; + curTry = nullptr; + break; + } + case OP_catch: { + CatchNode *catchNode = static_cast(stmt); + DEBUG_ASSERT(stmt->GetPrev()->GetOpCode() == OP_label, "catch's previous node is not a label"); + LabelNode *labelStmt = static_cast(stmt->GetPrev()); + catchVec.emplace_back(std::pair(labelStmt->GetLabelIdx(), catchNode)); + /* rename the type of <*void> to <*Throwable> */ + for (uint32 i = 0; i < catchNode->Size(); i++) { + MIRType *ehType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(i)); + DEBUG_ASSERT(ehType->GetKind() == kTypePointer, "ehType must be kTypePointer."); + MIRPtrType *ehPointedTy = static_cast(ehType); + if (ehPointedTy->GetPointedTyIdx() == static_cast(PTY_void)) { + DEBUG_ASSERT(mirModule->GetThrowableTyIdx() != 0, "throwable type id is 0"); + const MIRType *throwType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirModule->GetThrowableTyIdx()); + MIRType *pointerType = cgFunc->GetBecommon().BeGetOrCreatePointerType(*throwType); + catchNode->SetExceptionTyIdxVecElement(pointerType->GetTypeIndex(), i); + } + } + break; + } + case OP_throw: { + if (!cgFunc->GetCG()->GetCGOptions().GenerateExceptionHandlingCode() || + (cgFunc->GetCG()->IsExclusiveEH() && cgFunc->GetCG()->IsExclusiveFunc(mirFunc))) { + /* remove the statment */ + BlockNode *bodyNode = mirFunc.GetBody(); + bodyNode->RemoveStmt(stmt); + break; + } + UnaryStmtNode *throwNode = static_cast(stmt); + EHThrow *ehReThrow = cgFunc->GetMemoryPool()->New(*throwNode); + ehReThrow->SetJavaTry(curTry); + AddRethrow(*ehReThrow); + break; + } + case OP_block: + CHECK_FATAL(false, "should've lowered earlier"); + default: + break; + } + } +} + +void EHTry::DumpEHTry(const MIRModule &mirModule) { + if (tryNode != nullptr) { + tryNode->Dump(); + } + + if (endTryNode != nullptr) { + endTryNode->Dump(); + } + + for (const auto *currCatch : catchVec) { + if (currCatch == nullptr) { + continue; + } + currCatch->Dump(); + } +} + +void EHThrow::ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg) { + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRFunction *calleeFunc = 
mirModule->GetMIRBuilder()->GetOrCreateFunction("MCC_ThrowException", static_cast(PTY_void)); + cgFunc.GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + calleeFunc->SetNoReturn(); + MapleVector args(mirModule->GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(&arg); + CallNode *callAssign = mirModule->GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callAssign); +} + +void EHThrow::ConvertThrowToRethrow(CGFunc &cgFunc) { + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRBuilder *mirBuilder = mirModule->GetMIRBuilder(); + MIRFunction *unFunc = mirBuilder->GetOrCreateFunction("MCC_RethrowException", static_cast(PTY_void)); + cgFunc.GetBecommon().UpdateTypeTable(*unFunc->GetMIRFuncType()); + unFunc->SetNoReturn(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(rethrow->Opnd(0)); + CallNode *callNode = mirBuilder->CreateStmtCall(unFunc->GetPuidx(), args); + mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callNode); +} + +void EHThrow::Lower(CGFunc &cgFunc) { + BaseNode *opnd0 = rethrow->Opnd(0); + DEBUG_ASSERT(((opnd0->GetPrimType() == GetLoweredPtrType()) || (opnd0->GetPrimType() == PTY_ref)), + "except a dread of a pointer to get its type"); + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRBuilder *mirBuilder = mirModule->GetMIRBuilder(); + DEBUG_ASSERT(mirBuilder != nullptr, "get mirBuilder failed in EHThrow::Lower"); + MIRSymbol *mirSymbol = nullptr; + BaseNode *arg = nullptr; + MIRType *pstType = nullptr; + switch (opnd0->GetOpCode()) { + case OP_dread: { + DreadNode *drNode = static_cast(opnd0); + mirSymbol = mirFunc.GetLocalOrGlobalSymbol(drNode->GetStIdx()); + DEBUG_ASSERT(mirSymbol != nullptr, "get symbol failed in EHThrow::Lower"); + pstType = mirSymbol->GetType(); + arg = drNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_iread: { + IreadNode *irNode = static_cast(opnd0); + MIRPtrType *pointerTy = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + if (irNode->GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + DEBUG_ASSERT(structTy != nullptr, "structTy is nullptr in EHThrow::Lower "); + pstType = structTy->GetFieldType(irNode->GetFieldID()); + } else { + pstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + } + arg = irNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_regread: { + RegreadNode *rrNode = static_cast(opnd0); + MIRPreg *pReg = mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx()); + DEBUG_ASSERT(pReg->GetPrimType() == GetLoweredPtrType(), "must be a pointer type"); + pstType = pReg->GetMIRType(); + arg = rrNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_retype: { + RetypeNode *retypeNode = static_cast(opnd0); + pstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retypeNode->GetTyIdx()); + arg = retypeNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_cvt: { + TypeCvtNode *cvtNode = static_cast(opnd0); + PrimType prmType = cvtNode->GetPrimType(); + // prmType supposed to be Pointer. + if ((prmType == PTY_ptr) || (prmType == PTY_ref) || (prmType == PTY_a32) || (prmType == PTY_a64)) { + ConvertThrowToRethrow(cgFunc); + } + return; + } + default: + DEBUG_ASSERT(false, " NYI throw something"); + } + CHECK_FATAL(pstType != nullptr, "pstType is null in EHThrow::Lower"); + if (pstType->GetKind() != kTypePointer) { + LogInfo::MapleLogger() << "Error in function " << mirFunc.GetName() << "\n"; + rethrow->Dump(); + LogInfo::MapleLogger() << "pstType is supposed to be Pointer, but is not"; + pstType->Dump(0); + CHECK_FATAL(false, "throw operand type kind must be kTypePointer"); + } + + MIRType *stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pstType)->GetPointedTyIdx()); + if (!IsUnderTry()) { + /* + * in this case the throw happens without a try...endtry wrapping it, need to generate lsda. 
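+   * (the unwinder needs a call-site entry that covers this throw), so: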
+ * insert 2 labels before and after throw + */ + LabelNode *throwBeginLbl = mirBuilder->CreateStmtLabel(mirBuilder->CreateLabIdx(mirFunc)); + LabelNode *throwEndLbl = mirBuilder->CreateStmtLabel(mirBuilder->CreateLabIdx(mirFunc)); + BlockNode *bodyNode = mirFunc.GetBody(); + bodyNode->InsertBefore(rethrow, throwBeginLbl); + bodyNode->InsertAfter(rethrow, throwEndLbl); + startLabel = throwBeginLbl; + endLabel = throwEndLbl; + } + + if (stType->GetKind() == kTypeClass) { + ConvertThrowToRuntime(cgFunc, *arg); + } else { + ConvertThrowToRethrow(cgFunc); + } +} + +EHFunc::EHFunc(CGFunc &func) + : cgFunc(&func), + tryVec(func.GetFuncScopeAllocator()->Adapter()), + ehTyTable(func.GetFuncScopeAllocator()->Adapter()), + ty2IndexTable(std::less(), func.GetFuncScopeAllocator()->Adapter()), + rethrowVec(func.GetFuncScopeAllocator()->Adapter()) {} + +EHFunc *CGFunc::BuildEHFunc() { + EHFunc *newEHFunc = GetMemoryPool()->New(*this); + SetEHFunc(*newEHFunc); + std::vector> catchVec; + newEHFunc->CollectEHInformation(catchVec); + newEHFunc->MergeCatchToTry(catchVec); + newEHFunc->BuildEHTypeTable(catchVec); + newEHFunc->InsertEHSwitchTable(); + newEHFunc->InsertCxaAfterEachCatch(catchVec); + newEHFunc->GenerateCleanupLabel(); + + GetBecommon().BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetVoid()); + if (newEHFunc->NeedFullLSDA()) { + newEHFunc->CreateLSDA(); + } else if (newEHFunc->HasThrow()) { + newEHFunc->LowerThrow(); + } + if (GetCG()->GetCGOptions().GenerateExceptionHandlingCode()) { + newEHFunc->CreateTypeInfoSt(); + } + + return newEHFunc; +} + +bool EHFunc::NeedFullLSDA() const { + if (cgFunc->GetFunction().IsJava()) { + return HasTry(); + } else { + return false; + } +} + +bool EHFunc::NeedFastLSDA() const { + if (cgFunc->GetFunction().IsJava()) { + return !HasTry(); + } else { + return false; + } +} + +bool EHFunc::HasTry() const { + return !tryVec.empty(); +} + +void EHFunc::CreateTypeInfoSt() { + MIRFunction &mirFunc = cgFunc->GetFunction(); + bool ctorDefined = false; + if (mirFunc.GetAttr(FUNCATTR_constructor) && !mirFunc.GetAttr(FUNCATTR_static) && (mirFunc.GetBody() != nullptr)) { + ctorDefined = true; + } + + if (!ctorDefined) { + return; + } + + const auto *classType = static_cast(mirFunc.GetClassType()); + if (cgFunc->GetMirModule().IsCModule() && classType == nullptr) { + return; + } + DEBUG_ASSERT(classType != nullptr, ""); + if (classType->GetMethods().empty() && (classType->GetFieldsSize() == 0)) { + return; + } + + if (classType->GetExceptionRootType() == nullptr) { + return; /* not a exception type */ + } +} + +void EHFunc::LowerThrow() { + MIRFunction &mirFunc = cgFunc->GetFunction(); + /* just lower without building LSDA */ + for (EHThrow *rethrow : rethrowVec) { + BaseNode *opnd0 = rethrow->GetRethrow()->Opnd(0); + /* except a dread of a point to get its type */ + switch (opnd0->GetOpCode()) { + case OP_retype: { + RetypeNode *retypeNode = static_cast(opnd0); + DEBUG_ASSERT(GlobalTables::GetTypeTable().GetTypeFromTyIdx(retypeNode->GetTyIdx())->GetKind() == kTypePointer, + "expecting a pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, *retypeNode->CloneTree( + mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_dread: { + DreadNode *drNode = static_cast(opnd0); + DEBUG_ASSERT(mirFunc.GetLocalOrGlobalSymbol(drNode->GetStIdx())->GetType()->GetKind() == kTypePointer, + "expect pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, *drNode->CloneTree( + mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_iread: 
{
+        IreadNode *irNode = static_cast<IreadNode*>(opnd0);
+        MIRPtrType *receiverPtrType = nullptr;
+        if (irNode->GetFieldID() != 0) {
+          MIRPtrType *pointerTy =
+              static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx()));
+          MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx());
+          MIRStructType *structTy = nullptr;
+          if (pointedTy->GetKind() != kTypeJArray) {
+            structTy = static_cast<MIRStructType*>(pointedTy);
+          } else {
+            /* it is a JArray type; use its parent's field info: java.lang.Object */
+            structTy = static_cast<MIRJarrayType*>(pointedTy)->GetParentType();
+          }
+          DEBUG_ASSERT(structTy != nullptr, "structTy is nullptr in EHFunc::LowerThrow");
+          receiverPtrType =
+              static_cast<MIRPtrType*>(structTy->GetFieldType(irNode->GetFieldID()));
+        } else {
+          receiverPtrType =
+              static_cast<MIRPtrType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx()));
+          receiverPtrType = static_cast<MIRPtrType*>(
+              GlobalTables::GetTypeTable().GetTypeFromTyIdx(receiverPtrType->GetPointedTyIdx()));
+        }
+        DEBUG_ASSERT(receiverPtrType->GetKind() == kTypePointer, "expecting a pointer type");
+        rethrow->ConvertThrowToRuntime(*cgFunc, *irNode->CloneTree(
+            mirFunc.GetModule()->GetCurFuncCodeMPAllocator()));
+        break;
+      }
+      case OP_regread: {
+        RegreadNode *rrNode = static_cast<RegreadNode*>(opnd0);
+        DEBUG_ASSERT(mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx())->GetPrimType() == GetLoweredPtrType(),
+                     "expect GetLoweredPtrType()");
+        DEBUG_ASSERT(mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx())->GetMIRType()->GetKind() == kTypePointer,
+                     "expect pointer type");
+        rethrow->ConvertThrowToRuntime(*cgFunc, *rrNode->CloneTree(
+            mirFunc.GetModule()->GetCurFuncCodeMPAllocator()));
+        break;
+      }
+      case OP_constval: {
+        ConstvalNode *constValNode = static_cast<ConstvalNode*>(opnd0);
+        BaseNode *newNode = constValNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator());
+        DEBUG_ASSERT(newNode != nullptr, "nullptr check");
+        rethrow->ConvertThrowToRuntime(*cgFunc, *newNode);
+        break;
+      }
+      case OP_cvt: {
+        TypeCvtNode *cvtNode = static_cast<TypeCvtNode*>(opnd0);
+        PrimType prmType = cvtNode->GetPrimType();
+        // prmType is expected to be an address type.
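+        // unlike the other cases, a cvt whose result is not one of the
+        // address-width types (ptr/ref/a32/a64) is silently skipped below
+        // instead of being asserted on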
+        if ((prmType == PTY_ptr) || (prmType == PTY_ref) || (prmType == PTY_a32) || (prmType == PTY_a64)) {
+          BaseNode *newNode = cvtNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator());
+          rethrow->ConvertThrowToRuntime(*cgFunc, *newNode);
+        }
+        break;
+      }
+      default:
+        DEBUG_ASSERT(false, "unexpected or NYI");
+    }
+  }
+}
+
+/*
+ * merge catch to try
+ */
+void EHFunc::MergeCatchToTry(const std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec) {
+  size_t tryOffsetCount;
+  for (auto *ehTry : tryVec) {
+    tryOffsetCount = ehTry->GetTryNode()->GetOffsetsCount();
+    for (size_t i = 0; i < tryOffsetCount; i++) {
+      auto o = ehTry->GetTryNode()->GetOffset(i);
+      for (const auto &catchVecPair : catchVec) {
+        LabelIdx lbIdx = catchVecPair.first;
+        if (lbIdx == o) {
+          ehTry->PushBackCatchVec(*catchVecPair.second);
+          break;
+        }
+      }
+    }
+    CHECK_FATAL(ehTry->GetCatchVecSize() == tryOffsetCount,
+                "EHTry offset count does not equal the number of catch nodes.");
+  }
+}
+
+/* catchVec is going to be released by the caller */
+void EHFunc::BuildEHTypeTable(const std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec) {
+  if (!catchVec.empty()) {
+    /* the first entry is assumed to be <* void> */
+    TyIdx voidTyIdx(PTY_void);
+    ehTyTable.emplace_back(voidTyIdx);
+    ty2IndexTable[voidTyIdx] = 0;
+    /* create void pointer and update becommon's size table */
+    cgFunc->GetBecommon().UpdateTypeTable(*GlobalTables::GetTypeTable().GetVoidPtr());
+  }
+
+  /* create the type table for this function, just iterate each catch */
+  CatchNode *jCatchNode = nullptr;
+  size_t catchNodeSize;
+  for (const auto &catchVecPair : catchVec) {
+    jCatchNode = catchVecPair.second;
+    catchNodeSize = jCatchNode->Size();
+    for (size_t i = 0; i < catchNodeSize; i++) {
+      MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jCatchNode->GetExceptionTyIdxVecElement(i));
+      DEBUG_ASSERT(mirTy->GetKind() == kTypePointer, "mirTy is not pointer type");
+      TyIdx ehTyIdx = static_cast<MIRPtrType*>(mirTy)->GetPointedTyIdx();
+      if (ty2IndexTable.find(ehTyIdx) != ty2IndexTable.end()) {
+        continue;
+      }
+
+      ty2IndexTable[ehTyIdx] = ehTyTable.size();
+      ehTyTable.emplace_back(ehTyIdx);
+      MIRClassType *catchType = static_cast<MIRClassType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehTyIdx));
+      MIRClassType *rootType = catchType->GetExceptionRootType();
+      if (rootType == nullptr) {
+        rootType = static_cast<MIRClassType*>(GlobalTables::GetTypeTable().GetOrCreateClassType(
+            "Ljava_2Flang_2FThrowable_3B", *GlobalTables::GetGsymTable().GetModule()));
+        catchType->SetParentTyIdx(rootType->GetTypeIndex());
+      }
+    }
+  }
+}
+
+void EHFunc::DumpEHFunc() const {
+  MIRModule &mirModule = *cgFunc->GetFunction().GetModule();
+  for (uint32 i = 0; i < this->tryVec.size(); i++) {
+    LogInfo::MapleLogger() << "\n========== start " << i << " th eh:\n";
+    EHTry *ehTry = tryVec[i];
+    ehTry->DumpEHTry(mirModule);
+    LogInfo::MapleLogger() << "========== end " << i << " th eh =========\n";
+  }
+
+  LogInfo::MapleLogger() << "\n========== start LSDA type table ========\n";
+  for (uint32 i = 0; i < this->ehTyTable.size(); i++) {
+    LogInfo::MapleLogger() << i << " vector to ";
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehTyTable[i])->Dump(0);
+    LogInfo::MapleLogger() << "\n";
+  }
+  LogInfo::MapleLogger() << "========== end LSDA type table ========\n";
+
+  LogInfo::MapleLogger() << "\n========== start type-index map ========\n";
+  for (const auto &ty2indexTablePair : ty2IndexTable) {
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(ty2indexTablePair.first)->Dump(0);
+    LogInfo::MapleLogger() << " map to ";
+    LogInfo::MapleLogger() << ty2indexTablePair.second << "\n";
+  }
+
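+  // the two tables above are inverses of each other: ehTyTable maps an index
+  // to a TyIdx, while ty2IndexTable maps a TyIdx back to its ehTyTable index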
LogInfo::MapleLogger() << "========== end type-index map ========\n"; +} + +/* + * cleanup_label is an LabelNode, and placed just before endLabel. + * cleanup_label is the first statement of cleanupbb. + * the layout of clean up code is: + * //return bb + * ... + * //cleanup bb = lastbb->prev; cleanupbb->PrependBB(retbb) + * cleanup_label: + * ... + * //lastbb + * endLabel: + * .cfi_endproc + * .Label.xx.end: + * .size + */ +void EHFunc::GenerateCleanupLabel() { + MIRModule *mirModule = cgFunc->GetFunction().GetModule(); + cgFunc->SetCleanupLabel(*mirModule->GetMIRBuilder()->CreateStmtLabel(CreateLabel(".LCLEANUP"))); + BlockNode *blockNode = cgFunc->GetFunction().GetBody(); + blockNode->InsertBefore(cgFunc->GetEndLabel(), cgFunc->GetCleanupLabel()); +} + +void EHFunc::InsertDefaultLabelAndAbortFunc(BlockNode &blkNode, SwitchNode &switchNode, + const StmtNode &beforeEndLabel) { + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + LabelIdx dfLabIdx = cgFunc->GetFunction().GetLabelTab()->CreateLabel(); + cgFunc->GetFunction().GetLabelTab()->AddToStringLabelMap(dfLabIdx); + StmtNode *dfLabStmt = mirModule.GetMIRBuilder()->CreateStmtLabel(dfLabIdx); + blkNode.InsertAfter(&beforeEndLabel, dfLabStmt); + MIRFunction *calleeFunc = mirModule.GetMIRBuilder()->GetOrCreateFunction("abort", static_cast(PTY_void)); + cgFunc->GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + CallNode *callExit = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + blkNode.InsertAfter(dfLabStmt, callExit); + switchNode.SetDefaultLabel(dfLabIdx); +} + +void EHFunc::FillSwitchTable(SwitchNode &switchNode, const EHTry &ehTry) { + CatchNode *catchNode = nullptr; + MIRType *exceptionType = nullptr; + MIRPtrType *ptType = nullptr; + size_t catchVecSize = ehTry.GetCatchVecSize(); + /* update switch node's cases */ + for (size_t i = 0; i < catchVecSize; i++) { + catchNode = ehTry.GetCatchNodeAt(i); + for (size_t j = 0; j < catchNode->Size(); j++) { + exceptionType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(j)); + ptType = static_cast(exceptionType); + MapleMap::iterator ty2IdxIt = ty2IndexTable.find(ptType->GetPointedTyIdx()); + DEBUG_ASSERT(ty2IdxIt != ty2IndexTable.end(), "find tyIdx failed!"); + uint32 tableIdx = ty2IdxIt->second; + LabelNode *catchLabelNode = static_cast(catchNode->GetPrev()); + CasePair p(tableIdx, catchLabelNode->GetLabelIdx()); + bool inserted = false; + for (auto x : switchNode.GetSwitchTable()) { + if (x == p) { + inserted = true; + break; + } + } + if (!inserted) { + switchNode.InsertCasePair(p); + } + } + } +} + +/* this is also the landing pad code. */ +void EHFunc::InsertEHSwitchTable() { + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + BlockNode *blockNode = cgFunc->GetFunction().GetBody(); + CHECK_FATAL(blockNode != nullptr, "get function body failed in EHThrow::InsertEHSwitchTable"); + StmtNode *endLabelPrevNode = nullptr; + SwitchNode *switchNode = nullptr; + for (auto *ehTry : tryVec) { + endLabelPrevNode = cgFunc->GetEndLabel()->GetPrev(); + /* + * get the next statement of the trynode. 
+     * get the next statement of the trynode: when no throw happened in the
+     * try block, jump to that statement directly;
+     * create a switch statement and insert it after tryend.
+     */
+    switchNode = mirModule.CurFuncCodeMemPool()->New<SwitchNode>(mirModule);
+    /* create a new label as the default target; if the program ever executes it, abort */
+    InsertDefaultLabelAndAbortFunc(*blockNode, *switchNode, *endLabelPrevNode);
+    /* create a special symbol that uses the second return value of __builtin_eh_return() */
+    MIRSymbol *mirSymbol = mirModule.GetMIRBuilder()->CreateSymbol(TyIdx(PTY_i32), "__eh_index__", kStVar, kScAuto,
+                                                                   &cgFunc->GetFunction(), kScopeLocal);
+    switchNode->SetSwitchOpnd(mirModule.GetMIRBuilder()->CreateExprDread(*mirSymbol));
+    FillSwitchTable(*switchNode, *ehTry);
+    SwitchLowerer switchLower(mirModule, *switchNode, *cgFunc->GetFuncScopeAllocator());
+    blockNode->InsertBlockAfter(*switchLower.LowerSwitch(), endLabelPrevNode);
+    ehTry->SetFallthruGoto(endLabelPrevNode->GetNext());
+  }
+  if (!CGOptions::IsQuiet()) {
+    cgFunc->GetFunction().Dump();
+  }
+}
+
+LabelIdx EHFunc::CreateLabel(const std::string &cstr) {
+  MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx());
+  CHECK_FATAL(mirSymbol != nullptr, "get function symbol failed in EHFunc::CreateLabel");
+  std::string funcName = mirSymbol->GetName();
+  std::string labStr = funcName.append(cstr).append(std::to_string(labelIdx++));
+  return cgFunc->GetFunction().GetOrCreateLableIdxFromName(labStr);
+}
+
+/* think about moving this to BELowerer where LowerThrownval is already written */
+void EHFunc::InsertCxaAfterEachCatch(const std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec) {
+  MIRModule &mirModule = *cgFunc->GetFunction().GetModule();
+  BlockNode *funcBody = cgFunc->GetFunction().GetBody();
+  CatchNode *jCatchNode = nullptr;
+  TyIdx voidPTy = GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex();
+  for (const auto &catchVecPair : catchVec) {
+    jCatchNode = catchVecPair.second;
+    MIRFunction *calleeFunc = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_JavaBeginCatch", voidPTy);
+    cgFunc->GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType());
+    RegreadNode *retRegRead0 = mirModule.CurFuncCodeMemPool()->New<RegreadNode>();
+    retRegRead0->SetRegIdx(-kSregRetval0);
+    retRegRead0->SetPrimType(GetLoweredPtrType());
+    MapleVector<BaseNode*> args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter());
+    args.emplace_back(retRegRead0);
+    CallNode *callAssign = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args);
+    funcBody->InsertAfter(jCatchNode, callAssign);
+  }
+}
+
+void EHFunc::CreateLSDAHeader() {
+  constexpr uint8 startEncoding = 0xff;
+  constexpr uint8 typeEncoding = 0x9b;
+  constexpr uint8 callSiteEncoding = 0x1;
+  MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder();
+
+  LSDAHeader *lsdaHeaders = cgFunc->GetMemoryPool()->New<LSDAHeader>();
+  LabelIdx lsdaHdLblIdx = CreateLabel("LSDAHD");  /* LSDA head */
+  LabelNode *lsdaHdLblNode = mirBuilder->CreateStmtLabel(lsdaHdLblIdx);
+  lsdaHeaders->SetLSDALabel(*lsdaHdLblNode);
+
+  LabelIdx lsdaTTStartIdx = CreateLabel("LSDAALLS");  /* LSDA all start */
+  LabelNode *lsdaTTLblNode = mirBuilder->CreateStmtLabel(lsdaTTStartIdx);
+  LabelIdx lsdaTTEndIdx = CreateLabel("LSDAALLE");  /* LSDA all end */
+  LabelNode *lsdaCSTELblNode = mirBuilder->CreateStmtLabel(lsdaTTEndIdx);
+  lsdaHeaders->SetTTypeOffset(lsdaTTLblNode, lsdaCSTELblNode);
+
+  lsdaHeaders->SetLPStartEncoding(startEncoding);
+  lsdaHeaders->SetTTypeEncoding(typeEncoding);
+  lsdaHeaders->SetCallSiteEncoding(callSiteEncoding);
+  lsdaHeader = 
lsdaHeaders; +} + +void EHFunc::FillLSDACallSiteTable() { + constexpr uint8 callSiteFirstAction = 0x1; + MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder(); + BlockNode *bodyNode = cgFunc->GetFunction().GetBody(); + + lsdaCallSiteTable = cgFunc->GetMemoryPool()->New(*cgFunc->GetFuncScopeAllocator()); + LabelIdx lsdaCSTStartIdx = CreateLabel("LSDACSTS"); /* LSDA callsite table start; */ + LabelNode *lsdaCSTStartLabel = mirBuilder->CreateStmtLabel(lsdaCSTStartIdx); + LabelIdx lsdaCSTEndIdx = CreateLabel("LSDACSTE"); /* LSDA callsite table end; */ + LabelNode *lsdaCSTEndLabel = mirBuilder->CreateStmtLabel(lsdaCSTEndIdx); + lsdaCallSiteTable->SetCSTable(lsdaCSTStartLabel, lsdaCSTEndLabel); + + /* create LDSACallSite for each EHTry instance */ + for (auto *ehTry : tryVec) { + DEBUG_ASSERT(ehTry != nullptr, "null ptr check"); + /* replace try with a label which is the callsite_start */ + LabelIdx csStartLblIdx = CreateLabel("LSDACS"); + LabelNode *csLblNode = mirBuilder->CreateStmtLabel(csStartLblIdx); + LabelIdx csEndLblIdx = CreateLabel("LSDACE"); + LabelNode *ceLblNode = mirBuilder->CreateStmtLabel(csEndLblIdx); + TryNode *tryNode = ehTry->GetTryNode(); + bodyNode->ReplaceStmt1WithStmt2(tryNode, csLblNode); + StmtNode *endTryNode = ehTry->GetEndtryNode(); + bodyNode->ReplaceStmt1WithStmt2(endTryNode, ceLblNode); + + LabelNode *ladpadEndLabel = nullptr; + if (ehTry->GetFallthruGoto()) { + ladpadEndLabel = mirBuilder->CreateStmtLabel(CreateLabel("LSDALPE")); + bodyNode->InsertBefore(ehTry->GetFallthruGoto(), ladpadEndLabel); + } else { + ladpadEndLabel = ceLblNode; + } + /* When there is only one catch, the exception table is optimized. */ + if (ehTry->GetCatchVecSize() == 1) { + ladpadEndLabel = static_cast(ehTry->GetCatchNodeAt(0)->GetPrev()); + } + + LSDACallSite *lsdaCallSite = cgFunc->GetMemoryPool()->New(); + LabelPair csStart(cgFunc->GetStartLabel(), csLblNode); + LabelPair csLength(csLblNode, ceLblNode); + LabelPair csLandingPad(cgFunc->GetStartLabel(), ladpadEndLabel); + lsdaCallSite->Init(csStart, csLength, csLandingPad, callSiteFirstAction); + ehTry->SetLSDACallSite(*lsdaCallSite); + lsdaCallSiteTable->PushBack(*lsdaCallSite); + } +} + +void EHFunc::CreateLSDA() { + constexpr uint8 callSiteCleanUpAction = 0x0; + /* create header */ + CreateLSDAHeader(); + /* create and fill callsite table */ + FillLSDACallSiteTable(); + + for (auto *rethrow : rethrowVec) { + DEBUG_ASSERT(rethrow != nullptr, "null ptr check"); + /* replace throw (void * obj) with call __java_rethrow and unwind resume */ + rethrow->Lower(*cgFunc); + if (rethrow->HasLSDA()) { + LSDACallSite *lsdaCallSite = cgFunc->GetMemoryPool()->New(); + LabelPair csStart(cgFunc->GetStartLabel(), rethrow->GetStartLabel()); + LabelPair csLength(rethrow->GetStartLabel(), rethrow->GetEndLabel()); + LabelPair csLandingPad(nullptr, nullptr); + lsdaCallSite->Init(csStart, csLength, csLandingPad, callSiteCleanUpAction); + lsdaCallSiteTable->PushBack(*lsdaCallSite); + } + } + + /* LSDAAction table */ + CreateLSDAAction(); +} + +void EHFunc::CreateLSDAAction() { + constexpr uint8 actionTableNextEncoding = 0x7d; + /* iterate each try and its corresponding catch */ + LSDAActionTable *actionTable = cgFunc->GetMemoryPool()->New(*cgFunc->GetFuncScopeAllocator()); + lsdaActionTable = actionTable; + + for (auto *ehTry : tryVec) { + LSDAAction *lastAction = nullptr; + for (int32 j = static_cast(ehTry->GetCatchVecSize()) - 1; j >= 0; --j) { + CatchNode *catchNode = ehTry->GetCatchNodeAt(j); + DEBUG_ASSERT(catchNode != nullptr, 
"null ptr check"); + for (uint32 idx = 0; idx < catchNode->Size(); ++idx) { + MIRPtrType *ptType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(idx))); + uint32 tyIndex = ty2IndexTable[ptType->GetPointedTyIdx()]; /* get the index of ptType of ehTyTable; */ + DEBUG_ASSERT(tyIndex != 0, "exception type index not allow equal zero"); + LSDAAction *lsdaAction = + cgFunc->GetMemoryPool()->New(tyIndex, lastAction == nullptr ? 0 : actionTableNextEncoding); + lastAction = lsdaAction; + actionTable->PushBack(*lsdaAction); + } + } + + /* record actionTable group offset, per LSDAAction object in actionTable occupy 2 bytes */ + ehTry->SetCSAction((actionTable->Size() - 1) * 2 + 1); + } +} + +bool CgBuildEHFunc::PhaseRun(maplebe::CGFunc &f) { + f.BuildEHFunc(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgBuildEHFunc, buildehfunc) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/emit.cpp b/ecmascript/mapleall/maple_be/src/cg/emit.cpp new file mode 100644 index 0000000000000000000000000000000000000000..32c11c4241b7ab30d009a0dae0bb467213f1838d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/emit.cpp @@ -0,0 +1,3652 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "emit.h" +#include +#ifdef _WIN32 +#include +#endif +#include "reflection_analysis.h" +#include "muid_replacement.h" +#include "metadata_layout.h" +#include "string_utils.h" +using namespace namemangler; + +namespace { +using namespace maple; +constexpr uint32 kSizeOfHugesoRoutine = 3; +constexpr uint32 kFromDefIndexMask32Mod = 0x40000000; + +int32 GetPrimitiveTypeSize(const std::string &name) { + if (name.length() != 1) { + return -1; + } + char typeName = name[0]; + switch (typeName) { + case 'Z': + return static_cast(GetPrimTypeSize(PTY_u1)); + case 'B': + return static_cast(GetPrimTypeSize(PTY_i8)); + case 'S': + return static_cast(GetPrimTypeSize(PTY_i16)); + case 'C': + return static_cast(GetPrimTypeSize(PTY_u16)); + case 'I': + return static_cast(GetPrimTypeSize(PTY_i32)); + case 'J': + return static_cast(GetPrimTypeSize(PTY_i64)); + case 'F': + return static_cast(GetPrimTypeSize(PTY_f32)); + case 'D': + return static_cast(GetPrimTypeSize(PTY_f64)); + case 'V': + return static_cast(GetPrimTypeSize(PTY_void)); + default: + return -1; + } +} +DBGDieAttr *LFindAttribute(MapleVector &vec, DwAt key) { + for (DBGDieAttr *at : vec) + if (at->GetDwAt() == key) { + return at; + } + return nullptr; +} + +DBGAbbrevEntry *LFindAbbrevEntry(MapleVector &abbvec, unsigned int key) { + for (DBGAbbrevEntry *daie : abbvec) { + if (!daie) { + continue; + } + if (daie->GetAbbrevId() == key) { + return daie; + } + } + DEBUG_ASSERT(0, ""); + return nullptr; +} + +bool LShouldEmit(unsigned int dwform) { + return dwform != DW_FORM_flag_present; +} + +DBGDie *LFindChildDieWithName(DBGDie *die, DwTag tag, const GStrIdx key) { + for (DBGDie *c : die->GetSubDieVec()) { + if (c->GetTag() == tag) { + for (DBGDieAttr *a : c->GetAttrVec()) { + if (a->GetDwAt() == DW_AT_name) { + if ((a->GetDwForm() == DW_FORM_string || a->GetDwForm() == DW_FORM_strp) && a->GetId() == key.GetIdx()) { + return c; + } else { + break; + } + } + } + } + } + return nullptr; +} + +DBGDieAttr *LFindDieAttr(DBGDie *die, DwAt attrname) { + for (DBGDieAttr *attr : die->GetAttrVec()) { + if (attr->GetDwAt() == attrname) { + return attr; + } + } + return nullptr; +} + +static void LUpdateAttrValue(DBGDieAttr *attr, int64_t newval) { + attr->SetI(int32_t(newval)); +} +} + +namespace maplebe { +using namespace maple; +using namespace cfi; + +void Emitter::EmitLabelRef(LabelIdx labIdx) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + char *idx = strdup(std::to_string(pIdx).c_str()); + fileStream << ".L." << idx << "__" << labIdx; + free(idx); + idx = nullptr; +} + +void Emitter::EmitStmtLabel(LabelIdx labIdx) { + EmitLabelRef(labIdx); + fileStream << ":\n"; +} + +void Emitter::EmitLabelPair(const LabelPair &pairLabel) { + DEBUG_ASSERT(pairLabel.GetEndOffset() || pairLabel.GetStartOffset(), "NYI"); + EmitLabelRef(pairLabel.GetEndOffset()->GetLabelIdx()); + fileStream << " - "; + EmitLabelRef(pairLabel.GetStartOffset()->GetLabelIdx()); + fileStream << "\n"; +} + +void Emitter::EmitLabelForFunc(const MIRFunction *func, LabelIdx labIdx) { + char *idx = strdup(std::to_string(func->GetPuidx()).c_str()); + fileStream << ".L." 
<< idx << "__" << labIdx; + free(idx); + idx = nullptr; +} + +AsmLabel Emitter::GetTypeAsmInfoName(PrimType primType) const { + uint32 size = GetPrimTypeSize(primType); + /* case x : x occupies bytes of pty */ + switch (size) { + case k1ByteSize: + return kAsmByte; + case k2ByteSize: +#if TARGAARCH64 || TARGRISCV64 + return kAsmShort; +#else + return kAsmValue; +#endif + case k4ByteSize: + return kAsmLong; + case k8ByteSize: + return kAsmQuad; + default: + DEBUG_ASSERT(false, "NYI"); + break; + } + return kAsmLong; +} + +void Emitter::EmitFileInfo(const std::string &fileName) { +#if defined(_WIN32) || defined(DARWIN) + char *curDirName = getcwd(nullptr, 0); +#else + char *curDirName = get_current_dir_name(); +#endif + CHECK_FATAL(curDirName != nullptr, "null ptr check "); + Emit(asmInfo->GetCmnt()); + std::string path(curDirName); +#ifdef _WIN32 + std::string cgFile(path.append("\\mplcg")); +#else + std::string cgFile(path.append("/mplcg")); +#endif + Emit(cgFile); + Emit("\n"); + + std::string compile("Compiling "); + Emit(asmInfo->GetCmnt()); + Emit(compile); + Emit("\n"); + + std::string beOptions("Be options"); + Emit(asmInfo->GetCmnt()); + Emit(beOptions); + Emit("\n"); + + path = curDirName; + path.append("/").append(fileName); + /* strip path before out/ */ + std::string out = "/out/"; + size_t pos = path.find(out.c_str(), 0, out.length()); + if (pos != std::string::npos) { + path.erase(0, pos + 1); + } + std::string irFile("\""); + irFile.append(path).append("\""); + Emit(asmInfo->GetFile()); + Emit(irFile); + Emit("\n"); + + /* save directory path in index 8 */ + SetFileMapValue(0, path); + + /* .file #num src_file_name */ + if (cg->GetCGOptions().WithLoc()) { + /* .file 1 mpl_file_name */ + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()); + Emit("1 "); + Emit(irFile); + Emit("\n"); + SetFileMapValue(1, irFile); /* save ir file in 1 */ + if (cg->GetCGOptions().WithSrc()) { + /* insert a list of src files */ + uint32 i = 2; + for (auto it : cg->GetMIRModule()->GetSrcFileInfo()) { + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()); + Emit(it.second).Emit(" \""); + std::string kStr = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + Emit(kStr); + Emit("\"\n"); + SetFileMapValue(i++, kStr); + } + } + } + free(curDirName); + + EmitInlineAsmSection(); +#if TARGARM32 + Emit("\t.syntax unified\n"); + /* + * "The arm instruction set is a subset of + * the most commonly used 32-bit ARM instructions." 
+ * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0210c/CACBCAAE.html + */ + Emit("\t.arm\n"); + Emit("\t.fpu vfpv4\n"); + Emit("\t.arch armv7-a\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_RW_data, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_RO_data, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_GOT_use, 2\n"); + if (CGOptions::GetABIType() == CGOptions::kABIHard) { + Emit("\t.eabi_attribute Tag_ABI_VFP_args, 1\n"); + } + Emit("\t.eabi_attribute Tag_ABI_FP_denormal, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_FP_exceptions, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_FP_number_model, 3\n"); + Emit("\t.eabi_attribute Tag_ABI_align_needed, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_align_preserved, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_enum_size, 2\n"); + Emit("\t.eabi_attribute 30, 6\n"); + Emit("\t.eabi_attribute Tag_CPU_unaligned_access, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_wchar_t, 4\n"); +#endif /* TARGARM32 */ +} + +void Emitter::EmitInlineAsmSection() { + MapleVector &asmSections = cg->GetMIRModule()->GetAsmDecls(); + if (!asmSections.empty()) { + Emit("#APP\n"); + for (auto &singleSection : asmSections) { + Emit("\t"); + Emit(singleSection); + Emit("\n"); + } + Emit("#NO_APP\n"); + } +} +void Emitter::EmitAsmLabel(AsmLabel label) { + switch (label) { + case kAsmData: { + (void)Emit(asmInfo->GetData()); + (void)Emit("\n"); + return; + } + case kAsmText: { + (void)Emit(asmInfo->GetText()); + (void)Emit("\n"); + return; + } + case kAsmType: { + (void)Emit(asmInfo->GetType()); + return; + } + case kAsmByte: { + (void)Emit(asmInfo->GetByte()); + return; + } + case kAsmShort: { + (void)Emit(asmInfo->GetShort()); + return; + } + case kAsmValue: { + (void)Emit(asmInfo->GetValue()); + return; + } + case kAsmLong: { + (void)Emit(asmInfo->GetLong()); + return; + } + case kAsmQuad: { + (void)Emit(asmInfo->GetQuad()); + return; + } + case kAsmZero: + (void)Emit(asmInfo->GetZero()); + return; + default: + DEBUG_ASSERT(false, "should not run here"); + return; + } +} + +void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) { + MIRType *mirType = mirSymbol.GetType(); + std::string symName; + if (mirSymbol.GetStorageClass() == kScPstatic && mirSymbol.IsLocal()) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + symName = mirSymbol.GetName() + std::to_string(pIdx); + } else { + symName = mirSymbol.GetName(); + } + if (mirSymbol.GetAsmAttr() != UStrIdx(0) && + (mirSymbol.GetStorageClass() == kScPstatic || mirSymbol.GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol.GetAsmAttr()); + symName = asmSection; + } + if (Globals::GetInstance()->GetBECommon()->IsEmptyOfTypeAlignTable()) { + DEBUG_ASSERT(false, "container empty check"); + } + + switch (label) { + case kAsmGlbl: { + Emit(asmInfo->GetGlobal()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmHidden: { + Emit(asmInfo->GetHidden()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmLocal: { + Emit(asmInfo->GetLocal()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmWeak: { + Emit(asmInfo->GetWeak()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmZero: { + uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + EmitNullConstant(size); + return; + } + case kAsmComm: { + std::string size; + if (isFlexibleArray) { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + arraySize); + } else { + size = 
std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + } + (void)Emit(asmInfo->GetComm()).Emit(symName).Emit(", ").Emit(size).Emit(", "); +#if PECOFF +#if TARGARM || TARGAARCH64 || TARGARK || TARGRISCV64 + std::string align = std::to_string( + static_cast(log2(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())))); +#else + std::string align = std::to_string( + Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())); +#endif + emit(align.c_str()); +#else /* ELF */ + /* output align, symbol name begin with "classInitProtectRegion" align is 4096 */ + MIRTypeKind kind = mirSymbol.GetType()->GetKind(); + MIRStorageClass storage = mirSymbol.GetStorageClass(); + if (symName.find("classInitProtectRegion") == 0) { + Emit(4096); + } else if (((kind == kTypeStruct) || (kind == kTypeClass) || (kind == kTypeArray) || (kind == kTypeUnion)) && + ((storage == kScGlobal) || (storage == kScPstatic) || (storage == kScFstatic))) { + int32 align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()); + if (GetPointerSize() < align) { + (void)Emit(std::to_string(align)); + } else { + (void)Emit(std::to_string(k8ByteSize)); + } + } else { + (void)Emit(std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()))); + } +#endif + Emit("\n"); + return; + } + case kAsmAlign: { + uint8 align = mirSymbol.GetAttrs().GetAlignValue(); + if (align == 0) { + if (mirSymbol.GetType()->GetKind() == kTypeStruct || + mirSymbol.GetType()->GetKind() == kTypeClass || + mirSymbol.GetType()->GetKind() == kTypeArray || + mirSymbol.GetType()->GetKind() == kTypeUnion) { +#if TARGX86 || TARGX86_64 + return; +#else + align = kAlignOfU8; +#endif + } else { + align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirSymbol.GetType()->GetTypeIndex()); +#if TARGARM32 || TARGAARCH64 || TARGARK || TARGRISCV64 + if (CGOptions::IsArm64ilp32() && mirSymbol.GetType()->GetPrimType() == PTY_a32) { + align = kAlignOfU8; + } else { + align = static_cast(log2(align)); + } +#endif + } + } + Emit(asmInfo->GetAlign()); + Emit(std::to_string(align)); + Emit("\n"); + return; + } + case kAsmSyname: { + Emit(symName); + Emit(":\n"); + return; + } + case kAsmSize: { + Emit(asmInfo->GetSize()); + Emit(symName); + Emit(", "); +#if TARGX86 || TARGX86_64 + Emit(".-"); + Emit(symName); +#else + std::string size; + if (isFlexibleArray) { + size = std::to_string( + Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + arraySize); + } else { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + } + Emit(size); +#endif + Emit("\n"); + return; + } + case kAsmType: { + Emit(asmInfo->GetType()); + if (GetCG()->GetMIRModule()->IsCModule() && (symName == "sys_nerr" || symName == "sys_errlist")) { + /* eliminate warning from deprecated C name */ + Emit("strerror"); + } else { + Emit(symName); + } + Emit(","); + Emit(asmInfo->GetAtobt()); + Emit("\n"); + return; + } + default: + DEBUG_ASSERT(false, "should not run here"); + return; + } +} + +void Emitter::EmitNullConstant(uint64 size) { + EmitAsmLabel(kAsmZero); + Emit(std::to_string(size)); + Emit("\n"); +} + +void Emitter::EmitCombineBfldValue(StructEmitInfo &structEmitInfo) { + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + auto emitBfldValue = [&structEmitInfo, charBitWidth, this](bool flag) { + while (structEmitInfo.GetCombineBitFieldWidth() > charBitWidth) { + uint8 shift = flag ? 
(structEmitInfo.GetCombineBitFieldWidth() - charBitWidth) : 0U; + uint64 tmp = (structEmitInfo.GetCombineBitFieldValue() >> shift) & 0x00000000000000ffUL; + EmitAsmLabel(kAsmByte); + Emit(std::to_string(tmp)); + Emit("\n"); + structEmitInfo.DecreaseCombineBitFieldWidth(charBitWidth); + uint64 value = flag ? + structEmitInfo.GetCombineBitFieldValue() - (tmp << structEmitInfo.GetCombineBitFieldWidth()) : + structEmitInfo.GetCombineBitFieldValue() >> charBitWidth; + structEmitInfo.SetCombineBitFieldValue(value); + } + }; + if (CGOptions::IsBigEndian()) { + /* + * If the total number of bits in the bit field is not a multiple of 8, + * the bits must be aligned to 8 bits to prevent errors in the emit. + */ + auto width = static_cast(RoundUp(structEmitInfo.GetCombineBitFieldWidth(), charBitWidth)); + if (structEmitInfo.GetCombineBitFieldWidth() < width) { + structEmitInfo.SetCombineBitFieldValue(structEmitInfo.GetCombineBitFieldValue() << + (width - structEmitInfo.GetCombineBitFieldWidth())); + structEmitInfo.IncreaseCombineBitFieldWidth(static_cast( + width - structEmitInfo.GetCombineBitFieldWidth())); + } + emitBfldValue(true); + } else { + emitBfldValue(false); + } + if (structEmitInfo.GetCombineBitFieldWidth() != 0) { + EmitAsmLabel(kAsmByte); + uint64 value = structEmitInfo.GetCombineBitFieldValue() & 0x00000000000000ffUL; + Emit(std::to_string(value)); + Emit("\n"); + } + CHECK_FATAL(charBitWidth != 0, "divide by zero"); + if ((structEmitInfo.GetNextFieldOffset() % charBitWidth) != 0) { + uint8 value = charBitWidth - (structEmitInfo.GetNextFieldOffset() % charBitWidth); + structEmitInfo.IncreaseNextFieldOffset(value); + } + structEmitInfo.SetTotalSize(structEmitInfo.GetNextFieldOffset() / charBitWidth); + structEmitInfo.SetCombineBitFieldValue(0); + structEmitInfo.SetCombineBitFieldWidth(0); +} + +void Emitter::EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset) { + MIRType &mirType = mirConst.GetType(); + if (fieldOffset > structEmitInfo.GetNextFieldOffset()) { + uint16 curFieldOffset = structEmitInfo.GetNextFieldOffset() - structEmitInfo.GetCombineBitFieldWidth(); + structEmitInfo.SetCombineBitFieldWidth(fieldOffset - curFieldOffset); + EmitCombineBfldValue(structEmitInfo); + DEBUG_ASSERT(structEmitInfo.GetNextFieldOffset() <= fieldOffset, + "structEmitInfo's nextFieldOffset should be <= fieldOffset"); + structEmitInfo.SetNextFieldOffset(fieldOffset); + } + uint32 fieldSize = static_cast(mirType).GetFieldSize(); + MIRIntConst &fieldValue = static_cast(mirConst); + /* Truncate the size of FieldValue to the bit field size. 
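+   * e.g. a 3-bit bit field initialized with 0b10110 keeps only its low three
+   * bits (0b110) after Trunc(3).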
*/ + if (fieldSize < fieldValue.GetActualBitWidth()) { + fieldValue.Trunc(fieldSize); + } + /* Clear higher Bits for signed value */ + if (structEmitInfo.GetCombineBitFieldValue() != 0) { + structEmitInfo.SetCombineBitFieldValue((~(~0ULL << structEmitInfo.GetCombineBitFieldWidth())) & + structEmitInfo.GetCombineBitFieldValue()); + } + if (CGOptions::IsBigEndian()) { + uint64 beValue = fieldValue.GetExtValue(); + if (fieldValue.IsNegative()) { + beValue = beValue - ((beValue >> fieldSize) << fieldSize); + } + structEmitInfo.SetCombineBitFieldValue( + (structEmitInfo.GetCombineBitFieldValue() << fieldSize) + beValue); + } else { + structEmitInfo.SetCombineBitFieldValue((fieldValue.GetExtValue() << structEmitInfo.GetCombineBitFieldWidth()) + + structEmitInfo.GetCombineBitFieldValue()); + } + structEmitInfo.IncreaseCombineBitFieldWidth(fieldSize); + structEmitInfo.IncreaseNextFieldOffset(fieldSize); + if ((nextType == nullptr) || (kTypeBitField != nextType->GetKind())) { + /* emit structEmitInfo->combineBitFieldValue */ + EmitCombineBfldValue(structEmitInfo); + } +} + +void Emitter::EmitStr(const std::string& mplStr, bool emitAscii, bool emitNewline) { + const char *str = mplStr.c_str(); + size_t len = mplStr.size(); + + if (emitAscii) { + Emit("\t.ascii\t\""); /* Do not terminate with \0 */ + } else { + Emit("\t.string\t\""); + } + + /* + * don't expand special character in a writeout to .s, + * convert all \s to \\s in string for storing in .string + */ + for (size_t i = 0; i < len; i++) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + Emit(buf); + } else if (*str == '\b') { + Emit("\\b"); + } else if (*str == '\n') { + Emit("\\n"); + } else if (*str == '\r') { + Emit("\\r"); + } else if (*str == '\t') { + Emit("\\t"); + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + Emit(buf); + } else { + /* all others, print as number */ + int ret = snprintf_s(buf, sizeof(buf), k4BitSize, "\\%03o", (*str) & 0xFF); + if (ret < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[kLastChar] = '\0'; + Emit(buf); + } + str++; + } + + Emit("\""); + if (emitNewline) { + Emit("\n"); + } +} + +void Emitter::EmitStrConstant(const MIRStrConst &mirStrConst, bool isIndirect) { + if (isIndirect) { + uint32 strId = mirStrConst.GetValue().GetIdx(); + + if (stringPtr.find(mirStrConst.GetValue()) == stringPtr.end()) { + stringPtr.insert(mirStrConst.GetValue()); + } + if (CGOptions::IsArm64ilp32()) { + (void)Emit("\t.word\t").Emit(".LSTR__").Emit(std::to_string(strId).c_str()); + } else { + EmitAsmLabel(kAsmQuad); + (void)Emit(".LSTR__").Emit(std::to_string(strId).c_str()); + } + return; + } + + const std::string ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirStrConst.GetValue()); + size_t len = ustr.size(); + if (isFlexibleArray) { + arraySize += static_cast(len) + 1; + } + EmitStr(ustr, false, false); +} + +void Emitter::EmitStr16Constant(const MIRStr16Const &mirStr16Const) { + Emit("\t.byte "); + /* note: for now, u16string is emitted 2 bytes without any \u indication */ + const std::u16string &str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16Const.GetValue()); + 
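+  // each char16_t is emitted big-endian as two decimal byte values, so u"AB"
+  // becomes ".byte 0,65,0,66"; an odd-length string gets a trailing ",0,0" pair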
constexpr int bufSize = 9; + char buf[bufSize]; + char16_t c = str16[0]; + /* fetch the type of char16_t c's top 8 bit data */ + int ret1 = snprintf_s(buf, sizeof(buf), bufSize - 1, "%d,%d", (c >> 8) & 0xFF, c & 0xFF); + if (ret1 < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[bufSize - 1] = '\0'; + Emit(buf); + for (uint32 i = 1; i < str16.length(); ++i) { + c = str16[i]; + /* fetch the type of char16_t c's top 8 bit data */ + int ret2 = snprintf_s(buf, sizeof(buf), bufSize - 1, ",%d,%d", (c >> 8) & 0xFF, c & 0xFF); + if (ret2 < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[bufSize - 1] = '\0'; + Emit(buf); + } + if ((str16.length() & 0x1) == 1) { + Emit(",0,0"); + } +} + +void Emitter::EmitScalarConstant(MIRConst &mirConst, bool newLine, bool flag32, bool isIndirect) { + MIRType &mirType = mirConst.GetType(); + AsmLabel asmName = GetTypeAsmInfoName(mirType.GetPrimType()); + switch (mirConst.GetKind()) { + case kConstInt: { + MIRIntConst &intCt = static_cast(mirConst); + uint32 sizeInBits = GetPrimTypeBitSize(mirType.GetPrimType()); + if (intCt.GetActualBitWidth() > sizeInBits) { + intCt.Trunc(sizeInBits); + } + if (flag32) { + EmitAsmLabel(AsmLabel::kAsmLong); + } else { + EmitAsmLabel(asmName); + } + Emit(intCt.GetValue()); + if (isFlexibleArray) { + arraySize += (sizeInBits / kBitsPerByte); + } + break; + } + case kConstFloatConst: { + MIRFloatConst &floatCt = static_cast(mirConst); + EmitAsmLabel(asmName); + Emit(std::to_string(floatCt.GetIntValue())); + if (isFlexibleArray) { + arraySize += k4ByteFloatSize; + } + break; + } + case kConstDoubleConst: { + MIRDoubleConst &doubleCt = static_cast(mirConst); + EmitAsmLabel(asmName); + Emit(std::to_string(doubleCt.GetIntValue())); + if (isFlexibleArray) { + arraySize += k8ByteDoubleSize; + } + break; + } + case kConstStrConst: { + MIRStrConst &strCt = static_cast(mirConst); + if (cg->GetMIRModule()->IsCModule()) { + EmitStrConstant(strCt, isIndirect); + } else { + EmitStrConstant(strCt); + } + break; + } + case kConstStr16Const: { + MIRStr16Const &str16Ct = static_cast(mirConst); + EmitStr16Constant(str16Ct); + break; + } + case kConstAddrof: { + MIRAddrofConst &symAddr = static_cast(mirConst); + StIdx stIdx = symAddr.GetSymbolIndex(); + MIRSymbol *symAddrSym = stIdx.IsGlobal() ? 
GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) + : CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + DEBUG_ASSERT(symAddrSym != nullptr, "null ptr check"); + std::string str; + if (CGOptions::IsArm64ilp32()) { + str = ".word"; + } else { + str = ".quad"; + } + if (stIdx.IsGlobal() == false && symAddrSym->GetStorageClass() == kScPstatic) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)Emit("\t" + str + "\t" + symAddrSym->GetName() + std::to_string(pIdx)); + } else { + (void)Emit("\t" + str + "\t" + symAddrSym->GetName()); + } + if (symAddr.GetOffset() != 0) { + (void)Emit(" + ").Emit(symAddr.GetOffset()); + } + if (symAddr.GetFieldID() > 1) { + MIRStructType *structType = static_cast(symAddrSym->GetType()); + DEBUG_ASSERT(structType != nullptr, "EmitScalarConstant: non-zero fieldID for non-structure"); + (void)Emit(" + ").Emit(Globals::GetInstance()->GetBECommon()->GetFieldOffset( + *structType, symAddr.GetFieldID()).first); + } + break; + } + case kConstAddrofFunc: { + MIRAddroffuncConst &funcAddr = static_cast(mirConst); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFuncTable().at(funcAddr.GetValue()); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + std::string str; + if (CGOptions::IsArm64ilp32()) { + str = ".word"; + } else { + str = ".quad"; + } + (void)Emit("\t" + str + "\t" + symAddrSym->GetName()); + break; + } + case kConstLblConst: { + MIRLblConst &lbl = static_cast(mirConst); + if (CGOptions::IsArm64ilp32()) { + (void)Emit("\t.word\t"); + } else { + EmitAsmLabel(kAsmQuad); + } + EmitLabelRef(lbl.GetValue()); + break; + } + default: + DEBUG_ASSERT(false, "NYI"); + break; + } + if (newLine) { + Emit("\n"); + } +} + +void Emitter::EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx) { + MIRAddroffuncConst &funcAddr = static_cast(elemConst); + const std::string stName = mirSymbol.GetName(); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcAddr.GetValue()); + const std::string &funcName = func->GetName(); + if ((idx == kFuncDefNameIndex) && mirSymbol.IsMuidFuncInfTab()) { + Emit("\t.long\t.Label.name."); + Emit(funcName + " - ."); + Emit("\n"); + return; + } + if ((idx == kFuncDefSizeIndex) && mirSymbol.IsMuidFuncInfTab()) { + Emit("\t.long\t.Label.end."); + Emit(funcName + " - "); + Emit(funcName + "\n"); + return; + } + if ((idx == static_cast(MethodProperty::kPaddrData)) && mirSymbol.IsReflectionMethodsInfo()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(funcName + " - .\n"); + return; + } + if (((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) || + ((idx == static_cast(ClassRO::kClinitAddr)) && mirSymbol.IsReflectionClassInfoRO())) { + Emit("\t.long\t"); + Emit(funcName + " - .\n"); + return; + } + + if (mirSymbol.IsReflectionMethodAddrData()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(funcName + " - .\n"); + return; + } + + if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncDefTab()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); 
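+  // 64-bit targets reference the function with an 8-byte .quad; the other
+  // targets fall through to the 4-byte .word form below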
+#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 6 means kBindingStateMethodDef:6 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x6\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateMethodDef:6. */ +#else + Emit("__BindingProtectRegion__ + 6\n"); +#endif /* USE_32BIT_REF */ + } else { +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(funcName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(funcName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(funcName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + + if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncDefOrigTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(funcName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(funcName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(funcName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + Emit(funcName); + if ((stName.find(VTAB_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0) || + (stName.find(ITAB_CONFLICT_PREFIX_STR) == 0)) { + Emit(" - .\n"); + return; + } + if (cg->GetCGOptions().GeneratePositionIndependentExecutable()) { + Emit(" - "); + Emit(stName); + } + Emit("\n"); +} + +void Emitter::EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx) { + MIRAddrofConst &symAddr = static_cast(elemConst); + const std::string stName = mirSymbol.GetName(); + + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr.GetSymbolIndex().Idx()); + const std::string &symAddrName = symAddrSym->GetName(); + + if (((idx == static_cast(FieldProperty::kPOffset)) && mirSymbol.IsReflectionFieldsInfo()) || + mirSymbol.IsReflectionFieldOffsetData()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(symAddrName + " - .\n"); + return; + } + + if (((idx == static_cast(FieldPropertyCompact::kPOffset)) && mirSymbol.IsReflectionFieldsInfoCompact()) || + ((idx == static_cast(MethodProperty::kSigName)) && mirSymbol.IsReflectionMethodsInfo()) || + ((idx == static_cast(MethodSignatureProperty::kParameterTypes)) && + mirSymbol.IsReflectionMethodSignature())) { + Emit("\t.long\t"); + Emit(symAddrName + " - .\n"); + return; + } + + if (((idx == static_cast(MethodProperty::kDeclarclass)) || + (idx == static_cast(MethodProperty::kPaddrData))) && mirSymbol.IsReflectionMethodsInfo()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (idx == static_cast(MethodProperty::kDeclarclass)) { + Emit(symAddrName + " - .\n"); + } else { + Emit(symAddrName + " - . 
+ 2\n"); + } + return; + } + + if ((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) { + Emit("\t.long\t"); + Emit(symAddrName + " - . + 2\n"); + return; + } + + if ((idx == static_cast(FieldProperty::kDeclarclass)) && mirSymbol.IsReflectionFieldsInfo()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(symAddrName + " - .\n"); + return; + } + + if ((idx == kDataDefAddrIndex) && (mirSymbol.IsMuidDataUndefTab() || mirSymbol.IsMuidDataDefTab())) { + if (symAddrSym->IsReflectionClassInfo()) { + Emit(".LDW.ref." + symAddrName + ":\n"); + } + Emit(kPtrPrefixStr + symAddrName + ":\n"); +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (mirSymbol.IsMuidDataUndefTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + if (symAddrSym->IsReflectionClassInfo()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 1 means kBindingStateCinfUndef:1 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x1\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateCinfUndef:1. */ +#else + Emit("__BindingProtectRegion__ + 1\n"); +#endif /* USE_32BIT_REF */ + } else { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 3 means kBindingStateDataUndef:3 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x3\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateDataUndef:3. */ +#else + Emit("__BindingProtectRegion__ + 3\n"); +#endif /* USE_32BIT_REF */ + } + } else { + Emit("0\n"); + } + } else { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + if (symAddrSym->IsReflectionClassInfo()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 2 means kBindingStateCinfDef:2 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x2\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateCinfDef:2. */ +#else + Emit("__BindingProtectRegion__ + 2\n"); +#endif /* USE_32BIT_REF */ + } else { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 4 means kBindingStateDataDef:4 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x4\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateDataDef:4. 
*/ +#else + Emit("__BindingProtectRegion__ + 4\n"); +#endif /* USE_32BIT_REF */ + } + } else { +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(symAddrName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(symAddrName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(symAddrName + "\n"); +#endif /* USE_32BIT_REF */ + } + } + return; + } + + if (idx == kDataDefAddrIndex && mirSymbol.IsMuidDataDefOrigTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(symAddrName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(symAddrName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(symAddrName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + + if (StringUtils::StartsWith(stName, kLocalClassInfoStr)) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + Emit(symAddrName); + Emit(" - . + ").Emit(kDataRefIsOffset); + Emit("\n"); + return; + } +#ifdef USE_32BIT_REF + if (mirSymbol.IsReflectionHashTabBucket() || (stName.find(ITAB_PREFIX_STR) == 0) || + (mirSymbol.IsReflectionClassInfo() && (idx == static_cast(ClassProperty::kInfoRo)))) { + Emit("\t.word\t"); + } else { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + } +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + + if ((stName.find(ITAB_CONFLICT_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0)) { + Emit(symAddrName + " - .\n"); + return; + } + if (mirSymbol.IsMuidRangeTab()) { + if (idx == kRangeBeginIndex) { + Emit(symAddrSym->GetMuidTabName() + "_begin\n"); + } else { + Emit(symAddrSym->GetMuidTabName() + "_end\n"); + } + return; + } + + if (symAddrName.find(GCTIB_PREFIX_STR) == 0) { + Emit(cg->FindGCTIBPatternName(symAddrName)); + } else { + Emit(symAddrName); + } + + if ((((idx == static_cast(ClassRO::kIfields)) || (idx == static_cast(ClassRO::kMethods))) && + mirSymbol.IsReflectionClassInfoRO()) || + mirSymbol.IsReflectionHashTabBucket()) { + Emit(" - ."); + if (symAddrSym->IsReflectionFieldsInfoCompact() || + symAddrSym->IsReflectionMethodsInfoCompact()) { + /* Mark the least significant bit as 1 for compact fieldinfo */ + Emit(" + ").Emit(MethodFieldRef::kMethodFieldRefIsCompact); + } + } else if (mirSymbol.IsReflectionClassInfo()) { + if ((idx == static_cast(ClassProperty::kItab)) || + (idx == static_cast(ClassProperty::kVtab)) || + (idx == static_cast(ClassProperty::kInfoRo))) { + Emit(" - . + ").Emit(kDataRefIsOffset); + } else if (idx == static_cast(ClassProperty::kGctib)) { + if (cg->FindGCTIBPatternName(symAddrName).find(REF_PREFIX_STR) == 0) { + Emit(" - . + ").Emit(kGctibRefIsIndirect); + } else { + Emit(" - ."); + } + } + } else if (mirSymbol.IsReflectionClassInfoRO()) { + if (idx == static_cast(ClassRO::kSuperclass)) { + Emit(" - . 
+ ").Emit(kDataRefIsOffset); + } + } + + if (cg->GetCGOptions().GeneratePositionIndependentExecutable()) { + Emit(" - "); + Emit(stName); + } + Emit("\n"); +} + +MIRAddroffuncConst *Emitter::GetAddroffuncConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst) { + MIRAddroffuncConst *innerFuncAddr = nullptr; + size_t addrIndex = mirSymbol.IsReflectionMethodsInfo() ? static_cast(MethodProperty::kPaddrData) : + static_cast(MethodInfoCompact::kPaddrData); + MIRConst *pAddrConst = aggConst.GetConstVecItem(addrIndex); + if (pAddrConst->GetKind() == kConstAddrof) { + /* point addr data. */ + MIRAddrofConst *pAddr = safe_cast(pAddrConst); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(pAddr->GetSymbolIndex().Idx()); + MIRAggConst *methodAddrAggConst = safe_cast(symAddrSym->GetKonst()); + MIRAggConst *addrAggConst = safe_cast(methodAddrAggConst->GetConstVecItem(0)); + MIRConst *funcAddrConst = addrAggConst->GetConstVecItem(0); + if (funcAddrConst->GetKind() == kConstAddrofFunc) { + /* func sybmol. */ + innerFuncAddr = safe_cast(funcAddrConst); + } else if (funcAddrConst->GetKind() == kConstInt) { + /* def table index, replaced by def table for lazybinding. */ + std::string funcDefTabName = namemangler::kMuidFuncDefTabPrefixStr + cg->GetMIRModule()->GetFileNameAsPostfix(); + MIRSymbol *funDefTabSy = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(funcDefTabName)); + MIRAggConst &funDefTabAggConst = static_cast(*funDefTabSy->GetKonst()); + MIRIntConst *funcAddrIndexConst = safe_cast(funcAddrConst); + uint64 indexDefTab = funcAddrIndexConst->GetExtValue(); + MIRAggConst *defTabAggConst = safe_cast(funDefTabAggConst.GetConstVecItem(indexDefTab)); + MIRConst *funcConst = defTabAggConst->GetConstVecItem(0); + if (funcConst->GetKind() == kConstAddrofFunc) { + innerFuncAddr = safe_cast(funcConst); + } + } + } else if (pAddrConst->GetKind() == kConstAddrofFunc) { + innerFuncAddr = safe_cast(pAddrConst); + } + return innerFuncAddr; +} + +int64 Emitter::GetFieldOffsetValue(const std::string &className, const MIRIntConst &intConst, + const std::map &strIdx2Type) { + uint64 idx = intConst.GetExtValue(); + bool isDefTabIndex = idx & 0x1; + int64 fieldIdx = idx >> 1; + if (isDefTabIndex) { + /* it's def table index. */ + return fieldIdx; + } else { + /* really offset. 
*/
+    uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte;
+    GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className);
+    auto it = strIdx2Type.find(strIdx);
+    DEBUG_ASSERT(it != strIdx2Type.end(), "Cannot find type");
+    CHECK_FATAL(it->second != nullptr, "valid iterator check");
+    MIRType &ty = *it->second;
+    MIRStructType &structType = static_cast<MIRStructType&>(ty);
+    std::pair<int32, int32> fieldOffsetPair =
+        Globals::GetInstance()->GetBECommon()->GetFieldOffset(structType, fieldIdx);
+    int64 fieldOffset = fieldOffsetPair.first * static_cast<int64>(charBitWidth) + fieldOffsetPair.second;
+    return fieldOffset;
+  }
+}
+
+void Emitter::InitRangeIdx2PerfixStr() {
+  rangeIdx2PrefixStr[RangeIdx::kVtabAndItab] = kMuidVtabAndItabPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kItabConflict] = kMuidItabConflictPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kVtabOffset] = kMuidVtabOffsetPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kFieldOffset] = kMuidFieldOffsetPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kValueOffset] = kMuidValueOffsetPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kLocalClassInfo] = kMuidLocalClassInfoStr;
+  rangeIdx2PrefixStr[RangeIdx::kConststr] = kMuidConststrPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kSuperclass] = kMuidSuperclassPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kGlobalRootlist] = kMuidGlobalRootlistPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kClassmetaData] = kMuidClassMetadataPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kClassBucket] = kMuidClassMetadataBucketPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kJavatext] = kMuidJavatextPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kDataSection] = kMuidDataSectionStr;
+  rangeIdx2PrefixStr[RangeIdx::kJavajni] = kRegJNITabPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kJavajniFunc] = kRegJNIFuncTabPrefixStr;
+  rangeIdx2PrefixStr[RangeIdx::kDecoupleStaticKey] = kDecoupleStaticKeyStr;
+  rangeIdx2PrefixStr[RangeIdx::kDecoupleStaticValue] = kDecoupleStaticValueStr;
+  rangeIdx2PrefixStr[RangeIdx::kBssStart] = kBssSectionStr;
+  rangeIdx2PrefixStr[RangeIdx::kLinkerSoHash] = kLinkerHashSoStr;
+  rangeIdx2PrefixStr[RangeIdx::kArrayClassCache] = kArrayClassCacheTable;
+  rangeIdx2PrefixStr[RangeIdx::kArrayClassCacheName] = kArrayClassCacheNameTable;
+}
+
+void Emitter::EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, uint32 itabConflictIndex,
+                           const std::map<GStrIdx, MIRType*> &strIdx2Type, size_t idx) {
+  MIRConst *elemConst = aggConst.GetConstVecItem(idx);
+  const std::string stName = mirSymbol.GetName();
+
+  MIRIntConst *intConst = safe_cast<MIRIntConst>(elemConst);
+  DEBUG_ASSERT(intConst != nullptr, "Unexpected const type");
+
+  /* ignore abstract function addr */
+  if ((idx == static_cast<uint32>(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) {
+    return;
+  }
+
+  if (((idx == static_cast<uint32>(MethodProperty::kVtabIndex)) && (mirSymbol.IsReflectionMethodsInfo())) ||
+      ((idx == static_cast<uint32>(MethodInfoCompact::kVtabIndex)) && mirSymbol.IsReflectionMethodsInfoCompact())) {
+    MIRAddroffuncConst *innerFuncAddr = GetAddroffuncConst(mirSymbol, aggConst);
+    if (innerFuncAddr != nullptr) {
+      Emit(".Label.name." 
+ GlobalTables::GetFunctionTable().GetFunctionFromPuidx( + innerFuncAddr->GetValue())->GetName()); + Emit(":\n"); + } + } + /* refer to DeCouple::GenOffsetTableType */ + constexpr int fieldTypeIdx = 2; + constexpr int methodTypeIdx = 2; + bool isClassInfo = (idx == static_cast(ClassRO::kClassName) || + idx == static_cast(ClassRO::kAnnotation)) && mirSymbol.IsReflectionClassInfoRO(); + bool isMethodsInfo = (idx == static_cast(MethodProperty::kMethodName) || + idx == static_cast(MethodProperty::kSigName) || + idx == static_cast(MethodProperty::kAnnotation)) && mirSymbol.IsReflectionMethodsInfo(); + bool isFieldsInfo = (idx == static_cast(FieldProperty::kTypeName) || + idx == static_cast(FieldProperty::kName) || + idx == static_cast(FieldProperty::kAnnotation)) && mirSymbol.IsReflectionFieldsInfo(); + bool isMethodSignature = (idx == static_cast(MethodSignatureProperty::kSignatureOffset)) && + mirSymbol.IsReflectionMethodSignature(); + /* RegisterTable has been Int Array, visit element instead of field. */ + bool isInOffsetTab = (idx == 1 || idx == methodTypeIdx) && + (StringUtils::StartsWith(stName, kVtabOffsetTabStr) || + StringUtils::StartsWith(stName, kFieldOffsetTabStr)); + /* The 1 && 2 of Decouple static struct is the string name */ + bool isStaticStr = (idx == 1 || idx == 2) && aggConst.GetConstVec().size() == kSizeOfDecoupleStaticStruct && + StringUtils::StartsWith(stName, kDecoupleStaticKeyStr); + /* process conflict table index larger than itabConflictIndex * 2 + 2 element */ + bool isConflictPerfix = (idx >= (static_cast(itabConflictIndex) * 2 + 2)) && (idx % 2 == 0) && + StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR); + bool isArrayClassCacheName = mirSymbol.IsArrayClassCacheName(); + if (isClassInfo || isMethodsInfo || isFieldsInfo || mirSymbol.IsRegJNITab() || isInOffsetTab || + isStaticStr || isConflictPerfix || isArrayClassCacheName || isMethodSignature) { + /* compare with all 1s */ + uint32 index = static_cast((safe_cast(elemConst))->GetExtValue()) & 0xFFFFFFFF; + bool isHotReflectStr = (index & 0x00000003) != 0; /* use the last two bits of index in this expression */ + std::string hotStr; + if (isHotReflectStr) { + uint32 tag = (index & 0x00000003) - kCStringShift; /* use the last two bits of index in this expression */ + if (tag == kLayoutBootHot) { + hotStr = kReflectionStartHotStrtabPrefixStr; + } else if (tag == kLayoutBothHot) { + hotStr = kReflectionBothHotStrTabPrefixStr; + } else { + hotStr = kReflectionRunHotStrtabPrefixStr; + } + } + std::string reflectStrTabPrefix = isHotReflectStr ? 
hotStr : kReflectionStrtabPrefixStr; + std::string strTabName = reflectStrTabPrefix + cg->GetMIRModule()->GetFileNameAsPostfix(); + /* right shift 2 bits to get the low 30-bit data for MIRIntConst */ + elemConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(index >> 2, elemConst->GetType()); + intConst = safe_cast(elemConst); + aggConst.SetItem(static_cast(idx), intConst, aggConst.GetFieldIdItem(idx)); +#ifdef USE_32BIT_REF + if (stName.find(ITAB_CONFLICT_PREFIX_STR) == 0) { + EmitScalarConstant(*elemConst, false, true); + } else { + EmitScalarConstant(*elemConst, false); + } +#else + EmitScalarConstant(*elemConst, false); +#endif /* USE_32BIT_REF */ + Emit("+" + strTabName); + if (mirSymbol.IsRegJNITab() || mirSymbol.IsReflectionMethodsInfo() || mirSymbol.IsReflectionFieldsInfo() || + mirSymbol.IsArrayClassCacheName() || mirSymbol.IsReflectionMethodSignature()) { + Emit("-."); + } + if (StringUtils::StartsWith(stName, kDecoupleStaticKeyStr)) { + Emit("-."); + } + if (mirSymbol.IsReflectionClassInfoRO()) { + if (idx == static_cast(ClassRO::kAnnotation)) { + Emit("-."); + } else if (idx == static_cast(ClassRO::kClassName)) { + /* output in hex format to show it is a flag of bits. */ + std::stringstream ss; + ss << std::hex << "0x" << MByteRef::kPositiveOffsetBias; + Emit(" - . + " + ss.str()); + } + } + if (StringUtils::StartsWith(stName, ITAB_PREFIX_STR)) { + Emit("-."); + } + if (StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR)) { + /* output in hex format to show it is a flag of bits. */ + std::stringstream ss; + ss << std::hex << "0x" << MByteRef32::kPositiveOffsetBias; + Emit(" - . + " + ss.str()); + } + if ((idx == 1 || idx == methodTypeIdx) && StringUtils::StartsWith(stName, kVtabOffsetTabStr)) { + Emit("-."); + } + if ((idx == 1 || idx == fieldTypeIdx) && StringUtils::StartsWith(stName, kFieldOffsetTabStr)) { + Emit("-."); + } + Emit("\n"); + } else if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncUndefTab()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); +#endif /* USE_32BIT_REF */ + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 5 means kBindingStateMethodUndef:5 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x5\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateMethodUndef:5. */ +#else + Emit("__BindingProtectRegion__ + 5\n"); +#endif /* USE_32BIT_REF */ + } else { + Emit("0\n"); + } + } else if (idx == static_cast(FieldProperty::kPClassType) && mirSymbol.IsReflectionFieldsInfo()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); + const int width = 4; +#else + EmitAsmLabel(kAsmQuad); + const int width = 8; +#endif /* USE_32BIT_REF */ + uint32 muidDataTabAddr = static_cast((safe_cast(elemConst))->GetExtValue()); + if (muidDataTabAddr != 0) { + bool isDefTabIndex = (muidDataTabAddr & kFromDefIndexMask32Mod) == kFromDefIndexMask32Mod; + std::string muidDataTabPrefix = isDefTabIndex ?
kMuidDataDefTabPrefixStr : kMuidDataUndefTabPrefixStr; + std::string muidDataTabName = muidDataTabPrefix + cg->GetMIRModule()->GetFileNameAsPostfix(); + (void)Emit(muidDataTabName + "+"); + uint32 muidDataTabIndex = muidDataTabAddr & 0x3FFFFFFF; /* high 2 bit is the mask of muid tab */ + (void)Emit(std::to_string(muidDataTabIndex * width)); + (void)Emit("-.\n"); + } else { + (void)Emit(muidDataTabAddr); + Emit("\n"); + } + return; + } else if (mirSymbol.IsRegJNIFuncTab()) { + std::string strTabName = kRegJNITabPrefixStr + cg->GetMIRModule()->GetFileNameAsPostfix(); + EmitScalarConstant(*elemConst, false); +#ifdef TARGARM32 + (void)Emit("+" + strTabName).Emit("+").Emit(MByteRef::kPositiveOffsetBias).Emit("-.\n"); +#else + Emit("+" + strTabName + "\n"); +#endif + } else if (mirSymbol.IsReflectionMethodAddrData()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); +#endif /* USE_32BIT_REF */ + Emit(intConst->GetValue()); + Emit("\n"); + } else if (mirSymbol.IsReflectionFieldOffsetData()) { + /* Figure out instance field offset now. */ + size_t prefixStrLen = strlen(kFieldOffsetDataPrefixStr); + size_t pos = stName.find("_FieldID_"); + std::string typeName = stName.substr(prefixStrLen, pos - prefixStrLen); +#ifdef USE_32BIT_REF + std::string widthFlag = ".long"; +#else + std::string widthFlag = ".quad"; +#endif /* USE_32BIT_REF */ + int64 fieldOffset = GetFieldOffsetValue(typeName, *intConst, strIdx2Type); + uint64 fieldIdx = intConst->GetExtValue(); + bool isDefTabIndex = fieldIdx & 0x1; + if (isDefTabIndex) { + /* it's def table index. */ + Emit("\t// " + typeName + " static field, data def table index " + std::to_string(fieldOffset) + "\n"); + } else { + /* really offset. */ + fieldIdx >>= 1; + Emit("\t// " + typeName + "\t field" + std::to_string(fieldIdx) + "\n"); + } + Emit("\t" + widthFlag + "\t" + std::to_string(fieldOffset) + "\n"); + } else if (((idx == static_cast(FieldProperty::kPOffset)) && mirSymbol.IsReflectionFieldsInfo()) || + ((idx == static_cast(FieldPropertyCompact::kPOffset)) && + mirSymbol.IsReflectionFieldsInfoCompact())) { + std::string typeName; + std::string widthFlag; +#ifdef USE_32BIT_REF + const int width = 4; +#else + const int width = 8; +#endif /* USE_32BIT_REF */ + if (mirSymbol.IsReflectionFieldsInfo()) { + typeName = stName.substr(strlen(kFieldsInfoPrefixStr)); +#ifdef USE_32BIT_REF + widthFlag = ".long"; +#else + widthFlag = ".quad"; +#endif /* USE_32BIT_REF */ + } else { + size_t prefixStrLen = strlen(kFieldsInfoCompactPrefixStr); + typeName = stName.substr(prefixStrLen); + widthFlag = ".long"; + } + int64 fieldIdx = intConst->GetExtValue(); + MIRSymbol *pOffsetData = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldOffsetDataPrefixStr + typeName)); + if (pOffsetData != nullptr) { + fieldIdx *= width; + std::string fieldOffset = kFieldOffsetDataPrefixStr + typeName; + Emit("\t" + widthFlag + "\t" + std::to_string(fieldIdx) + " + " + fieldOffset + " - .\n"); + } else { + /* pOffsetData null, means FieldMeta.offset is really offset */ + int64 fieldOffset = GetFieldOffsetValue(typeName, *intConst, strIdx2Type); + Emit("\t// " + typeName + "\t field" + std::to_string(fieldIdx) + "\n"); + Emit("\t" + widthFlag + "\t" + std::to_string(fieldOffset) + "\n"); + } + } else if ((idx == static_cast(ClassProperty::kObjsize)) && mirSymbol.IsReflectionClassInfo()) { + std::string delimiter = "$$"; + std::string typeName = + stName.substr(strlen(CLASSINFO_PREFIX_STR), stName.find(delimiter) - 
strlen(CLASSINFO_PREFIX_STR)); + uint32 objSize = 0; + std::string comments; + + if (typeName.size() > 1 && typeName[0] == '$') { + /* fill element size for array class; */ + std::string newTypeName = typeName.substr(1); + /* another $(arraysplitter) */ + if (newTypeName.find("$") == std::string::npos) { + CHECK_FATAL(false, "can not find $ in std::string"); + } + typeName = newTypeName.substr(newTypeName.find("$") + 1); + int32 pTypeSize; + + /* we only need to calculate primitive type in arrays. */ + if ((pTypeSize = GetPrimitiveTypeSize(typeName)) != -1) { + objSize = static_cast(pTypeSize); + } + comments = "// elemobjsize"; + } else { + comments = "// objsize"; + } + + if (!objSize) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + auto it = strIdx2Type.find(strIdx); + DEBUG_ASSERT(it != strIdx2Type.end(), "Can not find type"); + MIRType *mirType = it->second; + ASSERT_NOT_NULL(mirType); + objSize = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + } + /* objSize should not exceed 16 bits */ + CHECK_FATAL(objSize <= 0xffff, "Error:the objSize is too large"); + Emit("\t.short\t" + std::to_string(objSize) + comments + "\n"); + } else if (mirSymbol.IsMuidRangeTab()) { + MIRIntConst *subIntCt = safe_cast(elemConst); + int flag = subIntCt->GetExtValue(); + InitRangeIdx2PerfixStr(); + if (rangeIdx2PrefixStr.find(flag) == rangeIdx2PrefixStr.end()) { + EmitScalarConstant(*elemConst, false); + Emit("\n"); + return; + } + std::string prefix = rangeIdx2PrefixStr[flag]; +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + (void)Emit("\t.word\t"); +#endif + if (idx == kRangeBeginIndex) { + Emit(prefix + "_begin\n"); + } else { + Emit(prefix + "_end\n"); + } + } else { +#ifdef USE_32BIT_REF + if (StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR) || StringUtils::StartsWith(stName, ITAB_PREFIX_STR) || + StringUtils::StartsWith(stName, VTAB_PREFIX_STR)) { + EmitScalarConstant(*elemConst, false, true); + } else { + EmitScalarConstant(*elemConst, false); + } +#else + EmitScalarConstant(*elemConst, false); +#endif /* USE_32BIT_REF */ + Emit("\n"); + } +} + +void Emitter::EmitConstantTable(const MIRSymbol &mirSymbol, MIRConst &mirConst, + const std::map &strIdx2Type) { + const std::string stName = mirSymbol.GetName(); + MIRAggConst &aggConst = static_cast(mirConst); + uint32 itabConflictIndex = 0; + for (size_t i = 0; i < aggConst.GetConstVec().size(); ++i) { + MIRConst *elemConst = aggConst.GetConstVecItem(i); + if (i == 0 && StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR)) { +#ifdef USE_32BIT_REF + itabConflictIndex = static_cast((safe_cast(elemConst))->GetValue()) & 0xffff; +#else + itabConflictIndex = safe_cast(elemConst)->GetExtValue() & 0xffffffff; +#endif + } + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (elemConst->GetKind() == kConstAddrofFunc) { /* addroffunc const */ + EmitAddrofFuncConst(mirSymbol, *elemConst, i); + } else if (elemConst->GetKind() == kConstAddrof) { /* addrof symbol const */ + EmitAddrofSymbolConst(mirSymbol, *elemConst, i); + } else { /* intconst */ + EmitIntConst(mirSymbol, aggConst, itabConflictIndex, strIdx2Type, i); + } + } else if (elemConst->GetType().GetKind() == kTypeArray || elemConst->GetType().GetKind() == kTypeStruct) { + if (StringUtils::StartsWith(mirSymbol.GetName(), namemangler::kOffsetTabStr) && (i == 0 || i == 1)) { + /* EmitOffsetValueTable */ +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); 
+#endif + if (i == 0) { + (void)Emit(namemangler::kVtabOffsetTabStr + cg->GetMIRModule()->GetFileNameAsPostfix() + " - .\n"); + } else { + (void)Emit(namemangler::kFieldOffsetTabStr + cg->GetMIRModule()->GetFileNameAsPostfix() + " - .\n"); + } + } else { + EmitConstantTable(mirSymbol, *elemConst, strIdx2Type); + } + } + } +} + +void Emitter::EmitArrayConstant(MIRConst &mirConst) { + MIRType &mirType = mirConst.GetType(); + MIRAggConst &arrayCt = static_cast(mirConst); + MIRArrayType &arrayType = static_cast(mirType); + size_t uNum = arrayCt.GetConstVec().size(); + uint32 dim = arrayType.GetSizeArrayItem(0); + TyIdx scalarIdx = arrayType.GetElemTyIdx(); + MIRType *subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + if (uNum == 0 && dim) { + while (subTy->GetKind() == kTypeArray) { + MIRArrayType *aSubTy = static_cast(subTy); + if (aSubTy->GetSizeArrayItem(0) > 0) { + dim *= (aSubTy->GetSizeArrayItem(0)); + } + scalarIdx = aSubTy->GetElemTyIdx(); + subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + } + } + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = arrayCt.GetConstVecItem(i); + if (IsPrimitiveVector(subTy->GetPrimType())) { + EmitVectorConstant(*elemConst); + } else if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (cg->GetMIRModule()->IsCModule()) { + bool strLiteral = false; + if (arrayType.GetDim() == 1) { + MIRType *ety = arrayType.GetElemType(); + if (ety->GetPrimType() == PTY_i8 || ety->GetPrimType() == PTY_u8) { + strLiteral = true; + } + } + EmitScalarConstant(*elemConst, true, false, strLiteral == false); + } else { + EmitScalarConstant(*elemConst); + } + } else if (elemConst->GetType().GetKind() == kTypeArray) { + EmitArrayConstant(*elemConst); + } else if (elemConst->GetType().GetKind() == kTypeStruct || elemConst->GetType().GetKind() == kTypeClass || + elemConst->GetType().GetKind() == kTypeUnion) { + EmitStructConstant(*elemConst); + } else if (elemConst->GetKind() == kConstAddrofFunc) { + EmitScalarConstant(*elemConst); + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } + int64 iNum = (arrayType.GetSizeArrayItem(0) > 0) ? 
(static_cast(arrayType.GetSizeArrayItem(0))) - uNum : 0; + if (iNum > 0) { + if (!cg->GetMIRModule()->IsCModule()) { + CHECK_FATAL(!Globals::GetInstance()->GetBECommon()->IsEmptyOfTypeSizeTable(), "container empty check"); + CHECK_FATAL(!arrayCt.GetConstVec().empty(), "container empty check"); + } + if (uNum > 0) { + uint64 unInSizeInByte = static_cast(iNum) * static_cast( + Globals::GetInstance()->GetBECommon()->GetTypeSize(arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())); + if (unInSizeInByte != 0) { + EmitNullConstant(unInSizeInByte); + } + } else { + uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(scalarIdx.GetIdx()) * dim; + Emit("\t.zero\t").Emit(static_cast(size)).Emit("\n"); + } + } +} + +void Emitter::EmitVectorConstant(MIRConst &mirConst) { + MIRType &mirType = mirConst.GetType(); + MIRAggConst &vecCt = static_cast(mirConst); + size_t uNum = vecCt.GetConstVec().size(); + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = vecCt.GetConstVecItem(i); + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + bool strLiteral = false; + EmitScalarConstant(*elemConst, true, false, strLiteral == false); + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } + size_t lanes = GetVecLanes(mirType.GetPrimType()); + if (lanes > uNum) { + MIRIntConst zConst(0, vecCt.GetConstVecItem(0)->GetType()); + for (size_t i = uNum; i < lanes; i++) { + EmitScalarConstant(zConst, true, false, false); + } + } +} + +void Emitter::EmitStructConstant(MIRConst &mirConst) { + uint32_t subStructFieldCounts = 0; + EmitStructConstant(mirConst, subStructFieldCounts); +} + +void Emitter::EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCounts) { + StructEmitInfo *sEmitInfo = cg->GetMIRModule()->GetMemPool()->New(); + CHECK_FATAL(sEmitInfo != nullptr, "create a new struct emit info failed in Emitter::EmitStructConstant"); + MIRType &mirType = mirConst.GetType(); + MIRAggConst &structCt = static_cast(mirConst); + MIRStructType &structType = static_cast(mirType); + auto structPack = static_cast(structType.GetTypeAttrs().GetPack()); + /* all elements of struct. */ + uint8 num; + if (structType.GetKind() == kTypeUnion) { + num = 1; + } else { + num = static_cast(structType.GetFieldsSize()); + } + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + /* total size of emitted elements size. 
*/ + uint32 size = beCommon->GetTypeSize(structType.GetTypeIndex()); + uint32 fieldIdx = 1; + if (structType.GetKind() == kTypeUnion) { + fieldIdx = structCt.GetFieldIdItem(0); + } + for (uint32 i = 0; i < num; ++i) { + if (((i + 1) == num) && cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + isFlexibleArray = beCommon->GetHasFlexibleArray(mirType.GetTypeIndex().GetIdx()); + arraySize = 0; + } + MIRConst *elemConst; + if (structType.GetKind() == kTypeStruct) { + elemConst = structCt.GetAggConstElement(i + 1); + } else { + elemConst = structCt.GetAggConstElement(fieldIdx); + } + MIRType *elemType = structType.GetElemType(i); + if (structType.GetKind() == kTypeUnion) { + elemType = &(elemConst->GetType()); + } + MIRType *nextElemType = nullptr; + if (i != static_cast(num - 1)) { + nextElemType = structType.GetElemType(i + 1); + } + uint64 elemSize = beCommon->GetTypeSize(elemType->GetTypeIndex()); + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + if (elemType->GetKind() == kTypeBitField) { + if (elemConst == nullptr) { + MIRIntConst *zeroFill = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *elemType); + elemConst = zeroFill; + } + std::pair fieldOffsetPair = beCommon->GetFieldOffset(structType, fieldIdx); + uint64_t fieldOffset = static_cast(static_cast(fieldOffsetPair.first)) * + static_cast(charBitWidth) + + static_cast(static_cast(fieldOffsetPair.second)); + EmitBitFieldConstant(*sEmitInfo, *elemConst, nextElemType, fieldOffset); + } else { + if (elemConst != nullptr) { + if (IsPrimitiveVector(elemType->GetPrimType())) { + EmitVectorConstant(*elemConst); + } else if (IsPrimitiveScalar(elemType->GetPrimType())) { + EmitScalarConstant(*elemConst, true, false, true); + } else if (elemType->GetKind() == kTypeArray) { + if (elemType->GetSize() != 0) { + EmitArrayConstant(*elemConst); + } + } else if ((elemType->GetKind() == kTypeStruct) || (elemType->GetKind() == kTypeClass) || + (elemType->GetKind() == kTypeUnion)) { + EmitStructConstant(*elemConst, subStructFieldCounts); + fieldIdx += subStructFieldCounts; + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } else { + EmitNullConstant(elemSize); + } + sEmitInfo->IncreaseTotalSize(elemSize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + + if (nextElemType != nullptr && kTypeBitField != nextElemType->GetKind()) { + DEBUG_ASSERT(i < static_cast(num - 1), "NYI"); + uint8 nextAlign = Globals::GetInstance()->GetBECommon()->GetTypeAlign(nextElemType->GetTypeIndex()); + auto fieldAttr = structType.GetFields()[i + 1].second.second; + nextAlign = fieldAttr.IsPacked() ? 1 : std::min(nextAlign, structPack); + DEBUG_ASSERT(nextAlign != 0, "expect non-zero"); + /* append size, append 0 when align need. */ + uint64 totalSize = sEmitInfo->GetTotalSize(); + uint64 psize = (totalSize % nextAlign == 0) ? 0 : (nextAlign - (totalSize % nextAlign)); + if (psize != 0) { + EmitNullConstant(psize); + sEmitInfo->IncreaseTotalSize(psize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + /* element is uninitialized, emit null constant. */ + } + fieldIdx++; + } + if (structType.GetKind() == kTypeStruct) { + /* The reason of subtracting one is that fieldIdx adds one at the end of the cycle. 
*/ + subStructFieldCounts = fieldIdx - 1; + } else if (structType.GetKind() == kTypeUnion) { + subStructFieldCounts = static_cast(beCommon->GetStructFieldCount(structType.GetTypeIndex())); + } + + isFlexibleArray = false; + uint64 opSize = size - sEmitInfo->GetTotalSize(); + if (opSize != 0) { + EmitNullConstant(opSize); + } +} + +/* BlockMarker is for Debugging/Profiling */ +void Emitter::EmitBlockMarker(const std::string &markerName, const std::string &sectionName, + bool withAddr, const std::string &addrName) { + /* + * .type $marker_name$, %object + * .global $marker_name$ + * .data + * .align 3 + * $marker_name$: + * .quad 0xdeadbeefdeadbeef + * .size $marker_name$, 8 + */ + Emit(asmInfo->GetType()); + Emit(markerName); + Emit(", %object\n"); + if (CGOptions::IsEmitBlockMarker()) { /* exposed as global symbol, for profiling */ + Emit(asmInfo->GetGlobal()); + } else { /* exposed as local symbol, for release. */ + Emit(asmInfo->GetLocal()); + } + Emit(markerName); + Emit("\n"); + + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName); + if (sectionName.find("ro") == 0) { + Emit(",\"a\",%progbits\n"); + } else { + Emit(",\"aw\",%progbits\n"); + } + } else { + EmitAsmLabel(kAsmData); + } + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n" + markerName + ":\n"); +#else + Emit("3\n" + markerName + ":\n"); +#endif + EmitAsmLabel(kAsmQuad); + if (withAddr) { + Emit(addrName + "\n"); + } else { + Emit("0xdeadbeefdeadbeef\n"); /* hexspeak in aarch64 represents crash or deadlock */ + } + Emit(asmInfo->GetSize()); + Emit(markerName + ", 8\n"); +} + +void Emitter::EmitLiteral(const MIRSymbol &literal, const std::map &strIdx2Type) { + /* + * .type _C_STR_xxxx, %object + * .local _C_STR_xxxx + * .data + * .align 3 + * _C_STR_xxxx: + * .quad __cinf_Ljava_2Flang_2FString_3B + * .... + * .size _C_STR_xxxx, 40 + */ + if (literal.GetStorageClass() == kScUnused) { + return; + } + EmitAsmLabel(literal, kAsmType); + /* literal should always be fstatic and readonly? */ + EmitAsmLabel(literal, kAsmLocal); /* always fstatic */ + (void)Emit("\t.section\t."
+ std::string(kMapleLiteralString) + ",\"aw\", %progbits\n"); + EmitAsmLabel(literal, kAsmAlign); + EmitAsmLabel(literal, kAsmSyname); + /* literal is an array */ + MIRConst *mirConst = literal.GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitLiteral"); + if (literal.HasAddrOfValues()) { + EmitConstantTable(literal, *mirConst, strIdx2Type); + } else { + EmitArrayConstant(*mirConst); + } + EmitAsmLabel(literal, kAsmSize); +} + +void Emitter::EmitFuncLayoutInfo(const MIRSymbol &layout) { + /* + * .type $marker_name$, %object + * .global $marker_name$ + * .data + * .align 3 + * $marker_name$: + * .quad funcaddr + * .size $marker_name$, 8 + */ + MIRConst *mirConst = layout.GetKonst(); + MIRAggConst *aggConst = safe_cast(mirConst); + DEBUG_ASSERT(aggConst != nullptr, "null ptr check"); + if (aggConst->GetConstVec().size() != static_cast(LayoutType::kLayoutTypeCount)) { + maple::LogInfo::MapleLogger(kLlErr) << "something wrong happened in funclayoutsym\t" + << "constVec size\t" << aggConst->GetConstVec().size() << "\n"; + return; + } + for (size_t i = 0; i < static_cast(LayoutType::kLayoutTypeCount); ++i) { + std::string markerName = "__MBlock_" + GetLayoutTypeString(i) + "_func_start"; + CHECK_FATAL(aggConst->GetConstVecItem(i)->GetKind() == kConstAddrofFunc, "expect kConstAddrofFunc type"); + MIRAddroffuncConst *funcAddr = safe_cast(aggConst->GetConstVecItem(i)); + DEBUG_ASSERT(funcAddr != nullptr, "null ptr check"); + Emit(asmInfo->GetType()); + Emit(markerName + ", %object\n"); + Emit(asmInfo->GetGlobal()); + Emit(markerName + "\n"); + EmitAsmLabel(kAsmData); +#if TARGX86 || TARGX86_64 + EmitAsmLabel(layout, kAsmAlign); + Emit(markerName + ":\n"); +#else + Emit(asmInfo->GetAlign()); + Emit("3\n" + markerName + ":\n"); +#endif + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word "); +#endif + Emit(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcAddr->GetValue())->GetName()); + Emit("\n"); + Emit(asmInfo->GetSize()); + Emit(markerName + ", 8\n"); + } +} + +void Emitter::EmitStaticFields(const std::vector &fields) { + for (auto *itSymbol : fields) { + EmitAsmLabel(*itSymbol, kAsmType); + /* literal should always be fstatic and readonly?
*/ + EmitAsmLabel(*itSymbol, kAsmLocal); /* always fstatic */ + EmitAsmLabel(kAsmData); + EmitAsmLabel(*itSymbol, kAsmAlign); + EmitAsmLabel(*itSymbol, kAsmSyname); + /* literal is an array */ + MIRConst *mirConst = itSymbol->GetKonst(); + EmitArrayConstant(*mirConst); + } +} + +void Emitter::EmitLiterals(std::vector> &literals, + const std::map &strIdx2Type) { + /* + * load literals profile + * currently only used here, so declare it as local + */ + if (!cg->GetMIRModule()->GetProfile().GetLiteralProfileSize()) { + for (const auto &literalPair : literals) { + EmitLiteral(*(literalPair.first), strIdx2Type); + } + return; + } + /* emit hot literal start symbol */ + EmitBlockMarker("__MBlock_literal_hot_begin", "", false); + /* + * emit literals into .data section + * emit literals in the profile first + */ + for (auto &literalPair : literals) { + if (cg->GetMIRModule()->GetProfile().CheckLiteralHot(literalPair.first->GetName())) { + /* it's in the literal profiling data, means it's "hot" */ + EmitLiteral(*(literalPair.first), strIdx2Type); + literalPair.second = true; + } + } + /* emit hot literal end symbol */ + EmitBlockMarker("__MBlock_literal_hot_end", "", false); + + /* emit cold literal start symbol */ + EmitBlockMarker("__MBlock_literal_cold_begin", "", false); + /* emit other literals (not in the profile) next. */ + for (const auto &literalPair : literals) { + if (!literalPair.second) { + /* not emitted yet */ + EmitLiteral(*(literalPair.first), strIdx2Type); + } + } + /* emit cold literal end symbol */ + EmitBlockMarker("__MBlock_literal_cold_end", "", false); +} + +void Emitter::GetHotAndColdMetaSymbolInfo(const std::vector &mirSymbolVec, + std::vector &hotFieldInfoSymbolVec, + std::vector &coldFieldInfoSymbolVec, const std::string &prefixStr, + bool forceCold) { + bool isHot = false; + for (auto mirSymbol : mirSymbolVec) { + CHECK_FATAL(prefixStr.length() < mirSymbol->GetName().length(), "string length check"); + std::string name = mirSymbol->GetName().substr(prefixStr.length()); + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(name, klassJavaDescriptor); + if (prefixStr == kFieldsInfoPrefixStr) { + isHot = cg->GetMIRModule()->GetProfile().CheckFieldHot(klassJavaDescriptor); + } else if (prefixStr == kMethodsInfoPrefixStr) { + isHot = cg->GetMIRModule()->GetProfile().CheckMethodHot(klassJavaDescriptor); + } else { + isHot = cg->GetMIRModule()->GetProfile().CheckClassHot(klassJavaDescriptor); + } + if (isHot && !forceCold) { + hotFieldInfoSymbolVec.emplace_back(mirSymbol); + } else { + coldFieldInfoSymbolVec.emplace_back(mirSymbol); + } + } +} + +void Emitter::EmitMetaDataSymbolWithMarkFlag(const std::vector &mirSymbolVec, + const std::map &strIdx2Type, + const std::string &prefixStr, const std::string &sectionName, + bool isHotFlag) { + if (cg->GetMIRModule()->IsCModule()) { + return; + } + if (mirSymbolVec.empty()) { + return; + } + const std::string &markString = "__MBlock" + prefixStr; + const std::string &hotOrCold = isHotFlag ?
"hot" : "cold"; + EmitBlockMarker((markString + hotOrCold + "_begin"), sectionName, false); + if (prefixStr == kFieldsInfoCompactPrefixStr || prefixStr == kMethodsInfoCompactPrefixStr || + prefixStr == kFieldOffsetDataPrefixStr || prefixStr == kMethodAddrDataPrefixStr) { + for (auto s : mirSymbolVec) { + EmitMethodFieldSequential(*s, strIdx2Type, sectionName); + } + } else { + for (auto s : mirSymbolVec) { + EmitClassInfoSequential(*s, strIdx2Type, sectionName); + } + } + EmitBlockMarker((markString + hotOrCold + "_end"), sectionName, false); +} + +void Emitter::MarkVtabOrItabEndFlag(const std::vector &mirSymbolVec) { + for (auto mirSymbol : mirSymbolVec) { + auto *aggConst = safe_cast(mirSymbol->GetKonst()); + if ((aggConst == nullptr) || (aggConst->GetConstVec().empty())) { + continue; + } + size_t size = aggConst->GetConstVec().size(); + MIRConst *elemConst = aggConst->GetConstVecItem(size - 1); + DEBUG_ASSERT(elemConst != nullptr, "null ptr check"); + if (elemConst->GetKind() == kConstAddrofFunc) { + maple::LogInfo::MapleLogger(kLlErr) << "ERROR: the last vtab/itab content should not be funcAddr\n"; + } else { + if (elemConst->GetKind() != kConstInt) { + CHECK_FATAL(elemConst->GetKind() == kConstAddrof, "must be"); + continue; + } + MIRIntConst *tabConst = static_cast(elemConst); +#ifdef USE_32BIT_REF + /* #define COLD VTAB ITAB END FLAG 0X4000000000000000 */ + tabConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(tabConst->GetValue()) | 0X40000000, tabConst->GetType()); +#else + /* #define COLD VTAB ITAB END FLAG 0X4000000000000000 */ + tabConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(tabConst->GetExtValue() | 0X4000000000000000, + tabConst->GetType()); +#endif + aggConst->SetItem(static_cast(size) - 1, tabConst, aggConst->GetFieldIdItem(size - 1)); + } + } +} + +void Emitter::EmitStringPointers() { + if (CGOptions::OptimizeForSize()) { + (void)Emit(asmInfo->GetSection()).Emit(".rodata,\"aMS\",@progbits,1").Emit("\n"); +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } else { + (void)Emit(asmInfo->GetSection()).Emit(".rodata").Emit("\n"); + } + for (auto idx: localStrPtr) { + if (idx == 0) { + continue; + } + if (!CGOptions::OptimizeForSize()) { +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } + uint32 strId = idx.GetIdx(); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(idx); + (void)Emit(".LUstr_").Emit(strId).Emit(":\n"); + std::string mplstr(str); + EmitStr(mplstr, false, true); + } + for (auto idx: stringPtr) { + if (idx == 0) { + continue; + } + if (!CGOptions::OptimizeForSize()) { +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } + uint32 strId = idx.GetIdx(); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(idx); + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n"); +#else + Emit("3\n"); +#endif + Emit(".LSTR__").Emit(strId).Emit(":\n"); + std::string mplstr(str); + EmitStr(mplstr, false, true); + } +} + +void Emitter::EmitLocalVariable(const CGFunc &cgFunc) { + /* function local pstatic initialization */ + if (cg->GetMIRModule()->IsCModule()) { + MIRSymbolTable *lSymTab = cgFunc.GetMirModule().CurFunction()->GetSymTab(); + if (lSymTab != nullptr) { + /* anything larger than is created by cg */ + size_t lsize = cgFunc.GetLSymSize(); + for (size_t i = 0; i < lsize; i++) { + MIRSymbol *st = lSymTab->GetSymbolFromStIdx(static_cast(i)); + if (st != 
nullptr && st->GetStorageClass() == kScPstatic) { + /* + * Local static names can repeat. + * Append the current program unit index to the name. + */ + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + std::string localname = st->GetName() + std::to_string(pIdx); + static std::vector emittedLocalSym; + bool found = false; + for (auto name : emittedLocalSym) { + if (name == localname) { + found = true; + break; + } + } + if (found) { + continue; + } + emittedLocalSym.push_back(localname); + + Emit(asmInfo->GetSection()); + Emit(asmInfo->GetData()); + Emit("\n"); + EmitAsmLabel(*st, kAsmAlign); + EmitAsmLabel(*st, kAsmLocal); + MIRType *ty = st->GetType(); + MIRConst *ct = st->GetKonst(); + if (ct == nullptr) { + EmitAsmLabel(*st, kAsmComm); + } else if (kTypeStruct == ty->GetKind() || kTypeUnion == ty->GetKind() || kTypeClass == ty->GetKind()) { + EmitAsmLabel(*st, kAsmSyname); + EmitStructConstant(*ct); + } else if (kTypeArray == ty->GetKind()) { + if (ty->GetSize() != 0) { + EmitAsmLabel(*st, kAsmSyname); + EmitArrayConstant(*ct); + } + } else { + EmitAsmLabel(*st, kAsmSyname); + EmitScalarConstant(*ct, true, false, true /* isIndirect */); + } + } + } + } +} + +void Emitter::EmitGlobalVar(const MIRSymbol &globalVar) { + EmitAsmLabel(globalVar, kAsmType); + if (globalVar.sectionAttr != UStrIdx(0)) { /* check section info if it is from inline asm */ + Emit("\t.section\t"); + Emit(GlobalTables::GetUStrTable().GetStringFromStrIdx(globalVar.sectionAttr)); + Emit(",\"aw\",%progbits\n"); + } else { + EmitAsmLabel(globalVar, kAsmLocal); + } + EmitAsmLabel(globalVar, kAsmComm); +} + +void Emitter::EmitGlobalVars(std::vector> &globalVars) { + if (GetCG()->IsLmbc() && GetCG()->GetGP() != nullptr) { + (void)Emit(asmInfo->GetLocal()).Emit("\t").Emit(GetCG()->GetGP()->GetName()).Emit("\n"); + (void)Emit(asmInfo->GetComm()).Emit("\t").Emit(GetCG()->GetGP()->GetName()); + (void)Emit(", ").Emit(GetCG()->GetMIRModule()->GetGlobalMemSize()).Emit(", ").Emit("8\n"); + } + /* load globalVars profile */ + if (globalVars.empty()) { + return; + } + std::unordered_set hotVars; + std::ifstream inFile; + if (!CGOptions::IsGlobalVarProFileEmpty()) { + inFile.open(CGOptions::GetGlobalVarProFile()); + if (inFile.fail()) { + maple::LogInfo::MapleLogger(kLlErr) << "Cannot open globalVar profile file " << CGOptions::GetGlobalVarProFile() + << "\n"; + } + } + if (CGOptions::IsGlobalVarProFileEmpty() || inFile.fail()) { + for (const auto &globalVarPair : globalVars) { + EmitGlobalVar(*(globalVarPair.first)); + } + return; + } + std::string globalVarName; + while (inFile >> globalVarName) { + (void)hotVars.insert(globalVarName); + } + inFile.close(); + bool hotBeginSet = false; + bool coldBeginSet = false; + for (auto &globalVarPair : globalVars) { + if (hotVars.find(globalVarPair.first->GetName()) != hotVars.end()) { + if (!hotBeginSet) { + /* emit hot globalvar start symbol */ + EmitBlockMarker("__MBlock_globalVars_hot_begin", "", true, globalVarPair.first->GetName()); + hotBeginSet = true; + } + EmitGlobalVar(*(globalVarPair.first)); + globalVarPair.second = true; + } + } + for (const auto &globalVarPair : globalVars) { + if (!globalVarPair.second) { /* not emitted yet */ + if (!coldBeginSet) { + /* emit cold globalvar start symbol */ + EmitBlockMarker("__MBlock_globalVars_cold_begin", "", true, globalVarPair.first->GetName()); + coldBeginSet = true; + } + EmitGlobalVar(*(globalVarPair.first)); + } + } + MIRSymbol *endSym = globalVars.back().first; + MIRType *mirType = endSym->GetType(); +
ASSERT_NOT_NULL(endSym); + ASSERT_NOT_NULL(mirType); + const std::string kStaticVarEndAdd = + std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())) + "+" + + endSym->GetName(); + EmitBlockMarker("__MBlock_globalVars_cold_end", "", true, kStaticVarEndAdd); +} + +void Emitter::EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string §ionName) { + EmitAsmLabel(symbol, kAsmType); + Emit(asmInfo->GetSection()); + auto sectionConstrains = symbol.IsThreadLocal() ? ",\"awT\"," : ",\"aw\","; + (void)Emit(sectionName).Emit(sectionConstrains); + if (sectionName == ".bss" || StringUtils::StartsWith(sectionName, ".bss.") || + sectionName == ".tbss" || StringUtils::StartsWith(sectionName, ".tbss.")) { + Emit("%nobits\n"); + } else { + Emit("%progbits\n"); + } + if (symbol.GetAttr(ATTR_weak)) { + EmitAsmLabel(symbol, kAsmWeak); + } else if (symbol.GetStorageClass() == kScGlobal) { + EmitAsmLabel(symbol, kAsmGlbl); + } + EmitAsmLabel(symbol, kAsmAlign); + EmitAsmLabel(symbol, kAsmSyname); + EmitAsmLabel(symbol, kAsmZero); + EmitAsmLabel(symbol, kAsmSize); +} + +void Emitter::EmitGlobalVariable() { + std::vector typeStVec; + std::vector typeNameStVec; + std::map strIdx2Type; + + /* Create name2type map which will be used by reflection. */ + for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if (type == nullptr || (type->GetKind() != kTypeClass && type->GetKind() != kTypeInterface)) { + continue; + } + GStrIdx strIdx = type->GetNameStrIdx(); + strIdx2Type[strIdx] = type; + } + + /* sort symbols; classinfo-->field-->method */ + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + std::vector classInfoVec; + std::vector vtabVec; + std::vector staticFieldsVec; + std::vector> globalVarVec; + std::vector itabVec; + std::vector itabConflictVec; + std::vector vtabOffsetVec; + std::vector fieldOffsetVec; + std::vector valueOffsetVec; + std::vector localClassInfoVec; + std::vector constStrVec; + std::vector> literalVec; + std::vector muidVec = { nullptr }; + std::vector fieldOffsetDatas; + std::vector methodAddrDatas; + std::vector methodSignatureDatas; + std::vector staticDecoupleKeyVec; + std::vector staticDecoupleValueVec; + std::vector superClassStVec; + std::vector arrayClassCacheVec; + std::vector arrayClassCacheNameVec; + + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused) { + continue; + } + if (mirSymbol->GetSKind() == kStFunc) { + EmitAliasAndRef(*mirSymbol); + } + + if (mirSymbol->GetName().find(VTAB_PREFIX_STR) == 0) { + vtabVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(ITAB_PREFIX_STR) == 0) { + itabVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(ITAB_CONFLICT_PREFIX_STR) == 0) { + itabConflictVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kVtabOffsetTabStr) == 0) { + vtabOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kFieldOffsetTabStr) == 0) { + fieldOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kOffsetTabStr) == 0) { + valueOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsArrayClassCache()) { + arrayClassCacheVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsArrayClassCacheName()) { + 
arrayClassCacheNameVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kLocalClassInfoStr) == 0) { + localClassInfoVec.emplace_back(mirSymbol); + continue; + } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticKeyStr)) { + staticDecoupleKeyVec.emplace_back(mirSymbol); + continue; + } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticValueStr)) { + staticDecoupleValueVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsLiteral()) { + literalVec.emplace_back(std::make_pair(mirSymbol, false)); + continue; + } else if (mirSymbol->IsConstString() || mirSymbol->IsLiteralPtr()) { + MIRConst *mirConst = mirSymbol->GetKonst(); + if (mirConst != nullptr && mirConst->GetKind() == kConstAddrof) { + constStrVec.emplace_back(mirSymbol); + continue; + } + } else if (mirSymbol->IsReflectionClassInfoPtr()) { + /* _PTR__cinf is emitted in dataDefTab and dataUndefTab */ + continue; + } else if (mirSymbol->IsMuidTab()) { + if (!GetCG()->GetMIRModule()->IsCModule()) { + muidVec[0] = mirSymbol; + EmitMuidTable(muidVec, strIdx2Type, mirSymbol->GetMuidTabName()); + } + continue; + } else if (mirSymbol->IsCodeLayoutInfo()) { + if (!GetCG()->GetMIRModule()->IsCModule()) { + EmitFuncLayoutInfo(*mirSymbol); + } + continue; + } else if (mirSymbol->GetName().find(kStaticFieldNamePrefixStr) == 0) { + staticFieldsVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kGcRootList) == 0) { + EmitGlobalRootList(*mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kFunctionProfileTabPrefixStr) == 0) { + muidVec[0] = mirSymbol; + EmitMuidTable(muidVec, strIdx2Type, kFunctionProfileTabPrefixStr); + continue; + } else if (mirSymbol->IsReflectionFieldOffsetData()) { + fieldOffsetDatas.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionMethodAddrData()) { + methodAddrDatas.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionSuperclassInfo()) { + superClassStVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionMethodSignature()) { + methodSignatureDatas.push_back(mirSymbol); + continue; + } + + if (mirSymbol->IsReflectionInfo()) { + if (mirSymbol->IsReflectionClassInfo()) { + classInfoVec.emplace_back(mirSymbol); + } + continue; + } + /* symbols we do not emit here. */ + if (mirSymbol->GetSKind() == kStFunc || mirSymbol->GetSKind() == kStJavaClass || + mirSymbol->GetSKind() == kStJavaInterface) { + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeInfo) { + typeStVec.emplace_back(mirSymbol); + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeInfoName) { + typeNameStVec.emplace_back(mirSymbol); + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeCxxAbi) { + continue; + } + + MIRType *mirType = mirSymbol->GetType(); + if (mirType == nullptr) { + continue; + } + if (GetCG()->GetMIRModule()->IsCModule() && mirSymbol->GetStorageClass() == kScExtern) { + /* only emit weak & initialized extern at present */ + if (mirSymbol->IsWeak() || mirSymbol->IsConst()) { + EmitAsmLabel(*mirSymbol, kAsmWeak); + } else { + continue; + } + } + /* + * emit uninitialized global/static variables. + * these variables are stored in the .comm section.
+ */ + if ((mirSymbol->GetStorageClass() == kScGlobal || mirSymbol->GetStorageClass() == kScFstatic) && + !mirSymbol->IsConst()) { + if (mirSymbol->IsGctibSym()) { + /* GCTIB symbols are generated in GenerateObjectMaps */ + continue; + } + if (mirSymbol->GetStorageClass() != kScGlobal) { + globalVarVec.emplace_back(std::make_pair(mirSymbol, false)); + continue; + } + if (mirSymbol->sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol->sectionAttr); + EmitUninitializedSymbolsWithPrefixSection(*mirSymbol, sectionName); + continue; + } else if (mirSymbol->IsThreadLocal()) { + EmitUninitializedSymbolsWithPrefixSection(*mirSymbol, ".tbss"); + continue; + } else if (CGOptions::IsNoCommon() || (!CGOptions::IsNoCommon() && mirSymbol->GetAttr(ATTR_static_init_zero))) { + EmitUninitializedSymbolsWithPrefixSection(*mirSymbol, ".bss"); + continue; + } + EmitAsmLabel(*mirSymbol, kAsmType); + EmitAsmLabel(*mirSymbol, kAsmComm); + continue; + } + + /* emit initialized global/static variables. */ + if (mirSymbol->GetStorageClass() == kScGlobal || + (mirSymbol->GetStorageClass() == kScExtern && GetCG()->GetMIRModule()->IsCModule()) || + (mirSymbol->GetStorageClass() == kScFstatic && !mirSymbol->IsReadOnly())) { + /* Emit section */ + EmitAsmLabel(*mirSymbol, kAsmType); + if (mirSymbol->IsReflectionStrTab()) { + std::string sectionName = ".reflection_strtab"; + if (mirSymbol->GetName().find(kReflectionStartHotStrtabPrefixStr) == 0) { + sectionName = ".reflection_start_hot_strtab"; + } else if (mirSymbol->GetName().find(kReflectionBothHotStrTabPrefixStr) == 0) { + sectionName = ".reflection_both_hot_strtab"; + } else if (mirSymbol->GetName().find(kReflectionRunHotStrtabPrefixStr) == 0) { + sectionName = ".reflection_run_hot_strtab"; + } + Emit("\t.section\t" + sectionName + ",\"a\",%progbits\n"); + } else if (mirSymbol->GetName().find(kDecoupleOption) == 0) { + Emit("\t.section\t." + std::string(kDecoupleStr) + ",\"a\",%progbits\n"); + } else if (mirSymbol->IsRegJNITab()) { + Emit("\t.section\t.reg_jni_tab,\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kCompilerVersionNum) == 0) { + Emit("\t.section\t." + std::string(kCompilerVersionNumStr) + ",\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kSourceMuid) == 0) { + Emit("\t.section\t." + std::string(kSourceMuidSectionStr) + ",\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kCompilerMfileStatus) == 0) { + Emit("\t.section\t." + std::string(kCompilerMfileStatus) + ",\"a\", %progbits\n"); + } else if (mirSymbol->IsRegJNIFuncTab()) { + Emit("\t.section\t.reg_jni_func_tab,\"aw\", %progbits\n"); + } else if (mirSymbol->IsReflectionPrimitiveClassInfo()) { + Emit("\t.section\t.primitive_classinfo,\"awG\", %progbits,__primitive_classinfo__,comdat\n"); + } else if (mirSymbol->IsReflectionHashTabBucket()) { + std::string stName = mirSymbol->GetName(); + const std::string delimiter = "$$"; + if (stName.find(delimiter) == std::string::npos) { + FATAL(kLncFatal, "Can not find delimiter in target "); + } + std::string secName = stName.substr(0, stName.find(delimiter)); + /* remove leading "__" in sec name. */ + secName.erase(0, 2); + Emit("\t.section\t." + secName + ",\"a\",%progbits\n"); + } else { + bool isThreadLocal = mirSymbol->IsThreadLocal(); + if (cg->GetMIRModule()->IsJavaModule()) { + (void)Emit("\t.section\t." 
+ std::string(kMapleGlobalVariable) + ",\"aw\", @progbits\n"); + } else if (mirSymbol->sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol->sectionAttr); + auto sectionConstrains = isThreadLocal ? ",\"awT\"," : ",\"aw\","; + (void)Emit("\t.section\t" + sectionName + sectionConstrains + "@progbits\n"); + } else if (isThreadLocal) { + (void)Emit("\t.section\t.tdata,\"awT\",@progbits\n"); + } else { + (void)Emit("\t.data\n"); + } + } + /* Emit size and align by type */ + if (mirSymbol->GetStorageClass() == kScGlobal) { + if (mirSymbol->GetAttr(ATTR_weak) || mirSymbol->IsReflectionPrimitiveClassInfo()) { + EmitAsmLabel(*mirSymbol, kAsmWeak); + } else { + EmitAsmLabel(*mirSymbol, kAsmGlbl); + } + if (theMIRModule->IsJavaModule()) { + EmitAsmLabel(*mirSymbol, kAsmHidden); + } + } else if (mirSymbol->GetStorageClass() == kScFstatic) { + if (mirSymbol->sectionAttr == UStrIdx(0)) { + EmitAsmLabel(*mirSymbol, kAsmLocal); + } + } + if (mirSymbol->IsReflectionStrTab()) { /* reflection-string-tab also aligned to 8B boundaries. */ + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n"); +#else + Emit("3\n"); +#endif + } else { + EmitAsmLabel(*mirSymbol, kAsmAlign); + } + EmitAsmLabel(*mirSymbol, kAsmSyname); + MIRConst *mirConst = mirSymbol->GetKonst(); + if (IsPrimitiveVector(mirType->GetPrimType())) { + EmitVectorConstant(*mirConst); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + if (!CGOptions::IsArm64ilp32()) { + if (IsAddress(mirType->GetPrimType())) { + uint32 sizeinbits = GetPrimTypeBitSize(mirConst->GetType().GetPrimType()); + CHECK_FATAL(sizeinbits == k64BitSize, "EmitGlobalVariable: pointer must be of size 8"); + } + } + if (cg->GetMIRModule()->IsCModule()) { + EmitScalarConstant(*mirConst, true, false, true); + } else { + EmitScalarConstant(*mirConst); + } + } else if (mirType->GetKind() == kTypeArray) { + if (mirSymbol->HasAddrOfValues()) { + EmitConstantTable(*mirSymbol, *mirConst, strIdx2Type); + } else { + EmitArrayConstant(*mirConst); + } + } else if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeUnion) { + if (mirSymbol->HasAddrOfValues()) { + EmitConstantTable(*mirSymbol, *mirConst, strIdx2Type); + } else { + EmitStructConstant(*mirConst); + } + } else { + DEBUG_ASSERT(false, "NYI"); + } + EmitAsmLabel(*mirSymbol, kAsmSize); + /* emit constant float/double */ + } else if (mirSymbol->IsReadOnly()) { + MIRConst *mirConst = mirSymbol->GetKonst(); + if (mirConst->GetKind() == maple::kConstStrConst) { + auto strCt = static_cast(mirConst); + localStrPtr.push_back(strCt->GetValue()); + } else { + EmitAsmLabel(*mirSymbol, kAsmType); + (void)Emit(asmInfo->GetSection()).Emit(asmInfo->GetRodata()).Emit("\n"); + if (!CGOptions::OptimizeForSize()) { + EmitAsmLabel(*mirSymbol, kAsmAlign); + } + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitScalarConstant(*mirConst); + } + } else if (mirSymbol->GetStorageClass() == kScPstatic) { + EmitAsmLabel(*mirSymbol, kAsmType); + Emit(asmInfo->GetSection()); + Emit(asmInfo->GetData()); + Emit("\n"); + EmitAsmLabel(*mirSymbol, kAsmAlign); + EmitAsmLabel(*mirSymbol, kAsmLocal); + MIRConst *ct = mirSymbol->GetKonst(); + if (ct == nullptr) { + EmitAsmLabel(*mirSymbol, kAsmComm); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitScalarConstant(*ct, true, false, true); + } else if (kTypeArray == mirType->GetKind()) { + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitArrayConstant(*ct); 
+ } else if (kTypeStruct == mirType->GetKind() || kTypeClass == mirType->GetKind() || + kTypeUnion == mirType->GetKind()) { + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitStructConstant(*ct); + } else { + CHECK_FATAL(0, "Unknown type in Global pstatic"); + } + } + } /* end processing all mirSymbols. */ + EmitStringPointers(); + /* emit global var */ + EmitGlobalVars(globalVarVec); + /* emit literal strings */ + EmitLiterals(literalVec, strIdx2Type); + /* emit static field strings */ + EmitStaticFields(staticFieldsVec); + + if (GetCG()->GetMIRModule()->IsCModule()) { + return; + } + + EmitMuidTable(constStrVec, strIdx2Type, kMuidConststrPrefixStr); + + /* emit classinfo, field, method */ + std::vector fieldInfoStVec; + std::vector fieldInfoStCompactVec; + std::vector methodInfoStVec; + std::vector methodInfoStCompactVec; + + std::string sectionName = kMuidClassMetadataPrefixStr; + Emit("\t.section ." + sectionName + ",\"aw\",%progbits\n"); + Emit(sectionName + "_begin:\n"); + + for (size_t i = 0; i < classInfoVec.size(); ++i) { + MIRSymbol *mirSymbol = classInfoVec[i]; + if (mirSymbol != nullptr && mirSymbol->GetKonst() != nullptr && mirSymbol->IsReflectionClassInfo()) { + /* Emit classinfo */ + EmitClassInfoSequential(*mirSymbol, strIdx2Type, sectionName); + std::string stName = mirSymbol->GetName(); + std::string className = stName.substr(strlen(CLASSINFO_PREFIX_STR)); + /* Get classinfo ro symbol */ + MIRSymbol *classInfoROSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(CLASSINFO_RO_PREFIX_STR + className)); + EmitClassInfoSequential(*classInfoROSt, strIdx2Type, sectionName); + /* Get fields */ + MIRSymbol *fieldSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldsInfoPrefixStr + className)); + MIRSymbol *fieldStCompact = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldsInfoCompactPrefixStr + className)); + /* Get methods */ + MIRSymbol *methodSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoPrefixStr + className)); + MIRSymbol *methodStCompact = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoCompactPrefixStr + className)); + + if (fieldSt != nullptr) { + fieldInfoStVec.emplace_back(fieldSt); + } + if (fieldStCompact != nullptr) { + fieldInfoStCompactVec.emplace_back(fieldStCompact); + } + if (methodSt != nullptr) { + methodInfoStVec.emplace_back(methodSt); + } + if (methodStCompact != nullptr) { + methodInfoStCompactVec.emplace_back(methodStCompact); + } + } + } + Emit(sectionName + "_end:\n"); + + std::vector hotVtabStVec; + std::vector coldVtabStVec; + std::vector hotItabStVec; + std::vector coldItabStVec; + std::vector hotItabCStVec; + std::vector coldItabCStVec; + std::vector hotMethodsInfoCStVec; + std::vector coldMethodsInfoCStVec; + std::vector hotFieldsInfoCStVec; + std::vector coldFieldsInfoCStVec; + GetHotAndColdMetaSymbolInfo(vtabVec, hotVtabStVec, coldVtabStVec, VTAB_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(itabVec, hotItabStVec, coldItabStVec, ITAB_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(itabConflictVec, hotItabCStVec, coldItabCStVec, ITAB_CONFLICT_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix())
&& !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(fieldInfoStVec, hotFieldsInfoCStVec, coldFieldsInfoCStVec, kFieldsInfoPrefixStr); + GetHotAndColdMetaSymbolInfo(methodInfoStVec, hotMethodsInfoCStVec, coldMethodsInfoCStVec, kMethodsInfoPrefixStr); + + std::string sectionNameIsEmpty; + std::string fieldSectionName("rometadata.field"); + std::string methodSectionName("rometadata.method"); + + /* fieldinfo */ + EmitMetaDataSymbolWithMarkFlag(hotFieldsInfoCStVec, strIdx2Type, kFieldsInfoPrefixStr, fieldSectionName, true); + EmitMetaDataSymbolWithMarkFlag(coldFieldsInfoCStVec, strIdx2Type, kFieldsInfoPrefixStr, fieldSectionName, false); + EmitMetaDataSymbolWithMarkFlag(fieldInfoStCompactVec, strIdx2Type, kFieldsInfoCompactPrefixStr, fieldSectionName, + false); + /* methodinfo */ + EmitMetaDataSymbolWithMarkFlag(hotMethodsInfoCStVec, strIdx2Type, kMethodsInfoPrefixStr, methodSectionName, true); + EmitMetaDataSymbolWithMarkFlag(coldMethodsInfoCStVec, strIdx2Type, kMethodsInfoPrefixStr, methodSectionName, false); + EmitMetaDataSymbolWithMarkFlag(methodInfoStCompactVec, strIdx2Type, kMethodsInfoCompactPrefixStr, methodSectionName, + false); + + /* itabConflict */ + MarkVtabOrItabEndFlag(coldItabCStVec); + EmitMuidTable(hotItabCStVec, strIdx2Type, kMuidItabConflictPrefixStr); + EmitMetaDataSymbolWithMarkFlag(coldItabCStVec, strIdx2Type, ITAB_CONFLICT_PREFIX_STR, kMuidColdItabConflictPrefixStr, + false); + + /* + * vtab + * And itab to vtab section + */ + for (auto sym : hotItabStVec) { + hotVtabStVec.emplace_back(sym); + } + for (auto sym : coldItabStVec) { + coldVtabStVec.emplace_back(sym); + } + MarkVtabOrItabEndFlag(coldVtabStVec); + EmitMuidTable(hotVtabStVec, strIdx2Type, kMuidVtabAndItabPrefixStr); + EmitMetaDataSymbolWithMarkFlag(coldVtabStVec, strIdx2Type, VTAB_AND_ITAB_PREFIX_STR, kMuidColdVtabAndItabPrefixStr, + false); + + /* vtab_offset */ + EmitMuidTable(vtabOffsetVec, strIdx2Type, kMuidVtabOffsetPrefixStr); + /* field_offset */ + EmitMuidTable(fieldOffsetVec, strIdx2Type, kMuidFieldOffsetPrefixStr); + /* value_offset */ + EmitMuidTable(valueOffsetVec, strIdx2Type, kMuidValueOffsetPrefixStr); + /* local classinfo */ + EmitMuidTable(localClassInfoVec, strIdx2Type, kMuidLocalClassInfoStr); + /* Emit decouple static */ + EmitMuidTable(staticDecoupleKeyVec, strIdx2Type, kDecoupleStaticKeyStr); + EmitMuidTable(staticDecoupleValueVec, strIdx2Type, kDecoupleStaticValueStr); + + /* super class */ + EmitMuidTable(superClassStVec, strIdx2Type, kMuidSuperclassPrefixStr); + + /* field offset rw */ + EmitMetaDataSymbolWithMarkFlag(fieldOffsetDatas, strIdx2Type, kFieldOffsetDataPrefixStr, sectionNameIsEmpty, false); + /* method address rw */ + EmitMetaDataSymbolWithMarkFlag(methodAddrDatas, strIdx2Type, kMethodAddrDataPrefixStr, sectionNameIsEmpty, false); + /* method signature ro */ + std::string methodSignatureSectionName("romethodsignature"); + EmitMetaDataSymbolWithMarkFlag(methodSignatureDatas, strIdx2Type, kMethodSignaturePrefixStr, + methodSignatureSectionName, false); + + /* array class cache table */ + EmitMuidTable(arrayClassCacheVec, strIdx2Type, kArrayClassCacheTable); + /* array class cache name table */ + EmitMuidTable(arrayClassCacheNameVec, strIdx2Type, kArrayClassCacheNameTable); + +#if !defined(TARGARM32) + /* finally emit __gxx_personality_v0 DW.ref */ + if (!cg->GetMIRModule()->IsCModule()) { + EmitDWRef("__mpl_personality_v0"); + } +#endif +} +void Emitter::EmitAddressString(const std::string &address) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +
Emit(address); +#else + Emit("\t.word\t" + address); +#endif +} +void Emitter::EmitGlobalRootList(const MIRSymbol &mirSymbol) { + Emit("\t.section .maple.gcrootsmap").Emit(",\"aw\",%progbits\n"); + std::vector nameVec; + std::string name = mirSymbol.GetName(); + nameVec.emplace_back(name); + nameVec.emplace_back(name + "Size"); + bool gcrootsFlag = true; + uint64 vecSize = 0; + for (const auto &gcrootsName : nameVec) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + Emit("\t.type\t" + gcrootsName + ", @object\n" + "\t.p2align 3\n"); +#else + Emit("\t.type\t" + gcrootsName + ", %object\n" + "\t.p2align 3\n"); +#endif + Emit("\t.global\t" + gcrootsName + "\n"); + if (gcrootsFlag) { + Emit(kMuidGlobalRootlistPrefixStr).Emit("_begin:\n"); + } + Emit(gcrootsName + ":\n"); + if (gcrootsFlag) { + MIRAggConst *aggConst = safe_cast(mirSymbol.GetKonst()); + if (aggConst == nullptr) { + continue; + } + size_t i = 0; + while (i < aggConst->GetConstVec().size()) { + MIRConst *elemConst = aggConst->GetConstVecItem(i); + if (elemConst->GetKind() == kConstAddrof) { + MIRAddrofConst *symAddr = safe_cast(elemConst); + CHECK_FATAL(symAddr != nullptr, "nullptr of symAddr"); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr->GetSymbolIndex().Idx()); + const std::string &symAddrName = symAddrSym->GetName(); + EmitAddressString(symAddrName + "\n"); + } else { + EmitScalarConstant(*elemConst); + } + i++; + } + vecSize = i; + } else { + EmitAddressString(std::to_string(vecSize) + "\n"); + } + Emit("\t.size\t" + gcrootsName + ",.-").Emit(gcrootsName + "\n"); + if (gcrootsFlag) { + Emit(kMuidGlobalRootlistPrefixStr).Emit("_end:\n"); + } + gcrootsFlag = false; + } +} + +void Emitter::EmitMuidTable(const std::vector &vec, const std::map &strIdx2Type, + const std::string §ionName) { + MIRSymbol *st = nullptr; + if (!vec.empty()) { + st = vec[0]; + } + if (st != nullptr && st->IsMuidRoTab()) { + Emit("\t.section ." + sectionName + ",\"a\",%progbits\n"); + } else { + Emit("\t.section ." + sectionName + ",\"aw\",%progbits\n"); + } + Emit(sectionName + "_begin:\n"); + bool isConstString = sectionName == kMuidConststrPrefixStr; + for (size_t i = 0; i < vec.size(); i++) { + MIRSymbol *st1 = vec[i]; + DEBUG_ASSERT(st1 != nullptr, "null ptr check"); + if (st1->GetStorageClass() == kScUnused) { + continue; + } + EmitAsmLabel(*st1, kAsmType); + if (st1->GetStorageClass() == kScFstatic) { + EmitAsmLabel(*st1, kAsmLocal); + } else { + EmitAsmLabel(*st1, kAsmGlbl); + EmitAsmLabel(*st1, kAsmHidden); + } + EmitAsmLabel(*st1, kAsmAlign); + EmitAsmLabel(*st1, kAsmSyname); + MIRConst *mirConst = st1->GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitMuidTable"); + if (mirConst->GetKind() == kConstAddrof) { + MIRAddrofConst *symAddr = safe_cast(mirConst); + CHECK_FATAL(symAddr != nullptr, "call static_cast failed in EmitMuidTable"); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr->GetSymbolIndex().Idx()); + if (isConstString) { + EmitAddressString(symAddrSym->GetName() + " - . 
+ "); + Emit(kDataRefIsOffset); + Emit("\n"); + } else { + EmitAddressString(symAddrSym->GetName() + "\n"); + } + } else if (mirConst->GetKind() == kConstInt) { + EmitScalarConstant(*mirConst, true); + } else { + EmitConstantTable(*st1, *mirConst, strIdx2Type); + } + EmitAsmLabel(*st1, kAsmSize); + } + Emit(sectionName + "_end:\n"); +} + +void Emitter::EmitClassInfoSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName) { + EmitAsmLabel(mirSymbol, kAsmType); + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName); + if (StringUtils::StartsWith(sectionName, "ro")) { + Emit(",\"a\",%progbits\n"); + } else { + Emit(",\"aw\",%progbits\n"); + } + } else { + EmitAsmLabel(kAsmData); + } + EmitAsmLabel(mirSymbol, kAsmGlbl); + EmitAsmLabel(mirSymbol, kAsmHidden); + EmitAsmLabel(mirSymbol, kAsmAlign); + EmitAsmLabel(mirSymbol, kAsmSyname); + MIRConst *mirConst = mirSymbol.GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitClassInfoSequential"); + EmitConstantTable(mirSymbol, *mirConst, strIdx2Type); + EmitAsmLabel(mirSymbol, kAsmSize); +} + +void Emitter::EmitMethodDeclaringClass(const MIRSymbol &mirSymbol, const std::string §ionName) { + std::string symName = mirSymbol.GetName(); + std::string emitSyName = symName + "_DeclaringClass"; + std::string declaringClassName = symName.substr(strlen(kFieldsInfoCompactPrefixStr) + 1); + Emit(asmInfo->GetType()); + Emit(emitSyName + ", %object\n"); + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName + "\n"); + } else { + EmitAsmLabel(kAsmData); + } + Emit(asmInfo->GetLocal()); + Emit(emitSyName + "\n"); + Emit(asmInfo->GetAlign()); + Emit(" 2\n" + emitSyName + ":\n"); + Emit("\t.long\t"); + Emit(CLASSINFO_PREFIX_STR + declaringClassName + " - .\n"); + Emit(asmInfo->GetSize()); + Emit(emitSyName + ", 4\n"); +} + +void Emitter::EmitMethodFieldSequential(const MIRSymbol &mirSymbol, + const std::map &strIdx2Type, + const std::string §ionName) { + std::string symName = mirSymbol.GetName(); + if (symName.find(kMethodsInfoCompactPrefixStr) != std::string::npos) { + EmitMethodDeclaringClass(mirSymbol, sectionName); + } + EmitAsmLabel(mirSymbol, kAsmType); + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName + "\n"); + } else { + EmitAsmLabel(kAsmData); + } + EmitAsmLabel(mirSymbol, kAsmLocal); + + /* Emit(2) is 4 bit align */ + Emit(asmInfo->GetAlign()).Emit(2).Emit("\n"); + EmitAsmLabel(mirSymbol, kAsmSyname); + MIRConst *ct = mirSymbol.GetKonst(); + EmitConstantTable(mirSymbol, *ct, strIdx2Type); + std::string symbolName = mirSymbol.GetName(); + Emit("\t.size\t" + symbolName + ", .-"); + Emit(symbolName + "\n"); +} + +void Emitter::EmitDWRef(const std::string &name) { + /* + * .hidden DW.ref._ZTI3xxx + * .weak DW.ref._ZTI3xxx + * .section .data.DW.ref._ZTI3xxx,"awG",@progbits,DW.ref._ZTI3xxx,comdat + * .align 3 + * .type DW.ref._ZTI3xxx, %object + * .size DW.ref._ZTI3xxx, 8 + * DW.ref._ZTI3xxx: + * .xword _ZTI3xxx + */ + Emit("\t.hidden DW.ref." + name + "\n"); + Emit("\t.weak DW.ref." + name + "\n"); + Emit("\t.section .data.DW.ref." + name + ",\"awG\",%progbits,DW.ref."); + Emit(name + ",comdat\n"); + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n"); +#else + Emit("3\n"); +#endif + Emit("\t.type DW.ref." + name + ", \%object\n"); + Emit("\t.size DW.ref." + name + ",8\n"); + Emit("DW.ref." 
+ name + ":\n"); +#if TARGAARCH64 || TARGRISCV64 + Emit("\t.xword " + name + "\n"); +#else + Emit("\t.word " + name + "\n"); +#endif +} + +void Emitter::EmitDecSigned(int64 num) { + std::ios::fmtflags flag(fileStream.flags()); + fileStream << std::dec << num; + fileStream.flags(flag); +} + +void Emitter::EmitDecUnsigned(uint64 num) { + std::ios::fmtflags flag(fileStream.flags()); + fileStream << std::dec << num; + fileStream.flags(flag); +} + +void Emitter::EmitHexUnsigned(uint64 num) { + std::ios::fmtflags flag(fileStream.flags()); + fileStream << "0x" << std::hex << num; + fileStream.flags(flag); +} + +#define XSTR(s) str(s) +#define str(s) #s + +void Emitter::EmitDIHeader() { + if (cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + (void)Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + } else { + (void)Emit("\t.section ." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n"); + } + Emit(".L" XSTR(TEXT_BEGIN) ":\n"); +} + +void Emitter::EmitDIFooter() { + if (cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + (void)Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + } else { + (void)Emit("\t.section ." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n"); + } + Emit(".L" XSTR(TEXT_END) ":\n"); +} + +void Emitter::EmitDIHeaderFileInfo() { + Emit("// dummy header file 1\n"); + Emit("// dummy header file 2\n"); + Emit("// dummy header file 3\n"); +} + +void Emitter::AddLabelDieToLabelIdxMapping(DBGDie *lblDie, LabelIdx lblIdx) { + InsertLabdie2labidxTable(lblDie, lblIdx); +} + +LabelIdx Emitter::GetLabelIdxForLabelDie(DBGDie *lblDie) { + auto it = labdie2labidxTable.find(lblDie); + CHECK_FATAL(it != labdie2labidxTable.end(), ""); + return it->second; +} + +void Emitter::ApplyInPrefixOrder(DBGDie *die, const std::function &func) { + func(die); + DEBUG_ASSERT(die, ""); + if (die->GetSubDieVec().size() > 0) { + for (auto c : die->GetSubDieVec()) { + ApplyInPrefixOrder(c, func); + } + /* mark the end of the sibling list */ + func(nullptr); + } +} + +void Emitter::EmitDIFormSpecification(unsigned int dwform) { + switch (dwform) { + case DW_FORM_string: + Emit(".string "); + break; + case DW_FORM_strp: + case DW_FORM_data4: + case DW_FORM_ref4: + Emit(".4byte "); + break; + case DW_FORM_data1: + Emit(".byte "); + break; + case DW_FORM_data2: + Emit(".2byte "); + break; + case DW_FORM_data8: + Emit(".8byte "); + break; + case DW_FORM_sec_offset: + /* if DWARF64, should be .8byte? */ + Emit(".4byte "); + break; + case DW_FORM_addr: /* Should we use DWARF64? 
for now, we generate .8byte as gcc does for DW_FORM_addr */ + Emit(".8byte "); + break; + case DW_FORM_exprloc: + Emit(".uleb128 "); + break; + default: + CHECK_FATAL(maple::GetDwFormName(dwform) != nullptr, + "GetDwFormName() return null in Emitter::EmitDIFormSpecification"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(dwform) << std::endl; + DEBUG_ASSERT(0, "NYI"); + } +} + +void Emitter::EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di) { + MapleVector &attrvec = die->GetAttrVec(); + + switch (attr->GetDwForm()) { + case DW_FORM_string: { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()); + Emit("\"").Emit(name).Emit("\""); + Emit(CMNT "len = "); + EmitDecUnsigned(name.length() + 1); + } break; + case DW_FORM_strp: + Emit(".L" XSTR(DEBUG_STR_LABEL)); + fileStream << attr->GetId(); + break; + case DW_FORM_data1: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint8_t(attr->GetI())); + break; + case DW_FORM_data2: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint16_t(attr->GetI())); + break; + case DW_FORM_data4: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint32_t(attr->GetI())); + break; + case DW_FORM_data8: + if (attrName == DW_AT_high_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); + } else if (tagName == DW_TAG_subprogram) { + DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di->GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap >::iterator it = + CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.second); /* end label */ + } else { + EmitLabelRef(attr->GetId()); /* maybe deadbeef */ + } + Emit("-"); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.first); /* start label */ + } else { + DBGDieAttr *lowpc = LFindAttribute(attrvec, DW_AT_low_pc); + CHECK_FATAL(lowpc != nullptr, "lowpc is null in Emitter::EmitDIAttrValue"); + EmitLabelRef(lowpc->GetId()); /* maybe deadbeef */ + } + } + } else { + EmitHexUnsigned(static_cast(static_cast(attr->GetI()))); + } + break; + case DW_FORM_sec_offset: + if (attrName == DW_AT_stmt_list) { + Emit(".L"); + Emit(XSTR(DEBUG_LINE_0)); + } + break; + case DW_FORM_addr: + if (attrName == DW_AT_low_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_BEGIN)); + } else if (tagName == DW_TAG_subprogram) { + /* if decl, name should be found; if def, we try DW_AT_specification */ + DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di->GetDie(spec->GetId()); + 
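/* a definition DIE may lack DW_AT_name; take the name from the declaration DIE referenced by DW_AT_specification */ +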
name = LFindAttribute(decl->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap >::iterator + it = CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.first); /* it is a */ + } else { + EmitLabelRef(attr->GetId()); /* maybe deadbeef */ + } + } else if (tagName == DW_TAG_label) { + LabelIdx labelIdx = GetLabelIdxForLabelDie(die); + DBGDie *subpgm = die->GetParent(); + DEBUG_ASSERT(subpgm->GetTag() == DW_TAG_subprogram, "Label DIE should be a child of a Subprogram DIE"); + DBGDieAttr *fnameAttr = LFindAttribute(subpgm->GetAttrVec(), DW_AT_name); + if (!fnameAttr) { + DBGDieAttr *specAttr = LFindAttribute(subpgm->GetAttrVec(), DW_AT_specification); + CHECK_FATAL(specAttr, "pointer is null"); + DBGDie *twin = di->GetDie(static_cast(specAttr->GetU())); + fnameAttr = LFindAttribute(twin->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(fnameAttr, ""); + const std::string &fnameStr = GlobalTables::GetStrTable().GetStringFromStrIdx(fnameAttr->GetId()); + auto *res = memPool->New(fnameStr.c_str(), labelIdx, *memPool); + cfi::CFIOpndEmitVisitor cfiVisitor(*this); + res->Accept(cfiVisitor); + } + } else if (attrName == DW_AT_high_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); + } + } else { + Emit("XXX--ADDR--XXX"); + } + break; + case DW_FORM_ref4: + if (attrName == DW_AT_type) { + DBGDie *die0 = di->GetDie(static_cast(attr->GetU())); + if (die0->GetOffset()) { + EmitHexUnsigned(die0->GetOffset()); + } else { + /* unknown type, missing mplt */ + EmitHexUnsigned(di->GetDummyTypeDie()->GetOffset()); + Emit(CMNT "Warning: dummy type used"); + } + } else if (attrName == DW_AT_specification || attrName == DW_AT_sibling) { + DBGDie *die0 = di->GetDie(static_cast(attr->GetU())); + DEBUG_ASSERT(die0->GetOffset(), ""); + EmitHexUnsigned(die0->GetOffset()); + } else if (attrName == DW_AT_object_pointer) { + GStrIdx thisIdx = GlobalTables::GetStrTable().GetStrIdxFromName(kDebugMapleThis); + DBGDie *that = LFindChildDieWithName(die, DW_TAG_formal_parameter, thisIdx); + /* need to find the this or self based on the source language + what is the name for 'this' used in mapleir? 
+ this has to be with respect to a function */ + if (that) { + EmitHexUnsigned(that->GetOffset()); + } else { + EmitHexUnsigned(attr->GetU()); + } + } else { + Emit(" OFFSET "); + EmitHexUnsigned(attr->GetU()); + } + break; + case DW_FORM_exprloc: { + DBGExprLoc *elp = attr->GetPtr(); + switch (elp->GetOp()) { + case DW_OP_call_frame_cfa: + EmitHexUnsigned(1); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + break; + case DW_OP_addr: + EmitHexUnsigned(k9ByteSize); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + Emit("\n\t.8byte "); + (void)Emit(GlobalTables::GetStrTable().GetStringFromStrIdx( + static_cast(elp->GetGvarStridx())).c_str()); + break; + case DW_OP_fbreg: + EmitHexUnsigned(1 + namemangler::GetSleb128Size(elp->GetFboffset())); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + Emit("\n\t.sleb128 "); + EmitDecSigned(elp->GetFboffset()); + break; + default: + EmitHexUnsigned(uintptr_t(elp)); + break; + } + } break; + default: + CHECK_FATAL(maple::GetDwFormName(attr->GetDwForm()) != nullptr, + "GetDwFormName return null in Emitter::EmitDIAttrValue"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(attr->GetDwForm()) << std::endl; + DEBUG_ASSERT(0, "NYI"); + } +} + +void Emitter::EmitDIDebugInfoSection(DebugInfo *mirdi) { + /* From DWARF Standard Specification V4. 7.5.1 + collect section size */ + Emit("\t.section\t.debug_info,\"\",@progbits\n"); + /* label to mark start of the .debug_info section */ + Emit(".L" XSTR(DEBUG_INFO_0) ":\n"); + /* $ 7.5.1.1 */ + Emit("\t.4byte\t"); + EmitHexUnsigned(mirdi->GetDebugInfoLength()); + Emit(CMNT "section length\n"); + /* DWARF version. uhalf. */ + Emit("\t.2byte\t"); + /* 4 for version 4. */ + EmitHexUnsigned(kDwarfVersion); + Emit("\n"); + /* debug_abbrev_offset. 4byte for 32-bit, 8byte for 64-bit */ + Emit("\t.4byte\t.L" XSTR(DEBUG_ABBREV_0) "\n"); + /* address size. ubyte */ + Emit("\t.byte\t"); + EmitHexUnsigned(kSizeOfPTR); + Emit("\n"); + /* + * 7.5.1.2 type unit header + * currently empty... 
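+ * (a DWARF v4 type unit header would carry: unit length, version, abbrev
+ * offset, address size, an 8-byte type signature and a type offset)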
+ * + * 7.5.2 Debugging Information Entry (DIE) + */ + Emitter *emitter = this; + MapleVector &abbrevVec = mirdi->GetAbbrevVec(); + ApplyInPrefixOrder(mirdi->GetCompUnit(), [&abbrevVec, &emitter, &mirdi](DBGDie *die) { + if (!die) { + /* emit the null entry and return */ + emitter->Emit("\t.byte 0x0\n"); + return; + } + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + if (verbose) { + emitter->Emit("\n"); + } + emitter->Emit("\t.uleb128 "); + emitter->EmitHexUnsigned(die->GetAbbrevId()); + if (verbose) { + emitter->Emit(CMNT); + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "GetDwTagName(die->GetTag()) return null in Emitter::EmitDIDebugInfoSection"); + emitter->Emit(maple::GetDwTagName(die->GetTag())); + emitter->Emit(" Offset= "); + emitter->EmitHexUnsigned(die->GetOffset()); + emitter->Emit(" ("); + emitter->EmitDecUnsigned(die->GetOffset()); + emitter->Emit(" ), Size= "); + emitter->EmitHexUnsigned(die->GetSize()); + emitter->Emit(" ("); + emitter->EmitDecUnsigned(die->GetSize()); + emitter->Emit(" )\n"); + } else { + emitter->Emit("\n"); + } + DBGAbbrevEntry *diae = LFindAbbrevEntry(abbrevVec, die->GetAbbrevId()); + CHECK_FATAL(diae != nullptr, "diae is null in Emitter::EmitDIDebugInfoSection"); + MapleVector &apl = diae->GetAttrPairs(); /* attribute pair list */ + + std::string sfile, spath; + if (diae->GetTag() == DW_TAG_compile_unit && sfile.empty()) { + /* get full source path from fileMap[2] */ + if (emitter->GetFileMap().size() > k2ByteSize) { /* have src file map */ + std::string srcPath = emitter->GetFileMap()[k2ByteSize]; + size_t t = srcPath.rfind("/"); + DEBUG_ASSERT(t != std::string::npos, ""); + sfile = srcPath.substr(t + 1); + spath = srcPath.substr(0, t); + } + } + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + DBGDieAttr *attr = LFindAttribute(die->GetAttrVec(), DwAt(apl[i])); + if (!LShouldEmit(unsigned(apl[i + 1]))) { + continue; + } + /* update DW_AT_name and DW_AT_comp_dir attrs under DW_TAG_compile_unit + to be C/C++ */ + if (!sfile.empty()) { + if (attr->GetDwAt() == DW_AT_name) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(sfile).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } else if (attr->GetDwAt() == DW_AT_comp_dir) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(spath).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } + } + emitter->Emit("\t"); + emitter->EmitDIFormSpecification(unsigned(apl[i + 1])); + emitter->EmitDIAttrValue(die, attr, unsigned(apl[i]), diae->GetTag(), mirdi); + if (verbose) { + emitter->Emit(CMNT); + emitter->Emit(maple::GetDwAtName(unsigned(apl[i]))); + emitter->Emit(" : "); + emitter->Emit(maple::GetDwFormName(unsigned(apl[i + 1]))); + if (apl[i + 1] == DW_FORM_strp || apl[i + 1] == DW_FORM_string) { + emitter->Emit(" : "); + emitter->Emit(GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()).c_str()); + } else if (apl[i] == DW_AT_data_member_location) { + emitter->Emit(" : "); + emitter->Emit(apl[i + 1]).Emit(" attr= "); + emitter->EmitHexUnsigned(uintptr_t(attr)); + } + } + emitter->Emit("\n"); + } + }); +} + +void Emitter::EmitDIDebugAbbrevSection(DebugInfo *mirdi) { + Emit("\t.section\t.debug_abbrev,\"\",@progbits\n"); + Emit(".L" XSTR(DEBUG_ABBREV_0) ":\n"); + + /* construct a list of DI abbrev entries + 1. DW_TAG_compile_unit 0x11 + 2. 
DW_TAG_subprogram 0x2e */ + bool verbose = GetCG()->GenerateVerboseAsm(); + for (DBGAbbrevEntry *diae : mirdi->GetAbbrevVec()) { + if (!diae) { + continue; + } + /* ID */ + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(diae->GetAbbrevId()); + if (verbose) { + Emit(CMNT "Abbrev Entry ID"); + } + Emit("\n"); + /* TAG */ + Emit("\t.uleb128 "); + EmitHexUnsigned(diae->GetTag()); + CHECK_FATAL(maple::GetDwTagName(diae->GetTag()) != nullptr, + "GetDwTagName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwTagName(diae->GetTag())); + } + Emit("\n"); + + MapleVector &apl = diae->GetAttrPairs(); /* attribute pair list */ + /* children? */ + Emit("\t.byte "); + EmitHexUnsigned(diae->GetWithChildren()); + if (verbose) { + Emit(diae->GetWithChildren() ? CMNT "DW_CHILDREN_yes" : CMNT "DW_CHILDREN_no"); + } + Emit("\n"); + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + Emit("\t.uleb128 "); + EmitHexUnsigned(apl[i]); + CHECK_FATAL(maple::GetDwAtName(unsigned(apl[i])) != nullptr, + "GetDwAtName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwAtName(unsigned(apl[i]))); + } + Emit("\n"); + Emit("\t.uleb128 "); + EmitHexUnsigned(apl[i + 1]); + CHECK_FATAL(maple::GetDwFormName(unsigned(apl[i + 1])) != nullptr, + "GetDwFormName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwFormName(unsigned(apl[i + 1]))); + } + Emit("\n"); + } + /* end of an abbreviation record */ + Emit("\t.byte 0x0\n"); + Emit("\t.byte 0x0\n"); + } + Emit("\t.byte 0x0\n"); +} + +void Emitter::EmitDIDebugARangesSection() { + Emit("\t.section\t.debug_aranges,\"\",@progbits\n"); +} + +void Emitter::EmitDIDebugRangesSection() { + Emit("\t.section\t.debug_ranges,\"\",@progbits\n"); +} + +void Emitter::EmitDIDebugLineSection() { + Emit("\t.section\t.debug_line,\"\",@progbits\n"); + Emit(".L" XSTR(DEBUG_LINE_0) ":\n"); +} + +void Emitter::EmitDIDebugStrSection() { + Emit("\t.section\t.debug_str,\"MS\",@progbits,1\n"); + for (auto it : GetCG()->GetMIRModule()->GetDbgInfo()->GetStrps()) { + Emit(".L" XSTR(DEBUG_STR_LABEL)); + fileStream << it; + Emit(":\n"); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it); + Emit("\t.string \"").Emit(name).Emit("\"\n"); + } +} + +void Emitter::FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr) { + DEBUG_ASSERT(byteSizeAttr->GetDwForm() == DW_FORM_data1 || byteSizeAttr->GetDwForm() == DW_FORM_data2 || + byteSizeAttr->GetDwForm() == DW_FORM_data4 || byteSizeAttr->GetDwForm() == DW_FORM_data8, + "Unknown FORM value for DW_AT_byte_size"); + if (static_cast(byteSizeAttr->GetI()) == kDbgDefaultVal) { + /* get class size */ + DBGDieAttr *nameAttr = LFindDieAttr(die, DW_AT_name); + CHECK_FATAL(nameAttr != nullptr, "name_attr is nullptr in Emitter::FillInClassByteSize"); + /* hope this is a global string index as it is a type name */ + TyIdx tyIdx = + GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(GStrIdx(nameAttr->GetId())); + CHECK_FATAL(tyIdx.GetIdx() < Globals::GetInstance()->GetBECommon()->GetSizeOfTypeSizeTable(), + "index out of range in Emitter::FillInClassByteSize"); + int64_t byteSize = static_cast(Globals::GetInstance()->GetBECommon()->GetTypeSize(tyIdx.GetIdx())); + LUpdateAttrValue(byteSizeAttr, byteSize); + } +} + +void Emitter::SetupDBGInfo(DebugInfo *mirdi) { + Emitter *emitter = this; + 
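/* walk the DIE tree: give each subprogram a CFA-based DW_AT_frame_base, fill in class byte sizes and member offsets, then compute every DIE's size and offset */ +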
MapleVector &abbrevVec = mirdi->GetAbbrevVec(); + ApplyInPrefixOrder(mirdi->GetCompUnit(), [&abbrevVec, &emitter](DBGDie *die) { + if (!die) { + return; + } + + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "maple::GetDwTagName(die->GetTag()) is nullptr in Emitter::SetupDBGInfo"); + if (die->GetAbbrevId() == 0) { + LogInfo::MapleLogger() << maple::GetDwTagName(die->GetTag()) << std::endl; + } + CHECK_FATAL(die->GetAbbrevId() < abbrevVec.size(), "index out of range in Emitter::SetupDBGInfo"); + DEBUG_ASSERT(abbrevVec[die->GetAbbrevId()]->GetAbbrevId() == die->GetAbbrevId(), ""); + DBGAbbrevEntry *diae = abbrevVec[die->GetAbbrevId()]; + switch (diae->GetTag()) { + case DW_TAG_subprogram: { + DBGExprLoc *exprloc = emitter->memPool->New(emitter->GetCG()->GetMIRModule()); + exprloc->GetSimpLoc()->SetDwOp(DW_OP_call_frame_cfa); + die->SetAttr(DW_AT_frame_base, exprloc); + } break; + case DW_TAG_structure_type: + case DW_TAG_union_type: + case DW_TAG_class_type: + case DW_TAG_interface_type: { + DBGDieAttr *byteSizeAttr = LFindDieAttr(die, DW_AT_byte_size); + if (byteSizeAttr) { + emitter->FillInClassByteSize(die, byteSizeAttr); + } + /* get the name */ + DBGDieAttr *atName = LFindDieAttr(die, DW_AT_name); + CHECK_FATAL(atName != nullptr, "at_name is null in Emitter::SetupDBGInfo"); + /* get the type from string name */ + TyIdx ctyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(GStrIdx(atName->GetId())); + MIRType *mty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ctyIdx); + MIRStructType *sty = static_cast(mty); + CHECK_FATAL(sty != nullptr, "pointer cast failed"); + CHECK_FATAL(sty->GetTypeIndex().GetIdx() < + Globals::GetInstance()->GetBECommon()->GetSizeOfStructFieldCountTable(), ""); + uint32 embeddedIDs = 0; + MIRStructType *prevSubstruct = nullptr; + for (size_t i = 0; i < sty->GetFields().size(); i++) { + TyIdx fieldtyidx = sty->GetFieldsElemt(i).second.first; + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); + if (prevSubstruct) { + embeddedIDs += static_cast(Globals::GetInstance()->GetBECommon()->GetStructFieldCount( + static_cast(prevSubstruct->GetTypeIndex().GetIdx()))); + } + prevSubstruct = fieldty->EmbeddedStructType(); + FieldID fieldID = static_cast(i + embeddedIDs) + 1; + int offset = Globals::GetInstance()->GetBECommon()->GetFieldOffset(*sty, fieldID).first; + GStrIdx fldName = sty->GetFieldsElemt(i).first; + DBGDie *cdie = LFindChildDieWithName(die, DW_TAG_member, fldName); + CHECK_FATAL(cdie != nullptr, "cdie is null in Emitter::SetupDBGInfo"); + DBGDieAttr *mloc = LFindDieAttr(cdie, DW_AT_data_member_location); + CHECK_FATAL(mloc != nullptr, "mloc is null in Emitter::SetupDBGInfo"); + DBGAbbrevEntry *childDiae = abbrevVec[cdie->GetAbbrevId()]; + CHECK_FATAL(childDiae != nullptr, "child_diae is null in Emitter::SetupDBGInfo"); + LUpdateAttrValue(mloc, offset); + } + } break; + default: + break; + } + }); + + /* compute DIE sizes and offsets */ + mirdi->ComputeSizeAndOffsets(); +} + +void Emitter::EmitAliasAndRef(const MIRSymbol &sym) { + MIRFunction *mFunc = sym.GetFunction(); + if (mFunc == nullptr || !mFunc->GetAttr(FUNCATTR_alias)) { + return; + } + if (mFunc->GetAttr(FUNCATTR_extern)) { + Emit(asmInfo->GetGlobal()).Emit(mFunc->GetName()).Emit("\n"); + } + auto &aliasPrefix = mFunc->GetAttr(FUNCATTR_weakref) ? 
asmInfo->GetWeakref() : asmInfo->GetSet();
+  Emit(aliasPrefix);
+  Emit(sym.GetName()).Emit(",").Emit(mFunc->GetAttrs().GetAliasFuncName()).Emit("\n");
+}
+
+void Emitter::EmitHugeSoRoutines(bool lastRoutine) {
+  if (!lastRoutine && (javaInsnCount < (static_cast<uint64>(hugeSoSeqence) *
+                                        static_cast<uint64>(kHugeSoInsnCountThreshold)))) {
+    return;
+  }
+  for (auto &target : hugeSoTargets) {
+    (void)Emit("\t.section\t." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n");
+#if TARGX86 || TARGX86_64
+    Emit("\t.align\t8\n");
+#else
+    Emit("\t.align 3\n");
+#endif
+    std::string routineName = target + HugeSoPostFix();
+    Emit("\t.type\t" + routineName + ", %function\n");
+    Emit(routineName + ":\n");
+    Emit("\tadrp\tx17, :got:" + target + "\n");
+    Emit("\tldr\tx17, [x17, :got_lo12:" + target + "]\n");
+    Emit("\tbr\tx17\n");
+    javaInsnCount += kSizeOfHugesoRoutine;
+  }
+  hugeSoTargets.clear();
+  ++hugeSoSeqence;
+}
+
+void ImmOperand::Dump() const {
+  LogInfo::MapleLogger() << "imm:" << value;
+}
+
+void LabelOperand::Dump() const {
+  LogInfo::MapleLogger() << "label:" << labelIndex;
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/global.cpp b/ecmascript/mapleall/maple_be/src/cg/global.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3801745db379d8eb1ff5c9c1a12b432f06c55f81
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/global.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#if TARGAARCH64
+#include "aarch64_global.h"
+#elif TARGRISCV64
+#include "riscv64_global.h"
+#endif
+#if TARGARM32
+#include "arm32_global.h"
+#endif
+#include "reaching.h"
+#include "cgfunc.h"
+#include "live.h"
+/*
+ * This phase does several optimizations using the use-def and def-use chains;
+ * each function called from Run() is one optimization. It mainly covers two parts:
+ * 1. compute the number of valid bits of a register from its defining insn,
+ *    then use that information to delete redundant insns.
+ * 2. copy propagation:
+ *   a. forward copy propagation
+ *      aims to rewrite the following:
+ *        mov x100, x200;
+ *        BBs:
+ *        ...
+ *        mOp ..., x100   // multiple sites that use x100
+ *      =>
+ *        mov x200, x200  // the copy degenerates to a self-move
+ *        BBs:
+ *        ...
+ *        mOp ..., x200   // the sites that used x100 now read x200
+ *   b. backward copy propagation
+ *      aims to rewrite the following:
+ *        mOp x200, ...   // defining insn of x200
+ *        ...
+ *        mOp ..., x200   // use site of x200
+ *        mov x100, x200;
+ *      =>
+ *        mOp x100, ...   // now defines x100 directly
+ *        ...
+ *        mOp ..., x100   // use site reads x100
+ *        mov x100, x100; // the copy degenerates to a self-move
+ *
+ * NOTE: after an insn is modified, the UD-chain and DU-chain must be maintained by the caller.
+ * Several common interfaces are provided in RD, but they must be used with care;
+ * specific instructions for their use can be found at the beginning of the corresponding function.
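+ *
+ * e.g. for part 1: if w100 is defined by "uxtb w100, w101", only its low 8 bits
+ * can be non-zero, so a later "and w102, w100, #255" changes nothing and can be deleted.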
+ */
+namespace maplebe {
+using namespace maple;
+
+bool CgGlobalOpt::PhaseRun(maplebe::CGFunc &f) {
+  ReachingDefinition *reachingDef = nullptr;
+  LiveAnalysis *live = nullptr;
+  if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) {
+    reachingDef = GET_ANALYSIS(CgReachingDefinition, f);
+    live = GET_ANALYSIS(CgLiveAnalysis, f);
+  }
+  if (reachingDef == nullptr || !f.GetRDStatus()) {
+    GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgReachingDefinition::id);
+    return false;
+  }
+  reachingDef->SetAnalysisMode(kRDAllAnalysis);
+  GlobalOpt *globalOpt = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+  globalOpt = GetPhaseAllocator()->New<AArch64GlobalOpt>(f);
+#endif
+#if TARGARM32
+  globalOpt = GetPhaseAllocator()->New<Arm32GlobalOpt>(f);
+#endif
+  globalOpt->Run();
+  if (live != nullptr) {
+    live->ClearInOutDataInfo();
+  }
+  return true;
+}
+
+void CgGlobalOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<CgReachingDefinition>();
+  aDep.AddRequired<CgLiveAnalysis>();
+  aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgGlobalOpt, globalopt)
+
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/ico.cpp b/ecmascript/mapleall/maple_be/src/cg/ico.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..078260aeb49139edc3cb2ea4af4f2c358ff92cfd
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/ico.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "ico.h" +#include "cg_option.h" +#ifdef TARGAARCH64 +#include "aarch64_ico.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#elif TARGRISCV64 +#include "riscv64_ico.h" +#include "riscv64_isa.h" +#include "riscv64_insn.h" +#elif TARGARM32 +#include "arm32_ico.h" +#include "arm32_isa.h" +#include "arm32_insn.h" +#endif +#include "cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +#define ICO_DUMP_NEWPM CG_DEBUG_FUNC(f) +namespace maplebe { +Insn *ICOPattern::FindLastCmpInsn(BB &bb) const { + if (bb.GetKind() != BB::kBBIf) { + return nullptr; + } + FOR_BB_INSNS_REV(insn, (&bb)) { + if (cgFunc->GetTheCFG()->GetInsnModifier()->IsCompareInsn(*insn)) { + return insn; + } + } + return nullptr; +} + +std::vector ICOPattern::GetLabelOpnds(Insn &insn) const { + std::vector labelOpnds; + for (uint32 i = 0; i < insn.GetOperandSize(); i++) { + if (insn.GetOperand(i).IsLabelOpnd()) { + labelOpnds.emplace_back(static_cast(&insn.GetOperand(i))); + } + } + return labelOpnds; +} + +bool CgIco::PhaseRun(maplebe::CGFunc &f) { + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + if (ICO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ico-before", f, f.GetMirModule()); + } + MemPool *memPool = GetPhaseMemPool(); + IfConversionOptimizer *ico = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ico = memPool->New(f, *memPool); +#endif +#if TARGARM32 + ico = memPool->New(f, *memPool); +#endif + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + std::string name = funcClass + funcName; + ico->Run(name); + if (ICO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ico-after", f, f.GetMirModule()); + } + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return false; +} +void CgIco::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgIco, ico) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/ifile.cpp b/ecmascript/mapleall/maple_be/src/cg/ifile.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bcdcb6b32fbba8d55e7afa5f819341a2d97ddc63 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/ifile.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_SECTION_H +#define MAPLEBE_INCLUDE_CG_SECTION_H + +#include "ifile.h" +#include "obj_emit.h" + +namespace maplebe { +Section::Section(std::string secName, Word type, Word flags, uint32 align, ObjEmitter &objEmitter, MemPool &memPool) + : emitter(objEmitter), name(secName, &memPool) { + secHeader.sh_type = type; + secHeader.sh_flags = flags; + secHeader.sh_addralign = align; + emitter.RegisterSection(this); +} + +void Section::Layout() { + emitter.UpdateSectionOffsetAddr(this); + GenerateData(); + // emitter.EmitData(this); + HandleLocalFixup(); + emitter.UpdateGlobalOffsetAddr(this); +} + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_SECTION_H */ diff --git a/ecmascript/mapleall/maple_be/src/cg/insn.cpp b/ecmascript/mapleall/maple_be/src/cg/insn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0eeba79e3e04873e608bded99dfb68fe835b240d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/insn.cpp @@ -0,0 +1,387 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "insn.h" +#include "isa.h" +#include "cg.h" +namespace maplebe { +bool Insn::IsMachineInstruction() const { + return md && md->IsPhysicalInsn() && Globals::GetInstance()->GetTarget()->IsTargetInsn(mOp); +} +/* phi is not physical insn */ +bool Insn::IsPhi() const { + return md ? 
md->IsPhi() : false; +} +bool Insn::IsLoad() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsLoad(); +} +bool Insn::IsStore() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsStore(); +} +bool Insn::IsMove() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsMove(); +} +bool Insn::IsBranch() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsBranch(); +} +bool Insn::IsCondBranch() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsCondBranch(); +} +bool Insn::IsUnCondBranch() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsUnCondBranch(); +} +bool Insn::IsBasicOp() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsBasicOp(); +} +bool Insn::IsConversion() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsConversion(); +} +bool Insn::IsUnaryOp() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsUnaryOp(); +} +bool Insn::IsShift() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsShift(); +} +bool Insn::IsCall() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsCall(); +} +bool Insn::IsTailCall() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsTailCall(); +} +bool Insn::IsAsmInsn() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsInlineAsm(); +} +bool Insn::IsDMBInsn() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsDMB(); +} +bool Insn::IsAtomic() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsAtomic(); +} +bool Insn::IsVolatile() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsVolatile(); +} +bool Insn::IsMemAccessBar() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsMemAccessBar(); +} +bool Insn::IsMemAccess() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsMemAccess(); +} +bool Insn::CanThrow() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->CanThrow(); +} +bool Insn::IsVectorOp() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsVectorOp(); +} +bool Insn::HasLoop() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->HasLoop(); +} +uint32 Insn::GetLatencyType() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->GetLatencyType(); +} +uint32 Insn::GetAtomicNum() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->GetAtomicNum(); +} +bool Insn::IsSpecialIntrinsic() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsSpecialIntrinsic(); +} +bool Insn::IsLoadPair() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsLoadPair(); +} +bool Insn::IsStorePair() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsStorePair(); +} +bool Insn::IsLoadStorePair() const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsLoadStorePair(); +} +bool Insn::IsLoadLabel() const { + return md->IsLoad() && GetOperand(kInsnSecondOpnd).GetKind() == Operand::kOpdBBAddress; +} +bool Insn::OpndIsDef(uint32 id) const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->GetOpndDes(id)->IsDef(); +} +bool Insn::OpndIsUse(uint32 id) const { + DEBUG_ASSERT(md, " set insnDescription for insn "); + 
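/* def/use roles are recorded per operand index in the insn's machine description (InsnDesc) */ +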
return md->GetOpndDes(id)->IsUse(); +} +bool Insn::IsClinit() const { + return Globals::GetInstance()->GetTarget()->IsClinitInsn(mOp); +} +bool Insn::IsComment() const { + return mOp == abstract::MOP_comment && !md->IsPhysicalInsn(); +} + +bool Insn::IsImmaterialInsn() const { + return IsComment(); +} + +bool Insn::IsPseudo() const { + return md && md->IsPhysicalInsn() && Globals::GetInstance()->GetTarget()->IsPseudoInsn(mOp); +} + +Operand *Insn::GetMemOpnd() const { + for (uint32 i = 0; i < opnds.size(); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + return &opnd; + } + } + return nullptr; +} +void Insn::SetMemOpnd(MemOperand *memOpnd) { + for (uint32 i = 0; i < static_cast(opnds.size()); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + SetOperand(i, *memOpnd); + return; + } + } +} + +bool Insn::IsRegDefined(regno_t regNO) const { + return GetDefRegs().count(regNO); +} + +std::set Insn::GetDefRegs() const { + std::set defRegNOs; + size_t opndNum = opnds.size(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = GetOperand(i); + auto *regProp = md->opndMD[i]; + bool isDef = regProp->IsDef(); + if (!isDef && !opnd.IsMemoryAccessOperand()) { + continue; + } + if (opnd.IsList()) { + for (auto *op : static_cast(opnd).GetOperands()) { + DEBUG_ASSERT(op != nullptr, "invalid operand in list operand"); + defRegNOs.emplace(op->GetRegisterNumber()); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + DEBUG_ASSERT(!defRegNOs.count(base->GetRegisterNumber()), "duplicate def in one insn"); + defRegNOs.emplace(base->GetRegisterNumber()); + } + } + } else if (opnd.IsConditionCode() || opnd.IsRegister()) { + defRegNOs.emplace(static_cast(opnd).GetRegisterNumber()); + } + } + return defRegNOs; +} + +#if DEBUG +void Insn::Check() const { + if (!md) { + CHECK_FATAL(false, " need machine description for target insn "); + } + /* check if the number of operand(s) matches */ + uint32 insnOperandSize = GetOperandSize(); + if (insnOperandSize != md->GetOpndMDLength()) { + CHECK_FATAL(false, " the number of operands in instruction does not match machine description "); + } + /* check if the type of each operand matches */ + for (uint32 i = 0; i < insnOperandSize; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != md->GetOpndDes(i)->GetOperandType()) { + CHECK_FATAL(false, " operand type does not match machine description "); + } + } +} +#endif + +Insn *Insn::Clone(MemPool &memPool) const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *Insn::GetCallTargetOperand() const { + DEBUG_ASSERT(IsCall() || IsTailCall(), "should be call"); + return &GetOperand(kInsnFirstOpnd); +} + +ListOperand *Insn::GetCallArgumentOperand() { + DEBUG_ASSERT(IsCall(), "should be call"); + DEBUG_ASSERT(GetOperand(1).IsList(), "should be list"); + return &static_cast(GetOperand(kInsnSecondOpnd)); +} + + +void Insn::CommuteOperands(uint32 dIndex, uint32 sIndex) { + Operand *tempCopy = opnds[sIndex]; + opnds[sIndex] = opnds[dIndex]; + opnds[dIndex] = tempCopy; +} + +uint32 Insn::GetBothDefUseOpnd() const { + size_t opndNum = opnds.size(); + uint32 opndIdx = kInsnMaxOpnd; + if (md->GetAtomicNum() > 1) { + return opndIdx; + } + for (uint32 i = 0; i < opndNum; ++i) { + auto *opndProp = md->GetOpndDes(i); + if 
(opndProp->IsRegUse() && opndProp->IsDef()) { + DEBUG_ASSERT(opndIdx == kInsnMaxOpnd, "Do not support yet"); + opndIdx = i; + } + if (opnds[i]->IsMemoryAccessOperand()) { + auto *MemOpnd = static_cast(opnds[i]); + if (!MemOpnd->IsIntactIndexed()) { + DEBUG_ASSERT(opndIdx == kInsnMaxOpnd, "Do not support yet"); + opndIdx = i; + } + } + } + return opndIdx; +} + +uint32 Insn::GetMemoryByteSize() const { + DEBUG_ASSERT(IsMemAccess(), "must be memory access insn"); + uint32 res = 0; + for (size_t i = 0 ; i < opnds.size(); ++i) { + if (md->GetOpndDes(i)->GetOperandType() == Operand::kOpdMem) { + res = md->GetOpndDes(i)->GetSize(); + } + } + DEBUG_ASSERT(res, "cannot access empty memory"); + if (IsLoadStorePair()) { + res = res << 1; + } + res = res >> 3; + return res; +} + +bool Insn::ScanReg(regno_t regNO) const { + uint32 opndNum = GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + auto *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + return true; + } + } else if (opnd.IsRegister()) { + if (static_cast(opnd).GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +bool Insn::MayThrow() const { + if (md->IsMemAccess() && !IsLoadLabel()) { + auto *memOpnd = static_cast(GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "CG invalid memory operand."); + if (memOpnd->IsStackMem()) { + return false; + } + } + return md->CanThrow(); +} + +void Insn::SetMOP(const InsnDesc &idesc) { + mOp = idesc.GetOpc(); + md = &idesc; +} + +void Insn::Dump() const { + DEBUG_ASSERT(md != nullptr, "md should not be nullptr"); + LogInfo::MapleLogger() << "< " << GetId() << " > "; + LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; + + for (uint32 i = 0; i < GetOperandSize(); ++i) { + Operand &opnd = GetOperand(i); + LogInfo::MapleLogger() << " (opnd" << i << ": "; + Globals::GetInstance()->GetTarget()->DumpTargetOperand(opnd, *md->GetOpndDes(i)); + LogInfo::MapleLogger() << ")"; + } + + if (IsVectorOp()) { + auto *vInsn = static_cast(this); + if (vInsn->GetNumOfRegSpec() != 0) { + LogInfo::MapleLogger() << " (vecSpec: " << vInsn->GetNumOfRegSpec() << ")"; + } + } + if (!deoptVreg2Opnd.empty()) { + LogInfo::MapleLogger() << " (deopt: "; + bool isFirstElem = true; + for (const auto &elem : deoptVreg2Opnd) { + if (!isFirstElem) { + LogInfo::MapleLogger() << ", "; + } else { + isFirstElem = false; + } + LogInfo::MapleLogger() << elem.first << ":"; + elem.second->Dump(); + } + LogInfo::MapleLogger() << ")"; + } + LogInfo::MapleLogger() << "\n"; +} + +VectorRegSpec *VectorInsn::GetAndRemoveRegSpecFromList() { + if (regSpecList.size() == 0) { + VectorRegSpec *vecSpec = CG::GetCurCGFuncNoConst()->GetMemoryPool()->New(); + return vecSpec; + } + VectorRegSpec *ret = regSpecList.back(); + regSpecList.pop_back(); + return ret; +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/instruction_selection.cpp b/ecmascript/mapleall/maple_be/src/cg/instruction_selection.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..9d68bb7fec24e9e9d58d2e605f33df0b486e4156
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/instruction_selection.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#include "instruction_selection.h"
+namespace maplebe {
+bool CgIsel::PhaseRun(maplebe::CGFunc &f) {
+  (void)f; return false;  /* abstract instruction selection is not implemented yet; report "no change" */
+}
+} /* namespace maplebe */
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_be/src/cg/isa.cpp b/ecmascript/mapleall/maple_be/src/cg/isa.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b24baa437724f3d62bca0bf1957afcba03e96a21
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/isa.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#include "isa.h"
+namespace maplebe {
+#define DEFINE_MOP(op, ...) const OpndDesc OpndDesc::op = __VA_ARGS__;
+#include "operand.def"
+#undef DEFINE_MOP
+#define DEFINE_MOP(op, ...) {abstract::op, __VA_ARGS__},
+const InsnDesc InsnDesc::abstractId[abstract::kMopLast] = {
+#include "abstract_mmir.def"
+};
+#undef DEFINE_MOP
+
+bool InsnDesc::IsSame(const InsnDesc &left,
+                      std::function<bool(const InsnDesc&, const InsnDesc&)> cmp) const {
+  return cmp == nullptr ? false : cmp(left, *this);
+}
+}
diff --git a/ecmascript/mapleall/maple_be/src/cg/isel.cpp b/ecmascript/mapleall/maple_be/src/cg/isel.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..29bd7184c66d6b38d28b5bcecabf1e5239cbed85
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/isel.cpp
@@ -0,0 +1,1390 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +#include "isel.h" +#include "factory.h" +#include "cg.h" +#include "standardize.h" +#include +#include + +namespace maplebe { +/* register, imm , memory, cond */ +#define DEF_FAST_ISEL_MAPPING_INT(SIZE) \ +MOperator fastIselMapI##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ +{abstract::MOP_copy_rr_##SIZE, abstract::MOP_copy_ri_##SIZE, abstract::MOP_load_##SIZE, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_str_##SIZE, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +}; +#define DEF_FAST_ISEL_MAPPING_FLOAT(SIZE) \ +MOperator fastIselMapF##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ +{abstract::MOP_copy_ff_##SIZE, abstract::MOP_copy_fi_##SIZE, abstract::MOP_load_f_##SIZE, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_str_f_##SIZE, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +}; + +DEF_FAST_ISEL_MAPPING_INT(8) +DEF_FAST_ISEL_MAPPING_INT(16) +DEF_FAST_ISEL_MAPPING_INT(32) +DEF_FAST_ISEL_MAPPING_INT(64) +DEF_FAST_ISEL_MAPPING_FLOAT(8) +DEF_FAST_ISEL_MAPPING_FLOAT(16) +DEF_FAST_ISEL_MAPPING_FLOAT(32) +DEF_FAST_ISEL_MAPPING_FLOAT(64) + +#define DEF_SEL_MAPPING_TBL(SIZE) \ +MOperator SelMapping##SIZE(bool isInt, uint32 x, uint32 y) { \ + return isInt ? fastIselMapI##SIZE[x][y] : fastIselMapF##SIZE[x][y]; \ +} +#define USE_SELMAPPING_TBL(SIZE) \ +{SIZE, SelMapping##SIZE} + +DEF_SEL_MAPPING_TBL(8); +DEF_SEL_MAPPING_TBL(16); +DEF_SEL_MAPPING_TBL(32); +DEF_SEL_MAPPING_TBL(64); + +std::map> fastIselMappingTable = { + USE_SELMAPPING_TBL(8), + USE_SELMAPPING_TBL(16), + USE_SELMAPPING_TBL(32), + USE_SELMAPPING_TBL(64)}; + +MOperator GetFastIselMop(Operand::OperandType dTy, Operand::OperandType sTy, PrimType type) { + uint32 bitSize = GetPrimTypeBitSize(type); + bool isInteger = IsPrimitiveInteger(type); + auto tableDriven = fastIselMappingTable.find(bitSize); + if (tableDriven != fastIselMappingTable.end()) { + auto funcIt = tableDriven->second; + return funcIt(isInteger, dTy, sTy); + } else { + CHECK_FATAL(false, "unsupport type"); + } + return abstract::MOP_undef; +} + +#define DEF_EXTEND_MAPPING_TBL(TYPE) [](bool isSigned) -> MOperator { \ + return isSigned ? 
abstract::MOP_sext_rr_##TYPE : abstract::MOP_zext_rr_##TYPE; \ +} +using fromToTy = std::pair; /* std::pair */ +#define DEF_USE_EXTEND_MAPPING_TBL(FROMSIZE, TOSIZE) \ +{{k##FROMSIZE##BitSize, k##TOSIZE##BitSize}, DEF_EXTEND_MAPPING_TBL(TOSIZE##_##FROMSIZE)} + +std::map> fastCvtMappingTableI = { + DEF_USE_EXTEND_MAPPING_TBL(8, 16), /* Extend Mapping */ + DEF_USE_EXTEND_MAPPING_TBL(8, 32), + DEF_USE_EXTEND_MAPPING_TBL(8, 64), + DEF_USE_EXTEND_MAPPING_TBL(16, 32), + DEF_USE_EXTEND_MAPPING_TBL(16, 64), + DEF_USE_EXTEND_MAPPING_TBL(32, 64), +}; +#undef DEF_USE_EXTEND_MAPPING_TBL +#undef DEF_EXTEND_MAPPING_TBL + +static MOperator GetFastCvtMopI(uint32 fromSize, uint32 toSize, bool isSigned) { + if (toSize < k8BitSize || toSize > k64BitSize) { + CHECK_FATAL(false, "unsupport type"); + } + if (fromSize < k8BitSize || fromSize > k64BitSize) { + CHECK_FATAL(false, "unsupport type"); + } + /* Extend: fromSize < toSize */ + auto tableDriven = fastCvtMappingTableI.find({fromSize, toSize}); + if (tableDriven == fastCvtMappingTableI.end()) { + CHECK_FATAL(false, "unsupport cvt"); + } + MOperator mOp = tableDriven->second(isSigned); + if (mOp == abstract::MOP_undef) { + CHECK_FATAL(false, "unsupport cvt"); + } + return mOp; +} + +/* + * fast get MOperator + * such as : and, or, shl ... + */ +#define DEF_MOPERATOR_MAPPING_FUNC(TYPE) [](uint32 bitSize) -> MOperator { \ + /* 8-bits, 16-bits, 32-bits, 64-bits */ \ + constexpr static std::array fastMapping_##TYPE = \ + {abstract::MOP_##TYPE##_8, abstract::MOP_##TYPE##_16, abstract::MOP_##TYPE##_32, abstract::MOP_##TYPE##_64}; \ + return fastMapping_##TYPE[GetBitIndex(bitSize)]; \ +} + +void HandleDassign(StmtNode &stmt, MPISel &iSel) { + DEBUG_ASSERT(stmt.GetOpCode() == OP_dassign, "expect dassign"); + auto &dassignNode = static_cast(stmt); + BaseNode *rhs = dassignNode.GetRHS(); + DEBUG_ASSERT(rhs != nullptr, "get rhs of dassignNode failed"); + Operand* opndRhs = iSel.HandleExpr(dassignNode, *rhs); + if (opndRhs == nullptr) { + return; + } + iSel.SelectDassign(dassignNode, *opndRhs); +} + +void HandleDassignoff(StmtNode &stmt, MPISel &iSel) { + auto &dassignoffNode = static_cast(stmt); + BaseNode *rhs = dassignoffNode.GetRHS(); + CHECK_FATAL(rhs->GetOpCode() == OP_constval, "dassignoffNode without constval"); + Operand *opnd0 = iSel.HandleExpr(dassignoffNode, *rhs); + iSel.SelectDassignoff(dassignoffNode, *opnd0); +} + +void HandleIassign(StmtNode &stmt, MPISel &iSel) { + DEBUG_ASSERT(stmt.GetOpCode() == OP_iassign, "expect iassign"); + auto &iassignNode = static_cast(stmt); + BaseNode *rhs = iassignNode.GetRHS(); + DEBUG_ASSERT(rhs != nullptr, "null ptr check"); + Operand *opndRhs = iSel.HandleExpr(stmt, *rhs); + BaseNode *addr = iassignNode.Opnd(0); + DEBUG_ASSERT(addr != nullptr, "null ptr check"); + Operand *opndAddr = iSel.HandleExpr(stmt, *addr); + if (opndRhs == nullptr || opndAddr == nullptr) { + return; + } + if (rhs->GetPrimType() != PTY_agg) { + iSel.SelectIassign(iassignNode, *opndAddr, *opndRhs); + } else { + iSel.SelectAggIassign(iassignNode, *opndAddr, *opndRhs); + } +} + +void HandleRegassign(StmtNode &stmt, MPISel &iSel) { + DEBUG_ASSERT(stmt.GetOpCode() == OP_regassign, "expect regAssign"); + auto ®AssignNode = static_cast(stmt); + BaseNode *operand = regAssignNode.Opnd(0); + DEBUG_ASSERT(operand != nullptr, "get operand of regassignNode failed"); + Operand *opnd0 = iSel.HandleExpr(regAssignNode, *operand); + iSel.SelectRegassign(regAssignNode, *opnd0); +} + +void HandleIassignoff(StmtNode &stmt, MPISel &iSel) { + auto &iassignoffNode = 
static_cast(stmt); + iSel.SelectIassignoff(iassignoffNode); +} + +void HandleLabel(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + DEBUG_ASSERT(stmt.GetOpCode() == OP_label, "error"); + auto &label = static_cast(stmt); + BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->AddLabel(label.GetLabelIdx()); + cgFunc->SetLab2BBMap(static_cast(newBB->GetLabIdx()), *newBB); + cgFunc->SetCurBB(*newBB); +} + +void HandleGoto(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + cgFunc->UpdateFrequency(stmt); + auto &gotoNode = static_cast(stmt); + DEBUG_ASSERT(gotoNode.GetOpCode() == OP_goto, "expect goto"); + cgFunc->SetCurBBKind(BB::kBBGoto); + iSel.SelectGoto(gotoNode); + cgFunc->SetCurBB(*cgFunc->StartNewBB(gotoNode)); + DEBUG_ASSERT(&stmt == &gotoNode, "stmt must be same as gotoNoe"); + if ((gotoNode.GetNext() != nullptr) && (gotoNode.GetNext()->GetOpCode() != OP_label)) { + DEBUG_ASSERT(cgFunc->GetCurBB()->GetPrev()->GetLastStmt() == &stmt, "check the relation between BB and stmt"); + } +} + +void HandleIntrinCall(StmtNode &stmt, MPISel &iSel) { + auto &call = static_cast(stmt); + iSel.SelectIntrinCall(call); +} + +void HandleRangeGoto(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &rangeGotoNode = static_cast(stmt); + DEBUG_ASSERT(rangeGotoNode.GetOpCode() == OP_rangegoto, "expect rangegoto"); + BaseNode *srcNode = rangeGotoNode.Opnd(0); + Operand *srcOpnd = iSel.HandleExpr(rangeGotoNode, *srcNode); + cgFunc->SetCurBBKind(BB::kBBRangeGoto); + iSel.SelectRangeGoto(rangeGotoNode, *srcOpnd); +} + +void HandleIgoto(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &igotoNode = static_cast(stmt); + BaseNode *targetNode = igotoNode.Opnd(0); + Operand *targetOpnd = iSel.HandleExpr(igotoNode, *targetNode); + iSel.SelectIgoto(*targetOpnd); + cgFunc->SetCurBBKind(BB::kBBIgoto); + cgFunc->SetCurBB(*cgFunc->StartNewBB(igotoNode)); +} + +void HandleReturn(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &retNode = static_cast(stmt); + DEBUG_ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + if (retNode.NumOpnds() != 0) { + Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); + iSel.SelectReturn(retNode, *opnd); + } + iSel.SelectReturn(); + /* return stmt will jump to the ret BB, so curBB is gotoBB */ + cgFunc->SetCurBBKind(BB::kBBGoto); + cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); +} + +void HandleComment(StmtNode &stmt, MPISel &iSel) { + return; +} + +void HandleIcall(StmtNode &stmt, MPISel &iSel) { + DEBUG_ASSERT(stmt.GetOpCode() == OP_icall || stmt.GetOpCode() == OP_icallproto, "error"); + auto &iCallNode = static_cast(stmt); + Operand *opnd0 = iSel.HandleExpr(iCallNode, *iCallNode.Opnd(0)); + iSel.SelectIcall(iCallNode, *opnd0); + iSel.SelectCallCommon(stmt, iSel); +} + +void HandleCall(StmtNode &stmt, MPISel &iSel) { + DEBUG_ASSERT(stmt.GetOpCode() == OP_call, "error"); + auto &callNode = static_cast(stmt); + iSel.SelectCall(callNode); + iSel.SelectCallCommon(stmt, iSel); +} + +void HandleCondbr(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &condGotoNode = static_cast(stmt); + BaseNode *condNode = condGotoNode.Opnd(0); + DEBUG_ASSERT(condNode != nullptr, "expect first operand of cond br"); + /* select cmpOp Insn and get the result "opnd0". 
However, the opnd0 is not used + * in most backend architectures */ + Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); + iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); +} + +Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &addrofNode = static_cast(expr); + return iSel.SelectAddrof(addrofNode, parent); +} + +Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &addrofNode = static_cast(expr); + return iSel.SelectAddrofFunc(addrofNode, parent); +} + +Operand *HandleAddrofLabel(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &addrofLabelNode = static_cast(expr); + return iSel.SelectAddrofLabel(addrofLabelNode, parent); +} + +Operand *HandleShift(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectShift(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleCvt(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectCvt(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleExtractBits(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectExtractbits(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleDread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &dreadNode = static_cast(expr); + return iSel.SelectDread(parent, dreadNode); +} + +Operand *HandleAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectAdd(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBior(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBior(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBxor(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBxor(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleSub(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectSub(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleNeg(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectNeg(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleDiv(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectDiv(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleRem(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectRem(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBand(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBand(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMpy(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectMpy(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleConstStr(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &constStrNode = static_cast(expr); + return 
iSel.SelectStrLiteral(constStrNode); +} + +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &constValNode = static_cast(expr); + MIRConst *mirConst = constValNode.GetConstVal(); + DEBUG_ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); + if (mirConst->GetKind() == kConstInt) { + auto *mirIntConst = safe_cast(mirConst); + return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); + } else { + CHECK_FATAL(false, "NIY"); + } + return nullptr; +} + +Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + (void)parent; + auto ®ReadNode = static_cast(expr); + /* handle return Val */ + if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { + return &iSel.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); + } + return iSel.SelectRegread(regReadNode); +} + +Operand *HandleIread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &ireadNode = static_cast(expr); + return iSel.SelectIread(parent, ireadNode); +} +Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &ireadNode = static_cast(expr); + return iSel.SelectIreadoff(parent, ireadNode); +} + +Operand *HandleBnot(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBnot(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +void HandleEval(const StmtNode &stmt, MPISel &iSel) { + (void)iSel.HandleExpr(stmt, *static_cast(stmt).Opnd(0)); +} + +Operand *HandleDepositBits(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectDepositBits(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + // fix opnd type before select insn + PrimType targetPtyp = parent.GetPrimType(); + if (kOpcodeInfo.IsCompare(parent.GetOpCode())) { + targetPtyp = static_cast(parent).GetOpndType(); + } else if (kOpcodeInfo.IsTypeCvt(parent.GetOpCode())) { + targetPtyp = static_cast(parent).FromType(); + } + if (IsPrimitiveInteger(targetPtyp) && targetPtyp != expr.GetPrimType()) { + expr.SetPrimType(targetPtyp); + } + return iSel.SelectCmpOp(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectAlloca(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectCGArrayElemAdd(static_cast(expr), parent); +} + +void HandleAsm(StmtNode &stmt, MPISel &iSel) { + iSel.SelectAsm(static_cast(stmt)); +} + +Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + /* 0,1,2 represent the first opnd and the second opnd and the third opnd of expr */ + Operand &trueOpnd = *iSel.HandleExpr(expr, *expr.Opnd(1)); + Operand &falseOpnd = *iSel.HandleExpr(expr, *expr.Opnd(2)); + Operand &condOpnd = *iSel.HandleExpr(expr, *expr.Opnd(0)); + if (condOpnd.IsImmediate()) { + return (static_cast(condOpnd).GetValue() == 0) ? 
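+        /* Constant-condition select folds here at isel time: e.g. a lowered
+         * select(1, a, b) simply returns &trueOpnd (the operand for a) and no
+         * compare/select instructions are emitted; only non-immediate
+         * conditions reach SelectSelect below. */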
&falseOpnd : &trueOpnd; + } + return iSel.SelectSelect(static_cast(expr), condOpnd, trueOpnd, falseOpnd, parent); +} + +Operand *HandleMin(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectMin(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMax(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectMax(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} +Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectRetype(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &intrinsicopNode = static_cast(expr); + switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + return iSel.SelectBswap(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + default: + DEBUG_ASSERT(false, "NIY, unsupported intrinsicop."); + return nullptr; + } +} + +using HandleStmtFactory = FunctionFactory; +using HandleExprFactory = FunctionFactory; +namespace isel { +void InitHandleStmtFactory() { + RegisterFactoryFunction(OP_label, HandleLabel); + RegisterFactoryFunction(OP_dassign, HandleDassign); + RegisterFactoryFunction(OP_dassignoff, HandleDassignoff); + RegisterFactoryFunction(OP_iassign, HandleIassign); + RegisterFactoryFunction(OP_iassignoff, HandleIassignoff); + RegisterFactoryFunction(OP_regassign, HandleRegassign); + RegisterFactoryFunction(OP_return, HandleReturn); + RegisterFactoryFunction(OP_comment, HandleComment); + RegisterFactoryFunction(OP_call, HandleCall); + RegisterFactoryFunction(OP_icall, HandleIcall); + RegisterFactoryFunction(OP_icallproto, HandleIcall); + RegisterFactoryFunction(OP_goto, HandleGoto); + RegisterFactoryFunction(OP_intrinsiccall, HandleIntrinCall); + RegisterFactoryFunction(OP_rangegoto, HandleRangeGoto); + RegisterFactoryFunction(OP_igoto, HandleIgoto); + RegisterFactoryFunction(OP_brfalse, HandleCondbr); + RegisterFactoryFunction(OP_brtrue, HandleCondbr); + RegisterFactoryFunction(OP_eval, HandleEval); + RegisterFactoryFunction(OP_asm, HandleAsm); +} +void InitHandleExprFactory() { + RegisterFactoryFunction(OP_dread, HandleDread); + RegisterFactoryFunction(OP_add, HandleAdd); + RegisterFactoryFunction(OP_sub, HandleSub); + RegisterFactoryFunction(OP_neg, HandleNeg); + RegisterFactoryFunction(OP_mul, HandleMpy); + RegisterFactoryFunction(OP_constval, HandleConstVal); + RegisterFactoryFunction(OP_regread, HandleRegread); + RegisterFactoryFunction(OP_addrof, HandleAddrof); + RegisterFactoryFunction(OP_addroffunc, HandleAddroffunc); + RegisterFactoryFunction(OP_addroflabel, HandleAddrofLabel); + RegisterFactoryFunction(OP_shl, HandleShift); + RegisterFactoryFunction(OP_lshr, HandleShift); + RegisterFactoryFunction(OP_ashr, HandleShift); + RegisterFactoryFunction(OP_cvt, HandleCvt); + RegisterFactoryFunction(OP_zext, HandleExtractBits); + RegisterFactoryFunction(OP_sext, HandleExtractBits); + RegisterFactoryFunction(OP_extractbits, HandleExtractBits); + RegisterFactoryFunction(OP_depositbits, HandleDepositBits); + RegisterFactoryFunction(OP_band, HandleBand); + RegisterFactoryFunction(OP_bior, HandleBior); + RegisterFactoryFunction(OP_bxor, HandleBxor); + RegisterFactoryFunction(OP_iread, HandleIread); + RegisterFactoryFunction(OP_ireadoff, HandleIreadoff); + RegisterFactoryFunction(OP_bnot, 
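+    /* Dispatch sketch: every opcode registered here is later looked up by
+     * MPISel::HandleExpr via CreateProductFunction, so supporting a new
+     * expression opcode takes one handler plus one registration line; an
+     * unregistered opcode trips the CHECK_FATAL in HandleExpr instead of
+     * silently miscompiling. */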
HandleBnot); + RegisterFactoryFunction(OP_div, HandleDiv); + RegisterFactoryFunction(OP_rem, HandleRem); + RegisterFactoryFunction(OP_conststr, HandleConstStr); + RegisterFactoryFunction(OP_le, HandleCmp); + RegisterFactoryFunction(OP_ge, HandleCmp); + RegisterFactoryFunction(OP_gt, HandleCmp); + RegisterFactoryFunction(OP_lt, HandleCmp); + RegisterFactoryFunction(OP_ne, HandleCmp); + RegisterFactoryFunction(OP_eq, HandleCmp); + RegisterFactoryFunction(OP_abs, HandleAbs); + RegisterFactoryFunction(OP_alloca, HandleAlloca); + RegisterFactoryFunction(OP_CG_array_elem_add, HandleCGArrayElemAdd); + RegisterFactoryFunction(OP_select, HandleSelect); + RegisterFactoryFunction(OP_min, HandleMin); + RegisterFactoryFunction(OP_max, HandleMax); + RegisterFactoryFunction(OP_retype, HandleRetype); + RegisterFactoryFunction(OP_intrinsicop, HandleIntrinOp); +} +} + +Operand *MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) { + auto function = CreateProductFunction(expr.GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode in HandleExpr()"); + return function(parent, expr, *this); +} + +void MPISel::doMPIS() { + isel::InitHandleStmtFactory(); + isel::InitHandleExprFactory(); + StmtNode *secondStmt = HandleFuncEntry(); + for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + auto function = CreateProductFunction(stmt->GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); + function(*stmt, *this); + } + HandleFuncExit(); +} + +PrimType MPISel::GetIntegerPrimTypeFromSize(bool isSigned, uint32 bitSize) { + static constexpr std::array signedPrimType = {PTY_i8, PTY_i16, PTY_i32, PTY_i64}; + static constexpr std::array unsignedPrimType = {PTY_u8, PTY_u16, PTY_u32, PTY_u64}; + BitIndex index = GetBitIndex(bitSize); + return isSigned ? signedPrimType[index] : unsignedPrimType[index]; +} + +void MPISel::SelectCallCommon(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + if (cgFunc->GetCurBB()->GetKind() != BB::kBBFallthru) { + cgFunc->SetCurBB(*cgFunc->StartNewBB(stmt)); + } + StmtNode *prevStmt = stmt.GetPrev(); + if (prevStmt == nullptr || prevStmt->GetOpCode() != OP_catch) { + return; + } + if ((stmt.GetNext() != nullptr) && (stmt.GetNext()->GetOpCode() == OP_label)) { + cgFunc->SetCurBB(*cgFunc->StartNewBBImpl(true, stmt)); + } +} + +void MPISel::SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType) { + RegOperand &firstOpnd = SelectCopy2Reg(opnd0, primType); + RegOperand &secondOpnd = SelectCopy2Reg(opnd1, primType); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(firstOpnd).AddOpndChain(secondOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +std::pair MPISel::GetFieldIdAndMirTypeFromMirNode(const BaseNode &node) { + FieldID fieldId = 0; + MIRType *mirType = nullptr; + if (node.GetOpCode() == maple::OP_iread) { + /* mirType stored in an addr. 
*/ + auto &iread = static_cast(node); + fieldId = iread.GetFieldID(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx()); + MIRPtrType *pointerType = static_cast(type); + DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iread node"); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + if (mirType->GetKind() == kTypeArray) { + MIRArrayType *arrayType = static_cast(mirType); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + } + } else if (node.GetOpCode() == maple::OP_dassign) { + /* mirSymbol */ + auto &dassign = static_cast(node); + fieldId = dassign.GetFieldID(); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dassign.GetStIdx()); + mirType = symbol->GetType(); + } else if (node.GetOpCode() == maple::OP_dread) { + /* mirSymbol */ + auto &dread = static_cast(node); + fieldId = dread.GetFieldID(); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + mirType = symbol->GetType(); + } else if (node.GetOpCode() == maple::OP_iassign) { + auto &iassign = static_cast(node); + fieldId = iassign.GetFieldID(); + AddrofNode &addrofNode = static_cast(iassign.GetAddrExprBase()); + MIRType *iassignMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign.GetTyIdx()); + MIRPtrType *pointerType = nullptr; + if (iassignMirType->GetPrimType() == PTY_agg) { + MIRSymbol *addrSym = cgFunc->GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrofNode.GetStIdx()); + MIRType *addrMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx()); + addrMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrMirType->GetTypeIndex()); + DEBUG_ASSERT(addrMirType->GetKind() == kTypePointer, "non-pointer"); + pointerType = static_cast(addrMirType); + } else { + DEBUG_ASSERT(iassignMirType->GetKind() == kTypePointer, "non-pointer"); + pointerType = static_cast(iassignMirType); + } + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + } else { + CHECK_FATAL(false, "unsupported OpCode"); + } + return {fieldId, mirType}; +} + +MirTypeInfo MPISel::GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType) { + MirTypeInfo mirTypeInfo; + /* fixup primType and offset */ + if (fieldId != 0) { + DEBUG_ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(fieldId); + mirTypeInfo.offset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + mirTypeInfo.primType = mirType->GetPrimType(); + // aggSize for AggType + if (mirTypeInfo.primType == maple::PTY_agg) { + mirTypeInfo.size = cgFunc->GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + } + return mirTypeInfo; +} + +MirTypeInfo MPISel::GetMirTypeInfoFromMirNode(const BaseNode &node) { + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(node); + return GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); +} + +void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { + /* mirSymbol info */ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + /* Get symbol location */ + MemOperand &symbolMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID()); + /* rhs mirType info */ + PrimType rhsType = stmt.GetRHS()->GetPrimType(); + /* Generate Insn */ + if (rhsType == 
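+    /* Two store paths from here: an agg-typed rhs goes through
+     * SelectAggDassign (block copy), while a scalar rhs becomes one
+     * SelectCopy into the symbol's memory slot; e.g. `dassign %x 0 (dread i32 %y)`
+     * lowers to a single mov into %x's MemOperand (illustrative sketch). */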
PTY_agg) { + /* Agg Type */ + SelectAggDassign(symbolInfo, symbolMem, opndRhs); + return; + } + PrimType memType = symbolInfo.primType; + if (memType == PTY_agg) { + memType = PTY_a64; + } + SelectCopy(symbolMem, opndRhs, memType, rhsType); + return; +} + +void MPISel::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); + PrimType primType = stmt.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + MemOperand &memOpnd = GetOrCreateMemOpndFromSymbol(*symbol, bitSize, stmt.offset); + + SelectCopy(memOpnd, opnd0, primType); +} + +void MPISel::SelectIassign(const IassignNode &stmt, Operand &opndAddr, Operand &opndRhs) { + /* mirSymbol info */ + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + /* handle Lhs, generate (%Rxx) via Rxx*/ + PrimType memType = symbolInfo.primType; + if (memType == PTY_agg) { + memType = PTY_a64; + } + RegOperand &lhsBaseOpnd = SelectCopy2Reg(opndAddr, stmt.Opnd(0)->GetPrimType()); + MemOperand &lhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(lhsBaseOpnd, symbolInfo.offset, + GetPrimTypeBitSize(memType)); + /* handle Rhs, get R## from Rhs */ + PrimType rhsType = stmt.GetRHS()->GetPrimType(); + /* mov %R##, (%Rxx) */ + SelectCopy(lhsMemOpnd, opndRhs, memType, rhsType); +} + +void MPISel::SelectIassignoff(const IassignoffNode &stmt) { + Operand *addr = HandleExpr(stmt, *stmt.Opnd(0)); + DEBUG_ASSERT(addr != nullptr, "null ptr check"); + Operand *rhs = HandleExpr(stmt, *stmt.Opnd(1)); + DEBUG_ASSERT(rhs != nullptr, "null ptr check"); + + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &addrReg = SelectCopy2Reg(*addr, PTY_a64); + RegOperand &rhsReg = SelectCopy2Reg(*rhs, primType); + + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrReg, offset, bitSize); + SelectCopy(memOpnd, rhsReg, primType); +} + +ImmOperand *MPISel::SelectIntConst(const MIRIntConst &intConst, PrimType primType) { + return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), intConst.GetExtValue()); +} + +Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + Opcode opcode = node.GetOpCode(); + + if (IsPrimitiveInteger(primType)) { + resOpnd = &(cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType))); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + SelectShift(*resOpnd, regOpnd0, opnd1, opcode, primType, node.Opnd(1)->GetPrimType()); + } else { + CHECK_FATAL(false, "NIY vector cvt"); + } + return resOpnd; +} + +void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, + PrimType opnd0Type, PrimType opnd1Type) { + if (opnd1.IsIntImmediate() && static_cast(opnd1).GetValue() == 0) { + SelectCopy(resOpnd, opnd0, opnd0Type); + return; + } + + uint32 dsize = GetPrimTypeBitSize(opnd0Type); + MOperator mOp = abstract::MOP_undef; + if (shiftDirect == OP_shl) { + static auto fastShlMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(shl); + mOp = fastShlMappingFunc(dsize); + } else if (shiftDirect == OP_ashr) { + static auto fastAshrMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(ashr); + mOp = fastAshrMappingFunc(dsize); + } else if (shiftDirect == OP_lshr) { + static auto fastLshrMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(lshr); + mOp = 
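+        /* The DEF_MOPERATOR_MAPPING_FUNC table is indexed by GetBitIndex(dsize),
+         * so e.g. OP_lshr on a 32-bit operand resolves to abstract::MOP_lshr_32.
+         * A shift by constant zero never reaches this point: it was folded into
+         * a plain copy at the top of this function. */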
fastLshrMappingFunc(dsize); + } else { + CHECK_FATAL(false, "NIY, Not support shiftdirect case"); + } + RegOperand &firstOpnd = SelectCopy2Reg(opnd0, opnd0Type); + RegOperand &secondOpnd = SelectCopy2Reg(opnd1, opnd1Type); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + insn.AddOpndChain(resOpnd).AddOpndChain(firstOpnd).AddOpndChain(secondOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +void MPISel::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + PregIdx pregIdx = stmt.GetRegIdx(); + PrimType regType = stmt.GetPrimType(); + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), + GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType)); + SelectCopy(regOpnd, opnd0, regType, rhsType); + preg2Opnd.insert(std::pair(pregIdx, ®Opnd)); + if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) { + const SymbolAlloc *symLoc = cgFunc->GetMemlayout()->GetSpillLocOfPseduoRegister(pregIdx); + int64 offset = static_cast(cgFunc->GetBaseOffset(*symLoc)); + MIRPreg *preg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetTargetBasicPointer(PTY_u64); + MemOperand *dest = &cgFunc->GetOpndBuilder()->CreateMem(base, offset, bitLen); + SelectCopy(*dest, regOpnd, preg->GetPrimType(), regType); + } +} + +RegOperand *MPISel::SelectRegread(RegreadNode &expr) { + PregIdx pregIdx = expr.GetRegIdx(); + PrimType rhsType = expr.GetPrimType(); + if (pregIdx < 0) { + return &SelectSpecialRegread(pregIdx, rhsType); + } + + RegOperand ® = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), + GetPrimTypeSize(rhsType) * kBitsPerByte, cgFunc->GetRegTyFromPrimTy(rhsType)); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + const SymbolAlloc *symLoc = cgFunc->GetMemlayout()->GetSpillLocOfPseduoRegister(pregIdx); + int64 offset = static_cast(cgFunc->GetBaseOffset(*symLoc)); + MIRPreg *preg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetTargetBasicPointer(PTY_u64); + MemOperand *src = &cgFunc->GetOpndBuilder()->CreateMem(base, offset, bitLen); + SelectCopy(reg, *src, rhsType, preg->GetPrimType()); + } + return ® +} + +Operand *MPISel::SelectDread(const BaseNode &parent, const AddrofNode &expr) { + /* get mirSymbol info*/ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(expr); + PrimType symbolType = symbolInfo.primType; + /* Get symbol location */ + MemOperand &symbolMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID()); + PrimType primType = expr.GetPrimType(); + /* for AggType, return it's location in stack. */ + if (symbolType == maple::PTY_agg) { + CHECK_FATAL(primType == maple::PTY_agg, "NIY"); + return &symbolMem; + } + /* for BasicType, load symbolVal to register. 
*/ + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + /* Generate Insn */ + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return ®Opnd; +} + +Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectAdd(resReg, regOpnd0, regOpnd1, primType); + return &resReg; +} + +Operand *MPISel::SelectBand(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBand(resReg, regOpnd0, regOpnd1, primType); + return &resReg; +} + +Operand *MPISel::SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectSub(resOpnd, regOpnd0, regOpnd1, primType); + return &resOpnd; +} + +void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, + uint8 bitSize, PrimType primType) { + uint32 primBitSize = GetPrimTypeBitSize(primType); + bool isSigned = IsSignedInteger(primType); + if (bitOffset == 0 && !isSigned) { + /* + * resOpnd = opnd0 & ((1 << bitSize) - 1) + */ + ImmOperand &imm = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, + (static_cast(1) << bitSize) - 1); + SelectBand(resOpnd, opnd0, imm, primType); + } else { + /* + * tmpOpnd = opnd0 << (primBitSize - bitSize - bitOffset) + * resOpnd = tmpOpnd >> (primBitSize - bitSize) + * if signed : use sar; else use shr + */ + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &imm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, + primBitSize - bitSize - bitOffset); + SelectShift(tmpOpnd, opnd0, imm1Opnd, OP_shl, primType, primType); + Opcode opcode = isSigned ? 
OP_ashr : OP_lshr;
+        ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, primBitSize - bitSize);
+        SelectShift(resOpnd, tmpOpnd, imm2Opnd, opcode, primType, primType);
+    }
+}
+
+Operand *MPISel::SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0) {
+    PrimType fromType = node.Opnd(0)->GetPrimType();
+    PrimType toType = node.GetPrimType();
+    uint8 bitSize = node.GetBitsSize();
+    RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType),
+        cgFunc->GetRegTyFromPrimTy(toType));
+    if (IsPrimitiveInteger(toType)) {
+        // OP_extractbits, or bitSize < 8-bit, or bitSize is not a power of 2
+        if (node.GetOpCode() == OP_extractbits || bitSize < k8BitSize || (bitSize & (bitSize - 1)) != 0) {
+            SelectCopy(resOpnd, opnd0, toType, fromType);
+            SelectExtractbits(resOpnd, resOpnd, node.GetBitsOffset(), bitSize, toType);
+        } else {
+            PrimType opndType = GetIntegerPrimTypeFromSize(node.GetOpCode() == OP_sext, bitSize);
+            RegOperand &tmpRegOpnd = SelectCopy2Reg(opnd0, opndType, fromType);
+            SelectIntCvt(resOpnd, tmpRegOpnd, toType, opndType);
+        }
+    } else {
+        CHECK_FATAL(false, "NIY vector cvt");
+    }
+    return &resOpnd;
+}
+
+Operand *MPISel::SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0) {
+    PrimType fromType = node.Opnd(0)->GetPrimType();
+    PrimType toType = node.GetPrimType();
+    if (fromType == toType) {
+        return &opnd0;
+    }
+    RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType),
+        cgFunc->GetRegTyFromPrimTy(toType));
+    if (IsPrimitiveInteger(toType) || IsPrimitiveInteger(fromType)) {
+        SelectIntCvt(*resOpnd, opnd0, toType, fromType);
+    } else {
+        CHECK_FATAL(false, "NIY vector cvt");
+    }
+    return resOpnd;
+}
+
+void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) {
+    uint32 fromSize = GetPrimTypeBitSize(fromType);
+    uint32 toSize = GetPrimTypeBitSize(toType);
+    /*
+     * It is redundant to insert "nop" casts (unsigned 32 -> signed 32) in abstract CG IR.
+     * The signedness of operands would be shown in the expression.
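+     * For example, cvt i64 <- i32 and cvt u64 <- u32 differ only in fromType,
+     * which decides sext vs. zext below, while a narrowing cvt u32 <- u64 emits
+     * no instruction at all and merely re-labels the source register.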
+ */ + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, fromType); + if (toSize <= fromSize) { + resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), + GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); + return; + } + bool isSigned = !IsPrimitiveUnsigned(fromType); + MOperator mOp = GetFastCvtMopI(fromSize, toSize, isSigned); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); + return; +} + +void MPISel::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + static auto fastSubMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(sub); + MOperator mOp = fastSubMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +void MPISel::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + static auto fastBandMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(and); + MOperator mOp = fastBandMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +void MPISel::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + static auto fastAddMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(add); + MOperator mOp = fastAddMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +Operand* MPISel::SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + SelectNeg(*resOpnd, regOpnd0, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void MPISel::SelectNeg(Operand &resOpnd, Operand &opnd0, PrimType primType) { + static auto fastNegMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(neg); + MOperator mOp = fastNegMappingFunc(GetPrimTypeBitSize(primType)); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBior(*resOpnd, regOpnd0, regOpnd1, primType); + return resOpnd; +} + +void MPISel::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + static auto fastBiorMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(or); + MOperator mOp = fastBiorMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +Operand *MPISel::SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, 
node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBxor(*resOpnd, regOpnd0, regOpnd1, primType); + return resOpnd; +} + +void MPISel::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + static auto fastBxorMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(xor); + MOperator mOp = fastBxorMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +MemOperand *MPISel::GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset) { + /* get rhs*/ + Operand *addrOpnd = HandleExpr(expr, *expr.Opnd(0)); + RegOperand &addrOnReg = SelectCopy2Reg(*addrOpnd, PTY_a64); + /* Generate memOpnd */ + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrOnReg, + offset, GetPrimTypeBitSize(primType)); + return &memOpnd; +} + +Operand *MPISel::SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset) { + /* get lhs mirType info */ + MirTypeInfo lhsInfo = GetMirTypeInfoFromMirNode(expr); + /* get memOpnd */ + MemOperand &memOpnd = *GetOrCreateMemOpndFromIreadNode(expr, lhsInfo.primType, lhsInfo.offset + extraOffset); + /* for AggType, return addr it self. */ + if (lhsInfo.primType == PTY_agg) { + return &memOpnd; + } + /* for BasicType, load val in addr to register. */ + PrimType primType = expr.GetPrimType(); + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(result, memOpnd, primType, lhsInfo.primType); + return &result; +} + +Operand *MPISel::SelectIreadoff(const BaseNode &parent, const IreadoffNode &ireadoff) { + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + + Operand *addrOpnd = HandleExpr(ireadoff, *ireadoff.Opnd(0)); + RegOperand &addrOnReg = SelectCopy2Reg(*addrOpnd, PTY_a64); + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrOnReg, offset, bitSize); + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(result, memOpnd, primType); + return &result; +} + +static inline uint64 CreateDepositBitsImm1(uint32 primBitSize, uint8 bitOffset, uint8 bitSize) { + /* $imm1 = 1(primBitSize - bitSize - bitOffset)0(bitSize)1(bitOffset) */ + uint64 val = UINT64_MAX; // 0xFFFFFFFFFFFFFFFF + if (bitSize + bitOffset >= primBitSize) { + val = 0; + } else { + val <<= (bitSize + bitOffset); + } + val |= (static_cast(1) << bitOffset) - 1; + return val; +} + +Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent) { + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + PrimType primType = node.GetPrimType(); + uint32 primBitSize = GetPrimTypeBitSize(primType); + DEBUG_ASSERT((primBitSize == k64BitSize) || (bitOffset < k32BitSize), "wrong bitSize"); + DEBUG_ASSERT(bitSize < k64BitSize, "wrong bitSize"); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(primBitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + /* + * resOpnd = (opnd0 and $imm1) or ((opnd1 << bitOffset) and (~$imm1)); + * $imm1 = 1(primBitSize - bitSize - bitOffset)0(bitSize)1(bitOffset) + */ + uint64 imm1Val = CreateDepositBitsImm1(primBitSize, bitOffset, bitSize); + ImmOperand &imm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, + static_cast(imm1Val)); + /* and */ + SelectBand(resOpnd, opnd0, imm1Opnd, 
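+    /* Mask example (primBitSize = 32, bitOffset = 8, bitSize = 8):
+     *   imm1 = 0xFFFF00FF, so res = (opnd0 & 0xFFFF00FF) | ((opnd1 << 8) & 0x0000FF00),
+     * i.e. the old bits outside [8,15] survive and opnd1's low byte is
+     * deposited into bits [8,15]. */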
primType); + if (opnd1.IsIntImmediate()) { + /* opnd1 is immediate, imm2 = (opnd1.val << bitOffset) & (~$imm1) */ + int64 imm2Val = (static_cast(opnd1).GetValue() << bitOffset) & (~imm1Val); + ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, imm2Val); + /* or */ + SelectBior(resOpnd, resOpnd, imm2Opnd, primType); + } else { + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(primBitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(tmpOpnd, opnd1, primType, node.Opnd(1)->GetPrimType()); + /* shift -- (opnd1 << bitOffset) */ + ImmOperand &countOpnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, bitOffset); + SelectShift(tmpOpnd, tmpOpnd, countOpnd, OP_shl, primType, primType); + /* and (~$imm1) */ + ImmOperand &nonImm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, (~imm1Val)); + SelectBand(tmpOpnd, tmpOpnd, nonImm1Opnd, primType); + /* or */ + SelectBior(resOpnd, resOpnd, tmpOpnd, primType); + } + return &resOpnd; +} + +Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) { + DEBUG_ASSERT(node.GetPrimType() == PTY_a64, "wrong type"); + PrimType srcType = node.Opnd(0)->GetPrimType(); + RegOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), + cgFunc->GetRegTyFromPrimTy(PTY_u64)); + SelectCopy(sizeOpnd, opnd0, PTY_u64, srcType); + + /* stack byte alignment */ + uint32 stackPtrAlignment = cgFunc->GetMemlayout()->GetStackPtrAlignment(); + RegOperand &aliOp = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), + cgFunc->GetRegTyFromPrimTy(PTY_u64)); + SelectAdd(aliOp, sizeOpnd, cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, + stackPtrAlignment - 1), PTY_u64); + ImmOperand &shiftOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, + __builtin_ctz(stackPtrAlignment)); + SelectShift(aliOp, aliOp, shiftOpnd, OP_lshr, PTY_u64, PTY_u64); + SelectShift(aliOp, aliOp, shiftOpnd, OP_shl, PTY_u64, PTY_u64); + + RegOperand &spOpnd = GetTargetStackPointer(PTY_u64); + SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), + cgFunc->GetRegTyFromPrimTy(PTY_u64)); + uint32 argsToStkpassSize = cgFunc->GetMemlayout()->SizeOfArgsToStackPass(); + if (argsToStkpassSize > 0) { + SelectAdd(resOpnd, spOpnd, 
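+        /* Alignment sketch: with stackPtrAlignment = 16, a 20-byte request
+         * becomes ((20 + 15) >> 4) << 4 = 32 before the SP subtraction above;
+         * the result below then additionally skips past the outgoing-args
+         * area when the function passes arguments on the stack. */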
cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, argsToStkpassSize), PTY_u64); + } else { + SelectCopy(resOpnd, spOpnd, PTY_u64); + } + return &resOpnd; +} + +Operand *MPISel::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) { + BaseNode *opnd0 = node.Opnd(0); + BaseNode *opnd1 = node.Opnd(1); + DEBUG_ASSERT(opnd1->GetOpCode() == OP_constval, "NIY, opnd1->op should be OP_constval."); + + switch (opnd0->GetOpCode()) { + case OP_regread: { + return SelectRegread(static_cast(*opnd0)); + } + case OP_addrof: { + Operand *addrOpnd = SelectAddrof(static_cast(*opnd0), node); + + /* OP_constval */ + ConstvalNode *constvalNode = static_cast(opnd1); + MIRConst *mirConst = constvalNode->GetConstVal(); + DEBUG_ASSERT(mirConst->GetKind() == kConstInt, "NIY"); + MIRIntConst *mirIntConst = static_cast(mirConst); + Operand *immOpnd = SelectIntConst(*mirIntConst, constvalNode->GetPrimType()); + + Operand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_a64), + cgFunc->GetRegTyFromPrimTy(PTY_a64)); + SelectAdd(resOpnd, *addrOpnd, *immOpnd, node.GetPrimType()); + return &resOpnd; + } + default: + CHECK_FATAL(false, "cannot handle opnd0."); + } +} + +StmtNode *MPISel::HandleFuncEntry() { + MIRFunction &mirFunc = cgFunc->GetFunction(); + BlockNode *block = mirFunc.GetBody(); + + DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + + StmtNode *stmt = block->GetFirst(); + if (stmt == nullptr) { + return nullptr; + } + DEBUG_ASSERT(stmt->GetOpCode() == OP_label, "The first statement should be a label"); + HandleLabel(*stmt, *this); + cgFunc->SetFirstBB(*cgFunc->GetCurBB()); + stmt = stmt->GetNext(); + if (stmt == nullptr) { + return nullptr; + } + cgFunc->SetCurBB(*cgFunc->StartNewBBImpl(false, *stmt)); + bool withFreqInfo = mirFunc.HasFreqMap() && !mirFunc.GetLastFreqMap().empty(); + if (withFreqInfo) { + cgFunc->GetCurBB()->SetFrequency(kFreqBase); + } + + return stmt; +} + +/* This function loads src to a register, the src can be an imm, mem or a label. + * Once the source and result(destination) types are different, + * implicit conversion is executed here.*/ +RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) { + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (src.IsRegister() && fromSize == toSize) { + return static_cast(src); + } + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); + } else { + SelectCopy(dest, src, toType); + } + return dest; +} +/* Pretty sure that implicit type conversions will not occur. */ +RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) { + DEBUG_ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); + if (src.IsRegister()) { + return static_cast(src); + } + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCopy(dest, src, dtype); + return dest; +} +/* This function copy/load/store src to a dest, Once the src and dest types + * are different, implicit conversion is executed here. 
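+ * For instance, copying an i32 value into an i64 destination first goes
+ * through SelectIntCvt into a fresh vreg and then does the plain same-width
+ * copy; equal-width copies skip the conversion branch entirely.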
*/
+void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) {
+    if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) {
+        RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType);
+        RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType),
+            cgFunc->GetRegTyFromPrimTy(toType));
+        SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType);
+        SelectCopy(dest, dstRegOpnd, toType);
+    } else {
+        SelectCopy(dest, src, toType);
+    }
+}
+
+/* Pretty sure that implicit type conversions will not occur. */
+void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) {
+    DEBUG_ASSERT(dest.GetSize() == src.GetSize(), "NIY");
+    if (dest.GetKind() == Operand::kOpdRegister) {
+        SelectCopyInsn(dest, src, type);
+    } else if (dest.GetKind() == Operand::kOpdMem) {
+        if (src.GetKind() != Operand::kOpdRegister) {
+            RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type),
+                cgFunc->GetRegTyFromPrimTy(type));
+            SelectCopyInsn(tempReg, src, type);
+            SelectCopyInsn(dest, tempReg, type);
+        } else {
+            SelectCopyInsn(dest, src, type);
+        }
+    } else {
+        CHECK_FATAL(false, "NIY, CPU supports more than memory and registers");
+    }
+    return;
+}
+
+void MPISel::SelectCopyInsn(Operand &dest, Operand &src, PrimType type) {
+    MOperator mop = GetFastIselMop(dest.GetKind(), src.GetKind(), type);
+    CHECK_FATAL(mop != abstract::MOP_undef, "get mop failed");
+    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mop, InsnDesc::GetAbstractId(mop));
+    (void)insn.AddOpndChain(dest).AddOpndChain(src);
+    cgFunc->GetCurBB()->AppendInsn(insn);
+}
+
+Operand *MPISel::SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent) {
+    PrimType dtype = node.GetPrimType();
+
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype),
+            cgFunc->GetRegTyFromPrimTy(dtype));
+        RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType());
+        SelectBnot(*resOpnd, regOpnd0, dtype);
+    } else {
+        /* vector operand */
+        CHECK_FATAL(false, "NIY");
+    }
+    return resOpnd;
+}
+
+void MPISel::SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType) {
+    static auto fastBnotMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(not);
+    MOperator mOp = fastBnotMappingFunc(GetPrimTypeBitSize(primType));
+    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp));
+    (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0);
+    cgFunc->GetCurBB()->AppendInsn(insn);
+}
+
+Operand *MPISel::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType primType = node.GetPrimType();
+    RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType),
+        cgFunc->GetRegTyFromPrimTy(primType));
+    SelectMin(resOpnd, opnd0, opnd1, primType);
+    return &resOpnd;
+}
+
+void MPISel::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+    SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType);
+}
+
+Operand *MPISel::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType primType = node.GetPrimType();
+    RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType),
+        cgFunc->GetRegTyFromPrimTy(primType));
+    SelectMax(resOpnd, opnd0, opnd1, primType);
+    return &resOpnd;
+}
+
+void MPISel::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+    SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType);
+}
+
+Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) {
+    PrimType fromType = node.Opnd(0)->GetPrimType();
+    PrimType toType = node.GetPrimType();
+    DEBUG_ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit width doesn't match");
+    if (IsPrimitivePoint(fromType) && IsPrimitivePoint(toType)) {
+        return &SelectCopy2Reg(opnd0, toType);
+    }
+    if (IsPrimitiveVector(fromType) || IsPrimitiveVector(toType)) {
+        return &SelectCopy2Reg(opnd0, toType);
+    }
+    if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) {
+        return &SelectCopy2Reg(opnd0, toType, fromType);
+    }
+    CHECK_FATAL(false, "NIY, retype");
+    return nullptr;
+}
+
+void MPISel::HandleFuncExit() {
+    BlockNode *block = cgFunc->GetFunction().GetBody();
+    DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction");
+    cgFunc->GetCurBB()->SetLastStmt(*block->GetLast());
+    /* Set lastbb's frequency */
+    cgFunc->SetLastBB(*cgFunc->GetCurBB());
+    /* the last BB is return BB */
+    cgFunc->GetLastBB()->SetKind(BB::kBBReturn);
+    cgFunc->SetCleanupBB(*cgFunc->GetCurBB()->GetPrev());
+}
+
+bool InstructionSelector::PhaseRun(maplebe::CGFunc &f) {
+    MPISel *mpIS = f.GetCG()->CreateMPIsel(*GetPhaseMemPool(), *GetPhaseAllocator(), f);
+    mpIS->doMPIS();
+    Standardize *stdz = f.GetCG()->CreateStandardize(*GetPhaseMemPool(), f);
+    stdz->DoStandardize();
+    // f.DumpCGIR();
+    return true;
+}
+}
diff --git a/ecmascript/mapleall/maple_be/src/cg/label_creation.cpp b/ecmascript/mapleall/maple_be/src/cg/label_creation.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa734e31fd4a9c42575bf03e5ee427b6592333b1
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/label_creation.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "label_creation.h"
+#include "cgfunc.h"
+#include "cg.h"
+#include "debug_info.h"
+
+namespace maplebe {
+using namespace maple;
+
+void LabelCreation::Run() {
+    CreateStartEndLabel();
+}
+
+void LabelCreation::CreateStartEndLabel() {
+    DEBUG_ASSERT(cgFunc != nullptr, "expect a cgfunc before CreateStartEndLabel");
+    LabelIdx startLblIdx = cgFunc->CreateLabel();
+    MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder();
+    DEBUG_ASSERT(mirBuilder != nullptr, "get mirbuilder failed in CreateStartEndLabel");
+    LabelNode *startLabel = mirBuilder->CreateStmtLabel(startLblIdx);
+    cgFunc->SetStartLabel(*startLabel);
+    cgFunc->GetFunction().GetBody()->InsertFirst(startLabel);
+    LabelIdx endLblIdx = cgFunc->CreateLabel();
+    LabelNode *endLabel = mirBuilder->CreateStmtLabel(endLblIdx);
+    cgFunc->SetEndLabel(*endLabel);
+    cgFunc->GetFunction().GetBody()->InsertLast(endLabel);
+    DEBUG_ASSERT(cgFunc->GetFunction().GetBody()->GetLast() == endLabel, "last stmt must be a endLabel");
+    MIRFunction *func = &cgFunc->GetFunction();
+    CG *cg = cgFunc->GetCG();
+    if (cg->GetCGOptions().WithDwarf()) {
+        DebugInfo *di = cg->GetMIRModule()->GetDbgInfo();
+        DBGDie *fdie = di->GetDie(func);
+        fdie->SetAttr(DW_AT_low_pc, startLblIdx);
+        fdie->SetAttr(DW_AT_high_pc, endLblIdx);
+    }
+    /* add start/end labels into the static map table in class cg */
+    if (!CG::IsInFuncWrapLabels(func)) {
+        CG::SetFuncWrapLabels(func, std::make_pair(startLblIdx, endLblIdx));
+    }
+}
+
+bool CgCreateLabel::PhaseRun(maplebe::CGFunc &f) {
+    MemPool *memPool = GetPhaseMemPool();
+    LabelCreation *labelCreate = memPool->New<LabelCreation>(f);
+    labelCreate->Run();
+    return false;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/live.cpp b/ecmascript/mapleall/maple_be/src/cg/live.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ce2f99e536ec745135b9f261ae8102af28af4630
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/live.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "live.h"
+#include <set>
+#include "cg.h"
+#include "cg_option.h"
+#include "cgfunc.h"
+
+/*
+ * This phase builds two sets, liveOutRegno and liveInRegno, for each BB.
+ * The algorithm has three parts:
+ * 1. initialize and compute def[] / use[] of each BB;
+ * 2. build live_in and live_out from the dataflow equations
+ *      Out[B] = U In[S]                 // S ranges over B's successors
+ *      In[B]  = use[B] U (Out[B] - def[B]);
+ * 3. deal with cleanup BB.
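+ * A tiny two-block example of one backward pass, with BB1 -> BB2:
+ *   BB2: use = {R1}, def = {}   =>  In[BB2] = {R1}
+ *   BB1: use = {R2}, def = {R1} =>  Out[BB1] = In[BB2] = {R1}
+ *                                   In[BB1]  = {R2} U ({R1} - {R1}) = {R2}
+ * The pass iterates until no In[] set changes (see BuildInOutforFunc).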
+ */
+namespace maplebe {
+#define LIVE_ANALYZE_DUMP_NEWPM CG_DEBUG_FUNC(f)
+
+void LiveAnalysis::InitAndGetDefUse() {
+    FOR_ALL_BB(bb, cgFunc) {
+        if (!bb->GetEhPreds().empty()) {
+            InitEhDefine(*bb);
+        }
+        InitBB(*bb);
+        GetBBDefUse(*bb);
+        if (bb->GetEhPreds().empty()) {
+            continue;
+        }
+        bb->RemoveInsn(*bb->GetFirstInsn()->GetNext());
+        cgFunc->DecTotalNumberOfInstructions();
+        bb->RemoveInsn(*bb->GetFirstInsn());
+        cgFunc->DecTotalNumberOfInstructions();
+    }
+}
+
+/* Out[BB] = Union all of In[Succs(BB)] */
+bool LiveAnalysis::GenerateLiveOut(BB &bb) {
+    const MapleSet<uint32> bbLiveOutBak(bb.GetLiveOut()->GetInfo());
+    for (auto succBB : bb.GetSuccs()) {
+        if (succBB->GetLiveInChange() && !succBB->GetLiveIn()->NoneBit()) {
+            bb.LiveOutOrBits(*succBB->GetLiveIn());
+        }
+        if (!succBB->GetEhSuccs().empty()) {
+            for (auto ehSuccBB : succBB->GetEhSuccs()) {
+                bb.LiveOutOrBits(*ehSuccBB->GetLiveIn());
+            }
+        }
+    }
+    for (auto ehSuccBB : bb.GetEhSuccs()) {
+        if (ehSuccBB->GetLiveInChange() && !ehSuccBB->GetLiveIn()->NoneBit()) {
+            bb.LiveOutOrBits(*ehSuccBB->GetLiveIn());
+        }
+    }
+    return !bb.GetLiveOut()->IsEqual(bbLiveOutBak);
+}
+
+/* In[BB] = use[BB] Union (Out[BB]-def[BB]) */
+bool LiveAnalysis::GenerateLiveIn(BB &bb) {
+    LocalMapleAllocator allocator(stackMp);
+    const MapleSet<uint32> bbLiveInBak(bb.GetLiveIn()->GetInfo());
+    if (!bb.GetInsertUse()) {
+        bb.SetLiveInInfo(*bb.GetUse());
+        bb.SetInsertUse(true);
+    }
+    SparseDataInfo &bbLiveOut = bb.GetLiveOut()->Clone(allocator);
+    if (!bbLiveOut.NoneBit()) {
+        bbLiveOut.Difference(*bb.GetDef());
+        bb.LiveInOrBits(bbLiveOut);
+    }
+
+    if (!bb.GetLiveIn()->IsEqual(bbLiveInBak)) {
+        return true;
+    }
+    return false;
+}
+
+/* building liveIn and liveOut of each BB. */
+void LiveAnalysis::BuildInOutforFunc() {
+    iteration = 0;
+    bool hasChange;
+    do {
+        ++iteration;
+        hasChange = false;
+        FOR_ALL_BB_REV(bb, cgFunc) {
+            if (!GenerateLiveOut(*bb) && bb->GetInsertUse()) {
+                continue;
+            }
+            if (GenerateLiveIn(*bb)) {
+                bb->SetLiveInChange(true);
+                hasChange = true;
+            } else {
+                bb->SetLiveInChange(false);
+            }
+        }
+    } while (hasChange);
+}
+
+/* reset to liveout/in_regno */
+void LiveAnalysis::ResetLiveSet() {
+    FOR_ALL_BB(bb, cgFunc) {
+        bb->GetLiveIn()->GetBitsOfInfo<MapleSet<regno_t>>(bb->GetLiveInRegNO());
+        bb->GetLiveOut()->GetBitsOfInfo<MapleSet<regno_t>>(bb->GetLiveOutRegNO());
+    }
+}
+
+/* entry function for LiveAnalysis */
+void LiveAnalysis::AnalysisLive() {
+    InitAndGetDefUse();
+    BuildInOutforFunc();
+    InsertInOutOfCleanupBB();
+}
+
+void LiveAnalysis::DealWithInOutOfCleanupBB() {
+    const BB *cleanupBB = cgFunc->GetCleanupEntryBB();
+    if (cleanupBB == nullptr) {
+        return;
+    }
+    for (size_t i = 0; i != cleanupBB->GetLiveIn()->Size(); ++i) {
+        if (!cleanupBB->GetLiveIn()->TestBit(i)) {
+            continue;
+        }
+        if (CleanupBBIgnoreReg(regno_t(i))) {
+            continue;
+        }
+        /*
+         * a param vreg may be used in the cleanup bb, so that vreg has to stay live
+         * across the whole function, since an exception may be thrown anywhere in the body.
+         */
+        FOR_ALL_BB(bb, cgFunc) {
+            if (bb->IsCleanup()) {
+                continue;
+            }
+            /* If bb is not a cleanup bb, then insert reg to both livein and liveout.
*/ + if ((bb != cgFunc->GetFirstBB()) && !bb->GetDef()->TestBit(i)) { + bb->SetLiveInBit(i); + } + bb->SetLiveOutBit(i); + } + } +} + +void LiveAnalysis::InsertInOutOfCleanupBB() { + const BB *cleanupBB = cgFunc->GetCleanupEntryBB(); + if (cleanupBB == nullptr) { + return; + } + if (cleanupBB->GetLiveIn() == nullptr || cleanupBB->GetLiveIn()->NoneBit()) { + return; + } + SparseDataInfo cleanupBBLi = *(cleanupBB->GetLiveIn()); + /* registers need to be ignored: (reg < 8) || (29 <= reg && reg <= 32) */ + for (uint32 i = 1; i < 8; ++i) { + cleanupBBLi.ResetBit(i); + } + for (uint32 j = 29; j <= 32; ++j) { + cleanupBBLi.ResetBit(j); + } + + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + if (bb != cgFunc->GetFirstBB()) { + cleanupBBLi.Difference(*bb->GetDef()); + bb->LiveInOrBits(cleanupBBLi); + } + bb->LiveOutOrBits(cleanupBBLi); + } +} + +/* + * entry of get def/use of bb. + * getting the def or use info of each regopnd as parameters of CollectLiveInfo(). +*/ +void LiveAnalysis::GetBBDefUse(BB &bb) { + if (bb.GetKind() == BB::kBBReturn) { + GenerateReturnBBDefUse(bb); + } + if (bb.IsEmpty()) { + return; + } + + FOR_BB_INSNS_REV(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + bool isAsm = insn->IsAsmInsn(); + const InsnDesc *md = insn->GetDesc(); + if (insn->IsCall() || insn->IsTailCall()) { + ProcessCallInsnParam(bb, *insn); + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *opndDesc = md->GetOpndDes(i); + DEBUG_ASSERT(opndDesc != nullptr, "null ptr check"); + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + if (isAsm) { + ProcessAsmListOpnd(bb, opnd, i); + } else { + ProcessListOpnd(bb, opnd, opndDesc->IsDef()); + } + } else if (opnd.IsMemoryAccessOperand()) { + ProcessMemOpnd(bb, opnd); + } else if (opnd.IsConditionCode()) { + ProcessCondOpnd(bb); + } else { + bool isDef = opndDesc->IsRegDef(); + bool isUse = opndDesc->IsRegUse(); + CollectLiveInfo(bb, opnd, isDef, isUse); + } + } + } +} + +/* build use and def sets of each BB according to the type of regOpnd. 
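+ * Because GetBBDefUse scans insns in reverse, an insn that both reads and
+ * writes the same vreg (e.g. add R1, R1, R2) leaves R1 upward-exposed: the
+ * use sets the use bit and clears the def bit below, so R1 lands in use[BB].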
*/ +void LiveAnalysis::CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const { + if (!opnd.IsRegister()) { + return; + } + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return; + } + if (isDef) { + bb.SetDefBit(regNO); + if (!isUse) { + bb.UseResetBit(regNO); + } + } + if (isUse) { + bb.SetUseBit(regNO); + bb.DefResetBit(regNO); + } +} + +void LiveAnalysis::ProcessAsmListOpnd(BB &bb, Operand &opnd, uint32 idx) const { + bool isDef = false; + bool isUse = false; + switch (idx) { + case kAsmOutputListOpnd: + case kAsmClobberListOpnd: { + isDef = true; + break; + } + case kAsmInputListOpnd: { + isUse = true; + break; + } + default: + return; + } + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, isUse); + } +} + +void LiveAnalysis::ProcessListOpnd(BB &bb, Operand &opnd, bool isDef) const { + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, !isDef); + } +} + +void LiveAnalysis::ProcessMemOpnd(BB &bb, Operand &opnd) const { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + CollectLiveInfo(bb, *base, !memOpnd.IsIntactIndexed(), true); + } + if (offset != nullptr) { + CollectLiveInfo(bb, *offset, false, true); + } +} + +void LiveAnalysis::ProcessCondOpnd(BB &bb) const { + Operand &rflag = cgFunc->GetOrCreateRflag(); + CollectLiveInfo(bb, rflag, false, true); +} + +/* dump the current info of def/use/livein/liveout */ +void LiveAnalysis::Dump() const { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n--------- liveness for " << funcSt->GetName() << " iteration "; + LogInfo::MapleLogger() << iteration << " ---------\n"; + FOR_ALL_BB(bb, cgFunc) { + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " (" << std::hex << bb << ") " + << std::dec << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() << "]"; + } + LogInfo::MapleLogger() << "> idx " << bb->GetId() << " ===\n"; + + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << " pred [ "; + for (auto *pred : bb->GetPreds()) { + LogInfo::MapleLogger() << pred->GetId() << " (" << std::hex << pred << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << " succ [ "; + for (auto *succ : bb->GetSuccs()) { + LogInfo::MapleLogger() << succ->GetId() << " (" << std::hex << succ << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + + const SparseDataInfo *infoDef = nullptr; + LogInfo::MapleLogger() << " DEF: "; + infoDef = bb->GetDef(); + DumpInfo(*infoDef); + + const SparseDataInfo *infoUse = nullptr; + LogInfo::MapleLogger() << "\n USE: "; + infoUse = bb->GetUse(); + DumpInfo(*infoUse); + + const SparseDataInfo *infoLiveIn = nullptr; + LogInfo::MapleLogger() << "\n Live IN: "; + infoLiveIn = bb->GetLiveIn(); + DumpInfo(*infoLiveIn); + + const SparseDataInfo *infoLiveOut = nullptr; + LogInfo::MapleLogger() << "\n Live OUT: "; + infoLiveOut = bb->GetLiveOut(); + DumpInfo(*infoLiveOut); + LogInfo::MapleLogger() << "\n"; + } + 
+
+void LiveAnalysis::ProcessCondOpnd(BB &bb) const {
+    Operand &rflag = cgFunc->GetOrCreateRflag();
+    CollectLiveInfo(bb, rflag, false, true);
+}
+
+/* dump the current def/use/livein/liveout info */
+void LiveAnalysis::Dump() const {
+    MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx());
+    DEBUG_ASSERT(funcSt != nullptr, "null ptr check");
+    LogInfo::MapleLogger() << "\n--------- liveness for " << funcSt->GetName() << " iteration ";
+    LogInfo::MapleLogger() << iteration << " ---------\n";
+    FOR_ALL_BB(bb, cgFunc) {
+        LogInfo::MapleLogger() << "  === BB_" << bb->GetId() << " (" << std::hex << bb << ") "
+                               << std::dec << " <" << bb->GetKindName();
+        if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) {
+            LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() << "]";
+        }
+        LogInfo::MapleLogger() << "> idx " << bb->GetId() << " ===\n";
+
+        if (!bb->GetPreds().empty()) {
+            LogInfo::MapleLogger() << "  pred [ ";
+            for (auto *pred : bb->GetPreds()) {
+                LogInfo::MapleLogger() << pred->GetId() << " (" << std::hex << pred << ") " << std::dec << " ";
+            }
+            LogInfo::MapleLogger() << "]\n";
+        }
+        if (!bb->GetSuccs().empty()) {
+            LogInfo::MapleLogger() << "  succ [ ";
+            for (auto *succ : bb->GetSuccs()) {
+                LogInfo::MapleLogger() << succ->GetId() << " (" << std::hex << succ << ") " << std::dec << " ";
+            }
+            LogInfo::MapleLogger() << "]\n";
+        }
+
+        const SparseDataInfo *infoDef = nullptr;
+        LogInfo::MapleLogger() << "    DEF: ";
+        infoDef = bb->GetDef();
+        DumpInfo(*infoDef);
+
+        const SparseDataInfo *infoUse = nullptr;
+        LogInfo::MapleLogger() << "\n    USE: ";
+        infoUse = bb->GetUse();
+        DumpInfo(*infoUse);
+
+        const SparseDataInfo *infoLiveIn = nullptr;
+        LogInfo::MapleLogger() << "\n    Live IN: ";
+        infoLiveIn = bb->GetLiveIn();
+        DumpInfo(*infoLiveIn);
+
+        const SparseDataInfo *infoLiveOut = nullptr;
+        LogInfo::MapleLogger() << "\n    Live OUT: ";
+        infoLiveOut = bb->GetLiveOut();
+        DumpInfo(*infoLiveOut);
+        LogInfo::MapleLogger() << "\n";
+    }
+    LogInfo::MapleLogger() << "---------------------------\n";
+}
+
+void LiveAnalysis::DumpInfo(const SparseDataInfo &info) const {
+    uint32 count = 1;
+    for (auto x : info.GetInfo()) {
+        LogInfo::MapleLogger() << x << " ";
+        ++count;
+        /* print 20 entries per output line */
+        if ((count % 20) == 0) {
+            LogInfo::MapleLogger() << "\n";
+        }
+    }
+    LogInfo::MapleLogger() << '\n';
+}
+
+/* initialize the dependent info and containers of a BB */
+void LiveAnalysis::InitBB(BB &bb) {
+    bb.SetLiveInChange(true);
+    bb.SetInsertUse(false);
+    bb.ClearLiveInRegNO();
+    bb.ClearLiveOutRegNO();
+    const uint32 maxRegCount = cgFunc->GetSSAvRegCount() > cgFunc->GetMaxVReg() ?
+                               cgFunc->GetSSAvRegCount() : cgFunc->GetMaxVReg();
+    bb.SetLiveIn(*NewLiveIn(maxRegCount));
+    bb.SetLiveOut(*NewLiveOut(maxRegCount));
+    bb.SetDef(*NewDef(maxRegCount));
+    bb.SetUse(*NewUse(maxRegCount));
+}
+
+void LiveAnalysis::ClearInOutDataInfo() {
+    FOR_ALL_BB(bb, cgFunc) {
+        bb->SetLiveInChange(false);
+        bb->DefClearDataInfo();
+        bb->UseClearDataInfo();
+        bb->LiveInClearDataInfo();
+        bb->LiveOutClearDataInfo();
+    }
+}
+
+void LiveAnalysis::EnlargeSpaceForLiveAnalysis(BB &currBB) {
+    regno_t currMaxVRegNO = cgFunc->GetMaxVReg();
+    if (currMaxVRegNO >= currBB.GetLiveIn()->Size()) {
+        FOR_ALL_BB(bb, cgFunc) {
+            bb->LiveInEnlargeCapacity(currMaxVRegNO);
+            bb->LiveOutEnlargeCapacity(currMaxVRegNO);
+        }
+    }
+}
+
+void CgLiveAnalysis::GetAnalysisDependence(AnalysisDep &aDep) const {
+#if TARGX86_64
+    aDep.AddRequired();
+#endif
+    aDep.SetPreservedAll();
+}
+
+bool CgLiveAnalysis::PhaseRun(maplebe::CGFunc &f) {
+    MemPool *liveMemPool = GetPhaseMemPool();
+    live = f.GetCG()->CreateLiveAnalysis(*liveMemPool, f);
+    CHECK_FATAL(live != nullptr, "NIY");
+    live->AnalysisLive();
+    if (LIVE_ANALYZE_DUMP_NEWPM) {
+        live->Dump();
+    }
+    live->ResetLiveSet();
+    return false;
+}
+MAPLE_ANALYSIS_PHASE_REGISTER(CgLiveAnalysis, liveanalysis)
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/local_opt.cpp b/ecmascript/mapleall/maple_be/src/cg/local_opt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..19a363ffa48f018f8edc735272a38fe4d827e943
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/local_opt.cpp
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "local_opt.h"
+#include "cg.h"
+#include "mpl_logging.h"
+#if defined TARGX86_64
+#include "x64_reaching.h"
+#endif
+/*
+ * This phase performs optimization at the local level (a single BB or super BB).
+ * This phase requires live analysis.
+ */
+namespace maplebe {
+void LocalOpt::DoLocalCopyPropOptmize() {
+    DoLocalCopyProp();
+}
+
+void LocalPropOptimizePattern::Run() {
+    FOR_ALL_BB(bb, &cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*bb, *insn);
+        }
+    }
+}
+
+bool LocalCopyProp::PhaseRun(maplebe::CGFunc &f) {
+    MemPool *mp = GetPhaseMemPool();
+    auto *reachingDef = f.GetCG()->CreateReachingDefinition(*mp, f);
+    LocalOpt *localOpt = f.GetCG()->CreateLocalOpt(*mp, f, *reachingDef);
+    localOpt->DoLocalCopyPropOptmize();
+    return false;
+}
+
+void LocalCopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.SetPreservedAll();
+}
+
+bool RedundantDefRemove::CheckCondition(Insn &insn) {
+    uint32 opndNum = insn.GetOperandSize();
+    const InsnDesc *md = insn.GetDesc();
+    std::vector<Operand*> defOpnds;
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = insn.GetOperand(i);
+        auto *opndDesc = md->opndMD[i];
+        if (opndDesc->IsDef() && opndDesc->IsUse()) {
+            return false;
+        }
+        if (opnd.IsList()) {
+            continue;
+        }
+        if (opndDesc->IsDef()) {
+            defOpnds.emplace_back(&opnd);
+        }
+    }
+    if (defOpnds.size() != 1 || !defOpnds[0]->IsRegister()) {
+        return false;
+    }
+    auto &regDef = static_cast<RegOperand&>(*defOpnds[0]);
+    auto &liveOutRegSet = insn.GetBB()->GetLiveOutRegNO();
+    if (liveOutRegSet.find(regDef.GetRegisterNumber()) != liveOutRegSet.end()) {
+        return false;
+    }
+    return true;
+}
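+
+/*
+ * Sketch of the check above: an insn qualifies only if it has exactly one
+ * register def, no operand that is both def and use, and the defined
+ * register is not in the BB's live-out set, i.e. the value cannot be
+ * observed after the BB ends. Uses inside the same BB are left for the
+ * concrete pattern to analyse.
+ */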
+
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(LocalCopyProp, localcopyprop)
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/loop.cpp b/ecmascript/mapleall/maple_be/src/cg/loop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..757a5121ffe05a65733f94cea116ed4574a50fbe
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/loop.cpp
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "loop.h"
+#include "cg.h"
+#include "optimize_common.h"
+
+namespace maplebe {
+#define LOOP_ANALYSIS_DUMP_NEWPM CG_DEBUG_FUNC(f)
+
+static void PrintLoopInfo(const LoopHierarchy &loop) {
+    LogInfo::MapleLogger() << "header " << loop.GetHeader()->GetId();
+    if (loop.otherLoopEntries.size()) {
+        LogInfo::MapleLogger() << " multi-header ";
+        for (auto en : loop.otherLoopEntries) {
+            LogInfo::MapleLogger() << en->GetId() << " ";
+        }
+    }
+    if (loop.GetOuterLoop() != nullptr) {
+        LogInfo::MapleLogger() << " parent " << loop.GetOuterLoop()->GetHeader()->GetId();
+    }
+    LogInfo::MapleLogger() << " backedge ";
+    for (auto *bb : loop.GetBackedge()) {
+        LogInfo::MapleLogger() << bb->GetId() << " ";
+    }
+    LogInfo::MapleLogger() << "\n members ";
+    for (auto *bb : loop.GetLoopMembers()) {
+        LogInfo::MapleLogger() << bb->GetId() << " ";
+    }
+    if (!loop.GetInnerLoops().empty()) {
+        LogInfo::MapleLogger() << "\n inner_loop_headers ";
+        for (auto *inner : loop.GetInnerLoops()) {
+            LogInfo::MapleLogger() << inner->GetHeader()->GetId() << " ";
+        }
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+static void PrintInner(const LoopHierarchy &loop, uint32 level) {
+    for (auto *inner : loop.GetInnerLoops()) {
+        LogInfo::MapleLogger() << "loop-level-" << level << "\n";
+        PrintLoopInfo(*inner);
+        PrintInner(*inner, level + 1);
+    }
+}
+
+void LoopHierarchy::PrintLoops(const std::string &name) const {
+    LogInfo::MapleLogger() << name << "\n";
+    for (const LoopHierarchy *loop = this; loop != nullptr; loop = loop->next) {
+        PrintLoopInfo(*loop);
+    }
+    for (const LoopHierarchy *loop = this; loop != nullptr; loop = loop->next) {
+        PrintInner(*loop, 1);
+    }
+}
+
+void CGFuncLoops::CheckOverlappingInnerLoops(const MapleVector<CGFuncLoops*> &iLoops,
+                                             const MapleVector<BB*> &loopMem) const {
+    for (auto iloop : iLoops) {
+        CHECK_FATAL(iloop->loopMembers.size() > 0, "Empty loop");
+        for (auto bb : iloop->loopMembers) {
+            if (find(loopMem.begin(), loopMem.end(), bb) != loopMem.end()) {
+                LogInfo::MapleLogger() << "Error: inconsistent loop member";
+                CHECK_FATAL(0, "loop member overlaps with an inner loop");
+            }
+        }
+        CheckOverlappingInnerLoops(iloop->innerLoops, loopMem);
+    }
+}
+
+void CGFuncLoops::CheckLoops() const {
+    // Make sure the backedge -> header relationship holds
+    for (auto bEdge : backedge) {
+        if (find(bEdge->GetSuccs().begin(), bEdge->GetSuccs().end(), header) == bEdge->GetSuccs().end()) {
+            bool inOtherEntry = false;
+            for (auto entry : multiEntries) {
+                if (find(bEdge->GetSuccs().begin(), bEdge->GetSuccs().end(), entry) != bEdge->GetSuccs().end()) {
+                    inOtherEntry = true;
+                    break;
+                }
+            }
+            if (inOtherEntry == false) {
+                if (find(bEdge->GetEhSuccs().begin(), bEdge->GetEhSuccs().end(), header) == bEdge->GetEhSuccs().end()) {
+                    LogInfo::MapleLogger() << "Error: inconsistent loop backedge";
+                    CHECK_FATAL(0, "loop backedge does not go to loop header");
+                }
+            }
+        }
+        if (find(header->GetPreds().begin(), header->GetPreds().end(), bEdge) == header->GetPreds().end()) {
+            bool inOtherEntry = false;
+            for (auto entry : multiEntries) {
+                if (find(entry->GetPreds().begin(), entry->GetPreds().end(), bEdge) != entry->GetPreds().end()) {
+                    inOtherEntry = true;
+                    break;
+                }
+            }
+            if (inOtherEntry == false) {
+                if (find(header->GetEhPreds().begin(), header->GetEhPreds().end(), bEdge) == header->GetEhPreds().end()) {
+                    LogInfo::MapleLogger() << "Error: inconsistent loop header";
+                    CHECK_FATAL(0, "loop header does not have a backedge");
+                }
+            }
+        }
+    }
+
+    // Make sure the members of containing loops do not overlap with inner loops
+    CheckOverlappingInnerLoops(innerLoops, loopMembers);
+
+    if (innerLoops.empty() == false) {
+        for (auto lp : innerLoops) {
+            lp->CheckLoops();
+        }
+    }
+}
+
+void CGFuncLoops::PrintLoops(const CGFuncLoops &funcLoop) const {
+    LogInfo::MapleLogger() << "loop_level(" << funcLoop.loopLevel << ") ";
+    LogInfo::MapleLogger() << "header " << funcLoop.GetHeader()->GetId() << " ";
+    if (funcLoop.multiEntries.size()) {
+        LogInfo::MapleLogger() << "other-header ";
+        for (auto bb : funcLoop.multiEntries) {
+            LogInfo::MapleLogger() << bb->GetId() << " ";
+        }
+    }
+    if (funcLoop.GetOuterLoop() != nullptr) {
+        LogInfo::MapleLogger() << "parent " << funcLoop.GetOuterLoop()->GetHeader()->GetId() << " ";
+    }
+    LogInfo::MapleLogger() << "backedge ";
+    for (auto *bb : funcLoop.GetBackedge()) {
+        LogInfo::MapleLogger() << bb->GetId() << " ";
+    }
+    LogInfo::MapleLogger() << "\n members ";
+    for (auto *bb : funcLoop.GetLoopMembers()) {
+        LogInfo::MapleLogger() << bb->GetId() << " ";
+    }
+    LogInfo::MapleLogger() << "\n exits ";
+    for (auto *bb : funcLoop.GetExits()) {
+        LogInfo::MapleLogger() << bb->GetId() << " ";
+    }
+    LogInfo::MapleLogger() << "\n";
+    if (!funcLoop.GetInnerLoops().empty()) {
+        LogInfo::MapleLogger() << " inner_loop_headers ";
+        for (auto *inner : funcLoop.GetInnerLoops()) {
+            LogInfo::MapleLogger() << inner->GetHeader()->GetId() << " ";
+        }
+        LogInfo::MapleLogger() << "\n";
+        for (auto *inner : funcLoop.GetInnerLoops()) {
+            PrintLoops(*inner);
+        }
+    }
+}
+
+// The partial loop body found by formLoop is NOT really needed downstream;
+// this should be simplified later.
+void LoopFinder::formLoop(BB *headBB, BB *backBB) {
+    DEBUG_ASSERT(headBB != nullptr && backBB != nullptr, "headBB or backBB is nullptr");
+    LoopHierarchy *simple_loop = memPool->New<LoopHierarchy>(*memPool);
+
+    if (headBB != backBB) {
+        DEBUG_ASSERT(!dfsBBs.empty(), "dfsBBs is empty");
+        DEBUG_ASSERT(onPathBBs[headBB->GetId()], "headBB is not on the execution path");
+        std::stack<BB*> tempStk;
+
+        tempStk.push(dfsBBs.top());
+        dfsBBs.pop();
+
+        while (tempStk.top() != headBB && !dfsBBs.empty()) {
+            tempStk.push(dfsBBs.top());
+            dfsBBs.pop();
+        }
+
+        while (!tempStk.empty()) {
+            BB *topBB = tempStk.top();
+            tempStk.pop();
+
+            if (onPathBBs[topBB->GetId()]) {
+                simple_loop->InsertLoopMembers(*topBB);
+            }
+            dfsBBs.push(topBB);
+        }
+    }
+    // Note: backBB is NOT on dfsBBs
+    simple_loop->InsertLoopMembers(*backBB);
+    simple_loop->SetHeader(*headBB);
+    simple_loop->InsertBackedge(*backBB);
+
+    if (loops) {
+        loops->SetPrev(simple_loop);
+    }
+    simple_loop->SetNext(loops);
+    loops = simple_loop;
+}
+
+void LoopFinder::seekBackEdge(BB *bb, const MapleList<BB*> &succs) {
+    for (const auto succBB : succs) {
+        if (!visitedBBs[succBB->GetId()]) {
+            dfsBBs.push(succBB);
+        } else {
+            if (onPathBBs[succBB->GetId()]) {
+                formLoop(succBB, bb);
+                bb->PushBackLoopSuccs(*succBB);
+                succBB->PushBackLoopPreds(*bb);
+            }
+        }
+    }
+}
+
+void LoopFinder::seekCycles() {
+    while (!dfsBBs.empty()) {
+        BB *bb = dfsBBs.top();
+        if (visitedBBs[bb->GetId()]) {
+            onPathBBs[bb->GetId()] = false;
+            dfsBBs.pop();
+            continue;
+        }
+
+        visitedBBs[bb->GetId()] = true;
+        onPathBBs[bb->GetId()] = true;
+        seekBackEdge(bb, bb->GetSuccs());
+        seekBackEdge(bb, bb->GetEhSuccs());
+    }
+}
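+
+/*
+ * Back-edge detection sketch: for a CFG 1 -> 2 -> 3 -> 2, the DFS pushes
+ * 1, 2 and 3 and marks them on-path; when 3's successor 2 is found still
+ * on-path, edge 3 -> 2 is a back edge and formLoop() records {2, 3} as a
+ * loop with header 2.
+ */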
+
+void LoopFinder::markExtraEntryAndEncl() {
+    DEBUG_ASSERT(dfsBBs.empty(), "dfsBBs is NOT empty");
+    std::vector<BB*> loopEnclosure;
+    loopEnclosure.resize(cgFunc->NumBBs());
+    std::vector<bool> startProcess;
+    startProcess.resize(cgFunc->NumBBs());
+    std::vector<BB*> origEntries;
+    origEntries.resize(cgFunc->NumBBs());
+    std::vector<BB*> newEntries;
+    newEntries.resize(cgFunc->NumBBs());
+
+    for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) {
+        fill(visitedBBs.begin(), visitedBBs.end(), false);
+        fill(loopEnclosure.begin(), loopEnclosure.end(), nullptr);
+        fill(startProcess.begin(), startProcess.end(), false);
+        fill(origEntries.begin(), origEntries.end(), nullptr);
+        fill(newEntries.begin(), newEntries.end(), nullptr);
+
+        for (auto *bb : loop->GetLoopMembers()) {
+            loopEnclosure[bb->GetId()] = bb;
+        }
+        origEntries[loop->GetHeader()->GetId()] = loop->GetHeader();
+
+        // Form the loop closure from the primary entry; at the end, collect all other entries
+        bool changed = false;
+        dfsBBs.push(loop->GetHeader());
+        while (true) {
+            while (!dfsBBs.empty()) {
+                BB *bb = dfsBBs.top();
+                visitedBBs[bb->GetId()] = true;
+                if (startProcess[bb->GetId()]) {
+                    dfsBBs.pop();
+                    for (const auto succBB : bb->GetSuccs()) {
+                        if (loopEnclosure[bb->GetId()] == nullptr &&
+                            loopEnclosure[succBB->GetId()] != nullptr &&
+                            succBB != loop->GetHeader()) {
+                            changed = true;
+                            loopEnclosure[bb->GetId()] = bb;
+                            break;
+                        }
+                    }
+                    continue;
+                } else {
+                    startProcess[bb->GetId()] = true;
+                    for (const auto succBB : bb->GetSuccs()) {
+                        if (!visitedBBs[succBB->GetId()]) {
+                            dfsBBs.push(succBB);
+                        }
+                    }
+                }
+            }
+
+            // Repeat until no new item is added
+            if (changed) {
+                dfsBBs.push(loop->GetHeader());
+                changed = false;
+                fill(visitedBBs.begin(), visitedBBs.end(), false);
+                fill(startProcess.begin(), startProcess.end(), false);
+                continue;
+            }
+
+            // Collect all entries
+            bool foundNewEntry = false;
+            fill(visitedBBs.begin(), visitedBBs.end(), false);
+            FOR_ALL_BB(bb, cgFunc) {
+                if (!visitedBBs[bb->GetId()]) {
+                    dfsBBs.push(bb);
+                    visitedBBs[bb->GetId()] = true;
+                    while (!dfsBBs.empty()) {
+                        BB *currBB = dfsBBs.top();
+                        visitedBBs[currBB->GetId()] = true;
+                        dfsBBs.pop();
+                        for (const auto succBB : currBB->GetSuccs()) {
+                            // Check whether we are entering a loop.
+                            if ((loopEnclosure[succBB->GetId()] != nullptr) &&
+                                (loopEnclosure[currBB->GetId()] == nullptr)) {
+                                newEntries[succBB->GetId()] = succBB;
+                                if (origEntries[succBB->GetId()] == nullptr) {
+                                    foundNewEntry = true;
+                                }
+                            }
+                            if (!visitedBBs[succBB->GetId()]) {
+                                dfsBBs.push(succBB);
+                            }
+                        }
+                    }
+                }
+            }
+            if (foundNewEntry) {
+                origEntries = newEntries;
+                for (const auto bb : newEntries) {
+                    if (bb != nullptr) {
+                        dfsBBs.push(bb);
+                    }
+                }
+                fill(visitedBBs.begin(), visitedBBs.end(), false);
+                fill(startProcess.begin(), startProcess.end(), false);
+                fill(newEntries.begin(), newEntries.end(), nullptr);
+            } else {
+                break;
+            }
+        }
+
+        // Set up the loop body
+        for (size_t id = 0; id < loopEnclosure.size(); id++) {
+            if (loopEnclosure[id] != nullptr) {
+                loop->InsertLoopMembers(*loopEnclosure[id]);
+            }
+        }
+
+        // Set up the head and the extra entries
+        for (const auto bb : newEntries) {
+            if (bb != nullptr) {
+                loop->otherLoopEntries.insert(bb);
+            }
+        }
+        loop->otherLoopEntries.erase(loop->GetHeader());
+    }
+}
+
+bool LoopFinder::HasSameHeader(const LoopHierarchy *lp1, const LoopHierarchy *lp2) {
+    if (lp1->GetHeader() == lp2->GetHeader()) {
+        return true;
+    }
+    for (auto other1 : lp1->otherLoopEntries) {
+        if (lp2->GetHeader() == other1) {
+            return true;
+        }
+        for (auto other2 : lp2->otherLoopEntries) {
+            if (other2 == other1) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+void LoopFinder::MergeLoops() {
+    for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr; loopHierarchy1 = loopHierarchy1->GetNext()) {
+        for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr;
+             loopHierarchy2 = loopHierarchy2->GetNext()) {
+            // Different loop bodies imply different loops
+            bool sameLoop = true;
+            if (loopHierarchy1->GetLoopMembers().size() == loopHierarchy2->GetLoopMembers().size()) {
+                for (auto *bb : loopHierarchy2->GetLoopMembers()) {
+                    if (find(loopHierarchy1->GetLoopMembers().begin(), loopHierarchy1->GetLoopMembers().end(), bb) ==
+                        loopHierarchy1->GetLoopMembers().end()) {
+                        sameLoop = false;
+                        break;
+                    }
+                }
+                if (sameLoop) {
+                    for (auto *bb : loopHierarchy1->GetLoopMembers()) {
+                        if (find(loopHierarchy2->GetLoopMembers().begin(), loopHierarchy2->GetLoopMembers().end(), bb) ==
+                            loopHierarchy2->GetLoopMembers().end()) {
+                            sameLoop = false;
+                            break;
+                        }
+                    }
+                }
+                if (sameLoop) {
+                    loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext());
+                    if (loopHierarchy2->GetNext() != nullptr) {
+                        loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev());
+                    }
+                    continue;
+                }
+            }
+            if (HasSameHeader(loopHierarchy1, loopHierarchy2) == false) {
+                continue;
+            }
+            for (auto *bb : loopHierarchy2->GetLoopMembers()) {
+                loopHierarchy1->InsertLoopMembers(*bb);
+            }
+            if (loopHierarchy1->GetHeader() != loopHierarchy2->GetHeader()) {
+                loopHierarchy1->otherLoopEntries.insert(loopHierarchy2->GetHeader());
+            }
+            for (auto bb : loopHierarchy2->otherLoopEntries) {
+                if (loopHierarchy1->GetHeader() != bb) {
+                    loopHierarchy1->otherLoopEntries.insert(bb);
+                }
+            }
+            for (auto *bb : loopHierarchy2->GetBackedge()) {
+                loopHierarchy1->InsertBackedge(*bb);
+            }
+            loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext());
+            if (loopHierarchy2->GetNext() != nullptr) {
+                loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev());
+            }
+        }
+    }
+}
+
+void LoopFinder::SortLoops() {
+    LoopHierarchy *head = nullptr;
+    LoopHierarchy *next1 = nullptr;
+    LoopHierarchy *next2 = nullptr;
+    bool swapped;
+    do {
+        swapped = false;
+        for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr;) {
+            /* remember loopHierarchy1's position in case loopHierarchy1 is moved */
+            head = loopHierarchy1;
+            next1 = loopHierarchy1->GetNext();
+            for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr;) {
+                next2 = loopHierarchy2->GetNext();
+
+                if (loopHierarchy1->GetLoopMembers().size() > loopHierarchy2->GetLoopMembers().size()) {
+                    if (head->GetPrev() == nullptr) {
+                        /* remove loopHierarchy2 from the list */
+                        loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext());
+                        if (loopHierarchy2->GetNext() != nullptr) {
+                            loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev());
+                        }
+                        /* link loopHierarchy2 in as the head */
+                        loops = loopHierarchy2;
+                        loopHierarchy2->SetPrev(nullptr);
+                        loopHierarchy2->SetNext(head);
+                        head->SetPrev(loopHierarchy2);
+                    } else {
+                        loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext());
+                        if (loopHierarchy2->GetNext() != nullptr) {
+                            loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev());
+                        }
+                        head->GetPrev()->SetNext(loopHierarchy2);
+                        loopHierarchy2->SetPrev(head->GetPrev());
+                        loopHierarchy2->SetNext(head);
+                        head->SetPrev(loopHierarchy2);
+                    }
+                    head = loopHierarchy2;
+                    swapped = true;
+                }
+                loopHierarchy2 = next2;
+            }
+            loopHierarchy1 = next1;
+        }
+    } while (swapped);
+}
+
+void LoopFinder::UpdateOuterForInnerLoop(BB *bb, LoopHierarchy *outer) {
+    if (outer == nullptr) {
+        return;
+    }
+    for (auto ito = outer->GetLoopMembers().begin(); ito != outer->GetLoopMembers().end();) {
+        if (*ito == bb) {
+            ito = outer->EraseLoopMembers(ito);
+        } else {
+            ++ito;
+        }
+    }
+    if (outer->GetOuterLoop() != nullptr) {
+        UpdateOuterForInnerLoop(bb, const_cast<LoopHierarchy*>(outer->GetOuterLoop()));
+    }
+}
+
+void LoopFinder::UpdateOuterLoop(const LoopHierarchy *loop) {
+    for (auto inner : loop->GetInnerLoops()) {
+        UpdateOuterLoop(inner);
+    }
+    for (auto *bb : loop->GetLoopMembers()) {
+        UpdateOuterForInnerLoop(bb, const_cast<LoopHierarchy*>(loop->GetOuterLoop()));
+    }
+}
+
+void LoopFinder::CreateInnerLoop(LoopHierarchy &inner, LoopHierarchy &outer) {
+    outer.InsertInnerLoops(inner);
+    inner.SetOuterLoop(outer);
+    if (loops == &inner) {
+        loops = inner.GetNext();
+    } else {
+        LoopHierarchy *prev = loops;
+        for (LoopHierarchy *loopHierarchy1 = loops->GetNext(); loopHierarchy1 != nullptr;
+             loopHierarchy1 = loopHierarchy1->GetNext()) {
+            if (loopHierarchy1 == &inner) {
+                prev->SetNext(prev->GetNext()->GetNext());
+            }
+            prev = loopHierarchy1;
+        }
+    }
+}
+
+static void FindLoopExits(LoopHierarchy *loop) {
+    for (auto *bb : loop->GetLoopMembers()) {
+        for (auto succ : bb->GetSuccs()) {
+            if (find(loop->GetLoopMembers().begin(), loop->GetLoopMembers().end(), succ) == loop->GetLoopMembers().end()) {
+                loop->InsertExit(*bb);
+            }
+        }
+    }
+    for (auto *inner : loop->GetInnerLoops()) {
+        FindLoopExits(inner);
+    }
+}
+
+void LoopFinder::DetectInnerLoop() {
+    for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) {
+        FindLoopExits(loop);
+    }
+    bool innerCreated;
+    do {
+        innerCreated = false;
+        for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr;
+             loopHierarchy1 = loopHierarchy1->GetNext()) {
+            for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr;
+                 loopHierarchy2 = loopHierarchy2->GetNext()) {
+                if (loopHierarchy1->GetHeader() != loopHierarchy2->GetHeader()) {
+                    auto loopHierarchy2Members = loopHierarchy2->GetLoopMembers();
+                    if (find(loopHierarchy2Members.begin(), loopHierarchy2Members.end(), loopHierarchy1->GetHeader()) !=
+                        loopHierarchy2Members.end()) {
+                        bool allin = true;
+                        // Make sure the whole body of loopHierarchy1 is included in loopHierarchy2
+                        for (auto *bb1 : loopHierarchy1->GetLoopMembers()) {
+                            if (find(loopHierarchy2Members.begin(), loopHierarchy2Members.end(), bb1) ==
+                                loopHierarchy2Members.end()) {
+                                allin = false;
+                                break;
+                            }
+                        }
+                        if (allin) {
+                            CreateInnerLoop(*loopHierarchy1, *loopHierarchy2);
+                            innerCreated = true;
+                        }
+                    }
+                    if (innerCreated) {
+                        break;
+                    }
+                }
+            }
+            if (innerCreated) {
+                break;
+            }
+        }
+    } while (innerCreated);
+
+    for (LoopHierarchy *outer = loops; outer != nullptr; outer = outer->GetNext()) {
+        UpdateOuterLoop(outer);
+    }
+}
+
+static void CopyLoopInfo(const LoopHierarchy *from, CGFuncLoops *to, CGFuncLoops *parent, MemPool *memPool) {
+    to->SetHeader(*const_cast<BB*>(from->GetHeader()));
+    for (auto bb : from->otherLoopEntries) {
+        to->AddMultiEntries(*bb);
+    }
+    for (auto *bb : from->GetLoopMembers()) {
+        to->AddLoopMembers(*bb);
+        bb->SetLoop(*to);
+    }
+    for (auto *bb : from->GetBackedge()) {
+        to->AddBackedge(*bb);
+    }
+    for (auto *bb : from->GetExits()) {
+        to->AddExit(*bb);
+    }
+    if (!from->GetInnerLoops().empty()) {
+        for (auto *inner : from->GetInnerLoops()) {
+            CGFuncLoops *floop = memPool->New<CGFuncLoops>(*memPool);
+            to->AddInnerLoops(*floop);
+            floop->SetLoopLevel(to->GetLoopLevel() + 1);
+            CopyLoopInfo(inner, floop, to, memPool);
+        }
+    }
+    if (parent != nullptr) {
+        to->SetOuterLoop(*parent);
+    }
+}
+
+void LoopFinder::UpdateCGFunc() {
+    for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) {
+        CGFuncLoops *floop = cgFunc->GetMemoryPool()->New<CGFuncLoops>(*cgFunc->GetMemoryPool());
+        cgFunc->PushBackLoops(*floop);
+        floop->SetLoopLevel(1); /* top level */
+        CopyLoopInfo(loop, floop, nullptr, cgFunc->GetMemoryPool());
+    }
+}
+
+void LoopFinder::FormLoopHierarchy() {
+    visitedBBs.clear();
+    visitedBBs.resize(cgFunc->NumBBs(), false);
+    sortedBBs.clear();
+    sortedBBs.resize(cgFunc->NumBBs(), nullptr);
+    onPathBBs.clear();
+    onPathBBs.resize(cgFunc->NumBBs(), false);
+
+    FOR_ALL_BB(bb, cgFunc) {
+        bb->SetLevel(0);
+    }
+    bool changed;
+    do {
+        changed = false;
+        FOR_ALL_BB(bb, cgFunc) {
+            if (!visitedBBs[bb->GetId()]) {
+                dfsBBs.push(bb);
+                seekCycles();
+                changed = true;
+            }
+        }
+    } while (changed);
+
+    markExtraEntryAndEncl();
+    /*
+     * FIX: the partial loops should be merged at the time of initial
+     * construction, and the linked list kept as a sorted set; the merge
+     * and sort phases below could then go away.
+     *
+     * Start by merging the loops that share a header.
+     */
+    MergeLoops();
+    /* order the loops from the fewest members upwards */
+    SortLoops();
+    DetectInnerLoop();
+    UpdateCGFunc();
+}
+
+bool CgLoopAnalysis::PhaseRun(maplebe::CGFunc &f) {
+    f.ClearLoopInfo();
+    MemPool *loopMemPool = GetPhaseMemPool();
+    LoopFinder *loopFinder = loopMemPool->New<LoopFinder>(f, *loopMemPool);
+    loopFinder->FormLoopHierarchy();
+
+    if (LOOP_ANALYSIS_DUMP_NEWPM) {
+        /* generate the dot file after detection so loop back edges can be colored using the loop info */
+        DotGenerator::GenerateDot("buildloop", f, f.GetMirModule(), true, f.GetName());
+    }
+#if DEBUG
+    for (const auto *lp : f.GetLoops()) {
+        lp->CheckLoops();
+    }
+#endif
+    return false;
+}
+MAPLE_ANALYSIS_PHASE_REGISTER(CgLoopAnalysis, loopanalysis)
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/memlayout.cpp b/ecmascript/mapleall/maple_be/src/cg/memlayout.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0c242edf32f3460e0090483bde7da582d1e7ada6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/memlayout.cpp
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "memlayout.h"
+#include "cgfunc.h"
+
+namespace maplebe {
+using namespace maple;
+
+/*
+ * Go over all outgoing calls in the function body and get the maximum space
+ * needed for storing the actuals, based on the actual parameters and the ABI.
+ * These are usually the arguments that cannot be passed through registers,
+ * either because the call passes more than 8 arguments or because an
+ * argument cannot fit in a pair of registers.
+ *
+ * This assumes that all nesting of statements has been removed,
+ * so that all the statements are at a single block level.
+ */
+uint32 MemLayout::FindLargestActualArea(int32 &aggCopySize) {
+    StmtNode *stmt = mirFunction->GetBody()->GetFirst();
+    if (stmt == nullptr) {
+        return 0;
+    }
+    uint32 maxActualSize = 0;
+    uint32 maxParamStackSize = 0;  // size of the parameter stack requirement
+    uint32 maxCopyStackSize = 0;   // size of the aggregate-parameter stack copy requirement
+    for (; stmt != nullptr; stmt = stmt->GetNext()) {
+        Opcode opCode = stmt->GetOpCode();
+        if ((opCode < OP_call || opCode > OP_xintrinsiccallassigned) && opCode != OP_icallproto) {
+            continue;
+        }
+        if (opCode == OP_intrinsiccallwithtypeassigned || opCode == OP_intrinsiccallwithtype ||
+            opCode == OP_intrinsiccallassigned || opCode == OP_intrinsiccall) {
+            /*
+             * Some intrinsics, such as MPL_ATOMIC_EXCHANGE_PTR, are handled by CG
+             * and map to machine code sequences. We ignore them because they are not
+             * function calls.
+             */
+            continue;
+        }
+        /*
+         * If the following check fails, the dex most likely contains invoke-custom etc.,
+         * which is not supported yet.
+         */
+        DCHECK((opCode == OP_call || opCode == OP_icall || opCode == OP_icallproto), "Not lowered to call or icall?");
+        int32 copySize;
+        uint32 size = ComputeStackSpaceRequirementForCall(*stmt, copySize, opCode == OP_icall || opCode == OP_icallproto);
+        if (size > maxParamStackSize) {
+            maxParamStackSize = size;
+        }
+        if (static_cast<uint32>(copySize) > maxCopyStackSize) {
+            maxCopyStackSize = static_cast<uint32>(copySize);
+        }
+        if ((maxParamStackSize + maxCopyStackSize) > maxActualSize) {
+            maxActualSize = maxParamStackSize + maxCopyStackSize;
+        }
+    }
+    aggCopySize = static_cast<int32>(maxCopyStackSize);
+    /* round maxActualSize up to a multiple of GetPointerSize() * 2 (= 16, i.e. 2^4): the low 4 bits are cleared */
+    if (CGOptions::IsArm64ilp32()) {
+        maxActualSize = RoundUp(maxActualSize, k8ByteSize * 2);
+    } else {
+        maxActualSize = RoundUp(maxActualSize, GetPointerSize() * 2);
+    }
+    return maxActualSize;
+}
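+
+/*
+ * Worked example of the rounding above: maxParamStackSize 24 plus
+ * maxCopyStackSize 8 gives 32, which is already a multiple of
+ * GetPointerSize() * 2 == 16; a raw total of 17 would be rounded up to 32.
+ */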
+
+bool CgLayoutFrame::PhaseRun(maplebe::CGFunc &f) {
+    if (CGOptions::IsPrintFunction()) {
+        LogInfo::MapleLogger() << f.GetName() << "\n";
+    }
+    f.LayoutStackFrame();
+    return false;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/obj_emit.cpp b/ecmascript/mapleall/maple_be/src/cg/obj_emit.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2cdde845c4873f89504108d0675e81989013e8e6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/obj_emit.cpp
+/*
+ * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+#include "obj_emit.h"
+#include "namemangler.h"
+#include "aarch64_obj_emitter.h"
+#include "cg.h"
+
+namespace maplebe {
+using namespace maple;
+using namespace namemangler;
+
+void ObjEmitter::Run(FuncEmitInfo &funcEmitInfo) {
+    InsertNopInsn(static_cast<ObjFuncEmitInfo&>(funcEmitInfo));
+    EmitFuncBinaryCode(static_cast<ObjFuncEmitInfo&>(funcEmitInfo));
+}
+
+/* traverse the insns, get their binary code and save it in the buffer */
+void ObjEmitter::EmitFuncBinaryCode(ObjFuncEmitInfo &objFuncEmitInfo) {
+    const CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc();
+    objFuncEmitInfo.SetFuncName(cgFunc.GetName());
+
+    int size = (cgFunc.GetFunction().GetStIdx().Idx() << k8BitSize) + cgFunc.GetLabelIdx() + 1;
+    std::vector<uint64> label2Offset(size, 0xFFFFFFFFULL);
+    EmitInstructions(objFuncEmitInfo, label2Offset);
+    objFuncEmitInfo.UpdateMethodCodeSize();
+
+    EmitFunctionSymbolTable(objFuncEmitInfo, label2Offset);
+    EmitSwitchTable(objFuncEmitInfo, label2Offset);
+
+    /* local float variables */
+    for (const auto &mpPair : cgFunc.GetLabelAndValueMap()) {
+        CHECK_FATAL(mpPair.first < label2Offset.size(), "label2Offset");
+        label2Offset[mpPair.first] = objFuncEmitInfo.GetTextDataSize();
+        objFuncEmitInfo.AppendTextData(&(mpPair.second), k8ByteSize);
+    }
+
+    /* handle branch fixups here */
+    objFuncEmitInfo.HandleLocalBranchFixup(label2Offset);
+}
+
+void ObjEmitter::EmitInstructions(ObjFuncEmitInfo &objFuncEmitInfo, std::vector<uint64> &label2Offset) {
+    CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc();
+    FOR_ALL_BB(bb, &cgFunc) {
+        if (bb->GetLabIdx() != 0) {
+            CHECK_FATAL(bb->GetLabIdx() < label2Offset.size(), "label2Offset");
+            label2Offset[bb->GetLabIdx()] = objFuncEmitInfo.GetTextDataSize();
+            objFuncEmitInfo.AppendLabel2Order(bb->GetLabIdx());
+        }
+
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction() || insn->IsAsmInsn() || insn->IsPseudo()) {
+                continue;
+            }
+
+            /* get the binary code and save it in the buffer */
+            if (insn->GetDesc()->IsIntrinsic()) {
+                EmitIntrinsicInsn(*insn, label2Offset, objFuncEmitInfo);
+            } else if (insn->GetDesc()->IsSpecialIntrinsic()) {
+                EmitSpinIntrinsicInsn(*insn, label2Offset, objFuncEmitInfo);
+            } else {
+                EncodeInstruction(*insn, label2Offset, objFuncEmitInfo);
+            }
+        }
+    }
+}
+
+void ObjEmitter::EmitSwitchTable(ObjFuncEmitInfo &objFuncEmitInfo, const std::vector<uint64> &label2Offset) {
+    CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc();
+    if (cgFunc.GetEmitStVec().size() == 0) {
+        return;
+    }
+    uint32 tmpOffset = GetBeforeTextDataSize(objFuncEmitInfo);
+    /* the alignment is 8: push padding into objFuncEmitInfo's data */
+    uint32 startOffset = Alignment::Align(tmpOffset, k8ByteSize);
+    uint32 padding = startOffset - tmpOffset;
+    // objFuncEmitInfo.FillTextDataPadding(padding);
+    objFuncEmitInfo.FillTextDataNop(padding);
+
+    uint32 curOffset = objFuncEmitInfo.GetTextDataSize();
+    for (const auto &st : cgFunc.GetEmitStVec()) {
+        objFuncEmitInfo.SetSwitchTableOffset(st.second->GetName(), curOffset);
+        MIRAggConst *arrayConst = safe_cast<MIRAggConst>(st.second->GetKonst());
+        ASSERT(arrayConst != nullptr, "null ptr check");
+        for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) {
+            MIRLblConst *lblConst = safe_cast<MIRLblConst>(arrayConst->GetConstVecItem(i));
+            ASSERT(lblConst != nullptr, "null ptr check");
+            CHECK_FATAL(lblConst->GetValue() < label2Offset.size(), "label2Offset");
+            uint64 offset = static_cast<uint64>(label2Offset[lblConst->GetValue()]) - static_cast<uint64>(curOffset);
+            objFuncEmitInfo.AppendTextData(offset, k8ByteSize);
+        }
+
+        curOffset += arrayConst->GetConstVec().size() * k8ByteSize;
+    }
+}
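+
+/*
+ * Switch-table layout produced above: each table is 8-byte aligned (nop
+ * bytes are used as padding) and every entry is the 8-byte difference
+ * label2Offset[label] - curOffset, i.e. each case target is stored relative
+ * to the start of its own table.
+ */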
+
+void ObjEmitter::WriteObjFile() {
+    /* write the header */
+    Emit(&header, sizeof(header));
+
+    /* write the sections */
+    for (auto *section : sections) {
+        if (section->GetType() == SHT_NOBITS) {
+            continue;
+        }
+
+        SetFileOffset(section->GetOffset());
+        section->WriteSection(fileStream);
+    }
+
+    /* write the section table */
+    SetFileOffset(header.e_shoff);
+    for (auto section : sections) {
+        Emit(&section->GetSectionHeader(), sizeof(section->GetSectionHeader()));
+    }
+}
+
+void ObjEmitter::AddSymbol(const std::string &name, Word size, const Section &section, Address value) {
+    // auto nameIndex = dynStrSection->AddString(name);
+    auto nameIndex = strTabSection->AddString(name);
+    symbolTabSection->AppendSymbol({static_cast<Word>(nameIndex),
+        static_cast<uint8>((STB_GLOBAL << 4) + (STT_SECTION & 0xf)), 0, section.GetIndex(), value, size});
+}
+
+void ObjEmitter::AddFuncSymbol(const MapleString &name, Word size, Address value) {
+    auto symbolStrIndex = strTabSection->AddString(name);
+    symbolTabSection->AppendSymbol({static_cast<Word>(symbolStrIndex),
+        static_cast<uint8>((STB_GLOBAL << k4BitSize) + (STT_FUNC & 0xf)), 0, textSection->GetIndex(), value, size});
+}
+
+void ObjEmitter::ClearData() {
+    globalLabel2Offset.clear();
+    for (auto *section : sections) {
+        if (section != nullptr) {
+            section->ClearData();
+        }
+    }
+}
+
+void ObjEmitter::InitELFHeader() {
+    header.e_ident[EI_MAG0] = ELFMAG0;
+    header.e_ident[EI_MAG1] = ELFMAG1;
+    header.e_ident[EI_MAG2] = ELFMAG2;
+    header.e_ident[EI_MAG3] = ELFMAG3;
+    header.e_ident[EI_CLASS] = ELFCLASS64;
+    header.e_ident[EI_DATA] = ELFDATA2LSB;
+    header.e_ident[EI_VERSION] = EV_CURRENT;
+    header.e_ident[EI_OSABI] = ELFOSABI_LINUX;
+    header.e_ident[EI_ABIVERSION] = 0;
+    std::fill_n(&header.e_ident[EI_PAD], EI_NIDENT - EI_PAD, 0);
+    header.e_type = ET_REL;
+    header.e_version = 1;
+    UpdateMachineAndFlags(header);
+    header.e_entry = 0;
+    header.e_ehsize = sizeof(FileHeader);
+    header.e_phentsize = sizeof(SegmentHeader);
+    header.e_shentsize = sizeof(SectionHeader);
+    header.e_shstrndx = shStrSection->GetIndex();
+    header.e_shoff = 0;
+    header.e_phoff = 0;
+    header.e_shnum = sections.size();
+    header.e_phnum = 0;
+}
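+
+/*
+ * The header initialized above describes a little-endian (ELFDATA2LSB)
+ * 64-bit (ELFCLASS64) relocatable object (ET_REL). e_shoff is left as 0
+ * here; it is expected to be filled in once the section layout is fixed,
+ * before WriteObjFile() emits the section header table at that offset.
+ */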
+
+// void ObjEmitter::CollectGlobalInfo() {
+//     size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize();
+//     for (size_t i = 0; i < size; ++i) {
+//         MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i);
+//         if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused) {
+//             continue;
+//         }
+//         globalInfo.ClassifyGlobalInfo(*mirSymbol);
+//     }
+// }
+
+// void ObjEmitter::CreateStrIdx2Type() {
+//     /* Create the name2type map which will be used by reflection. */
+//     for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) {
+//         if (type == nullptr || (type->GetKind() != kTypeClass && type->GetKind() != kTypeInterface)) {
+//             continue;
+//         }
+//         GStrIdx strIdx = type->GetNameStrIdx();
+//         (void)strIdx2Type.insert(std::make_pair(strIdx, type));
+//     }
+// }
+
+void ObjEmitter::EmitMIRIntConst(EmitInfo &emitInfo) {
+    ASSERT(IsPrimitiveScalar(emitInfo.elemConst.GetType().GetPrimType()), "must be a primitive type!");
+    MIRIntConst &intConst = static_cast<MIRIntConst&>(emitInfo.elemConst);
+    size_t size = GetPrimTypeSize(emitInfo.elemConst.GetType().GetPrimType());
+    const IntVal &value = intConst.GetValue();
+    int64 val = value.GetExtValue();
+    // ifileImageSection->AppendData(&val, size);
+    dataSection->AppendData(&val, size);
+    emitInfo.offset += size;
+#ifdef OBJ_DEBUG
+    LogInfo::MapleLogger() << val << " size: " << size << "\n";
+#endif
+}
+
+void ObjEmitter::EmitMIRAddrofConstCommon(EmitInfo &emitInfo, uint64 specialOffset) {
+    MIRAddrofConst &symAddr = static_cast<MIRAddrofConst&>(emitInfo.elemConst);
+    MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr.GetSymbolIndex().Idx());
+    const std::string &symAddrName = symAddrSym->GetName();
+    LabelFixup labelFixup(symAddrName, emitInfo.offset, kLabelFixupDirect64);
+    if (specialOffset != 0) {
+        DataSection::AddLabelFixup(emitInfo.labelFixups, labelFixup);
+    } else {
+        // DataSection::AddLabelFixup(static_cast<DataSection*>(ifileImageSection)->GetGlobalLabelFixups(), labelFixup);
+    }
+    uint64 value = specialOffset - emitInfo.offset;
+    size_t size = GetPrimTypeSize(emitInfo.elemConst.GetType().GetPrimType());
+    // ifileImageSection->AppendData(&value, size);
+    dataSection->AppendData(&value, size);
+    emitInfo.offset += size;
+
+#ifdef OBJ_DEBUG
+    LogInfo::MapleLogger() << symAddrName << " size: " << size << "\n";
+#endif
+}
+
+void ObjEmitter::EmitMIRAddrofConst(EmitInfo &emitInfo) {
+    EmitMIRAddrofConstCommon(emitInfo, 0);
+}
+
+void ObjEmitter::EmitMIRAddrofConstOffset(EmitInfo &emitInfo) {
+    /* 2 is a fixed offset at runtime */
+    EmitMIRAddrofConstCommon(emitInfo, 2);
+}
+
+void ObjEmitter::EmitFunctionSymbolTable(ObjFuncEmitInfo &objFuncEmitInfo, std::vector<uint64> &label2Offset) {
+    CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc();
+    MIRFunction *func = &cgFunc.GetFunction();
+
+    size_t size = (func == nullptr) ? GlobalTables::GetGsymTable().GetTable().size() :
+                                      func->GetSymTab()->GetTable().size();
+    for (size_t i = 0; i < size; ++i) {
+        const MIRSymbol *st = nullptr;
+        if (func == nullptr) {
+            auto &symTab = GlobalTables::GetGsymTable();
+            st = symTab.GetSymbol(i);
+        } else {
+            auto &symTab = *func->GetSymTab();
+            st = symTab.GetSymbolAt(i);
+        }
+        if (st == nullptr) {
+            continue;
+        }
+        MIRStorageClass storageClass = st->GetStorageClass();
+        MIRSymKind symKind = st->GetSKind();
+        if (storageClass == kScPstatic && symKind == kStConst) {
+            // align to 8 bytes
+            size_t tmpOffset = GetBeforeTextDataSize(objFuncEmitInfo);
+            uint32 offset = Alignment::Align(tmpOffset, k8ByteSize);
+            uint32 padding = offset - tmpOffset;
+            objFuncEmitInfo.FillTextDataNop(padding);
+            CHECK_FATAL(cgFunc.GetLocalSymLabelIndex(*st) < label2Offset.size(), "label2Offset");
+            label2Offset[cgFunc.GetLocalSymLabelIndex(*st)] = static_cast<uint64>(objFuncEmitInfo.GetTextDataSize());
+            if (st->GetKonst()->GetKind() == kConstStr16Const) {
+                EmitStr16Const(objFuncEmitInfo, *st);
+                continue;
+            }
+
+            if (st->GetKonst()->GetKind() == kConstStrConst) {
+                EmitStrConst(objFuncEmitInfo, *st);
+                continue;
+            }
+
+            switch (st->GetKonst()->GetType().GetPrimType()) {
+                case PTY_u32: {
+                    MIRIntConst *intConst = safe_cast<MIRIntConst>(st->GetKonst());
+                    uint32 value = static_cast<uint32>(intConst->GetValue().GetExtValue());
+                    objFuncEmitInfo.AppendTextData(&value, sizeof(value));
+                    break;
+                }
+                case PTY_f32: {
+                    MIRFloatConst *floatConst = safe_cast<MIRFloatConst>(st->GetKonst());
+                    uint32 value = static_cast<uint32>(floatConst->GetIntValue());
+                    objFuncEmitInfo.AppendTextData(&value, sizeof(value));
+                    break;
+                }
+                case PTY_f64: {
+                    MIRDoubleConst *doubleConst = safe_cast<MIRDoubleConst>(st->GetKonst());
+                    uint32 value = doubleConst->GetIntLow32();
+                    objFuncEmitInfo.AppendTextData(&value, sizeof(value));
+                    value = doubleConst->GetIntHigh32();
+                    objFuncEmitInfo.AppendTextData(&value, sizeof(value));
+                    break;
+                }
+                default:
+                    break;
+            }
+        }
+    }
+}
+
+void ObjEmitter::EmitStr16Const(ObjFuncEmitInfo &objFuncEmitInfo, const MIRSymbol &str16Symbol) {
+    MIRStr16Const *mirStr16Const = safe_cast<MIRStr16Const>(str16Symbol.GetKonst());
+    const std::u16string &str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16Const->GetValue());
+
+    uint32 len = str16.length();
+    for (uint32 i = 0; i < len; ++i) {
+        char16_t c = str16[i];
+        objFuncEmitInfo.AppendTextData(&c, sizeof(c));
+    }
+    if ((str16.length() & 0x1) == 1) {
+        uint16 value = 0;
+        objFuncEmitInfo.AppendTextData(&value, sizeof(value));
+    }
+}
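+
+/*
+ * E.g. a 3-character UTF-16 literal occupies 6 bytes, so the odd-length
+ * check above appends one extra 16-bit zero and the emitted data stays
+ * 4-byte aligned.
+ */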
+
+void ObjEmitter::EmitStrConst(ObjFuncEmitInfo &objFuncEmitInfo, const MIRSymbol &strSymbol) {
+    MIRStrConst *mirStrConst = safe_cast<MIRStrConst>(strSymbol.GetKonst());
+
+    auto str = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirStrConst->GetValue());
+    size_t size = str.length();
+    /* the + 1 covers the trailing 0 of the string */
+    objFuncEmitInfo.AppendTextData(str.c_str(), size + 1);
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/offset_adjust.cpp b/ecmascript/mapleall/maple_be/src/cg/offset_adjust.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ed94c8593df9a59d31e13288ff336758134db09e
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/offset_adjust.cpp
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "offset_adjust.h"
+#if TARGAARCH64
+#include "aarch64_offset_adjust.h"
+#elif TARGRISCV64
+#include "riscv64_offset_adjust.h"
+#endif
+#if TARGARM32
+#include "arm32_offset_adjust.h"
+#endif
+
+#include "cgfunc.h"
+
+namespace maplebe {
+using namespace maple;
+bool CgFrameFinalize::PhaseRun(maplebe::CGFunc &f) {
+    FrameFinalize *offsetAdjustment = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    offsetAdjustment = GetPhaseAllocator()->New<AArch64FrameFinalize>(f);
+#endif
+#if TARGARM32
+    offsetAdjustment = GetPhaseAllocator()->New<Arm32FrameFinalize>(f);
+#endif
+    offsetAdjustment->Run();
+    return false;
+}
+
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/operand.cpp b/ecmascript/mapleall/maple_be/src/cg/operand.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2cb780cacbf2eda3a8b1989557458fe6b0947523
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/operand.cpp
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "operand.h"
+#include "common_utils.h"
+#include "mpl_logging.h"
+
+namespace maplebe {
+bool IsMoveWidableImmediate(uint64 val, uint32 bitLen) {
+    if (bitLen == k64BitSize) {
+        /* 0xHHHH000000000000 or 0x0000HHHH00000000: return true */
+        if (((val & ((static_cast<uint64>(0xffff)) << k48BitSize)) == val) ||
+            ((val & ((static_cast<uint64>(0xffff)) << k32BitSize)) == val)) {
+            return true;
+        }
+    } else {
+        /* get the lower 32 bits */
+        val &= static_cast<uint64>(0xffffffff);
+    }
+    /* 0x00000000HHHH0000 or 0x000000000000HHHH: return true */
+    return ((val & ((static_cast<uint64>(0xffff)) << k16BitSize)) == val ||
+            (val & ((static_cast<uint64>(0xffff)) << 0)) == val);
+}
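+
+/*
+ * E.g. 0x00000000FFFF0000 occupies a single 16-bit chunk at a 16-bit
+ * aligned position, so it is mov-widable (one MOVZ with shift), while
+ * 0x0000000012345678 spans two chunks and is not.
+ */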
+
+bool BetterUseMOVZ(uint64 val) {
+    int32 n16zerosChunks = 0;
+    int32 n16onesChunks = 0;
+    uint64 sa = 0;
+    /* a 64-bit value is split into four 16-bit chunks; check whether each chunk is all ones or all zeros */
+    for (uint64 i = 0; i < k4BitSize; ++i, sa += k16BitSize) {
+        uint64 chunkVal = (val >> (static_cast<uint32>(sa))) & 0x0000FFFFUL;
+        if (chunkVal == 0) {
+            ++n16zerosChunks;
+        } else if (chunkVal == 0xFFFFUL) {
+            ++n16onesChunks;
+        }
+    }
+    /*
+     * Note that since we have already checked whether the value
+     * can be moved with a single mov instruction,
+     * we should not expect either n16zerosChunks >= 3 or n16onesChunks >= 3.
+     */
+#if DEBUG
+    constexpr uint32 kN16ChunksCheck = 2;
+    DEBUG_ASSERT(n16zerosChunks <= kN16ChunksCheck, "n16zerosChunks ERR");
+    DEBUG_ASSERT(n16onesChunks <= kN16ChunksCheck, "n16onesChunks ERR");
+#endif
+    return (n16zerosChunks >= n16onesChunks);
+}
+
+bool RegOperand::operator==(const RegOperand &o) const {
+    regno_t myRn = GetRegisterNumber();
+    uint32 mySz = GetSize();
+    uint32 myFl = regFlag;
+    regno_t otherRn = o.GetRegisterNumber();
+    uint32 otherSz = o.GetSize();
+    uint32 otherFl = o.regFlag;
+
+    if (IsPhysicalRegister()) {
+        return (myRn == otherRn && mySz == otherSz && myFl == otherFl);
+    }
+    return (myRn == otherRn && mySz == otherSz);
+}
+
+bool RegOperand::operator<(const RegOperand &o) const {
+    regno_t myRn = GetRegisterNumber();
+    uint32 mySz = GetSize();
+    uint32 myFl = regFlag;
+    regno_t otherRn = o.GetRegisterNumber();
+    uint32 otherSz = o.GetSize();
+    uint32 otherFl = o.regFlag;
+    return myRn < otherRn || (myRn == otherRn && mySz < otherSz) ||
+           (myRn == otherRn && mySz == otherSz && myFl < otherFl);
+}
+
+Operand *MemOperand::GetOffset() const {
+    switch (addrMode) {
+        case kAddrModeBOi:
+            return GetOffsetOperand();
+        case kAddrModeBOrX:
+            return GetIndexRegister();
+        case kAddrModeLiteral:
+            break;
+        case kAddrModeLo12Li:
+            break;
+        default:
+            DEBUG_ASSERT(false, "error memoperand dump");
+            break;
+    }
+    return nullptr;
+}
+
+bool MemOperand::Equals(Operand &op) const {
+    if (!op.IsMemoryAccessOperand()) {
+        return false;
+    }
+    return Equals(static_cast<MemOperand&>(op));
+}
+
+bool MemOperand::Equals(const MemOperand &op) const {
+    if (&op == this) {
+        return true;
+    }
+
+    if (addrMode == op.GetAddrMode()) {
+        switch (addrMode) {
+            case kAddrModeBOi:
+                return (GetBaseRegister()->Equals(*op.GetBaseRegister()) &&
+                        GetOffsetImmediate()->Equals(*op.GetOffsetImmediate()));
+            case kAddrModeBOrX:
+                return (GetBaseRegister()->Equals(*op.GetBaseRegister()) &&
+                        GetIndexRegister()->Equals(*op.GetIndexRegister()) &&
+                        GetExtendAsString() == op.GetExtendAsString() &&
+                        ShiftAmount() == op.ShiftAmount());
+            case kAddrModeLiteral:
+                return GetSymbolName() == op.GetSymbolName();
+            case kAddrModeLo12Li:
+                return (GetBaseRegister()->Equals(*op.GetBaseRegister()) &&
+                        GetSymbolName() == op.GetSymbolName() &&
+                        GetOffsetImmediate()->Equals(*op.GetOffsetImmediate()));
+            default:
+                DEBUG_ASSERT(false, "error memoperand");
+                break;
+        }
+    }
+    return false;
+}
+
+bool MemOperand::Less(const Operand &right) const {
+    if (&right == this) {
+        return false;
+    }
+
+    /* for operands of different kinds */
+    if (GetKind() != right.GetKind()) {
+        return GetKind() < right.GetKind();
+    }
+
+    const MemOperand *rightOpnd = static_cast<const MemOperand*>(&right);
+    if (addrMode != rightOpnd->addrMode) {
+        return addrMode < rightOpnd->addrMode;
+    }
+
+    switch (addrMode) {
+        case kAddrModeBOi: {
+            DEBUG_ASSERT(idxOpt == kIntact, "Should not compare pre/post index addressing.");
+
+            RegOperand *baseReg = GetBaseRegister();
+            RegOperand *rbaseReg = rightOpnd->GetBaseRegister();
+            int32 nRet = baseReg->RegCompare(*rbaseReg);
+            if (nRet == 0) {
+                Operand *ofstOpnd = GetOffsetOperand();
+                const Operand *rofstOpnd = rightOpnd->GetOffsetOperand();
+                return ofstOpnd->Less(*rofstOpnd);
+            }
+            return nRet < 0;
+        }
+        case kAddrModeBOrX: {
+            if (noExtend != rightOpnd->noExtend) {
+                return noExtend;
+            }
+            if (!noExtend && extend != rightOpnd->extend) {
+                return extend < rightOpnd->extend;
+            }
+            RegOperand *indexReg = GetIndexRegister();
+            const RegOperand *rindexReg = rightOpnd->GetIndexRegister();
+            return indexReg->Less(*rindexReg);
+        }
+        case kAddrModeLiteral: {
+            return static_cast<const void*>(GetSymbol()) < static_cast<const void*>(rightOpnd->GetSymbol());
+        }
+        case kAddrModeLo12Li: {
+            if (GetSymbol() != rightOpnd->GetSymbol()) {
+                return static_cast<const void*>(GetSymbol()) < static_cast<const void*>(rightOpnd->GetSymbol());
+            }
+            Operand *ofstOpnd = GetOffsetOperand();
+            const Operand *rofstOpnd = rightOpnd->GetOffsetOperand();
+            return ofstOpnd->Less(*rofstOpnd);
+        }
+        default:
+            DEBUG_ASSERT(false, "Internal error.");
+            return false;
+    }
+}
+
+const char *CondOperand::ccStrs[kCcLast] = {
+    "EQ", "NE", "CS", "HS", "CC", "LO", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT", "LE", "AL"
+};
+
+bool CondOperand::Less(const Operand &right) const {
+    if (&right == this) {
+        return false;
+    }
+
+    /* for operands of different kinds */
+    if (GetKind() != right.GetKind()) {
+        return GetKind() < right.GetKind();
+    }
+
+    const CondOperand *rightOpnd = static_cast<const CondOperand*>(&right);
+
+    /* the same kind */
+    if (cc == CC_AL || rightOpnd->cc == CC_AL) {
+        return false;
+    }
+    return cc < rightOpnd->cc;
+}
+
+uint32 PhiOperand::GetLeastCommonValidBit() const {
+    uint32 leastCommonVb = 0;
+    for (auto phiOpnd : phiList) {
+        uint32 curVb = phiOpnd.second->GetValidBitsNum();
+        if (curVb > leastCommonVb) {
+            leastCommonVb = curVb;
+        }
+    }
+    return leastCommonVb;
+}
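+
+/*
+ * Note: despite its name, the function above returns the widest valid-bit
+ * count among the phi inputs; e.g. inputs with 8, 16 and 32 valid bits
+ * yield 32.
+ */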
+
+bool PhiOperand::IsRedundancy() const {
+    uint32 srcSsaIdx = 0;
+    for (auto phiOpnd : phiList) {
+        if (srcSsaIdx == 0) {
+            srcSsaIdx = phiOpnd.second->GetRegisterNumber();
+        }
+        if (srcSsaIdx != phiOpnd.second->GetRegisterNumber()) {
+            return false;
+        }
+    }
+    return true;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/optimize_common.cpp b/ecmascript/mapleall/maple_be/src/cg/optimize_common.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eba06363011710ca479156b2a066b56a5b219d55
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/optimize_common.cpp
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "optimize_common.h"
+#include "cgbb.h"
+#include "cg.h"
+#include "cg_option.h"
+#include "loop.h"
+#include "securec.h"
+
+/* This file provides common classes and functions for cfgo and ico. */
+namespace maplebe {
+void Optimizer::Run(const std::string &funcName, bool checkOnly) {
+    /* initialize the CFG optimization patterns */
+    InitOptimizePatterns();
+
+    /* for each pattern, search the cgFunc for optimization opportunities */
+    for (OptimizationPattern *p : diffPassPatterns) {
+        p->Search2Op(checkOnly);
+    }
+    /* search the cgFunc for multiple possible optimizations in one pass */
+    if (!singlePassPatterns.empty()) {
+        BB *curBB = cgFunc->GetFirstBB();
+        bool flag = false;
+        while (curBB != nullptr) {
+            for (OptimizationPattern *p : singlePassPatterns) {
+                if (p->Optimize(*curBB)) {
+                    flag = p->IsKeepPosition();
+                    p->SetKeepPosition(false);
+                    break;
+                }
+            }
+
+            if (flag) {
+                flag = false;
+            } else {
+                curBB = curBB->GetNext();
+            }
+        }
+    }
+
+    if (CGOptions::IsDumpOptimizeCommonLog()) {
+        constexpr int arrSize = 80;
+        char post[arrSize];
+        errno_t cpyRet = strcpy_s(post, arrSize, "post-");
+        CHECK_FATAL(cpyRet == EOK, "call strcpy_s failed");
+        errno_t catRes = strcat_s(post, arrSize, name);
+        CHECK_FATAL(catRes == EOK, "call strcat_s failed");
+        OptimizeLogger::GetLogger().Print(funcName);
+    }
+    OptimizeLogger::GetLogger().ClearLocal();
+}
+
+void OptimizationPattern::Search2Op(bool noOptimize) {
+    checkOnly = noOptimize;
+    BB *curBB = cgFunc->GetFirstBB();
+    while (curBB != nullptr) {
+        bool changed = false;
+        do {
+            changed = Optimize(*curBB);
+        } while (changed);
+        if (keepPosition) {
+            keepPosition = false;
+        } else {
+            curBB = curBB->GetNext();
+        }
+    }
+}
+
+void OptimizationPattern::Log(uint32 bbID) {
+    OptimizeLogger::GetLogger().Log(patternName.c_str());
+    DotGenerator::SetColor(bbID, dotColor.c_str());
+}
+
+std::map<uint32, std::string> DotGenerator::coloringMap;
+
+void DotGenerator::SetColor(uint32 bbID, const std::string &color) {
+    coloringMap[bbID] = color;
+}
+
+std::string DotGenerator::GetFileName(const MIRModule &mirModule, const std::string &filePreFix) {
+    std::string fileName;
+    if (!filePreFix.empty()) {
+        fileName.append(filePreFix);
+        fileName.append("-");
+    }
+    fileName.append(mirModule.GetFileName());
+    for (uint32 i = 0; i < fileName.length(); i++) {
+        if (fileName[i] == ';' || fileName[i] == '/' || fileName[i] == '|') {
+            fileName[i] = '_';
+        }
+    }
+
+    fileName.append(".dot");
+    return fileName;
+}
+
+static bool IsBackEdgeForLoop(const CGFuncLoops &loop, const BB &from, const BB &to) {
+    const BB *header = loop.GetHeader();
+    if (header->GetId() == to.GetId()) {
+        for (auto *be : loop.GetBackedge()) {
+            if (be->GetId() == from.GetId()) {
+                return true;
+            }
+        }
+    }
+    for (auto *inner : loop.GetInnerLoops()) {
+        if (IsBackEdgeForLoop(*inner, from, to)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DotGenerator::IsBackEdge(const CGFunc &cgFunction, const BB &from, const BB &to) {
+    for (const auto *loop : cgFunction.GetLoops()) {
+        if (IsBackEdgeForLoop(*loop, from, to)) {
+            return true;
+        }
+    }
+    return false;
+}
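+
+/*
+ * DumpEdge() below emits dot lines such as "BB1 -> BB2 [color=green];",
+ * coloring loop back edges red (EH edges, when included, are also red).
+ */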
+
+void DotGenerator::DumpEdge(const CGFunc &cgFunction, std::ofstream &cfgFileOfStream, bool isIncludeEH) {
+    FOR_ALL_BB_CONST(bb, &cgFunction) {
+        for (auto *succBB : bb->GetSuccs()) {
+            cfgFileOfStream << "BB" << bb->GetId();
+            cfgFileOfStream << " -> "
+                            << "BB" << succBB->GetId();
+            if (IsBackEdge(cgFunction, *bb, *succBB)) {
+                cfgFileOfStream << " [color=red]";
+            } else {
+                cfgFileOfStream << " [color=green]";
+            }
+            cfgFileOfStream << ";\n";
+        }
+        if (isIncludeEH) {
+            for (auto *ehSuccBB : bb->GetEhSuccs()) {
+                cfgFileOfStream << "BB" << bb->GetId();
+                cfgFileOfStream << " -> "
+                                << "BB" << ehSuccBB->GetId();
+                cfgFileOfStream << "[color=red]";
+                cfgFileOfStream << ";\n";
+            }
+        }
+    }
+}
+
+bool DotGenerator::FoundListOpndRegNum(ListOperand &listOpnd, const Insn &insnObj, regno_t vReg) {
+    bool exist = false;
+    for (auto op : listOpnd.GetOperands()) {
+        RegOperand *regOpnd = static_cast<RegOperand*>(op);
+        if (op->IsRegister() && regOpnd->GetRegisterNumber() == vReg) {
+            LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n";
+            exist = true;
+            break;
+        }
+    }
+    return exist;
+}
+
+bool DotGenerator::FoundMemAccessOpndRegNum(const MemOperand &memOperand, const Insn &insnObj, regno_t vReg) {
+    Operand *base = memOperand.GetBaseRegister();
+    Operand *offset = memOperand.GetIndexRegister();
+    bool exist = false;
+    if (base != nullptr && base->IsRegister()) {
+        RegOperand *regOpnd = static_cast<RegOperand*>(base);
+        if (regOpnd->GetRegisterNumber() == vReg) {
+            LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n";
+            exist = true;
+        }
+    } else if (offset != nullptr && offset->IsRegister()) {
+        RegOperand *regOpnd = static_cast<RegOperand*>(offset);
+        if (regOpnd->GetRegisterNumber() == vReg) {
+            LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n";
+            exist = true;
+        }
+    }
+    return exist;
+}
+
+bool DotGenerator::FoundNormalOpndRegNum(const RegOperand &regOpnd, const Insn &insnObj, regno_t vReg) {
+    bool exist = false;
+    if (regOpnd.GetRegisterNumber() == vReg) {
+        LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n";
+        exist = true;
+    }
+    return exist;
+}
+
+void DotGenerator::DumpBBInstructions(const CGFunc &cgFunction, regno_t vReg, std::ofstream &cfgFile) {
+    FOR_ALL_BB_CONST(bb, &cgFunction) {
+        if (vReg != 0) {
+            FOR_BB_INSNS_CONST(insn, bb) {
+                bool found = false;
+                uint32 opndNum = insn->GetOperandSize();
+                for (uint32 i = 0; i < opndNum; ++i) {
+                    Operand &opnd = insn->GetOperand(i);
+                    if (opnd.IsList()) {
+                        auto &listOpnd = static_cast<ListOperand&>(opnd);
+                        found = FoundListOpndRegNum(listOpnd, *insn, vReg);
+                    } else if (opnd.IsMemoryAccessOperand()) {
+                        auto &memOpnd = static_cast<MemOperand&>(opnd);
+                        found = FoundMemAccessOpndRegNum(memOpnd, *insn, vReg);
+                    } else {
+                        if (opnd.IsRegister()) {
+                            auto &regOpnd = static_cast<RegOperand&>(opnd);
+                            found = FoundNormalOpndRegNum(regOpnd, *insn, vReg);
+                        }
+                    }
+                    if (found) {
+                        break;
+                    }
+                }
+                if (found) {
+                    break;
+                }
+            }
+        }
+        cfgFile << "BB" << bb->GetId() << "[";
+        auto it = coloringMap.find(bb->GetId());
+        if (it != coloringMap.end()) {
+            cfgFile << "style=filled,fillcolor=" << it->second << ",";
+        }
+        if (bb->GetKind() == BB::kBBIf) {
+            cfgFile << "shape=diamond,label= \" BB" << bb->GetId() << ":\n";
+        } else {
+            cfgFile << "shape=box,label= \" BB" << bb->GetId() << ":\n";
+        }
+        cfgFile << "{ ";
+        cfgFile << bb->GetKindName() << "\n";
+        cfgFile << bb->GetFrequency() << "\n";
+        if (bb->GetLabIdx() != 0) {
+            cfgFile << "LabIdx=" << bb->GetLabIdx() << "\n";
+        }
+        cfgFile << "}\"];\n";
+    }
+}
+
+/* generate a dot file for the CFG */
+void DotGenerator::GenerateDot(const std::string &preFix, const CGFunc &cgFunc, const MIRModule &mod,
+                               bool includeEH, const std::string fname, regno_t vReg) {
+    std::ofstream cfgFile;
+    std::streambuf *coutBuf = std::cout.rdbuf(); /* keep the original cout buffer */
+    std::streambuf *buf = cfgFile.rdbuf();
+    std::cout.rdbuf(buf);
+    std::string fileName = GetFileName(mod, (preFix + "-" + fname));
fname)); + + cfgFile.open(fileName, std::ios::trunc); + CHECK_FATAL(cfgFile.is_open(), "Failed to open output file: %s", fileName.c_str()); + cfgFile << "digraph {\n"; + /* dump edge */ + DumpEdge(cgFunc, cfgFile, includeEH); + + /* dump instruction in each BB */ + DumpBBInstructions(cgFunc, vReg, cfgFile); + + cfgFile << "}\n"; + coloringMap.clear(); + cfgFile.flush(); + cfgFile.close(); + std::cout.rdbuf(coutBuf); +} + +void OptimizeLogger::Print(const std::string &funcName) { + if (!localStat.empty()) { + LogInfo::MapleLogger() << funcName << '\n'; + for (const auto &localStatPair : localStat) { + LogInfo::MapleLogger() << "Optimized " << localStatPair.first << ":" << localStatPair.second << "\n"; + } + + ClearLocal(); + LogInfo::MapleLogger() << "Total:" << '\n'; + for (const auto &globalStatPair : globalStat) { + LogInfo::MapleLogger() << "Optimized " << globalStatPair.first << ":" << globalStatPair.second << "\n"; + } + } +} + +void OptimizeLogger::Log(const std::string &patternName) { + auto itemInGlobal = globalStat.find(patternName); + if (itemInGlobal != globalStat.end()) { + itemInGlobal->second++; + } else { + (void)globalStat.emplace(std::pair(patternName, 1)); + } + auto itemInLocal = localStat.find(patternName); + if (itemInLocal != localStat.end()) { + itemInLocal->second++; + } else { + (void)localStat.emplace(std::pair(patternName, 1)); + } +} + +void OptimizeLogger::ClearLocal() { + localStat.clear(); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/peep.cpp b/ecmascript/mapleall/maple_be/src/cg/peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5b247fd02d4bb57aaf4ba41853c97e6b8b34396c --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/peep.cpp @@ -0,0 +1,726 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#if TARGAARCH64 +#include "aarch64_peep.h" +#elif TARGRISCV64 +#include "riscv64_peep.h" +#elif defined TARGX86_64 +#include "x64_peep.h" +#endif +#if TARGARM32 +#include "arm32_peep.h" +#endif + +namespace maplebe { +#if TARGAARCH64 +bool CGPeepPattern::IsCCRegCrossVersion(Insn &startInsn, Insn &endInsn, const RegOperand &ccReg) { + if (startInsn.GetBB() != endInsn.GetBB()) { + return true; + } + CHECK_FATAL(ssaInfo != nullptr, "must have ssaInfo"); + CHECK_FATAL(ccReg.IsSSAForm(), "cc reg must be ssa form"); + for (auto *curInsn = startInsn.GetNext(); curInsn != nullptr && curInsn != &endInsn; curInsn = curInsn->GetNext()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->IsCall()) { + return true; + } + uint32 opndNum = curInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = curInsn->GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + auto ®Opnd = static_cast(opnd); + if (!curInsn->IsRegDefined(regOpnd.GetRegisterNumber())) { + continue; + } + if (static_cast(opnd).IsOfCC()) { + VRegVersion *ccVersion = ssaInfo->FindSSAVersion(ccReg.GetRegisterNumber()); + VRegVersion *curCCVersion = ssaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + CHECK_FATAL(ccVersion != nullptr && curCCVersion != nullptr, "RegVersion must not be null based on ssa"); + CHECK_FATAL(!ccVersion->IsDeleted() && !curCCVersion->IsDeleted(), "deleted version"); + if (ccVersion->GetVersionIdx() != curCCVersion->GetVersionIdx()) { + return true; + } + } + } + } + return false; +} + +int64 CGPeepPattern::GetLogValueAtBase2(int64 val) const { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; +} + +InsnSet CGPeepPattern::GetAllUseInsn(const RegOperand &defReg) { + InsnSet allUseInsn; + if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { + VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); + CHECK_FATAL(defVersion != nullptr, "useVRegVersion must not be null based on ssa"); + for (auto insnInfo : defVersion->GetAllUseInsns()) { + Insn *secondInsn = insnInfo.second->GetInsn(); + allUseInsn.emplace(secondInsn); + } + } + return allUseInsn; +} + +Insn *CGPeepPattern::GetDefInsn(const RegOperand &useReg) { + if (!useReg.IsSSAForm()) { + return nullptr; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + DEBUG_ASSERT(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + CHECK_FATAL(!useVersion->IsDeleted(), "deleted version"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + return defInfo == nullptr ? 
nullptr : defInfo->GetInsn(); +} + +void CGPeepPattern::DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (!prevInsns.empty()) { + if ((replacedInsn == nullptr) && (newInsn == nullptr)) { + LogInfo::MapleLogger() << "======= RemoveInsns : {\n"; + } else { + LogInfo::MapleLogger() << "======= PrevInsns : {\n"; + } + for (auto *prevInsn : prevInsns) { + if (prevInsn != nullptr) { + LogInfo::MapleLogger() << "[primal form] "; + prevInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*prevInsn); + } + } + } + LogInfo::MapleLogger() << "}\n"; + } + if (replacedInsn != nullptr) { + LogInfo::MapleLogger() << "======= OldInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + replacedInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*replacedInsn); + } + } + if (newInsn != nullptr) { + LogInfo::MapleLogger() << "======= NewInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + newInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*newInsn); + } + } +} + +/* Check if a regOpnd is live after insn. True if live, otherwise false. */ +bool CGPeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) { + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (opnd.IsRegister() && tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + const InsnDesc *md = nextInsn->GetDesc(); + auto *regProp = (md->opndMD[static_cast(i)]); + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + /* Check if it is live-out. */ + return FindRegLiveOut(regOpnd, *insn.GetBB()); +} + +/* entrance for find if a regOpnd is live-out. */ +bool CGPeepPattern::FindRegLiveOut(const RegOperand ®Opnd, const BB &bb) { + /* + * Each time use peephole, index is initialized by the constructor, + * and the internal_flags3 should be cleared. + */ + if (PeepOptimizer::index == 0) { + FOR_ALL_BB(currbb, cgFunc) { + currbb->SetInternalFlag3(0); + } + } + /* before each invoke check function, increase index. */ + ++PeepOptimizer::index; + return CheckOpndLiveinSuccs(regOpnd, bb); +} + +/* Check regOpnd in succs/ehSuccs. True is live-out, otherwise false. 
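+ * Each successor is stamped with PeepOptimizer::index so that a BB is visited at
+ * most once per query; recursion continues past blocks that neither use nor
+ * redefine the register (kResNotFind).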
*/ +bool CGPeepPattern::CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const { + for (auto succ : bb.GetSuccs()) { + DEBUG_ASSERT(succ->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (succ->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + succ->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *succ); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *succ)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + for (auto ehSucc : bb.GetEhSuccs()) { + DEBUG_ASSERT(ehSucc->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (ehSucc->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + ehSucc->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *ehSucc); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *ehSucc)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + return CheckRegLiveinReturnBB(regOpnd, bb); +} + +/* Check if the reg is used in return BB */ +bool CGPeepPattern::CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const { +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return false; + } + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +/* + * Check regNO in current bb: + * kResUseFirst:first find use point; kResDefFirst:first find define point; + * kResNotFind:cannot find regNO, need to continue searching. 
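+ * e.g. (AArch64) in a BB whose first machine insn is "add w1, w0, #1", a query
+ * on w0 returns kResUseFirst, while a query on a register the BB never touches
+ * returns kResNotFind.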
+ */ +ReturnType CGPeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const { + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = insn->GetOperand(static_cast(i)); + auto *regProp = (md->opndMD[static_cast(i)]); + if (opnd.IsConditionCode()) { + if (regOpnd.GetRegisterNumber() == kRFLAG) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + if (insn->GetMachineOpcode() == MOP_asm) { + if (static_cast(i) == kAsmOutputListOpnd || static_cast(i) == kAsmClobberListOpnd) { + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResDefFirst; + } + } + continue; + } else if (static_cast(i) != kAsmInputListOpnd) { + continue; + } + /* fall thru for kAsmInputListOpnd */ + } + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr) { + DEBUG_ASSERT(base->IsRegister(), "internal error."); + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsRegister()) { + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } + } + } + return kResNotFind; +} + +int PeepPattern::logValueAtBase2(int64 val) const { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : (-1); +} + +/* Check if a regOpnd is live after insn. True if live, otherwise false. 
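+ * The forward scan in this BB treats a read (including use as a memory base or
+ * index register) as live and a clean redefinition as dead; if the scan falls
+ * off the end of the BB, FindRegLiveOut continues the check on the CFG.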
*/ +bool PeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) { + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (opnd.IsRegister() && tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + const InsnDesc *md = nextInsn->GetDesc(); + auto *regProp = (md->opndMD[static_cast(i)]); + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + /* Check if it is live-out. */ + return FindRegLiveOut(regOpnd, *insn.GetBB()); +} + +/* entrance for find if a regOpnd is live-out. */ +bool PeepPattern::FindRegLiveOut(const RegOperand ®Opnd, const BB &bb) { + /* + * Each time use peephole, index is initialized by the constructor, + * and the internal_flags3 should be cleared. + */ + if (PeepOptimizer::index == 0) { + FOR_ALL_BB(currbb, &cgFunc) { + currbb->SetInternalFlag3(0); + } + } + /* before each invoke check function, increase index. */ + ++PeepOptimizer::index; + return CheckOpndLiveinSuccs(regOpnd, bb); +} + +/* Check regOpnd in succs/ehSuccs. True is live-out, otherwise false. 
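+ * This is the non-SSA PeepPattern counterpart of CGPeepPattern::CheckOpndLiveinSuccs
+ * above; it relies on the same internalFlag3 visit-stamping to keep the traversal
+ * linear in the number of blocks.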
*/ +bool PeepPattern::CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const { + for (auto succ : bb.GetSuccs()) { + DEBUG_ASSERT(succ->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (succ->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + succ->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *succ); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *succ)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + for (auto ehSucc : bb.GetEhSuccs()) { + DEBUG_ASSERT(ehSucc->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (ehSucc->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + ehSucc->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *ehSucc); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *ehSucc)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + return CheckRegLiveinReturnBB(regOpnd, bb); +} + +/* Check if the reg is used in return BB */ +bool PeepPattern::CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const { +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return false; + } + PrimType returnType = cgFunc.GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +/* + * Check regNO in current bb: + * kResUseFirst:first find use point; kResDefFirst:first find define point; + * kResNotFind:cannot find regNO, need to continue searching. 
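+ * Operands are scanned from the last index down to 0; destination operands
+ * typically sit at the front of the operand list, so when an insn both reads
+ * and redefines regNO the use is seen first and kResUseFirst is returned.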
+ */ +ReturnType PeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const { + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = insn->GetOperand(static_cast(i)); + auto *regProp = (md->opndMD[static_cast(i)]); + if (opnd.IsConditionCode()) { + if (regOpnd.GetRegisterNumber() == kRFLAG) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + if (insn->GetMachineOpcode() == MOP_asm) { + if (static_cast(i) == kAsmOutputListOpnd || static_cast(i) == kAsmClobberListOpnd) { + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResDefFirst; + } + } + continue; + } else if (static_cast(i) != kAsmInputListOpnd) { + continue; + } + /* fall thru for kAsmInputListOpnd */ + } + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr) { + DEBUG_ASSERT(base->IsRegister(), "internal error."); + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsRegister()) { + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } + } + } + return kResNotFind; +} + +bool PeepPattern::IsMemOperandOptPattern(const Insn &insn, Insn &nextInsn) { + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + auto *memOpnd = static_cast(nextInsn.GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "null ptr check"); + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return false; + } + /* Only for immediate is 0. */ + if (memOpnd->GetOffsetImmediate()->GetOffsetValue() != 0) { + return false; + } + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + return false; + } + + auto &oldBaseOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* Check if dest operand of insn is idential with base register of nextInsn. */ + if (memOpnd->GetBaseRegister() != &oldBaseOpnd) { + return false; + } + +#ifdef USE_32BIT_REF + if (nextInsn.IsAccessRefField() && nextInsn.GetOperand(kInsnFirstOpnd).GetSize() > k32BitSize) { + return false; + } +#endif + /* Check if x0 is used after ldr insn, and if it is in live-out. 
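+ * If the old base register is still live after nextInsn, folding its definition
+ * into the addressing mode would clobber a value that is still needed, so the
+ * pattern is rejected.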
*/ + if (IfOperandIsLiveAfterInsn(oldBaseOpnd, nextInsn)) { + return false; + } + return true; +} + +template +void PeepOptimizer::Run() { + auto *patterMatcher = peepOptMemPool->New(cgFunc, peepOptMemPool); + patterMatcher->InitOpts(); + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + patterMatcher->Run(*bb, *insn); + } + } +} + +int32 PeepOptimizer::index = 0; + +void PeepHoleOptimizer::Peephole0() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +void PeepHoleOptimizer::PeepholeOpt() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +void PeepHoleOptimizer::PrePeepholeOpt() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +void PeepHoleOptimizer::PrePeepholeOpt1() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +/* === SSA form === */ +bool CgPeepHole::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *cgssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CHECK_FATAL((cgssaInfo != nullptr), "Get ssaInfo failed!"); + MemPool *mp = GetPhaseMemPool(); + auto *cgpeep = mp->New(f, mp, cgssaInfo); + CHECK_FATAL((cgpeep != nullptr), "Creat AArch64CGPeepHole failed!"); + cgpeep->Run(); + return false; +} + +void CgPeepHole::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole, cgpeephole) +#endif +/* === Physical Pre Form === */ +bool CgPrePeepHole::PhaseRun(maplebe::CGFunc &f) { + MemPool *mp = GetPhaseMemPool(); + #if defined TARGAARCH64 + auto *cgpeep = mp->New(f, mp); + #elif defined TARGX86_64 + auto *cgpeep = mp->New(f, mp); + #endif + CHECK_FATAL(cgpeep != nullptr, "PeepHoleOptimizer instance create failure"); + cgpeep->Run(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole, cgprepeephole) + +/* === Physical Post Form === */ +bool CgPostPeepHole::PhaseRun(maplebe::CGFunc &f) { + MemPool *mp = GetPhaseMemPool(); + #if defined TARGAARCH64 + auto *cgpeep = mp->New(f, mp); + #elif defined TARGX86_64 + auto *cgpeep = mp->New(f, mp); + #endif + CHECK_FATAL(cgpeep != nullptr, "PeepHoleOptimizer instance create failure"); + cgpeep->Run(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostPeepHole, cgpostpeephole) + +#if TARGAARCH64 +bool CgPrePeepHole0::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PrePeepholeOpt(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole0, prepeephole) + +bool CgPrePeepHole1::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PrePeepholeOpt1(); + return false; +} 
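+/* The MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP lines below register each wrapper
+ * with the CG phase manager under the given phase name; the CANSKIP variant
+ * marks a phase that the phase manager may skip without breaking correctness. */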
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole1, prepeephole1) + +bool CgPeepHole0::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->Peephole0(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole0, peephole0) + +bool CgPeepHole1::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PeepholeOpt(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole1, peephole) +#endif + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/pressure.cpp b/ecmascript/mapleall/maple_be/src/cg/pressure.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65369ba5707882185658aad3d6f7271b80210807 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/pressure.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "pressure.h" +#if TARGAARCH64 +#include "aarch64_schedule.h" +#elif TARGRISCV64 +#include "riscv64_schedule.h" +#endif +#include "deps.h" + +namespace maplebe { +/* ------- RegPressure function -------- */ +int32 RegPressure::maxRegClassNum = 0; + +/* print regpressure information */ +void RegPressure::DumpRegPressure() const { + PRINT_STR_VAL("Priority: ", priority); + PRINT_STR_VAL("maxDepth: ", maxDepth); + PRINT_STR_VAL("near: ", near); + PRINT_STR_VAL("callNum: ", callNum); + + LogInfo::MapleLogger() << "\n"; +} +} /* namespace maplebe */ + diff --git a/ecmascript/mapleall/maple_be/src/cg/proepilog.cpp b/ecmascript/mapleall/maple_be/src/cg/proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..58b4d165d4d681f1ab12490312f2b3a4da133d2a --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/proepilog.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "proepilog.h"
+#if TARGAARCH64
+#include "aarch64_proepilog.h"
+#elif TARGRISCV64
+#include "riscv64_proepilog.h"
+#endif
+#if TARGARM32
+#include "arm32_proepilog.h"
+#endif
+#if TARGX86_64
+#include "x64_proepilog.h"
+#endif
+#include "cgfunc.h"
+#include "cg.h"
+
+namespace maplebe {
+using namespace maple;
+
+Insn *GenProEpilog::InsertCFIDefCfaOffset(int32 &cfiOffset, Insn &insertAfter) {
+    if (!cgFunc.GenCfi()) {
+        return &insertAfter;
+    }
+    cfiOffset = AddtoOffsetFromCFA(cfiOffset);
+    Insn &cfiInsn = cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa_offset).
+        AddOpndChain(cgFunc.CreateCfiImmOperand(cfiOffset, k64BitSize));
+    Insn *newIPoint = cgFunc.GetCurBB()->InsertInsnAfter(insertAfter, cfiInsn);
+    cgFunc.SetDbgCallFrameOffset(cfiOffset);
+    return newIPoint;
+}
+
+/* There are two stack-protector modes:
+ * 1. stack protector all: applied to every function;
+ * 2. stack protector strong: applied to any function that
+ *    <1> invokes an alloca function;
+ *    <2> takes the address of a stack object;
+ *    <3> has a callee that uses the return stack slot;
+ *    <4> has a local symbol of array (or array-containing aggregate) type.
+ */
+void GenProEpilog::NeedStackProtect() {
+    DEBUG_ASSERT(stackProtect == false, "no stack protect default");
+    CG *currCG = cgFunc.GetCG();
+    if (currCG->IsStackProtectorAll()) {
+        stackProtect = true;
+        return;
+    }
+
+    if (!currCG->IsStackProtectorStrong()) {
+        return;
+    }
+
+    if (cgFunc.HasAlloca()) {
+        stackProtect = true;
+        return;
+    }
+
+    /* check if the function uses a stack address or a callee uses the return stack slot */
+    auto stackProtectInfo = cgFunc.GetStackProtectInfo();
+    if ((stackProtectInfo & kAddrofStack) != 0 || (stackProtectInfo & kRetureStackSlot) != 0) {
+        stackProtect = true;
+        return;
+    }
+
+    /* check if a local symbol is an array, or an aggregate containing one */
+    auto &mirFunction = cgFunc.GetFunction();
+    uint32 symTabSize = static_cast<uint32>(mirFunction.GetSymTab()->GetSymbolTableSize());
+    for (uint32 i = 0; i < symTabSize; ++i) {
+        MIRSymbol *symbol = mirFunction.GetSymTab()->GetSymbolFromStIdx(i);
+        if (symbol == nullptr || symbol->GetStorageClass() != kScAuto || symbol->IsDeleted()) {
+            continue;
+        }
+        TyIdx tyIdx = symbol->GetTyIdx();
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+        if (type->GetKind() == kTypeArray) {
+            stackProtect = true;
+            return;
+        }
+
+        if (type->IsStructType() && IncludeArray(*type)) {
+            stackProtect = true;
+            return;
+        }
+    }
+}
+
+bool GenProEpilog::IncludeArray(const MIRType &type) const {
+    DEBUG_ASSERT(type.IsStructType(), "agg must be one of class/struct/union");
+    auto &structType = static_cast<const MIRStructType&>(type);
+    /* Walk all fields of the struct, recursing into nested aggregates. */
+    auto num = static_cast<uint32>(structType.GetFieldsSize());
+    for (uint32 i = 0; i < num; ++i) {
+        MIRType *elemType = structType.GetElemType(i);
+        if (elemType->GetKind() == kTypeArray) {
+            return true;
+        }
+        if (elemType->IsStructType() && IncludeArray(*elemType)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool CgGenProEpiLog::PhaseRun(maplebe::CGFunc &f) {
+    GenProEpilog *genPE = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    genPE = GetPhaseAllocator()->New<AArch64GenProEpilog>(f, *ApplyTempMemPool());
+#endif
+#if TARGARM32
+    genPE = GetPhaseAllocator()->New<Arm32GenProEpilog>(f);
+#endif
+#if TARGX86_64
+    genPE = GetPhaseAllocator()->New<X64GenProEpilog>(f);
+#endif
+    genPE->Run();
+    return false;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/ra_opt.cpp b/ecmascript/mapleall/maple_be/src/cg/ra_opt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..df4101fcf2801f8e07abe1928de69346673fd2e6
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/ra_opt.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan Permissive Software License v2.
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0.
+ * You may obtain a copy of MulanPSL - 2.0 at:
+ *
+ *   https://opensource.org/licenses/MulanPSL-2.0
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the MulanPSL - 2.0 for more details.
+ */
+
+#include "cgfunc.h"
+#if TARGAARCH64
+#include "aarch64_ra_opt.h"
+#elif TARGRISCV64
+#include "riscv64_ra_opt.h"
+#endif
+
+namespace maplebe {
+using namespace maple;
+
+bool CgRaOpt::PhaseRun(maplebe::CGFunc &f) {
+    MemPool *memPool = GetPhaseMemPool();
+    RaOpt *raOpt = nullptr;
+#if TARGAARCH64
+    raOpt = memPool->New<AArch64RaOpt>(f, *memPool);
+#elif TARGRISCV64
+    raOpt = memPool->New<Riscv64RaOpt>(f, *memPool);
+#endif
+
+    if (raOpt) {
+        raOpt->Run();
+    }
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgRaOpt, raopt)
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/reaching.cpp b/ecmascript/mapleall/maple_be/src/cg/reaching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f70549c574d293e4d19bf38b0a05d577a9d3ed70
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/reaching.cpp
@@ -0,0 +1,1414 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#if TARGAARCH64
+#include "aarch64_reaching.h"
+#include "aarch64_isa.h"
+#elif TARGRISCV64
+#include "riscv64_reaching.h"
+#endif
+#if TARGARM32
+#include "arm32_reaching.h"
+#endif
+#include "cg_option.h"
+#include "cgfunc.h"
+#include "cg.h"
+
+/*
+ * This phase builds bb->in and bb->out information for stack MemOperands and RegOperands. Each bit in DataInfo
+ * represents whether the register or memory slot is live or not.
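+ * (Registers are keyed by register number; stack slots are keyed by their frame offset.)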
+ * To save storage space, the stack offset is divided by 4, since the offset is always a multiple of 4.
+ * The algorithm consists of two parts:
+ * 1. initialize each BB
+ *    (1) insert pseudoInsns for function parameters, ehBB, and return R0/V0
+ *    (2) init bb->gen, bb->use, bb->out
+ * 2. build in and out
+ *    (1) In[BB] = Union of out[Parents(bb)]
+ *    (2) Out[BB] = gen[BB] union in[BB]
+ * Additionally, this phase provides several common data-flow query functions that
+ * optimization phases can call conveniently.
+ */
+namespace maplebe {
+ReachingDefinition::ReachingDefinition(CGFunc &func, MemPool &memPool)
+    : AnalysisResult(&memPool), cgFunc(&func), rdAlloc(&memPool), stackMp(func.GetStackMemPool()),
+      pseudoInsns(rdAlloc.Adapter()), kMaxBBNum(cgFunc->NumBBs() + 1), normalBBSet(rdAlloc.Adapter()),
+      cleanUpBBSet(rdAlloc.Adapter()) {}
+
+/* check whether the opnd is the stack/frame register or not */
+bool ReachingDefinition::IsFrameReg(const Operand &opnd) const {
+    if (!opnd.IsRegister()) {
+        return false;
+    }
+    auto &reg = static_cast<const RegOperand&>(opnd);
+    return cgFunc->IsFrameReg(reg);
+}
+
+/* initialize bb->out; initially bb->out only includes the generated DataInfo */
+void ReachingDefinition::InitOut(const BB &bb) {
+    if (mode & kRDRegAnalysis) {
+        *regOut[bb.GetId()] = *regGen[bb.GetId()];
+    }
+    if (mode & kRDMemAnalysis) {
+        *memOut[bb.GetId()] = *memGen[bb.GetId()];
+    }
+}
+
+/* once the DataInfo will not be used later, it should be cleared. */
+void ReachingDefinition::ClearDefUseInfo() {
+    for (auto insn : pseudoInsns) {
+        /* Keep return pseudos to extend the return register's liveness to 'ret'.
+         * Backward propagation can move the return register definition far from the return.
+         */
+#ifndef TARGX86_64
+        if (insn->GetMachineOpcode() == MOP_pseudo_ret_int || insn->GetMachineOpcode() == MOP_pseudo_ret_float) {
+            continue;
+        }
+#endif
+        insn->GetBB()->RemoveInsn(*insn);
+    }
+    FOR_ALL_BB(bb, cgFunc) {
+        delete (regGen[bb->GetId()]);
+        regGen[bb->GetId()] = nullptr;
+        delete (regUse[bb->GetId()]);
+        regUse[bb->GetId()] = nullptr;
+        delete (regIn[bb->GetId()]);
+        regIn[bb->GetId()] = nullptr;
+        delete (regOut[bb->GetId()]);
+        regOut[bb->GetId()] = nullptr;
+        delete (memGen[bb->GetId()]);
+        memGen[bb->GetId()] = nullptr;
+        delete (memUse[bb->GetId()]);
+        memUse[bb->GetId()] = nullptr;
+        delete (memIn[bb->GetId()]);
+        memIn[bb->GetId()] = nullptr;
+        delete (memOut[bb->GetId()]);
+        memOut[bb->GetId()] = nullptr;
+    }
+    regGen.clear();
+    regGen.shrink_to_fit();
+    regUse.clear();
+    regUse.shrink_to_fit();
+    regIn.clear();
+    regIn.shrink_to_fit();
+    regOut.clear();
+    regOut.shrink_to_fit();
+    memGen.clear();
+    memGen.shrink_to_fit();
+    memUse.clear();
+    memUse.shrink_to_fit();
+    memIn.clear();
+    memIn.shrink_to_fit();
+    memOut.clear();
+    memOut.shrink_to_fit();
+    cgFunc->SetRD(nullptr);
+}
+
+/*
+ * find the use insns for a register.
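+ * The suffix of the defining BB is scanned first; other blocks (and, for
+ * non-cleanup code, the firstCleanUpBB region) are explored only while
+ * regOut still has the register's bit set.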
+ * input: + * insn: the insn in which register is defined + * regNO: the No of register + * isRegNO: this argument is used to form function overloading + * return: + * the set of used insns for register + */ +InsnSet ReachingDefinition::FindUseForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const { + InsnSet useInsnSet; + uint32 regNO = indexOrRegNO; + if (!isRegNO) { + Operand &opnd = insn.GetOperand(indexOrRegNO); + auto ®Opnd = static_cast(opnd); + regNO = regOpnd.GetRegisterNumber(); + } + /* register may be redefined in current bb */ + bool findFinish = FindRegUseBetweenInsn(regNO, insn.GetNext(), insn.GetBB()->GetLastInsn(), useInsnSet); + std::vector visitedBB(kMaxBBNum, false); + if (findFinish || !regOut[insn.GetBB()->GetId()]->TestBit(regNO)) { + if (!insn.GetBB()->GetEhSuccs().empty()) { + DFSFindUseForRegOpnd(*insn.GetBB(), regNO, visitedBB, useInsnSet, true); + } + } else { + DFSFindUseForRegOpnd(*insn.GetBB(), regNO, visitedBB, useInsnSet, false); + } + + if (!insn.GetBB()->IsCleanup() && firstCleanUpBB != nullptr) { + if (regUse[firstCleanUpBB->GetId()]->TestBit(regNO)) { + findFinish = FindRegUseBetweenInsn(regNO, firstCleanUpBB->GetFirstInsn(), + firstCleanUpBB->GetLastInsn(), useInsnSet); + if (findFinish || !regOut[firstCleanUpBB->GetId()]->TestBit(regNO)) { + return useInsnSet; + } + } + DFSFindUseForRegOpnd(*firstCleanUpBB, regNO, visitedBB, useInsnSet, false); + } + + return useInsnSet; +} + +/* + * find used insns for register iteratively. + * input: + * startBB: find used insns starting from startBB + * regNO: the No of register to be find + * visitedBB: record these visited BB + * useInsnSet: used insns of register is saved in this set + */ +void ReachingDefinition::DFSFindUseForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc = false) const { + if (!onlyFindForEhSucc) { + for (auto succBB : startBB.GetSuccs()) { + if (!regIn[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (regUse[succBB->GetId()]->TestBit(regNO)) { + findFinish = FindRegUseBetweenInsn(regNO, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[succBB->GetId()]->TestBit(regNO)) { + DFSFindUseForRegOpnd(*succBB, regNO, visitedBB, useInsnSet, false); + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!regIn[ehSuccBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + + bool findFinish = false; + if (regUse[ehSuccBB->GetId()]->TestBit(regNO)) { + findFinish = FindRegUseBetweenInsn(regNO, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + } else if (regGen[ehSuccBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[ehSuccBB->GetId()]->TestBit(regNO)) { + DFSFindUseForRegOpnd(*ehSuccBB, regNO, visitedBB, useInsnSet, false); + } + } +} + +/* check whether register defined in regDefInsn has used insns */ +bool ReachingDefinition::RegHasUsePoint(uint32 regNO, Insn ®DefInsn) const { + InsnSet useInsnSet; + bool findFinish = FindRegUseBetweenInsn(regNO, regDefInsn.GetNext(), regDefInsn.GetBB()->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + if (!findFinish) { + std::vector visitedBB(kMaxBBNum, false); + return 
RegIsUsedInOtherBB(*regDefInsn.GetBB(), regNO, visitedBB); + } + return false; +} + +/* check whether register is used in other BB except startBB */ +bool ReachingDefinition::RegIsUsedInOtherBB(const BB &startBB, uint32 regNO, std::vector &visitedBB) const { + InsnSet useInsnSet; + for (auto succBB : startBB.GetSuccs()) { + if (!regIn[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (regUse[succBB->GetId()]->TestBit(regNO)) { + if (!regGen[succBB->GetId()]->TestBit(regNO)) { + return true; + } + useInsnSet.clear(); + findFinish = FindRegUseBetweenInsn(regNO, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[succBB->GetId()]->TestBit(regNO)) { + if (RegIsUsedInOtherBB(*succBB, regNO, visitedBB)) { + return true; + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!regIn[ehSuccBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + + bool findFinish = false; + if (regUse[ehSuccBB->GetId()]->TestBit(regNO)) { + if (!regGen[ehSuccBB->GetId()]->TestBit(regNO)) { + return true; + } + useInsnSet.clear(); + findFinish = FindRegUseBetweenInsn(regNO, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + } else if (regGen[ehSuccBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[ehSuccBB->GetId()]->TestBit(regNO)) { + if (RegIsUsedInOtherBB(*ehSuccBB, regNO, visitedBB)) { + return true; + } + } + } + + return false; +} + +bool ReachingDefinition::RegIsUsedInCleanUpBB(uint32 regNO) const { + if (firstCleanUpBB == nullptr) { + return false; + } + InsnSet useInsnSet; + if (regUse[firstCleanUpBB->GetId()]->TestBit(regNO)) { + bool findFinish = FindRegUseBetweenInsn(regNO, firstCleanUpBB->GetFirstInsn(), + firstCleanUpBB->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + if (findFinish) { + return false; + } + } + + std::vector visitedBB(kMaxBBNum, false); + DFSFindUseForRegOpnd(*firstCleanUpBB, regNO, visitedBB, useInsnSet, false); + if (useInsnSet.empty()) { + return true; + } + + return false; +} + +/* + * find used insns for stack memory operand iteratively. 
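+ * Offsets are tracked at kMemZoomSize (4-byte) granularity, matching the bit
+ * layout of the memory DataInfo.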
+ * input: + * startBB: find used insns starting from startBB + * offset: the offset of memory to be find + * visitedBB: record these visited BB + * useInsnSet: used insns of stack memory operand is saved in this set + */ +void ReachingDefinition::DFSFindUseForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc = false) const { + if (!onlyFindForEhSucc) { + for (auto succBB : startBB.GetSuccs()) { + if (!memIn[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (memUse[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = FindMemUseBetweenInsn(offset, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + } else if (memGen[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = true; + } + if (!findFinish && memOut[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindUseForMemOpnd(*succBB, offset, visitedBB, useInsnSet); + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!memIn[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + bool findFinish = false; + if (memUse[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = FindMemUseBetweenInsn(offset, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + } else if (memGen[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = true; + } + if (!findFinish && memOut[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindUseForMemOpnd(*ehSuccBB, offset, visitedBB, useInsnSet); + } + } +} + +/* Out[BB] = gen[BB] union in[BB]. if bb->out changed, return true. */ +bool ReachingDefinition::GenerateOut(const BB &bb) { + bool outInfoChanged = false; + if (mode & kRDRegAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbRegOutBak = regOut[bb.GetId()]->Clone(alloc); + *regOut[bb.GetId()] = *(regIn[bb.GetId()]); + regOut[bb.GetId()]->OrBits(*regGen[bb.GetId()]); + if (!regOut[bb.GetId()]->IsEqual(bbRegOutBak)) { + outInfoChanged = true; + } + } + + if (mode & kRDMemAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbMemOutBak = memOut[bb.GetId()]->Clone(alloc); + *memOut[bb.GetId()] = *memIn[bb.GetId()]; + memOut[bb.GetId()]->OrBits(*memGen[bb.GetId()]); + if (!memOut[bb.GetId()]->IsEqual(bbMemOutBak)) { + outInfoChanged = true; + } + } + return outInfoChanged; +} + +bool ReachingDefinition::GenerateOut(const BB &bb, const std::set &infoIndex, const bool isReg) { + bool outInfoChanged = false; + if (isReg) { + for (auto index : infoIndex) { + uint64 bbRegOutBak = regOut[bb.GetId()]->GetElem(index); + regOut[bb.GetId()]->SetElem(index, regIn[bb.GetId()]->GetElem(index)); + regOut[bb.GetId()]->OrDesignateBits(*regGen[bb.GetId()], index); + if (!outInfoChanged && (bbRegOutBak != regOut[bb.GetId()]->GetElem(index))) { + outInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 bbMemOutBak = memOut[bb.GetId()]->GetElem(index); + memOut[bb.GetId()]->SetElem(index, memIn[bb.GetId()]->GetElem(index)); + memOut[bb.GetId()]->OrDesignateBits(*memGen[bb.GetId()], index); + if (bbMemOutBak != memOut[bb.GetId()]->GetElem(index)) { + outInfoChanged = true; + } + } + } + return outInfoChanged; +} + + +/* In[BB] = Union all of out[Parents(bb)]. return true if bb->in changed. 
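+ * Both normal preds and EH preds feed the union, e.g. with preds {P1, P2}:
+ *   in[bb] = out[P1] | out[P2]
+ * The previous in[] is cloned first so the caller can detect a fixed point.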
*/ +bool ReachingDefinition::GenerateIn(const BB &bb) { + bool inInfoChanged = false; + if (mode & kRDRegAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbRegInBak = regIn[bb.GetId()]->Clone(alloc); + for (auto predBB : bb.GetPreds()) { + regIn[bb.GetId()]->OrBits(*regOut[predBB->GetId()]); + } + for (auto predEhBB : bb.GetEhPreds()) { + regIn[bb.GetId()]->OrBits(*regOut[predEhBB->GetId()]); + } + + if (!regIn[bb.GetId()]->IsEqual(bbRegInBak)) { + inInfoChanged = true; + } + } + if (mode & kRDMemAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &memInBak = memIn[bb.GetId()]->Clone(alloc); + for (auto predBB : bb.GetPreds()) { + memIn[bb.GetId()]->OrBits(*memOut[predBB->GetId()]); + } + for (auto predEhBB : bb.GetEhPreds()) { + memIn[bb.GetId()]->OrBits(*memOut[predEhBB->GetId()]); + } + + if (!memIn[bb.GetId()]->IsEqual(memInBak)) { + inInfoChanged = true; + } + } + return inInfoChanged; +} + +/* In[BB] = Union all of out[Parents(bb)]. return true if bb->in changed. */ +bool ReachingDefinition::GenerateIn(const BB &bb, const std::set &infoIndex, const bool isReg) { + bool inInfoChanged = false; + + if (isReg) { + for (auto index : infoIndex) { + uint64 bbRegInBak = regIn[bb.GetId()]->GetElem(index); + regIn[bb.GetId()]->SetElem(index, 0ULL); + for (auto predBB : bb.GetPreds()) { + regIn[bb.GetId()]->OrDesignateBits(*regOut[predBB->GetId()], index); + } + for (auto predEhBB : bb.GetEhPreds()) { + regIn[bb.GetId()]->OrDesignateBits(*regOut[predEhBB->GetId()], index); + } + + if (bbRegInBak != regIn[bb.GetId()]->GetElem(index)) { + inInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 bbMemInBak = memIn[bb.GetId()]->GetElem(index); + memIn[bb.GetId()]->SetElem(index, 0ULL); + for (auto predBB : bb.GetPreds()) { + memIn[bb.GetId()]->OrDesignateBits(*memOut[predBB->GetId()], index); + } + for (auto predEhBB : bb.GetEhPreds()) { + memIn[bb.GetId()]->OrDesignateBits(*memOut[predEhBB->GetId()], index); + } + + if (bbMemInBak != memIn[bb.GetId()]->GetElem(index)) { + inInfoChanged = true; + } + } + } + return inInfoChanged; +} + + +/* In[firstCleanUpBB] = Union all of out[bbNormalSet] */ +bool ReachingDefinition::GenerateInForFirstCleanUpBB() { + CHECK_NULL_FATAL(firstCleanUpBB); + if (mode & kRDRegAnalysis) { + regIn[firstCleanUpBB->GetId()]->ResetAllBit(); + } + if (mode & kRDMemAnalysis) { + memIn[firstCleanUpBB->GetId()]->ResetAllBit(); + } + + for (auto normalBB : normalBBSet) { + if (mode & kRDRegAnalysis) { + regIn[firstCleanUpBB->GetId()]->OrBits(*regOut[normalBB->GetId()]); + } + + if (mode & kRDMemAnalysis) { + memIn[firstCleanUpBB->GetId()]->OrBits(*memOut[normalBB->GetId()]); + } + } + + return ((regIn[firstCleanUpBB->GetId()] != nullptr && regIn[firstCleanUpBB->GetId()]->Size() > 0) || + (memIn[firstCleanUpBB->GetId()] != nullptr && memIn[firstCleanUpBB->GetId()]->Size() > 0)); +} + +bool ReachingDefinition::GenerateInForFirstCleanUpBB(bool isReg, const std::set &infoIndex) { + CHECK_NULL_FATAL(firstCleanUpBB); + bool inInfoChanged = false; + if (isReg) { + for (auto index : infoIndex) { + uint64 regInElemBak = regIn[firstCleanUpBB->GetId()]->GetElem(index); + regIn[firstCleanUpBB->GetId()]->SetElem(index, 0ULL); + for (auto &normalBB : normalBBSet) { + regIn[firstCleanUpBB->GetId()]->OrDesignateBits(*regOut[normalBB->GetId()], index); + } + if (!inInfoChanged && (regIn[firstCleanUpBB->GetId()]->GetElem(index) != regInElemBak)) { + inInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 memInElemBak = 
memIn[firstCleanUpBB->GetId()]->GetElem(index); + memIn[firstCleanUpBB->GetId()]->SetElem(index, 0ULL); + for (auto &normalBB : normalBBSet) { + memIn[firstCleanUpBB->GetId()]->OrDesignateBits(*memOut[normalBB->GetId()], index); + } + if (!inInfoChanged && (memIn[firstCleanUpBB->GetId()]->GetElem(index) != memInElemBak)) { + inInfoChanged = true; + } + } + } + return inInfoChanged; +} + +/* allocate memory for DataInfo of bb */ +void ReachingDefinition::InitRegAndMemInfo(const BB &bb) { + if (mode & kRDRegAnalysis) { + const uint32 kMaxRegCount = cgFunc->GetMaxVReg(); + regGen[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regUse[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regIn[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regOut[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + } + + if (mode & kRDMemAnalysis) { + const int32 kStackSize = GetStackSize(); + memGen[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memUse[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memIn[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memOut[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + } +} + +/* insert pseudoInsns for function parameters, ehBB, and return R0/V0. init bb->gen, bb->use, bb->out */ +void ReachingDefinition::Initialize() { + InitDataSize(); + AddRetPseudoInsns(); + FOR_ALL_BB(bb, cgFunc) { + InitRegAndMemInfo(*bb); + } + FOR_ALL_BB(bb, cgFunc) { + if (bb == cgFunc->GetFirstBB()) { + InitStartGen(); + } + if (!bb->GetEhPreds().empty()) { + InitEhDefine(*bb); + } + InitGenUse(*bb); + InitOut(*bb); + + if (bb->IsCleanup()) { + if (bb->GetFirstStmt() == cgFunc->GetCleanupLabel()) { + firstCleanUpBB = bb; + } + (void)cleanUpBBSet.insert(bb); + } else { + (void)normalBBSet.insert(bb); + } + } + maxInsnNO = 0; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + insn->SetId(maxInsnNO); + maxInsnNO += kInsnNoInterval; + } + } +} + +void ReachingDefinition::InitDataSize() { + /* to visit vec[cgFunc->NumBBs()], size should be cgFunc->NumBBs() + 1 */ + const uint32 dataSize = cgFunc->NumBBs() + 1; + regIn.resize(dataSize); + regOut.resize(dataSize); + regGen.resize(dataSize); + regUse.resize(dataSize); + memIn.resize(dataSize); + memOut.resize(dataSize); + memGen.resize(dataSize); + memUse.resize(dataSize); +} + +/* compute bb->in, bb->out for each BB execpt cleanup BB */ +void ReachingDefinition::BuildInOutForFuncBody() { + std::unordered_set normalBBSetBak(normalBBSet.begin(), normalBBSet.end()); + std::unordered_set::iterator setItr; + while (!normalBBSetBak.empty()) { + setItr = normalBBSetBak.begin(); + BB *bb = *setItr; + DEBUG_ASSERT(bb != nullptr, "null ptr check"); + (void)normalBBSetBak.erase(setItr); + + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)normalBBSetBak.insert(succ); + } + + for (auto ehSucc : bb->GetEhSuccs()) { + (void)normalBBSetBak.insert(ehSucc); + } + } + } + } + DEBUG_ASSERT(normalBBSetBak.empty(), "CG internal error."); +} + +/* if bb->out changed, update in and out */ +void ReachingDefinition::UpdateInOut(BB &changedBB) { + InitGenUse(changedBB, false); + if (!GenerateOut(changedBB)) { + return; + } + + std::unordered_set bbSet; + std::unordered_set::iterator setItr; + + for (auto succ : changedBB.GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto ehSucc : changedBB.GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + + while (!bbSet.empty()) { + setItr = bbSet.begin(); + BB *bb = *setItr; + DEBUG_ASSERT(bb 
!= nullptr, "null ptr check"); + bbSet.erase(setItr); + + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto ehSucc : bb->GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + } + } + } + + if (!changedBB.IsCleanup() && firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(); + } +} + +void ReachingDefinition::UpdateInOut(BB &changedBB, bool isReg) { + std::set changedInfoIndex; + if (isReg) { + LocalMapleAllocator alloc(stackMp); + DataInfo &genInfoBak = regGen[changedBB.GetId()]->Clone(alloc); + InitGenUse(changedBB, false); + genInfoBak.EorBits(*regGen[changedBB.GetId()]); + genInfoBak.GetNonZeroElemsIndex(changedInfoIndex); + } else { + LocalMapleAllocator alloc(stackMp); + DataInfo &genInfoBak = memGen[changedBB.GetId()]->Clone(alloc); + InitGenUse(changedBB, false); + genInfoBak.EorBits(*memGen[changedBB.GetId()]); + genInfoBak.GetNonZeroElemsIndex(changedInfoIndex); + } + if (changedInfoIndex.empty()) { + return; + } + if (!GenerateOut(changedBB, changedInfoIndex, isReg)) { + return; + } + std::set bbSet; + std::set::iterator setItr; + for (auto &succ : changedBB.GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto &ehSucc : changedBB.GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + while (!bbSet.empty()) { + setItr = bbSet.begin(); + BB *bb = *setItr; + bbSet.erase(setItr); + if (GenerateIn(*bb, changedInfoIndex, isReg)) { + if (GenerateOut(*bb, changedInfoIndex, isReg)) { + for (auto &succ : bb->GetSuccs()) { + (void)bbSet.insert(succ); + } + for (auto &ehSucc : bb->GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + } + } + } + + if (!changedBB.IsCleanup() && firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(isReg, changedInfoIndex); + } +} + + +/* compute bb->in, bb->out for cleanup BBs */ +void ReachingDefinition::BuildInOutForCleanUpBB() { + DEBUG_ASSERT(firstCleanUpBB != nullptr, "firstCleanUpBB must not be nullptr"); + if (GenerateInForFirstCleanUpBB()) { + GenerateOut(*firstCleanUpBB); + } + std::unordered_set cleanupBBSetBak(cleanUpBBSet.begin(), cleanUpBBSet.end()); + std::unordered_set::iterator setItr; + + while (!cleanupBBSetBak.empty()) { + setItr = cleanupBBSetBak.begin(); + BB *bb = *setItr; + cleanupBBSetBak.erase(setItr); + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)cleanupBBSetBak.insert(succ); + } + for (auto ehSucc : bb->GetEhSuccs()) { + (void)cleanupBBSetBak.insert(ehSucc); + } + } + } + } + DEBUG_ASSERT(cleanupBBSetBak.empty(), "CG internal error."); +} + +void ReachingDefinition::BuildInOutForCleanUpBB(bool isReg, const std::set &index) { + DEBUG_ASSERT(firstCleanUpBB != nullptr, "firstCleanUpBB must not be nullptr"); + if (GenerateInForFirstCleanUpBB(isReg, index)) { + GenerateOut(*firstCleanUpBB, index, isReg); + } + std::unordered_set cleanupBBSetBak(cleanUpBBSet.begin(), cleanUpBBSet.end()); + std::unordered_set::iterator setItr; + while (!cleanupBBSetBak.empty()) { + setItr = cleanupBBSetBak.begin(); + BB *bb = *setItr; + cleanupBBSetBak.erase(setItr); + if (GenerateIn(*bb, index, isReg)) { + if (GenerateOut(*bb, index, isReg)) { + for (auto &succ : bb->GetSuccs()) { + (void)cleanupBBSetBak.insert(succ); + } + for (auto &ehSucc : bb->GetEhSuccs()) { + (void)cleanupBBSetBak.insert(ehSucc); + } + } + } + } + DEBUG_ASSERT(cleanupBBSetBak.empty(), "CG internal error."); +} + +/* entry for ReachingDefinition Analysis, mode represent to analyze RegOperand, MemOperand or both of them */ +void 
ReachingDefinition::AnalysisStart() { + if (!cgFunc->GetFirstBB()) { + return; + } + Initialize(); + /* Build in/out for function body first. (Except cleanup bb) */ + BuildInOutForFuncBody(); + /* If cleanup bb exists, build in/out for cleanup bbs. firstCleanUpBB->in = Union all non-cleanup bb's out. */ + if (firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(); + } + cgFunc->SetRD(this); +} + +/* check whether currentBB can reach endBB according to control flow */ +bool ReachingDefinition::CanReachEndBBFromCurrentBB(const BB ¤tBB, const BB &endBB, + std::vector &traversedBBSet) const { + if (¤tBB == &endBB) { + return true; + } + for (auto predBB : endBB.GetPreds()) { + if (traversedBBSet[predBB->GetId()]) { + continue; + } + traversedBBSet[predBB->GetId()] = true; + if (predBB == ¤tBB) { + return true; + } + if (CanReachEndBBFromCurrentBB(currentBB, *predBB, traversedBBSet)) { + return true; + } + } + for (auto ehPredBB : endBB.GetEhPreds()) { + if (traversedBBSet[ehPredBB->GetId()]) { + continue; + } + traversedBBSet[ehPredBB->GetId()] = true; + if (ehPredBB == ¤tBB) { + return true; + } + if (CanReachEndBBFromCurrentBB(currentBB, *ehPredBB, traversedBBSet)) { + return true; + } + } + return false; +} + +/* check whether register may be redefined form startBB to endBB */ +bool ReachingDefinition::IsLiveInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, + std::vector &visitedBB, bool isFirstNo) const { + for (auto succ : startBB.GetSuccs()) { + if (visitedBB[succ->GetId()]) { + continue; + } + visitedBB[succ->GetId()] = true; + if (isFirstNo && CheckRegLiveinReturnBB(regNO, *succ)) { + return false; + } + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[succ->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*succ, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsLiveInAllPathBB(regNO, *succ, endBB, visitedBB, isFirstNo); + if (!isLive) { + return false; + } + } + + for (auto ehSucc : startBB.GetEhSuccs()) { + if (visitedBB[ehSucc->GetId()]) { + continue; + } + visitedBB[ehSucc->GetId()] = true; + if (isFirstNo && CheckRegLiveinReturnBB(regNO, *ehSucc)) { + return false; + } + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[ehSucc->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*ehSucc, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsLiveInAllPathBB(regNO, *ehSucc, endBB, visitedBB, isFirstNo); + if (!isLive) { + return false; + } + } + return true; +} + +/* Check if the reg is used in return BB */ +bool ReachingDefinition::CheckRegLiveinReturnBB(uint32 regNO, const BB &bb) const { +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +bool ReachingDefinition::RegIsUsedIncaller(uint32 regNO, Insn &startInsn, Insn &endInsn) const { + if (startInsn.GetBB() != endInsn.GetBB()) { + return false; + } + if (startInsn.GetNext() == &endInsn || &startInsn == &endInsn) { + return false; + } + auto RegDefVec = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), 
+    if (!RegDefVec.empty()) {
+        return false;
+    }
+    if (IsCallerSavedReg(regNO) && startInsn.GetNext() != nullptr &&
+        KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *(startInsn.GetBB()->GetLastInsn()), regNO)) {
+        return true;
+    }
+    if (CheckRegLiveinReturnBB(regNO, *startInsn.GetBB())) {
+        return true;
+    }
+    return false;
+}
+
+/* check whether control flow can reach endInsn from startInsn */
+bool ReachingDefinition::RegIsLiveBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn, bool isBack,
+                                              bool isFirstNo) const {
+    DEBUG_ASSERT(&startInsn != &endInsn, "startInsn must not be equal to endInsn");
+    if (startInsn.GetBB() == endInsn.GetBB()) {
+        /* register is defined more than once */
+        if (startInsn.GetId() > endInsn.GetId()) {
+            return isBack;
+        }
+        if (startInsn.GetNext() == &endInsn) {
+            return true;
+        }
+        if (regGen[startInsn.GetBB()->GetId()]->TestBit(regNO)) {
+            std::vector<Insn*> RegDefVec;
+            if (isBack) {
+                RegDefVec = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev());
+            } else {
+                RegDefVec = FindRegDefBetweenInsn(regNO, &startInsn, endInsn.GetPrev());
+            }
+            if (!RegDefVec.empty()) {
+                return false;
+            }
+        }
+        if (IsCallerSavedReg(regNO) &&
+            KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *endInsn.GetPrev(), regNO)) {
+            return false;
+        }
+        return true;
+    }
+
+    if (&startInsn != startInsn.GetBB()->GetLastInsn() &&
+        regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) &&
+        !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn()).empty()) {
+        return false;
+    }
+
+    if (&startInsn != startInsn.GetBB()->GetLastInsn() &&
+        IsCallerSavedReg(regNO) &&
+        KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *startInsn.GetBB()->GetLastInsn(), regNO)) {
+        return false;
+    }
+
+    if (&endInsn != endInsn.GetBB()->GetFirstInsn() &&
+        regGen[endInsn.GetBB()->GetId()]->TestBit(regNO) &&
+        !FindRegDefBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev()).empty()) {
+        return false;
+    }
+
+    if (&endInsn != endInsn.GetBB()->GetFirstInsn() &&
+        IsCallerSavedReg(regNO) &&
+        KilledByCallBetweenInsnInSameBB(*endInsn.GetBB()->GetFirstInsn(), *endInsn.GetPrev(), regNO)) {
+        return false;
+    }
+
+    std::vector<bool> visitedBB(kMaxBBNum, false);
+    return IsLiveInAllPathBB(regNO, *startInsn.GetBB(), *endInsn.GetBB(), visitedBB, isFirstNo);
+}
+
+static bool SetDefInsnVecForAsm(Insn *insn, uint32 index, uint32 regNO, std::vector<Insn*> &defInsnVec) {
+    for (auto reg : static_cast<const ListOperand&>(insn->GetOperand(index)).GetOperands()) {
+        if (static_cast<RegOperand*>(reg)->GetRegisterNumber() == regNO) {
+            defInsnVec.emplace_back(insn);
+            return true;
+        }
+    }
+    return false;
+}
+
+std::vector<Insn*> ReachingDefinition::FindRegDefBetweenInsn(
+    uint32 regNO, Insn *startInsn, Insn *endInsn, bool findAll, bool analysisDone) const {
+    std::vector<Insn*> defInsnVec;
+    if (startInsn == nullptr || endInsn == nullptr) {
+        return defInsnVec;
+    }
+
+    DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB");
+    if (analysisDone && !regGen[startInsn->GetBB()->GetId()]->TestBit(regNO)) {
+        return defInsnVec;
+    }
+
+    for (Insn *insn = endInsn; insn != nullptr && insn != startInsn->GetPrev(); insn = insn->GetPrev()) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+
+        if (insn->IsAsmInsn()) {
+            if (SetDefInsnVecForAsm(insn, kAsmOutputListOpnd, regNO, defInsnVec) ||
+                SetDefInsnVecForAsm(insn, kAsmClobberListOpnd, regNO, defInsnVec)) {
+                if (findAll) {
+                    defInsnVec.emplace_back(insn);
+                } else {
+                    return defInsnVec;
+                }
+            }
+        }
+        if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) {
+            defInsnVec.emplace_back(insn);
+            if (!findAll) {
+                return defInsnVec;
+            }
+        }
+        if (insn->IsRegDefined(regNO)) {
+            defInsnVec.emplace_back(insn);
+            if (!findAll) {
+                return defInsnVec;
+            }
+        }
+    }
+    return defInsnVec;
+}
+
+bool ReachingDefinition::RegIsUsedOrDefBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn) const {
+    DEBUG_ASSERT(&startInsn != &endInsn, "startInsn must not be equal to endInsn");
+    if (startInsn.GetBB() == endInsn.GetBB()) {
+        /* register is defined more than once */
+        if (startInsn.GetId() > endInsn.GetId()) {
+            return false;
+        }
+        if (startInsn.GetNext() == &endInsn) {
+            return true;
+        }
+        if (regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) &&
+            !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()).empty()) {
+            return false;
+        }
+        if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) {
+            InsnSet useInsnSet;
+            FindRegUseBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev(), useInsnSet);
+            if (!useInsnSet.empty()) {
+                return false;
+            }
+        }
+        if (IsCallerSavedReg(regNO) &&
+            KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *endInsn.GetPrev(), regNO)) {
+            return false;
+        }
+        return true;
+    }
+
+    if (&startInsn != startInsn.GetBB()->GetLastInsn() &&
+        regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) &&
+        !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn()).empty()) {
+        return false;
+    }
+
+    if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) {
+        InsnSet useInsnSet;
+        FindRegUseBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn(), useInsnSet);
+        if (!useInsnSet.empty()) {
+            return false;
+        }
+    }
+
+    if (&startInsn != startInsn.GetBB()->GetLastInsn() &&
+        IsCallerSavedReg(regNO) &&
+        KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *startInsn.GetBB()->GetLastInsn(), regNO)) {
+        return false;
+    }
+
+    if (&endInsn != endInsn.GetBB()->GetFirstInsn() &&
+        regGen[endInsn.GetBB()->GetId()]->TestBit(regNO) &&
+        !FindRegDefBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev()).empty()) {
+        return false;
+    }
+
+    if (regUse[endInsn.GetBB()->GetId()]->TestBit(regNO)) {
+        InsnSet useInsnSet;
+        FindRegUseBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev(), useInsnSet);
+        if (!useInsnSet.empty()) {
+            return false;
+        }
+    }
+
+    if (&endInsn != endInsn.GetBB()->GetFirstInsn() &&
+        IsCallerSavedReg(regNO) &&
+        KilledByCallBetweenInsnInSameBB(*endInsn.GetBB()->GetFirstInsn(), *endInsn.GetPrev(), regNO)) {
+        return false;
+    }
+
+    std::vector<bool> visitedBB(kMaxBBNum, false);
+    return IsUseOrDefInAllPathBB(regNO, *startInsn.GetBB(), *endInsn.GetBB(), visitedBB);
+}
+
+/* check whether register may be used or redefined within the same BB */
+bool ReachingDefinition::IsUseOrDefBetweenInsn(uint32 regNO, const BB &curBB,
+                                               const Insn &startInsn, Insn &endInsn) const {
+    if (regGen[curBB.GetId()]->TestBit(regNO)) {
+        if (!FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()).empty()) {
+            return false;
+        }
+    }
+    if (regUse[curBB.GetId()]->TestBit(regNO)) {
+        InsnSet useInsnSet;
+        FindRegUseBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev(), useInsnSet);
+        if (!useInsnSet.empty()) {
+            return false;
+        }
+    }
+    return true;
+}
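+
+/*
+ * Illustrative only: how a client pass can use FindRegDefBetweenInsn to pick
+ * up the unique in-block definition of a register, if there is exactly one.
+ * UniqueLocalDefExample is a hypothetical helper; the trailing analysisDone
+ * argument is assumed to keep its default here.
+ */
+#if 0
+static Insn *UniqueLocalDefExample(const ReachingDefinition &rd, uint32 regNO, BB &bb) {
+    /* findAll = true: collect every def in the block instead of stopping at the first */
+    std::vector<Insn*> defs = rd.FindRegDefBetweenInsn(regNO, bb.GetFirstInsn(), bb.GetLastInsn(), true);
+    return (defs.size() == 1) ? defs.front() : nullptr;
+}
+#endif
+
+/* check whether register may be used or redefined from startBB to endBB */
+bool ReachingDefinition::IsUseOrDefInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB,
+                                               std::vector<bool> &visitedBB) const {
+    for (auto succ : startBB.GetSuccs()) {
+        if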
(visitedBB[succ->GetId()] || succ == &endBB) { + continue; + } + visitedBB[succ->GetId()] = true; + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[succ->GetId()]->TestBit(regNO) || regUse[succ->GetId()]->TestBit(regNO) || + (succ->HasCall() && IsCallerSavedReg(regNO))) { + canReachEndBB = CanReachEndBBFromCurrentBB(*succ, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsUseOrDefInAllPathBB(regNO, *succ, endBB, visitedBB); + if (!isLive) { + return false; + } + } + + for (auto ehSucc : startBB.GetEhSuccs()) { + if (visitedBB[ehSucc->GetId()]) { + continue; + } + visitedBB[ehSucc->GetId()] = true; + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[ehSucc->GetId()]->TestBit(regNO) || regUse[ehSucc->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*ehSucc, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsUseOrDefInAllPathBB(regNO, *ehSucc, endBB, visitedBB); + if (!isLive) { + return false; + } + } + return true; +} + +bool ReachingDefinition::HasCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn) const { + DEBUG_ASSERT(startInsn.GetBB() == endInsn.GetBB(), "two insns must be in same bb"); + for (const Insn *insn = &startInsn; insn != endInsn.GetNext(); insn = insn->GetNext()) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + return true; + } + } + return false; +} + +/* operand is only defined in startBB, and only used in endBB. + * so traverse from endBB to startBB, all paths reach startBB finally. + * startBB and endBB are different, and call insns in both of them are not counted. + * whether startBB and endBB are in a loop is not counted. 
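+ *
+ * In sketch form, the walk below is a plain early-exit BFS over predecessors:
+ *
+ *     push endBB; mark endBB visited;
+ *     while the queue is not empty:
+ *         for each (eh-)pred of the front BB:
+ *             skip startBB and already-visited preds;
+ *             if pred->HasCall() return true;
+ *             mark pred visited and push it;
+ *     return false;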
+ */
+bool ReachingDefinition::HasCallInPath(const BB &startBB, const BB &endBB, std::vector<bool> &visitedBB) const {
+    DEBUG_ASSERT(&startBB != &endBB, "startBB and endBB are not counted");
+    std::queue<const BB*> bbQueue;
+    bbQueue.push(&endBB);
+    visitedBB[endBB.GetId()] = true;
+    while (!bbQueue.empty()) {
+        const BB *bb = bbQueue.front();
+        bbQueue.pop();
+        for (auto predBB : bb->GetPreds()) {
+            if (predBB == &startBB || visitedBB[predBB->GetId()]) {
+                continue;
+            }
+            if (predBB->HasCall()) {
+                return true;
+            }
+            visitedBB[predBB->GetId()] = true;
+            bbQueue.push(predBB);
+        }
+        for (auto ehPredBB : bb->GetEhPreds()) {
+            if (ehPredBB == &startBB || visitedBB[ehPredBB->GetId()]) {
+                continue;
+            }
+            if (ehPredBB->HasCall()) {
+                return true;
+            }
+            visitedBB[ehPredBB->GetId()] = true;
+            bbQueue.push(ehPredBB);
+        }
+    }
+    return false;
+}
+
+/* for compile-time reasons this check is imprecise: BBs inside loops are not considered */
+bool ReachingDefinition::HasCallBetweenDefUse(const Insn &defInsn, const Insn &useInsn) const {
+    if (defInsn.GetBB()->GetId() == useInsn.GetBB()->GetId()) {
+        if (&useInsn == defInsn.GetNext()) {
+            return false;
+        }
+        if (useInsn.GetId() > defInsn.GetId()) {
+            return HasCallBetweenInsnInSameBB(defInsn, *useInsn.GetPrev());
+        }
+        /* useInsn precedes defInsn; conservatively assume a call lies between them */
+        return true;
+    }
+    /* check defInsn->GetBB() */
+    if (&defInsn != defInsn.GetBB()->GetLastInsn() &&
+        defInsn.GetBB()->HasCall() &&
+        HasCallBetweenInsnInSameBB(*defInsn.GetNext(), *defInsn.GetBB()->GetLastInsn())) {
+        return true;
+    }
+    /* check useInsn->GetBB() */
+    if (&useInsn != useInsn.GetBB()->GetFirstInsn() &&
+        useInsn.GetBB()->HasCall() &&
+        HasCallBetweenInsnInSameBB(*useInsn.GetBB()->GetFirstInsn(), *useInsn.GetPrev())) {
+        return true;
+    }
+    std::vector<bool> visitedBB(kMaxBBNum, false);
+    return HasCallInPath(*defInsn.GetBB(), *useInsn.GetBB(), visitedBB);
+}
+
+void ReachingDefinition::EnlargeRegCapacity(uint32 size) {
+    FOR_ALL_BB(bb, cgFunc) {
+        regIn[bb->GetId()]->EnlargeCapacityToAdaptSize(size);
+        regOut[bb->GetId()]->EnlargeCapacityToAdaptSize(size);
+        regGen[bb->GetId()]->EnlargeCapacityToAdaptSize(size);
+        regUse[bb->GetId()]->EnlargeCapacityToAdaptSize(size);
+    }
+}
+
+void ReachingDefinition::DumpInfo(const BB &bb, DumpType flag) const {
+    const DataInfo *info = nullptr;
+    switch (flag) {
+        case kDumpRegGen:
+            LogInfo::MapleLogger() << "    regGen:\n";
+            info = regGen[bb.GetId()];
+            break;
+        case kDumpRegUse:
+            LogInfo::MapleLogger() << "    regUse:\n";
+            info = regUse[bb.GetId()];
+            break;
+        case kDumpRegIn:
+            LogInfo::MapleLogger() << "    regIn:\n";
+            info = regIn[bb.GetId()];
+            break;
+        case kDumpRegOut:
+            LogInfo::MapleLogger() << "    regOut:\n";
+            info = regOut[bb.GetId()];
+            break;
+        case kDumpMemGen:
+            LogInfo::MapleLogger() << "    memGen:\n";
+            info = memGen[bb.GetId()];
+            break;
+        case kDumpMemIn:
+            LogInfo::MapleLogger() << "    memIn:\n";
+            info = memIn[bb.GetId()];
+            break;
+        case kDumpMemOut:
+            LogInfo::MapleLogger() << "    memOut:\n";
+            info = memOut[bb.GetId()];
+            break;
+        case kDumpMemUse:
+            LogInfo::MapleLogger() << "    memUse:\n";
+            info = memUse[bb.GetId()];
+            break;
+        default:
+            return;
+    }
+    DEBUG_ASSERT(info != nullptr, "null ptr check");
+    uint32 count = 1;
+    LogInfo::MapleLogger() << "      ";
+    for (uint32 i = 0; i != info->Size(); ++i) {
+        if (info->TestBit(i)) {
+            count += 1;
+            if (kDumpMemGen <= flag && flag <= kDumpMemUse) {
+                /* Each element i means a 4-byte stack slot.
*/ + LogInfo::MapleLogger() << (i * 4) << " "; + } else { + LogInfo::MapleLogger() << i << " "; + } + /* 10 output per line */ + if (count % 10 == 0) { + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << " "; + } + } + } + + LogInfo::MapleLogger() << "\n"; +} + +void ReachingDefinition::DumpBBCGIR(const BB &bb) const { + if (bb.IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (bb.IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb.GetSuccs().size()) { + LogInfo::MapleLogger() << " succs: "; + for (auto *succBB : bb.GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (bb.GetEhSuccs().size()) { + LogInfo::MapleLogger() << " eh_succs: "; + for (auto *ehSuccBB : bb.GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "\n"; + + FOR_BB_INSNS_CONST(insn, &bb) { + LogInfo::MapleLogger() << " "; + insn->Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +void ReachingDefinition::Dump(uint32 flag) const { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + DEBUG_ASSERT(mirSymbol != nullptr, "get symbol in function failed in ReachingDefinition::Dump"); + LogInfo::MapleLogger() << "\n---- Reaching definition analysis for " << mirSymbol->GetName(); + LogInfo::MapleLogger() << " ----\n"; + FOR_ALL_BB(bb, cgFunc) { + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " ===\n"; + + if (flag & kDumpBBCGIR) { + DumpBBCGIR(*bb); + } + + if (flag & kDumpRegIn) { + DumpInfo(*bb, kDumpRegIn); + } + + if (flag & kDumpRegUse) { + DumpInfo(*bb, kDumpRegUse); + } + + if (flag & kDumpRegGen) { + DumpInfo(*bb, kDumpRegGen); + } + + if (flag & kDumpRegOut) { + DumpInfo(*bb, kDumpRegOut); + } + + if (flag & kDumpMemIn) { + DumpInfo(*bb, kDumpMemIn); + } + + if (flag & kDumpMemGen) { + DumpInfo(*bb, kDumpMemGen); + } + + if (flag & kDumpMemOut) { + DumpInfo(*bb, kDumpMemOut); + } + + if (flag & kDumpMemUse) { + DumpInfo(*bb, kDumpMemUse); + } + } + LogInfo::MapleLogger() << "------------------------------------------------------\n"; +} + +bool CgReachingDefinition::PhaseRun(maplebe::CGFunc &f) { +#if TARGAARCH64 || TARGRISCV64 + reachingDef = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); +#endif +#if TARGARM32 + reachingDef = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); +#endif + reachingDef->SetAnalysisMode(kRDAllAnalysis); + reachingDef->AnalysisStart(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgReachingDefinition, reachingdefinition) + +bool CgClearRDInfo::PhaseRun(maplebe::CGFunc &f) { + if (f.GetRDStatus()) { + f.GetRD()->ClearDefUseInfo(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgClearRDInfo, clearrdinfo) +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/reg_alloc.cpp b/ecmascript/mapleall/maple_be/src/cg/reg_alloc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0bbedd920c0cf0a8eba646a1d0af6bd9895d5803 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/reg_alloc.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "reg_alloc.h" +#include "live.h" +#include "loop.h" +#include "cg_dominance.h" +#include "mir_lower.h" +#include "securec.h" +#include "reg_alloc_basic.h" +#include "reg_alloc_lsra.h" +#include "cg.h" +#if TARGAARCH64 +#include "aarch64_color_ra.h" +#endif + +namespace maplebe { +void CgRegAlloc::GetAnalysisDependence(AnalysisDep &aDep) const { + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevelLiteCG || + CGOptions::GetInstance().DoLinearScanRegisterAllocation()) { + aDep.AddRequired(); + } + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.PreservedAllExcept(); + } +#if TARGAARCH64 + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0 && + CGOptions::GetInstance().DoColoringBasedRegisterAllocation()) { + aDep.AddRequired(); + } +#endif +} + +bool CgRegAlloc::PhaseRun(maplebe::CGFunc &f) { + bool success = false; + while (success == false) { + MemPool *phaseMp = GetPhaseMemPool(); + /* create register allocator */ + RegAllocator *regAllocator = nullptr; + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + regAllocator = phaseMp->New(f, *phaseMp); + } else if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevelLiteCG) { +#if TARGX86_64 + Bfs *bfs = GET_ANALYSIS(CgBBSort, f); + CHECK_FATAL(bfs != nullptr, "null ptr check"); + regAllocator = phaseMp->New(f, *phaseMp, bfs); +#else + LogInfo::MapleLogger(kLlErr) << "Error: -LiteCG option is unsupported for aarch64.\n"; +#endif + } else { +#if TARGAARCH64 + if (f.GetCG()->GetCGOptions().DoLinearScanRegisterAllocation()) { + Bfs *bfs = GET_ANALYSIS(CgBBSort, f); + CHECK_FATAL(bfs != nullptr, "null ptr check"); + regAllocator = phaseMp->New(f, *phaseMp, bfs); + } else if (f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) { + MaplePhase *it = GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>( + &CgLiveAnalysis::id, f); + LiveAnalysis *live = static_cast(it)->GetResult(); + CHECK_FATAL(live != nullptr, "null ptr check"); + /* revert liveanalysis result container. 
*/ + live->ResetLiveSet(); + DomAnalysis *dom = GET_ANALYSIS(CgDomAnalysis, f); + CHECK_FATAL(dom != nullptr, "null ptr check"); + regAllocator = phaseMp->New(f, *phaseMp, *dom); + } else { + maple::LogInfo::MapleLogger(kLlErr) << + "Warning: We only support Linear Scan and GraphColor register allocation\n"; + } +#elif TARGX86_64 + LogInfo::MapleLogger(kLlErr) << + "Error: We only support -O0, and -LiteCG for x64.\n"; +#endif + } + /* do register allocation */ + CHECK_FATAL(regAllocator != nullptr, "regAllocator is null in CgDoRegAlloc::Run"); + f.SetIsAfterRegAlloc(); + success = regAllocator->AllocateRegisters(); + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLiveAnalysis::id); + } + } + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + } + return false; +} +} /* namespace maplebe */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/src/cg/reg_alloc_basic.cpp b/ecmascript/mapleall/maple_be/src/cg/reg_alloc_basic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..111ecf63c07ad49088ce567a905b20601eadc41c --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/reg_alloc_basic.cpp @@ -0,0 +1,477 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "reg_alloc_basic.h" +#include "cg.h" + +namespace maplebe { +/* +* NB. As an optimization we can use X8 as a scratch (temporary) +* register if the return value is not returned through memory. 
+*/ +Operand *DefaultO0RegAllocator::HandleRegOpnd(Operand &opnd) { + DEBUG_ASSERT(opnd.IsRegister(), "Operand should be register operand"); + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsOfCC()) { + return &opnd; + } + if (!regInfo->IsVirtualRegister(regOpnd)) { + availRegSet[regOpnd.GetRegisterNumber()] = false; + (void)liveReg.insert(regOpnd.GetRegisterNumber()); + return ®Opnd; + } + auto regMapIt = regMap.find(regOpnd.GetRegisterNumber()); + if (regMapIt != regMap.end()) { /* already allocated this register */ + DEBUG_ASSERT(regMapIt->second < regInfo->GetAllRegNum(), "must be a physical register"); + regno_t newRegNO = regMapIt->second; + availRegSet[newRegNO] = false; /* make sure the real register can not be allocated and live */ + (void)liveReg.insert(newRegNO); + (void)allocatedSet.insert(&opnd); + return &cgFunc->GetOpndBuilder()->CreatePReg(newRegNO, regOpnd.GetSize(), regOpnd.GetRegisterType()); + } + if (AllocatePhysicalRegister(regOpnd)) { + (void)allocatedSet.insert(&opnd); + auto regMapItSecond = regMap.find(regOpnd.GetRegisterNumber()); + DEBUG_ASSERT(regMapItSecond != regMap.end(), " ERROR: can not find register number in regmap "); + return &cgFunc->GetOpndBuilder()->CreatePReg(regMapItSecond->second, regOpnd.GetSize(), regOpnd.GetRegisterType()); + } + + /* use 0 register as spill register */ + regno_t regNO = 0; + return &cgFunc->GetOpndBuilder()->CreatePReg(regNO, regOpnd.GetSize(), regOpnd.GetRegisterType()); +} + +Operand *DefaultO0RegAllocator::HandleMemOpnd(Operand &opnd) { + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "Operand should be memory access operand"); + auto *memOpnd = static_cast(&opnd); + Operand *res = nullptr; + if (memOpnd->GetBaseRegister() != nullptr) { + res = AllocSrcOpnd(*memOpnd->GetBaseRegister()); + memOpnd->SetBaseRegister(static_cast(*res)); + } + if (memOpnd->GetIndexRegister() != nullptr) { + res = AllocSrcOpnd(*memOpnd->GetIndexRegister()); + memOpnd->SetIndexRegister(static_cast(*res)); + } + (void)allocatedSet.insert(&opnd); + return memOpnd; +} + +Operand *DefaultO0RegAllocator::AllocSrcOpnd(Operand &opnd) { + if (opnd.IsRegister()) { + if (regInfo->IsUnconcernedReg(static_cast(opnd))) { + return &opnd; + } + return HandleRegOpnd(opnd); + } else if (opnd.IsMemoryAccessOperand()) { + return HandleMemOpnd(opnd); + } + DEBUG_ASSERT(false, "NYI"); + return nullptr; +} + +Operand *DefaultO0RegAllocator::AllocDestOpnd(Operand &opnd, const Insn &insn) { + if (!opnd.IsRegister()) { + DEBUG_ASSERT(false, "result operand must be of type register"); + return nullptr; + } + auto ®Opnd = static_cast(opnd); + if (regInfo->IsUnconcernedReg(static_cast(opnd))) { + return &opnd; + } + if (!regInfo->IsVirtualRegister(regOpnd)) { + auto reg = regOpnd.GetRegisterNumber(); + availRegSet[reg] = true; + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(reg); + } + return &opnd; + } + + auto regMapIt = regMap.find(regOpnd.GetRegisterNumber()); + if (regMapIt != regMap.end()) { + regno_t reg = regMapIt->second; + if (!insn.IsCondDef()) { + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(reg); + } + } + } else { + /* AllocatePhysicalRegister insert a mapping from vreg no to phy reg no into regMap */ + if (AllocatePhysicalRegister(regOpnd)) { + regMapIt = regMap.find(regOpnd.GetRegisterNumber()); + if (!insn.IsCondDef()) { + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id && (id <= insn.GetId())) { + 
ReleaseReg(regMapIt->second); + } + } + } else { + /* For register spill. use 0 register as spill register */ + regno_t regNO = 0; + return &cgFunc->GetOpndBuilder()->CreatePReg(regNO, regOpnd.GetSize(), regOpnd.GetRegisterType()); + } + } + (void)allocatedSet.insert(&opnd); + return &cgFunc->GetOpndBuilder()->CreatePReg(regMapIt->second, regOpnd.GetSize(), regOpnd.GetRegisterType()); +} + +void DefaultO0RegAllocator::GetPhysicalRegisterBank(RegType regTy, regno_t &begin, regno_t &end) const { + switch (regTy) { + case kRegTyVary: + case kRegTyCc: + break; + case kRegTyInt: + begin = *regInfo->GetIntRegs().begin(); + end = *regInfo->GetIntRegs().rbegin(); + break; + case kRegTyFloat: + begin = *regInfo->GetFpRegs().begin(); + end = *regInfo->GetFpRegs().rbegin(); + break; + default: + DEBUG_ASSERT(false, "NYI"); + break; + } +} + +void DefaultO0RegAllocator::InitAvailReg() { + for (auto it : regInfo->GetAllRegs()){ + availRegSet[it] = true; + } +} + +/* these registers can not be allocated */ +bool DefaultO0RegAllocator::IsSpecialReg(regno_t reg) const { + return regInfo->IsSpecialReg(reg); +} + +void DefaultO0RegAllocator::ReleaseReg(const RegOperand ®Opnd) { + ReleaseReg(regMap[regOpnd.GetRegisterNumber()]); +} + +void DefaultO0RegAllocator::ReleaseReg(regno_t reg) { + DEBUG_ASSERT(reg < regInfo->GetAllRegNum(), "can't release virtual register"); + liveReg.erase(reg); + if (!IsSpecialReg(reg)) { + availRegSet[reg] = true; + } +} + +/* trying to allocate a physical register to opnd. return true if success */ +bool DefaultO0RegAllocator::AllocatePhysicalRegister(const RegOperand &opnd) { + RegType regType = opnd.GetRegisterType(); + regno_t regNo = opnd.GetRegisterNumber(); + regno_t regStart = 0; + regno_t regEnd = 0; + GetPhysicalRegisterBank(regType, regStart, regEnd); + + const auto opndRegIt = regLiveness.find(regNo); + for (regno_t reg = regStart; reg <= regEnd; ++reg) { + if (!availRegSet[reg]) { + continue; + } + + if (opndRegIt != regLiveness.end()) { + const auto regIt = regLiveness.find(reg); + DEBUG_ASSERT(opndRegIt->second.size() == 1, "NIY, opnd reg liveness range must be 1."); + if (regIt != regLiveness.end() && + CheckRangesOverlap(opndRegIt->second.front(), regIt->second)) { + continue; + } + } + + regMap[opnd.GetRegisterNumber()] = reg; + availRegSet[reg] = false; + (void)liveReg.insert(reg); /* this register is live now */ + return true; + } + return false; +} + +/* If opnd is a callee saved register, save it in the prolog and restore it in the epilog */ +void DefaultO0RegAllocator::SaveCalleeSavedReg(const RegOperand ®Opnd) { + regno_t regNO = regOpnd.GetRegisterNumber(); + auto phyReg = regInfo->IsVirtualRegister(regOpnd) ? regMap[regNO] : regNO; + /* when yieldpoint is enabled, skip the reserved register for yieldpoint. */ + if (cgFunc->GetCG()->GenYieldPoint() && (regInfo->IsYieldPointReg(phyReg))) { + return; + } + + if (regInfo->IsCalleeSavedReg(phyReg)) { + calleeSaveUsed.insert(phyReg); + } +} + +uint32 DefaultO0RegAllocator::GetRegLivenessId(regno_t regNo) { + auto regIt = regLiveness.find(regNo); + return ((regIt == regLiveness.end()) ? 0 : regIt->second.back().second); +} + +bool DefaultO0RegAllocator::CheckRangesOverlap(const std::pair &range1, + const MapleVector> &ranges2) const { + /* + * Check whether range1 and ranges2 overlap. + * The ranges2 is sorted. 
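+ * lower_bound skips every range of ranges2 that ends at or before range1
+ * begins, so the candidate is the first range ending after range1.first;
+ * the two overlap iff the larger start is <= the smaller end.
+ *
+ * Worked example (positions are illustrative): range1 = [12,18] and
+ * ranges2 = { [1,5] [9,11] [20,30] }: the search lands on [20,30], and
+ * max(12,20) = 20 > min(18,30) = 18, so there is no overlap.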
+ */ + auto pos = std::lower_bound(ranges2.begin(), ranges2.end(), range1, + [](const std::pair &r2, const std::pair &r1) { + return r1.first >= r2.second; + }); + if (pos == ranges2.end()) { + return false; + } + auto &range2 = *pos; + if (std::max(range1.first, range2.first) <= std::min(range1.second, range2.second)) { + return true; + } + return false; +} + +void DefaultO0RegAllocator::SetupRegLiveness(BB *bb) { + regLiveness.clear(); + + uint32 id = 1; + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + insn->SetId(id); + id++; + uint32 opndNum = insn->GetOperandSize(); + const InsnDesc *curMd = insn->GetDesc(); + for (uint32 i = 0; i < opndNum; i++) { + Operand &opnd = insn->GetOperand(i); + const OpndDesc *opndDesc = curMd->GetOpndDes(i); + if (opnd.IsRegister()) { + /* def-use is processed by use */ + SetupRegLiveness(static_cast(opnd), insn->GetId(), !opndDesc->IsUse()); + } else if (opnd.IsMemoryAccessOperand()) { + SetupRegLiveness(static_cast(opnd), insn->GetId()); + } else if (opnd.IsList()) { + SetupRegLiveness(static_cast(opnd), insn->GetId(), opndDesc->IsDef()); + } + } + } + + /* clear the last empty range */ + for (auto ®LivenessIt : regLiveness) { + auto ®LivenessRanges = regLivenessIt.second; + if (regLivenessRanges.back().first == 0) { + regLivenessRanges.pop_back(); + } + } +} + +void DefaultO0RegAllocator::SetupRegLiveness(MemOperand &opnd, uint32 insnId) { + /* base regOpnd is use in O0 */ + if (opnd.GetBaseRegister()) { + SetupRegLiveness(*opnd.GetBaseRegister(), insnId, false); + } + /* index regOpnd must be use */ + if (opnd.GetIndexRegister()) { + SetupRegLiveness(*opnd.GetIndexRegister(), insnId, false); + } +} + +void DefaultO0RegAllocator::SetupRegLiveness(ListOperand &opnd, uint32 insnId, bool isDef) { + for (RegOperand *regOpnd : opnd.GetOperands()) { + SetupRegLiveness(*regOpnd, insnId, isDef); + } +} + +void DefaultO0RegAllocator::SetupRegLiveness(RegOperand &opnd, uint32 insnId, bool isDef) { + MapleVector> ranges(alloc.Adapter()); + auto regLivenessIt = regLiveness.emplace(opnd.GetRegisterNumber(), ranges).first; + auto ®LivenessRanges = regLivenessIt->second; + if (regLivenessRanges.empty()) { + regLivenessRanges.push_back(std::make_pair(0, 0)); + } + auto ®LivenessLastRange = regLivenessRanges.back(); + if (regLivenessLastRange.first == 0) { + regLivenessLastRange.first = insnId; + } + regLivenessLastRange.second = insnId; + + /* create new range, only phyReg need to be segmented */ + if (isDef && regInfo->IsAvailableReg(opnd.GetRegisterNumber())) { + regLivenessRanges.push_back(std::make_pair(0, 0)); + } +} + +void DefaultO0RegAllocator::AllocHandleDestList(Insn &insn, Operand &opnd, uint32 idx) { + if (!opnd.IsList()) { + return; + } + auto *listOpnds = &static_cast(opnd); + auto *listOpndsNew = &cgFunc->GetOpndBuilder()->CreateList(); + for (auto *dstOpnd : listOpnds->GetOperands()) { + if (allocatedSet.find(dstOpnd) != allocatedSet.end()) { + auto ®Opnd = static_cast(*dstOpnd); + SaveCalleeSavedReg(regOpnd); + listOpndsNew->PushOpnd( + cgFunc->GetOpndBuilder()->CreatePReg( + regMap[regOpnd.GetRegisterNumber()], regOpnd.GetSize(), regOpnd.GetRegisterType())); + continue; /* already allocated */ + } + RegOperand *regOpnd = static_cast(AllocDestOpnd(*dstOpnd, insn)); + DEBUG_ASSERT(regOpnd != nullptr, "null ptr check"); + auto physRegno = regOpnd->GetRegisterNumber(); + availRegSet[physRegno] = false; + (void)liveReg.insert(physRegno); + listOpndsNew->PushOpnd( + cgFunc->GetOpndBuilder()->CreatePReg(physRegno, 
regOpnd->GetSize(), regOpnd->GetRegisterType())); + } + insn.SetOperand(idx, *listOpndsNew); + for (auto *dstOpnd : listOpndsNew->GetOperands()) { + uint32 id = GetRegLivenessId(dstOpnd->GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(*dstOpnd); + } + } +} + +void DefaultO0RegAllocator::AllocHandleDest(Insn &insn, Operand &opnd, uint32 idx) { + if (allocatedSet.find(&opnd) != allocatedSet.end()) { + /* free the live range of this register */ + auto ®Opnd = static_cast(opnd); + SaveCalleeSavedReg(regOpnd); + if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) { + /* remember the physical machine register assigned */ + regno_t regNO = regOpnd.GetRegisterNumber(); + rememberRegs.push_back(regInfo->IsVirtualRegister(regOpnd) ? regMap[regNO] : regNO); + } else if (!insn.IsCondDef()) { + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(regOpnd); + } + } + insn.SetOperand(idx, cgFunc->GetOpndBuilder()->CreatePReg( + regMap[regOpnd.GetRegisterNumber()], regOpnd.GetSize(), regOpnd.GetRegisterType())); + return; /* already allocated */ + } + + if (opnd.IsRegister()) { + insn.SetOperand(idx, *AllocDestOpnd(opnd, insn)); + SaveCalleeSavedReg(static_cast(opnd)); + } +} + +void DefaultO0RegAllocator::AllocHandleSrcList(Insn &insn, Operand &opnd, uint32 idx) { + if (!opnd.IsList()) { + return; + } + auto *listOpnds = &static_cast(opnd); + auto *listOpndsNew = &cgFunc->GetOpndBuilder()->CreateList(); + for (auto *srcOpnd : listOpnds->GetOperands()) { + if (allocatedSet.find(srcOpnd) != allocatedSet.end()) { + auto *regOpnd = static_cast(srcOpnd); + regno_t reg = regMap[regOpnd->GetRegisterNumber()]; + availRegSet[reg] = false; + (void)liveReg.insert(reg); /* this register is live now */ + listOpndsNew->PushOpnd( + cgFunc->GetOpndBuilder()->CreatePReg(reg, regOpnd->GetSize(), regOpnd->GetRegisterType())); + continue; /* already allocated */ + } + RegOperand *regOpnd = static_cast(AllocSrcOpnd(*srcOpnd)); + CHECK_NULL_FATAL(regOpnd); + listOpndsNew->PushOpnd(*regOpnd); + } + insn.SetOperand(idx, *listOpndsNew); +} + +void DefaultO0RegAllocator::AllocHandleSrc(Insn &insn, Operand &opnd, uint32 idx) { + if (allocatedSet.find(&opnd) != allocatedSet.end() && opnd.IsRegister()) { + auto *regOpnd = &static_cast(opnd); + regno_t reg = regMap[regOpnd->GetRegisterNumber()]; + availRegSet[reg] = false; + (void)liveReg.insert(reg); /* this register is live now */ + insn.SetOperand( + idx, cgFunc->GetOpndBuilder()->CreatePReg(reg, regOpnd->GetSize(), regOpnd->GetRegisterType())); + } else { + Operand *srcOpnd = AllocSrcOpnd(opnd); + CHECK_NULL_FATAL(srcOpnd); + insn.SetOperand(idx, *srcOpnd); + } +} + +bool DefaultO0RegAllocator::AllocateRegisters() { + regInfo->Init(); + InitAvailReg(); + cgFunc->SetIsAfterRegAlloc(); + + FOR_ALL_BB_REV(bb, cgFunc) { + if (bb->IsEmpty()) { + continue; + } + + SetupRegLiveness(bb); + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + /* handle inline assembly first due to specific def&use order */ + if (insn->IsAsmInsn()) { + AllocHandleDestList(*insn, insn->GetOperand(kAsmClobberListOpnd), kAsmClobberListOpnd); + AllocHandleDestList(*insn, insn->GetOperand(kAsmOutputListOpnd), kAsmOutputListOpnd); + AllocHandleSrcList(*insn, insn->GetOperand(kAsmInputListOpnd), kAsmInputListOpnd); + } + + const InsnDesc *curMd = insn->GetDesc(); + + for (uint32 i = 0; i < insn->GetOperandSize() && !insn->IsAsmInsn(); ++i) { /* the dest registers */ + Operand &opnd = 
insn->GetOperand(i); + if (!(opnd.IsRegister() && curMd->GetOpndDes(i)->IsDef())) { + continue; + } + if (opnd.IsList()) { + AllocHandleDestList(*insn, opnd, i); + } else { + AllocHandleDest(*insn, opnd, i); + } + } + + for (uint32 i = 0; i < insn->GetOperandSize() && !insn->IsAsmInsn(); ++i) { /* the src registers */ + Operand &opnd = insn->GetOperand(i); + if (!((opnd.IsRegister() && curMd->GetOpndDes(i)->IsUse()) || opnd.IsMemoryAccessOperand())) { + continue; + } + if (opnd.IsList()) { + AllocHandleSrcList(*insn, opnd, i); + } else { + AllocHandleSrc(*insn, opnd, i); + } + } + + /* hack. a better way to handle intrinsics? */ + for (auto rememberReg : rememberRegs) { + DEBUG_ASSERT(rememberReg != regInfo->GetInvalidReg(), "not a valid register"); + ReleaseReg(rememberReg); + } + rememberRegs.clear(); + } + } + /* + * we store both FP/LR if using FP or if not using FP, but func has a call + * Using FP, record it for saving + * notice the order here : the first callee saved reg is expected to be RFP. + */ + regInfo->Fini(); + regInfo->SaveCalleeSavedReg(calleeSaveUsed); + return true; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp b/ecmascript/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7cdd8a30a122a8babc96683ff41c0d500e2577b1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp @@ -0,0 +1,2181 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+
+#include "reg_alloc_lsra.h"
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include "loop.h"
+
+namespace maplebe {
+/*
+ * ==================
+ * = Linear Scan RA
+ * ==================
+ */
+#define LSRA_DUMP (CG_DEBUG_FUNC(*cgFunc))
+namespace {
+constexpr uint32 kSpilled = 1;
+constexpr uint32 kMinLiveIntervalLength = 20;
+constexpr uint32 kPrintedActiveListLength = 10;
+/* Here, kLoopWeight is a fine-tuned empirical parameter */
+constexpr uint32 kLoopWeight = 4;
+}
+
+#define IN_SPILL_RANGE \
+    (cgFunc->GetName().find(CGOptions::GetDumpFunc()) != std::string::npos && ++debugSpillCnt && \
+     (CGOptions::GetSpillRangesBegin() < debugSpillCnt) && (debugSpillCnt < CGOptions::GetSpillRangesEnd()))
+
+#ifdef RA_PERF_ANALYSIS
+static long bfsUS = 0;
+static long liveIntervalUS = 0;
+static long holesUS = 0;
+static long lsraUS = 0;
+static long finalizeUS = 0;
+static long totalUS = 0;
+
+extern void printLSRATime() {
+    std::cout << "============================================================\n";
+    std::cout << "    LSRA sub-phase time information    \n";
+    std::cout << "============================================================\n";
+    std::cout << "BFS BB sorting cost: " << bfsUS << "us \n";
+    std::cout << "live interval computing cost: " << liveIntervalUS << "us \n";
+    std::cout << "live range approximation cost: " << holesUS << "us \n";
+    std::cout << "LSRA cost: " << lsraUS << "us \n";
+    std::cout << "finalize cost: " << finalizeUS << "us \n";
+    std::cout << "LSRA total cost: " << totalUS << "us \n";
+    std::cout << "============================================================\n";
+}
+#endif
+
+/*
+ * This LSRA implementation is an interpretation of the [Poletto97] paper.
+ * BFS BB ordering is used to order the instructions. The live intervals are based on
+ * this instruction order. All vreg defines should come before a use, else a warning is
+ * given.
+ * Live intervals are traversed in order, from lower to higher instruction id.
+ * When encountering a live interval for the first time, it is assumed to be live and placed
+ * inside the 'active' structure until the vreg's last access. During the time a vreg
+ * is in 'active', the vreg occupies a physical register allocation and no other vreg can
+ * be allocated the same physical register.
+ */
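+/*
+ * A minimal sketch of that [Poletto97] loop, for orientation only; the names
+ * byStart, EndCmp, TakeFreeReg and SpillAtInterval are placeholders, not this
+ * allocator's API:
+ */
+#if 0
+void LinearScanSketch(std::vector<LiveInterval*> &byStart) {
+    std::set<LiveInterval*, EndCmp> activeSet;  /* ordered by increasing last use */
+    for (LiveInterval *li : byStart) {          /* intervals sorted by first def */
+        /* expire intervals that end before li starts, freeing their registers */
+        while (!activeSet.empty() && (*activeSet.begin())->GetLastUse() <= li->GetFirstDef()) {
+            FreeReg((*activeSet.begin())->GetAssignedReg());
+            activeSet.erase(activeSet.begin());
+        }
+        /* allocate a free register, or spill the interval that ends last */
+        regno_t reg = TakeFreeReg();
+        if (reg != 0) {
+            li->SetAssignedReg(reg);
+            (void)activeSet.insert(li);
+        } else {
+            SpillAtInterval(activeSet, *li);
+        }
+    }
+}
+#endif
+
+void LSRALinearScanRegAllocator::PrintRegSet(const MapleSet<uint32> &set, const std::string &str) const {
+    LogInfo::MapleLogger() << str;
+    for (auto reg : set) {
+        LogInfo::MapleLogger() << " " << reg;
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+bool LSRALinearScanRegAllocator::CheckForReg(Operand &opnd, const Insn &insn, const LiveInterval &li, regno_t regNO,
+                                             bool isDef) const {
+    if (!opnd.IsRegister()) {
+        return false;
+    }
+    auto &regOpnd = static_cast<RegOperand&>(opnd);
+    if (regOpnd.GetRegisterType() == kRegTyCc || regOpnd.GetRegisterType() == kRegTyVary) {
+        return false;
+    }
+    if (regOpnd.GetRegisterNumber() == regNO) {
+        LogInfo::MapleLogger() << "set object circle at " << insn.GetId() << "," << li.GetRegNO() <<
+            " size 5 fillcolor rgb \"";
+        if (isDef) {
+            LogInfo::MapleLogger() << "black\"\n";
+        } else {
+            LogInfo::MapleLogger() << "orange\"\n";
+        }
+    }
+    return true;
+}
+
+void LSRALinearScanRegAllocator::PrintLiveRanges(const LiveInterval &li) const {
+    if (li.GetAssignedReg() != 0) {
+        uint32 base = (li.GetRegType() == kRegTyInt) ?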
firstIntReg : firstFpReg; + LogInfo::MapleLogger() << "(assigned R" << (li.GetAssignedReg() - base) << ")"; + } + if (li.GetStackSlot() == kSpilled) { + LogInfo::MapleLogger() << "(spill)"; + } + for (auto range : li.GetRanges()) { + LogInfo::MapleLogger() << "[" << range.GetStart() << ", " << range.GetEnd() << "]" << " "; + } + if (li.GetSplitNext() != nullptr) { + LogInfo::MapleLogger() << "### SPLIT ### "; + PrintLiveRanges(*li.GetSplitNext()); + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::PrintAllLiveRanges() const { + LogInfo::MapleLogger() << "func: " << cgFunc->GetName() << "\n"; + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + LogInfo::MapleLogger() << "vreg" << li->GetRegNO() << ": "; + if (li->GetSplitParent() != nullptr) { + PrintLiveRanges(*li->GetSplitParent()); + } else { + PrintLiveRanges(*li); + } + } +} + +/* + * This is a support routine to compute the overlapping live intervals in graph form. + * The output file can be viewed by gnuplot. + * Despite the function name of LiveRanges, it is using live intervals. + */ +void LSRALinearScanRegAllocator::PrintLiveRangesGraph() const { + /* ================= Output to plot.pg =============== */ + std::ofstream out("plot.pg"); + CHECK_FATAL(out.is_open(), "Failed to open output file: plot.pg"); + std::streambuf *coutBuf = LogInfo::MapleLogger().rdbuf(); /* old buf */ + LogInfo::MapleLogger().rdbuf(out.rdbuf()); /* new buf */ + + LogInfo::MapleLogger() << "#!/usr/bin/gnuplot\n"; + LogInfo::MapleLogger() << "#maxInsnNum " << maxInsnNum << "\n"; + LogInfo::MapleLogger() << "#minVregNum " << minVregNum << "\n"; + LogInfo::MapleLogger() << "#maxVregNum " << maxVregNum << "\n"; + LogInfo::MapleLogger() << "reset\nset terminal png\n"; + LogInfo::MapleLogger() << "set xrange [1:" << maxInsnNum << "]\n"; + LogInfo::MapleLogger() << "set grid\nset style data linespoints\n"; + LogInfo::MapleLogger() << "set datafile missing '0'\n"; + std::vector> graph(maxVregNum, std::vector(maxInsnNum, 0)); + + uint32 minY = 0xFFFFFFFF; + uint32 maxY = 0; + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + uint32 regNO = li->GetRegNO(); + if ((li->GetLastUse() - li->GetFirstDef()) < kMinLiveIntervalLength) { + continue; + } + if (regNO < minY) { + minY = regNO; + } + if (regNO > maxY) { + maxY = regNO; + } + uint32 n; + for (n = 0; n <= (li->GetFirstDef() - 1); ++n) { + graph[regNO - minVregNum][n] = 0; + } + if (li->GetLastUse() >= n) { + for (; n <= (li->GetLastUse() - 1); ++n) { + graph[regNO - minVregNum][n] = regNO; + } + } + for (; n < maxInsnNum; ++n) { + graph[regNO - minVregNum][n] = 0; + } + + for (auto *bb : bfs->sortedBBs) { + FOR_BB_INSNS(insn, bb) { + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 iSecond = 0; iSecond < opndNum; ++iSecond) { + Operand &opnd = insn->GetOperand(iSecond); + const OpndDesc *regProp = md->GetOpndDes(iSecond); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::PrintLiveRangesGraph"); + bool isDef = regProp->IsRegDef(); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + (void)CheckForReg(*op, *insn, *li, regNO, isDef); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && !CheckForReg(*base, *insn, 
*li, regNO, false)) { + continue; + } + if (offset != nullptr && !CheckForReg(*offset, *insn, *li, regNO, false)) { + continue; + } + } else { + (void)CheckForReg(opnd, *insn, *li, regNO, isDef); + } + } + } + } + } + LogInfo::MapleLogger() << "set yrange [" << (minY - 1) << ":" << (maxY + 1) << "]\n"; + + LogInfo::MapleLogger() << "plot \"plot.dat\" using 1:2 title \"R" << minVregNum << "\""; + for (uint32 i = 1; i < ((maxVregNum - minVregNum) + 1); ++i) { + LogInfo::MapleLogger() << ", \\\n\t\"\" using 1:" << (i + kDivide2) << " title \"R" << (minVregNum + i) << "\""; + } + LogInfo::MapleLogger() << ";\n"; + + /* ================= Output to plot.dat =============== */ + std::ofstream out2("plot.dat"); + CHECK_FATAL(out2.is_open(), "Failed to open output file: plot.dat"); + LogInfo::MapleLogger().rdbuf(out2.rdbuf()); /* new buf */ + LogInfo::MapleLogger() << "##reg"; + for (uint32 i = minVregNum; i <= maxVregNum; ++i) { + LogInfo::MapleLogger() << " R" << i; + } + LogInfo::MapleLogger() << "\n"; + for (uint32 n = 0; n < maxInsnNum; ++n) { + LogInfo::MapleLogger() << (n + 1); + for (uint32 i = minVregNum; i <= maxVregNum; ++i) { + LogInfo::MapleLogger() << " " << graph[i - minVregNum][n]; + } + LogInfo::MapleLogger() << "\n"; + } + LogInfo::MapleLogger().rdbuf(coutBuf); +} + +void LSRALinearScanRegAllocator::PrintLiveInterval(const LiveInterval &li, const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + if (li.GetIsCall() != nullptr) { + LogInfo::MapleLogger() << " firstDef " << li.GetFirstDef(); + LogInfo::MapleLogger() << " isCall"; + } else if (li.GetPhysUse()) { + LogInfo::MapleLogger() << "\tregNO " << li.GetRegNO(); + LogInfo::MapleLogger() << " firstDef " << li.GetFirstDef(); + LogInfo::MapleLogger() << " physUse " << li.GetPhysUse(); + LogInfo::MapleLogger() << " endByCall " << li.IsEndByCall(); + } else { + /* show regno/firstDef/lastUse with 5/8/8 width respectively */ + LogInfo::MapleLogger() << "\tregNO " << std::setw(5) << li.GetRegNO(); + LogInfo::MapleLogger() << " firstDef " << std::setw(8) << li.GetFirstDef(); + LogInfo::MapleLogger() << " lastUse " << std::setw(8) << li.GetLastUse(); + LogInfo::MapleLogger() << " assigned " << li.GetAssignedReg(); + LogInfo::MapleLogger() << " refCount " << li.GetRefCount(); + LogInfo::MapleLogger() << " priority " << li.GetPriority(); + } + LogInfo::MapleLogger() << " object_address 0x" << std::hex << &li << std::dec << "\n"; +} + +void LSRALinearScanRegAllocator::PrintParamQueue(const std::string &str) { + LogInfo::MapleLogger() << str << "\n"; + for (SingleQue &que : intParamQueue) { + if (que.empty()) { + continue; + } + LiveInterval *li = que.front(); + LiveInterval *last = que.back(); + PrintLiveInterval(*li, ""); + while (li != last) { + que.pop_front(); + que.push_back(li); + li = que.front(); + PrintLiveInterval(*li, ""); + } + que.pop_front(); + que.push_back(li); + } +} + +void LSRALinearScanRegAllocator::PrintCallQueue(const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + for (auto callInsnID : callQueue) { + LogInfo::MapleLogger() << callInsnID << " "; + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::PrintActiveList(const std::string &str, uint32 len) const { + uint32 count = 0; + LogInfo::MapleLogger() << str << " " << active.size() << "\n"; + for (auto *li : active) { + PrintLiveInterval(*li, ""); + ++count; + if ((len != 0) && (count == len)) { + break; + } + } +} + +void LSRALinearScanRegAllocator::PrintActiveListSimple() const { + for (const auto *li : 
active) {
+        uint32 assignedReg = li->GetAssignedReg();
+        LogInfo::MapleLogger() << li->GetRegNO() << "(" << assignedReg << ", ";
+        if (li->GetPhysUse()) {
+            LogInfo::MapleLogger() << "p) ";
+        } else {
+            LogInfo::MapleLogger() << li->GetFirstAcrossedCall();
+        }
+        LogInfo::MapleLogger() << "<" << li->GetFirstDef() << "," << li->GetLastUse() << ">) ";
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+void LSRALinearScanRegAllocator::PrintLiveIntervals() const {
+    /* vreg LogInfo */
+    for (auto *li : liveIntervalsArray) {
+        if (li == nullptr || li->GetRegNO() == 0) {
+            continue;
+        }
+        PrintLiveInterval(*li, "");
+    }
+    LogInfo::MapleLogger() << "\n";
+    /* preg LogInfo */
+    for (auto param : intParamQueue) {
+        for (auto *li : param) {
+            if (li == nullptr || li->GetRegNO() == 0) {
+                continue;
+            }
+            PrintLiveInterval(*li, "");
+        }
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+void LSRALinearScanRegAllocator::DebugCheckActiveList() const {
+    LiveInterval *prev = nullptr;
+    for (auto *li : active) {
+        if (prev != nullptr) {
+            if ((li->GetRegNO() <= regInfo->GetLastParamsFpReg()) &&
+                (prev->GetRegNO() > regInfo->GetLastParamsFpReg())) {
+                if (li->GetFirstDef() < prev->GetFirstDef()) {
+                    LogInfo::MapleLogger() << "Error: active list with out of order phys + vreg\n";
+                    PrintLiveInterval(*prev, "prev");
+                    PrintLiveInterval(*li, "current");
+                    PrintActiveList("Active", kPrintedActiveListLength);
+                }
+            }
+            if ((li->GetRegNO() <= regInfo->GetLastParamsFpReg()) &&
+                (prev->GetRegNO() <= regInfo->GetLastParamsFpReg())) {
+                if (li->GetFirstDef() < prev->GetFirstDef()) {
+                    LogInfo::MapleLogger() << "Error: active list with out of order phys reg use\n";
+                    PrintLiveInterval(*prev, "prev");
+                    PrintLiveInterval(*li, "current");
+                    PrintActiveList("Active", kPrintedActiveListLength);
+                }
+            }
+        } else {
+            prev = li;
+        }
+    }
+}
+
+/*
+ * Prepare the free physical register pool for allocation.
+ * When a physical register is allocated, it is removed from the pool.
+ * The physical register is re-inserted into the pool when the associated live
+ * interval has ended.
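+ *
+ * Alongside the sets, InitFreeRegPool keeps one bit per register in the
+ * caller/callee masks, relative to the first register of the bank. In sketch
+ * form (illustrative):
+ *
+ *     mask = 0;
+ *     for each allocatable callee-saved regNO:
+ *         mask |= 1u << (regNO - firstIntReg);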
+ */ +void LSRALinearScanRegAllocator::InitFreeRegPool() { + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (!regInfo->IsAvailableReg(regNO)) { + continue; + } + if (regInfo->IsGPRegister(regNO)) { + if (regInfo->IsYieldPointReg(regNO)) { + continue; + } + /* ExtraSpillReg */ + if (regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + intSpillRegSet.push_back(regNO - firstIntReg); + continue; + } + if (regInfo->IsCalleeSavedReg(regNO)) { + /* callee-saved registers */ + (void)intCalleeRegSet.insert(regNO - firstIntReg); + intCalleeMask |= 1u << (regNO - firstIntReg); + } else { + /* caller-saved registers */ + (void)intCallerRegSet.insert(regNO - firstIntReg); + intCallerMask |= 1u << (regNO - firstIntReg); + } + } else { + /* fp ExtraSpillReg */ + if (regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + fpSpillRegSet.push_back(regNO - firstFpReg); + continue; + } + if (regInfo->IsCalleeSavedReg(regNO)) { + /* fp callee-saved registers */ + (void)fpCalleeRegSet.insert(regNO - firstFpReg); + fpCalleeMask |= 1u << (regNO - firstFpReg); + } else { + /* fp caller-saved registers */ + (void)fpCallerRegSet.insert(regNO - firstFpReg); + fpCallerMask |= 1u << (regNO - firstFpReg); + } + } + } + + if (LSRA_DUMP) { + PrintRegSet(intCallerRegSet, "ALLOCATABLE_INT_CALLER"); + PrintRegSet(intCalleeRegSet, "ALLOCATABLE_INT_CALLEE"); + PrintRegSet(intParamRegSet, "ALLOCATABLE_INT_PARAM"); + PrintRegSet(fpCallerRegSet, "ALLOCATABLE_FP_CALLER"); + PrintRegSet(fpCalleeRegSet, "ALLOCATABLE_FP_CALLEE"); + PrintRegSet(fpParamRegSet, "ALLOCATABLE_FP_PARAM"); + LogInfo::MapleLogger() << "INT_SPILL_REGS"; + for (uint32 intSpillRegNO : intSpillRegSet) { + LogInfo::MapleLogger() << " " << intSpillRegNO; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "FP_SPILL_REGS"; + for (uint32 fpSpillRegNO : fpSpillRegSet) { + LogInfo::MapleLogger() << " " << fpSpillRegNO; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << std::hex; + LogInfo::MapleLogger() << "INT_CALLER_MASK " << intCallerMask << "\n"; + LogInfo::MapleLogger() << "INT_CALLEE_MASK " << intCalleeMask << "\n"; + LogInfo::MapleLogger() << "INT_PARAM_MASK " << intParamMask << "\n"; + LogInfo::MapleLogger() << "FP_CALLER_FP_MASK " << fpCallerMask << "\n"; + LogInfo::MapleLogger() << "FP_CALLEE_FP_MASK " << fpCalleeMask << "\n"; + LogInfo::MapleLogger() << "FP_PARAM_FP_MASK " << fpParamMask << "\n"; + LogInfo::MapleLogger() << std::dec; + } +} + +void LSRALinearScanRegAllocator::RecordPhysRegs(const RegOperand ®Opnd, uint32 insnNum, bool isDef) { + RegType regType = regOpnd.GetRegisterType(); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + if (regInfo->IsUntouchableReg(regNO)) { + return; + } + if (!regInfo->IsPreAssignedReg(regNO)) { + return; + } + if (isDef) { + /* parameter/return register def is assumed to be live until a call. 
*/ + auto *li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetRegType(regType); + li->SetStackSlot(0xFFFFFFFF); + li->SetFirstDef(insnNum); + li->SetPhysUse(insnNum); + li->SetAssignedReg(regNO); + + if (regType == kRegTyInt) { + intParamQueue[regInfo->GetIntParamRegIdx(regNO)].push_back(li); + } else { + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].push_back(li); + } + } else { + if (regType == kRegTyInt) { + CHECK_FATAL(!intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty(), + "was not defined before use, impossible"); + LiveInterval *li = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].back(); + li->SetPhysUse(insnNum); + } else { + CHECK_FATAL(!fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty(), + "was not defined before use, impossible"); + LiveInterval *li = fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].back(); + li->SetPhysUse(insnNum); + } + } +} + +void LSRALinearScanRegAllocator::UpdateLiveIntervalState(const BB &bb, LiveInterval &li) const { + if (bb.IsCatch()) { + li.SetInCatchState(); + } else { + li.SetNotInCatchState(); + } + + if (bb.GetInternalFlag1()) { + li.SetInCleanupState(); + } else { + li.SetNotInCleanupState(bb.GetId() == 1); + } +} + +void LSRALinearScanRegAllocator::UpdateRegUsedInfo(LiveInterval &li, regno_t regNO) { + uint32 index = regNO / (sizeof(uint64) * k8ByteSize); + uint64 bit = regNO % (sizeof(uint64) * k8ByteSize); + if ((regUsedInBB[index] & (static_cast(1) << bit)) != 0) { + li.SetMultiUseInBB(true); + } + regUsedInBB[index] |= (static_cast(1) << bit); + + if (minVregNum > regNO) { + minVregNum = regNO; + } + if (maxVregNum < regNO) { + maxVregNum = regNO; + } +} + +/* main entry function for live interval computation. */ +void LSRALinearScanRegAllocator::SetupLiveInterval(Operand &opnd, Insn &insn, bool isDef, uint32 &nUses) { + if (!opnd.IsRegister()) { + return; + } + auto ®Opnd = static_cast(opnd); + uint32 insnNum = insn.GetId(); + if (regOpnd.IsPhysicalRegister()) { + RecordPhysRegs(regOpnd, insnNum, isDef); + return; + } + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + + LiveInterval *li = nullptr; + uint32 regNO = regOpnd.GetRegisterNumber(); + if (liveIntervalsArray[regNO] == nullptr) { + li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetStackSlot(0xFFFFFFFF); + liveIntervalsArray[regNO] = li; + liQue.push_back(li); + } else { + li = liveIntervalsArray[regNO]; + } + li->SetRegType(regType); + + BB *curBB = insn.GetBB(); + if (isDef) { + /* never set to 0 before, why consider this condition ? */ + if (li->GetFirstDef() == 0) { + li->SetFirstDef(insnNum); + li->SetLastUse(insnNum + 1); + } else if (!curBB->IsUnreachable()) { + if (li->GetLastUse() < insnNum || li->IsUseBeforeDef()) { + li->SetLastUse(insnNum + 1); + } + } + /* + * try-catch related + * Not set when extending live interval with bb's livein in ComputeLiveInterval. + */ + li->SetResultCount(li->GetResultCount() + 1); + } else { + if (li->GetFirstDef() == 0) { + DEBUG_ASSERT(false, "SetupLiveInterval: use before def"); + } + /* + * In ComputeLiveInterval when extending live interval using + * live-out information, li created does not have a type. 
+         */
+        if (!curBB->IsUnreachable()) {
+            li->SetLastUse(insnNum);
+        }
+        ++nUses;
+    }
+    UpdateLiveIntervalState(*curBB, *li);
+
+    li->SetRefCount(li->GetRefCount() + 1);
+    li->AddUsePositions(insnNum);
+    UpdateRegUsedInfo(*li, regNO);
+
+    /* setup the def/use point for it */
+    DEBUG_ASSERT(regNO < liveIntervalsArray.size(), "out of range of vector liveIntervalsArray");
+}
+
+/*
+ * Support 'hole' in LSRA.
+ * For a live interval, there might be multiple segments of live ranges,
+ * and between these segments a 'hole'.
+ * Some other short-lived vreg can go into these 'holes'.
+ *
+ * from : starting instruction sequence id
+ * to   : ending instruction sequence id
+ */
+void LSRALinearScanRegAllocator::LiveInterval::AddRange(uint32 from, uint32 to) {
+    if (ranges.empty()) {
+        ranges.emplace_back(LinearRange(from, to));
+        return;
+    }
+    /* create a new range */
+    if (to < ranges.front().GetStart()) {
+        (void)ranges.insert(ranges.begin(), LinearRange(from, to));
+        return;
+    }
+    DEBUG_ASSERT(from <= ranges.front().GetEnd(), "Not possible on reverse traversal.");
+    if (to >= ranges.front().GetEnd() && from < ranges.front().GetStart()) {
+        ranges.front().SetStart(from);
+        ranges.front().SetEnd(to);
+        return;
+    }
+    /* extend its range forward, e.g. a def-use opnd */
+    if (to >= ranges.front().GetStart() && from < ranges.front().GetStart()) {
+        ranges.front().SetStart(from);
+        return;
+    }
+}
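+/*
+ * AddRange is called while blocks are walked in reverse, so ranges arrive
+ * back to front. A worked example (positions are illustrative):
+ *     AddRange(40, 45)  ->  [40,45]
+ *     AddRange(20, 25)  ->  [20,25] [40,45]   (to < front start: new range)
+ *     AddRange(18, 22)  ->  [18,25] [40,45]   (overlaps front: extend its start)
+ */
+
+/* See if a vreg can fit in one of the holes of a longer live interval. */
+uint32 LSRALinearScanRegAllocator::FillInHole(const LiveInterval &li) {
+    for (auto it = active.begin(); it != active.end(); ++it) {
+        auto *ili = static_cast<LiveInterval*>(*it);
+
+        /*
+         * If ili is partly in cleanup, the hole info will not be correct,
+         * since cleanup bbs have no edges to normal func bbs, and the
+         * live-out info will not be correct.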
+ */ + if (!ili->IsAllOutCleanup() || ili->IsAllInCatch()) { + continue; + } + + if (ili->GetRegType() != li.GetRegType() || ili->GetStackSlot() != 0xFFFFFFFF || ili->GetLiChild() != nullptr || + ili->GetAssignedReg() == 0) { + continue; + } + /* todo: find available holes in ili->GetRanges() */ + } + return 0; +} + +uint32 LSRALinearScanRegAllocator::LiveInterval::GetUsePosAfter(uint32 pos) const { + for (auto usePos : usePositions) { + if (usePos > pos) { + return usePos; + } + } + return 0; +} + +MapleVector::iterator LSRALinearScanRegAllocator::LiveInterval::FindPosRange( + uint32 pos) { + while (rangeFinder != ranges.end()) { + if (rangeFinder->GetEnd() > pos) { + break; + } + ++rangeFinder; + } + return rangeFinder; +} + +void LSRALinearScanRegAllocator::SetupIntervalRangesByOperand(Operand &opnd, const Insn &insn, uint32 blockFrom, + bool isDef) { + auto ®Opnd = static_cast(opnd); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + regno_t regNO = regOpnd.GetRegisterNumber(); + if (regNO <= regInfo->GetAllRegNum()) { + return; + } + if (!isDef) { + liveIntervalsArray[regNO]->AddRange(blockFrom, insn.GetId()); + return; + } + if (liveIntervalsArray[regNO]->GetRanges().empty()) { + liveIntervalsArray[regNO]->AddRange(insn.GetId(), insn.GetId()); + } else { + liveIntervalsArray[regNO]->GetRanges().front().SetStart(insn.GetId()); + } + liveIntervalsArray[regNO]->AddUsePositions(insn.GetId()); +} + +void LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand(const Insn &insn, uint32 blockFrom) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && base->IsRegister()) { + SetupIntervalRangesByOperand(*base, insn, blockFrom, false); + } + if (offset != nullptr && offset->IsRegister()) { + SetupIntervalRangesByOperand(*offset, insn, blockFrom, false); + } + } else if (opnd.IsRegister()) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand"); + bool isDef = regProp->IsRegDef(); + SetupIntervalRangesByOperand(opnd, insn, blockFrom, isDef); + } + } +} + +/* Support finding holes by searching for ranges where holes exist. */ +void LSRALinearScanRegAllocator::BuildIntervalRanges() { + size_t bbIdx = bfs->sortedBBs.size(); + if (bbIdx == 0) { + return; + } + + do { + --bbIdx; + BB *bb = bfs->sortedBBs[bbIdx]; + if (bb->GetFirstInsn() == nullptr || bb->GetLastInsn() == nullptr) { + continue; + } + uint32 blockFrom = bb->GetFirstInsn()->GetId(); + uint32 blockTo = bb->GetLastInsn()->GetId() + 1; + + for (auto regNO : bb->GetLiveOutRegNO()) { + if (regNO < regInfo->GetAllRegNum()) { + /* Do not consider physical regs. */ + continue; + } + liveIntervalsArray[regNO]->AddRange(blockFrom, blockTo); + } + + FOR_BB_INSNS_REV(insn, bb) { + BuildIntervalRangesForEachOperand(*insn, blockFrom); + } + } while (bbIdx != 0); +} + +/* Extend live interval with live-in info */ +void LSRALinearScanRegAllocator::UpdateLiveIntervalByLiveIn(const BB &bb, uint32 insnNum) { + for (const auto ®NO : bb.GetLiveInRegNO()) { + if (!regInfo->IsVirtualRegister(regNO)) { + /* Do not consider physical regs. 
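+             * Pre-assigned parameter registers that are live-in are handled
+             * separately in UpdateParamLiveIntervalByLiveIn below.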
*/ + continue; + } + DEBUG_ASSERT(regNO < liveIntervalsArray.size(), "index out of range."); + LiveInterval *liOuter = liveIntervalsArray[regNO]; + if (liOuter != nullptr || (bb.IsEmpty() && bb.GetId() != 1)) { + continue; + } + /* + * try-catch related + * Since it is livein but not seen before, its a use before def + * spill it temporarily + */ + auto *li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetStackSlot(kSpilled); + liveIntervalsArray[regNO] = li; + li->SetFirstDef(insnNum); + liQue.push_back(li); + + li->SetUseBeforeDef(true); + + if (!bb.IsUnreachable()) { + if (bb.GetId() != 1) { + LogInfo::MapleLogger() << "ERROR: " << regNO << " use before def in bb " << bb.GetId() << " : " << + cgFunc->GetName() << "\n"; + DEBUG_ASSERT(false, "There should only be [use before def in bb 1], temporarily."); + } + LogInfo::MapleLogger() << "WARNING: " << regNO << " use before def in bb " << bb.GetId() << " : " << + cgFunc->GetName() << "\n"; + } + UpdateLiveIntervalState(bb, *li); + } +} + +/* traverse live in regNO, for each live in regNO create a new liveinterval */ +void LSRALinearScanRegAllocator::UpdateParamLiveIntervalByLiveIn(const BB &bb, uint32 insnNum) { + for (const auto ®NO : bb.GetLiveInRegNO()) { + if (!regInfo->IsPreAssignedReg(regNO)) { + continue; + } + auto *li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetStackSlot(0xFFFFFFFF); + li->SetFirstDef(insnNum); + li->SetPhysUse(insnNum); + li->SetAssignedReg(regNO); + + if (regInfo->IsGPRegister(regNO)) { + li->SetRegType(kRegTyInt); + intParamQueue[regInfo->GetIntParamRegIdx(regNO)].push_back(li); + } else { + li->SetRegType(kRegTyFloat); + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].push_back(li); + } + UpdateLiveIntervalState(bb, *li); + } +} + +void LSRALinearScanRegAllocator::ComputeLiveIn(BB &bb, uint32 insnNum) { + if (bb.IsEmpty() && bb.GetId() != 1) { + return; + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "bb(" << bb.GetId() << ")LIVEOUT:"; + for (const auto &liveOutRegNO : bb.GetLiveOutRegNO()) { + LogInfo::MapleLogger() << " " << liveOutRegNO; + } + LogInfo::MapleLogger() << ".\n"; + LogInfo::MapleLogger() << "bb(" << bb.GetId() << ")LIVEIN:"; + for (const auto &liveInRegNO : bb.GetLiveInRegNO()) { + LogInfo::MapleLogger() << " " << liveInRegNO; + } + LogInfo::MapleLogger() << ".\n"; + } + + UpdateLiveIntervalByLiveIn(bb, insnNum); + + if (bb.GetFirstInsn() == nullptr) { + return; + } + if (!bb.GetEhPreds().empty()) { + bb.InsertLiveInRegNO(firstIntReg); + bb.InsertLiveInRegNO(firstIntReg + 1); + } + UpdateParamLiveIntervalByLiveIn(bb, insnNum); + if (!bb.GetEhPreds().empty()) { + bb.EraseLiveInRegNO(firstIntReg); + bb.EraseLiveInRegNO(firstIntReg + 1); + } +} + +void LSRALinearScanRegAllocator::ComputeLiveOut(BB &bb, uint32 insnNum) { + /* + * traverse live out regNO + * for each live out regNO if the last corresponding live interval is created within this bb + * update this lastUse of li to the end of BB + */ + for (const auto ®NO : bb.GetLiveOutRegNO()) { + if (regInfo->IsPreAssignedReg(static_cast(regNO))) { + LiveInterval *liOut = nullptr; + if (regInfo->IsGPRegister(regNO)) { + if (intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty()) { + continue; + } + liOut = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].back(); + if (bb.GetFirstInsn() && liOut->GetFirstDef() >= bb.GetFirstInsn()->GetId()) { + liOut->SetPhysUse(insnNum); + } + } else { + if (fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty()) { + continue; + } + liOut = 
fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].back(); + if (bb.GetFirstInsn() && liOut->GetFirstDef() >= bb.GetFirstInsn()->GetId()) { + liOut->SetPhysUse(insnNum); + } + } + } + /* Extend live interval with live-out info */ + LiveInterval *li = liveIntervalsArray[regNO]; + if (li != nullptr && !bb.IsEmpty()) { + li->SetLastUse(bb.GetLastInsn()->GetId()); + UpdateLiveIntervalState(bb, *li); + if (bb.GetKind() == BB::kBBRangeGoto) { + li->SetSplitForbid(true); + } + } + } +} + +void LSRALinearScanRegAllocator::ComputeLiveIntervalForEachOperand(Insn &insn) { + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + /* + * we need to process src opnd first just in case the src/dest vreg are the same and the src vreg belongs to the + * last interval. + */ + for (int32 i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + const OpndDesc *opndDesc = md->GetOpndDes(i); + DEBUG_ASSERT(opndDesc != nullptr, "ptr null check."); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + SetupLiveInterval(*op, insn, opndDesc->IsDef(), numUses); + } + } else if (opnd.IsMemoryAccessOperand()) { + bool isDef = false; + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveInterval(*base, insn, isDef, numUses); + } + if (offset != nullptr) { + SetupLiveInterval(*offset, insn, isDef, numUses); + } + } else { + /* Specifically, the "use-def" opnd is treated as a "use" opnd */ + bool isUse = opndDesc->IsRegUse(); + SetupLiveInterval(opnd, insn, !isUse, numUses); + } + } + if (numUses >= regInfo->GetNormalUseOperandNum()) { + needExtraSpillReg = true; + } +} + +void LSRALinearScanRegAllocator::ComputeLoopLiveIntervalPriority(const CGFuncLoops &loop) { + for (const auto *lp : loop.GetInnerLoops()) { + /* handle nested Loops */ + ComputeLoopLiveIntervalPriority(*lp); + } + for (auto *bb : loop.GetLoopMembers()) { + if (bb->IsEmpty()) { + continue; + } + FOR_BB_INSNS(insn, bb) { + ComputeLoopLiveIntervalPriorityInInsn(*insn); + } + loopBBRegSet.clear(); + } +} + +void LSRALinearScanRegAllocator::ComputeLoopLiveIntervalPriorityInInsn(const Insn &insn) { + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsPhysicalRegister()) { + continue; + } + uint32 regNo = regOpnd.GetRegisterNumber(); + LiveInterval *li = liveIntervalsArray[regNo]; + if (li == nullptr || loopBBRegSet.find(regNo) != loopBBRegSet.end()) { + continue; + } + li->SetPriority(kLoopWeight * li->GetPriority()); + (void)loopBBRegSet.insert(regNo); + } + return; +} + +void LSRALinearScanRegAllocator::ComputeLiveInterval() { + liQue.clear(); + uint32 regUsedInBBSz = (cgFunc->GetMaxVReg() / (sizeof(uint64) * k8ByteSize) + 1); + regUsedInBB.resize(regUsedInBBSz, 0); + uint32 insnNum = 1; + for (BB *bb : bfs->sortedBBs) { + ComputeLiveIn(*bb, insnNum); + FOR_BB_INSNS(insn, bb) { + insn->SetId(insnNum); + /* skip comment and debug insn */ + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + + /* RecordCall, remember calls for caller/callee allocation. 
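+             * callQueue is consumed later by NeedSaveAcrossCall to decide whether
+             * an interval should prefer a callee-saved register or be spilled and
+             * filled around each call it crosses.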
*/ + if (insn->IsCall()) { + if (!insn->GetIsThrow() || !bb->GetEhSuccs().empty()) { + callQueue.emplace_back(insn->GetId()); + } + } + + ComputeLiveIntervalForEachOperand(*insn); + + /* handle return value for call insn */ + if (insn->IsCall()) { + /* For all backend architectures so far, adopt all RetRegs as Def via this insn, + * and then their live begins. + * next optimization, you can determine which registers are actually used. + */ + RegOperand *retReg = nullptr; + if (insn->GetRetType() == Insn::kRegInt) { + for (int i = 0; i < regInfo->GetIntRetRegsNum(); i++) { + retReg = regInfo->GetOrCreatePhyRegOperand(regInfo->GetIntRetReg(i), + k64BitSize, kRegTyInt); + RecordPhysRegs(*retReg, insnNum, true); + } + } else { + for (int i = 0; i < regInfo->GetFpRetRegsNum(); i++) { + retReg = regInfo->GetOrCreatePhyRegOperand(regInfo->GetFpRetReg(i), + k64BitSize, kRegTyFloat); + RecordPhysRegs(*retReg, insnNum, true); + } + } + } + ++insnNum; + } + + ComputeLiveOut(*bb, insnNum); + } + + maxInsnNum = insnNum - 1; /* insn_num started from 1 */ + regUsedInBB.clear(); + /* calculate Live Interval weight */ + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + if (li->GetIsCall() != nullptr || li->GetPhysUse()) { + continue; + } + if (li->GetLastUse() > li->GetFirstDef()) { + li->SetPriority(static_cast(li->GetRefCount()) / + static_cast(li->GetLastUse() - li->GetFirstDef())); + } else { + li->SetPriority(static_cast(li->GetRefCount()) / + static_cast(li->GetFirstDef() - li->GetLastUse())); + } + } + + /* enhance loop Live Interval Priority */ + if (!cgFunc->GetLoops().empty()) { + for (const auto *lp : cgFunc->GetLoops()) { + ComputeLoopLiveIntervalPriority(*lp); + } + } + + if (LSRA_DUMP) { + PrintLiveIntervals(); + } + +} + +/* Calculate the weight of a live interval for pre-spill and flexible spill */ +void LSRALinearScanRegAllocator::LiveIntervalAnalysis() { + for (uint32 bbIdx = 0; bbIdx < bfs->sortedBBs.size(); ++bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx]; + + FOR_BB_INSNS(insn, bb) { + /* 1 calculate live interfere */ + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + /* New instruction inserted by reg alloc (ie spill) */ + continue; + } + /* 1.1 simple retire from active */ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); /* erase will update */) { + auto *li = static_cast(*it); + if (li->GetLastUse() > insn->GetId()) { + break; + } + it = active.erase(it); + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::LiveIntervalAnalysis"); + bool isDef = regProp->IsRegDef(); + Operand &opnd = insn->GetOperand(i); + if (isDef) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsVirtualRegister() && regOpnd.GetRegisterType() != kRegTyCc) { + /* 1.2 simple insert to active */ + uint32 regNO = regOpnd.GetRegisterNumber(); + LiveInterval *li = liveIntervalsArray[regNO]; + if (li->GetFirstDef() == insn->GetId()) { + (void)active.insert(li); + } + } + } + } + + /* 2 get interfere info, and analysis */ + uint32 interNum = active.size(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "In insn " << insn->GetId() << ", " << interNum << " overlap live intervals.\n"; + LogInfo::MapleLogger() << "\n"; + } + + /* 2.2 interfere with each other, analysis which to spill */ + while (interNum > 
CGOptions::GetOverlapNum()) { + LiveInterval *lowestLi = nullptr; + FindLowestPrioInActive(lowestLi); + if (lowestLi != nullptr) { + if (LSRA_DUMP) { + PrintLiveInterval(*lowestLi, "Pre spilled: "); + } + lowestLi->SetStackSlot(kSpilled); + lowestLi->SetShouldSave(false); + active.erase(itFinded); + interNum = active.size(); + } else { + break; + } + } + } + } + active.clear(); +} + +void LSRALinearScanRegAllocator::UpdateCallQueueAtRetirement(uint32 insnID) { + /* + * active list is sorted based on increasing lastUse + * any operand whose use is greater than current + * instruction number is still in use. + * If the use is less than or equal to instruction number + * then it is possible to retire this live interval and + * reclaim the physical register associated with it. + */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "RetireActiveByInsn instr_num " << insnID << "\n"; + } + /* Retire invalidated call from call queue */ + while (!callQueue.empty() && callQueue.front() <= insnID) { + callQueue.pop_front(); + } +} + +/* update allocate info by active queue */ +void LSRALinearScanRegAllocator::UpdateActiveAllocateInfo(const LiveInterval &li) { + uint32 start = li.GetFirstDef(); + uint32 end = li.GetLastUse(); + if (li.GetSplitParent() != nullptr || li.IsUseBeforeDef()) { + --start; + } + for (auto *activeLi : active) { + uint32 regNO = activeLi->GetAssignedReg(); + uint32 rangeStartPos; + auto posRange = activeLi->FindPosRange(start); + if (posRange == activeLi->GetRanges().end()) { + /* handle splited li */ + uint32 splitSafePos = activeLi->GetSplitPos(); + if (splitSafePos == li.GetFirstDef() && (li.GetSplitParent() != nullptr || li.IsUseBeforeDef())) { + rangeStartPos = 0; + } else if (splitSafePos > li.GetFirstDef()) { + rangeStartPos = splitSafePos - 1; + } else { + rangeStartPos = 0XFFFFFFFUL; + } + } else if (posRange->GetEhStart() != 0 && posRange->GetEhStart() < posRange->GetStart()) { + rangeStartPos = posRange->GetEhStart(); + } else { + rangeStartPos = posRange->GetStart(); + } + if (rangeStartPos > li.GetFirstDef()) { + if (rangeStartPos < end) { + blockForbiddenMask |= (1UL << activeLi->GetAssignedReg()); + } + if (rangeStartPos < freeUntilPos[regNO]) { + freeUntilPos[regNO] = rangeStartPos; + } + } else { + freeUntilPos[regNO] = 0; + } + } +} + +/* update allocate info by param queue */ +void LSRALinearScanRegAllocator::UpdateParamAllocateInfo(const LiveInterval &li) { + bool isInt = (li.GetRegType() == kRegTyInt); + MapleVector ¶mQueue = isInt ? intParamQueue : fpParamQueue; + uint32 baseReg = isInt ? firstIntReg : firstFpReg; + uint32 paramNum = isInt ? 
regInfo->GetIntRegs().size() : regInfo->GetFpRegs().size(); + uint32 start = li.GetFirstDef(); + uint32 end = li.GetLastUse(); + for (uint32 i = 0; i < paramNum; ++i) { + while (!paramQueue[i].empty() && paramQueue[i].front()->GetPhysUse() <= start) { + if (paramQueue[i].front()->GetPhysUse() == start && li.GetSplitParent() != nullptr) { + break; + } + paramQueue[i].pop_front(); + } + if (paramQueue[i].empty()) { + continue; + } + auto regNo = paramQueue[i].front()->GetRegNO(); + uint32 startPos = paramQueue[i].front()->GetFirstDef(); + if (startPos <= start) { + freeUntilPos[regNo] = 0; + } else { + if (startPos < end) { + blockForbiddenMask |= (1UL << (i + baseReg)); + } + if (startPos < freeUntilPos[regNo]) { + freeUntilPos[regNo] = startPos; + } + } + } +} + +/* update active in retire */ +void LSRALinearScanRegAllocator::RetireActive(LiveInterval &li, uint32 insnID) { + /* Retire live intervals from active list */ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); /* erase will update */) { + auto *activeLi = static_cast(*it); + if (activeLi->GetLastUse() > insnID) { + break; + } + if (activeLi->GetLastUse() == insnID) { + if (li.GetSplitParent() != nullptr || activeLi->GetSplitNext() != nullptr) { + ++it; + continue; + } + if (activeLi->IsEndByMov() && activeLi->GetRegType() == li.GetRegType()) { + li.SetPrefer(activeLi->GetAssignedReg()); + } + } + /* reserve split li in active */ + if (activeLi->GetSplitPos() >= insnID) { + ++it; + continue; + } + /* + * live interval ended for this reg in active + * release physical reg assigned to free reg pool + */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Removing " << "(" << activeLi->GetAssignedReg() << ")" << "from regmask\n"; + PrintLiveInterval(*activeLi, "\tRemoving virt_reg li\n"); + } + it = active.erase(it); + } +} + +/* find the best physical reg by freeUntilPos */ +uint32 LSRALinearScanRegAllocator::GetRegFromMask(uint32 mask, regno_t offset, const LiveInterval &li) { + uint32 prefer = li.GetPrefer(); + if (prefer != 0) { + uint32 preg = li.GetPrefer() - offset; + if ((mask & (1u << preg)) != 0 && freeUntilPos[prefer] == 0XFFFFFFFUL) { + return prefer; + } + } + uint32 bestReg = 0; + uint32 maxFreeUntilPos = 0; + for (uint32 preg = 0; preg < k32BitSize; ++preg) { + if ((mask & (1u << preg)) == 0) { + continue; + } + uint32 regNO = preg + offset; + if (freeUntilPos[regNO] >= li.GetLastUse()) { + return regNO; + } + if (freeUntilPos[regNO] > maxFreeUntilPos) { + maxFreeUntilPos = freeUntilPos[regNO]; + bestReg = regNO; + } + } + return bestReg; +} + +/* Handle adrp register assignment. Use the same register for the next instruction. */ +uint32 LSRALinearScanRegAllocator::GetSpecialPhysRegPattern(const LiveInterval &li) { + /* li's first def point */ + Insn *nInsn = nullptr; + if (nInsn == nullptr || !nInsn->IsMachineInstruction() || nInsn->IsDMBInsn() || li.GetLastUse() > nInsn->GetId()) { + return 0; + } + + const InsnDesc *md = nInsn->GetDesc(); + if (!md->GetOpndDes(0)->IsRegDef()) { + return 0; + } + Operand &opnd = nInsn->GetOperand(0); + if (!opnd.IsRegister()) { + return 0; + } + auto ®Opnd = static_cast(opnd); + if (!regOpnd.IsPhysicalRegister()) { + return 0; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (!regInfo->IsPreAssignedReg(regNO)) { + return 0; + } + + /* next insn's dest is a physical param reg 'regNO'. 
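+       e.g. "adrp vN, :sym:" directly followed by "mov x1, vN": assigning x1 to vN
+       makes the later copy trivial;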
return 'regNO' if dest of adrp is src of next insn */ + uint32 opndNum = nInsn->GetOperandSize(); + for (uint32 i = 1; i < opndNum; ++i) { + Operand &src = nInsn->GetOperand(i); + if (src.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(src); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + auto *regSrc = static_cast(base); + uint32 srcRegNO = regSrc->GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + return regNO; + } + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + auto *regSrc = static_cast(offset); + uint32 srcRegNO = regSrc->GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + return regNO; + } + } + } else if (src.IsRegister()) { + auto ®Src = static_cast(src); + uint32 srcRegNO = regSrc.GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::GetSpecialPhysRegPattern"); + bool srcIsDef = regProp->IsRegDef(); + if (srcIsDef) { + break; + } + return regNO; + } + } + } + return 0; +} + +uint32 LSRALinearScanRegAllocator::FindAvailablePhyRegByFastAlloc(LiveInterval &li) { + uint32 regNO = 0; + if (li.GetRegType() == kRegTyInt) { + regNO = GetRegFromMask(intCalleeMask, firstIntReg, li); + li.SetShouldSave(false); + if (regNO == 0 || freeUntilPos[regNO] < li.GetLastUse()) { + regNO = GetRegFromMask(intCallerMask, firstIntReg, li); + li.SetShouldSave(true); + } + } else if (li.GetRegType() == kRegTyFloat) { + regNO = GetRegFromMask(fpCalleeMask, firstFpReg, li); + li.SetShouldSave(false); + if (regNO == 0 || freeUntilPos[regNO] < li.GetLastUse()) { + regNO = GetRegFromMask(fpCallerMask, firstFpReg, li); + li.SetShouldSave(true); + } + } + return regNO; +} + +/* Determine if live interval crosses the call */ +bool LSRALinearScanRegAllocator::NeedSaveAcrossCall(LiveInterval &li) { + bool saveAcrossCall = false; + for (uint32 callInsnID : callQueue) { + if (callInsnID > li.GetLastUse()) { + break; + } + if (callInsnID < li.GetFirstDef()) { + continue; + } + /* Need to spill/fill around this call */ + for (auto range : li.GetRanges()) { + uint32 start; + if (range.GetEhStart() != 0 && range.GetEhStart() < range.GetStart()) { + start = range.GetEhStart(); + } else { + start = range.GetStart(); + } + if (callInsnID >= start && callInsnID < range.GetEnd()) { + saveAcrossCall = true; + break; + } + } + if (saveAcrossCall) { + break; + } + } + if (LSRA_DUMP) { + if (saveAcrossCall) { + LogInfo::MapleLogger() << "\t\tlive interval crosses a call\n"; + } else { + LogInfo::MapleLogger() << "\t\tlive interval does not cross a call\n"; + } + } + return saveAcrossCall; +} + +/* Return a phys register number for the live interval. */ +uint32 LSRALinearScanRegAllocator::FindAvailablePhyReg(LiveInterval &li) { + if (fastAlloc) { + return FindAvailablePhyRegByFastAlloc(li); + } + uint32 regNO = 0; + if (li.GetRegType() == kRegTyInt) { + regNO = FindAvailablePhyReg(li, true); + } else { + DEBUG_ASSERT(li.GetRegType() == kRegTyFloat, "impossible register type"); + regNO = FindAvailablePhyReg(li, false); + } + return regNO; +} + +/* Spill and reload for caller saved registers. 
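+ * A value living in a caller-saved register across a call is stored to its spill
+ * slot after each def and reloaded before each use; intBBDefMask/fpBBDefMask
+ * record defs already seen in the current BB so redundant reloads can be skipped.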
*/ +void LSRALinearScanRegAllocator::InsertCallerSave(Insn &insn, Operand &opnd, bool isDef) { + auto ®Opnd = static_cast(opnd); + uint32 vRegNO = regOpnd.GetRegisterNumber(); + if (vRegNO >= liveIntervalsArray.size()) { + CHECK_FATAL(false, "index out of range in LSRALinearScanRegAllocator::InsertCallerSave"); + } + LiveInterval *rli = liveIntervalsArray[vRegNO]; + RegType regType = regOpnd.GetRegisterType(); + + isSpillZero = false; + if (!isDef) { + uint32 mask; + uint32 regBase; + if (regType == kRegTyInt) { + mask = intBBDefMask; + regBase = firstIntReg; + } else { + mask = fpBBDefMask; + regBase = firstFpReg; + } + if (mask & (1u << (rli->GetAssignedReg() - regBase))) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "InsertCallerSave " << rli->GetAssignedReg() << " skipping due to local def\n"; + } + return; + } + } + + if (!rli->IsShouldSave()) { + return; + } + + uint32 regSize = regOpnd.GetSize(); + PrimType spType; + + if (regType == kRegTyInt) { + spType = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + intBBDefMask |= (1u << (rli->GetAssignedReg() - firstIntReg)); + } else { + spType = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + fpBBDefMask |= (1u << (rli->GetAssignedReg() - firstFpReg)); + } + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "InsertCallerSave " << vRegNO << "\n"; + } + + if (!isDef && !rli->IsCallerSpilled()) { + LogInfo::MapleLogger() << "WARNING: " << vRegNO << " caller restore without spill in bb " + << insn.GetBB()->GetId() << " : " << cgFunc->GetName() << "\n"; + } + rli->SetIsCallerSpilled(true); + + MemOperand *memOpnd = nullptr; + RegOperand *phyOpnd = nullptr; + + phyOpnd = regInfo->GetOrCreatePhyRegOperand(static_cast(rli->GetAssignedReg()), regSize, + regType); + std::string comment; + bool isOutOfRange = false; + if (isDef) { + memOpnd = GetSpillMem(vRegNO, true, insn, static_cast(intSpillRegSet[0] + firstIntReg), + isOutOfRange, regSize); + Insn *stInsn = regInfo->BuildStrInsn(regSize, spType, *phyOpnd, *memOpnd); + comment = " SPILL for caller_save " + std::to_string(vRegNO); + ++callerSaveSpillCount; + if (rli->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(vRegNO); + comment += " end"; + } + stInsn->SetComment(comment); + if (isOutOfRange) { + insn.GetBB()->InsertInsnAfter(*insn.GetNext(), *stInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *stInsn); + } + } else { + memOpnd = GetSpillMem(vRegNO, false, insn, static_cast(intSpillRegSet[0] + firstIntReg), + isOutOfRange, regSize); + Insn *ldInsn = regInfo->BuildLdrInsn(regSize, spType, *phyOpnd, *memOpnd); + comment = " RELOAD for caller_save " + std::to_string(vRegNO); + ++callerSaveReloadCount; + if (rli->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(vRegNO); + comment += " end"; + } + ldInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *ldInsn); + } +} + +MemOperand *LSRALinearScanRegAllocator::GetSpillMem(uint32 vRegNO, bool isDest, Insn &insn, regno_t regNO, + bool &isOutOfRange, uint32 bitSize) const { + MemOperand *memOpnd = regInfo->GetOrCreatSpillMem(vRegNO, bitSize); + return regInfo->AdjustMemOperandIfOffsetOutOfRange(memOpnd, vRegNO, isDest, insn, regNO, isOutOfRange); +} + +/* Set a vreg in live interval as being marked for spill. 
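+ * Once the stack slot is kSpilled, AssignPhysRegs passes over the interval
+ * (unless it is marked must-allocate) and SpillOperand materializes the slot
+ * at each def/use site.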
*/ +void LSRALinearScanRegAllocator::SetOperandSpill(Operand &opnd) { + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SetOperandSpill " << regNO; + LogInfo::MapleLogger() << "(" << liveIntervalsArray[regNO]->GetFirstAcrossedCall(); + LogInfo::MapleLogger() << ", refCount " << liveIntervalsArray[regNO]->GetRefCount() << ")\n"; + } + + DEBUG_ASSERT(regNO < liveIntervalsArray.size(), + "index out of vector size in LSRALinearScanRegAllocator::SetOperandSpill"); + LiveInterval *li = liveIntervalsArray[regNO]; + li->SetStackSlot(kSpilled); + li->SetShouldSave(false); +} + +/* + * Generate spill/reload for an operand. + * spill_idx : one of 3 phys regs set aside for the purpose of spills. + */ +void LSRALinearScanRegAllocator::SpillOperand(Insn &insn, Operand &opnd, bool isDef, uint32 spillIdx) { + /* + * Insert spill (def) and fill (use) instructions for the operand. + * Keep track of the 'slot' (base 0). The actual slot on the stack + * will be some 'base_slot_offset' + 'slot' off FP. + * For simplification, entire 64bit register is spilled/filled. + * + * For example, a virtual register home 'slot' on the stack is location 5. + * This represents a 64bit slot (8bytes). The base_slot_offset + * from the base 'slot' determined by whoever is added, off FP. + * stack address is ( FP - (5 * 8) + base_slot_offset ) + * So the algorithm is simple, for each virtual register that is not + * allocated, it has to have a home address on the stack (a slot). + * A class variable is used, start from 0, increment by 1. + * Since LiveInterval already represent unique regNO information, + * just add a slot number to it. Subsequent reference to a regNO + * will either get an allocated physical register or a slot number + * for computing the stack location. + * + * This function will also determine the operand to be a def or use. + * For def, spill instruction(s) is appended after the insn. + * For use, spill instruction(s) is prepended before the insn. + * Use FP - (slot# *8) for now. Will recompute if base_slot_offset + * is not 0. + * + * The total number of slots used will be used to compute the stack + * frame size. This will require some interface external to LSRA. + * + * For normal instruction, two spill regs should be enough. The caller + * controls which ones to use. + * For more complex operations, need to break down the instruction. + * eg. store v1 -> [v2 + v3] // 3 regs needed + * => p1 <- v2 // address part 1 + * p2 <- v3 // address part 2 + * p1 <- p1 + p2 // freeing up p2 + * p2 <- v1 + * store p2 -> [p1] + * or we can allocate more registers to the spill register set + * For store multiple, need to break it down into two or more instr. + */ + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SpillOperand " << regNO << "\n"; + } + + regno_t spReg; + PrimType spType; + CHECK_FATAL(regNO < liveIntervalsArray.size(), "index out of range in LSRALinearScanRegAllocator::SpillOperand"); + LiveInterval *li = liveIntervalsArray[regNO]; + DEBUG_ASSERT(!li->IsShouldSave(), "SpillOperand: Should not be caller"); + uint32 regSize = regOpnd.GetSize(); + RegType regType = regOpnd.GetRegisterType(); + + if (li->GetRegType() == kRegTyInt) { + DEBUG_ASSERT((spillIdx < intSpillRegSet.size()), "SpillOperand: ran out int spill reg"); + spReg = intSpillRegSet[spillIdx] + firstIntReg; + spType = (regSize <= k32BitSize) ? 
PTY_i32 : PTY_i64; + } else if (li->GetRegType() == kRegTyFloat) { + DEBUG_ASSERT((spillIdx < fpSpillRegSet.size()), "SpillOperand: ran out fp spill reg"); + spReg = fpSpillRegSet[spillIdx] + firstFpReg; + spType = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + } else { + CHECK_FATAL(false, "SpillOperand: Should be int or float type"); + } + + bool isOutOfRange = false; + RegOperand *phyOpnd = nullptr; + if (isSpillZero) { + phyOpnd = &cgFunc->GetZeroOpnd(regSize); + } else { + phyOpnd = regInfo->GetOrCreatePhyRegOperand(static_cast(spReg), regSize, regType); + } + li->SetAssignedReg(phyOpnd->GetRegisterNumber()); + + MemOperand *memOpnd = nullptr; + if (isDef) { + /* + * Need to assign spReg (one of the two spill reg) to the destination of the insn. + * spill_vreg <- opn1 op opn2 + * to + * spReg <- opn1 op opn2 + * store spReg -> spillmem + */ + li->SetStackSlot(kSpilled); + + ++spillCount; + memOpnd = GetSpillMem(regNO, true, insn, static_cast(intSpillRegSet[spillIdx + 1] + firstIntReg), + isOutOfRange, regSize); + Insn *stInsn = regInfo->BuildStrInsn(regSize, spType, *phyOpnd, *memOpnd); + std::string comment = " SPILL vreg:" + std::to_string(regNO); + if (li->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(regNO); + comment += " end"; + } + stInsn->SetComment(comment); + if (isOutOfRange) { + insn.GetBB()->InsertInsnAfter(*insn.GetNext(), *stInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *stInsn); + } + } else { + /* Here, reverse of isDef, change either opn1 or opn2 to the spReg. */ + if (li->GetStackSlot() == 0xFFFFFFFF) { + LogInfo::MapleLogger() << "WARNING: " << regNO << " assigned " << li->GetAssignedReg() << + " restore without spill in bb " << insn.GetBB()->GetId() << " : " << + cgFunc->GetName() << "\n"; + } + ++reloadCount; + memOpnd = GetSpillMem(regNO, false, insn, static_cast(intSpillRegSet[spillIdx] + firstIntReg), + isOutOfRange, regSize); + Insn *ldInsn = regInfo->BuildLdrInsn(regSize, spType, *phyOpnd, *memOpnd); + std::string comment = " RELOAD vreg" + std::to_string(regNO); + if (li->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(regNO); + comment += " end"; + } + ldInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *ldInsn); + } +} + +/* find the lowest li that meets the constraints related to li0 form current active */ +void LSRALinearScanRegAllocator::FindLowestPrioInActive(LiveInterval *&targetLi, LiveInterval *li0, RegType regType) { + float lowestPrio = 1000.0; + bool found = false; + bool hintCalleeSavedReg = li0 && NeedSaveAcrossCall(*li0); + MapleSet::iterator it; + MapleSet::iterator lowestIt; + for (it = active.begin(); it != active.end(); ++it) { + LiveInterval *li = static_cast(*it); + regno_t regNO = li->GetAssignedReg(); + /* 1. Basic Constraints */ + if (li->GetPriority() >= lowestPrio || li->GetRegType() != regType || li->GetLiParent() || li->GetLiChild()) { + continue; + } + /* 2. If li is pre-assigned to Physical register primitively, ignore it. */ + if (regInfo->IsPreAssignedReg(li->GetRegNO())) { + continue; + } + /* 3. CalleeSavedReg is preferred here. If li is assigned to Non-CalleeSavedReg, ignore it. */ + if (hintCalleeSavedReg && !regInfo->IsCalleeSavedReg(regNO - firstIntReg)) { + continue; + } + /* 4. Checkinterference. If li is assigned to li0's OverlapPhyReg, ignore it. 
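+         * Of the candidates surviving all four checks, the one with the lowest
+         * priority is selected and handed back through targetLi.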
*/ + if (li0 && li0->IsOverlapPhyReg(regNO)) { + continue; + } + lowestPrio = li->GetPriority(); + lowestIt = it; + found = true; + } + if (found) { + targetLi = *lowestIt; + itFinded = lowestIt; + } + return; +} + +/* Set a vreg in live interval as being marked for spill. */ +void LSRALinearScanRegAllocator::SetLiSpill(LiveInterval &li) { + uint32 regNO = li.GetRegNO(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SetLiSpill " << regNO; + LogInfo::MapleLogger() << "(" << li.GetFirstAcrossedCall(); + LogInfo::MapleLogger() << ", refCount " << li.GetRefCount() << ")\n"; + } + li.SetStackSlot(kSpilled); + li.SetShouldSave(false); +} + +uint32 LSRALinearScanRegAllocator::HandleSpillForLi(LiveInterval &li) { + /* choose the lowest priority li to spill */ + RegType regType = li.GetRegType(); + LiveInterval *spillLi = nullptr; + FindLowestPrioInActive(spillLi, &li, regType); + + /* + * compare spill_li with current li + * spill_li is null and li->SetStackSlot(Spilled) when the li is spilled due to LiveIntervalAnalysis + */ + if (!li.IsMustAllocate()) { + if (spillLi == nullptr || li.GetStackSlot() == kSpilled || li.GetRefCount() <= spillLi->GetRefCount()) { + /* spill current li */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Flexible Spill: still spill " << li.GetRegNO() << ".\n"; + } + SetLiSpill(li); + return 0; + } + } + DEBUG_ASSERT(spillLi != nullptr, "spillLi is null in LSRALinearScanRegAllocator::HandleSpillForLi"); + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Flexible Spill: " << spillLi->GetRegNO() << " instead of " << li.GetRegNO() << ".\n"; + PrintLiveInterval(*spillLi, "TO spill: "); + PrintLiveInterval(li, "Instead of: "); + } + + uint32 newRegNO = spillLi->GetAssignedReg(); + li.SetAssignedReg(newRegNO); + + /* spill this live interval */ + (void)active.erase(itFinded); + SetLiSpill(*spillLi); + spillLi->SetAssignedReg(0); + + (void)active.insert(&li); + return newRegNO; +} + +uint32 LSRALinearScanRegAllocator::FindAvailablePhyReg(LiveInterval &li, bool isIntReg) { + uint32 &callerRegMask = isIntReg ? intCallerMask : fpCallerMask; + uint32 &calleeRegMask = isIntReg ? intCalleeMask : fpCalleeMask; + regno_t reg0 = isIntReg ? firstIntReg : firstFpReg; + regno_t bestReg = 0; + regno_t secondReg = 0; + + /* See if register is live accross a call */ + if (NeedSaveAcrossCall(li)) { + if (!li.IsAllInCatch() && !li.IsAllInCleanupOrFirstBB()) { + /* call in live interval, use callee if available */ + bestReg = GetRegFromMask(calleeRegMask, reg0, li); + if (bestReg != 0 && freeUntilPos[bestReg] >= li.GetLastUse()) { + li.SetShouldSave(false); + return bestReg; + } + } + /* can be optimize multi use between calls rather than in bb */ + if (bestReg == 0 || li.IsMultiUseInBB()) { + secondReg = GetRegFromMask(callerRegMask, reg0, li); + if (freeUntilPos[secondReg] >= li.GetLastUse()) { + li.SetShouldSave(true); + return secondReg; + } + } + } else { + /* Get forced register */ + uint32 forcedReg = GetSpecialPhysRegPattern(li); + if (forcedReg != 0) { + return forcedReg; + } + + bestReg = GetRegFromMask(intCallerMask, reg0, li); + if (bestReg == 0) { + bestReg = GetRegFromMask(intCalleeMask, reg0, li); + } else if (freeUntilPos[bestReg] < li.GetLastUse()) { + secondReg = GetRegFromMask(intCalleeMask, reg0, li); + if (secondReg != 0) { + bestReg = (freeUntilPos[bestReg] > freeUntilPos[secondReg]) ? 
bestReg : secondReg; + } + } + } + if (bestReg != 0 && freeUntilPos[bestReg] < li.GetLastUse()) { + DEBUG_ASSERT(freeUntilPos[bestReg] != 0, "impossible"); + bestReg = 0; + } + /* todo : try to fill in holes */ + /* todo : try first split if no hole exists */ + return bestReg; +} + +/* Shell function to find a physical register for an operand. */ +uint32 LSRALinearScanRegAllocator::AssignPhysRegs(LiveInterval &li) { + if (spillAll && !li.IsMustAllocate()) { + return 0; + } + + /* pre spilled: */ + if (li.GetStackSlot() != 0xFFFFFFFF && !li.IsMustAllocate()) { + return 0; + } + + if (LSRA_DUMP) { + uint32 activeSz = active.size(); + LogInfo::MapleLogger() << "\tAssignPhysRegs-active_sz " << activeSz << "\n"; + } + + uint32 regNO = FindAvailablePhyReg(li); + if (regNO != 0) { + li.SetAssignedReg(regNO); + if (regInfo->IsCalleeSavedReg(regNO)) { + if (!CGOptions::DoCalleeToSpill()) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\tCallee-save register for save/restore in prologue/epilogue: " << regNO << "\n"; + } + cgFunc->AddtoCalleeSaved(regNO); + } + ++calleeUseCnt[regNO]; + } + } + return regNO; +} + +void LSRALinearScanRegAllocator::AssignPhysRegsForLi(LiveInterval &li) { + uint32 newRegNO = AssignPhysRegs(li); + if (newRegNO == 0) { + newRegNO = HandleSpillForLi(li); + } + + if (newRegNO != 0) { + (void)active.insert(&li); + } +} + +/* Replace Use-Def Opnd */ +RegOperand *LSRALinearScanRegAllocator::GetReplaceUdOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx) { + if (!opnd.IsRegister()) { + return nullptr; + } + const auto *regOpnd = static_cast(&opnd); + + uint32 vRegNO = regOpnd->GetRegisterNumber(); + RegType regType = regOpnd->GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return nullptr; + } + if (regInfo->IsUntouchableReg(vRegNO)) { + return nullptr; + } + if (regOpnd->IsPhysicalRegister()) { + return nullptr; + } + + DEBUG_ASSERT(vRegNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::GetReplaceUdOpnd"); + LiveInterval *li = liveIntervalsArray[vRegNO]; + + regno_t regNO = li->GetAssignedReg(); + if (regInfo->IsCalleeSavedReg(regNO)) { + cgFunc->AddtoCalleeSaved(regNO); + } + + if (li->IsShouldSave()) { + InsertCallerSave(insn, opnd, false); + } else if (li->GetStackSlot() == kSpilled) { + SpillOperand(insn, opnd, false, spillIdx); + SpillOperand(insn, opnd, true, spillIdx); + ++spillIdx; + } + RegOperand *phyOpnd = regInfo->GetOrCreatePhyRegOperand( + static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + + return phyOpnd; +} + +/* + * Create an operand with physical register assigned, or a spill register + * in the case where a physical register cannot be assigned. 
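+ * Note the scratch-register indexing below: a def resets spillIdx to 0, and each
+ * spilled use bumps it, so multiple spilled source operands of one insn get
+ * distinct spill registers.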
+ */ +RegOperand *LSRALinearScanRegAllocator::GetReplaceOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx, bool isDef) { + if (!opnd.IsRegister()) { + return nullptr; + } + const auto *regOpnd = static_cast(&opnd); + + uint32 vRegNO = regOpnd->GetRegisterNumber(); + RegType regType = regOpnd->GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return nullptr; + } + if (regInfo->IsUntouchableReg(vRegNO)) { + return nullptr; + } + if (regOpnd->IsPhysicalRegister()) { + return nullptr; + } + + DEBUG_ASSERT(vRegNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::GetReplaceOpnd"); + LiveInterval *li = liveIntervalsArray[vRegNO]; + + regno_t regNO = li->GetAssignedReg(); + if (regInfo->IsCalleeSavedReg(regNO)) { + cgFunc->AddtoCalleeSaved(regNO); + } + + if (li->IsShouldSave()) { + InsertCallerSave(insn, opnd, isDef); + } else if (li->GetStackSlot() == kSpilled) { + spillIdx = isDef ? 0 : spillIdx; + SpillOperand(insn, opnd, isDef, spillIdx); + if (!isDef) { + ++spillIdx; + } + } + RegOperand *phyOpnd = regInfo->GetOrCreatePhyRegOperand( + static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + + return phyOpnd; +} + +/* Try to estimate if spill callee should be done based on even/odd for stp in prolog. */ +void LSRALinearScanRegAllocator::CheckSpillCallee() { + if (CGOptions::DoCalleeToSpill()) { + uint32 pairCnt = 0; + for (size_t idx = 0; idx < sizeof(uint32); ++idx) { + if ((intCalleeMask & (1ULL << idx)) != 0 && calleeUseCnt[idx] != 0) { + ++pairCnt; + } + } + if ((pairCnt & 0x01) != 0) { + shouldOptIntCallee = true; + } + + for (size_t idx = 0; idx < sizeof(uint32); ++idx) { + if ((fpCalleeMask & (1ULL << idx)) != 0 && calleeUseCnt[idx] != 0) { + ++pairCnt; + } + } + if ((pairCnt & 0x01) != 0) { + shouldOptFpCallee = true; + } + } +} + +/* Iterate through all instructions and change the vreg to preg. 
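+ * Each insn is rewritten in three passes (plain uses, then use-def operands,
+ * then defs), so reloads are prepended and spills appended in a consistent order.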
*/ +void LSRALinearScanRegAllocator::FinalizeRegisters() { + CheckSpillCallee(); + for (BB *bb : bfs->sortedBBs) { + intBBDefMask = 0; + fpBBDefMask = 0; + + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || insn->GetId() == 0) { + continue; + } + if (!insn->IsMachineInstruction()) { + continue; + } + + uint32 spillIdx = 0; + const InsnDesc *md = insn->GetDesc(); + uint opndNum = insn->GetOperandSize(); + + /* Handle source(use) opernads first */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + bool isDef = regProp->IsRegDef(); + if (isDef) { + continue; + } + Operand &opnd = insn->GetOperand(i); + RegOperand *phyOpnd = nullptr; + if (opnd.IsList()) { + /* For arm32, not arm64 */ + } else if (opnd.IsMemoryAccessOperand()) { + auto *memOpnd = + static_cast(static_cast(opnd).Clone(*cgFunc->GetMemoryPool())); + DEBUG_ASSERT(memOpnd != nullptr, "memopnd is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + insn->SetOperand(i, *memOpnd); + Operand *base = memOpnd->GetBaseRegister(); + Operand *offset = memOpnd->GetIndexRegister(); + if (base != nullptr) { + phyOpnd = GetReplaceOpnd(*insn, *base, spillIdx, false); + if (phyOpnd != nullptr) { + memOpnd->SetBaseRegister(*phyOpnd); + } + } + if (offset != nullptr) { + phyOpnd = GetReplaceOpnd(*insn, *offset, spillIdx, false); + if (phyOpnd != nullptr) { + memOpnd->SetIndexRegister(*phyOpnd); + } + } + } else { + phyOpnd = GetReplaceOpnd(*insn, opnd, spillIdx, false); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + } + } + } + + /* Handle ud(use-def) opernads */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + Operand &opnd = insn->GetOperand(i); + bool isUseDef = regProp->IsRegDef() && regProp->IsRegUse(); + if (!isUseDef) { + continue; + } + RegOperand *phyOpnd = GetReplaceUdOpnd(*insn, opnd, spillIdx); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + } + } + + /* Handle dest(def) opernads last */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + Operand &opnd = insn->GetOperand(i); + bool isUse = (regProp->IsRegUse()) || (opnd.IsMemoryAccessOperand()); + if (isUse) { + continue; + } + isSpillZero = false; + RegOperand *phyOpnd = GetReplaceOpnd(*insn, opnd, spillIdx, true); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + if (isSpillZero) { + insn->GetBB()->RemoveInsn(*insn); + } + } + } + + if (insn->IsCall()) { + intBBDefMask = 0; + fpBBDefMask = 0; + } + } + } +} + +void LSRALinearScanRegAllocator::SetAllocMode() { + if (CGOptions::IsFastAlloc()) { + if (CGOptions::GetFastAllocMode() == 0) { + fastAlloc = true; + } else { + spillAll = true; + } + /* In-Range spill range can still be specified (only works with --dump-func=). 
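+     * Functions with more BBs than the LSRA opt-size threshold likewise fall
+     * back to fastAlloc, trading allocation quality for compile time.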
*/ + } else if (cgFunc->NumBBs() > CGOptions::GetLSRABBOptSize()) { + /* instruction size is checked in ComputeLieveInterval() */ + fastAlloc = true; + } + + if (LSRA_DUMP) { + if (fastAlloc) { + LogInfo::MapleLogger() << "fastAlloc mode on\n"; + } + if (spillAll) { + LogInfo::MapleLogger() << "spillAll mode on\n"; + } + } +} + +void LSRALinearScanRegAllocator::LinearScanRegAllocator() { + if (LSRA_DUMP) { + PrintParamQueue("Initial param queue"); + PrintCallQueue("Initial call queue"); + } + freeUntilPos.resize(regInfo->GetAllRegNum(), 0XFFFFFFFUL); + MapleVector initialPosVec(freeUntilPos); + uint32 curInsnID = 0; + + while (!liQue.empty()) { + LiveInterval *li = liQue.front(); + liQue.pop_front(); + if (li->GetRangesSize() == 0) { + /* range building has been skiped */ + li->AddRange(li->GetFirstDef(), li->GetLastUse()); + } + li->InitRangeFinder(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "======Alloc R" << li->GetRegNO() << "======" << "\n"; + } + blockForbiddenMask = 0; + freeUntilPos = initialPosVec; + DEBUG_ASSERT(li->GetFirstDef() >= curInsnID, "wrong li order"); + curInsnID = li->GetFirstDef(); + RetireActive(*li, curInsnID); + UpdateCallQueueAtRetirement(curInsnID); + UpdateActiveAllocateInfo(*li); + UpdateParamAllocateInfo(*li); + if (LSRA_DUMP) { + DebugCheckActiveList(); + } + AssignPhysRegsForLi(*li); + } +} + +/* Main entrance for the LSRA register allocator */ +bool LSRALinearScanRegAllocator::AllocateRegisters() { + cgFunc->SetIsAfterRegAlloc(); + calleeUseCnt.resize(regInfo->GetAllRegNum()); + liveIntervalsArray.resize(cgFunc->GetMaxVReg()); + SetAllocMode(); +#ifdef RA_PERF_ANALYSIS + auto begin = std::chrono::system_clock::now(); +#endif + if (LSRA_DUMP) { + const MIRModule &mirModule = cgFunc->GetMirModule(); + DotGenerator::GenerateDot("RA", *cgFunc, mirModule); + DotGenerator::GenerateDot("RAe", *cgFunc, mirModule, true); + LogInfo::MapleLogger() << "Entering LinearScanRegAllocator: " << cgFunc->GetName()<<"\n"; + } +/* ================= LiveInterval =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + ComputeLiveInterval(); + + if (LSRA_DUMP) { + PrintLiveRangesGraph(); + } + + bool enableDoLSRAPreSpill = true; + if (enableDoLSRAPreSpill) { + LiveIntervalAnalysis(); + } + +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + liveIntervalUS += std::chrono::duration_cast(end - start).count(); +#endif + +/* ================= LiveRange =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + + bool enableDoLSRAHole = true; + if (enableDoLSRAHole) { + BuildIntervalRanges(); + } + + if (LSRA_DUMP) { + PrintAllLiveRanges(); + } +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + holesUS += std::chrono::duration_cast(end - start).count(); +#endif +/* ================= InitFreeRegPool =============== */ + InitFreeRegPool(); + +/* ================= LinearScanRegAllocator =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + LinearScanRegAllocator(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + lsraUS += std::chrono::duration_cast(end - start).count(); +#endif + + if (LSRA_DUMP) { + PrintAllLiveRanges(); + } + +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + FinalizeRegisters(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + finalizeUS += std::chrono::duration_cast(end - start).count(); +#endif + + if (LSRA_DUMP) { + 
LogInfo::MapleLogger() << "Total " << spillCount << " spillCount in " << cgFunc->GetName() << " \n"; + LogInfo::MapleLogger() << "Total " << reloadCount << " reloadCount\n"; + LogInfo::MapleLogger() << "Total " << "(" << spillCount << "+ " << callerSaveSpillCount << ") = " << + (spillCount + callerSaveSpillCount) << " SPILL\n"; + LogInfo::MapleLogger() << "Total " << "(" << reloadCount << "+ " << callerSaveReloadCount << ") = " << + (reloadCount + callerSaveReloadCount) << " RELOAD\n"; + uint32_t insertInsn = spillCount + callerSaveSpillCount + reloadCount + callerSaveReloadCount; + float rate = (float(insertInsn) / float(maxInsnNum)); + LogInfo::MapleLogger() <<"insn Num Befor RA:"<< maxInsnNum <<", insert " << insertInsn << + " insns: " << ", insertInsn/insnNumBeforRA: "<< rate <<"\n"; + } + + bfs = nullptr; /* bfs is not utilized outside the function. */ + +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + totalUS += std::chrono::duration_cast(end - begin).count(); +#endif + + return true; +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/reg_coalesce.cpp b/ecmascript/mapleall/maple_be/src/cg/reg_coalesce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c41a6762ebc891dd8ab74381d60695b5b2b2b911 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/reg_coalesce.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "reg_coalesce.h" +#include "cg_option.h" +#ifdef TARGAARCH64 +#include "aarch64_reg_coalesce.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#endif +#include "cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +namespace maplebe { + +void LiveIntervalAnalysis::Run() { + Analysis(); + CoalesceRegisters(); + ClearBFS(); +} + +void LiveIntervalAnalysis::DoAnalysis() { + runAnalysis = true; + Analysis(); +} + +void LiveIntervalAnalysis::Analysis() { + bfs = memPool->New(*cgFunc, *memPool); + bfs->ComputeBlockOrder(); + ComputeLiveIntervals(); +} + +/* bfs is not utilized outside the function. 
*/
+void LiveIntervalAnalysis::ClearBFS() {
+    bfs = nullptr;
+}
+
+void LiveIntervalAnalysis::Dump() {
+    for (auto it : vregIntervals) {
+        LiveInterval *li = it.second;
+        li->Dump();
+        li->DumpDefs();
+        li->DumpUses();
+    }
+}
+
+void LiveIntervalAnalysis::CoalesceLiveIntervals(LiveInterval &lrDest, LiveInterval &lrSrc) {
+    if (cgFunc->IsExtendReg(lrDest.GetRegNO())) {
+        cgFunc->InsertExtendSet(lrSrc.GetRegNO());
+    }
+    cgFunc->RemoveFromExtendSet(lrDest.GetRegNO());
+    /* merge destlr to srclr */
+    lrSrc.MergeRanges(lrDest);
+    /* update conflicts */
+    lrSrc.MergeConflict(lrDest);
+    for (auto reg : lrDest.GetConflict()) {
+        LiveInterval *conf = GetLiveInterval(reg);
+        if (conf) {
+            conf->AddConflict(lrSrc.GetRegNO());
+        }
+    }
+    /* merge refpoints */
+    lrSrc.MergeRefPoints(lrDest);
+    vregIntervals.erase(lrDest.GetRegNO());
+}
+
+bool CGliveIntervalAnalysis::PhaseRun(maplebe::CGFunc &f) {
+    LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    live->ResetLiveSet();
+    MemPool *memPool = GetPhaseMemPool();
+    liveInterval = f.GetCG()->CreateLLAnalysis(*memPool, f);
+    liveInterval->DoAnalysis();
+    return false;
+}
+void CGliveIntervalAnalysis::GetAnalysisDependence(AnalysisDep &aDep) const {
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.AddRequired<CgLoopAnalysis>();
+}
+MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(CGliveIntervalAnalysis, cgliveintervalananlysis)
+
+bool CgRegCoalesce::PhaseRun(maplebe::CGFunc &f) {
+    LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    live->ResetLiveSet();
+    MemPool *memPool = GetPhaseMemPool();
+    LiveIntervalAnalysis *ll = f.GetCG()->CreateLLAnalysis(*memPool, f);
+    ll->Run();
+    /* the live range info may have changed, so invalidate it. */
+    if (live != nullptr) {
+        live->ClearInOutDataInfo();
+    }
+    return false;
+}
+
+void CgRegCoalesce::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.AddRequired<CgLoopAnalysis>();
+    aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgRegCoalesce, cgregcoalesce)
+
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/regsaves.cpp b/ecmascript/mapleall/maple_be/src/cg/regsaves.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d22a816c0db56c88d7aa9f8e7c92b885cf0b761d
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/regsaves.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan Permissive Software License v2.
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0.
+ * You may obtain a copy of MulanPSL - 2.0 at:
+ *
+ *     https://opensource.org/licenses/MulanPSL-2.0
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the MulanPSL - 2.0 for more details.
+ */
+
+#include "cgfunc.h"
+#if TARGAARCH64
+#include "aarch64_regsaves.h"
+#elif TARGRISCV64
+#include "riscv64_regsaves.h"
+#endif
+
+namespace maplebe {
+using namespace maple;
+
+bool CgRegSavesOpt::PhaseRun(maplebe::CGFunc &f) {
+    if (Globals::GetInstance()->GetOptimLevel() <= CGOptions::kLevel1) {
+        return false;
+    }
+
+    /* Perform loop analysis, result to be obtained in CGFunc */
+    (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(&CgLoopAnalysis::id, f);
+
+    /* Perform live analysis, result to be obtained in CGFunc */
+    LiveAnalysis *live = nullptr;
+    MaplePhase *it = GetAnalysisInfoHook()->
+        ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(&CgLiveAnalysis::id, f);
+    live = static_cast<CgLiveAnalysis*>(it)->GetResult();
+    CHECK_FATAL(live != nullptr, "null ptr check");
+    /* revert liveanalysis result container. */
+    live->ResetLiveSet();
+
+    /* Perform dom analysis, result to be inserted into AArch64RegSavesOpt object */
+    DomAnalysis *dom = nullptr;
+    PostDomAnalysis *pdom = nullptr;
+    if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1 &&
+        f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) {
+        MaplePhase *phase = GetAnalysisInfoHook()->
+            ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(&CgDomAnalysis::id, f);
+        dom = static_cast<CgDomAnalysis*>(phase)->GetResult();
+        CHECK_FATAL(dom != nullptr, "null ptr check");
+        phase = GetAnalysisInfoHook()->
+            ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(&CgPostDomAnalysis::id, f);
+        pdom = static_cast<CgPostDomAnalysis*>(phase)->GetResult();
+        CHECK_FATAL(pdom != nullptr, "null ptr check");
+    }
+
+    MemPool *memPool = GetPhaseMemPool();
+    RegSavesOpt *regSavesOpt = nullptr;
+#if TARGAARCH64
+    regSavesOpt = memPool->New<AArch64RegSavesOpt>(f, *memPool, *dom, *pdom);
+#elif TARGRISCV64
+    regSavesOpt = memPool->New<Riscv64RegSavesOpt>(f, *memPool);
+#endif
+
+    if (regSavesOpt) {
+        regSavesOpt->SetEnabledDebug(false);  /* To turn on debug trace */
+        if (regSavesOpt->GetEnabledDebug()) {
+            dom->Dump();
+        }
+        regSavesOpt->Run();
+    }
+    return true;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgRegSavesOpt, regsaves)
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/schedule.cpp b/ecmascript/mapleall/maple_be/src/cg/schedule.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3a33bcdca5c3eb4b1ccccfed0c6ac781210513d4
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/schedule.cpp
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#if TARGAARCH64
+#include "aarch64_schedule.h"
+#elif TARGRISCV64
+#include "riscv64_schedule.h"
+#endif
+#if TARGARM32
+#include "arm32_schedule.h"
+#endif
+#include "cg.h"
+#include "optimize_common.h"
+
+#undef PRESCHED_DEBUG
+
+namespace maplebe {
+/* pressure standard value; pressure under this value will not lead to spill operation */
+static constexpr int g_pressureStandard = 27;
+/* optimistic scheduling option */
+static constexpr bool g_optimisticScheduling = false;
+/* brute maximum count limit option */
+static constexpr bool g_bruteMaximumLimit = true;
+/* brute maximum count */
+static constexpr int g_schedulingMaximumCount = 20000;
+
+/* ---- RegPressureSchedule function ---- */
+void RegPressureSchedule::InitBBInfo(BB &b, MemPool &memPool, const MapleVector<DepNode*> &nodes) {
+    bb = &b;
+    liveReg.clear();
+    scheduledNode.clear();
+    readyList.clear();
+    maxPriority = 0;
+    maxPressure = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
+    curPressure = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
+    physicalRegNum = memPool.NewArray<int32>(RegPressure::GetMaxRegClassNum());
+    for (auto node : nodes) {
+        node->SetState(kNormal);
+    }
+}
+
+/* return register type according to register number */
+RegType RegPressureSchedule::GetRegisterType(regno_t reg) const {
+    return cgFunc.GetRegisterType(reg);
+}
+
+/* Get amount of every physical register */
+void RegPressureSchedule::BuildPhyRegInfo(const std::vector<int32> &regNumVec) {
+    FOR_ALL_REGCLASS(i) {
+        physicalRegNum[i] = regNumVec[i];
+    }
+}
+
+/* Initialize pre-scheduling split point in BB */
+void RegPressureSchedule::initPartialSplitters(const MapleVector<DepNode*> &nodes) {
+    bool addFirstAndLastNodeIndex = false;
+    constexpr uint32 SecondLastNodeIndexFromBack = 2;
+    constexpr uint32 LastNodeIndexFromBack = 1;
+    constexpr uint32 FirstNodeIndex = 0;
+    constexpr uint32 minimumBBSize = 2;
+    /* Add split point for the last instruction in return BB */
+    if (bb->GetKind() == BB::kBBReturn && nodes.size() > minimumBBSize) {
+        splitterIndexes.emplace_back(nodes.size() - SecondLastNodeIndexFromBack);
+        addFirstAndLastNodeIndex = true;
+    }
+    /* Add first and last node as split point if needed */
+    if (addFirstAndLastNodeIndex) {
+        splitterIndexes.emplace_back(nodes.size() - LastNodeIndexFromBack);
+        splitterIndexes.emplace_back(FirstNodeIndex);
+    }
+    std::sort(splitterIndexes.begin(), splitterIndexes.end(), std::less{});
+}
+
+/* initialize register pressure information according to bb's live-in data.
+ * initialize node's valid preds size.
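+ * Also seeds the ready list with the first node and counts "dead defs":
+ * defs that have no use list and are not live-out of the BB.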
+/*
+ * initialize register pressure information according to bb's live-in data.
+ * initialize each node's valid preds size.
+ */
+void RegPressureSchedule::Init(const MapleVector<DepNode*> &nodes) {
+  readyList.clear();
+  scheduledNode.clear();
+  liveReg.clear();
+  liveInRegNO.clear();
+  liveOutRegNO.clear();
+  liveInRegNO = bb->GetLiveInRegNO();
+  liveOutRegNO = bb->GetLiveOutRegNO();
+
+  FOR_ALL_REGCLASS(i) {
+    curPressure[i] = 0;
+    maxPressure[i] = 0;
+  }
+
+  for (auto *node : nodes) {
+    /* calculate the register pressure of the node's uses */
+    for (auto &useReg : node->GetUseRegnos()) {
+      CalculatePressure(*node, useReg, false);
+    }
+
+    /* calculate the register pressure of the node's defs */
+    size_t i = 0;
+    for (auto &defReg : node->GetDefRegnos()) {
+      CalculatePressure(*node, defReg, true);
+      RegType regType = GetRegisterType(defReg);
+      /* if there is no use list, the register is only defined, never used */
+      if (node->GetRegDefs(i) == nullptr && liveOutRegNO.find(defReg) == liveOutRegNO.end()) {
+        node->IncDeadDefByIndex(regType);
+      }
+      ++i;
+    }
+    /* Calculate the pred size of the node */
+    CalculatePredSize(*node);
+  }
+
+  DepNode *firstNode = nodes.front();
+  readyList.emplace_back(firstNode);
+  firstNode->SetState(kReady);
+  scheduledNode.reserve(nodes.size());
+  constexpr size_t readyListSize = 10;
+  readyList.reserve(readyListSize);
+}
+
+void RegPressureSchedule::SortReadyList() {
+  std::sort(readyList.begin(), readyList.end(), DepNodePriorityCmp);
+}
+
+/* return true if node1 should be scheduled before node2. */
+bool RegPressureSchedule::DepNodePriorityCmp(const DepNode *node1, const DepNode *node2) {
+  CHECK_NULL_FATAL(node1);
+  CHECK_NULL_FATAL(node2);
+  int32 priority1 = node1->GetPriority();
+  int32 priority2 = node2->GetPriority();
+  if (priority1 != priority2) {
+    return priority1 > priority2;
+  }
+
+  int32 numCall1 = node1->GetNumCall();
+  int32 numCall2 = node2->GetNumCall();
+  if (node1->GetIncPressure() && node2->GetIncPressure()) {
+    if (numCall1 != numCall2) {
+      return numCall1 > numCall2;
+    }
+  }
+
+  int32 near1 = node1->GetNear();
+  int32 near2 = node2->GetNear();
+  int32 depthS1 = node1->GetMaxDepth() + near1;
+  int32 depthS2 = node2->GetMaxDepth() + near2;
+  if (depthS1 != depthS2) {
+    return depthS1 > depthS2;
+  }
+
+  if (near1 != near2) {
+    return near1 > near2;
+  }
+
+  if (numCall1 != numCall2) {
+    return numCall1 > numCall2;
+  }
+
+  size_t succsSize1 = node1->GetSuccs().size();
+  size_t succsSize2 = node2->GetSuccs().size();
+  if (succsSize1 != succsSize2) {
+    return succsSize1 < succsSize2;
+  }
+
+  if (node1->GetHasPreg() != node2->GetHasPreg()) {
+    return node1->GetHasPreg();
+  }
+
+  return node1->GetInsn()->GetId() < node2->GetInsn()->GetId();
+}
+
+/* set a node's incPressure to true when the pressure of a register class increases */
+void RegPressureSchedule::ReCalculateDepNodePressure(DepNode &node) {
+  /* if any register class's pressure increases, set incPressure to true. */
+  auto &pressures = node.GetPressure();
+  node.SetIncPressure(pressures[kRegisterInt] > 0);
+}
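DepNodePriorityCmp is a chain of tie-breakers ending in a total order on insn ids. The two fixes above matter: near2 and succsSize2 previously read node1 again, so those criteria always compared equal and silently dropped out of the ordering. A minimal standalone illustration of the pattern (invented types, not part of the patch):

```
// Standalone sketch: a tie-break comparator chain like DepNodePriorityCmp.
// Each criterion must compare node1 against node2 symmetrically, or the
// ordering silently degenerates (the bug fixed above).
#include <algorithm>
#include <iostream>
#include <vector>

struct Node { int priority; int depth; unsigned id; };

static bool Cmp(const Node &a, const Node &b) {
    if (a.priority != b.priority) return a.priority > b.priority;  // primary key
    if (a.depth != b.depth) return a.depth > b.depth;              // tie-break
    return a.id < b.id;                                            // total, stable fallback
}

int main() {
    std::vector<Node> ready = {{1, 5, 2}, {3, 1, 0}, {1, 9, 1}};
    std::sort(ready.begin(), ready.end(), Cmp);
    for (const auto &n : ready) std::cout << n.id << ' ';  // prints: 0 1 2
    std::cout << '\n';
    return 0;
}
```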
+/* calculate the maxDepth of every node in nodes. */
+void RegPressureSchedule::CalculateMaxDepth(const MapleVector<DepNode*> &nodes) {
+  /* from the last node to the first node. */
+  for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) {
+    /* init call count */
+    if ((*it)->GetInsn()->IsCall()) {
+      (*it)->SetNumCall(1);
+    }
+    /* traverse each successor of *it. */
+    for (auto succ : (*it)->GetSuccs()) {
+      DepNode &to = succ->GetTo();
+      if ((*it)->GetMaxDepth() < (to.GetMaxDepth() + 1)) {
+        (*it)->SetMaxDepth(to.GetMaxDepth() + 1);
+      }
+
+      if (to.GetInsn()->IsCall() && ((*it)->GetNumCall() < to.GetNumCall() + 1)) {
+        (*it)->SetNumCall(to.GetNumCall() + 1);
+      } else if ((*it)->GetNumCall() < to.GetNumCall()) {
+        (*it)->SetNumCall(to.GetNumCall());
+      }
+    }
+  }
+}
+
+/* calculate the near of every successor of the node. */
+void RegPressureSchedule::CalculateNear(const DepNode &node) {
+  for (auto succ : node.GetSuccs()) {
+    DepNode &to = succ->GetTo();
+    if (succ->GetDepType() == kDependenceTypeTrue && to.GetNear() < node.GetNear() + 1) {
+      to.SetNear(node.GetNear() + 1);
+    }
+  }
+}
+
+/* return true if this is the last use of regNO. */
+bool RegPressureSchedule::IsLastUse(const DepNode &node, regno_t regNO) {
+  size_t i = 0;
+  for (auto reg : node.GetUseRegnos()) {
+    if (reg == regNO) {
+      break;
+    }
+    ++i;
+  }
+  RegList *regList = node.GetRegUses(i);
+
+  /*
+   * apart from this node, if regNO's use list still contains an insn that has
+   * not been scheduled, then this is not the last use of regNO; return false.
+   */
+  while (regList != nullptr) {
+    CHECK_NULL_FATAL(regList->insn);
+    DepNode *useNode = regList->insn->GetDepNode();
+    DEBUG_ASSERT(useNode != nullptr, "get depend node failed in RegPressureSchedule::IsLastUse");
+    if ((regList->insn != node.GetInsn()) && (useNode->GetState() != kScheduled)) {
+      return false;
+    }
+    regList = regList->next;
+  }
+  return true;
+}
+
+void RegPressureSchedule::CalculatePressure(DepNode &node, regno_t reg, bool def) {
+  RegType regType = GetRegisterType(reg);
+  /* if the node defines a register, register pressure increases. */
+  if (def) {
+    node.IncPressureByIndex(regType);
+  } else {
+    /* if this is the last use of the reg, register pressure decreases. */
+    if (IsLastUse(node, reg)) {
+      node.DecPressureByIndex(regType);
+    }
+  }
+}
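CalculateMaxDepth relies on visiting nodes in reverse so that every successor's depth is already final, which yields critical-path lengths in a single sweep. A standalone sketch under the assumption that nodes are stored in topological order (invented example, not part of the patch):

```
// Standalone sketch: max-depth (critical path length) via one reverse sweep,
// assuming `succs` describes a DAG whose nodes are in topological order.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<int>> succs = {{1, 2}, {3}, {3}, {}};  // 0->1, 0->2, 1->3, 2->3
    std::vector<int> depth(succs.size(), 0);
    for (int v = static_cast<int>(succs.size()) - 1; v >= 0; --v) {
        for (int s : succs[v]) depth[v] = std::max(depth[v], depth[s] + 1);
    }
    for (size_t v = 0; v < depth.size(); ++v)
        std::cout << "maxDepth(" << v << ") = " << depth[v] << "\n";  // 2 1 1 0
    return 0;
}
```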
+/* update live reg information. */
+void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool def) {
+  if (def) {
+    if (liveReg.find(reg) == liveReg.end()) {
+      (void)liveReg.insert(reg);
+#ifdef PRESCHED_DEBUG
+      LogInfo::MapleLogger() << "Add new def R" << reg << " to live reg list \n";
+#endif
+    }
+    /* if there is no use list, the register is only defined, never used */
+    size_t i = 0;
+    for (auto defReg : node.GetDefRegnos()) {
+      if (defReg == reg) {
+        break;
+      }
+      ++i;
+    }
+    if (node.GetRegDefs(i) == nullptr && liveOutRegNO.find(reg) == liveOutRegNO.end()) {
+#ifdef PRESCHED_DEBUG
+      LogInfo::MapleLogger() << "Remove dead def " << reg << " from live reg list \n";
+#endif
+      liveReg.erase(reg);
+    } else if (node.GetRegDefs(i) != nullptr) {
+#ifdef PRESCHED_DEBUG
+      auto regList = node.GetRegDefs(i);
+      LogInfo::MapleLogger() << i << " Live def, dump use insns here \n";
+      while (regList != nullptr) {
+        regList->insn->Dump();
+        regList = regList->next;
+      }
+#endif
+    }
+  } else {
+    if (IsLastUse(node, reg)) {
+      if (liveReg.find(reg) != liveReg.end() && liveOutRegNO.find(reg) == liveOutRegNO.end()) {
+#ifdef PRESCHED_DEBUG
+        LogInfo::MapleLogger() << "Remove last use R" << reg << " from live reg list\n";
+#endif
+        liveReg.erase(reg);
+      }
+    }
+  }
+}
+
+/* update register pressure information. */
+void RegPressureSchedule::UpdateBBPressure(const DepNode &node) {
+  size_t idx = 0;
+  for (auto &reg : node.GetUseRegnos()) {
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "Use Reg : R" << reg << "\n";
+    UpdateLiveReg(node, reg, false);
+    if (liveReg.find(reg) == liveReg.end()) {
+      ++idx;
+      continue;
+    }
+#endif
+
+    /* find the insns that use the reg; if one of them is the reg's last use, decrease its pressure by 1 */
+    RegList *regList = node.GetRegUses(idx);
+
+    while (regList != nullptr) {
+      CHECK_NULL_FATAL(regList->insn);
+      DepNode *useNode = regList->insn->GetDepNode();
+      if (useNode->GetState() == kScheduled) {
+        regList = regList->next;
+        continue;
+      }
+
+      if (IsLastUse(*useNode, reg)) {
+        RegType regType = GetRegisterType(reg);
+        useNode->DecPressureByIndex(regType);
+      }
+      break;
+    }
+    ++idx;
+  }
+
+#ifdef PRESCHED_DEBUG
+  for (auto &defReg : node.GetDefRegnos()) {
+    UpdateLiveReg(node, defReg, true);
+  }
+#endif
+
+  const auto &pressures = node.GetPressure();
+  const auto &deadDefNum = node.GetDeadDefNum();
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "\nnode's pressure: ";
+  for (auto pressure : pressures) {
+    LogInfo::MapleLogger() << pressure << " ";
+  }
+  LogInfo::MapleLogger() << "\n";
+#endif
+
+  FOR_ALL_REGCLASS(i) {
+    curPressure[i] += pressures[i];
+    curPressure[i] -= deadDefNum[i];
+    if (curPressure[i] > maxPressure[i]) {
+      maxPressure[i] = curPressure[i];
+    }
+  }
+}
+
+/* update the node's priority and try to update the priority of all its ancestors. */
+void RegPressureSchedule::UpdatePriority(DepNode &node) {
+  std::vector<DepNode*> workQueue;
+  workQueue.emplace_back(&node);
+  node.SetPriority(maxPriority++);
+  do {
+    DepNode *nowNode = workQueue.front();
+    (void)workQueue.erase(workQueue.begin());
+    for (auto pred : nowNode->GetPreds()) {
+      DepNode &from = pred->GetFrom();
+      if (from.GetState() != kScheduled && from.GetPriority() < maxPriority) {
+        from.SetPriority(maxPriority);
+        workQueue.emplace_back(&from);
+      }
+    }
+  } while (!workQueue.empty());
+}
+
+/* return true if all the node's preds have been scheduled. */
+bool RegPressureSchedule::CanSchedule(const DepNode &node) const {
+  return node.GetValidPredsSize() == 0;
+}
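UpdateLiveReg grows the live set at definitions and shrinks it at last uses; the peak size of that set is the register pressure the scheduler tries to lower. A standalone sketch of the same bookkeeping over a straight-line sequence (invented instruction encoding, not part of the patch):

```
// Standalone sketch: tracking the live set over a straight-line sequence to
// get peak pressure, the same bookkeeping UpdateLiveReg performs per node.
#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

struct Inst { std::vector<int> uses; std::vector<int> defs; };

int main() {
    // r1 = ...; r2 = ...; r3 = r1 + r2; ... = r3 (last uses retire registers)
    std::vector<Inst> code = {{{}, {1}}, {{}, {2}}, {{1, 2}, {3}}, {{3}, {}}};
    std::vector<int> lastUse(4, -1);
    for (int i = 0; i < static_cast<int>(code.size()); ++i)
        for (int r : code[i].uses) lastUse[r] = i;
    std::set<int> live;
    size_t peak = 0;
    for (int i = 0; i < static_cast<int>(code.size()); ++i) {
        for (int r : code[i].defs) live.insert(r);
        peak = std::max(peak, live.size());
        for (int r : code[i].uses)
            if (lastUse[r] == i) live.erase(r);  // retire at last use
    }
    std::cout << "peak pressure = " << peak << "\n";  // prints 3 (r1, r2, r3 overlap)
    return 0;
}
```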
+/*
+ * delete node from readyList and
+ * add a successor of node to readyList when
+ *  1. the successor has not been scheduled yet;
+ *  2. all of the successor's preds have been scheduled, or the dependence
+ *     between node and the successor is a true dependence.
+ */
+void RegPressureSchedule::UpdateReadyList(const DepNode &node) {
+  /* delete node from readyList */
+  for (auto it = readyList.begin(); it != readyList.end(); ++it) {
+    if (*it == &node) {
+      readyList.erase(it);
+      break;
+    }
+  }
+  /* update dependency information of the successors and add nodes into readyList */
+  for (auto *succ : node.GetSuccs()) {
+    DepNode &succNode = succ->GetTo();
+    if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) {
+      continue;
+    }
+    succNode.DescreaseValidPredsSize();
+    if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) {
+      readyList.emplace_back(&succNode);
+      succNode.SetState(kReady);
+    }
+  }
+}
+/*
+ * Another version of UpdateReadyList for the brute-force ready list update.
+ * The difference is that it records the state changes of the successors for later restoring.
+ */
+void RegPressureSchedule::BruteUpdateReadyList(const DepNode &node, std::vector<bool> &changedToReady) {
+  /* delete node from readyList */
+  for (auto it = readyList.begin(); it != readyList.end(); ++it) {
+    if (*it == &node) {
+      readyList.erase(it);
+      break;
+    }
+  }
+  /* update dependency information of the successors and add nodes into readyList */
+  for (auto *succ : node.GetSuccs()) {
+    DepNode &succNode = succ->GetTo();
+    if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) {
+      continue;
+    }
+    succNode.DescreaseValidPredsSize();
+    if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) {
+      readyList.emplace_back(&succNode);
+      succNode.SetState(kReady);
+      changedToReady.emplace_back(true);
+    } else {
+      changedToReady.emplace_back(false);
+    }
+  }
+}
+
+/*
+ * Restore the ready list status after finishing one brute-force scheduling series
+ */
+void RegPressureSchedule::RestoreReadyList(DepNode &node, std::vector<bool> &changedToReady) {
+  uint32 i = 0;
+  /* restore state information of the successors and delete them from readyList */
+  for (auto *succ : node.GetSuccs()) {
+    DepNode &succNode = succ->GetTo();
+    succNode.IncreaseValidPredsSize();
+    if (changedToReady.at(i)) {
+      succNode.SetState(kNormal);
+      for (auto it = readyList.begin(); it != readyList.end(); ++it) {
+        if (*it == &succNode) {
+          readyList.erase(it);
+          break;
+        }
+      }
+    }
+    ++i;
+  }
+  /* add the node back into the readyList */
+  readyList.emplace_back(&node);
+}
+/* choose a node to schedule */
+DepNode *RegPressureSchedule::ChooseNode() {
+  DepNode *node = nullptr;
+  for (auto *it : readyList) {
+    if (!it->GetIncPressure() && !it->GetHasNativeCallRegister()) {
+      if (CanSchedule(*it)) {
+        return it;
+      } else if (node == nullptr) {
+        node = it;
+      }
+    }
+  }
+  if (node == nullptr) {
+    node = readyList.front();
+  }
+  return node;
+}
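UpdateReadyList implements the standard list-scheduling discipline: a successor becomes ready once its valid-pred counter hits zero. A standalone sketch of that loop (Kahn's algorithm, with an arbitrary pick standing in for ChooseNode's heuristic; not part of the patch):

```
// Standalone sketch: the ready-list discipline UpdateReadyList maintains --
// a node becomes ready once all of its predecessors are scheduled.
#include <iostream>
#include <vector>

int main() {
    std::vector<std::vector<int>> succs = {{2}, {2}, {3}, {}};  // 0->2, 1->2, 2->3
    std::vector<int> validPreds = {0, 0, 2, 1};
    std::vector<int> ready = {0, 1}, order;
    while (!ready.empty()) {
        int n = ready.back();  // stand-in for ChooseNode's heuristic pick
        ready.pop_back();
        order.push_back(n);
        for (int s : succs[n])
            if (--validPreds[s] == 0) ready.push_back(s);  // all preds scheduled
    }
    for (int n : order) std::cout << n << ' ';  // prints: 1 0 2 3
    std::cout << '\n';
    return 0;
}
```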
+void RegPressureSchedule::DumpBBLiveInfo() const {
+  LogInfo::MapleLogger() << "Live In: ";
+  for (auto reg : bb->GetLiveInRegNO()) {
+    LogInfo::MapleLogger() << "R" << reg << " ";
+  }
+  LogInfo::MapleLogger() << "\n";
+
+  LogInfo::MapleLogger() << "Live Out: ";
+  for (auto reg : bb->GetLiveOutRegNO()) {
+    LogInfo::MapleLogger() << "R" << reg << " ";
+  }
+  LogInfo::MapleLogger() << "\n";
+}
+
+void RegPressureSchedule::DumpReadyList() const {
+  LogInfo::MapleLogger() << "readyList: " << "\n";
+  for (DepNode *it : readyList) {
+    if (CanSchedule(*it)) {
+      LogInfo::MapleLogger() << it->GetInsn()->GetId() << "CS ";
+    } else {
+      LogInfo::MapleLogger() << it->GetInsn()->GetId() << "NO ";
+    }
+  }
+  LogInfo::MapleLogger() << "\n";
+}
+
+void RegPressureSchedule::DumpSelectInfo(const DepNode &node) const {
+  LogInfo::MapleLogger() << "select a node: " << "\n";
+  node.DumpSchedInfo();
+  node.DumpRegPressure();
+  node.GetInsn()->Dump();
+
+  LogInfo::MapleLogger() << "liveReg: ";
+  for (auto reg : liveReg) {
+    LogInfo::MapleLogger() << "R" << reg << " ";
+  }
+  LogInfo::MapleLogger() << "\n";
+
+  LogInfo::MapleLogger() << "\n";
+}
+
+void RegPressureSchedule::DumpDependencyInfo(const MapleVector<DepNode*> &nodes) {
+  LogInfo::MapleLogger() << "Dump Dependency Begin \n";
+  for (auto node : nodes) {
+    LogInfo::MapleLogger() << "Insn \n";
+    node->GetInsn()->Dump();
+    LogInfo::MapleLogger() << "Successors \n";
+    /* dump each successor of the node */
+    for (auto *succ : node->GetSuccs()) {
+      DepNode &succNode = succ->GetTo();
+      succNode.GetInsn()->Dump();
+    }
+  }
+  LogInfo::MapleLogger() << "Dump Dependency End \n";
+}
+
+void RegPressureSchedule::ReportScheduleError() const {
+  LogInfo::MapleLogger() << "Error No Equal Length for Series" << "\n";
+  DumpDependencyInfo(originalNodeSeries);
+  for (auto node : scheduledNode) {
+    node->GetInsn()->Dump();
+  }
+  LogInfo::MapleLogger() << "Original One" << "\n";
+  for (auto node : originalNodeSeries) {
+    node->GetInsn()->Dump();
+  }
+  LogInfo::MapleLogger() << "Error No Equal Length for End" << "\n";
+}
+
+void RegPressureSchedule::ReportScheduleOutput() const {
+  LogInfo::MapleLogger() << "Original Pressure : " << originalPressure << " \n";
+  LogInfo::MapleLogger() << "Scheduled Pressure : " << scheduledPressure << " \n";
+  if (originalPressure > scheduledPressure) {
+    LogInfo::MapleLogger() << "Pressure Reduced by : " << (originalPressure - scheduledPressure) << " \n";
+    return;
+  } else if (originalPressure == scheduledPressure) {
+    LogInfo::MapleLogger() << "Pressure Not Changed \n";
+  } else {
+    LogInfo::MapleLogger() << "Pressure Increased by : " << (scheduledPressure - originalPressure) << " \n";
+  }
+  LogInfo::MapleLogger() << "Pressure Not Reduced, Restore Node Series \n";
+}
+
+void RegPressureSchedule::DumpBBPressureInfo() const {
+  LogInfo::MapleLogger() << "curPressure: ";
+  FOR_ALL_REGCLASS(i) {
+    LogInfo::MapleLogger() << curPressure[i] << " ";
+  }
+  LogInfo::MapleLogger() << "\n";
+
+  LogInfo::MapleLogger() << "maxPressure: ";
+  FOR_ALL_REGCLASS(i) {
+    LogInfo::MapleLogger() << maxPressure[i] << " ";
+  }
+  LogInfo::MapleLogger() << "\n";
+}
+
+void RegPressureSchedule::DoScheduling(MapleVector<DepNode*> &nodes) {
+  /* Store the original series */
+  originalNodeSeries.clear();
+  for (auto node : nodes) {
+    originalNodeSeries.emplace_back(node);
+  }
+  initPartialSplitters(nodes);
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "\n Calculate Pressure Info for Schedule Input Series \n";
+#endif
+  originalPressure = CalculateRegisterPressure(nodes);
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "Original pressure : " << originalPressure << "\n";
+#endif
+  /* The original pressure is small enough, skip pre-scheduling */
+  if (originalPressure < g_pressureStandard) {
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "Original pressure is small enough, skip pre-scheduling \n";
+#endif
+    return;
+  }
+  if (splitterIndexes.empty()) {
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "No splitter, normal scheduling \n";
+#endif
+    if (!g_optimisticScheduling) {
+      HeuristicScheduling(nodes);
+    } else {
+      InitBruteForceScheduling(nodes);
+      BruteForceScheduling();
+      if (optimisticScheduledNodes.size() == nodes.size() && minPressure < originalPressure) {
+        nodes.clear();
+        for (auto node : optimisticScheduledNodes) {
+          nodes.emplace_back(node);
+        }
+      }
+    }
+  } else {
+    /* Split the node list into multiple parts based on split points and schedule each part */
+    PartialScheduling(nodes);
+  }
+  scheduledPressure = CalculateRegisterPressure(nodes);
+  EmitSchedulingSeries(nodes);
+}
+void RegPressureSchedule::HeuristicScheduling(MapleVector<DepNode*> &nodes) {
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "--------------- bb " << bb->GetId() << " begin scheduling -------------" << "\n";
+  DumpBBLiveInfo();
+#endif
+
+  /* initialize register pressure information and the readyList. */
+  Init(nodes);
+  CalculateMaxDepth(nodes);
+  while (!readyList.empty()) {
+    /* calculate register pressure */
+    for (DepNode *it : readyList) {
+      ReCalculateDepNodePressure(*it);
+    }
+    if (readyList.size() > 1) {
+      SortReadyList();
+    }
+
+    /* choose a node that can be scheduled currently. */
+    DepNode *node = ChooseNode();
+#ifdef PRESCHED_DEBUG
+    DumpBBPressureInfo();
+    DumpReadyList();
+    LogInfo::MapleLogger() << "first tmp select node: " << node->GetInsn()->GetId() << "\n";
+#endif
+
+    while (!CanSchedule(*node)) {
+      UpdatePriority(*node);
+      SortReadyList();
+      node = readyList.front();
+#ifdef PRESCHED_DEBUG
+      LogInfo::MapleLogger() << "update ready list: " << "\n";
+      DumpReadyList();
+#endif
+    }
+
+    scheduledNode.emplace_back(node);
+    /* mark the node as scheduled */
+    node->SetState(kScheduled);
+    UpdateBBPressure(*node);
+    CalculateNear(*node);
+    UpdateReadyList(*node);
+#ifdef PRESCHED_DEBUG
+    DumpSelectInfo(*node);
+#endif
+  }
+
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "---------------------------------- end --------------------------------" << "\n";
+#endif
+  /* update nodes according to scheduledNode. */
+  nodes.clear();
+  for (auto node : scheduledNode) {
+    nodes.emplace_back(node);
+  }
+}
+/*
+ * Calculate the register pressure of the current BB based on an instruction series
+ */
+int RegPressureSchedule::CalculateRegisterPressure(MapleVector<DepNode*> &nodes) {
+  /* Initialize the live, live-in, live-out and max pressure information */
+  liveReg.clear();
+  liveInRegNO = bb->GetLiveInRegNO();
+  liveOutRegNO = bb->GetLiveOutRegNO();
+  std::vector<ScheduleState> restoreStateSeries;
+  int maximumPressure = 0;
+  /* Mock all the nodes to kScheduled status for the pressure calculation */
+  for (auto node : nodes) {
+    restoreStateSeries.emplace_back(node->GetState());
+    node->SetState(kScheduled);
+  }
+  /* Update the live register set according to the instruction series */
+  for (auto node : nodes) {
+    for (auto &reg : node->GetUseRegnos()) {
+      UpdateLiveReg(*node, reg, false);
+    }
+    for (auto &defReg : node->GetDefRegnos()) {
+      UpdateLiveReg(*node, defReg, true);
+    }
+    int currentPressure = static_cast<int>(liveReg.size());
+    if (currentPressure > maximumPressure) {
+      maximumPressure = currentPressure;
+    }
+#ifdef PRESCHED_DEBUG
+    node->GetInsn()->Dump();
+    LogInfo::MapleLogger() << "Dump Live Reg : " << "\n";
+    for (auto reg : liveReg) {
+      LogInfo::MapleLogger() << "R" << reg << " ";
+    }
+    LogInfo::MapleLogger() << "\n";
+#endif
+  }
+  /* Restore the schedule state */
+  uint32 i = 0;
+  for (auto node : nodes) {
+    node->SetState(restoreStateSeries.at(i));
+    ++i;
+  }
+  return maximumPressure;
+}
+
+/*
+ * Split the series into multiple parts and conduct pre-scheduling in each part
+ */
+void RegPressureSchedule::PartialScheduling(MapleVector<DepNode*> &nodes) {
+  for (size_t i = 0; i < splitterIndexes.size() - 1; ++i) {
+    constexpr uint32 lastTwoNodeIndex = 2;
+    auto begin = static_cast<uint32>(splitterIndexes.at(i));
+    auto end = static_cast<uint32>(splitterIndexes.at(i + 1));
+    for (uint32 j = begin; j < end; ++j) {
+      partialList.emplace_back(nodes.at(j));
+    }
+    if (i == splitterIndexes.size() - lastTwoNodeIndex) {
+      partialList.emplace_back(nodes.at(end));
+    }
+    for (auto node : partialList) {
+      partialSet.insert(node);
+    }
+    HeuristicScheduling(partialList);
+    for (auto node : partialList) {
+      partialScheduledNode.emplace_back(node);
+    }
+    partialList.clear();
+    partialSet.clear();
+  }
+  nodes.clear();
+  /* Construct the overall scheduling output */
+  for (auto node : partialScheduledNode) {
+    nodes.emplace_back(node);
+  }
+}
+
+/*
+ * Brute-force scheduling algorithm:
+ * it enumerates all possible schedule series and picks the best one
+ */
+void RegPressureSchedule::BruteForceScheduling() {
+  /* stop brute-force scheduling when the count limit is exceeded */
+  if (g_bruteMaximumLimit && (scheduleSeriesCount > g_schedulingMaximumCount)) {
+    return;
+  }
+  int defaultPressureValue = -1;
+  /* readyList is empty, scheduling is over */
+  if (readyList.empty()) {
+    if (originalNodeSeries.size() != scheduledNode.size()) {
+#ifdef PRESCHED_DEBUG
+      ReportScheduleError();
+#endif
+      return;
+    }
+    ++scheduleSeriesCount;
+    int currentPressure = CalculateRegisterPressure(scheduledNode);
+    if (minPressure == defaultPressureValue || currentPressure < minPressure) {
+      minPressure = currentPressure;
+      /* update the better scheduled series */
+      optimisticScheduledNodes.clear();
+      for (auto node : scheduledNode) {
+        optimisticScheduledNodes.emplace_back(node);
+      }
+      return;
+    }
+    return;
+  }
+  /* store the current status of the ready list */
+  std::vector<DepNode*> innerList;
+  for (auto tempNode : readyList) {
+    innerList.emplace_back(tempNode);
+  }
+  for (auto *node : innerList) {
+    if (CanSchedule(*node)) {
+      /* update readyList and node dependency info */
+      std::vector<bool> changedToReady;
+      BruteUpdateReadyList(*node, changedToReady);
+      scheduledNode.emplace_back(node);
+      node->SetState(kScheduled);
+      BruteForceScheduling();
+      node->SetState(kReady);
+      /* restore readyList and node dependency info */
+      RestoreReadyList(*node, changedToReady);
+      scheduledNode.pop_back();
+    }
+  }
+}
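BruteForceScheduling enumerates schedules depth-first, applying each choice and undoing it on the way back out (BruteUpdateReadyList / RestoreReadyList), with g_schedulingMaximumCount bounding the search. A standalone sketch of the same apply-recurse-undo pattern with an invented cost function, not part of the patch:

```
// Standalone sketch: bounded backtracking with explicit undo, the shape of
// BruteForceScheduling above.
#include <algorithm>
#include <iostream>
#include <vector>

static int leaves = 0;
static const int kLeafLimit = 1000;  // stands in for g_schedulingMaximumCount

static void Enumerate(std::vector<int> &pool, std::vector<bool> &used,
                      std::vector<int> &chosen, int &best) {
    if (leaves > kLeafLimit) return;               // give up, keep the best so far
    if (chosen.size() == pool.size()) {
        ++leaves;
        int cost = 0;                              // stand-in for CalculateRegisterPressure
        for (size_t i = 0; i < chosen.size(); ++i) cost += static_cast<int>(i) * chosen[i];
        best = std::min(best, cost);
        return;
    }
    for (size_t i = 0; i < pool.size(); ++i) {
        if (used[i]) continue;
        used[i] = true; chosen.push_back(pool[i]); // apply
        Enumerate(pool, used, chosen, best);
        chosen.pop_back(); used[i] = false;        // undo, like RestoreReadyList
    }
}

int main() {
    std::vector<int> pool = {3, 1, 2};
    std::vector<bool> used(pool.size(), false);
    std::vector<int> chosen;
    int best = 1 << 30;
    Enumerate(pool, used, chosen, best);
    std::cout << "best cost = " << best << "\n";  // 0*3 + 1*2 + 2*1 = 4
    return 0;
}
```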
+/*
+ * Calculate the pred size based on the dependency information
+ */
+void RegPressureSchedule::CalculatePredSize(DepNode &node) {
+  constexpr uint32 emptyPredsSize = 0;
+  node.SetValidPredsSize(emptyPredsSize);
+  for (auto pred : node.GetPreds()) {
+    DepNode &from = pred->GetFrom();
+    if (!partialSet.empty() && (partialSet.find(&from) == partialSet.end())) {
+      continue;
+    } else {
+      node.IncreaseValidPredsSize();
+    }
+  }
+}
+
+void RegPressureSchedule::InitBruteForceScheduling(MapleVector<DepNode*> &nodes) {
+  /* Calculate the pred size of each node */
+  for (auto node : nodes) {
+    CalculatePredSize(*node);
+  }
+  readyList.clear();
+  optimisticScheduledNodes.clear();
+  scheduledNode.clear();
+  DepNode *firstNode = nodes.front();
+  firstNode->SetState(kReady);
+  readyList.emplace_back(firstNode);
+}
+
+/*
+ * Give out the pre-scheduling output based on the new register pressure
+ */
+void RegPressureSchedule::EmitSchedulingSeries(MapleVector<DepNode*> &nodes) {
+#ifdef PRESCHED_DEBUG
+  ReportScheduleOutput();
+#endif
+  if (originalPressure <= scheduledPressure) {
+    /* Restore the original series */
+    nodes.clear();
+    for (auto node : originalNodeSeries) {
+      nodes.emplace_back(node);
+    }
+  }
+}
+
+/*
+ * ------------- Schedule function ----------
+ * calculate and mark each insn id, each BB's firstLoc and lastLoc.
+ */
+void Schedule::InitIDAndLoc() {
+  uint32 id = 0;
+  FOR_ALL_BB(bb, &cgFunc) {
+    bb->SetLastLoc(bb->GetPrev() ? bb->GetPrev()->GetLastLoc() : nullptr);
+    FOR_BB_INSNS(insn, bb) {
+      insn->SetId(id++);
+#if DEBUG
+      insn->AppendComment(" Insn id: " + std::to_string(insn->GetId()));
+#endif
+      if (insn->IsImmaterialInsn() && !insn->IsComment()) {
+        bb->SetLastLoc(insn);
+      } else if (!bb->GetFirstLoc() && insn->IsMachineInstruction()) {
+        bb->SetFirstLoc(*bb->GetLastLoc());
+      }
+    }
+  }
+}
+
+/* === new pm === */
+bool CgPreScheduling::PhaseRun(maplebe::CGFunc &f) {
+  if (f.HasAsm()) {
+    return true;
+  }
+  if (LIST_SCHED_DUMP_NEWPM) {
+    LogInfo::MapleLogger() << "Before CgDoPreScheduling : " << f.GetName() << "\n";
+    DotGenerator::GenerateDot("preschedule", f, f.GetMirModule(), true);
+  }
+  auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
+  /* revert liveanalysis result container. */
+  DEBUG_ASSERT(live != nullptr, "nullptr check");
+  live->ResetLiveSet();
+
+  Schedule *schedule = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+  schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+#if TARGARM32
+  schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+  schedule->ListScheduling(true);
+  live->ClearInOutDataInfo();
+
+  return true;
+}
+
+void CgPreScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<CgLiveAnalysis>();
+  aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPreScheduling, prescheduling)
+
+bool CgScheduling::PhaseRun(maplebe::CGFunc &f) {
+  if (f.HasAsm()) {
+    return true;
+  }
+  if (LIST_SCHED_DUMP_NEWPM) {
+    LogInfo::MapleLogger() << "Before CgDoScheduling : " << f.GetName() << "\n";
+    DotGenerator::GenerateDot("scheduling", f, f.GetMirModule(), true);
+  }
+  auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
+  /* revert liveanalysis result container. */
+  DEBUG_ASSERT(live != nullptr, "nullptr check");
+  live->ResetLiveSet();
+
+  Schedule *schedule = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+  schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+#if TARGARM32
+  schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+  schedule->ListScheduling(false);
+  live->ClearInOutDataInfo();
+
+  return true;
+}
+
+void CgScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<CgLiveAnalysis>();
+  aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgScheduling, scheduling)
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/standardize.cpp b/ecmascript/mapleall/maple_be/src/cg/standardize.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..24623e29e2c838f38600073ae41cdf07fde009bb
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/standardize.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "isel.h" +#include "standardize.h" +namespace maplebe { + +void Standardize::DoStandardize() { + /* two address mapping first */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction()) { + continue; + } + if (NeedAddressMapping(*insn)) { + AddressMapping(*insn); + } + } + } + + /* standardize for each op */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction()) { + continue; + } + if (insn->IsMove()) { + StdzMov(*insn); + } else if (insn->IsStore() || insn->IsLoad()) { + StdzStrLdr(*insn); + } else if (insn->IsBasicOp()) { + StdzBasicOp(*insn); + } else if (insn->IsUnaryOp()) { + StdzUnaryOp(*insn); + } else if (insn->IsConversion()) { + StdzCvtOp(*insn, *cgFunc); + } else if (insn->IsShift()) { + StdzShiftOp(*insn, *cgFunc); + } else { + LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; + CHECK_FATAL(false, "NIY"); + } + } + } +} + +void Standardize::AddressMapping(Insn &insn) { + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + Operand &src1 = insn.GetOperand(kInsnSecondOpnd); + uint32 destSize = dest.GetSize(); + MOperator mOp = abstract::MOP_undef; + switch (destSize) { + case k8BitSize: + mOp = abstract::MOP_copy_rr_8; + break; + case k16BitSize: + mOp = abstract::MOP_copy_rr_16; + break; + case k32BitSize: + mOp = abstract::MOP_copy_rr_32; + break; + case k64BitSize: + mOp = abstract::MOP_copy_rr_64; + break; + default: + break; + } + CHECK_FATAL(mOp != abstract::MOP_undef, "do two address mapping failed"); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)newInsn.AddOpndChain(dest).AddOpndChain(src1); + (void)insn.GetBB()->InsertInsnBefore(insn, newInsn); +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/strldr.cpp b/ecmascript/mapleall/maple_be/src/cg/strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c621bf4450f25d6d09ea953ba048758b434cdbc0 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/strldr.cpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#if TARGAARCH64
+#include "aarch64_strldr.h"
+#elif TARGRISCV64
+#include "riscv64_strldr.h"
+#endif
+#if TARGARM32
+#include "arm32_strldr.h"
+#endif
+#include "reaching.h"
+#include "cg.h"
+#include "optimize_common.h"
+
+namespace maplebe {
+using namespace maple;
+#define SCHD_DUMP_NEWPM CG_DEBUG_FUNC(f)
+bool CgStoreLoadOpt::PhaseRun(maplebe::CGFunc &f) {
+  if (SCHD_DUMP_NEWPM) {
+    DotGenerator::GenerateDot("storeloadopt", f, f.GetMirModule(), true);
+  }
+  ReachingDefinition *reachingDef = nullptr;
+  if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) {
+    reachingDef = GET_ANALYSIS(CgReachingDefinition, f);
+  }
+  if (reachingDef == nullptr || !f.GetRDStatus()) {
+    GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgReachingDefinition::id);
+    return false;
+  }
+  (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(&CgLoopAnalysis::id, f);
+
+  StoreLoadOpt *storeLoadOpt = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+  storeLoadOpt = GetPhaseMemPool()->New<AArch64StoreLoadOpt>(f, *GetPhaseMemPool());
+#endif
+#if TARGARM32
+  storeLoadOpt = GetPhaseMemPool()->New<Arm32StoreLoadOpt>(f, *GetPhaseMemPool());
+#endif
+  storeLoadOpt->Run();
+  return true;
+}
+void CgStoreLoadOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<CgReachingDefinition>();
+  aDep.SetPreservedAll();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgStoreLoadOpt, storeloadopt)
+} /* namespace maplebe */
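The phase above wires reaching definitions into the target's store/load optimizer. A standalone sketch (invented toy IR, not part of the patch and not the pass's actual rewrite rules) of the core idea such passes exploit: forwarding a stored value to a later load from the same slot until a clobber kills the reaching definition:

```
// Standalone sketch: store-to-load forwarding over a toy linear IR.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Op { std::string kind; int slot; int reg; };  // "store" / "load" / "clobber"

int main() {
    std::vector<Op> code = {{"store", 0, 1}, {"load", 0, 2}, {"clobber", 0, 0}, {"load", 0, 3}};
    std::map<int, int> knownSlot;  // slot -> register holding its value
    for (const auto &op : code) {
        if (op.kind == "store") {
            knownSlot[op.slot] = op.reg;
        } else if (op.kind == "load") {
            auto it = knownSlot.find(op.slot);
            if (it != knownSlot.end())
                std::cout << "load r" << op.reg << " <- forward from r" << it->second << "\n";
            else
                std::cout << "load r" << op.reg << " <- memory\n";
        } else {
            knownSlot.erase(op.slot);  // a clobber kills the reaching definition
        }
    }
    return 0;
}
```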
diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/asm_assembler.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/asm_assembler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2f225bbd180df3d69cbcd722719a73884c81b79c
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/asm_assembler.cpp
@@ -0,0 +1,1513 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#include "assembler/asm_assembler.h"
+#include <cctype>
+#include <unistd.h>
+#include "dwarf.h"
+
+
+namespace assembler {
+void AsmAssembler::InitialFileInfo(const std::string &inputFileName) {
+  std::string curDirName = get_current_dir_name();
+  assert(curDirName != "" && "InitialFileInfo: curDirName is nullptr");
+  std::string path(curDirName);
+  std::string cgFile(path.append("/mplcg"));
+  EmitComment(cgFile);
+  EmitComment("Compiling");
+  EmitComment("Be options");
+
+  path = curDirName;
+  (void)path.append("/").append(inputFileName);
+  std::string irFile("\"");
+  (void)irFile.append(path).append("\"");
+  EmitDirective(kFile);
+  Emit(irFile);
+  Emit("\n");
+}
+
+void AsmAssembler::EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName) {
+  if (secName != nullptr) {
+    EmitDirective(kSection);
+    Emit(*secName);
+    Emit(",\"ax\",@progbits\n");
+  } else {
+    EmitSectionDirective(kSText);
+  }
+  EmitDirective(kAlign, 0, false, k8Bits);
+
+  EmitSymbolAttrDirective(funcAttr, symIdx);
+
+  EmitDirective(kFuncType, symIdx);
+  EmitDirective(kName, symIdx);
+  Emit("\t.cfi_startproc\n");
+}
+
+void AsmAssembler::EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo, uint32 freq, const std::string *mirName) {
+  std::string bbLabel = GetNameFromSymMap(labelSymIdx);
+  if (genVerboseInfo) {
+    Emit("// freq:");
+    Emit(freq);
+    Emit("\n");
+
+    Emit(bbLabel);
+    Emit(":");
+    if (mirName != nullptr) {
+      Emit("\t// MIR: @");
+      Emit(*mirName);
+    }
+    Emit("\n");
+  } else {
+    EmitDirective(kName, labelSymIdx);
+  }
+}
+
+void AsmAssembler::EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr) {
+  (void)funcAttr;
+  Emit("\t.cfi_endproc\n");
+  EmitDirective(kSize, symIdx);
+  Emit("\n");
+}
+
+void AsmAssembler::PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte) {
+  (void)sizeInByte;
+  bool isLocal = false;
+  if (symAttr == kSALocal) {
+    isLocal = true;
+  }
+  EmitDirective(kSize, symIdx, isLocal);
+  Emit("\n");
+}
+
+void AsmAssembler::EmitJmpTableElem(int64 jmpLabelIdx, const std::vector<int64> &labelIdxs) {
+  EmitDirective(kAlign, 0, false, k8Bits);
+  EmitDirective(kName, jmpLabelIdx);
+  for (int64 labelIdx : labelIdxs) {
+    EmitSizeDirective(k8Bytes, labelIdx, true);
+  }
+}
+
+void AsmAssembler::EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr,
+                                SectionKind sectionKind) {
+  bool isLocal = false;
+  if (symAttr == kSALocal) {
+    isLocal = true;
+  }
+
+  if (sectionKind == kSComm || sectionKind == kSBss) {
+    EmitSectionDirective(kSData);
+    EmitSymbolAttrDirective(symAttr, symIdx, isLocal);
+    EmitDirective(kAlign, 0, isLocal, alignInByte);
+    Emit("\t.comm\t");
+    std::string name = GetNameFromSymMap(symIdx, isLocal);
+    Emit(name);
+    Emit(", ");
+    Emit(sizeInByte);
+    Emit(", ");
+    Emit(alignInByte);
+    Emit("\n");
+  } else {
+    EmitDirective(kObjType, symIdx, isLocal);
+    EmitSectionDirective(sectionKind);
+    EmitSymbolAttrDirective(symAttr, symIdx, isLocal);
+    EmitDirective(kAlign, 0, isLocal, alignInByte);
+    EmitDirective(kName, symIdx, isLocal);
+  }
+}
+
+void AsmAssembler::EmitDirectString(const std::string &ustr, bool belongsToDataSec, int64 strSymIdx, bool emitAscii) {
+  (void)belongsToDataSec;
+  if (strSymIdx != 0) {
+    EmitSectionDirective(kSData);
+    EmitDirective(kAlign, 0, false, k8Bits);
+    EmitDirective(kName, strSymIdx);
+  }
+
+  if (emitAscii) {
+    Emit("\t.ascii\t\"");
+  } else {
+    Emit("\t.string\t\"");
+  }
+
+  const char *str = ustr.c_str();
+  size_t len = ustr.size();
+  /* Rewrite special char with \\ */
+  for (size_t i = 0; i < len; i++) {
+    /* Referred to
GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + Emit(buf); + } else if (*str == '\b') { + Emit("\\b"); + } else if (*str == '\n') { + Emit("\\n"); + } else if (*str == '\r') { + Emit("\\r"); + } else if (*str == '\t') { + Emit("\\t"); + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + Emit(buf); + } else { + /* all others, print as number */ + (void)snprintf_s(buf, sizeof(buf), 4, "\\%03o", (*str) & 0xFF); /* 4: max store chars */ + buf[kLastChar] = '\0'; + Emit(buf); + } + str++; + } + Emit("\"\n"); +} + +void AsmAssembler::EmitIndirectString(int64 strSymIdx, bool belongsToDataSec) { + (void)belongsToDataSec; + EmitSizeDirective(k8Bytes, strSymIdx, true); +} + +void AsmAssembler::EmitIntValue(int64 value, uint64 elemSize, bool belongsToDataSec) { + (void)belongsToDataSec; + EmitSizeDirective(elemSize, value, false); +} + +void AsmAssembler::EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec) { + (void)belongsToDataSec; + Emit("\t.quad\t"); + std::string name = GetNameFromSymMap(symIdx); + Emit(name); + if (symAddrOfs != 0) { + Emit(" + "); + Emit(symAddrOfs); + } + if (structFieldOfs != 0) { + Emit(" + "); + Emit(structFieldOfs); + } + Emit("\n"); +} + +void AsmAssembler::EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec) { + (void)belongsToDataSec; + EmitSizeDirective(k8Bytes, symIdx, true); +} + +void AsmAssembler::EmitLabelValue(int64 symIdx, bool belongsToDataSec) { + (void)belongsToDataSec; + EmitSizeDirective(k8Bytes, symIdx, true); +} + +void AsmAssembler::EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec) { + (void)belongsToDataSec; + EmitSizeDirective(k1Byte, combineBitFieldValue, false); +} + +/* emit debug info */ +void AsmAssembler::EmitHexUnsigned(uint64 num) { + std::ios::fmtflags flag(this->outStream.flags()); + this->outStream << "0x" << std::hex << num; + (void)this->outStream.flags(flag); +} + +void AsmAssembler::EmitDecUnsigned(uint64 num) { + std::ios::fmtflags flag(outStream.flags()); + outStream << std::dec << num; + (void)outStream.flags(flag); +} + +void AsmAssembler::EmitDecSigned(int64 num) { + std::ios::fmtflags flag(outStream.flags()); + outStream << std::dec << num; + (void)outStream.flags(flag); +} + +void AsmAssembler::EmitDIHeader() { + Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + Emit(".L" XSTR(TEXT_BEGIN) ":\n"); +} + +void AsmAssembler::EmitDIFooter() { + Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + Emit(".L" XSTR(TEXT_END) ":\n"); +} + +void AsmAssembler::EmitDIHeaderFileInfo() { + Emit("// dummy header file 1\n"); + Emit("// dummy header file 2\n"); + Emit("// dummy header file 3\n"); +} + +void AsmAssembler::EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) { + /* From DWARF Standard Specification V4. 7.5.1 + collect section size */ + Emit("\t.section\t.debug_info,\"\",@progbits\n"); + /* label to mark start of the .debug_info section */ + Emit(".L" XSTR(DEBUG_INFO_0) ":\n"); + /* $ 7.5.1.1 */ + Emit("\t.4byte\t"); + EmitHexUnsigned(debugInfoLength); + Emit(CMNT "section length\n"); + /* DWARF version. uhalf. 
*/ + Emit("\t.2byte\t"); + /* 4 for version 4. */ + EmitHexUnsigned(kDwarfVersion); + Emit("\n"); + /* debug_abbrev_offset. 4byte for 32-bit, 8byte for 64-bit */ + Emit("\t.4byte\t.L" XSTR(DEBUG_ABBREV_0) "\n"); + /* address size. ubyte */ + Emit("\t.byte\t"); + EmitHexUnsigned(kSizeOfPTR); + Emit("\n"); +} + +void AsmAssembler::EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName, + uint32 offset, uint32 size) { + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(abbrevId); + if (verbose) { + Emit(CMNT); + Emit(dieTagName); + Emit(" Offset= "); + EmitHexUnsigned(offset); + Emit(" ("); + EmitDecUnsigned(offset); + Emit(" ), Size= "); + EmitHexUnsigned(size); + Emit(" ("); + EmitDecUnsigned(size); + Emit(" )\n"); + } else { + Emit("\n"); + } +} + +void AsmAssembler::EmitDIFormSpecification(unsigned int dwform) { + Emit("\t"); + switch (dwform) { + case DW_FORM_string: + Emit(".string"); + break; + case DW_FORM_strp: + case DW_FORM_data4: + case DW_FORM_ref4: + Emit(".4byte "); + break; + case DW_FORM_data1: + Emit(".byte "); + break; + case DW_FORM_data2: + Emit(".2byte "); + break; + case DW_FORM_data8: + Emit(".8byte "); + break; + case DW_FORM_sec_offset: + Emit(".4byte "); + break; + /* if DWARF64, should be .8byte? */ + case DW_FORM_addr: /* Should we use DWARF64? for now, we generate .8byte as gcc does for DW_FORM_addr */ + Emit(".8byte "); + break; + case DW_FORM_exprloc: + Emit(".uleb128 "); + break; + default: + assert(0 && "NYI"); + break; + } +} + +void AsmAssembler::EmitDwFormString(const std::string &name) { + Emit("\""); + Emit(name); + Emit("\""); + Emit(CMNT "len = "); + EmitDecUnsigned(name.length() + 1); +} + +void AsmAssembler::EmitDwFormStrp(uint32 strLabelId, size_t strTableSize) { + Emit(".L" XSTR(DEBUG_STR_LABEL)); + outStream << strLabelId; +} + +void AsmAssembler::EmitDwFormData(int32 attrValue, uint8 sizeInByte) { + EmitHexUnsigned(attrValue); +} + +void AsmAssembler::EmitDwFormData8() { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); +} + +void AsmAssembler::EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx, + uint32 endLabelIdx, uint32 startLabelIdx) { + outStream << ".L." << endLabelFuncPuIdx << "__" << endLabelIdx; + Emit("-"); + outStream << ".L." << startLabelFuncPuIdx << "__" << startLabelIdx; +} + +void AsmAssembler::EmitLabel(uint32 funcPuIdx, uint32 labIdx) { + outStream << ".L." 
<< funcPuIdx << "__" << labIdx; +} + +void AsmAssembler::EmitDwFormSecOffset() { + Emit(".L"); + Emit(XSTR(DEBUG_LINE_0)); +} + +void AsmAssembler::EmitDwFormAddr(bool emitTextBegin) { + if (emitTextBegin) { + Emit(".L" XSTR(TEXT_BEGIN)); + } else { + Emit("XXX--ADDR--XXX"); + } +} + +void AsmAssembler::EmitDwFormRef4(uint64 offsetOrValue, bool unknownType, bool emitOffset) { + if (emitOffset) { + Emit(" OFFSET "); + } + EmitHexUnsigned(offsetOrValue); + if (unknownType) { + Emit(CMNT "Warning: dummy type used"); + } +} + +void AsmAssembler::EmitDwFormExprlocCfa(uint32 dwOp) { + EmitHexUnsigned(1); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); +} + +void AsmAssembler::EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) { + EmitHexUnsigned(k9ByteSize); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); + Emit("\n\t.8byte "); + Emit(addrStr); +} + +void AsmAssembler::EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) { + EmitHexUnsigned(1 + sleb128Size); + Emit(CMNT "uleb128 size"); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); + Emit("\n\t.sleb128 "); + EmitDecSigned(fboffset); +} + +void AsmAssembler::EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) { + EmitHexUnsigned(k2Bytes); + Emit(CMNT "size"); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); + Emit(CMNT); + Emit(dwOpName); + Emit("\n\t.sleb128 "); + EmitDecSigned(0); + Emit(CMNT "offset"); +} + +void AsmAssembler::EmitDwFormExprloc(uintptr elp) { + EmitHexUnsigned(elp); +} + +void AsmAssembler::EmitDIDwName(const std::string &dwAtName, const std::string &dwForName) { + Emit(CMNT); + Emit(dwAtName); + Emit(" : "); + Emit(dwForName); +} + +void AsmAssembler::EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName, + bool withChildren) { + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(abbrevId); + if (verbose) { + Emit(CMNT "Abbrev Entry ID"); + } + Emit("\n"); + /* TAG */ + Emit("\t.uleb128 "); + EmitHexUnsigned(tag); + if (verbose) { + Emit(CMNT); + Emit(dwTagName); + } + Emit("\n"); + /* children? */ + Emit("\t.byte "); + EmitHexUnsigned(static_cast(withChildren)); + if (verbose) { + Emit(withChildren ? 
CMNT "DW_CHILDREN_yes" : CMNT "DW_CHILDREN_no"); + } + Emit("\n"); +} + +void AsmAssembler::EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, + const std::string &dwAtName, const std::string &dwFromName) { + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + Emit("\t.uleb128 "); + EmitHexUnsigned(aplAt); + if (verbose) { + Emit(CMNT); + Emit(dwAtName); + } + Emit("\n"); + Emit("\t.uleb128 "); + EmitHexUnsigned(aplFrom); + if (verbose) { + Emit(CMNT); + Emit(dwFromName); + } + Emit("\n"); +} + +void AsmAssembler::EmitDIDebugStrSection(const std::vector &strps, const std::vector &debugStrs, + uint64 size, size_t strTableSize) { + Emit("\t.section\t.debug_str,\"MS\",@progbits,1\n"); + for (int i = 0; i < static_cast(debugStrs.size()); i++) { + Emit(".L" XSTR(DEBUG_STR_LABEL)); + this->outStream << strps[i]; + Emit(":\n"); + Emit("\t.string \""); + Emit(debugStrs[i]); + Emit("\"\n"); + } +} + +void AsmAssembler::EmitNull(uint64 sizeInByte) { + EmitDirective(kZero); + Emit(sizeInByte); + Emit("\n"); +} + +/* start of X64 instructions */ +/* mov */ +void AsmAssembler::Mov(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* movabs */ +void AsmAssembler::Movabs(const ImmOpnd &immOpnd, Reg reg) { + Emit("\tmovabs"); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Movabs(int64 symIdx, Reg reg) { + Emit("\tmovabs"); + Emit("\t"); + EmitLabelReg(symIdx, reg); + Emit("\n"); +} + +/* push */ +void AsmAssembler::Push(InsnSize insnSize, Reg reg) { + Emit("\tpush"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +/* pop */ +void AsmAssembler::Pop(InsnSize insnSize, Reg reg) { + Emit("\tpop"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +/* lea */ +void AsmAssembler::Lea(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tlea"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* movzx */ +void AsmAssembler::MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) { + Emit("\tmovz"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) { + Emit("\tmovz"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* movsx */ +void AsmAssembler::MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) { + Emit("\tmovs"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void 
AsmAssembler::MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) { + Emit("\tmovs"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* add */ +void AsmAssembler::Add(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* sub */ +void AsmAssembler::Sub(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* and */ +void AsmAssembler::And(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* or */ +void AsmAssembler::Or(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + 
Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* xor */ +void AsmAssembler::Xor(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* not */ +void AsmAssembler::Not(InsnSize insnSize, Reg reg) { + Emit("\tnot"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Not(InsnSize insnSize, const Mem &mem) { + Emit("\tnot"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +/* neg */ +void AsmAssembler::Neg(InsnSize insnSize, Reg reg) { + Emit("\tneg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Neg(InsnSize insnSize, const Mem &mem) { + Emit("\tneg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +/* div & cwd, cdq, cqo */ +void AsmAssembler::Idiv(InsnSize insnSize, Reg reg) { + Emit("\tidiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Idiv(InsnSize insnSize, const Mem &mem) { + Emit("\tidiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Div(InsnSize insnSize, Reg reg) { + Emit("\tdiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Div(InsnSize insnSize, const Mem &mem) { + Emit("\tdiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Cwd() { + Emit("\tcwd\n"); +} + +void AsmAssembler::Cdq() { + Emit("\tcdq\n"); +} + +void AsmAssembler::Cqo() { + Emit("\tcqo\n"); +} + +/* shl */ +void AsmAssembler::Shl(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Shl(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, 
const Mem &mem) { + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* sar */ +void AsmAssembler::Sar(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Sar(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* shr */ +void AsmAssembler::Shr(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Shr(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* jmp */ +void AsmAssembler::Jmp(Reg reg) { + Emit("\tjmp\t"); + Emit("*"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Jmp(const Mem &mem) { + Emit("\tjmp\t"); + Emit("*"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Jmp(int64 symIdx) { + Emit("\tjmp\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +/* jump condition */ +void AsmAssembler::Je(int64 symIdx) { + Emit("\tje\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Ja(int64 symIdx) { + Emit("\tja\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jae(int64 symIdx) { + Emit("\tjae\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jne(int64 symIdx) { + Emit("\tjne\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jb(int64 symIdx) { + Emit("\tjb\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jbe(int64 symIdx) { + Emit("\tjbe\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jg(int64 symIdx) { + Emit("\tjg\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jge(int64 symIdx) { + Emit("\tjge\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jl(int64 symIdx) { + Emit("\tjl\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jle(int64 symIdx) { + Emit("\tjle\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +/* cmp */ +void AsmAssembler::Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, Reg reg, const Mem &mem) { + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + 
Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* test */ +void AsmAssembler::Test(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\ttest"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* set */ +void AsmAssembler::Setbe(Reg reg) { + Emit("\tsetbe\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setbe(const Mem &mem) { + Emit("\tsetbe\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setle(Reg reg) { + Emit("\tsetle\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setle(const Mem &mem) { + Emit("\tsetle\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setae(Reg reg) { + Emit("\tsetae\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setae(const Mem &mem) { + Emit("\tsetae\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setge(Reg reg) { + Emit("\tsetge\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setge(const Mem &mem) { + Emit("\tsetge\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setne(Reg reg) { + Emit("\tsetne\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setne(const Mem &mem) { + Emit("\tsetne\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setb(Reg reg) { + Emit("\tsetb\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setb(const Mem &mem) { + Emit("\tsetb\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setl(Reg reg) { + Emit("\tsetl\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setl(const Mem &mem) { + Emit("\tsetl\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Seta(Reg reg) { + Emit("\tseta\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Seta(const Mem &mem) { + Emit("\tseta\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setg(Reg reg) { + Emit("\tsetg\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setg(const Mem &mem) { + Emit("\tsetg\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Sete(Reg reg) { + Emit("\tsete\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Sete(const Mem &mem) { + Emit("\tsete\t"); + EmitMem(mem); + Emit("\n"); +} + +/* cmov */ +void AsmAssembler::Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmova"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmova(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmova"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovae"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovae"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovb"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void 
AsmAssembler::Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovb"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovbe(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovbe"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovbe"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} +void AsmAssembler::Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmove"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmove(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmove"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovge"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovge"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovle(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovle"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovle"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\tcmovne"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) { + Emit("\tcmovne"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* call */ +void AsmAssembler::Call(InsnSize insnSize, Reg reg) { + Emit("\tcall"); + EmitInsnSuffix(insnSize); + Emit("\t*"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Call(InsnSize insnSize, const Mem &mem) { + Emit("\tcall"); + EmitInsnSuffix(insnSize); + Emit("\t*"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Call(InsnSize insnSize, int64 symIdx) { + Emit("\tcall"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +/* ret */ +void AsmAssembler::Ret() { + Emit("\tret\n"); +} + +/* leave */ +void AsmAssembler::Leave() { + Emit("\tleave\n"); +} + +/* imul */ +void AsmAssembler::Imul(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\timul"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* nop */ +void AsmAssembler::Nop(InsnSize 
insnSize, const Mem &mem) { + Emit("\tnop"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Nop() { + Emit("\tnop\n"); +} + +/* byte swap */ +void AsmAssembler::Bswap(InsnSize insnSize, Reg reg) { + Emit("\tbswap"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) { + Emit("\txchg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* pseudo insn */ +void AsmAssembler::DealWithPseudoInst(const std::string &insn) { + Emit("\t"); + Emit(insn); + Emit("\n"); +} +/* end of X64 instructions */ +} /* namespace assembler */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/elf_assembler.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/elf_assembler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c23f2514fd6164b1fdd742a6972bbe82141f965 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/elf_assembler.cpp @@ -0,0 +1,1802 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include +#include "assembler/elf_assembler.h" + +namespace assembler { +/* These are used as the ModRM reg-field opcode extension when an instruction is encoded. */ +const uint8 kSubModReg = 5; +const uint8 kAndModReg = 4; +const uint8 kOrModReg = 1; +const uint8 kXorModReg = 6; +const uint8 kNotModReg = 2; +const uint8 kNegModReg = 3; +const uint8 kIdivModReg = 7; +const uint8 kDivModReg = 6; +const uint8 kShlModReg = 4; +const uint8 kSarModReg = 7; +const uint8 kShrModReg = 5; +const uint8 kJmpModReg = 4; +const uint8 kCmpModReg = 7; +const uint8 kCallModReg = 2; + +/* override function in base class */ +void ElfAssembler::InitialFileInfo(const std::string &inputFileName) { + /* Initialize some sections that must be used.
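Index 0 must stay a SHT_NULL entry, as the ELF specification reserves it; .strtab, .text and .symtab are needed by every object file and are registered eagerly, while .data, .bss and .rodata are created lazily by the emitters below.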
*/ + DataSection *nullDataSection = new DataSection(" ", SHT_NULL, 0, 0); + RegisterSection(*nullDataSection); + strTabSection = new StringSection(".strtab", SHT_STRTAB, 0, 1); + RegisterSection(*strTabSection); + textSection = new DataSection(".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, k8Bits); + RegisterSection(*textSection); + symbolTabSection = new SymbolSection(".symtab", SHT_SYMTAB, 0, k8Bits, *strTabSection); + RegisterSection(*symbolTabSection); +} + +void ElfAssembler::EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, + SymbolAttr symAttr, SectionKind sectionKind) { + switch (sectionKind) { + case kSBss: + case kSComm: + case kSTbss: + EmitBssSectionVar(symIdx, sizeInByte, alignInByte, symAttr); + break; + case kSTdata: + case kSData: + EmitDataSectionVar(symIdx); + break; + case kSRodata: + if (rodataSection == nullptr) { + rodataSection = new DataSection(".rodata", SHT_PROGBITS, SHF_ALLOC, k8Bits); + RegisterSection(*rodataSection); + } + UpdateLabel(symIdx, LabelType::kConst, rodataSection->GetDataSize()); + break; + case kSText: + case kSDebugInfo: + case kSDebugAbbrev: + case kSDebugStr: + default: + assert(false && "unprocessed Section in EmitVariable"); + break; + } +} + +void ElfAssembler::EmitBssSectionVar(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr) { + if (bssSection == nullptr) { + bssSection = new DataSection(".bss", SHT_NOBITS, SHF_WRITE | SHF_ALLOC, k8Bits); + RegisterSection(*bssSection); + } + uint64 bssCurSize = bssSection->GetSectionSize(); + bssSection->SetSectionSize(static_cast(bssCurSize + sizeInByte)); + if (symAttr == kSALocal) { + const std::string &symbolName = GetNameFromSymMap(symIdx, true); + auto nameIndex = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(nameIndex), static_cast((STB_LOCAL << 4) + (STT_OBJECT & 0xf)), + 0, bssSection->GetIndex(), bssCurSize, sizeInByte}, symIdx); + UpdateLabel(symIdx, LabelType::kLocalUninitialized, static_cast(bssCurSize)); + } else { + const std::string &symbolName = GetNameFromSymMap(symIdx); + auto nameIndex = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(nameIndex), static_cast((STB_GLOBAL << 4) + (STT_OBJECT & 0xf)), + 0, SHN_COMMON, static_cast
(alignInByte), sizeInByte}, symIdx); + UpdateLabel(symIdx, LabelType::kGlobalUninitialized, static_cast(bssCurSize)); + } +} + +void ElfAssembler::EmitDataSectionVar(int64 symIdx) { + if (dataSection == nullptr) { + dataSection = new DataSection(".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, k8Bits); + RegisterSection(*dataSection); + } + uint32 pos = dataSection->GetDataSize(); + UpdateLabel(symIdx, LabelType::kStatic, pos); +} + +void ElfAssembler::EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName) { + if (emitMemoryManager.funcAddressSaver != nullptr) { + const std::string &funcName = GetNameFromSymMap(symIdx); + emitMemoryManager.funcAddressSaver(emitMemoryManager.codeSpace, funcName, static_cast(codeBuff.size())); + } + UpdateLabel(symIdx, LabelType::kFunc, static_cast(codeBuff.size())); +} + +void ElfAssembler::EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo, uint32 freq, const std::string *mirName) { + UpdateLabel(labelSymIdx, LabelType::kBBLabel, static_cast(codeBuff.size())); +} + +void ElfAssembler::EmitJmpTableElem(int64 jmpLabelIdx, const std::vector &labelSymIdxs) { + UpdateLabel(jmpLabelIdx, LabelType::kJmpLabel, static_cast(codeBuff.size())); + const size_t kLabelSize = 8; + for (auto labelSymIdx : labelSymIdxs) { + AppendFixup(labelSymIdx, kAbsolute64, {static_cast(codeBuff.size()), kLabelSize}, fixups); + uint8 imm = 0; + Encodeb(imm, kLabelSize); + } +} + +void ElfAssembler::EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr) { + uint64 funcSymValue = static_cast(GetLabelRelOffset(symIdx)); + uint64 funcSymSize = static_cast(GetLabelSize(symIdx)); + uint8 funcSymType = STB_GLOBAL; + switch (funcAttr) { + case kSALocal: + funcSymType = STB_LOCAL; + break; + case kSAGlobal: + funcSymType = STB_GLOBAL; + break; + case kSAWeak: + funcSymType = STB_WEAK; + break; + case kSAStatic: + case kSAHidden: + default: + assert(false && "unknown/unsupported SymbolAttr in EmitFunctionFoot"); + break; + } + const std::string &symbolName = GetNameFromSymMap(symIdx); + auto nameIndex = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(nameIndex), static_cast((funcSymType << kLeftShift4Bits) + + (STT_FUNC & 0xf)), 0, textSection->GetIndex(), funcSymValue, funcSymSize}, symIdx); +} + +void ElfAssembler::EmitDirectString(const std::string &str, bool belongsToDataSec, int64 strSymIdx, bool emitAscii) { + /* Add a terminator to a string. */ + std::string ustr = str; + ustr += '\0'; + if (strSymIdx != 0) { + if (dataSection == nullptr) { + dataSection = new DataSection(".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, k8Bits); + RegisterSection(*dataSection); + } + uint32 pos = dataSection->GetDataSize(); + UpdateLabel(strSymIdx, LabelType::kStrLabel, pos); + dataSection->AppendData(ustr.data(), ustr.size()); + const size_t kStrAlignSize = 8; + /* pad with zero bytes so that the next item starts on an 8-byte boundary;
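note that when the string size is already a multiple of eight, a full 8-byte word of zeros is appended.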
*/ + size_t appendSize = kStrAlignSize - ustr.size() % kStrAlignSize; + int64 appendData = 0; + dataSection->AppendData(appendData, appendSize); + } else { + if (belongsToDataSec) { + dataSection->AppendData(ustr.data(), ustr.size()); + } else { + rodataSection->AppendData(ustr.data(), ustr.size()); + } + } +} + +void ElfAssembler::EmitIndirectString(int64 strSymIdx, bool belongsToDataSec) { + const size_t kStrAddrSize = 8; + uint32 pos = 0; + int64 addr = 0; + if (belongsToDataSec) { + pos = dataSection->GetDataSize(); + dataSection->AppendData(addr, kStrAddrSize); + AppendFixup(strSymIdx, kAbsolute64, {pos, kStrAddrSize}, dataFixups); + } else { + pos = rodataSection->GetDataSize(); + rodataSection->AppendData(addr, kStrAddrSize); + AppendFixup(strSymIdx, kAbsolute64, {pos, kStrAddrSize}, rodataFixups); + } +} + +void ElfAssembler::EmitIntValue(int64 value, size_t valueSize, bool belongsToDataSec) { + if (belongsToDataSec) { + dataSection->AppendData(value, valueSize); + } else { + rodataSection->AppendData(value, valueSize); + } +} + +void ElfAssembler::EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec) { + const size_t kAddrSize = 8; + uint32 pos = 0; + int64 addr = 0; + if (belongsToDataSec) { + pos = dataSection->GetDataSize(); + dataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, dataFixups, symAddrOfs); + } else { + pos = rodataSection->GetDataSize(); + rodataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, rodataFixups, symAddrOfs); + } +} + +void ElfAssembler::EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec) { + EmitLabelValue(symIdx, belongsToDataSec); +} + +void ElfAssembler::EmitLabelValue(int64 symIdx, bool belongsToDataSec) { + const size_t kAddrSize = 8; + uint32 pos = 0; + int64 addr = 0; + if (belongsToDataSec) { + pos = dataSection->GetDataSize(); + dataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, dataFixups); + } else { + pos = rodataSection->GetDataSize(); + rodataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, rodataFixups); + } +} + +void ElfAssembler::EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec) { + if (belongsToDataSec) { + dataSection->AppendData(static_cast(combineBitFieldValue), 1); + } else { + rodataSection->AppendData(static_cast(combineBitFieldValue), 1); + } +} + +void ElfAssembler::EmitNull(uint64 sizeInByte) { + int64 data = 0; + dataSection->AppendData(data, static_cast(sizeInByte)); +} + +void ElfAssembler::PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte) { + Label *label = labelManager.at(symIdx); + uint64 pos = static_cast(label->GetRelOffset()); + if (symAttr == kSALocal) { + const std::string &symbolName = GetNameFromSymMap(symIdx, true); + auto index = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(index), static_cast((STB_LOCAL << 4) + + (STT_OBJECT & 0xf)), 0, dataSection->GetIndex(), pos, sizeInByte}, symIdx); + } else { + const std::string &symbolName = GetNameFromSymMap(symIdx); + auto index = strTabSection->AddString(symbolName); + uint8 symInfo = symAttr == kSAGlobal ? 
STB_GLOBAL : STB_WEAK; + AddSymToSymTab({static_cast(index), static_cast((symInfo << 4) + + (STT_OBJECT & 0xf)), 0, dataSection->GetIndex(), pos, sizeInByte}, symIdx); + } +} + +void ElfAssembler::FinalizeFileInfo() { + AppendSymsToSymTabSec(); + HandleTextSectionFixup(); + HandleDataSectionFixup(); + HandleRodataSectionFixup(); + HandleDebugInfoSectionFixup(); + WriteElfFile(); +} + +/* encode function */ +void ElfAssembler::OpReg(Reg reg, uint8 opCode1, uint8 opCode2, uint8 modReg) { + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + uint8 rex = GetRex(reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (GetRegSize(reg) == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = GetRegCodeId(reg); + SetModRM(GetMod(reg), modReg, modrm); +} + +void ElfAssembler::OpMem(const Mem &mem, uint8 opCode1, uint8 opCode2, uint8 modReg) { + if (HasOpndSizePrefix(mem)) { + Encodeb(0x66); + } + + if (HasAddrSizePrefix(mem)) { + Encodeb(0x67); + } + + uint8 rex = GetRex(mem); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (mem.size == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = 0; + if (!HasSIB(mem)) { + modrm = GetRegCodeId(mem.base); + } else { + modrm = 0b100; /* r/m=b100, use SIB */ + } + SetModRM(GetMod(mem), modReg, modrm); + if (HasSIB(mem)) { + Encodeb(GetSIB(mem)); + } + OpDisp(mem); +} + +void ElfAssembler::OpDisp(const Mem &mem) { + int64 symIdx = mem.disp.first; + uint64 offset = static_cast(mem.disp.second); + if (symIdx != 0) { + if (!CanEncodeLabel(symIdx)) { + size_t offsetSize = 4; + UpdateLabel(symIdx); + AppendFixup(symIdx, kRelative, {static_cast(codeBuff.size()), offsetSize}, fixups, mem.disp.second); + uint8 imm = 0; + Encodeb(imm, offsetSize); + } + } else if (offset == 0) { + if (mem.memType == kOnlyBase && (mem.base == RBP || mem.base == R13)) { + Encodeb(offset); + } else if (mem.base == RIP) { + Encoded(offset); + } else { + return; + } + } else { + if (mem.base != RIP && Is8Bits(offset)) { + Encodeb(offset); /* 8-bit displacement */ + } else { + Encoded(offset); /* 32-bit displacement */ + } + } +} + +void ElfAssembler::OpRR(Reg reg1, Reg reg2, uint8 opCode1, uint8 opCode2, bool extInsn) { + if (!extInsn && (HasOpndSizePrefix(reg1) || HasOpndSizePrefix(reg2))) { + Encodeb(0x66); + } + uint8 rex = extInsn ? GetRex(reg2, reg1) : GetRex(reg1, reg2); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (GetRegSize(reg1) == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = extInsn ? GetModRM(reg2, reg1) : GetModRM(reg1, reg2); + if (modrm != 0) { + Encodeb(modrm); + } +} + +void ElfAssembler::OpRM(Reg reg, const Mem &mem, uint8 opCode1, uint8 opCode2, bool extInsn) { + if (!extInsn && HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (!extInsn && HasAddrSizePrefix(mem)) { + Encodeb(0x67); + } + uint8 rex = GetRex(mem, reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (GetRegSize(reg) == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = GetModRM(reg, mem); + Encodeb(modrm); + if (HasSIB(mem)) { + Encodeb(GetSIB(mem)); + } + OpDisp(mem); +} + +void ElfAssembler::OpImmAndReg(const ImmOpnd &immOpnd, Reg reg, uint8 opCode, uint8 modReg) { + bool isSymbol = immOpnd.second; + uint32 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + uint8 immBit = Is8Bits(imm) ? k8Bits : (Is16Bits(imm) ? 
k16Bits : k32Bits); + uint8 regSize = GetRegSize(reg); + if (regSize == k8Bits) { + immBit = k8Bits; + } + if (immBit == k16Bits && (regSize == k64Bits || regSize == k32Bits)) { + immBit = k32Bits; /* in 32/64-bit mode the immediate cannot use a 16-bit encoding. */ + } + immBit = isSymbol ? k32Bits : immBit; + if (GetRegCodeId(reg) == 0 && (regSize == immBit || (regSize == k64Bits && immBit == k32Bits))) { + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (GetRex(reg)) { + Encodeb(GetRex(reg)); + } + Encodeb(opCode | 0x4 | (immBit == k8Bits ? 0 : 1)); + } else { + uint8 tmp = immBit < std::min(static_cast(regSize), 32U) ? 2 : 0; + OpReg(reg, 0x80 | tmp, 0, modReg); + } + if (isSymbol) { + if (!CanEncodeLabel(immOpnd.first)) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kRelative, {static_cast(codeBuff.size()), immBit / k8Bits}, fixups); + imm = 0; + Encodeb(imm, immBit / k8Bits); + } + } else { + Encodeb(imm, immBit / k8Bits); + } +} + +void ElfAssembler::OpImmAndMem(const ImmOpnd &immOpnd, const Mem &mem, uint8 modReg) { + bool isSymbol = immOpnd.second; + uint32 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + if (isSymbol) { + if (!CanEncodeLabel(immOpnd.first)) { + size_t offsetSize = 4; + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kRelative, {static_cast(codeBuff.size()), offsetSize}, fixups); + imm = 0; + OpMem(mem, 0x80, 0, modReg); + Encodeb(imm, offsetSize); + } + } else { + uint8 immBit = Is8Bits(imm) ? k8Bits : (Is16Bits(imm) ? k16Bits : k32Bits); + if (mem.size == k8Bits) { + immBit = k8Bits; + } + if (immBit == k16Bits && (mem.size == k64Bits || mem.size == k32Bits)) { + immBit = k32Bits; /* in 32/64-bit mode the immediate cannot use a 16-bit encoding. */ + } + uint8 tmp = immBit < std::min(static_cast(mem.size), 32U) ? 2 : 0; + OpMem(mem, 0x80 | tmp, 0, modReg); + Encodeb(imm, immBit / k8Bits); + } +} + +void ElfAssembler::MovRegAndDisp(Reg reg, const Mem &mem, uint8 opCode) { + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (HasAddrSizePrefix(mem)) { + Encodeb(0x67); + } + uint8 rex = GetRex(mem, reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode | (GetRegSize(reg) == k8Bits ?
0 : 1)); + int64 symIdx = mem.disp.first; + uint64 offset = static_cast(mem.disp.second); + if (symIdx != 0) { + size_t offsetSize = k8Bits; + Encodeb(static_cast(0), offsetSize); + UpdateLabel(symIdx); + AppendFixup(symIdx, kAbsolute64, {static_cast(codeBuff.size()), offsetSize}, fixups); + } + if (Is64Bits(offset)) { + Encodeq(offset); + } else { + Encoded(offset); + } +} + +void ElfAssembler::OpPushPop(Reg reg, uint8 code) { + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (IsRegExt(reg)) { + Encodeb(0x41); /* Rex prefix */ + } + Encodeb(code | GetRegCodeId(reg)); +} + +void ElfAssembler::JmpToLabel(int64 labelIdx, uint8 opCode1, uint8 opCode2, size_t offsetSize) { + Encodeb(opCode1); + if (opCode2 != 0) { + Encodeb(opCode2); + } + if (!CanEncodeLabel(labelIdx)) { + UpdateLabel(labelIdx); + AppendFixup(labelIdx, kRelative, {static_cast(codeBuff.size()), offsetSize}, fixups); + uint8 imm = 0; + Encodeb(imm, offsetSize); + } +} + +void ElfAssembler::OpCmovcc(Reg srcReg, Reg dstReg, uint8 opCode1, uint8 opCode2) { + if (HasOpndSizePrefix(srcReg) || HasOpndSizePrefix(dstReg)) { + Encodeb(0x66); + } + uint8 rex = GetRex(dstReg, srcReg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1); + Encodeb(opCode2); + uint8 modrm = GetModRM(dstReg, srcReg); + if (modrm != 0) { + Encodeb(modrm); + } +} + +void ElfAssembler::UpdateLabel(int64 labelIdx, LabelType type, uint32 relOffset) { + if (labelManager.count(labelIdx) == 0) { + Label *label = new Label(labelIdx, relOffset, type); + (void)labelManager.emplace(labelIdx, label); + } else { + Label *label = labelManager.at(labelIdx); + if (type != LabelType::kLNone) { + label->SetLabelType(type); + } + if (relOffset != 0xFFFFFFFFU) { + label->SetRelOffset(relOffset); + } + } +} + +bool ElfAssembler::CanEncodeLabel(int64 labelIdx) { + if (labelManager.count(labelIdx) != 0) { + Label *label = labelManager.at(labelIdx); + uint32 relOffset = label->GetRelOffset(); + LabelType labelType = label->GetLabelType(); + if ((labelType == LabelType::kBBLabel || labelType == LabelType::kFunc) && relOffset != 0xFFFFFFFFU) { + size_t offsetSize = 4; + uint64 offset = static_cast((relOffset - codeBuff.size()) - offsetSize); + Encodeb(offset, offsetSize); + return true; + } + } + return false; +} + +uint32 ElfAssembler::GetLabelSize(int64 labelIdx) const { + return static_cast(codeBuff.size()) - GetLabelRelOffset(labelIdx); +} + +uint32 ElfAssembler::GetLabelRelOffset(int64 labelIdx) const { + if (labelManager.count(labelIdx) != 0) { + Label *label = labelManager.at(labelIdx); + assert(label->GetRelOffset() != 0xFFFFFFFFU && "label's relOffset doesn't exist"); + return label->GetRelOffset(); + } + return 0; +} + +void ElfAssembler::AppendFixup(int64 labelIdx, FixupKind kind, const std::pair &offsetPair, + std::vector &tmpFixups, int64 disp) { + tmpFixups.push_back(new Fixup(labelIdx, kind, offsetPair, disp)); +} + +/* elf file */ +void ElfAssembler::InitElfHeader() { + header.e_ident[EI_MAG0] = ELFMAG0; + header.e_ident[EI_MAG1] = ELFMAG1; + header.e_ident[EI_MAG2] = ELFMAG2; + header.e_ident[EI_MAG3] = ELFMAG3; + header.e_ident[EI_CLASS] = ELFCLASS64; + header.e_ident[EI_DATA] = ELFDATA2LSB; + header.e_ident[EI_VERSION] = EV_CURRENT; + header.e_ident[EI_OSABI] = ELFOSABI_NONE; /* ELFOSABI_NONE represents UNIX System V */ + header.e_ident[EI_ABIVERSION] = 0; + (void)std::fill_n(&header.e_ident[EI_PAD], EI_NIDENT - EI_PAD, 0); + header.e_type = ET_REL; + header.e_machine = EM_X86_64; + header.e_version = EV_CURRENT; + header.e_entry = 0; + header.e_phoff = 
0; + header.e_shoff = 0; /* set later by LayoutSections */ + header.e_flags = 0; /* The Intel architecture defines no flags, so this member is zero. */ + header.e_ehsize = sizeof(ElfFileHeader); + header.e_phentsize = 0; + header.e_phnum = 0; + header.e_shentsize = sizeof(SectionHeader); + header.e_shnum = static_cast(sections.size()); + header.e_shstrndx = strTabSection->GetIndex(); +} + +void ElfAssembler::RegisterSection(Section &section) { + sections.push_back(&section); + section.SetIndex(static_cast(sections.size() - 1)); +} + +void ElfAssembler::LayoutSections() { + globalOffset = sizeof(ElfFileHeader); + globalOffset = Alignment::Align(globalOffset, k8Bits); + + for (auto *section : sections) { + section->SetSectionHeaderNameIndex(static_cast(strTabSection->AddString(section->GetName()))); + } + + for (auto *section : sections) { + globalOffset = Alignment::Align(globalOffset, section->GetAlign()); + /* lay out section */ + UpdateSectionOffset(*section); + if (section->GetType() != SHT_NOBITS) { + section->GenerateData(); + } + UpdateGlobalOffset(*section); + } + + globalOffset = Alignment::Align(globalOffset, 16U); + header.e_shoff = globalOffset; + header.e_shnum = static_cast(sections.size()); +} + +void ElfAssembler::UpdateSectionOffset(Section &section) { + if (section.GetType() != SHT_NOBITS) { + section.SetOffset(globalOffset); + } else { + section.SetOffset(0); + } +} + +void ElfAssembler::UpdateGlobalOffset(Section &section) { + if (section.GetType() != SHT_NOBITS) { + globalOffset += section.GetSectionSize(); + } +} + +void ElfAssembler::SetFileOffset(uint64 offset) { + (void)outStream.seekp(offset); +} + +/* symIdx is the key used to get symbol's index in .symtab */ +void ElfAssembler::AddSymToSymTab(const Symbol &symbol, int64 symIdx) { + const int kGetHigh4Bits = 4; + if ((symbol.st_info >> kGetHigh4Bits) == STB_LOCAL) { + localSymTab.push_back(std::make_pair(symbol, symIdx)); + } else { + symTab.push_back(std::make_pair(symbol, symIdx)); + } +} + +void ElfAssembler::AppendRela(const Label &label, const std::pair &offsetPair, + uint64 type, Sxword addend) { + LabelType labelType = label.GetLabelType(); + int64 relOffset = static_cast(label.GetRelOffset()); + uint64 offset = static_cast(offsetPair.first); + int64 offsetSize = static_cast(offsetPair.second); + if (labelType == LabelType::kConst) { + int64 rodataSecSymIdx = ~rodataSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(rodataSecSymIdx) << + kLeftShift32Bits) + (type & 0xffffffff)), relOffset}); + } else if (labelType == LabelType::kGlobal) { + addend -= offsetSize; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(label.GetlabelIdx()) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else if (labelType == LabelType::kStatic) { + addend += relOffset - offsetSize; + int64 dataSecSymIdx = ~dataSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(dataSecSymIdx) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else if (labelType == LabelType::kLocalUninitialized) { + addend = addend + relOffset - offsetSize; + int64 bssSecSymIdx = ~bssSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(bssSecSymIdx) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else if (labelType == LabelType::kGlobalUninitialized) { + addend = addend - offsetSize; + relaSection->AppendRela({offset,
static_cast((symbolTabSection->GetIdxInSymbols(label.GetlabelIdx()) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else if (labelType == LabelType::kJmpLabel) { + type = R_X86_64_32; + addend = relOffset; + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(textSecSymIdx) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else if (labelType == LabelType::kBBLabel) { + addend = relOffset; + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + relaSection->AppendRela({offset, static_cast((symbolTabSection->GetIdxInSymbols(textSecSymIdx) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else if (labelType == LabelType::kFunc || + (label.GetRelOffset() == 0xFFFFFFFFU && labelType == LabelType::kLNone)) { + int64 labelIdx = label.GetlabelIdx(); + if (!symbolTabSection->ExistSymInSymbols(labelIdx)) { + symbolTabSection->AppendSymbol({static_cast(strTabSection->AddString(GetNameFromSymMap(labelIdx))), + static_cast((STB_GLOBAL << kLeftShift4Bits) + (STT_NOTYPE & 0xf)), 0, 0, 0, 0}); + symbolTabSection->AppendIdxInSymbols(labelIdx); + } + relaSection->AppendRela({offsetPair.first, static_cast((symbolTabSection->GetIdxInSymbols(labelIdx) << + kLeftShift32Bits) + (type & 0xffffffff)), addend}); + } else { + assert(false && "unsupported label type in func AddRela"); + } +} + +uint64 ElfAssembler::GetRelaType(FixupKind kind) const { + switch (kind) { + case kRelative: + return R_X86_64_PC32; + case kRelative64: + return R_X86_64_PC64; + case kAbsolute: + return R_X86_64_32; + case kAbsolute64: + return R_X86_64_64; + case kPLT: + return R_X86_64_PLT32; + case kFNone: + return R_X86_64_NONE; + } +} + +void ElfAssembler::HandleTextSectionFixup() { + if (!fixups.empty()) { + relaSection = new RelaSection(".rela.text", SHT_RELA, SHF_INFO_LINK, textSection->GetIndex(), + k8Bits, *symbolTabSection); + RegisterSection(*relaSection); + } + + for (auto fixup : fixups) { + int64 labelIdx = fixup->GetlabelIdx(); + if (labelManager.count(labelIdx) == 0) { + continue; + } + + const std::pair &offsetPair = fixup->GetOffset(); + Label *label = labelManager.at(labelIdx); + uint32 relOffset = label->GetRelOffset(); + LabelType labelType = label->GetLabelType(); + + FixupKind fixupKind = fixup->GetFixupKind(); + if ((fixupKind == kRelative || fixupKind == kRelative64) && + (labelType == LabelType::kBBLabel || labelType == LabelType::kFunc)) { + FixupEncode(offsetPair.first, relOffset, offsetPair.second); + fixup->SetFixupKind(kFNone); + } + + if (relOffset != 0xFFFFFFFFU && fixupKind == kPLT) { + FixupEncode(offsetPair.first, relOffset, offsetPair.second); + fixup->SetFixupKind(kFNone); + } + + fixupKind = fixup->GetFixupKind(); + uint64 type = GetRelaType(fixupKind); + int64 addend = (fixupKind == kAbsolute || fixupKind == kAbsolute64) ? 0 : -0x4; + if (fixupKind != kFNone) { + addend = labelType == LabelType::kGlobalUninitialized || labelType == LabelType::kLocalUninitialized || + labelType == LabelType::kGlobal || labelType == LabelType::kStatic ? 
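/* data/bss symbols take the displacement recorded in the fixup as the addend */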
fixup->GetDisp() : addend; + AppendRela(*label, offsetPair, type, addend); + } + } + textSection->AppendData(codeBuff.data(), codeBuff.size()); +} + +void ElfAssembler::HandleDataSectionFixup() { + if (!dataFixups.empty()) { + relaDataSection = new RelaSection(".rela.data", SHT_RELA, SHF_INFO_LINK, dataSection->GetIndex(), + k8Bits, *symbolTabSection); + RegisterSection(*relaDataSection); + } + for (auto fixup: dataFixups) { + int64 labelIdx = fixup->GetlabelIdx(); + std::pair offset = fixup->GetOffset(); + const uint32 relocType = R_X86_64_64; + if (labelManager.count(labelIdx) == 0) { + continue; + } + Label *label = labelManager.at(labelIdx); + LabelType labelType = label->GetLabelType(); + int64 addend = 0; + int64 relOffset = static_cast(label->GetRelOffset()); + if (labelType == LabelType::kGlobalUninitialized) { + addend = fixup->GetDisp(); + uint64 pos = symbolTabSection->GetIdxInSymbols(labelIdx); + relaDataSection->AppendRela({offset.first, static_cast((pos << + kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } else if (labelType == LabelType::kLocalUninitialized) { + addend = fixup->GetDisp(); + int64 bssSecSymIdx = ~bssSection->GetIndex() + 1; + relaDataSection->AppendRela({offset.first, static_cast((symbolTabSection->GetIdxInSymbols(bssSecSymIdx) << + kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } else if (labelType == LabelType::kFunc) { + uint64 pos = symbolTabSection->GetIdxInSymbols(labelIdx); + relaDataSection->AppendRela({offset.first, static_cast((pos << + kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } else if (labelType == LabelType::kStrLabel || labelType == LabelType::kGlobal || + labelType == LabelType::kStatic) { + uint64 pos = symbolTabSection->GetIdxInSymbols(~dataSection->GetIndex() + 1); + addend = (labelType == LabelType::kGlobal || labelType == LabelType::kStatic) ? 
+ fixup->GetDisp() + relOffset : relOffset; + relaDataSection->AppendRela({offset.first, static_cast((pos << + kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } else { + addend = relOffset; + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + relaDataSection->AppendRela({offset.first, static_cast((symbolTabSection->GetIdxInSymbols(textSecSymIdx) << + kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } + } +} + +void ElfAssembler::HandleRodataSectionFixup() { + if (!rodataFixups.empty()) { + relaRodataSection = new RelaSection(".rela.rodata", SHT_RELA, SHF_INFO_LINK, rodataSection->GetIndex(), + k8Bits, *symbolTabSection); + RegisterSection(*relaRodataSection); + } + for (auto fixup: rodataFixups) { + int64 labelIdx = fixup->GetlabelIdx(); + std::pair offset = fixup->GetOffset(); + const uint32 relocType = R_X86_64_64; + if (labelManager.count(labelIdx) == 0) { + continue; + } + Label *label = labelManager.at(labelIdx); + LabelType labelType = label->GetLabelType(); + int64 addend = 0; + int64 relOffset = static_cast(label->GetRelOffset()); + if (labelType == LabelType::kGlobalUninitialized || labelType == LabelType::kLocalUninitialized) { + addend = relOffset; + uint64 pos = symbolTabSection->GetIdxInSymbols(~textSection->GetIndex() + 1); + relaRodataSection->AppendRela({offset.first, static_cast((pos << kLeftShift32Bits) + + (relocType & 0xffffffff)), addend}); + } + } +} + +void ElfAssembler::WriteElfFile() { + /* Init elf file header */ + InitElfHeader(); + + LayoutSections(); + + /* write header */ + Emit(&header, sizeof(header)); + + /* write sections */ + for (auto *section : sections) { + if (section->GetType() == SHT_NOBITS) { + continue; + } + SetFileOffset(section->GetOffset()); + section->WriteSection(outStream); + if (section == textSection) { + uint8 *memSpace = emitMemoryManager.allocateDataSection(emitMemoryManager.codeSpace, + section->GetSectionSize(), section->GetAlign(), section->GetName()); + memcpy_s(memSpace, section->GetSectionSize(), textSection->GetData().data(), textSection->GetDataSize()); + } + } + + /* write section table */ + SetFileOffset(header.e_shoff); + for (auto *section : sections) { + Emit(&section->GetSectionHeader(), sizeof(section->GetSectionHeader())); + } +} + +/* Append a symbol for each non-empty, required section to the symbol table section. */ +void ElfAssembler::AppendSecSymsToSymTabSec() { + for (Section *section : sections) { + if (section->GetType() != SHT_PROGBITS && section->GetType() != SHT_NOBITS) { + continue; + } + DataSection *dataSec = static_cast(section); + if (section->GetFlags() == (SHF_ALLOC | SHF_EXECINSTR) || section->GetSectionSize() != 0 || + (dataSec != nullptr && dataSec->GetDataSize() != 0)) { + auto nameIndex = strTabSection->AddString(section->GetName()); + symbolTabSection->AppendSymbol({static_cast(nameIndex), static_cast((STB_LOCAL << kLeftShift4Bits) + + (STT_SECTION & 0xf)), 0, section->GetIndex(), 0, 0}); + /* Section symbols are keyed by the negation of the section index. */ + int64 secSymIdx = ~section->GetIndex() + 1; + symbolTabSection->AppendIdxInSymbols(secSymIdx); + } + } +} + +void ElfAssembler::AppendSymsToSymTabSec() { + /* emit local symbols */ + for (auto elem : localSymTab) { + Symbol symbol = elem.first; + int64 symIdx = elem.second; + symbolTabSection->AppendSymbol(symbol); + symbolTabSection->AppendIdxInSymbols(symIdx); + } + + /* Append section symbols that may be referenced by relocation entries; section symbols are STB_LOCAL,
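so they must precede all global symbols; sh_info of .symtab, set just below, records the index of the first non-local symbol.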
*/ + AppendSecSymsToSymTabSec(); + + /* set .symtab's sh_info: the index of the first non-local symbol */ + symbolTabSection->SetInfo(symbolTabSection->GetSymbolsSize()); + + /* emit global and other symbols */ + for (auto elem : symTab) { + const Symbol &symbol = elem.first; + int64 symIdx = elem.second; + symbolTabSection->AppendSymbol(symbol); + symbolTabSection->AppendIdxInSymbols(symIdx); + } +} + +/* emit debug info */ +void ElfAssembler::EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) { + debugInfoSection = new DataSection(".debug_info", SHT_PROGBITS, 0, 1); + RegisterSection(*debugInfoSection); + /* length of .debug_info section, 4 bytes */ + size_t debugInfoLenSize = 4; + debugInfoSection->AppendData(static_cast(debugInfoLength), debugInfoLenSize); + size_t dwarfVersionSize = 2; + /* DWARF version, 2 bytes */ + debugInfoSection->AppendData(static_cast(kDwarfVersion), dwarfVersionSize); + /* debug_abbrev_offset. 4 bytes for dwarf32, 8 bytes for dwarf64 */ + int64 debugAbbrevOffset = 0; + size_t debugAbbrevOffsetSize = 4; + /* If labelSymIdx equals LLONG_MAX, there is no real label bound to the fixup. */ + AppendFixup(LLONG_MAX, kAbsolute, {debugInfoSection->GetDataSize(), debugAbbrevOffsetSize}, debugInfoFixups); + debugInfoSection->AppendData(debugAbbrevOffset, debugAbbrevOffsetSize); + /* address size. 1 byte */ + size_t byteOfkSizeOfPTR = 1; + debugInfoSection->AppendData(kSizeOfPTR, byteOfkSizeOfPTR); +} + +void ElfAssembler::EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName, + uint32 offset, uint32 size) { + auto abbrevIdUleb128 = EncodeULEB128(abbrevId); + debugInfoSection->AppendData(&abbrevIdUleb128, abbrevIdUleb128.size()); +} + +/* EmitDIAttrValue */ +void ElfAssembler::EmitDwFormString(const std::string &name) { + debugInfoSection->AppendData(&name, name.size()); +} + +void ElfAssembler::EmitDwFormStrp(uint32 strLabelId, size_t strTableSize) { + int64 labelSymIdx = CalculateStrLabelSymIdx(static_cast(strLabelId), static_cast(strTableSize)); + UpdateLabel(labelSymIdx, LabelType::kDebugStrLabel); + int64 strLabelOffset = 0; + size_t strLabelOffsetSize = 4; + AppendFixup(labelSymIdx, kAbsolute, {debugInfoSection->GetDataSize(), strLabelOffsetSize}, debugInfoFixups); + debugInfoSection->AppendData(strLabelOffset, strLabelOffsetSize); +} + +void ElfAssembler::EmitDwFormData(int32 attrValue, uint8 sizeInByte) { + debugInfoSection->AppendData(attrValue, sizeInByte); +} + +void ElfAssembler::EmitDwFormData8() { + int64 addr = 0; + size_t addrSizeInByte = 8; + debugInfoSection->AppendData(addr, addrSizeInByte); +} + +void ElfAssembler::EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx, + uint32 endLabelIdx, uint32 startLabelIdx) { + int64 addr = 0; + size_t addrSizeInByte = 8; + debugInfoSection->AppendData(addr, addrSizeInByte); +} + +void ElfAssembler::EmitLabel(uint32 funcPuIdx, uint32 labIdx) { + int64 labSymIdx = CalculateLabelSymIdx(funcPuIdx, labIdx); + UpdateLabel(labIdx); + int64 addr = 0; + size_t addrSizeInByte = 8; + AppendFixup(labSymIdx, kAbsolute64, {debugInfoSection->GetDataSize(), addrSizeInByte}, debugInfoFixups); + debugInfoSection->AppendData(addr, addrSizeInByte); +} + +void ElfAssembler::EmitDwFormSecOffset() { + int64 lineLabelOffset = 0; + size_t lineLabelOffsetSize = 4; + /* If labelSymIdx equals LLONG_MAX - 2, there is no real label bound to the fixup;
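the sentinel index is resolved to the matching debug section symbol in HandleDebugInfoSectionFixup.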
*/ + AppendFixup(LLONG_MAX - 2, kAbsolute, {debugInfoSection->GetDataSize(), lineLabelOffsetSize}, debugInfoFixups); + debugInfoSection->AppendData(lineLabelOffset, lineLabelOffsetSize); +} + +void ElfAssembler::EmitDwFormAddr(bool emitTextBegin) { + if (emitTextBegin) { + int64 addr = 0; + size_t addrSizeInByte = 8; + /* If labelSymIdx equals LLONG_MAX - 1, there is no real label bound to the fixup. */ + AppendFixup(LLONG_MAX - 1, kAbsolute64, {debugInfoSection->GetDataSize(), addrSizeInByte}, debugInfoFixups); + debugInfoSection->AppendData(addr, addrSizeInByte); + } +} + +void ElfAssembler::EmitDwFormRef4(uint64 offsetOrValue, bool unknownType, bool emitOffset) { + size_t offsetOrValueSize = 4; + debugInfoSection->AppendData(static_cast(offsetOrValue), offsetOrValueSize); +} + +void ElfAssembler::EmitDwFormExprlocCfa(uint32 dwOp) { + debugInfoSection->AppendData(1, 1); + debugInfoSection->AppendData(static_cast(dwOp), 1); +} + +void ElfAssembler::EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) { + debugInfoSection->AppendData(static_cast(k9ByteSize), 1); + debugInfoSection->AppendData(static_cast(dwOp), 1); + size_t addStrSize = 8; + debugInfoSection->AppendData(&addrStr, addStrSize); +} + +void ElfAssembler::EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) { + auto sleb128SizeEncode = EncodeSLEB128(1 + static_cast(sleb128Size)); + debugInfoSection->AppendData(&sleb128SizeEncode, sleb128SizeEncode.size()); + debugInfoSection->AppendData(static_cast(dwOp), 1); + auto fboffsetSleb128 = EncodeSLEB128(fboffset); + debugInfoSection->AppendData(&fboffsetSleb128, fboffsetSleb128.size()); +} + +void ElfAssembler::EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) { + debugInfoSection->AppendData(static_cast(k2Bytes), 1); + debugInfoSection->AppendData(static_cast(dwOp), 1); + debugInfoSection->AppendData(&dwOpName, dwOpName.size()); + int64 offset = 0; + debugInfoSection->AppendData(offset, 1); +} + +void ElfAssembler::EmitDwFormExprloc(uintptr elp) { + auto elpUleb128 = EncodeULEB128(elp); + debugInfoSection->AppendData(&elpUleb128, elpUleb128.size()); +} + +void ElfAssembler::EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName, + bool withChildren) { + if (debugAbbrevSection == nullptr) { + debugAbbrevSection = new DataSection(".debug_abbrev", SHT_PROGBITS, 0, 1); + RegisterSection(*debugAbbrevSection); + } + /* Abbrev Entry ID */ + auto abbrevIdUleb128 = EncodeULEB128(abbrevId); + debugAbbrevSection->AppendData(&abbrevIdUleb128, abbrevIdUleb128.size()); + /* TAG */ + auto tagUleb128 = EncodeULEB128(tag); + debugAbbrevSection->AppendData(&tagUleb128, tagUleb128.size()); + /* children */ + auto childrenValue = withChildren ?
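/* DW_CHILDREN_yes (1) or DW_CHILDREN_no (0) */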
1 : 0; + debugAbbrevSection->AppendData(childrenValue, 1); +} + +void ElfAssembler::EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, + const std::string &dwAtName, const std::string &dwFromName) { + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + auto aplAtUleb128 = EncodeULEB128(aplAt); + debugAbbrevSection->AppendData(&aplAtUleb128, aplAtUleb128.size()); + auto aplFromUleb128 = EncodeULEB128(aplFrom); + debugAbbrevSection->AppendData(&aplFromUleb128, aplFromUleb128.size()); +} + +void ElfAssembler::EmitDIDebugSectionEnd(SectionKind secKind) { + int64 value = 0; + size_t valueSizeInByte = 1; + switch (secKind) { + case kSDebugInfo: + debugInfoSection->AppendData(value, valueSizeInByte); + break; + case kSDebugAbbrev: + debugAbbrevSection->AppendData(value, valueSizeInByte); + break; + case kSDebugStr: + /* write the '\0' that terminates each .debug_str entry */ + debugStrSection->AppendData(value, valueSizeInByte); + break; + case kSBss: + case kSComm: + case kSData: + case kSRodata: + case kSTbss: + case kSTdata: + case kSText: + default: + assert(false && "unsupported SectionKind in EmitDIDebugSectionEnd"); + break; + } +} + +void ElfAssembler::EmitDIDebugStrSection(const std::vector &strps, const std::vector &debugStrs, + uint64 size, size_t strTableSize) { + debugStrSection = new DataSection(".debug_str", SHT_PROGBITS, SHF_MASKPROC, 1); + RegisterSection(*debugStrSection); + for (int i = 0; i < static_cast(debugStrs.size()); i++) { + int64 strLabSymIdx = CalculateStrLabelSymIdx(size, strps[i], strTableSize); + UpdateLabel(strLabSymIdx, LabelType::kDebugStrLabel, debugStrSection->GetDataSize()); + debugStrSection->AppendData(&debugStrs[i], debugStrs[i].size()); + EmitDIDebugSectionEnd(kSDebugStr); + } +} + +void ElfAssembler::HandleDebugInfoSectionFixup() { + if (!debugInfoFixups.empty()) { + relaDebugInfoSection = new RelaSection(".rela.debug_info", SHT_RELA, SHF_INFO_LINK, + debugInfoSection->GetIndex(), k8Bits, *symbolTabSection); + RegisterSection(*relaDebugInfoSection); + } + for (auto fixup : debugInfoFixups) { + int64 labelIdx = fixup->GetlabelIdx(); + const std::pair &offsetPair = fixup->GetOffset(); + FixupKind fixupKind = fixup->GetFixupKind(); + uint64 relocType = GetRelaType(fixupKind); + int64 addend = fixup->GetDisp(); + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + int64 debugLineSecSymIdx = ~debugLineSection->GetIndex() + 1; + int64 abbrevSecSymIdx = ~debugAbbrevSection->GetIndex() + 1; + uint64 pos = labelIdx == LLONG_MAX ? symbolTabSection->GetIdxInSymbols(debugLineSecSymIdx) : ( + labelIdx == LLONG_MAX - 1 ?
symbolTabSection->GetIdxInSymbols(textSecSymIdx) : + symbolTabSection->GetIdxInSymbols(abbrevSecSymIdx)); + if (!labelManager.count(labelIdx)) { + relaDebugInfoSection->AppendRela({offsetPair.first, static_cast((pos << kLeftShift32Bits) + + (relocType & 0xffffffff)), addend}); + continue; + } + Label *label = labelManager.at(labelIdx); + LabelType labelType = label->GetLabelType(); + addend = label->GetRelOffset(); + if (labelType == LabelType::kBBLabel) { + pos = symbolTabSection->GetIdxInSymbols(textSecSymIdx); + } else if (labelType == LabelType::kDebugStrLabel) { + pos = symbolTabSection->GetIdxInSymbols(~debugStrSection->GetIndex() + 1); + } else { + assert(false && "unsupported label type in HandleDebugInfoSectionFixup!"); + } + relaDebugInfoSection->AppendRela({offsetPair.first, static_cast((pos << kLeftShift32Bits) + + (relocType & 0xffffffff)), addend}); + } +} + +/* start of X64 instructions */ +/* mov */ +void ElfAssembler::Mov(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x88, 0); +} + +void ElfAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + bool isSymbol = immOpnd.second; + uint64 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + uint8 regSize = GetRegSize(reg); + uint8 regId = GetRegCodeId(reg); + uint8 code = 0xB0 | ((regSize == k8Bits ? 0 : 1) << kLeftShift3Bits); + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (GetRex(reg) != 0) { + Encodeb(GetRex(reg)); + } + if (regSize == k64Bits && (isSymbol || Is32Bits(imm))) { + Encodeb(0xC7); + code = 0xC0; + regSize = k32Bits; + } + size_t offsetSize = isSymbol ? k64Bits : regSize / k8Bits; + Encodeb(code | regId); + if (isSymbol) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kAbsolute64, {static_cast(codeBuff.size()), offsetSize}, fixups); + imm = 0; + } + Encodeb(imm, offsetSize); +} + +void ElfAssembler::Mov(InsnSize insnSize, const Mem &mem, Reg reg) { + if (GetRegId(reg) == 0 && mem.memType == kOnlyDisp) { + MovRegAndDisp(reg, mem, 0xA0); + } else { + OpRM(reg, mem, 0x8A, 0); + } +} + +void ElfAssembler::Mov(InsnSize insnSize, Reg reg, const Mem &mem) { + if (GetRegId(reg) == 0 && mem.memType == kOnlyDisp) { + MovRegAndDisp(reg, mem, 0xA2); + } else { + OpRM(reg, mem, 0x88, 0); + } +} + +void ElfAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + bool isSymbol = immOpnd.second; + uint32 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + uint8 immBit = Is8Bits(imm) ? k8Bits : (Is16Bits(imm) ? k16Bits : k32Bits); + if (mem.size == k8Bits) { + immBit = k8Bits; + } + if (immBit == k16Bits && (mem.size == k64Bits || mem.size == k32Bits)) { + immBit = k32Bits; /* in 32/64-bit mode the immediate cannot use a 16-bit encoding. */ + } + immBit = isSymbol ? k64Bits : immBit; + size_t immSize = immBit / k8Bits; + OpMem(mem, 0xC6, 0, 0); + if (isSymbol) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kAbsolute64, {static_cast(codeBuff.size()), immSize}, fixups); + imm = 0; + } + Encodeb(imm, immSize); +} + +/* movabs */ +void ElfAssembler::Movabs(const ImmOpnd &immOpnd, Reg reg) { + bool isSymbol = immOpnd.second; + uint64 imm = static_cast(immOpnd.first); /* When isSymbol is true, this holds a symbol index;
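an 8-byte zero immediate is emitted and later patched through a kAbsolute64 fixup.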
*/ + if (GetRex(reg) != 0) { + Encodeb(GetRex(reg)); + } + Encodeb(0xB8 | GetRegCodeId(reg)); + size_t offsetSize = 8; + if (isSymbol) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kAbsolute64, {static_cast(codeBuff.size()), offsetSize}, fixups); + imm = 0; + } + Encodeb(imm, offsetSize); +} + +void ElfAssembler::Movabs(int64 symIdx, Reg reg) { + if (GetRex(reg) != 0) { + Encodeb(GetRex(reg)); + } + Encodeb(0xB8 | GetRegCodeId(reg)); + size_t offsetSize = 8; + size_t offset = codeBuff.size() - offsetSize; + UpdateLabel(symIdx); + AppendFixup(symIdx, kAbsolute64, {offset, offsetSize}, fixups); + uint8 imm = 0; + Encodeb(imm, offsetSize); +} + +/* push */ +void ElfAssembler::Push(InsnSize insnSize, Reg reg) { + OpPushPop(reg, 0x50); +} + +/* pop */ +void ElfAssembler::Pop(InsnSize insnSize, Reg reg) { + OpPushPop(reg, 0x58); +} + +/* lea */ +void ElfAssembler::Lea(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x8C); +} + +/* movzx */ +void ElfAssembler::MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x0F, 0xB6 | (GetRegSize(srcReg) == k8Bits ? 0 : 1), true); +} + +void ElfAssembler::MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0F, 0xB6 | (mem.size == k8Bits ? 0 : 1), true); +} + +/* movsx */ +void ElfAssembler::MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) { + uint8 code1 = 0x0F; + uint8 code2 = 0xBE | (GetRegSize(srcReg) == k8Bits ? 0 : 1); + if (GetRegSize(srcReg) == k32Bits && GetRegSize(destReg) == k64Bits) { + code1 = 0x63; + code2 = 0; + } + OpRR(srcReg, destReg, code1, code2, true); +} + +void ElfAssembler::MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) { + uint8 code1 = 0x0F; + uint8 code2 = 0xBE | (mem.size == k8Bits ? 
0 : 1); + if (mem.size == k32Bits && GetRegSize(reg) == k64Bits) { + code1 = 0x63; + code2 = 0; + } + OpRM(reg, mem, code1, code2, true); +} + +/* add */ +void ElfAssembler::Add(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x00); +} + +void ElfAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpImmAndReg(immOpnd, reg, 0x00, 0); +} + +void ElfAssembler::Add(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x02); +} + +void ElfAssembler::Add(InsnSize insnSize, Reg reg, const Mem &mem) { + OpRM(reg, mem, 0x00); +} + +void ElfAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpImmAndMem(immOpnd, mem, 0); +} + +/* sub */ +void ElfAssembler::Sub(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x28); +} + +void ElfAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpImmAndReg(immOpnd, reg, 0x28, kSubModReg); +} + + +void ElfAssembler::Sub(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x2A); +} + +void ElfAssembler::Sub(InsnSize insnSize, Reg reg, const Mem &mem) { + OpRM(reg, mem, 0x28); +} + +void ElfAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpImmAndMem(immOpnd, mem, kSubModReg); +} + +/* and */ +void ElfAssembler::And(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x20); +} + +void ElfAssembler::And(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x21); +} + +void ElfAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpImmAndReg(immOpnd, reg, 0x20, kAndModReg); +} + +void ElfAssembler::And(InsnSize insnSize, Reg reg, const Mem &mem) { + OpRM(reg, mem, 0x20); +} + +void ElfAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpImmAndMem(immOpnd, mem, kAndModReg); +} + +/* or */ +void ElfAssembler::Or(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x08); +} + +void ElfAssembler::Or(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0A); +} + +void ElfAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpImmAndReg(immOpnd, reg, 0x08, kOrModReg); +} + +void ElfAssembler::Or(InsnSize insnSize, Reg reg, const Mem &mem) { + OpRM(reg, mem, 0x08); +} + +void ElfAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpImmAndMem(immOpnd, mem, kOrModReg); +} + +/* xor */ +void ElfAssembler::Xor(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x30); +} + +void ElfAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpImmAndReg(immOpnd, reg, 0x30, kXorModReg); +} + +void ElfAssembler::Xor(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x32); +} + +void ElfAssembler::Xor(InsnSize insnSize, Reg reg, const Mem &mem) { + OpRM(reg, mem, 0x30); +} + +void ElfAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpImmAndMem(immOpnd, mem, kXorModReg); +} + +/* not */ +void ElfAssembler::Not(InsnSize insnSize, Reg reg) { + OpReg(reg, 0xF6, 0, kNotModReg); +} + +void ElfAssembler::Not(InsnSize insnSize, const Mem &mem) { + OpMem(mem, 0xF6, 0, kNotModReg); +} + +/* neg */ +void ElfAssembler::Neg(InsnSize insnSize, Reg reg) { + OpReg(reg, 0xF6, 0, kNegModReg); +} + +void ElfAssembler::Neg(InsnSize insnSize, const Mem &mem) { + OpMem(mem, 0xF6, 0, kNegModReg); +} + +/* div & cwd, cdq, cqo */ +void ElfAssembler::Idiv(InsnSize insnSize, Reg reg) { + OpReg(reg, 0xF6, 0, kIdivModReg); +} + +void 
ElfAssembler::Idiv(InsnSize insnSize, const Mem &mem) { + OpMem(mem, 0xF6, 0, kIdivModReg); +} + +void ElfAssembler::Div(InsnSize insnSize, Reg reg) { + OpReg(reg, 0xF6, 0, kDivModReg); +} + +void ElfAssembler::Div(InsnSize insnSize, const Mem &mem) { + OpMem(mem, 0xF6, 0, kDivModReg); +} + +void ElfAssembler::Cwd() { + Encodeb(0x66); + Encodeb(0x99); +} + +void ElfAssembler::Cdq() { + Encodeb(0x99); +} + +void ElfAssembler::Cqo() { + Encodeb(0x48); + Encodeb(0x99); +} + +/* shl */ +void ElfAssembler::Shl(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpReg(destReg, 0xD2, 0, kShlModReg); +} + +void ElfAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpReg(reg, 0xC0, 0, kShlModReg); + Encodeb(static_cast(immOpnd.first)); +} + +void ElfAssembler::Shl(InsnSize insnSize, Reg reg, const Mem &mem) { + OpMem(mem, 0xD2, 0, kShlModReg); +} + +void ElfAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpMem(mem, 0xC0, 0, kShlModReg); + Encodeb(static_cast(immOpnd.first)); +} + +/* sar */ +void ElfAssembler::Sar(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpReg(destReg, 0xD2, 0, kSarModReg); +} + +void ElfAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpReg(reg, 0xC0, 0, kSarModReg); + Encodeb(static_cast(immOpnd.first)); +} + +void ElfAssembler::Sar(InsnSize insnSize, Reg reg, const Mem &mem) { + OpMem(mem, 0xD2, 0, kSarModReg); +} + +void ElfAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpMem(mem, 0xC0, 0, kSarModReg); + Encodeb(static_cast(immOpnd.first)); +} + +/* shr */ +void ElfAssembler::Shr(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpReg(destReg, 0xD2, 0, kShrModReg); +} + +void ElfAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) { + OpReg(reg, 0xC0, 0, kShrModReg); + Encodeb(static_cast(immOpnd.first)); +} + +void ElfAssembler::Shr(InsnSize insnSize, Reg reg, const Mem &mem) { + OpMem(mem, 0xD2, 0, kShrModReg); +} + +void ElfAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpMem(mem, 0xC0, 0, kShrModReg); + Encodeb(static_cast(immOpnd.first)); +} + +/* jmp */ +void ElfAssembler::Jmp(Reg reg) { + OpReg(reg, 0xFF, 0, kJmpModReg); +} + +void ElfAssembler::Jmp(const Mem &mem) { + OpMem(mem, 0xFF, 0, kJmpModReg); +} + +void ElfAssembler::Jmp(int64 symIdx) { + JmpToLabel(symIdx, 0xE9); +} + +/* jump condition */ +void ElfAssembler::Je(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x84); +} + +void ElfAssembler::Ja(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x87); +} + +void ElfAssembler::Jae(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x83); +} + +void ElfAssembler::Jne(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x85); +} + +void ElfAssembler::Jb(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x82); +} + +void ElfAssembler::Jbe(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x86); +} + +void ElfAssembler::Jg(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x8F); +} + +void ElfAssembler::Jge(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x8D); +} + +void ElfAssembler::Jl(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x8C); +} + +void ElfAssembler::Jle(int64 symIdx) { + JmpToLabel(symIdx, 0x0F, 0x8E); +} + +/* cmp */ +void ElfAssembler::Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x38); +} + +void ElfAssembler::Cmp(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x3A); +} + +void ElfAssembler::Cmp(InsnSize insnSize, Reg reg, const Mem &mem) { + OpRM(reg, mem, 0x38); +} + + +void ElfAssembler::Cmp(InsnSize insnSize, 
const ImmOpnd &immOpnd, Reg reg) { + OpImmAndReg(immOpnd, reg, 0x38, kCmpModReg); +} + +void ElfAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) { + OpImmAndMem(immOpnd, mem, kCmpModReg); +} + +/* test */ +void ElfAssembler::Test(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(srcReg, destReg, 0x84); +} + +/* setcc */ +void ElfAssembler::Setbe(Reg reg) { + OpReg(reg, 0x0F, 0x96, 0); +} + +void ElfAssembler::Setbe(const Mem &mem) { + OpMem(mem, 0x0F, 0x96, 0); +} + +void ElfAssembler::Setle(Reg reg) { + OpReg(reg, 0x0F, 0x9E, 0); +} + +void ElfAssembler::Setle(const Mem &mem) { + OpMem(mem, 0x0F, 0x9E, 0); +} + +void ElfAssembler::Setae(Reg reg) { + OpReg(reg, 0x0F, 0x93, 0); +} + +void ElfAssembler::Setae(const Mem &mem) { + OpMem(mem, 0x0F, 0x93, 0); +} + +void ElfAssembler::Setge(Reg reg) { + OpReg(reg, 0x0F, 0x9D, 0); +} +void ElfAssembler::Setge(const Mem &mem) { + OpMem(mem, 0x0F, 0x9D, 0); +} + +void ElfAssembler::Setne(Reg reg) { + OpReg(reg, 0x0F, 0x95, 0); +} + +void ElfAssembler::Setne(const Mem &mem) { + OpMem(mem, 0x0F, 0x95, 0); +} + +void ElfAssembler::Setb(Reg reg) { + OpReg(reg, 0x0F, 0x92, 0); +} + +void ElfAssembler::Setb(const Mem &mem) { + OpMem(mem, 0x0F, 0x92, 0); +} + +void ElfAssembler::Setl(Reg reg) { + OpReg(reg, 0x0F, 0x9C, 0); +} + +void ElfAssembler::Setl(const Mem &mem) { + OpMem(mem, 0x0F, 0x9C, 0); +} + +void ElfAssembler::Seta(Reg reg) { + OpReg(reg, 0x0F, 0x97, 0); +} + +void ElfAssembler::Seta(const Mem &mem) { + OpMem(mem, 0x0F, 0x97, 0); +} + +void ElfAssembler::Setg(Reg reg) { + OpReg(reg, 0x0F, 0x9F, 0); +} + +void ElfAssembler::Setg(const Mem &mem) { + OpMem(mem, 0x0F, 0x9F, 0); +} + +void ElfAssembler::Sete(Reg reg) { + OpReg(reg, 0x0F, 0x94, 0); +} + +void ElfAssembler::Sete(const Mem &mem) { + OpMem(mem, 0x0F, 0x94, 0); +} + +/* cmov */ +void ElfAssembler::Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x47); +} + +void ElfAssembler::Cmova(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x47); +} +void ElfAssembler::Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x43); +} + +void ElfAssembler::Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x43); +} + +void ElfAssembler::Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x42); +} + +void ElfAssembler::Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x42); +} + +void ElfAssembler::Cmovbe(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x46); +} + +void ElfAssembler::Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x46); +} + +void ElfAssembler::Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x44); +} + +void ElfAssembler::Cmove(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x44); +} + +void ElfAssembler::Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x4F); +} + +void ElfAssembler::Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x4F); +} + +void ElfAssembler::Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x4D); +} + +void ElfAssembler::Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x4D); +} + +void ElfAssembler::Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x4C); +} + +void 
ElfAssembler::Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x4C); +} + +void ElfAssembler::Cmovle(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x4E); +} + +void ElfAssembler::Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x4E); +} + +void ElfAssembler::Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpCmovcc(srcReg, destReg, 0x0F, 0x45); +} + +void ElfAssembler::Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) { + OpRM(reg, mem, 0x0E, 0x45); +} + +/* call */ +void ElfAssembler::Call(InsnSize insnSize, Reg reg) { + OpReg(reg, 0xFF, 0, kCallModReg); +} + +void ElfAssembler::Call(InsnSize insnSize, const Mem &mem) { + OpMem(mem, 0xFF, 0, kCallModReg); +} + +void ElfAssembler::Call(InsnSize insnSize, int64 symIdx) { + Encodeb(0xE8); + if (!CanEncodeLabel(symIdx)) { + size_t offsetSize = 4; + UpdateLabel(symIdx, LabelType::kFunc); + AppendFixup(symIdx, kPLT, {static_cast(codeBuff.size()), offsetSize}, fixups); + uint8 imm = 0; + Encodeb(imm, offsetSize); + } +} + +/* ret */ +void ElfAssembler::Ret() { + Encodeb(0xC3); +} + +/* leave */ +void ElfAssembler::Leave() { + Encodeb(0xC9); +} + +/* imul */ +void ElfAssembler::Imul(InsnSize insnSize, Reg srcReg, Reg destReg) { + OpRR(destReg, srcReg, 0x0F, 0xAF); +} + +/* nop */ +void ElfAssembler::Nop(InsnSize insnSize, const Mem &mem) { + OpMem(mem, 0x0E, 0x1F, 0); +} + +void ElfAssembler::Nop() { + Encodeb(0x90); +} + +/* byte swap */ +void ElfAssembler::Bswap(InsnSize insnSize, Reg reg) { + uint8 rex = GetRex(reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(0x0F); + Encodeb(0xC8 | GetRegCodeId(reg)); +} + +void ElfAssembler::Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) { + /* if the reg is ax, eax or rax */ + if ((GetRegId(srcReg) == 0 || GetRegId(destReg) == 0) && GetRegSize(srcReg) != k8Bits) { + uint8 rex = GetRex(srcReg, destReg); + if (rex != 0) { + Encodeb(rex); + } else if (GetRegSize(srcReg) == k16Bits) { + Encodeb(0x66); + } + uint8 regCodeId = GetRegId(srcReg) == 0 ? GetRegCodeId(destReg) : GetRegCodeId(srcReg); + Encodeb(0x90 | regCodeId); + } else { + OpRR(srcReg, destReg, 0x86); + } +} +/* end of X64 instructions */ +} /* namespace assembler */ \ No newline at end of file diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..15a58bb68404ef73a94dd375e8fea206382e8815 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -0,0 +1,1271 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_MPISel.h" +#include "x64_memlayout.h" +#include "x64_cgfunc.h" +#include "x64_isa_tbl.h" +#include "x64_cg.h" +#include "isel.h" + +namespace maplebe { +/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) 
*/ +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) { + PrimType symType; + int32 fieldOffset = 0; + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + DEBUG_ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); +} +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) { + MIRStorageClass storageClass = symbol.GetStorageClass(); + MemOperand *result = nullptr; + RegOperand *stackBaseReg = nullptr; + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + auto *symloc = static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex())); + DEBUG_ASSERT(symloc != nullptr, "sym loc should have been defined"); + stackBaseReg = static_cast(cgFunc)->GetBaseReg(*symloc); + int stOfst = cgFunc->GetBaseOffset(*symloc); + /* Create field symbols in aggregate structure */ + result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize); + result->SetBaseRegister(*stackBaseReg); + result->SetOffsetOperand(GetCurFunc()->GetOpndBuilder()->CreateImm( + k64BitSize, stOfst + offset)); + CHECK_FATAL(result != nullptr, "NIY"); + return *result; + } + if ((storageClass == kScGlobal) || (storageClass == kScExtern) || + (storageClass == kScPstatic) || (storageClass == kScFstatic)) { + stackBaseReg = &GetCurFunc()->GetOpndBuilder()->CreatePReg(x64::RIP, k64BitSize, kRegTyInt); + result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize); + ImmOperand &stOfstOpnd = GetCurFunc()->GetOpndBuilder()->CreateImm(symbol, offset, 0); + result->SetBaseRegister(*stackBaseReg); + result->SetOffsetOperand(stOfstOpnd); + CHECK_FATAL(result != nullptr, "NIY"); + return *result; + } + CHECK_FATAL(false, "NIY"); + return *result; +} + +void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) { + MIRType *retType = cgFunc->GetFunction().GetReturnType(); + X64CallConvImpl retLocator(cgFunc->GetBecommon()); + CCLocInfo retMech; + retLocator.LocateRetVal(*retType, retMech); + if (retMech.GetRegCount() == 0) { + return; + } + std::vector retRegs; + if (!cgFunc->GetFunction().StructReturnedInRegs() || + retNode.Opnd(0)->GetOpCode() == OP_constval) { + PrimType oriPrimType = retMech.GetPrimTypeOfReg0(); + regno_t retReg = retMech.GetReg0(); + DEBUG_ASSERT(retReg != kRinvalid, "NIY"); + RegOperand &retOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, GetPrimTypeBitSize(oriPrimType), + cgFunc->GetRegTyFromPrimTy(oriPrimType)); + retRegs.push_back(&retOpnd); + SelectCopy(retOpnd, opnd, oriPrimType, retNode.Opnd(0)->GetPrimType()); + } else { + CHECK_FATAL(opnd.IsMemoryAccessOperand(), "NIY"); + MemOperand &memOpnd = static_cast(opnd); + ImmOperand *offsetOpnd = memOpnd.GetOffsetOperand(); + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + + PrimType oriPrimType0 = retMech.GetPrimTypeOfReg0(); + regno_t retReg0 = retMech.GetReg0(); + DEBUG_ASSERT(retReg0 != kRinvalid, "NIY"); + RegOperand &retOpnd0 = cgFunc->GetOpndBuilder()->CreatePReg(retReg0, GetPrimTypeBitSize(oriPrimType0), + cgFunc->GetRegTyFromPrimTy(oriPrimType0)); + MemOperand &rhsMemOpnd0 = 
cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType0)); + rhsMemOpnd0.SetBaseRegister(*baseOpnd); + rhsMemOpnd0.SetOffsetOperand(*offsetOpnd); + retRegs.push_back(&retOpnd0); + SelectCopy(retOpnd0, rhsMemOpnd0, oriPrimType0); + + regno_t retReg1 = retMech.GetReg1(); + if (retReg1 != kRinvalid) { + PrimType oriPrimType1 = retMech.GetPrimTypeOfReg1(); + RegOperand &retOpnd1 = cgFunc->GetOpndBuilder()->CreatePReg(retReg1, GetPrimTypeBitSize(oriPrimType1), + cgFunc->GetRegTyFromPrimTy(oriPrimType1)); + MemOperand &rhsMemOpnd1 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType1)); + ImmOperand &newOffsetOpnd = static_cast(*offsetOpnd->Clone(*cgFunc->GetMemoryPool())); + newOffsetOpnd.SetValue(newOffsetOpnd.GetValue() + GetPrimTypeSize(oriPrimType0)); + rhsMemOpnd1.SetBaseRegister(*baseOpnd); + rhsMemOpnd1.SetOffsetOperand(newOffsetOpnd); + retRegs.push_back(&retOpnd1); + SelectCopy(retOpnd1, rhsMemOpnd1, oriPrimType1); + } + } + /* for optimization ,insert pseudo ret ,in case rax,rdx is removed*/ + SelectPseduoForReturn(retRegs); +} + +void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) { + for (auto retReg : retRegs) { + MOperator mop = x64::MOP_pseudo_ret_int; + Insn &pInsn = cgFunc->GetInsnBuilder()->BuildInsn(mop, X64CG::kMd[mop]); + cgFunc->GetCurBB()->AppendInsn(pInsn); + pInsn.AddOpndChain(*retReg); + } +} + +void X64MPIsel::SelectReturn() { + /* jump to epilogue */ + MOperator mOp = x64::MOP_jmpq_l; + LabelNode *endLabel = cgFunc->GetEndLabel(); + auto endLabelName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(endLabel->GetLabelIdx()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(endLabelName.c_str(), endLabel->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB()); +} + +void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) { + int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, + (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { + CHECK_FATAL(parmNum < kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing"); + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + 
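+/*
+ * Note on aggregate argument lowering (SysV AMD64 convention, as implemented
+ * by the two helpers above): an aggregate larger than 16 bytes, or one that
+ * arrives when no parameter register is left, is copied 8 bytes at a time
+ * into the outgoing-argument stack area; a small aggregate is split into
+ * 8-byte chunks, each loaded into the next available parameter register.
+ * E.g. a 12-byte struct at (%rbx) is passed roughly as
+ *     movq 0(%rbx), <reg0>
+ *     movq 8(%rbx), <reg1>
+ * where reg0/reg1 are whatever registers the calling-convention locator
+ * (X64CallConvImpl) hands out next.
+ */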
+std::tuple<Operand *, size_t, MIRType *> X64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) {
+    /* get mirType info */
+    auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr);
+    MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType);
+    /* get symbol memOpnd info */
+    MemOperand *symMemOpnd = nullptr;
+    if (argExpr.GetOpCode() == OP_dread) {
+        AddrofNode &dread = static_cast<AddrofNode &>(argExpr);
+        MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx());
+        symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID());
+    } else if (argExpr.GetOpCode() == OP_iread) {
+        IreadNode &iread = static_cast<IreadNode &>(argExpr);
+        symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset);
+    } else {
+        CHECK_FATAL(false, "unsupported opcode");
+    }
+    return {symMemOpnd, symInfo.size, mirType};
+}
+
+void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused) {
+    auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr);
+    DEBUG_ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd");
+    MemOperand &memOpnd = static_cast<MemOperand &>(*argOpnd);
+
+    CCLocInfo ploc;
+    parmLocator.LocateNextParm(*mirType, ploc);
+    if (isArgUnused) {
+        return;
+    }
+
+    /* create call struct param pass */
+    if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) {
+        CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset);
+    } else {
+        CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state");
+        CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0);
+        if (ploc.reg1 != kRinvalid) {
+            CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1);
+        }
+        if (ploc.reg2 != kRinvalid) {
+            CreateCallStructParamPassByReg(memOpnd, ploc.reg2, 2);
+        }
+        if (ploc.reg3 != kRinvalid) {
+            CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3);
+        }
+    }
+}
+
+/*
+ * SelectParmList generates an instruction for each of the parameters
+ * to load the parameter value into the corresponding register.
+ * We return a list of registers to the call instruction because
+ * they may be needed in the register allocation phase.
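+ * Arguments that the calling convention assigns no register are stored
+ * directly into the outgoing stack area at their computed memOffset.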
+ */ +void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds) { + paramPassByReg.clear(); + + /* for IcallNode, the 0th operand is the function pointer */ + size_t argBegin = 0; + if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) { + ++argBegin; + } + + MIRFunction *callee = nullptr; + if (naryNode.GetOpCode() == OP_call) { + PUIdx calleePuIdx = static_cast(naryNode).GetPUIdx(); + callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); + } + X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(naryNode)); + CCLocInfo ploc; + for (size_t i = argBegin; i < naryNode.NumOpnds(); ++i) { + BaseNode *argExpr = naryNode.Opnd(i); + DEBUG_ASSERT(argExpr != nullptr, "not null check"); + PrimType primType = argExpr->GetPrimType(); + DEBUG_ASSERT(primType != PTY_void, "primType should not be void"); + bool isArgUnused = (callee != nullptr && callee->GetFuncDesc().IsArgUnused(i)); + if (primType == PTY_agg) { + SelectParmListForAggregate(*argExpr, parmLocator, isArgUnused); + continue; + } + + Operand *argOpnd = HandleExpr(naryNode, *argExpr); + DEBUG_ASSERT(argOpnd != nullptr, "not null check"); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + parmLocator.LocateNextParm(*mirType, ploc); + + /* skip unused args */ + if (isArgUnused) { + continue; + } + + if (ploc.reg0 != x64::kRinvalid) { + /* load to the register. */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + paramPassByReg.push_back({&parmRegOpnd, argOpnd, primType}); + } else { + /* load to stack memory */ + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &actMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(baseOpnd, ploc.memOffset, + GetPrimTypeBitSize(primType)); + SelectCopy(actMemOpnd, *argOpnd, primType); + } + DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NIY"); + } + + /* param pass by reg */ + for (auto [regOpnd, argOpnd, primType] : paramPassByReg) { + DEBUG_ASSERT(regOpnd != nullptr, "not null check"); + DEBUG_ASSERT(argOpnd != nullptr, "not null check"); + SelectCopy(*regOpnd, *argOpnd, primType); + srcOpnds.PushOpnd(*regOpnd); + } +} + +RegOperand &X64MPIsel::SelectSpecialRegread(PregIdx pregIdx, PrimType primType) { + switch (-pregIdx) { + case kSregFp: { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RFP, k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + } + case kSregSp: { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + } + default: { + CHECK_FATAL(false, "ERROR: Not supported special register!"); + } + } +} + +bool X64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +void X64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) { + CHECK_FATAL((aggSize > 0) && (aggSize <= k16ByteSize), "out of range."); + RegOperand *baseOpnd = symbolMem.GetBaseRegister(); + int32 stOffset = symbolMem.GetOffsetOperand()->GetValue(); + bool isCopyOneReg = (aggSize <= k8ByteSize); + int32 extraSize = (aggSize % k8ByteSize) * kBitsPerByte; + if (extraSize == 0) { + extraSize = k64BitSize; + } else if (extraSize <= k8BitSize) { + extraSize = k8BitSize; + } else if (extraSize <= 
k16BitSize) { + extraSize = k16BitSize; + } else if (extraSize <= k32BitSize) { + extraSize = k32BitSize; + } else { + extraSize = k64BitSize; + } + /* generate move from return registers(rax, rdx) to mem of symbol */ + PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraSize); + /* mov %rax mem */ + RegOperand ®Rhs0 = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, + (isCopyOneReg ? extraSize : k64BitSize), kRegTyInt); + MemOperand &memSymbo0 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, + static_cast(stOffset), isCopyOneReg ? extraSize : k64BitSize); + SelectCopy(memSymbo0, regRhs0, isCopyOneReg ? extraTy : PTY_u64); + /* mov %rdx mem */ + if (!isCopyOneReg) { + RegOperand ®Rhs1 = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, extraSize, kRegTyInt); + MemOperand &memSymbo1 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, + static_cast(stOffset + k8ByteSize), extraSize); + SelectCopy(memSymbo1, regRhs1, extraTy); + } + return; +} + +void X64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) { + /* in x86-64, 8 bytes data is copied at a time */ + uint32 copyTimes = copySize / k8ByteSize; + uint32 extraCopySize = copySize % k8ByteSize; + ImmOperand *stOfstLhs = lhs.GetOffsetOperand(); + ImmOperand *stOfstRhs = rhs.GetOffsetOperand(); + RegOperand *baseLhs = lhs.GetBaseRegister(); + RegOperand *baseRhs = rhs.GetBaseRegister(); + if (copySize < 40U) { + for (int32 i = 0; i < copyTimes; ++i) { + /* prepare dest addr */ + MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + memOpndLhs.SetBaseRegister(*baseLhs); + ImmOperand &newStOfstLhs = static_cast(*stOfstLhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstLhs.SetValue(newStOfstLhs.GetValue() + i * k8ByteSize); + memOpndLhs.SetOffsetOperand(newStOfstLhs); + /* prepare src addr */ + MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + memOpndRhs.SetBaseRegister(*baseRhs); + ImmOperand &newStOfstRhs = static_cast(*stOfstRhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstRhs.SetValue(newStOfstRhs.GetValue() + i * k8ByteSize); + memOpndRhs.SetOffsetOperand(newStOfstRhs); + /* copy data */ + SelectCopy(memOpndLhs, memOpndRhs, PTY_a64); + } + } else { + /* adopt rep insn in x64's isa */ + std::vector opndVec; + opndVec.push_back(PrepareMemcpyParm(lhs, MOP_leaq_m_r)); + opndVec.push_back(PrepareMemcpyParm(rhs, MOP_leaq_m_r)); + opndVec.push_back(PrepareMemcpyParm(copySize)); + SelectLibCallNoReturn("memcpy", opndVec, PTY_a64); + return; + } + /* take care of extra content at the end less than the unit */ + if (extraCopySize == 0) { + return; + } + extraCopySize = ((extraCopySize <= k4ByteSize) ? 
k4ByteSize : k8ByteSize) * kBitsPerByte;
+    PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraCopySize);
+    MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize);
+    memOpndLhs.SetBaseRegister(*baseLhs);
+    ImmOperand &newStOfstLhs = static_cast<ImmOperand &>(*stOfstLhs->Clone(*cgFunc->GetMemoryPool()));
+    newStOfstLhs.SetValue(newStOfstLhs.GetValue() + copyTimes * k8ByteSize);
+    memOpndLhs.SetOffsetOperand(newStOfstLhs);
+    MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize);
+    memOpndRhs.SetBaseRegister(*baseRhs);
+    ImmOperand &newStOfstRhs = static_cast<ImmOperand &>(*stOfstRhs->Clone(*cgFunc->GetMemoryPool()));
+    newStOfstRhs.SetValue(newStOfstRhs.GetValue() + copyTimes * k8ByteSize);
+    memOpndRhs.SetOffsetOperand(newStOfstRhs);
+    SelectCopy(memOpndLhs, memOpndRhs, extraTy);
+}
+
+void X64MPIsel::SelectLibCallNoReturn(const std::string &funcName, std::vector<Operand *> &opndVec, PrimType primType) {
+    /* generate libcall without return value */
+    std::vector<PrimType> pt(opndVec.size(), primType);
+    SelectLibCallNArg(funcName, opndVec, pt);
+    return;
+}
+
+void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector<Operand *> &opndVec,
+                                  std::vector<PrimType> pt) {
+    std::string newName = funcName;
+    MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+    st->SetNameStrIdx(newName);
+    st->SetStorageClass(kScExtern);
+    st->SetSKind(kStFunc);
+
+    /* setup the type of the callee function */
+    std::vector<TyIdx> vec;
+    std::vector<TypeAttrs> vecAt;
+    for (size_t i = 1; i < opndVec.size(); ++i) {
+        vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(pt[i])]->GetTypeIndex());
+        vecAt.emplace_back(TypeAttrs());
+    }
+
+    /* only support no return function */
+    MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast<uint32>(PTY_void));
+    st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex());
+
+    /* setup actual parameters */
+    ListOperand &paramOpnds = cgFunc->GetOpndBuilder()->CreateList();
+
+    X64CallConvImpl parmLocator(cgFunc->GetBecommon());
+    CCLocInfo ploc;
+    for (size_t i = 0; i < opndVec.size(); ++i) {
+        DEBUG_ASSERT(pt[i] != PTY_void, "primType check");
+        MIRType *ty;
+        ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(pt[i])];
+        Operand *stOpnd = opndVec[i];
+        DEBUG_ASSERT(stOpnd->IsRegister(), "exp result should be reg");
+        RegOperand *expRegOpnd = static_cast<RegOperand *>(stOpnd);
+        parmLocator.LocateNextParm(*ty, ploc);
+        if (ploc.reg0 != 0) { /* load to the register */
+            RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0,
+                expRegOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(pt[i]));
+            SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]);
+            paramOpnds.PushOpnd(parmRegOpnd);
+        }
+        DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
+    }
+
+    MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false);
+    Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym);
+    ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList();
+    Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds);
+
+    callInsn.SetRetType(Insn::kRegInt);
+    if (retType != nullptr) {
+        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
+        callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType()));
+    }
+    return;
+}
+
+RegOperand *X64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) {
+    RegOperand &regResult = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
+    Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp,
X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, copySize); + SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) { + /* rhs is Func Return, it must be from Regread */ + if (opndRhs.IsRegister()) { + SelectIntAggCopyReturn(symbolMem, lhsInfo.size); + return; + } + /* In generally, rhs is from Dread/Iread */ + CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem"); + MemOperand &memRhs = static_cast(opndRhs); + SelectAggCopy(symbolMem, memRhs, lhsInfo.size); +} + +void X64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) { + /* mirSymbol info */ + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + MIRType *stmtMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()); + + /* In generally, RHS is from Dread/Iread */ + CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem"); + MemOperand &memRhs = static_cast(opndRhs); + ImmOperand *stOfstSrc = memRhs.GetOffsetOperand(); + RegOperand *baseSrc = memRhs.GetBaseRegister(); + + if (stmtMirType->GetPrimType() == PTY_agg) { + /* generate move to regs for agg return */ + RegOperand *result[kFourRegister] = { nullptr }; /* up to 2 int or 4 fp */ + uint32 numRegs = (symbolInfo.size <= k8ByteSize) ? kOneRegister : kTwoRegister; + PrimType retPrimType = (symbolInfo.size <= k4ByteSize) ? PTY_u32 : PTY_u64; + for (int i = 0; i < numRegs; i++) { + MemOperand &rhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(retPrimType)); + rhsMemOpnd.SetBaseRegister(*baseSrc); + ImmOperand &newStOfstSrc = static_cast(*stOfstSrc->Clone(*cgFunc->GetMemoryPool())); + newStOfstSrc.SetValue(newStOfstSrc.GetValue() + i * k8ByteSize); + rhsMemOpnd.SetOffsetOperand(newStOfstSrc); + regno_t regNo = (i == 0) ? 
x64::RAX : x64::RDX; + result[i] = &cgFunc->GetOpndBuilder()->CreatePReg(regNo, GetPrimTypeBitSize(retPrimType), + cgFunc->GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*(result[i]), rhsMemOpnd, retPrimType); + } + } else { + RegOperand *lhsAddrOpnd = &SelectCopy2Reg(AddrOpnd, stmt.Opnd(0)->GetPrimType()); + MemOperand &symbolMem = cgFunc->GetOpndBuilder()->CreateMem(*lhsAddrOpnd, symbolInfo.offset, + GetPrimTypeBitSize(PTY_u64)); + SelectAggCopy(symbolMem, memRhs, symbolInfo.size); + } +} + +Insn &X64MPIsel::AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds) { + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void X64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, k64BitSize, kRegTyInt)); + } + } +} + +void X64MPIsel::SelectCall(CallNode &callNode) { + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + MIRSymbol *fsym = GlobalTables::GetGsymTable().GetSymbolFromStidx(fn->GetStIdx().Idx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*fsym); + + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectParmList(callNode, paramOpnds); + + MIRType *retType = fn->GetReturnType(); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectCalleeReturn(retType, retOpnds); + + Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds); + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } +} + +void X64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) { + RegOperand &targetOpnd = SelectCopy2Reg(opnd0, iCallNode.Opnd(0)->GetPrimType()); + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectParmList(iCallNode, paramOpnds); + + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode.GetRetTyIdx()); + if (iCallNode.GetOpCode() == OP_icallproto) { + CHECK_FATAL((retType->GetKind() == kTypeFunction), "NIY, must be func"); + auto calleeType = static_cast(retType); + retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeType->GetRetTyIdx()); + } + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectCalleeReturn(retType, retOpnds); + + Insn &callInsn = AppendCall(x64::MOP_callq_r, targetOpnd, paramOpnds, retOpnds); + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + const auto &deoptBundleInfo = iCallNode.GetDeoptBundleInfo(); + for (const auto &elem : deoptBundleInfo) { + auto itr = preg2Opnd.find(elem.second); + if (itr == preg2Opnd.end()) { + CHECK_FATAL(false, "no operand generated for deopt vreg"); + } else { 
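+            // Attach the operand of each deopt vreg to the call insn so the
+            // deopt bundle survives into later phases (e.g. register allocation).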
+            callInsn.AddDeoptBundleInfo(elem.first, itr->second);
+        }
+    }
+}
+
+Operand &X64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) {
+    return GetTargetRetOperand(primType, sReg);
+}
+
+void X64MPIsel::SelectGoto(GotoNode &stmt) {
+    MOperator mOp = x64::MOP_jmpq_l;
+    auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
+    LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
+    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
+    jmpInsn.AddOpndChain(targetOpnd);
+    cgFunc->GetCurBB()->SetKind(BB::kBBGoto);
+    return;
+}
+
+void X64MPIsel::SelectIgoto(Operand &opnd0) {
+    CHECK_FATAL(opnd0.IsRegister(), "only register implemented!");
+    MOperator mOp = x64::MOP_jmpq_r;
+    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    jmpInsn.AddOpndChain(opnd0);
+    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
+    return;
+}
+
+/* This function generates inline code that fills in the va_list data structure. */
+/* type $__va_list <struct {
+     @__stack <* void> align(8),
+     @__gr_top <* void> align(8),
+     @__vr_top <* void> align(8),
+     @__gr_offs i32 align(4),
+     @__vr_offs i32 align(4)}>
+*/
+void X64MPIsel::GenCVaStartIntrin(RegOperand &opnd, uint32 stkOffset) {
+    /* FPLR only pushed in regalloc() after intrin function */
+    RegOperand &fpOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RFP, k64BitSize, kRegTyInt);
+
+    uint32 fpLrLength = k16BitSize;
+    /* __stack */
+    if (stkOffset != 0) {
+        stkOffset += fpLrLength;
+    }
+
+    /* isvary reset StackFrameSize */
+    ImmOperand &vaListOnPassArgStackOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
+    RegOperand &vReg = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
+    SelectAdd(vReg, fpOpnd, vaListOnPassArgStackOffset, GetLoweredPtrType());
+
+    // The 8-byte fields in the structure are stored with this mop.
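+    // va_list field byte offsets written below: __stack at 0, __gr_top at 8,
+    // __vr_top at 16, __gr_offs at 24, __vr_offs at 28.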
+ MOperator mOp = x64::MOP_movq_r_m; + + /* mem operand in va_list struct (lhs) */ + MemOperand &vaList = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, 0, k64BitSize); + Insn &fillInStkOffsetInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInStkOffsetInsn.AddOpndChain(vReg).AddOpndChain(vaList); + cgFunc->GetCurBB()->AppendInsn(fillInStkOffsetInsn); + + /* __gr_top ; it's the same as __stack before the 1st va_arg */ + stkOffset = 0; + ImmOperand &grTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset); + SelectSub(vReg, fpOpnd, grTopOffset, PTY_a64); + + /* mem operand in va_list struct (lhs) */ + MemOperand &vaListGRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k8BitSize, k64BitSize); + Insn &fillInGRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInGRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListGRTop); + cgFunc->GetCurBB()->AppendInsn(fillInGRTopInsn); + + /* __vr_top */ + int32 grAreaSize = static_cast(static_cast(cgFunc->GetMemlayout())->GetSizeOfGRSaveArea()); + stkOffset += grAreaSize; + stkOffset += k8BitSize; + ImmOperand &vaListVRTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset); + SelectSub(vReg, fpOpnd, vaListVRTopOffset, PTY_a64); + + MemOperand &vaListVRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k16BitSize, k64BitSize); + Insn &fillInVRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInVRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListVRTop); + cgFunc->GetCurBB()->AppendInsn(fillInVRTopInsn); + + // The 4-byte data in the a structure needs to use this mop. + mOp = x64::MOP_movl_r_m; + + /* __gr_offs */ + int32 grOffs = 0 - grAreaSize; + ImmOperand &vaListGROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, grOffs); + RegOperand &grOffsRegOpnd = SelectCopy2Reg(vaListGROffsOffset, PTY_a32); + + MemOperand &vaListGROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize, k64BitSize); + Insn &fillInGROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInGROffsInsn.AddOpndChain(grOffsRegOpnd).AddOpndChain(vaListGROffs); + cgFunc->GetCurBB()->AppendInsn(fillInGROffsInsn); + + /* __vr_offs */ + int32 vrOffs = static_cast(0UL - static_cast(static_cast( + cgFunc->GetMemlayout())->GetSizeOfVRSaveArea())); + ImmOperand &vaListVROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, vrOffs); + RegOperand &vrOffsRegOpnd = SelectCopy2Reg(vaListVROffsOffset, PTY_a32); + + MemOperand &vaListVROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize + 4, k64BitSize); + Insn &fillInVROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInVROffsInsn.AddOpndChain(vrOffsRegOpnd).AddOpndChain(vaListVROffs); + cgFunc->GetCurBB()->AppendInsn(fillInVROffsInsn); +} + +/* The second parameter in function va_start does not need to be concerned here, + * it is mainly used in proepilog */ +void X64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); + /* 2 operands, but only 1 needed. Don't need to emit code for second operand + * + * va_list is a passed struct with an address, load its address + */ + BaseNode *argExpr = intrnNode.Opnd(0); + Operand *opnd = HandleExpr(intrnNode, *argExpr); + RegOperand &opnd0 = SelectCopy2Reg(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ + + /* Find beginning of unnamed arg on stack. + * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...) 
+ * where struct S has size 32, address of r and s are on stack but they are named. + */ + X64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo pLoc; + uint32 stkSize = 0; + for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); i++) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(cgFunc->GetFunction().GetNthParamTyIdx(i)); + parmLocator.LocateNextParm(*ty, pLoc); + if (pLoc.reg0 == kRinvalid) { /* on stack */ + stkSize = static_cast(pLoc.memOffset + pLoc.memSize); + } + } + + stkSize = static_cast(RoundUp(stkSize, GetPointerSize())); + + GenCVaStartIntrin(opnd0, stkSize); + + return; +} + +void X64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the X64 CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), + -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand &indexOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(indexOpnd, opnd0, opnd1, srcType); + + /* load the displacement into a register by accessing memory at base + index * 8 */ + /* mov .L_xxx_LOCAL_CONST.x(%baseReg, %indexOpnd, 8), %dstRegOpnd */ + MemOperand &dstMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(PTY_a64)); + RegOperand &baseReg= cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(PTY_i64), kRegTyInt); + dstMemOpnd.SetBaseRegister(baseReg); + dstMemOpnd.SetIndexRegister(indexOpnd); + dstMemOpnd.SetOffsetOperand(stOpnd); + dstMemOpnd.SetScaleOperand(cgFunc->GetOpndBuilder()->CreateImm(baseReg.GetSize(), k8ByteSize)); + + /* jumping to the absolute address which is 
stored in dstRegOpnd */ + MOperator mOp = x64::MOP_jmpq_m; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(dstMemOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { + /* get mirSymbol info*/ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + /* of AddrofNode must be either ptr, a32 or a64 */ + PrimType ptype = expr.GetPrimType(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(ptype), + cgFunc->GetRegTyFromPrimTy(ptype)); + MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID()); + uint pSize = GetPrimTypeSize(ptype); + MOperator mOp; + if (pSize <= k4ByteSize) { + mOp = x64::MOP_leal_m_r; + } else if (pSize <= k8ByteSize) { + mOp = x64::MOP_leaq_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(resReg); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return &resReg; +} + +Operand *X64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + uint32 instrSize = static_cast(expr.SizeOfInstr()); + /* must be either a32 or a64. */ + PrimType primType = (instrSize == k8ByteSize) ? PTY_a64 : (instrSize == k4ByteSize) ? PTY_a32 : PTY_begin; + CHECK_FATAL(primType != PTY_begin, "prim-type of Func Addr must be either a32 or a64!"); + MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx()); + MIRSymbol *symbol = mirFunction->GetFuncSymbol(); + MIRStorageClass storageClass = symbol->GetStorageClass(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + if (storageClass == maple::kScText && symbol->GetSKind() == maple::kStFunc) { + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*symbol, 0, 0); + X64MOP_t mOp = x64::MOP_movabs_i_r; + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(stOpnd).AddOpndChain(resReg); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + } else { + CHECK_FATAL(false, "NIY"); + } + return &resReg; +} + +Operand *X64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + PrimType primType = expr.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RIP, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + + auto labelStr = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(expr.GetOffset()); + MIRSymbol *labelSym = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + DEBUG_ASSERT(labelSym != nullptr, "null ptr check"); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + labelSym->SetNameStrIdx(labelStr); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + DEBUG_ASSERT(etype != nullptr, "null ptr check"); + auto *labelConst = cgFunc->GetMemoryPool()->New(expr.GetOffset(), + cgFunc->GetFunction().GetPuidx(), *etype); + DEBUG_ASSERT(labelConst != nullptr, "null ptr check"); + labelSym->SetKonst(labelConst); + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0); + + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(bitSize); + memOpnd.SetBaseRegister(baseOpnd); + memOpnd.SetOffsetOperand(stOpnd); + + X64MOP_t mOp = x64::MOP_begin; + if (bitSize <= k32BitSize) { + mOp = x64::MOP_leal_m_r; + } else if (bitSize <= k64BitSize) { + mOp = x64::MOP_leaq_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOpnd).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return &resOpnd; +} + +static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isSigned) { + switch (cmpOp) { + case OP_ne: + return (brOp == OP_brtrue) ? MOP_jne_l : MOP_je_l; + case OP_eq: + return (brOp == OP_brtrue) ? MOP_je_l : MOP_jne_l; + case OP_lt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jl_l : MOP_jb_l) + : (isSigned ? MOP_jge_l : MOP_jae_l); + case OP_le: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jle_l : MOP_jbe_l) + : (isSigned ? MOP_jg_l : MOP_ja_l); + case OP_gt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jg_l : MOP_ja_l) + : (isSigned ? MOP_jle_l : MOP_jbe_l); + case OP_ge: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jge_l : MOP_jae_l) + : (isSigned ? MOP_jl_l : MOP_jb_l); + default: + CHECK_FATAL(false, "PickJmpInsn error"); + } +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) { + Opcode opcode = stmt.GetOpCode(); + X64MOP_t jmpOperator = x64::MOP_begin; + if (opnd0.IsImmediate()) { + DEBUG_ASSERT(opnd0.IsIntImmediate(), "only support int immediate"); + DEBUG_ASSERT(opcode == OP_brtrue || opcode == OP_brfalse, "unsupported opcode"); + ImmOperand &immOpnd0 = static_cast(opnd0); + if ((opcode == OP_brtrue && !(immOpnd0.GetValue() != 0)) || + (opcode == OP_brfalse && !(immOpnd0.GetValue() == 0))) { + return; + } + jmpOperator = x64::MOP_jmpq_l; + cgFunc->SetCurBBKind(BB::kBBGoto); + } else { + PrimType primType; + Opcode condOpcode = condNode.GetOpCode(); + if (!kOpcodeInfo.IsCompare(condOpcode)) { + primType = condNode.GetPrimType(); + ImmOperand &imm0 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), 0); + SelectCmp(opnd0, imm0, primType); + condOpcode = OP_ne; + } else { + primType = static_cast(condNode).GetOpndType(); + } + DEBUG_ASSERT(!IsPrimitiveFloat(primType), "unsupported float"); + jmpOperator = PickJmpInsn(opcode, condOpcode, IsSignedInteger(primType)); + cgFunc->SetCurBBKind(BB::kBBIf); + } + /* gen targetOpnd, .L.xxx__xx */ + auto funcName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + /* select jump Insn */ + Insn &jmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(jmpOperator, X64CG::kMd[jmpOperator])); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *X64MPIsel::SelectStrLiteral(ConststrNode &constStr) { + std::string labelStr; + labelStr.append(".LUstr_"); + labelStr.append(std::to_string(constStr.GetStrIdx())); + MIRSymbol *labelSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(labelStr)); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + auto *c = cgFunc->GetMemoryPool()->New(constStr.GetStrIdx(), *etype); + if (labelSym == nullptr) { + labelSym = cgFunc->GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c->GetType()); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + /* c may be local, we need a global node here */ + labelSym->SetKonst(cgFunc->NewMirConst(*c)); + } + if (c->GetPrimType() == PTY_ptr) { + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0); + RegOperand &addrOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, cgFunc->GetRegTyFromPrimTy(PTY_a64)); + Insn &addrOfInsn = (cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r])); + addrOfInsn.AddOpndChain(stOpnd).AddOpndChain(addrOpnd); + cgFunc->GetCurBB()->AppendInsn(addrOfInsn); + return &addrOpnd; + } + CHECK_FATAL(false, "Unsupported const string type"); + return nullptr; +} + +Operand &X64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) { + uint32 bitSize = GetPrimTypeBitSize(primType); + regno_t retReg = 0; + switch (sReg) { + case kSregRetval0: + retReg = x64::RAX; + break; + case kSregRetval1: + retReg = x64::RDX; + break; + default: + CHECK_FATAL(false, "GetTargetRetOperand: NIY"); + break; + } + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *X64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + + return resOpnd; +} + +void X64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + if(IsSignedInteger(primType) || IsUnsignedInteger(primType)) { + uint32 bitSize = GetPrimTypeBitSize(primType); + SelectCopy(resOpnd, opnd0, primType); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType); + X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_imulq_r_r : + (bitSize == k32BitSize) ? x64::MOP_imull_r_r : + (bitSize == k16BitSize) ? 
x64::MOP_imulw_r_r : x64::MOP_begin;
+        CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping");
+        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+        insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd);
+        cgFunc->GetCurBB()->AppendInsn(insn);
+    } else {
+        CHECK_FATAL(false, "NIY");
+    }
+}
+
+/*
+ * Dividend(EDX:EAX) / Divisor(reg/mem32) = Quotient(EAX) Remainder(EDX)
+ * The IDIV instruction performs signed division of EDX:EAX by the contents of a 32-bit register or memory
+ * location and stores the quotient in EAX and the remainder in EDX.
+ * The instruction truncates non-integral results towards 0. The sign of the remainder is always the same as the sign
+ * of the dividend, and the absolute value of the remainder is less than the absolute value of the divisor.
+ * An overflow generates a #DE (divide error) exception, rather than setting the OF flag.
+ * To avoid overflow problems, precede this instruction with a CDQ instruction to sign-extend the dividend:
+ * CDQ sign-extends EAX into EDX:EAX, which avoids overflow problems in signed arithmetic. For example, a
+ * 32-bit signed division is emitted, in effect, as "cdq; idivl divisor" with the dividend already in EAX.
+ */
+Operand *X64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType primType = node.GetPrimType();
+    Operand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(primType)) {
+        RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
+        RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
+        resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
+    } else {
+        /* vector operand */
+        CHECK_FATAL(false, "NIY");
+    }
+    return resOpnd;
+}
+
+Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType primType = node.GetPrimType();
+    Operand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(primType)) {
+        RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType());
+        RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType());
+        resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode());
+    } else {
+        /* vector operand */
+        CHECK_FATAL(false, "NIY");
+    }
+    return resOpnd;
+}
+
+Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) {
+    DEBUG_ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode");
+    if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) {
+        uint32 bitSize = GetPrimTypeBitSize(primType);
+        /* copy dividend to eax */
+        RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize,
+                                                                   cgFunc->GetRegTyFromPrimTy(primType));
+        SelectCopy(raxOpnd, opnd0, primType);
+
+        RegOperand &rdxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, bitSize,
+                                                                   cgFunc->GetRegTyFromPrimTy(primType));
+        bool isSigned = IsSignedInteger(primType);
+        if (isSigned) {
+            /* cwd/cdq/cqo: sign-extend eax into edx:eax, picked by operand width */
+            X64MOP_t cvtMOp = (bitSize == k64BitSize) ? x64::MOP_cqo :
+                              (bitSize == k32BitSize) ? x64::MOP_cdq :
+                              (bitSize == k16BitSize) ? x64::MOP_cwd : x64::MOP_begin;
+            CHECK_FATAL(cvtMOp != x64::MOP_begin, "NIY mapping");
+            Insn &cvtInsn = cgFunc->GetInsnBuilder()->BuildInsn(cvtMOp, raxOpnd, rdxOpnd);
+            cgFunc->GetCurBB()->AppendInsn(cvtInsn);
+        } else {
+            /* set edx = 0 */
+            SelectCopy(rdxOpnd, cgFunc->GetOpndBuilder()->CreateImm(bitSize, 0), primType);
+        }
+        /* div */
+        X64MOP_t divMOp = (bitSize == k64BitSize) ? (isSigned ? x64::MOP_idivq_r : x64::MOP_divq_r) :
+                          (bitSize == k32BitSize) ? (isSigned ? x64::MOP_idivl_r : x64::MOP_divl_r) :
+                          (bitSize == k16BitSize) ? (isSigned ? x64::MOP_idivw_r : x64::MOP_divw_r) :
+                          x64::MOP_begin;
+        CHECK_FATAL(divMOp != x64::MOP_begin, "NIY mapping");
+        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, raxOpnd, rdxOpnd);
+        cgFunc->GetCurBB()->AppendInsn(insn);
+        /* copy the result out: the quotient lives in eax, the remainder in edx */
+        RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize,
+                                                                   cgFunc->GetRegTyFromPrimTy(primType));
+        SelectCopy(resOpnd, ((opcode == OP_div) ? raxOpnd : rdxOpnd), primType);
+        return &resOpnd;
+    } else {
+        CHECK_FATAL(false, "NIY");
+    }
+}
+
+Operand *X64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) {
+    PrimType dtype = node.GetPrimType();
+    PrimType primOpndType = node.GetOpndType();
+    RegOperand *resOpnd = nullptr;
+    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primOpndType, node.Opnd(0)->GetPrimType());
+    RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primOpndType, node.Opnd(1)->GetPrimType());
+    if (!IsPrimitiveVector(node.GetPrimType())) {
+        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype),
+                                                        cgFunc->GetRegTyFromPrimTy(dtype));
+        SelectCmp(regOpnd0, regOpnd1, primOpndType);
+        Opcode parentOp = parent.GetOpCode();
+        if (parentOp == OP_brfalse || parentOp == OP_brtrue || parentOp == OP_select) {
+            return resOpnd;
+        }
+        SelectCmpResult(*resOpnd, node.GetOpCode(), dtype, primOpndType);
+    } else {
+        /* vector operand */
+        CHECK_FATAL(false, "NIY");
+    }
+    return resOpnd;
+}
+
+void X64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) {
+    if (IsPrimitiveInteger(primType)) {
+        x64::X64MOP_t cmpMOp = GetCmpMop(opnd0.GetKind(), opnd1.GetKind(), primType);
+        DEBUG_ASSERT(cmpMOp != x64::MOP_begin, "unsupported mOp");
+        Insn &cmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmpMOp, X64CG::kMd[cmpMOp]);
+        cmpInsn.AddOpndChain(opnd1).AddOpndChain(opnd0);
+        cgFunc->GetCurBB()->AppendInsn(cmpInsn);
+    } else {
+        CHECK_FATAL(false, "NIY");
+    }
+}
+
+void X64MPIsel::SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType) {
+    bool isSigned = !IsPrimitiveUnsigned(primOpndType);
+    /* set result -> u8 */
+    RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k8BitSize, cgFunc->GetRegTyFromPrimTy(PTY_u8));
+    x64::X64MOP_t setMOp = GetSetCCMop(opCode, tmpResOpnd.GetKind(), isSigned);
+    DEBUG_ASSERT(setMOp != x64::MOP_begin, "unsupported mOp");
+    Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(setMOp, X64CG::kMd[setMOp]);
+    setInsn.AddOpndChain(tmpResOpnd);
+    cgFunc->GetCurBB()->AppendInsn(setInsn);
+    /* cvt u8 -> primType */
+    SelectIntCvt(resOpnd, tmpResOpnd, primType, PTY_u8);
+}
+
+Operand *X64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd,
+                                 const BaseNode &parent) {
+    PrimType dtype = expr.GetPrimType();
+    RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype),
+                                                               cgFunc->GetRegTyFromPrimTy(dtype));
+    RegOperand &trueRegOpnd = SelectCopy2Reg(trueOpnd, dtype, expr.Opnd(1)->GetPrimType());
+    RegOperand &falseRegOpnd = SelectCopy2Reg(falseOpnd, dtype, expr.Opnd(2)->GetPrimType());
+    Opcode cmpOpcode;
+    PrimType cmpPrimType;
+    if (kOpcodeInfo.IsCompare(expr.Opnd(0)->GetOpCode())) {
+        CompareNode *cmpNode = static_cast<CompareNode*>(expr.Opnd(0));
+        DEBUG_ASSERT(cmpNode != nullptr, "null ptr check");
+        cmpOpcode = cmpNode->GetOpCode();
+        cmpPrimType = cmpNode->GetOpndType();
+    } else {
+        cmpPrimType = expr.Opnd(0)->GetPrimType();
+        cmpOpcode = OP_ne;
+        ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(cmpPrimType), 0);
+        SelectCmp(cond, immOpnd, cmpPrimType);
+    }
+    SelectSelect(resOpnd, trueRegOpnd, falseRegOpnd, dtype, cmpOpcode, cmpPrimType);
+    return &resOpnd;
+}
+
+void X64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
+                             Opcode cmpOpcode, PrimType cmpPrimType) {
+    CHECK_FATAL(!IsPrimitiveFloat(primType), "NIY");
+    bool isSigned = !IsPrimitiveUnsigned(primType);
+    uint32 bitSize = GetPrimTypeBitSize(primType);
+    if (bitSize == k8BitSize) {
+        /* cmov does not support 8-bit operands, so convert to 32-bit */
+        PrimType cvtType = isSigned ? PTY_i32 : PTY_u32;
+        RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k32BitSize, kRegTyInt);
+        Operand &tmpTrueOpnd = SelectCopy2Reg(trueOpnd, cvtType, primType);
+        Operand &tmpFalseOpnd = SelectCopy2Reg(falseOpnd, cvtType, primType);
+        SelectSelect(tmpResOpnd, tmpTrueOpnd, tmpFalseOpnd, cvtType, cmpOpcode, cmpPrimType);
+        SelectCopy(resOpnd, tmpResOpnd, primType, cvtType);
+        return;
+    }
+    RegOperand &tmpOpnd = SelectCopy2Reg(trueOpnd, primType);
+    SelectCopy(resOpnd, falseOpnd, primType);
+    x64::X64MOP_t cmovMop = GetCMovCCMop(cmpOpcode, bitSize, !IsPrimitiveUnsigned(cmpPrimType));
+    DEBUG_ASSERT(cmovMop != x64::MOP_begin, "unsupported mOp");
+    Insn &cmovInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmovMop, X64CG::kMd[cmovMop]);
+    cmovInsn.AddOpndChain(tmpOpnd).AddOpndChain(resOpnd);
+    cgFunc->GetCurBB()->AppendInsn(cmovInsn);
+}
+
+void X64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+    if (IsPrimitiveInteger(primType)) {
+        SelectCmp(opnd0, opnd1, primType);
+        Opcode cmpOpcode = isMin ? OP_lt : OP_gt;
+        SelectSelect(resOpnd, opnd0, opnd1, primType, cmpOpcode, primType);
+    } else {
+        CHECK_FATAL(false, "NIY type max or min");
+    }
+}
+
+Operand *X64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) {
+    PrimType dtype = node.GetPrimType();
+    auto bitWidth = GetPrimTypeBitSize(dtype);
+    // bswap only supports 32/64-bit; the 16-bit case uses xchg instead -- xchg al, ah
+    CHECK_FATAL(bitWidth == k16BitSize || bitWidth == k32BitSize ||
+                bitWidth == k64BitSize, "NIY, unsupported bitWidth.");
+
+    RegOperand *resOpnd = nullptr;
+
+    if (bitWidth == k16BitSize) {
+        /*
+         * For 16-bit, use xchg (e.g., xchg ah, al), so the register must support the high 8 bits.
+         * For x64 that means RAX(AH:AL), RBX(BH:BL), RCX(CH:CL), or RDX(DH:DL).
+         * The RA does not perform special processing for the high 8-bit case,
+         * so we use the RAX register here.
+         */
+        resOpnd = &cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitWidth,
+                                                        cgFunc->GetRegTyFromPrimTy(dtype));
+        SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
+        RegOperand &lowerOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize,
+                                                                     cgFunc->GetRegTyFromPrimTy(dtype));
+        RegOperand &highOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize,
+                                                                    cgFunc->GetRegTyFromPrimTy(dtype));
+        highOpnd.SetHigh8Bit();
+        x64::X64MOP_t xchgMop = MOP_xchgb_r_r;
+        Insn &xchgInsn = cgFunc->GetInsnBuilder()->BuildInsn(xchgMop, X64CG::kMd[xchgMop]);
+        xchgInsn.AddOpndChain(highOpnd).AddOpndChain(lowerOpnd);
+        cgFunc->GetCurBB()->AppendInsn(xchgInsn);
+    } else {
+        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(bitWidth,
+                                                        cgFunc->GetRegTyFromPrimTy(dtype));
+        SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
+        x64::X64MOP_t bswapMop = (bitWidth == k64BitSize) ?
MOP_bswapq_r : MOP_bswapl_r; + Insn &bswapInsn = cgFunc->GetInsnBuilder()->BuildInsn(bswapMop, X64CG::kMd[bswapMop]); + bswapInsn.AddOperand(*resOpnd); + cgFunc->GetCurBB()->AppendInsn(bswapInsn); + } + return resOpnd; +} + +RegOperand &X64MPIsel::GetTargetStackPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +RegOperand &X64MPIsel::GetTargetBasicPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +void X64MPIsel::SelectAsm(AsmNode &node) { + cgFunc->SetHasAsm(); + CHECK_FATAL(false, "NIY"); +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_abi.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_abi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..caa500a593b39e29585ab803a04cfe7f45a1bd31 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_abi.cpp @@ -0,0 +1,147 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_cgfunc.h" +#include "becommon.h" +#include "x64_isa.h" + +namespace maplebe { +using namespace maple; +namespace x64 { +bool IsAvailableReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return canBeAssigned; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return canBeAssigned; +//TODO: add fp registers here. 
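The register predicates in this file (IsAvailableReg above and the Is*Reg queries that follow) are all generated from a single register table via the X-macro idiom: x64_int_regs.def invokes whichever INT_REG macro is currently in scope, so one table drives every query. A minimal, self-contained sketch of the same idiom (editor's addition; the register names and flags below are illustrative, not the real contents of the .def file):

```
#include <iostream>

// One table is the single source of truth: each X(...) row names a register
// and says whether the register allocator may assign it.
#define REG_TABLE(X) \
    X(RAX, true)     \
    X(RSP, false)    \
    X(RBP, false)

// Expansion 1: the enum of register ids.
enum X64reg {
#define AS_ENUM(ID, CAN_ASSIGN) ID,
    REG_TABLE(AS_ENUM)
#undef AS_ENUM
};

// Expansion 2: a property query, one switch case per table row.
bool IsAvailableReg(X64reg reg) {
    switch (reg) {
#define AS_CASE(ID, CAN_ASSIGN) \
    case ID:                    \
        return CAN_ASSIGN;
        REG_TABLE(AS_CASE)
#undef AS_CASE
        default:
            return false;
    }
}

int main() {
    std::cout << std::boolalpha << IsAvailableReg(RAX) << ' '
              << IsAvailableReg(RSP) << std::endl;  // prints: true false
    return 0;
}
```

Adding a register then means touching only the table; every predicate picks it up automatically, which is why the TODO above only needs to include the FP/SIMD .def file.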
+#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsCallerSaveReg(X64reg regNO) { + return (regNO == R0) || (regNO == R4) || (R2 <= regNO && regNO <= R3) || + (R6 <= regNO && regNO <= R7) || (R8 <= regNO && regNO <= R11) || + (V2 <= regNO && regNO <= V7) || (V16 <= regNO && regNO <= V23); +} + +bool IsCalleeSavedReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isCalleeSave; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isCalleeSave; +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsParamReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isParam; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isParam; +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsSpillReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isSpill; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isSpill; +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsExtraSpillReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isExtraSpill; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isExtraSpill; +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsSpillRegInRA(X64reg regNO, bool has3RegOpnd) { + /* if has 3 RegOpnd, previous reg used to spill. */ + if (has3RegOpnd) { + return IsSpillReg(regNO) || IsExtraSpillReg(regNO); + } + return IsSpillReg(regNO); +} +} /* namespace x64 */ +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_args.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..900245735e69abe8573edfe6e5e13a80ba1b443d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_args.cpp @@ -0,0 +1,297 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_cg.h" +#include "x64_isa.h" +#include "x64_MPISel.h" + +namespace maplebe { +using namespace maple; + +void X64MoveRegArgs::Run() { + MoveVRegisterArgs(); + MoveRegisterArgs(); +} + +void X64MoveRegArgs::CollectRegisterArgs(std::map &argsList, + std::vector &indexList, + std::map &pairReg, + std::vector &numFpRegs, + std::vector &fpSize) const { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + uint32 numFormal = static_cast(x64CGFunc->GetFunction().GetFormalCount()); + numFpRegs.resize(numFormal); + fpSize.resize(numFormal); + X64CallConvImpl parmlocator(x64CGFunc->GetBecommon()); + CCLocInfo ploc; + uint32 start = 0; + if (numFormal) { + MIRFunction *func = const_cast(x64CGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) { + TyIdx tyIdx = x64CGFunc->GetBecommon().GetFuncReturnType(*func); + if (x64CGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { + start = 1; + } + } + } + for (uint32 i = start; i < numFormal; ++i) { + MIRType *ty = x64CGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &x64CGFunc->GetFunction()); + if (ploc.reg0 == kRinvalid) { + continue; + } + X64reg reg0 = static_cast(ploc.reg0); + MIRSymbol *sym = x64CGFunc->GetFunction().GetFormal(i); + if (sym->IsPreg()) { + continue; + } + argsList[i] = reg0; + indexList.emplace_back(i); + if (ploc.reg1 == kRinvalid) { + continue; + } + if (ploc.numFpPureRegs) { + uint32 index = i; + numFpRegs[index] = ploc.numFpPureRegs; + fpSize[index] = ploc.fpSize; + continue; + } + pairReg[i] = static_cast(ploc.reg1); + } +} + +ArgInfo X64MoveRegArgs::GetArgInfo(std::map &argsList, + uint32 argIndex, std::vector &numFpRegs, std::vector &fpSize) const { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + ArgInfo argInfo; + argInfo.reg = argsList[argIndex]; + argInfo.mirTy = x64CGFunc->GetFunction().GetNthParamType(argIndex); + argInfo.symSize = x64CGFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex()); + argInfo.memPairSecondRegSize = 0; + argInfo.doMemPairOpt = false; + argInfo.createTwoStores = false; + argInfo.isTwoRegParm = false; + if ((argInfo.symSize > k8ByteSize) && (argInfo.symSize <= k16ByteSize)) { + argInfo.isTwoRegParm = true; + if (numFpRegs[argIndex] > kOneRegister) { + argInfo.symSize = fpSize[argIndex]; + } else { + if (argInfo.symSize > k12ByteSize) { + argInfo.memPairSecondRegSize = k8ByteSize; + } else { + /* Round to 4 the stack space required for storing the struct */ + argInfo.memPairSecondRegSize = k4ByteSize; + } + argInfo.doMemPairOpt = true; + argInfo.symSize = GetPointerSize(); + } + } else if (argInfo.symSize > k16ByteSize) { + /* For large struct passing, a pointer to the copy is used. */ + argInfo.symSize = GetPointerSize(); + } else { + if (argInfo.symSize > k4ByteSize) { + argInfo.symSize = k8ByteSize; + } else if ((argInfo.mirTy->GetPrimType() == PTY_agg) && (argInfo.symSize <= k4ByteSize)) { + argInfo.symSize = k4ByteSize; + } + } + + if (GetVecLanes(argInfo.mirTy->GetPrimType()) > 0) { + /* vector type */ + CHECK_FATAL(false, "NIY"); + } + + argInfo.regType = (argInfo.reg < V0) ? 
kRegTyInt : kRegTyFloat; + argInfo.sym = x64CGFunc->GetFunction().GetFormal(argIndex); + CHECK_NULL_FATAL(argInfo.sym); + argInfo.symLoc = static_cast(x64CGFunc->GetMemlayout()-> + GetSymAllocInfo(argInfo.sym->GetStIndex())); + CHECK_NULL_FATAL(argInfo.symLoc); + return argInfo; +} + +void X64MoveRegArgs::GenerateMovInsn(ArgInfo &argInfo, X64reg reg2) { + /* reg2 is required when the struct size is between 8-16 bytes */ + X64CGFunc *x64CGFunc = static_cast(cgFunc); + int32 stOffset = x64CGFunc->GetBaseOffset(*argInfo.symLoc); + RegOperand *baseOpnd = static_cast(x64CGFunc->GetBaseReg(*argInfo.symLoc)); + uint32 opndSize = argInfo.symSize * kBitsPerByte; + RegOperand ®Opnd = x64CGFunc->GetOpndBuilder()->CreatePReg(argInfo.reg, + opndSize, argInfo.regType); + MemOperand *memOpnd = &x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset, opndSize); + + MOperator mOp; + if (opndSize == k64BitSize) { + mOp = x64::MOP_movq_r_m; + } else if (opndSize == k32BitSize) { + mOp = x64::MOP_movl_r_m; + } else if (opndSize == k16BitSize) { + mOp = x64::MOP_movw_r_m; + } else if (opndSize == k8BitSize) { + mOp = x64::MOP_movb_r_m; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(regOpnd).AddOpndChain(*memOpnd); + x64CGFunc->GetCurBB()->AppendInsn(insn); + if (reg2 != kRinvalid) { + RegOperand ®Opnd2 = x64CGFunc->GetOpndBuilder()->CreatePReg(reg2, opndSize, argInfo.regType); + MemOperand *memOpnd2 = &x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset + 8, opndSize); + Insn &insn2 = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn2.AddOpndChain(regOpnd2).AddOpndChain(*memOpnd2); + x64CGFunc->GetCurBB()->AppendInsn(insn2); + } +} + +void X64MoveRegArgs::MoveRegisterArgs() { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = x64CGFunc->GetCurBB(); + x64CGFunc->GetDummyBB()->ClearInsns(); + x64CGFunc->SetCurBB(*x64CGFunc->GetDummyBB()); + + /* <[0], maplebe::R0>; <[1], maplebe::V0> */ + std::map movePara; + /* [0], [1] */ + std::vector moveParaIndex; + std::map pairReg; + std::vector numFpRegs; + std::vector fpSize; + CollectRegisterArgs(movePara, moveParaIndex, pairReg, numFpRegs, fpSize); + + for (auto indexItem = moveParaIndex.begin(); indexItem != moveParaIndex.end(); ++indexItem) { + uint32 index = *indexItem; + ArgInfo argInfo = GetArgInfo(movePara, index, numFpRegs, fpSize); + GenerateMovInsn(argInfo, pairReg[index]); + } + + x64CGFunc->GetFirstBB()->InsertAtBeginning(*x64CGFunc->GetDummyBB()); + x64CGFunc->SetCurBB(*formerCurBB); +} + +void X64MoveRegArgs::LoadStackArgsToVReg(MIRSymbol &mirSym) { + DEBUG_ASSERT(mirSym.GetStorageClass() == kScFormal, "NIY, vreg parameters should be kScFormal type."); + X64CGFunc *x64CGFunc = static_cast(cgFunc); + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 opndSize = GetPrimTypeBitSize(stype); + auto symLoc = static_cast(x64CGFunc->GetMemlayout()-> + GetSymAllocInfo(mirSym.GetStIndex())); + int32 stOffset = x64CGFunc->GetBaseOffset(*symLoc); + RegOperand *baseOpnd = static_cast(x64CGFunc->GetBaseReg(*symLoc)); + MemOperand &memOpnd = x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset, opndSize); + PregIdx pregIdx = x64CGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg( + x64CGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), opndSize, cgFunc->GetRegTyFromPrimTy(stype)); + + MOperator mOp; + if (opndSize == 
k64BitSize) { + mOp = x64::MOP_movq_m_r; + } else if (opndSize == k32BitSize) { + mOp = x64::MOP_movl_m_r; + } else if (opndSize == k16BitSize) { + mOp = x64::MOP_movw_m_r; + } else if (opndSize == k8BitSize) { + mOp = x64::MOP_movb_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(memOpnd).AddOpndChain(dstRegOpnd); + if (x64CGFunc->GetCG()->GenerateVerboseCG()) { + std::string key = "param: %%"; + key += std::to_string(mirSym.GetPreg()->GetPregNo()); + insn.SetComment(key); + } + x64CGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void X64MoveRegArgs::MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) { + DEBUG_ASSERT(mirSym.GetStorageClass() == kScFormal, "NIY, vreg parameters should be kScFormal type."); + X64CGFunc *x64CGFunc = static_cast(cgFunc); + RegType regType = (ploc.reg0 < V0) ? kRegTyInt : kRegTyFloat; + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 srcBitSize = ((byteSize < k4ByteSize) ? k4ByteSize : byteSize) * kBitsPerByte; + PregIdx pregIdx = x64CGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg( + x64CGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), srcBitSize, regType); + RegOperand &srcRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg(ploc.reg0, srcBitSize, regType); + + MOperator mOp; + if (srcBitSize == k64BitSize) { + mOp = x64::MOP_movq_r_r; + } else if (srcBitSize == k32BitSize) { + mOp = x64::MOP_movl_r_r; + } else if (srcBitSize == k16BitSize) { + mOp = x64::MOP_movw_r_r; + } else if (srcBitSize == k8BitSize) { + mOp = x64::MOP_movb_r_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(srcRegOpnd).AddOpndChain(dstRegOpnd); + if (x64CGFunc->GetCG()->GenerateVerboseCG()) { + std::string key = "param: %%"; + key += std::to_string(mirSym.GetPreg()->GetPregNo()); + insn.SetComment(key); + } + x64CGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void X64MoveRegArgs::MoveVRegisterArgs() { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = x64CGFunc->GetCurBB(); + x64CGFunc->GetDummyBB()->ClearInsns(); + x64CGFunc->SetCurBB(*x64CGFunc->GetDummyBB()); + X64CallConvImpl parmlocator(x64CGFunc->GetBecommon()); + CCLocInfo ploc; + + uint32 formalCount = static_cast(x64CGFunc->GetFunction().GetFormalCount()); + uint32 start = 0; + if (formalCount) { + MIRFunction *func = const_cast(x64CGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) { + TyIdx idx = x64CGFunc->GetBecommon().GetFuncReturnType(*func); + if (x64CGFunc->GetBecommon().GetTypeSize(idx) <= k16BitSize) { + start = 1; + } + } + } + for (uint32 i = start; i < formalCount; ++i) { + MIRType *ty = x64CGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &x64CGFunc->GetFunction()); + MIRSymbol *sym = x64CGFunc->GetFunction().GetFormal(i); + + /* load locarefvar formals to store in the reflocals. */ + if (x64CGFunc->GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) { + CHECK_FATAL(false, "NIY"); + } + + if (!sym->IsPreg()) { + continue; + } + + if (ploc.reg0 == kRinvalid) { + /* load stack parameters to the vreg. 
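+             * Editor's illustration (assumed operands, not from the original patch):
+             * for an i64 formal that arrived on the stack, LoadStackArgsToVReg emits
+             * roughly "movq 16(%rbp), vreg" (the first stack slot above the saved rbp
+             * and return address), while a register-passed formal goes through
+             * MoveArgsToVReg below as a plain register move such as "movq %rdi, vreg".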
+             */
+            LoadStackArgsToVReg(*sym);
+        } else {
+            MoveArgsToVReg(ploc, *sym);
+        }
+    }
+
+    x64CGFunc->GetFirstBB()->InsertAtBeginning(*x64CGFunc->GetDummyBB());
+    x64CGFunc->SetCurBB(*formerCurBB);
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0d02df37edea34c89ebe284c908afb7b77487191
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "x64_cgfunc.h"
+#include "becommon.h"
+#include "abi.h"
+#include "x64_call_conv.h"
+namespace maplebe {
+using namespace maple;
+using namespace x64;
+
+int32 CCallConventionInfo::ClassifyAggregate(MIRType &mirType, uint64 sizeOfTy,
+                                             std::vector<ArgumentClass> &classes) const {
+    /*
+     * 1. If the size of an object is larger than four eightbytes, or it contains unaligned
+     *    fields, it has class MEMORY;
+     * 2. for the processors that do not support the __m256 type, if the size of an object
+     *    is larger than two eightbytes and the first eightbyte is not SSE or any other eightbyte
+     *    is not SSEUP, it still has class MEMORY.
+     * This in turn ensures that for processors that do support the __m256 type, if the size of
+     * an object is four eightbytes and the first eightbyte is SSE and all other eightbytes are
+     * SSEUP, it can be passed in a register.
+     * (Currently, assume that __m256 is not supported.)
+     */
+    if (sizeOfTy > k2EightBytesSize) {
+        classes.push_back(kMemoryClass);
+    } else if (sizeOfTy > k1EightBytesSize) {
+        classes.push_back(kIntegerClass);
+        classes.push_back(kIntegerClass);
+    } else {
+        classes.push_back(kIntegerClass);
+    }
+    return static_cast<int32>(sizeOfTy);
+}
+
+int32 CCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
+                                          std::vector<ArgumentClass> &classes) const {
+    switch (mirType.GetPrimType()) {
+        /*
+         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
+         * long, long long, and pointers are in the INTEGER class.
+         */
+        case PTY_void:
+        case PTY_u1:
+        case PTY_u8:
+        case PTY_i8:
+        case PTY_u16:
+        case PTY_i16:
+        case PTY_a32:
+        case PTY_u32:
+        case PTY_i32:
+        case PTY_a64:
+        case PTY_ptr:
+        case PTY_ref:
+        case PTY_u64:
+        case PTY_i64:
+            classes.push_back(kIntegerClass);
+            return k8ByteSize;
+        /*
+         * Arguments of type __int128 offer the same operations as INTEGERs,
+         * yet they do not fit into one general purpose register but require
+         * two registers.
+         */
+        case PTY_i128:
+        case PTY_u128:
+            classes.push_back(kIntegerClass);
+            classes.push_back(kIntegerClass);
+            return k16ByteSize;
+        case PTY_agg: {
+            /*
+             * The size of each argument gets rounded up to eightbytes;
+             * therefore the stack will always be eightbyte aligned.
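+             * Worked example (editor's illustration, not from the original patch):
+             * a struct {int a; int b; int c;} occupies 12 bytes, rounds up to 16,
+             * and is classified {INTEGER, INTEGER}, so it travels in two
+             * general-purpose registers; a 24-byte struct is larger than two
+             * eightbytes, so under the "no __m256" assumption above it is
+             * classified MEMORY and passed on the stack.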
+             */
+            uint64 sizeOfTy = RoundUp(be.GetTypeSize(mirType.GetTypeIndex()), k8ByteSize);
+            if (sizeOfTy == 0) {
+                return 0;
+            }
+            /* If the size of an object is larger than four eightbytes, it has class MEMORY */
+            if (sizeOfTy > k4EightBytesSize) {
+                classes.push_back(kMemoryClass);
+                return static_cast<int32>(sizeOfTy);
+            }
+            return ClassifyAggregate(mirType, sizeOfTy, classes);
+        }
+        default:
+            CHECK_FATAL(false, "NYI");
+    }
+    return 0;
+}
+
+int32 WebKitJSCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
+                                                 std::vector<ArgumentClass> &classes) const {
+    switch (mirType.GetPrimType()) {
+        /*
+         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
+         * long, long long, and pointers are in the INTEGER class.
+         */
+        case PTY_void:
+        case PTY_u1:
+        case PTY_u8:
+        case PTY_i8:
+        case PTY_u16:
+        case PTY_i16:
+        case PTY_a32:
+        case PTY_u32:
+        case PTY_i32:
+            classes.push_back(kIntegerClass);
+            return k4ByteSize;
+        case PTY_a64:
+        case PTY_ptr:
+        case PTY_ref:
+        case PTY_u64:
+        case PTY_i64:
+            classes.push_back(kIntegerClass);
+            return k8ByteSize;
+        default:
+            CHECK_FATAL(false, "NYI");
+    }
+    return 0;
+}
+
+int32 GHCCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
+                                            std::vector<ArgumentClass> &classes) const {
+    // TODO:
+    return 0;
+}
+
+void X64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const {
+    pLoc.reg0 = kRinvalid;
+    pLoc.reg1 = kRinvalid;
+    pLoc.reg2 = kRinvalid;
+    pLoc.reg3 = kRinvalid;
+    pLoc.memOffset = nextStackArgAdress;
+    pLoc.fpSize = 0;
+    pLoc.numFpPureRegs = 0;
+}
+
+int32 X64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc) {
+    InitCCLocInfo(pLoc);
+    std::vector<ArgumentClass> classes {};
+    int32 alignedTySize = GetCallConvInfo().Classification(beCommon, mirType, classes);
+    if (alignedTySize == 0) {
+        return 0;
+    }
+    pLoc.memSize = alignedTySize;
+    ++paramNum;
+    if (classes[0] == kIntegerClass) {
+        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
+            pLoc.reg0 = AllocateGPParmRegister();
+            DEBUG_ASSERT(nextGeneralParmRegNO <= GetCallConvInfo().GetIntParamRegsNum(),
+                         "regNO should not exceed the number of integer param registers");
+        } else if (alignedTySize == k16ByteSize) {
+            AllocateTwoGPParmRegisters(pLoc);
+            DEBUG_ASSERT(nextGeneralParmRegNO <= GetCallConvInfo().GetIntParamRegsNum(),
+                         "regNO should not exceed the number of integer param registers");
+        }
+    }
+    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
+        /* being passed in memory */
+        nextStackArgAdress = pLoc.memOffset + alignedTySize;
+    }
+    return 0;
+}
+
+int32 X64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc) {
+    InitCCLocInfo(pLoc);
+    std::vector<ArgumentClass> classes {};  /* max of four regs */
+    int32 alignedTySize = GetCallConvInfo().Classification(beCommon, retType, classes);
+    if (alignedTySize == 0) {
+        return 0;  /* size 0 ret val */
+    }
+    if (classes[0] == kIntegerClass) {
+        /*
+         * If the class is INTEGER, the next available register of the
+         * sequence %rax, %rdx is used.
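+         * Editor's illustration: an 8-byte scalar or aggregate comes back in
+         * %rax alone (reg0 only); a 16-byte aggregate is split across %rax
+         * (low eightbyte) and %rdx (high eightbyte), which is the case handled
+         * by AllocateTwoGPReturnRegisters below.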
+         */
+        CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs");
+        pLoc.regCount = alignedTySize;
+        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
+            pLoc.reg0 = AllocateGPReturnRegister();
+            DEBUG_ASSERT(nextGeneralReturnRegNO <= GetCallConvInfo().GetIntReturnRegsNum(),
+                         "regNO should not exceed the number of integer return registers");
+        } else if (alignedTySize == k16ByteSize) {
+            AllocateTwoGPReturnRegisters(pLoc);
+            DEBUG_ASSERT(nextGeneralReturnRegNO <= GetCallConvInfo().GetIntReturnRegsNum(),
+                         "regNO should not exceed the number of integer return registers");
+        }
+        if (nextGeneralReturnRegNO == kOneRegister) {
+            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
+        } else if (nextGeneralReturnRegNO == kTwoRegister) {
+            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
+            pLoc.primTypeOfReg1 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
+        }
+        return 0;
+    }
+    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
+        /*
+         * The caller provides space for the return value and passes
+         * the address of this storage in %rdi as if it were the first
+         * argument to the function. In effect, this address becomes a
+         * "hidden" first argument.
+         * On return %rax will contain the address that has been passed
+         * in by the caller in %rdi.
+         * Currently, this scenario is not fully supported.
+         */
+        pLoc.reg0 = AllocateGPReturnRegister();
+        return 0;
+    }
+    CHECK_FATAL(false, "NYI");
+    return 0;
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..502a1871101197d177135bc06376921e0e3597c5
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +#include "x64_cfgo.h" +#include "x64_isa.h" + +namespace maplebe { +/* Initialize cfg optimization patterns */ +void X64CFGOptimizer::InitOptimizePatterns() { + /* disable the pass that conflicts with cfi */ + if (!cgFunc->GenCfi()) { + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + } + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); +} + +uint32 X64FlipBRPattern::GetJumpTargetIdx(const Insn &insn) { + return x64::GetJumpTargetIdx(insn); +} +MOperator X64FlipBRPattern::FlipConditionOp(MOperator flippedOp) { + return x64::FlipConditionOp(flippedOp); +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8db6092719a15995774bd59bb03ea83ee87063d0 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_cg.h" +#include "x64_cgfunc.h" +#include "x64_isa.h" +namespace maplebe { + +using namespace x64; + +#define DEFINE_MOP(...) {__VA_ARGS__}, +const InsnDesc X64CG::kMd[kMopLast] = { +#include "x64_md.def" +}; +#undef DEFINE_MOP + +void X64CG::EnrollTargetPhases(maple::MaplePhaseManager *pm) const { +#include "x64_phases.def" +} + +CGFunc *X64CG::CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) { + return memPool.New(mod, *this, mirFunc, bec, memPool, stackMp, mallocator, funcId); +} + +bool X64CG::IsEffectiveCopy(Insn &insn) const { + return false; +} +bool X64CG::IsTargetInsn(MOperator mOp) const { + return (mOp >= MOP_movb_r_r && mOp <= MOP_pseudo_ret_int); +} +bool X64CG::IsClinitInsn(MOperator mOp) const { + return false; +} +bool X64CG::IsPseudoInsn(MOperator mOp) const { + return false; +} + +Insn &X64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) { + CHECK_FATAL(false, "NIY"); + Insn *a = nullptr; + return *a; +} + +PhiOperand &X64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) { + CHECK_FATAL(false, "NIY"); + PhiOperand *a = nullptr; + return *a; +} + +void X64CG::DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const { + X64OpndDumpVisitor visitor(opndDesc); + opnd.Accept(visitor); +} + +bool X64CG::IsExclusiveFunc(MIRFunction &mirFunc) { + return false; +} + +/* NOTE: Consider making be_common a field of CG. 
*/ +void X64CG::GenerateObjectMaps(BECommon &beCommon) { + +} + +/* Used for GCTIB pattern merging */ +std::string X64CG::FindGCTIBPatternName(const std::string &name) const { + return ""; +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..18a883250c5aa0ec6bf4c07fb883f3c1e22b8156 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -0,0 +1,923 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include +#include "x64_cgfunc.h" +#include "x64_memlayout.h" +#include "x64_isa.h" +#include "assembler/operand.h" + +namespace maplebe { +/* null implementation yet */ +void X64CGFunc::GenSaveMethodInfoCode(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::GenerateCleanupCode(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +bool X64CGFunc::NeedCleanup() { + CHECK_FATAL(false, "NIY"); + return false; +} +void X64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +uint32 X64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) { + CHECK_FATAL(false, "NIY"); + return 0; +} +void X64CGFunc::AssignLmbcFormalParams() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::LmbcGenSaveSpForAlloca() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::MergeReturn() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::DetermineReturnTypeofCall() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0){ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAbort() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAsm(AsmNode &node) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAggDassign(DassignNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassign(IassignNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignoff(IassignoffNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) { + 
CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectReturn(Operand *opnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIgoto(Operand *opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectGoto(GotoNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCall(CallNode &callNode) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCclz(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCctz(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCpopcount(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCparity(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCclrsb(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCisaligned(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCalignup(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCaligndown(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMembar(StmtNode &membar) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectComment(CommentNode &comment) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleCatch() { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectDread(const BaseNode &parent, AddrofNode &expr) { + CHECK_FATAL(false, "NIY"); + 
return nullptr; +} +RegOperand *X64CGFunc::SelectRegread(RegreadNode &expr) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +Operand &X64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand *X64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset, + PrimType finalBitFieldDestType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIntConst(MIRIntConst &intConst) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectStrConst(MIRStrConst &strConst) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectStr16Const(MIRStr16Const &strConst) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand &X64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand *X64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, 
"NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectLand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, + bool parentIsBr) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand 
*X64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, + const BaseNode &parent, bool hasCompare) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectMalloc(UnaryNode &call, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand &X64CGFunc::SelectCopy(Operand &src, PrimType srcType, PrimType dstType) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +Operand *X64CGFunc::SelectAlloca(UnaryNode &call, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectGCMalloc(GCMallocNode &call) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectLazyLoad(Operand &opnd0, PrimType primType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::GenerateYieldpoint(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +Operand &X64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::GetOrCreateRflag() { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +const Operand *X64CGFunc::GetRflag() const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +const Operand *X64CGFunc::GetFloatRflag() const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +const LabelOperand *X64CGFunc::GetLabelOperand(LabelIdx labIdx) const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +LabelOperand &X64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) { + std::string lableName = ".L." 
+ std::to_string(GetUniqueID()) + + "__" + std::to_string(labIdx); + return GetOpndBuilder()->CreateLabel(lableName.c_str(), labIdx); +} +LabelOperand &X64CGFunc::GetOrCreateLabelOperand(BB &bb) { + CHECK_FATAL(false, "NIY"); + LabelOperand *a; + return *a; +} +RegOperand &X64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateFramePointerRegOperand() { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateStackBaseRegOperand() { + return GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt); +} +RegOperand &X64CGFunc::GetZeroOpnd(uint32 size) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +Operand &X64CGFunc::CreateCfiRegOperand(uint32 reg, uint32 size) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::CreateImmOperand(PrimType primType, int64 val) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +void X64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t regno) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::CleanupDeadMov(bool dump) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) { + CHECK_FATAL(false, "NIY"); +} +bool X64CGFunc::IsFrameReg(const RegOperand &opnd) const { + CHECK_FATAL(false, "NIY"); + return false; +} +RegOperand *X64CGFunc::SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAbs(PrimType rType, Operand *o1) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorDup(PrimType rType, Operand *src, bool getLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, 
bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3, + PrimType oTyp3) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNeg(PrimType rType, Operand *o1) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNot(PrimType rType, Operand *o1) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp, + int32 lane) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, + Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, + bool isLow, bool isWide) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType, + const std::string &name) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::ProcessLazyBinding() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::DBGFixCallFrameLocationOffsets() { + CHECK_FATAL(false, "NIY"); +} +MemOperand *X64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx idx) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +int32 X64CGFunc::GetBaseOffset(const SymbolAlloc &symbolAlloc) { + const auto *symAlloc = static_cast(&symbolAlloc); + /* Call Frame layout of X64 + * Refer to layout in x64_memlayout.h. 
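+ * Worked example for the formulas below (editor's illustration; the sizes are
+ * assumed, not taken from the patch): with 16 bytes of locals, 8 bytes of
+ * register-passed args saved to the frame, and no reserved slot, a local whose
+ * symAlloc offset is 0 yields 0 - (0 + 16 + 8) = -24, i.e. -24(%rbp), while the
+ * first stack-passed argument (kMsArgsStkPassed, baseOffset 0) yields
+ * 0 + sizeofFplr = 16, i.e. 16(%rbp), skipping the saved rbp and the return
+ * address.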
+ * Do not change this unless you know what you are doing.
+ * The layout looks like this:
+ *   rbp position
+ *   prologue slots  --
+ *   ArgsReg          |
+ *   Locals           |  -- FrameSize
+ *   Spill            |
+ *   ArgsStk         --
+ */
+    constexpr const int32 sizeofFplr = 2 * kIntregBytelen;
+    // baseOffset is the offset of this symbol relative to the rbp position.
+    int32 baseOffset = symAlloc->GetOffset();
+    MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind();
+    auto *memLayout = static_cast<X64MemLayout*>(this->GetMemlayout());
+    if (sgKind == kMsSpillReg) {
+        /* spill = -(Locals + ArgsReg + baseOffset + ReseverdSlot + kSizeOfPtr) */
+        return -(memLayout->GetSizeOfLocals() + memLayout->SizeOfArgsRegisterPassed() +
+                 baseOffset + GetFunction().GetFrameReseverdSlot() + GetPointerSize());
+    } else if (sgKind == kMsLocals) {
+        /* Locals = baseOffset - (ReseverdSlot + Locals + ArgsReg) */
+        return baseOffset - (GetFunction().GetFrameReseverdSlot() + memLayout->GetSizeOfLocals() +
+                             memLayout->SizeOfArgsRegisterPassed());
+    } else if (sgKind == kMsArgsRegPassed) {
+        /* ArgsReg = baseOffset - ReseverdSlot - ArgsReg */
+        return baseOffset - GetFunction().GetFrameReseverdSlot() - memLayout->SizeOfArgsRegisterPassed();
+    } else if (sgKind == kMsArgsStkPassed) {
+        return baseOffset + sizeofFplr;
+    } else {
+        CHECK_FATAL(false, "sgKind check");
+    }
+    return 0;
+}
+
+RegOperand *X64CGFunc::GetBaseReg(const maplebe::SymbolAlloc &symAlloc) {
+    MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind();
+    DEBUG_ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) ||
+                  (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)), "NIY");
+    if (sgKind == kMsLocals || sgKind == kMsArgsRegPassed || sgKind == kMsArgsStkPassed) {
+        return &GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt);
+    } else {
+        CHECK_FATAL(false, "NIY sgKind");
+    }
+    return nullptr;
+}
+
+void X64CGFunc::FreeSpillRegMem(regno_t vrNum) {
+    MemOperand *memOpnd = nullptr;
+
+    auto p = spillRegMemOperands.find(vrNum);
+    if (p != spillRegMemOperands.end()) {
+        memOpnd = p->second;
+    }
+
+    if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) {
+        auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
+        if (pSecond != pRegSpillMemOperands.end()) {
+            memOpnd = pSecond->second;
+        }
+    }
+
+    if (memOpnd == nullptr) {
+        DEBUG_ASSERT(false, "the freed spill reg has no mem operand");
+        return;
+    }
+
+    uint32 size = memOpnd->GetSize();
+    MapleUnorderedMap::iterator iter;
+    if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) {
+        iter->second->Add(*memOpnd);
+    } else {
+        reuseSpillLocMem[size] = memPool->New(*GetFuncScopeAllocator());
+        reuseSpillLocMem[size]->Add(*memOpnd);
+    }
+}
+
+MemOperand *X64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) {
+    /* NOTE: must be used in RA only, not in other places.
*/ + if (IsVRegNOForPseudoRegister(vrNum)) { + auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum)); + if (p != pRegSpillMemOperands.end()) { + return p->second; + } + } + + auto p = spillRegMemOperands.find(vrNum); + if (p == spillRegMemOperands.end()) { + auto it = reuseSpillLocMem.find(bitSize); + if (it != reuseSpillLocMem.end()) { + MemOperand *memOpnd = it->second->GetOne(); + if (memOpnd != nullptr) { + spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } + } + + RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); + int32 offset = GetOrCreatSpillRegLocation(vrNum); + MemOperand *memOpnd = &GetOpndBuilder()->CreateMem(baseOpnd, offset, bitSize); + spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } else { + return p->second; + } +} + +void X64OpndDumpVisitor::Visit(maplebe::RegOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "reg "; + DumpRegInfo(*v); + DumpSize(*v); + const OpndDesc *regDesc = GetOpndDesc(); + LogInfo::MapleLogger() << " ["; + if (regDesc->IsRegDef()) { + LogInfo::MapleLogger() << "DEF,"; + } + if (regDesc->IsRegUse()) { + LogInfo::MapleLogger() << "USE,"; + } + LogInfo::MapleLogger() << "]"; + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(CommentOperand *v) { + LogInfo::MapleLogger() << ":#" << v->GetComment(); +} + +void X64OpndDumpVisitor::Visit(maplebe::ImmOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "imm "; + LogInfo::MapleLogger() << v->GetValue(); + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::MemOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "mem "; + if (v->GetBaseRegister() != nullptr) { + DumpRegInfo(*v->GetBaseRegister()); + if (v->GetOffsetOperand() != nullptr) { + LogInfo::MapleLogger() << " + " << v->GetOffsetOperand()->GetValue(); + } + } + DumpSize(*v); + DumpOpndSuffix(); +} +void X64OpndDumpVisitor::DumpRegInfo(maplebe::RegOperand &v) { + if (v.GetRegisterNumber() > baseVirtualRegNO) { + LogInfo::MapleLogger() << "V" << v.GetRegisterNumber(); + } else { + uint8 regType = -1; + switch (v.GetSize()) { + case k8BitSize: + /* use lower 8-bits */ + regType = X64CG::kR8LowList; + break; + case k16BitSize: + regType = X64CG::kR16List; + break; + case k32BitSize: + regType = X64CG::kR32List; + break; + case k64BitSize: + regType = X64CG::kR64List; + break; + default: + CHECK_FATAL(false, "unkown reg size"); + break; + } + assembler::Reg reg = assembler::kRegArray[regType][v.GetRegisterNumber()]; + LogInfo::MapleLogger() << "%" << assembler::kRegStrMap.at(reg); + } +} + +void X64OpndDumpVisitor::Visit(maplebe::FuncNameOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "funcname "; + LogInfo::MapleLogger() << v->GetName(); + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::ListOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "list "; + + MapleList opndList = v->GetOperands(); + for (auto it = opndList.begin(); it != opndList.end();) { + (*it)->Dump(); + LogInfo::MapleLogger() << (++it == opndList.end() ? 
"" : " ,"); + } + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::LabelOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "label "; + LogInfo::MapleLogger() << v->GetLabelIndex(); + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(PhiOperand *v) { + CHECK_FATAL(false, "NIY"); +} + +void X64OpndDumpVisitor::Visit(CondOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(StImmOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(BitShiftOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(ExtendShiftOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +} diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8b79cda6a86f0a00b7a96ef1445611a07b7d7603 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp @@ -0,0 +1,2335 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_emitter.h" +#include "x64_cgfunc.h" +#include "x64_cg.h" +#include "insn.h" + + +#define __ assmbler. 
+ +namespace { +using namespace maple; + +DBGDieAttr *LFindAttribute(MapleVector<DBGDieAttr*> &vec, DwAt key) { + for (DBGDieAttr *at : vec) { + if (at->GetDwAt() == key) { + return at; + } + } + return nullptr; +} + +DBGAbbrevEntry *LFindAbbrevEntry(MapleVector<DBGAbbrevEntry*> &abbvec, unsigned int key) { + for (DBGAbbrevEntry *daie : abbvec) { + if (!daie) { + continue; + } + if (daie->GetAbbrevId() == key) { + return daie; + } + } + DEBUG_ASSERT(0, ""); + return nullptr; +} + +bool LShouldEmit(unsigned int dwform) { + return dwform != static_cast<uint32>(DW_FORM_flag_present); +} + +DBGDie *LFindChildDieWithName(DBGDie &die, DwTag tag, const GStrIdx key) { + for (DBGDie *c : die.GetSubDieVec()) { + if (c->GetTag() != tag) { + continue; + } + for (DBGDieAttr *a : c->GetAttrVec()) { + if ((a->GetDwAt() == static_cast<DwAt>(DW_AT_name)) && + ((a->GetDwForm() == static_cast<DwForm>(DW_FORM_string) || + a->GetDwForm() == static_cast<DwForm>(DW_FORM_strp)) && a->GetId() == key.GetIdx())) { + return c; + } + if ((a->GetDwAt() == static_cast<DwAt>(DW_AT_name)) && + (!((a->GetDwForm() == static_cast<DwForm>(DW_FORM_string) || + a->GetDwForm() == static_cast<DwForm>(DW_FORM_strp)) && a->GetId() == key.GetIdx()))) { + break; + } + } + } + return nullptr; +} + +/* GetDwOpName(unsigned n) */ +#define TOSTR(s) #s +const std::string GetDwOpName(unsigned n) { + switch (n) { +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) case DW_OP_##NAME: return TOSTR(DW_OP_##NAME) + case DW_OP_hi_user: return "DW_OP_hi_user"; + default: return ""; + } +} +} + +using namespace std; +using namespace assembler; + +namespace maplebe { +uint8 X64Emitter::GetSymbolAlign(const MIRSymbol &mirSymbol, bool isComm) { + uint8 alignInByte = mirSymbol.GetAttrs().GetAlignValue(); + MIRTypeKind kind = mirSymbol.GetType()->GetKind(); + if (isComm) { + MIRStorageClass storage = mirSymbol.GetStorageClass(); + if (((kind == kTypeStruct) || (kind == kTypeClass) || (kind == kTypeArray) || + (kind == kTypeUnion)) && ((storage == kScGlobal) || (storage == kScPstatic) || + (storage == kScFstatic)) && alignInByte < kSizeOfPTR) { + alignInByte = kQ; + return alignInByte; + } + } + if (alignInByte == 0) { + if (kind == kTypeStruct || kind == kTypeClass || kind == kTypeArray || kind == kTypeUnion) { + return alignInByte; + } else { + alignInByte = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirSymbol.GetType()->GetTypeIndex()); + } + } + return alignInByte; +} + +uint64 X64Emitter::GetSymbolSize(const TyIdx typeIndex) { + uint64 sizeInByte = Globals::GetInstance()->GetBECommon()->GetTypeSize(typeIndex); + return sizeInByte; +} + +Reg X64Emitter::TransferReg(Operand *opnd) const { + RegOperand *v = static_cast<RegOperand*>(opnd); + /* check whether this reg is still virtual */ + CHECK_FATAL(v->IsPhysicalRegister(), "register is still virtual or reg num is 0"); + + uint8 regType = -1; + switch (v->GetSize()) { + case k8BitSize: + regType = v->IsHigh8Bit() ?
X64CG::kR8HighList : X64CG::kR8LowList; + break; + case k16BitSize: + regType = X64CG::kR16List; + break; + case k32BitSize: + regType = X64CG::kR32List; + break; + case k64BitSize: + regType = X64CG::kR64List; + break; + default: + FATAL(kLncFatal, "unknown reg size"); + break; + } + Reg reg = kRegArray[regType][v->GetRegisterNumber()]; + return reg; +} + +pair<int64, bool> X64Emitter::TransferImm(Operand *opnd) { + ImmOperand *v = static_cast<ImmOperand*>(opnd); + if (v->GetKind() == Operand::kOpdStImmediate) { + uint32 symIdx = v->GetSymbol()->GetNameStrIdx().get(); + const string &symName = v->GetName(); + __ StoreNameIntoSymMap(symIdx, symName); + return pair<int64, bool>(symIdx, true); + } else { + return pair<int64, bool>(v->GetValue(), false); + } +} + +Mem X64Emitter::TransferMem(Operand *opnd, uint32 funcUniqueId) { + MemOperand *v = static_cast<MemOperand*>(opnd); + Mem mem; + mem.size = v->GetSize(); + if (v->GetOffsetOperand() != nullptr) { + ImmOperand *ofset = v->GetOffsetOperand(); + if (ofset->GetKind() == Operand::kOpdStImmediate) { + string symbolName = ofset->GetName(); + const MIRSymbol *symbol = ofset->GetSymbol(); + + MIRStorageClass storageClass = symbol->GetStorageClass(); + bool isLocalVar = ofset->GetSymbol()->IsLocal(); + if (storageClass == kScPstatic && isLocalVar) { + symbolName.append(to_string(funcUniqueId)); + } + + int64 symIdx; + /* 2 : if it is a bb label, the second position in symbolName is '.' */ + if (symbolName.size() > 2 && symbolName[2] == '.') { + string delimiter = "__"; + size_t pos = symbolName.find(delimiter); + uint32 itsFuncUniqueId = pos > 3 ? stoi(symbolName.substr(3, pos)) : 0; /* 3: index starts after ".L." */ + uint32 labelIdx = stoi(symbolName.substr(pos + 2, symbolName.length())); /* 2: delimiter.length() */ + symIdx = CalculateLabelSymIdx(itsFuncUniqueId, labelIdx); + } else { + symIdx = symbol->GetNameStrIdx().get(); + } + __ StoreNameIntoSymMap(symIdx, symbolName); + mem.disp.first = symIdx; + } + if (ofset->GetValue() != 0) { + mem.disp.second = ofset->GetValue(); + } + } + if (v->GetBaseRegister() != nullptr) { + if (v->GetIndexRegister() != nullptr && v->GetBaseRegister()->GetRegisterNumber() == x64::RBP) { + mem.base = ERR; + } else { + mem.base = TransferReg(v->GetBaseRegister()); + } + } + if (v->GetIndexRegister() != nullptr) { + mem.index = TransferReg(v->GetIndexRegister()); + uint8 s = static_cast<uint8>(v->GetScaleOperand()->GetValue()); + /* 1, 2, 4, 8: allowed range for s */ + CHECK_FATAL(s == 1 || s == 2 || s == 4 || s == 8, "mem.s is not 1, 2, 4, or 8"); + mem.s = s; + } + mem.SetMemType(); + return mem; +} + +int64 X64Emitter::TransferLabel(Operand *opnd, uint32 funcUniqueId) { + LabelOperand *v = static_cast<LabelOperand*>(opnd); + int64 labelSymIdx = CalculateLabelSymIdx(funcUniqueId, v->GetLabelIndex()); + __ StoreNameIntoSymMap(labelSymIdx, v->GetParentFunc()); + return labelSymIdx; +} + +uint32 X64Emitter::TransferFuncName(Operand *opnd) { + FuncNameOperand *v = static_cast<FuncNameOperand*>(opnd); + uint32 funcSymIdx = v->GetFunctionSymbol()->GetNameStrIdx().get(); + __ StoreNameIntoSymMap(funcSymIdx, v->GetName()); + return funcSymIdx; +} + +void X64Emitter::EmitInsn(Insn &insn, uint32 funcUniqueId) { +#if DEBUG + insn.Check(); +#endif + + MOperator mop = insn.GetMachineOpcode(); + const InsnDesc &curMd = X64CG::kMd[mop]; + uint32 opndNum = curMd.GetOpndMDLength(); /* Get operands Number */ + + /* Get operand(s) */ + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + if (opndNum > 0) { + opnd0 = &insn.GetOperand(0); + if (opndNum > 1) { + opnd1 = &insn.GetOperand(1); + } + } + + switch (mop) { + /* mov */
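+ /* Opcode naming: MOP_<op><width>_<operands>, where the width suffix b/w/l/q selects kB/kW/kL/kQ (8/16/32/64 bits) and the operand letters r/i/m/l stand for register, immediate, memory and label; each case below forwards the transferred operands to the matching assembler call. */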
+ case x64::MOP_movb_r_r: + __ Mov(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movw_r_r: + __ Mov(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movl_r_r: + __ Mov(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movq_r_r: + __ Mov(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movb_m_r: + __ Mov(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movw_m_r: + __ Mov(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movl_m_r: + __ Mov(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movq_m_r: + __ Mov(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movb_i_r: + __ Mov(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movw_i_r: + __ Mov(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movl_i_r: + __ Mov(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movq_i_r: + __ Mov(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movb_i_m: + __ Mov(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movw_i_m: + __ Mov(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movl_i_m: + __ Mov(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movb_r_m: + __ Mov(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movw_r_m: + __ Mov(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movl_r_m: + __ Mov(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movq_r_m: + __ Mov(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* movzx */ + case x64::MOP_movzbw_r_r: + __ MovZx(kB, kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzbl_r_r: + __ MovZx(kB, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzbq_r_r: + __ MovZx(kB, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzwl_r_r: + __ MovZx(kW, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzwq_r_r: + __ MovZx(kW, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzbw_m_r: + __ MovZx(kB, kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzbl_m_r: + __ MovZx(kB, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzbq_m_r: + __ MovZx(kB, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzwl_m_r: + __ MovZx(kW, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzwq_m_r: + __ MovZx(kW, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* movsx */ + case x64::MOP_movsbw_r_r: + __ MovSx(kB, kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movsbl_r_r: + __ MovSx(kB, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movsbq_r_r: + __ MovSx(kB, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movswl_r_r: + __ MovSx(kW, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movswq_r_r: + __ MovSx(kW, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movslq_r_r: + __ MovSx(kL, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movsbw_m_r: + __ MovSx(kB, kW, 
TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movsbl_m_r: + __ MovSx(kB, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movsbq_m_r: + __ MovSx(kB, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movswl_m_r: + __ MovSx(kW, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movswq_m_r: + __ MovSx(kW, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movslq_m_r: + __ MovSx(kL, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* add */ + case x64::MOP_addb_r_r: + __ Add(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addw_r_r: + __ Add(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addl_r_r: + __ Add(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addq_r_r: + __ Add(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addb_i_r: + __ Add(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addw_i_r: + __ Add(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addl_i_r: + __ Add(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addq_i_r: + __ Add(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addb_m_r: + __ Add(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addw_m_r: + __ Add(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addl_m_r: + __ Add(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addq_m_r: + __ Add(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addb_r_m: + __ Add(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addw_r_m: + __ Add(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addl_r_m: + __ Add(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addq_r_m: + __ Add(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addb_i_m: + __ Add(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addw_i_m: + __ Add(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addl_i_m: + __ Add(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addq_i_m: + __ Add(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* movabs */ + case x64::MOP_movabs_i_r: + __ Movabs(TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movabs_l_r: + __ Movabs(TransferLabel(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* push */ + case x64::MOP_pushq_r: + __ Push(kQ, TransferReg(opnd0)); + break; + /* pop */ + case x64::MOP_popq_r: + __ Pop(kQ, TransferReg(opnd0)); + break; + /* lea */ + case x64::MOP_leaw_m_r: + __ Lea(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_leal_m_r: + __ Lea(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_leaq_m_r: + __ Lea(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* sub , sbb */ + case x64::MOP_subb_r_r: + __ Sub(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subw_r_r: + __ Sub(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subl_r_r: + __ Sub(kL, TransferReg(opnd0), TransferReg(opnd1)); + 
break; + case x64::MOP_subq_r_r: + __ Sub(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subb_i_r: + __ Sub(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subw_i_r: + __ Sub(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subl_i_r: + __ Sub(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subq_i_r: + __ Sub(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subb_m_r: + __ Sub(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subw_m_r: + __ Sub(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subl_m_r: + __ Sub(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subq_m_r: + __ Sub(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subb_r_m: + __ Sub(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subw_r_m: + __ Sub(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subl_r_m: + __ Sub(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subq_r_m: + __ Sub(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subb_i_m: + __ Sub(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subw_i_m: + __ Sub(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subl_i_m: + __ Sub(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subq_i_m: + __ Sub(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* and */ + case x64::MOP_andb_r_r: + __ And(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andw_r_r: + __ And(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andl_r_r: + __ And(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andq_r_r: + __ And(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andb_i_r: + __ And(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andw_i_r: + __ And(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andl_i_r: + __ And(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andq_i_r: + __ And(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andb_m_r: + __ And(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andw_m_r: + __ And(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andl_m_r: + __ And(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andq_m_r: + __ And(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andb_r_m: + __ And(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andw_r_m: + __ And(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andl_r_m: + __ And(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andq_r_m: + __ And(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andb_i_m: + __ And(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andw_i_m: + __ And(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andl_i_m: + __ And(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case 
x64::MOP_andq_i_m: + __ And(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* or */ + case x64::MOP_orb_r_r: + __ Or(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orw_r_r: + __ Or(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orl_r_r: + __ Or(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orq_r_r: + __ Or(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orb_m_r: + __ Or(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orw_m_r: + __ Or(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orl_m_r: + __ Or(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orq_m_r: + __ Or(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orb_i_r: + __ Or(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orw_i_r: + __ Or(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orl_i_r: + __ Or(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orq_i_r: + __ Or(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orb_r_m: + __ Or(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orw_r_m: + __ Or(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orl_r_m: + __ Or(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orq_r_m: + __ Or(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orb_i_m: + __ Or(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orw_i_m: + __ Or(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orl_i_m: + __ Or(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orq_i_m: + __ Or(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* xor */ + case x64::MOP_xorb_r_r: + __ Xor(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorw_r_r: + __ Xor(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorl_r_r: + __ Xor(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorq_r_r: + __ Xor(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorb_i_r: + __ Xor(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorw_i_r: + __ Xor(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorl_i_r: + __ Xor(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorq_i_r: + __ Xor(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorb_m_r: + __ Xor(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorw_m_r: + __ Xor(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorl_m_r: + __ Xor(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorq_m_r: + __ Xor(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorb_r_m: + __ Xor(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorw_r_m: + __ Xor(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorl_r_m: + __ Xor(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorq_r_m: + __ Xor(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); 
+ break; + case x64::MOP_xorb_i_m: + __ Xor(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorw_i_m: + __ Xor(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorl_i_m: + __ Xor(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorq_i_m: + __ Xor(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* not */ + case x64::MOP_notb_r: + __ Not(kB, TransferReg(opnd0)); + break; + case x64::MOP_notw_r: + __ Not(kW, TransferReg(opnd0)); + break; + case x64::MOP_notl_r: + __ Not(kL, TransferReg(opnd0)); + break; + case x64::MOP_notq_r: + __ Not(kQ, TransferReg(opnd0)); + break; + case x64::MOP_notb_m: + __ Not(kB, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_notw_m: + __ Not(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_notl_m: + __ Not(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_notq_m: + __ Not(kQ, TransferMem(opnd0, funcUniqueId)); + break; + /* neg */ + case x64::MOP_negb_r: + __ Neg(kB, TransferReg(opnd0)); + break; + case x64::MOP_negw_r: + __ Neg(kW, TransferReg(opnd0)); + break; + case x64::MOP_negl_r: + __ Neg(kL, TransferReg(opnd0)); + break; + case x64::MOP_negq_r: + __ Neg(kQ, TransferReg(opnd0)); + break; + case x64::MOP_negb_m: + __ Neg(kB, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_negw_m: + __ Neg(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_negl_m: + __ Neg(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_negq_m: + __ Neg(kQ, TransferMem(opnd0, funcUniqueId)); + break; + /* div, cwd, cdq, cqo */ + case x64::MOP_idivw_r: + __ Idiv(kW, TransferReg(opnd0)); + break; + case x64::MOP_idivl_r: + __ Idiv(kL, TransferReg(opnd0)); + break; + case x64::MOP_idivq_r: + __ Idiv(kQ, TransferReg(opnd0)); + break; + case x64::MOP_idivw_m: + __ Idiv(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_idivl_m: + __ Idiv(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_idivq_m: + __ Idiv(kQ, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_divw_r: + __ Div(kW, TransferReg(opnd0)); + break; + case x64::MOP_divl_r: + __ Div(kL, TransferReg(opnd0)); + break; + case x64::MOP_divq_r: + __ Div(kQ, TransferReg(opnd0)); + break; + case x64::MOP_divw_m: + __ Div(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_divl_m: + __ Div(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_divq_m: + __ Div(kQ, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_cwd: + __ Cwd(); + break; + case x64::MOP_cdq: + __ Cdq(); + break; + case x64::MOP_cqo: + __ Cqo(); + break; + /* shl */ + case x64::MOP_shlb_r_r: + __ Shl(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlw_r_r: + __ Shl(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shll_r_r: + __ Shl(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlq_r_r: + __ Shl(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlb_i_r: + __ Shl(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlw_i_r: + __ Shl(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shll_i_r: + __ Shl(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlq_i_r: + __ Shl(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlb_r_m: + __ Shl(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlw_r_m: + __ Shl(kW, 
TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shll_r_m: + __ Shl(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlq_r_m: + __ Shl(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlb_i_m: + __ Shl(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlw_i_m: + __ Shl(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shll_i_m: + __ Shl(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlq_i_m: + __ Shl(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* sar */ + case x64::MOP_sarb_r_r: + __ Sar(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarw_r_r: + __ Sar(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarl_r_r: + __ Sar(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarq_r_r: + __ Sar(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarb_i_r: + __ Sar(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarw_i_r: + __ Sar(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarl_i_r: + __ Sar(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarq_i_r: + __ Sar(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarb_r_m: + __ Sar(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarw_r_m: + __ Sar(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarl_r_m: + __ Sar(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarq_r_m: + __ Sar(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarb_i_m: + __ Sar(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarw_i_m: + __ Sar(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarl_i_m: + __ Sar(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarq_i_m: + __ Sar(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* shr */ + case x64::MOP_shrb_r_r: + __ Shr(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrw_r_r: + __ Shr(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrl_r_r: + __ Shr(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrq_r_r: + __ Shr(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrb_i_r: + __ Shr(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrw_i_r: + __ Shr(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrl_i_r: + __ Shr(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrq_i_r: + __ Shr(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrb_r_m: + __ Shr(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrw_r_m: + __ Shr(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrl_r_m: + __ Shr(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrq_r_m: + __ Shr(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrb_i_m: + __ Shr(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrw_i_m: + __ Shr(kW, TransferImm(opnd0), TransferMem(opnd1, 
funcUniqueId)); + break; + case x64::MOP_shrl_i_m: + __ Shr(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrq_i_m: + __ Shr(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* jmp */ + case x64::MOP_jmpq_r: + __ Jmp(TransferReg(opnd0)); + break; + case x64::MOP_jmpq_m: + __ Jmp(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_jmpq_l: + __ Jmp(TransferLabel(opnd0, funcUniqueId)); + break; + /* je, jne */ + case x64::MOP_je_l: + __ Je(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_ja_l: + __ Ja(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jae_l: + __ Jae(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jne_l: + __ Jne(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jb_l: + __ Jb(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jbe_l: + __ Jbe(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jg_l: + __ Jg(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jge_l: + __ Jge(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jl_l: + __ Jl(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jle_l: + __ Jle(TransferLabel(opnd0, funcUniqueId)); + break; + /* cmp */ + case x64::MOP_cmpb_r_r: + __ Cmp(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpw_r_r: + __ Cmp(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpl_r_r: + __ Cmp(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpq_r_r: + __ Cmp(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpb_i_r: + __ Cmp(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpw_i_r: + __ Cmp(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpl_i_r: + __ Cmp(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpq_i_r: + __ Cmp(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpb_m_r: + __ Cmp(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpw_m_r: + __ Cmp(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpl_m_r: + __ Cmp(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpq_m_r: + __ Cmp(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpb_r_m: + __ Cmp(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpw_r_m: + __ Cmp(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpl_r_m: + __ Cmp(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpq_r_m: + __ Cmp(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpb_i_m: + __ Cmp(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpw_i_m: + __ Cmp(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpl_i_m: + __ Cmp(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpq_i_m: + __ Cmp(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_testq_r_r: + __ Test(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + /* setcc */ + case x64::MOP_seta_r: + __ Seta(TransferReg(opnd0)); + break; + case x64::MOP_setae_r: + __ Setae(TransferReg(opnd0)); + break; + case x64::MOP_setb_r: + __ Setb(TransferReg(opnd0)); + break; + case x64::MOP_setbe_r: + __ 
Setbe(TransferReg(opnd0)); + break; + case x64::MOP_sete_r: + __ Sete(TransferReg(opnd0)); + break; + case x64::MOP_setg_r: + __ Setg(TransferReg(opnd0)); + break; + case x64::MOP_setge_r: + __ Setge(TransferReg(opnd0)); + break; + case x64::MOP_setl_r: + __ Setl(TransferReg(opnd0)); + break; + case x64::MOP_setle_r: + __ Setle(TransferReg(opnd0)); + break; + case x64::MOP_setne_r: + __ Setne(TransferReg(opnd0)); + break; + case x64::MOP_seta_m: + __ Seta(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setae_m: + __ Setae(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setb_m: + __ Setb(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setbe_m: + __ Setbe(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_sete_m: + __ Sete(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setl_m: + __ Setl(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setle_m: + __ Setle(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setg_m: + __ Setg(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setge_m: + __ Setge(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setne_m: + __ Setne(TransferMem(opnd0, funcUniqueId)); + break; + /* cmova & cmovae */ + case x64::MOP_cmovaw_r_r: + __ Cmova(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmoval_r_r: + __ Cmova(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaq_r_r: + __ Cmova(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaw_m_r: + __ Cmova(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmoval_m_r: + __ Cmova(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovaq_m_r: + __ Cmova(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovaew_r_r: + __ Cmovae(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovael_r_r: + __ Cmovae(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaeq_r_r: + __ Cmovae(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaew_m_r: + __ Cmovae(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovael_m_r: + __ Cmovae(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovaeq_m_r: + __ Cmovae(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovb & cmovbe */ + case x64::MOP_cmovbw_r_r: + __ Cmovb(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbl_r_r: + __ Cmovb(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbq_r_r: + __ Cmovb(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbw_m_r: + __ Cmovb(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbl_m_r: + __ Cmovb(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbq_m_r: + __ Cmovb(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbew_r_r: + __ Cmovbe(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbel_r_r: + __ Cmovbe(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbeq_r_r: + __ Cmovbe(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbew_m_r: + __ Cmovbe(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbel_m_r: + __ Cmovbe(kL, TransferMem(opnd0, funcUniqueId), 
TransferReg(opnd1)); + break; + case x64::MOP_cmovbeq_m_r: + __ Cmovbe(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmove */ + case x64::MOP_cmovew_r_r: + __ Cmove(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovel_r_r: + __ Cmove(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmoveq_r_r: + __ Cmove(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovew_m_r: + __ Cmove(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovel_m_r: + __ Cmove(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmoveq_m_r: + __ Cmove(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovg & cmovge */ + case x64::MOP_cmovgw_r_r: + __ Cmovg(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgl_r_r: + __ Cmovg(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgq_r_r: + __ Cmovg(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgw_m_r: + __ Cmovg(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgl_m_r: + __ Cmovg(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgq_m_r: + __ Cmovg(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgew_r_r: + __ Cmovge(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgel_r_r: + __ Cmovge(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgeq_r_r: + __ Cmovge(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgew_m_r: + __ Cmovge(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgel_m_r: + __ Cmovge(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgeq_m_r: + __ Cmovge(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovl & cmovle */ + case x64::MOP_cmovlw_r_r: + __ Cmovl(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovll_r_r: + __ Cmovl(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlq_r_r: + __ Cmovl(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlw_m_r: + __ Cmovl(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovll_m_r: + __ Cmovl(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovlq_m_r: + __ Cmovl(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovlew_r_r: + __ Cmovle(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlel_r_r: + __ Cmovle(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovleq_r_r: + __ Cmovle(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlew_m_r: + __ Cmovle(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovlel_m_r: + __ Cmovle(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovleq_m_r: + __ Cmovle(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovne */ + case x64::MOP_cmovnew_r_r: + __ Cmovne(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovnel_r_r: + __ Cmovne(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovneq_r_r: + __ Cmovne(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovnew_m_r: + 
__ Cmovne(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovnel_m_r: + __ Cmovne(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovneq_m_r: + __ Cmovne(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* call */ + case x64::MOP_callq_r: + __ Call(kQ, TransferReg(opnd0)); + break; + case x64::MOP_callq_l: + __ Call(kQ, TransferFuncName(opnd0)); + break; + case x64::MOP_callq_m: + __ Call(kQ, TransferMem(opnd0, funcUniqueId)); + break; + /* ret */ + case x64::MOP_retq: + __ Ret(); + break; + case x64::MOP_leaveq: + __ Leave(); + break; + /* imul */ + case x64::MOP_imulw_r_r: + __ Imul(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_imull_r_r: + __ Imul(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_imulq_r_r: + __ Imul(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + /* nop */ + case x64::MOP_nop: + __ Nop(); + break; + /* byte swap */ + case x64::MOP_bswapl_r: + __ Bswap(kL, TransferReg(opnd0)); + break; + case x64::MOP_bswapq_r: + __ Bswap(kQ, TransferReg(opnd0)); + break; + case x64::MOP_xchgb_r_r: + __ Xchg(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + /* pseudo instruction */ + case x64::MOP_pseudo_ret_int: + __ DealWithPseudoInst(curMd.GetName()); + break; + default: { + insn.Dump(); + LogInfo::MapleLogger() << "\n"; + FATAL(kLncFatal, "unsupported instruction"); + break; + } + } +} + +void X64Emitter::EmitFunctionHeader(CGFunc &cgFunc) { + const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol(); + uint32 symIdx = funcSymbol->GetNameStrIdx().get(); + const string &symName = funcSymbol->GetName(); + __ StoreNameIntoSymMap(symIdx, symName); + + SymbolAttr funcAttr = kSAGlobal; + if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_weak)) { + funcAttr = kSAWeak; + } else if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_local)) { + funcAttr = kSALocal; + } else if (!cgFunc.GetCG()->GetMIRModule()->IsCModule()) { + funcAttr = kSAHidden; + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_section)) { + const string &sectionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName(); + __ EmitFunctionHeader(symIdx, funcAttr, &sectionName); + } else { + __ EmitFunctionHeader(symIdx, funcAttr, nullptr); + } +} + +void X64Emitter::EmitBBHeaderLabel(CGFunc &cgFunc, LabelIdx labIdx, uint32 freq) { + uint32 funcUniqueId = cgFunc.GetUniqueID(); + /* Concatenate BB Label Name and its idx */ + string bbLabel = ".L."; + bbLabel.append(to_string(funcUniqueId)); + bbLabel.append("__"); + bbLabel.append(to_string(labIdx)); + int64 labelSymIdx = CalculateLabelSymIdx(funcUniqueId, static_cast<uint32>(labIdx)); + __ StoreNameIntoSymMap(labelSymIdx, bbLabel); + + if (cgFunc.GetCG()->GenerateVerboseCG()) { + const string &labelName = cgFunc.GetFunction().GetLabelTab()->GetName(labIdx); + /* If label name has @ as its first char, it is not from MIR */ + if (!labelName.empty() && labelName.at(0) != '@') { + __ EmitBBLabel(labelSymIdx, true, freq, &labelName); + } else { + __ EmitBBLabel(labelSymIdx, true, freq); + } + } else { + __ EmitBBLabel(labelSymIdx); + } +} + +/* Specially, emit switch table here */ +void X64Emitter::EmitJmpTable(const CGFunc &cgFunc) { + for (auto &it : cgFunc.GetEmitStVec()) { + MIRSymbol *st = it.second; + DEBUG_ASSERT(st->IsReadOnly(), "NYI"); + uint32 symIdx = st->GetNameStrIdx().get(); + const string &symName = st->GetName(); + __ StoreNameIntoSymMap(symIdx, symName); + + MIRAggConst *arrayConst = safe_cast<MIRAggConst>(st->GetKonst());
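+ /* Each constant in the read-only aggregate is a label; the loop below turns every target into a '.L.<funcUniqueId>__<labelIdx>' symbol and emits the table in one piece. */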
+ CHECK_NULL_FATAL(arrayConst); + uint32 funcUniqueId = cgFunc.GetUniqueID(); + vector<int64> labelSymIdxs; + for (size_t i = 0; i < arrayConst->GetConstVec().size(); i++) { + MIRLblConst *lblConst = safe_cast<MIRLblConst>(arrayConst->GetConstVecItem(i)); + CHECK_NULL_FATAL(lblConst); + uint32 labelIdx = lblConst->GetValue(); + string labelName = ".L." + to_string(funcUniqueId) + "__" + to_string(labelIdx); + int64 labelSymIdx = CalculateLabelSymIdx(funcUniqueId, labelIdx); + __ StoreNameIntoSymMap(labelSymIdx, labelName); + labelSymIdxs.push_back(labelSymIdx); + } + __ EmitJmpTableElem(symIdx, labelSymIdxs); + } +} + +void X64Emitter::EmitFunctionFoot(CGFunc &cgFunc) { + const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol(); + uint32 symIdx = funcSymbol->GetNameStrIdx().get(); + SymbolAttr funcAttr = kSALocal; + if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_weak)) { + funcAttr = kSAWeak; + } else if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_local)) { + funcAttr = kSALocal; + } else if (!funcSymbol->GetFunction()->GetAttr(FUNCATTR_static)) { + funcAttr = kSAGlobal; + } + __ EmitFunctionFoot(symIdx, funcAttr); +} + +uint64 X64Emitter::EmitStructure(MIRConst &mirConst, CG& cg, bool belongsToDataSec) { + uint32 subStructFieldCounts = 0; + uint64 valueSize = EmitStructure(mirConst, cg, subStructFieldCounts, belongsToDataSec); + return valueSize; +} + +uint64 X64Emitter::EmitStructure(MIRConst &mirConst, CG& cg, uint32 &subStructFieldCounts, bool belongsToDataSec) { + StructEmitInfo *sEmitInfo = cg.GetMIRModule()->GetMemPool()->New<StructEmitInfo>(); + CHECK_NULL_FATAL(sEmitInfo); + MIRType &mirType = mirConst.GetType(); + MIRAggConst &structCt = static_cast<MIRAggConst&>(mirConst); + MIRStructType &structType = static_cast<MIRStructType&>(mirType); + uint8 structPack = static_cast<uint8>(structType.GetTypeAttrs().GetPack()); + uint64 valueSize = 0; + MIRTypeKind structKind = structType.GetKind(); + /* all elements of struct. */ + uint8 num = structKind == kTypeUnion ? 1 : static_cast<uint8>(structType.GetFieldsSize()); + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + /* total size of the emitted elements. */ + uint64 sizeInByte = GetSymbolSize(structType.GetTypeIndex()); + uint32 fieldIdx = structKind == kTypeUnion ? structCt.GetFieldIdItem(0) : 1; + for (uint32 i = 0; i < num; ++i) { + MIRConst *elemConst = structKind == kTypeStruct ? structCt.GetAggConstElement(i + 1) : + structCt.GetAggConstElement(fieldIdx); + MIRType *elemType = structKind == kTypeUnion ? &(elemConst->GetType()) : structType.GetElemType(i); + MIRType *nextElemType = i != static_cast<uint32>(num - 1) ?
structType.GetElemType(i + 1) : nullptr; + uint64 elemSize = GetSymbolSize(elemType->GetTypeIndex()); + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * k8Bits; + MIRTypeKind elemKind = elemType->GetKind(); + if (elemKind == kTypeBitField) { + if (elemConst == nullptr) { + MIRIntConst *zeroFill = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *elemType); + elemConst = zeroFill; + } + pair<int32, int32> fieldOffsetPair = beCommon->GetFieldOffset(structType, fieldIdx); + uint64 fieldOffset = static_cast<uint64>(static_cast<uint32>(fieldOffsetPair.first)) * + static_cast<uint64>(charBitWidth) + static_cast<uint64>(static_cast<uint32>(fieldOffsetPair.second)); + EmitBitField(*sEmitInfo, *elemConst, nextElemType, fieldOffset); + } else { + if (elemConst != nullptr) { + if (IsPrimitiveVector(elemType->GetPrimType())) { + valueSize += EmitVector(*elemConst); + } else if (IsPrimitiveScalar(elemType->GetPrimType())) { + valueSize += EmitSingleElement(*elemConst, belongsToDataSec, true); + } else if (elemKind == kTypeArray) { + if (elemType->GetSize() != 0) { + valueSize += EmitArray(*elemConst, cg, belongsToDataSec); + } + } else if (elemKind == kTypeStruct || elemKind == kTypeClass || elemKind == kTypeUnion) { + valueSize += EmitStructure(*elemConst, cg, subStructFieldCounts, belongsToDataSec); + fieldIdx += subStructFieldCounts; + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } else { + __ EmitNull(elemSize); + } + sEmitInfo->IncreaseTotalSize(elemSize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + + if (nextElemType != nullptr && nextElemType->GetKind() != kTypeBitField) { + DEBUG_ASSERT(i < static_cast<uint32>(num - 1), "NYI"); + uint8 nextAlign = Globals::GetInstance()->GetBECommon()->GetTypeAlign(nextElemType->GetTypeIndex()); + auto fieldAttr = structType.GetFields()[i + 1].second.second; + nextAlign = fieldAttr.IsPacked() ? 1 : min(nextAlign, structPack); + DEBUG_ASSERT(nextAlign != 0, "expect non-zero"); + /* append padding: emit zeros when alignment requires it. */ + uint64 totalSize = sEmitInfo->GetTotalSize(); + uint64 psize = (totalSize % nextAlign == 0) ? 0 : (nextAlign - (totalSize % nextAlign)); + /* element is uninitialized, emit null constant. */ + if (psize != 0) { + __ EmitNull(psize); + sEmitInfo->IncreaseTotalSize(psize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + } + fieldIdx++; + } + if (structType.GetKind() == kTypeStruct) { + /* Subtract one because fieldIdx is incremented at the end of each loop iteration.
*/ + subStructFieldCounts = fieldIdx - 1; + } else if (structType.GetKind() == kTypeUnion) { + subStructFieldCounts = static_cast<uint32>(beCommon->GetStructFieldCount(structType.GetTypeIndex())); + } + + uint64 opSize = sizeInByte - sEmitInfo->GetTotalSize(); + if (opSize != 0) { + __ EmitNull(opSize); + } + return valueSize; +} + +uint64 X64Emitter::EmitVector(MIRConst &mirConst, bool belongsToDataSec) { + MIRType &mirType = mirConst.GetType(); + MIRAggConst &vecCt = static_cast<MIRAggConst&>(mirConst); + size_t uNum = vecCt.GetConstVec().size(); + uint64 valueSize = 0; + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = vecCt.GetConstVecItem(i); + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + uint64 elemSize = EmitSingleElement(*elemConst, belongsToDataSec); + valueSize += elemSize; + } else { + DEBUG_ASSERT(false, "EmitVector: should not run here"); + } + } + size_t lanes = GetVecLanes(mirType.GetPrimType()); + if (lanes > uNum) { + MIRIntConst zConst(0, vecCt.GetConstVecItem(0)->GetType()); + for (size_t i = uNum; i < lanes; i++) { + uint64 elemSize = EmitSingleElement(zConst, belongsToDataSec); + valueSize += elemSize; + } + } + return valueSize; +} + +uint64 X64Emitter::EmitArray(MIRConst &mirConst, CG& cg, bool belongsToDataSec) { + MIRType &mirType = mirConst.GetType(); + MIRAggConst &arrayCt = static_cast<MIRAggConst&>(mirConst); + MIRArrayType &arrayType = static_cast<MIRArrayType&>(mirType); + size_t uNum = arrayCt.GetConstVec().size(); + uint32 dim = arrayType.GetSizeArrayItem(0); + TyIdx elmTyIdx = arrayType.GetElemTyIdx(); + MIRType *subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(elmTyIdx); + uint64 valueSize = 0; + if (uNum == 0 && dim) { + while (subTy->GetKind() == kTypeArray) { + MIRArrayType *aSubTy = static_cast<MIRArrayType*>(subTy); + if (aSubTy->GetSizeArrayItem(0) > 0) { + dim *= (aSubTy->GetSizeArrayItem(0)); + } + elmTyIdx = aSubTy->GetElemTyIdx(); + subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(elmTyIdx); + } + } + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = arrayCt.GetConstVecItem(i); + if (IsPrimitiveVector(subTy->GetPrimType())) { + valueSize += EmitVector(*elemConst, belongsToDataSec); + } else if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (cg.GetMIRModule()->IsCModule()) { + bool strLiteral = false; + if (arrayType.GetDim() == 1) { + MIRType *ety = arrayType.GetElemType(); + if (ety->GetPrimType() == PTY_i8 || ety->GetPrimType() == PTY_u8) { + strLiteral = true; + } + } + valueSize += EmitSingleElement(*elemConst, belongsToDataSec, !strLiteral); + } else { + valueSize += EmitSingleElement(*elemConst, belongsToDataSec); + } + } else if (elemConst->GetType().GetKind() == kTypeArray) { + valueSize += EmitArray(*elemConst, cg, belongsToDataSec); + } else if (elemConst->GetType().GetKind() == kTypeStruct || + elemConst->GetType().GetKind() == kTypeClass || elemConst->GetType().GetKind() == kTypeUnion) { + valueSize += EmitStructure(*elemConst, cg); + } else if (elemConst->GetKind() == kConstAddrofFunc) { + valueSize += EmitSingleElement(*elemConst, belongsToDataSec); + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } + int64 iNum = (arrayType.GetSizeArrayItem(0) > 0) ?
+ (static_cast<int64>(arrayType.GetSizeArrayItem(0))) - uNum : 0; + if (iNum > 0) { + if (uNum > 0) { + uint64 unInSizeInByte = static_cast<uint64>(iNum) * static_cast<uint64>( + GetSymbolSize(arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())); + if (unInSizeInByte != 0) { + __ EmitNull(unInSizeInByte); + } + } else { + uint64 sizeInByte = GetSymbolSize(elmTyIdx) * dim; + __ EmitNull(sizeInByte); + } + } + return valueSize; +} + +void X64Emitter::EmitAddrofElement(MIRConst &mirConst, bool belongsToDataSec) { + MIRAddrofConst &symAddr = static_cast<MIRAddrofConst&>(mirConst); + StIdx stIdx = symAddr.GetSymbolIndex(); + MIRSymbol *symAddrSym = stIdx.IsGlobal() ? + GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) : + CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + string addrName = symAddrSym->GetName(); + if (!stIdx.IsGlobal() && symAddrSym->GetStorageClass() == kScPstatic) { + uint32 funcUniqueId = CG::GetCurCGFunc()->GetUniqueID(); + addrName += to_string(funcUniqueId); + } + uint32 symIdx = symAddrSym->GetNameStrIdx(); + int32 symAddrOfs = 0; + int32 structFieldOfs = 0; + if (symAddr.GetOffset() != 0) { + symAddrOfs = symAddr.GetOffset(); + } + if (symAddr.GetFieldID() > 1) { + MIRStructType *structType = static_cast<MIRStructType*>(symAddrSym->GetType()); + DEBUG_ASSERT(structType != nullptr, "EmitScalarConstant: non-zero fieldID for non-structure"); + structFieldOfs = Globals::GetInstance()->GetBECommon()->GetFieldOffset(*structType, symAddr.GetFieldID()).first; + } + __ StoreNameIntoSymMap(symIdx, addrName); + __ EmitAddrValue(symIdx, symAddrOfs, structFieldOfs, belongsToDataSec); +} + +uint32 X64Emitter::EmitSingleElement(MIRConst &mirConst, bool belongsToDataSec, bool isIndirect) { + MIRType &elmType = mirConst.GetType(); + uint64 elemSize = elmType.GetSize(); + MIRConstKind kind = mirConst.GetKind(); + switch (kind) { + case kConstAddrof: + EmitAddrofElement(mirConst, belongsToDataSec); + break; + case kConstAddrofFunc: { + MIRAddroffuncConst &funcAddr = static_cast<MIRAddroffuncConst&>(mirConst); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFuncTable().at(funcAddr.GetValue()); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + + uint32 symIdx = symAddrSym->GetNameStrIdx(); + const string &name = symAddrSym->GetName(); + __ StoreNameIntoSymMap(symIdx, name); + __ EmitAddrOfFuncValue(symIdx, belongsToDataSec); + break; + } + case kConstInt: { + MIRIntConst &intCt = static_cast<MIRIntConst&>(mirConst); + uint32 sizeInBits = elemSize << kLeftShift3Bits; + if (intCt.GetActualBitWidth() > sizeInBits) { + DEBUG_ASSERT(false, "actual value is larger than expected"); + } + int64 value = intCt.GetExtValue(); + __ EmitIntValue(value, elemSize, belongsToDataSec); + break; + } + case kConstLblConst: { + MIRLblConst &lbl = static_cast<MIRLblConst&>(mirConst); + uint32 labelIdx = lbl.GetValue(); + uint32 funcUniqueId = lbl.GetPUIdx(); + string labelName = ".L."
+ to_string(funcUniqueId) + "__" + to_string(labelIdx); + int64 symIdx = CalculateLabelSymIdx(funcUniqueId, labelIdx); + __ StoreNameIntoSymMap(symIdx, labelName); + __ EmitLabelValue(symIdx, belongsToDataSec); + break; + } + case kConstStrConst: { + MIRStrConst &strCt = static_cast<MIRStrConst&>(mirConst); + if (isIndirect) { + uint32 strIdx = strCt.GetValue().GetIdx(); + string strName = ".LSTR__" + to_string(strIdx); + int64 strSymIdx = CalculateStrLabelSymIdx(GlobalTables::GetGsymTable().GetSymbolTableSize(), strIdx); + stringPtr.push_back(strIdx); + __ StoreNameIntoSymMap(strSymIdx, strName); + __ EmitIndirectString(strSymIdx, belongsToDataSec); + } else { + const string &ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strCt.GetValue()); + __ EmitDirectString(ustr, belongsToDataSec); + } + break; + } + default: + FATAL(kLncFatal, "EmitSingleElement: unsupported variable kind"); + break; + } + return elemSize; +} + +void X64Emitter::EmitBitField(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset, bool belongsToDataSec) { + MIRType &mirType = mirConst.GetType(); + if (fieldOffset > structEmitInfo.GetNextFieldOffset()) { + uint16 curFieldOffset = structEmitInfo.GetNextFieldOffset() - structEmitInfo.GetCombineBitFieldWidth(); + structEmitInfo.SetCombineBitFieldWidth(fieldOffset - curFieldOffset); + EmitCombineBfldValue(structEmitInfo); + DEBUG_ASSERT(structEmitInfo.GetNextFieldOffset() <= fieldOffset, "structEmitInfo's nextFieldOffset > fieldOffset"); + structEmitInfo.SetNextFieldOffset(fieldOffset); + } + uint32 fieldSize = static_cast<MIRBitFieldType&>(mirType).GetFieldSize(); + MIRIntConst &fieldValue = static_cast<MIRIntConst&>(mirConst); + /* Truncate the size of FieldValue to the bit field size. */ + if (fieldSize < fieldValue.GetActualBitWidth()) { + fieldValue.Trunc(fieldSize); + } + /* Clear higher Bits for signed value */ + if (structEmitInfo.GetCombineBitFieldValue() != 0) { + structEmitInfo.SetCombineBitFieldValue((~(~0ULL << structEmitInfo.GetCombineBitFieldWidth())) & + structEmitInfo.GetCombineBitFieldValue()); + } + if (CGOptions::IsBigEndian()) { + uint64 beValue = static_cast<uint64>(fieldValue.GetExtValue()); + if (fieldValue.IsNegative()) { + beValue = beValue - ((beValue >> fieldSize) << fieldSize); + } + structEmitInfo.SetCombineBitFieldValue((structEmitInfo.GetCombineBitFieldValue() << fieldSize) + beValue); + } else { + structEmitInfo.SetCombineBitFieldValue((static_cast<uint64>(fieldValue.GetExtValue()) << + structEmitInfo.GetCombineBitFieldWidth()) + + structEmitInfo.GetCombineBitFieldValue()); + } + structEmitInfo.IncreaseCombineBitFieldWidth(fieldSize); + structEmitInfo.IncreaseNextFieldOffset(fieldSize); + if ((nextType == nullptr) || (nextType->GetKind() != kTypeBitField)) { + /* emit structEmitInfo->combineBitFieldValue */ + EmitCombineBfldValue(structEmitInfo); + } +} + +void X64Emitter::EmitCombineBfldValue(StructEmitInfo &structEmitInfo, bool belongsToDataSec) { + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * k8Bits; + const uint64 kGetLow8Bits = 0x00000000000000ffUL; + auto emitBfldValue = [&structEmitInfo, charBitWidth, belongsToDataSec, this](bool flag) { + while (structEmitInfo.GetCombineBitFieldWidth() > charBitWidth) { + uint8 shift = flag ? (structEmitInfo.GetCombineBitFieldWidth() - charBitWidth) : 0U; + uint64 tmp = (structEmitInfo.GetCombineBitFieldValue() >> shift) & kGetLow8Bits; + __ EmitBitFieldValue(tmp, belongsToDataSec); + structEmitInfo.DecreaseCombineBitFieldWidth(charBitWidth); + uint64 value = flag ?
+ structEmitInfo.GetCombineBitFieldValue() - (tmp << structEmitInfo.GetCombineBitFieldWidth()) : + structEmitInfo.GetCombineBitFieldValue() >> charBitWidth; + structEmitInfo.SetCombineBitFieldValue(value); + } + }; + if (CGOptions::IsBigEndian()) { + /* + * If the total number of bits in the bit field is not a multiple of 8, + * the bits must be aligned to 8 bits to prevent errors in the emit. + */ + auto width = static_cast<uint16>(RoundUp(structEmitInfo.GetCombineBitFieldWidth(), charBitWidth)); + if (structEmitInfo.GetCombineBitFieldWidth() < width) { + structEmitInfo.SetCombineBitFieldValue(structEmitInfo.GetCombineBitFieldValue() << + (width - structEmitInfo.GetCombineBitFieldWidth())); + structEmitInfo.IncreaseCombineBitFieldWidth( + static_cast<uint16>(width - structEmitInfo.GetCombineBitFieldWidth())); + } + emitBfldValue(true); + } else { + emitBfldValue(false); + } + if (structEmitInfo.GetCombineBitFieldWidth() != 0) { + uint64 value = structEmitInfo.GetCombineBitFieldValue() & kGetLow8Bits; + __ EmitBitFieldValue(value, belongsToDataSec); + } + CHECK_FATAL(charBitWidth != 0, "divide by zero"); + if ((structEmitInfo.GetNextFieldOffset() % charBitWidth) != 0) { + uint8 value = charBitWidth - static_cast<uint8>((structEmitInfo.GetNextFieldOffset() % charBitWidth)); + structEmitInfo.IncreaseNextFieldOffset(value); + } + structEmitInfo.SetTotalSize(structEmitInfo.GetNextFieldOffset() / charBitWidth); + structEmitInfo.SetCombineBitFieldValue(0); + structEmitInfo.SetCombineBitFieldWidth(0); +} + +void X64Emitter::EmitLocalVariable(CGFunc &cgFunc) { + if (!cgFunc.GetCG()->GetMIRModule()->IsCModule()) { + return; + } + /* function local pstatic initialization */ + MIRSymbolTable *lSymTab = cgFunc.GetFunction().GetSymTab(); + if (lSymTab != nullptr) { + uint32 funcUniqueId = cgFunc.GetUniqueID(); + size_t lsize = lSymTab->GetSymbolTableSize(); + vector<string> emittedLocalSym; + for (uint32 i = 0; i < lsize; i++) { + MIRSymbol *symbol = lSymTab->GetSymbolFromStIdx(i); + if (symbol != nullptr && symbol->GetStorageClass() == kScPstatic) { + const string &symbolName = symbol->GetName() + to_string(funcUniqueId); + /* Local static names can repeat; skip any name that was already emitted. */ + bool found = false; + for (auto name : emittedLocalSym) { + if (name == symbolName) { + found = true; + break; + } + } + if (found) { + continue; + } + emittedLocalSym.push_back(symbolName); + + uint32 symIdx = symbol->GetNameStrIdx().get(); + __ StoreNameIntoSymMap(symIdx, symbolName, true); + + MIRConst *ct = symbol->GetKonst(); + MIRType *ty = symbol->GetType(); + uint64 sizeInByte = GetSymbolSize(ty->GetTypeIndex()); + uint8 alignInByte = GetSymbolAlign(*symbol); + if (ct == nullptr) { + alignInByte = GetSymbolAlign(*symbol, true); + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSALocal, kSBss); + } else { + MIRTypeKind kind = ty->GetKind(); + uint64 valueSize = 0; + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSALocal, kSData); + if (kind == kTypeStruct || kind == kTypeUnion || kind == kTypeClass) { + valueSize = EmitStructure(*ct, *cgFunc.GetCG()); + } else if (IsPrimitiveVector(ty->GetPrimType())) { + valueSize = EmitVector(*ct); + } else if (kind == kTypeArray) { + valueSize = EmitArray(*ct, *cgFunc.GetCG()); + } else { + valueSize = EmitSingleElement(*ct, true); + } + __ PostEmitVariable(symIdx, kSALocal, valueSize); + } + } + } + } +} + +void X64Emitter::EmitStringPointers() { + for (uint32 strIdx : stringPtr) { + string ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strIdx); + int64 strSymIdx =
CalculateStrLabelSymIdx(GlobalTables::GetGsymTable().GetSymbolTableSize(), strIdx); + __ EmitDirectString(ustr, true, strSymIdx); + } +} + +void X64Emitter::EmitGlobalVariable(CG& cg) { + uint64 size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + for (uint64 i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused) { + continue; + } + + MIRStorageClass storageClass = mirSymbol->GetStorageClass(); + /* symbols we do not emit here. */ + if (storageClass == kScTypeInfo || storageClass == kScTypeInfoName || storageClass == kScTypeCxxAbi) { + continue; + } + + MIRType *mirType = mirSymbol->GetType(); + if (mirType == nullptr) { + continue; + } + int64 symIdx = mirSymbol->GetNameStrIdx().get(); + uint64 sizeInByte = GetSymbolSize(mirType->GetTypeIndex()); + uint8 alignInByte = GetSymbolAlign(*mirSymbol); + + /* Uninitialized global/static variables */ + if ((storageClass == kScGlobal || storageClass == kScFstatic) && !mirSymbol->IsConst()) { + if (mirSymbol->IsGctibSym()) { + continue; + } + __ StoreNameIntoSymMap(symIdx, mirSymbol->GetName()); + SectionKind secKind; + if (mirSymbol->IsThreadLocal()) { + secKind = kSTbss; + } else if (maplebe::CGOptions::IsNoCommon()) { + secKind = kSBss; + } else { + secKind = kSComm; + alignInByte = GetSymbolAlign(*mirSymbol, true); + } + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, secKind); + continue; + } + MIRTypeKind kind = mirType->GetKind(); + /* Initialized global/static variables. */ + if (storageClass == kScGlobal || (storageClass == kScFstatic && !mirSymbol->IsReadOnly())) { + MIRConst *mirConst = mirSymbol->GetKonst(); + uint64 valueSize = 0; + __ StoreNameIntoSymMap(symIdx, mirSymbol->GetName()); + if (mirSymbol->IsThreadLocal()) { + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, kSTdata); + } else { + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, kSData); + } + if (IsPrimitiveVector(mirType->GetPrimType())) { + valueSize = EmitVector(*mirConst); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + valueSize = EmitSingleElement(*mirConst, true, cg.GetMIRModule()->IsCModule()); + } else if (kind == kTypeArray) { + CHECK_FATAL(!mirSymbol->HasAddrOfValues(), "EmitGlobalVariable: need EmitConstantTable"); + valueSize = EmitArray(*mirConst, cg); + } else if (kind == kTypeStruct || kind == kTypeClass || kind == kTypeUnion) { + CHECK_FATAL(!mirSymbol->HasAddrOfValues(), "EmitGlobalVariable: need EmitConstantTable"); + EmitStructure(*mirConst, cg); + } else { + DEBUG_ASSERT(false, "EmitGlobalVariable: Unknown mirKind"); + } + __ PostEmitVariable(symIdx, kSAGlobal, valueSize); + } else if (mirSymbol->IsReadOnly()) { /* If symbol is const & static */ + MIRConst *mirConst = mirSymbol->GetKonst(); + __ StoreNameIntoSymMap(symIdx, mirSymbol->GetName()); + if (mirConst == nullptr) { + alignInByte = GetSymbolAlign(*mirSymbol, true); + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, kSComm); + } else { + SymbolAttr symAttr = kSAGlobal; + if (mirSymbol->IsWeak()) { + symAttr = kSAWeak; + } else if (storageClass == kScPstatic || (storageClass == kScFstatic && mirSymbol->sectionAttr == UStrIdx(0))) { + symAttr = kSAStatic; + } + __ EmitVariable(symIdx, sizeInByte, alignInByte, symAttr, kSRodata); + if (IsPrimitiveVector(mirType->GetPrimType())) { + (void)EmitVector(*mirConst, false); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + if 
(storageClass == kScPstatic) { + (void)EmitSingleElement(*mirConst, false, true); + } else { + (void)EmitSingleElement(*mirConst, false); + } + } else if (kind == kTypeArray) { + (void)EmitArray(*mirConst, cg, false); + } else if (kind == kTypeStruct || kind == kTypeUnion || kind == kTypeClass) { + (void)EmitStructure(*mirConst, cg); + } else { + FATAL(kLncFatal, "Unknown type in Global pstatic"); + } + } + } + } /* end proccess all mirSymbols. */ + EmitStringPointers(); +} + +void X64Emitter::Run(CGFunc &cgFunc) { + X64CGFunc &x64CGFunc = static_cast(cgFunc); + uint32 funcUniqueId = cgFunc.GetUniqueID(); + /* emit local variable(s) if exists */ + EmitLocalVariable(cgFunc); + + /* emit function header */ + EmitFunctionHeader(cgFunc); + + /* emit instructions */ + FOR_ALL_BB(bb, &x64CGFunc) { + if (bb->IsUnreachable()) { + continue; + } + + /* emit bb headers */ + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + EmitBBHeaderLabel(cgFunc, bb->GetLabIdx(), bb->GetFrequency()); + } + + FOR_BB_INSNS(insn, bb) { + EmitInsn(*insn, funcUniqueId); + } + } + + /* emit switch table if exists */ + EmitJmpTable(cgFunc); + + EmitFunctionFoot(cgFunc); + + __ ClearLocalSymMap(); +} + +bool CgEmission::PhaseRun(CGFunc &f) { + Emitter *emitter = f.GetCG()->GetEmitter(); + CHECK_NULL_FATAL(emitter); + static_cast(emitter)->Run(f); + return false; +} + +void X64Emitter::EmitDwFormAddr(const DBGDie &die, const DBGDieAttr &attr, DwAt attrName, DwTag tagName, + DebugInfo &di) { + MapleVector attrvec = die.GetAttrVec(); + if (attrName == static_cast(DW_AT_low_pc) && tagName == static_cast(DW_TAG_compile_unit)) { + __ EmitDwFormAddr(true); + } + if (attrName == static_cast(DW_AT_low_pc) && tagName == static_cast(DW_TAG_subprogram)) { + /* if decl, name should be found; if def, we try DW_AT_specification */ + DBGDieAttr *name = LFindAttribute(attrvec, static_cast(DW_AT_name)); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, static_cast(DW_AT_specification)); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di.GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), static_cast(DW_AT_name)); + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + } + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap >::iterator it = CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + /* it is a */ + __ EmitLabel(mfunc->GetPuidx(), (*it).second.first); + } else { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + __ EmitLabel(pIdx, attr.GetId()); /* maybe deadbeef */ + } + } + if (attrName == static_cast(DW_AT_low_pc) && tagName == static_cast(DW_TAG_label)) { + DBGDie *subpgm = die.GetParent(); + DEBUG_ASSERT(subpgm->GetTag() == DW_TAG_subprogram, "Label DIE should be a child of a Subprogram DIE"); + DBGDieAttr *fnameAttr = LFindAttribute(subpgm->GetAttrVec(), static_cast(DW_AT_name)); + if (!fnameAttr) { + DBGDieAttr *specAttr = LFindAttribute(subpgm->GetAttrVec(), static_cast(DW_AT_specification)); + CHECK_FATAL(specAttr, "pointer is null"); + DBGDie *twin = di.GetDie(static_cast(specAttr->GetU())); + fnameAttr = LFindAttribute(twin->GetAttrVec(), static_cast(DW_AT_name)); + } + /* todo */ + } + if (attrName == static_cast(DW_AT_high_pc)) { + if (tagName == static_cast(DW_TAG_compile_unit)) { + __ 
EmitDwFormData8(); + } + } + if (attrName != static_cast(DW_AT_high_pc) && attrName != static_cast(DW_AT_low_pc)) { + __ EmitDwFormAddr(); + } +} + + +void X64Emitter::EmitDwFormRef4(DBGDie &die, const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di) { + if (attrName == static_cast(DW_AT_type)) { + DBGDie *die0 = di.GetDie(static_cast(attr.GetU())); + if (die0->GetOffset()) { + __ EmitDwFormRef4(die0->GetOffset()); + } else { + /* unknown type, missing mplt */ + __ EmitDwFormRef4(di.GetDummyTypeDie()->GetOffset(), true); + } + } else if (attrName == static_cast(DW_AT_specification) || attrName == static_cast(DW_AT_sibling)) { + DBGDie *die0 = di.GetDie(static_cast(attr.GetU())); + DEBUG_ASSERT(die0->GetOffset(), ""); + __ EmitDwFormRef4(die0->GetOffset()); + } else if (attrName == static_cast(DW_AT_object_pointer)) { + GStrIdx thisIdx = GlobalTables::GetStrTable().GetStrIdxFromName(kDebugMapleThis); + DBGDie *that = LFindChildDieWithName(die, static_cast(DW_TAG_formal_parameter), thisIdx); + /* need to find the this or self based on the source language + what is the name for 'this' used in mapleir? + this has to be with respect to a function */ + if (that) { + __ EmitDwFormRef4(that->GetOffset()); + } else { + __ EmitDwFormRef4(attr.GetU()); + } + } else { + __ EmitDwFormRef4(attr.GetU(), false, true); + } +} + +void X64Emitter::EmitDwFormData8(const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di, + MapleVector &attrvec) { + if (attrName == static_cast(DW_AT_high_pc)) { + if (tagName == static_cast(DW_TAG_compile_unit)) { + __ EmitDwFormData8(); + } else if (tagName == static_cast(DW_TAG_subprogram)) { + DBGDieAttr *name = LFindAttribute(attrvec, static_cast(DW_AT_name)); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, static_cast(DW_AT_specification)); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di.GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), static_cast(DW_AT_name)); + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + } + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap >::iterator it = + CG::GetFuncWrapLabels().find(mfunc); + uint32 endLabelFuncPuIdx; + uint32 startLabelFuncPuIdx; + uint32 endLabelIdx; + uint32 startLabelIdx; + if (it != CG::GetFuncWrapLabels().end()) { + /* end label */ + endLabelFuncPuIdx = mfunc->GetPuidx(); + endLabelIdx = (*it).second.second; + } else { + /* maybe deadbeef */ + endLabelFuncPuIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + endLabelIdx = (*it).second.second; + } + if (it != CG::GetFuncWrapLabels().end()) { + /* start label */ + startLabelFuncPuIdx = mfunc->GetPuidx(); + startLabelIdx = (*it).second.first; + } else { + DBGDieAttr *lowpc = LFindAttribute(attrvec, static_cast(DW_AT_low_pc)); + CHECK_FATAL(lowpc != nullptr, "lowpc is null in Emitter::EmitDIAttrValue"); + /* maybe deadbeef */ + startLabelFuncPuIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + startLabelIdx = lowpc->GetId(); + } + __ EmitDwFormData8(endLabelFuncPuIdx, startLabelFuncPuIdx, endLabelIdx, startLabelIdx); + } + } else { + __ EmitDwFormData(attr.GetI(), k8Bytes); + } +} + + +void X64Emitter::EmitDIAttrValue(DBGDie &die, DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di) { + MapleVector &attrvec = die.GetAttrVec(); + 
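/*
+     * Dispatch on the DWARF form of this attribute: each DW_FORM_* value has its
+     * own encoding in .debug_info (inline string, string-table offset, fixed-width
+     * data, section offset, address, DIE reference, or expression location).
+     */
+    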
switch (attr.GetDwForm()) { + case DW_FORM_string: + __ EmitDwFormString(GlobalTables::GetStrTable().GetStringFromStrIdx(attr.GetId())); + break; + case DW_FORM_strp: + __ EmitDwFormStrp(attr.GetId(), GlobalTables::GetStrTable().StringTableSize()); + break; + case DW_FORM_data1: + __ EmitDwFormData(attr.GetI(), k1Byte); + break; + case DW_FORM_data2: + __ EmitDwFormData(attr.GetI(), k2Bytes); + break; + case DW_FORM_data4: + __ EmitDwFormData(attr.GetI(), k4Bytes); + break; + case DW_FORM_data8: + EmitDwFormData8(attr, attrName, tagName, di, attrvec); + break; + case DW_FORM_sec_offset: + if (attrName == static_cast(DW_AT_stmt_list)) { + __ EmitDwFormSecOffset(); + } + break; + case DW_FORM_addr: + EmitDwFormAddr(die, attr, attrName, tagName, di); + break; + case DW_FORM_ref4: + EmitDwFormRef4(die, attr, attrName, tagName, di); + break; + case DW_FORM_exprloc: { + DBGExprLoc *elp = attr.GetPtr(); + switch (elp->GetOp()) { + case DW_OP_call_frame_cfa: + __ EmitDwFormExprlocCfa(elp->GetOp()); + break; + case DW_OP_addr: + __ EmitDwFormExprlocAddr(elp->GetOp(), + GlobalTables::GetStrTable().GetStringFromStrIdx(static_cast(elp->GetGvarStridx())).c_str()); + break; + case DW_OP_fbreg: + __ EmitDwFormExprlocFbreg(elp->GetOp(), elp->GetFboffset(), namemangler::GetSleb128Size(elp->GetFboffset())); + break; + case DW_OP_breg0: + case DW_OP_breg1: + case DW_OP_breg2: + case DW_OP_breg3: + case DW_OP_breg4: + case DW_OP_breg5: + case DW_OP_breg6: + case DW_OP_breg7: + __ EmitDwFormExprlocBregn(elp->GetOp(), GetDwOpName(elp->GetOp())); + break; + default: + __ EmitDwFormExprloc(uintptr(elp)); + break; + } + } break; + default: + CHECK_FATAL(maple::GetDwFormName(attr.GetDwForm()) != nullptr, + "GetDwFormName return null in Emitter::EmitDIAttrValue"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(attr.GetDwForm()) << std::endl; + DEBUG_ASSERT(0, "NYI"); + } +} + + +void X64Emitter::EmitDIDebugInfoSection(DebugInfo &mirdi) { + __ EmitDIDebugInfoSectionHeader(mirdi.GetDebugInfoLength()); + /* + * 7.5.1.2 type unit header + * currently empty... 
+ * + * 7.5.2 Debugging Information Entry (DIE) + */ + X64Emitter *emitter = this; + MapleVector &abbrevVec = mirdi.GetAbbrevVec(); + ApplyInPrefixOrder(mirdi.GetCompUnit(), [&abbrevVec, &emitter, &mirdi, this](DBGDie *die) { + if (!die) { + /* emit the null entry and return */ + emitter->GetAssembler().EmitDIDebugSectionEnd(kSDebugInfo); + return; + } + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + if (verbose) { + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "GetDwTagName(die->GetTag()) return null in Emitter::EmitDIDebugInfoSection"); + } + uint32 abbrevId = die->GetAbbrevId(); + emitter->GetAssembler().EmitDIDebugInfoSectionAbbrevId(verbose, abbrevId, maple::GetDwTagName(die->GetTag()), + die->GetOffset(), die->GetSize()); + DBGAbbrevEntry *diae = LFindAbbrevEntry(abbrevVec, abbrevId); + CHECK_FATAL(diae != nullptr, "diae is null in Emitter::EmitDIDebugInfoSection"); + std::string sfile, spath; + if (diae->GetTag() == static_cast(DW_TAG_compile_unit) && sfile.empty()) { + /* get full source path from fileMap[2] */ + if (emitter->GetFileMap().size() > k2ByteSize) { /* have src file map */ + std::string srcPath = emitter->GetFileMap()[k2ByteSize]; + size_t t = srcPath.rfind("/"); + DEBUG_ASSERT(t != std::string::npos, ""); + sfile = srcPath.substr(t + 1); + spath = srcPath.substr(0, t); + } + } + + UpdateAttrAndEmit(sfile, mirdi, *diae, *die, spath); + }); +} + + +void X64Emitter::UpdateAttrAndEmit(const string& sfile, DebugInfo &mirdi, DBGAbbrevEntry &diae, DBGDie &die, + const string& spath) { + X64Emitter *emitter = this; + MapleVector &apl = diae.GetAttrPairs(); /* attribute pair list */ + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + for (size_t i = 0; i < diae.GetAttrPairs().size(); i += k2ByteSize) { + DBGDieAttr *attr = LFindAttribute(die.GetAttrVec(), DwAt(apl[i])); + CHECK_FATAL(attr != nullptr, "attr is null"); + if (!LShouldEmit(unsigned(apl[i + 1]))) { + continue; + } + + /* update DW_AT_name and DW_AT_comp_dir attrs under DW_TAG_compile_unit + to be C/C++ */ + if (!sfile.empty()) { + if (attr->GetDwAt() == static_cast(DW_AT_name)) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(sfile).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } else if (attr->GetDwAt() == static_cast(DW_AT_comp_dir)) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(spath).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } + } + emitter->GetAssembler().EmitDIFormSpecification(unsigned(apl[i + 1])); + emitter->EmitDIAttrValue(die, *attr, unsigned(apl[i]), diae.GetTag(), mirdi); + if (verbose) { + std::string dwAtName = maple::GetDwAtName(unsigned(apl[i])); + std::string dwForName = maple::GetDwFormName(unsigned(apl[i + 1])); + emitter->GetAssembler().EmitDIDwName(dwAtName, dwForName); + if (apl[i + 1] == static_cast(DW_FORM_strp) || apl[i + 1] == static_cast(DW_FORM_string)) { + emitter->GetAssembler().EmitDIDWFormStr( + GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()).c_str()); + } else if (apl[i] == static_cast(DW_AT_data_member_location)) { + emitter->GetAssembler().EmitDIDWDataMemberLocaltion(apl[i + 1], uintptr(attr)); + } + } + emitter->GetAssembler().EmitLine(); + } +} + + +void X64Emitter::EmitDIDebugAbbrevSection(DebugInfo &mirdi) { + __ EmitDIDebugAbbrevSectionHeader(); + + /* construct a list of DI abbrev entries + 1. DW_TAG_compile_unit 0x11 + 2. 
DW_TAG_subprogram 0x2e */
+    bool verbose = GetCG()->GenerateVerboseAsm();
+    for (DBGAbbrevEntry *diae : mirdi.GetAbbrevVec()) {
+        if (!diae) {
+            continue;
+        }
+        CHECK_FATAL(maple::GetDwTagName(diae->GetTag()) != nullptr,
+                    "GetDwTagName return null in X64Emitter::EmitDIDebugAbbrevSection");
+        __ EmitDIDebugAbbrevDiae(verbose, diae->GetAbbrevId(), diae->GetTag(), maple::GetDwTagName(diae->GetTag()),
+                                 diae->GetWithChildren());
+
+        MapleVector<uint32> &apl = diae->GetAttrPairs(); /* attribute pair list */
+
+        for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) {
+            CHECK_FATAL(maple::GetDwAtName(unsigned(apl[i])) != nullptr,
+                        "GetDwAtName return null in X64Emitter::EmitDIDebugAbbrevSection");
+            CHECK_FATAL(maple::GetDwFormName(unsigned(apl[i + 1])) != nullptr,
+                        "GetDwFormName return null in X64Emitter::EmitDIDebugAbbrevSection");
+            __ EmitDIDebugAbbrevDiaePairItem(verbose, apl[i], apl[i + 1], maple::GetDwAtName(unsigned(apl[i])),
+                                             maple::GetDwFormName(unsigned(apl[i + 1])));
+        }
+        /* each abbrev entry is terminated by a (0, 0) attribute/form pair */
+        __ EmitDIDebugSectionEnd(kSDebugAbbrev);
+        __ EmitDIDebugSectionEnd(kSDebugAbbrev);
+    }
+    __ EmitDIDebugSectionEnd(kSDebugAbbrev);
+}
+
+void X64Emitter::EmitDIDebugStrSection() {
+    std::vector<std::string> debugStrs;
+    std::vector<uint32> strps;
+    for (auto it : GetCG()->GetMIRModule()->GetDbgInfo()->GetStrps()) {
+        const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it);
+        (void)debugStrs.emplace_back(name);
+        (void)strps.emplace_back(it);
+    }
+    __ EmitDIDebugStrSection(strps, debugStrs, GlobalTables::GetGsymTable().GetSymbolTableSize(),
+                             GlobalTables::GetStrTable().StringTableSize());
+}
+
+void X64Emitter::EmitDebugInfo(CG& cg) {
+    if (!cg.GetCGOptions().WithDwarf()) {
+        return;
+    }
+    SetupDBGInfo(cg.GetMIRModule()->GetDbgInfo());
+    __ EmitDIHeaderFileInfo();
+    EmitDIDebugInfoSection(*(cg.GetMIRModule()->GetDbgInfo()));
+    EmitDIDebugAbbrevSection(*(cg.GetMIRModule()->GetDbgInfo()));
+    __ EmitDIDebugARangesSection();
+    __ EmitDIDebugRangesSection();
+    __ EmitDIDebugLineSection();
+    EmitDIDebugStrSection();
+}
+
+MAPLE_TRANSFORM_PHASE_REGISTER(CgEmission, cgemit)
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_isa.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_isa.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..770caf0000379b47b1fbc1f8793ccaecacdc1de8
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_isa.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ + +#include "x64_isa.h" +#include "insn.h" + +namespace maplebe { +namespace x64 { +MOperator FlipConditionOp(MOperator flippedOp) { + switch (flippedOp) { + case X64MOP_t::MOP_je_l: + return X64MOP_t::MOP_jne_l; + case X64MOP_t::MOP_jne_l: + return X64MOP_t::MOP_je_l; + case X64MOP_t::MOP_ja_l: + return X64MOP_t::MOP_jbe_l; + case X64MOP_t::MOP_jbe_l: + return X64MOP_t::MOP_ja_l; + case X64MOP_t::MOP_jae_l: + return X64MOP_t::MOP_jb_l; + case X64MOP_t::MOP_jb_l: + return X64MOP_t::MOP_jae_l; + case X64MOP_t::MOP_jg_l: + return X64MOP_t::MOP_jle_l; + case X64MOP_t::MOP_jle_l: + return X64MOP_t::MOP_jg_l; + case X64MOP_t::MOP_jge_l: + return X64MOP_t::MOP_jl_l; + case X64MOP_t::MOP_jl_l: + return X64MOP_t::MOP_jge_l; + default: + break; + } + return X64MOP_t::MOP_begin; +} + +uint32 GetJumpTargetIdx(const Insn &insn) { + CHECK_FATAL(insn.IsCondBranch() || insn.IsUnCondBranch(), "Not a jump insn"); + return kInsnFirstOpnd; +} +} /* namespace x64 */ +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_live.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..994d33a5f3d940cfd5c5f0d2f784e68d3d73ddd1 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_live.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_live.h" +#include "x64_cg.h" + +namespace maplebe { +static const std::set intParamRegSet = {RDI, RSI, RDX, RCX, R8, R9}; + +bool X64LiveAnalysis::CleanupBBIgnoreReg(regno_t reg) { + if (intParamRegSet.find(reg) != intParamRegSet.end()) { + return true; + } + return false; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_local_opt.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_local_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..866a264da20c75e41e95a1a56b8093473e09009c --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_local_opt.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
*/
+#include "x64_local_opt.h"
+#include "x64_reaching.h"
+#include "operand.h"
+#include "x64_cg.h"
+
+namespace maplebe {
+void X64LocalOpt::DoLocalCopyProp() {
+    LocalOptimizeManager optManager(*cgFunc, *GetRDInfo());
+    optManager.Optimize<CopyRegProp>();
+    optManager.Optimize<X64RedundantDefRemove>();
+}
+
+bool CopyRegProp::CheckCondition(Insn &insn) {
+    MOperator mOp = insn.GetMachineOpcode();
+    if (mOp != MOP_movb_r_r && mOp != MOP_movw_r_r && mOp != MOP_movl_r_r && mOp != MOP_movq_r_r) {
+        return false;
+    }
+    DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers");
+    DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers");
+    auto &regUse = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &regDef = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (regUse.GetRegisterNumber() == regDef.GetRegisterNumber()) {
+        return false;
+    }
+    auto &liveOutRegSet = insn.GetBB()->GetLiveOutRegNO();
+    if (liveOutRegSet.find(regDef.GetRegisterNumber()) != liveOutRegSet.end()) {
+        return false;
+    }
+    return true;
+}
+
+void CopyRegProp::Optimize(BB &bb, Insn &insn) {
+    InsnSet useInsnSet;
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    auto &regDef = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    reachingDef->FindRegUseBetweenInsn(regDef.GetRegisterNumber(), nextInsn, bb.GetLastInsn(), useInsnSet);
+    auto &replaceOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    for (Insn *tInsn : useInsnSet) {
+        std::vector<Insn*> defInsnVec = reachingDef->FindRegDefBetweenInsn(replaceOpnd.GetRegisterNumber(),
+                                                                           &insn, tInsn, false, false);
+        /* stop as soon as the source register is redefined between the copy and this use */
+        if (!defInsnVec.empty()) {
+            break;
+        }
+        propagateOperand(*tInsn, regDef, replaceOpnd);
+    }
+}
+
+bool CopyRegProp::propagateOperand(Insn &insn, RegOperand &oldOpnd, RegOperand &replaceOpnd) {
+    bool propagateSuccess = false;
+    uint32 opndNum = insn.GetOperandSize();
+    const InsnDesc *md = insn.GetDesc();
+    if (insn.IsShift() && oldOpnd.GetRegisterNumber() == x64::RCX) {
+        return false;
+    }
+    if (insn.GetMachineOpcode() == MOP_pseudo_ret_int) {
+        return false;
+    }
+    for (uint32 i = 0; i < opndNum; i++) {
+        Operand &opnd = insn.GetOperand(i);
+        if (opnd.IsList()) {
+            /* list operands are used by call, which can not be propagated */
+            continue;
+        }
+
+        auto *regProp = md->opndMD[i];
+        if (regProp->IsUse() && !regProp->IsDef() && opnd.IsRegister()) {
+            RegOperand &regOpnd = static_cast<RegOperand&>(opnd);
+            if (RegOperand::IsSameReg(regOpnd, oldOpnd)) {
+                insn.SetOperand(i, replaceOpnd);
+                propagateSuccess = true;
+            }
+        }
+    }
+    return propagateSuccess;
+}
+
+void X64RedundantDefRemove::Optimize(BB &bb, Insn &insn) {
+    const InsnDesc *md = insn.GetDesc();
+    RegOperand *regDef = nullptr;
+    uint32 opndNum = insn.GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = insn.GetOperand(i);
+        auto *opndDesc = md->opndMD[i];
+        if (opndDesc->IsRegDef()) {
+            regDef = static_cast<RegOperand*>(&opnd);
+        }
+    }
+    if (regDef == nullptr) {
+        /* no register definition found, nothing to remove */
+        return;
+    }
+    InsnSet useInsnSet;
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    reachingDef->FindRegUseBetweenInsn(regDef->GetRegisterNumber(),
+                                       nextInsn, bb.GetLastInsn(), useInsnSet);
+    if (useInsnSet.empty()) {
+        bb.RemoveInsn(insn);
+    }
+}
+}
+
diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d53f8a2f19fe88014863981ae82d6ea43ff49e6f
---
/dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp @@ -0,0 +1,293 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_memlayout.h" +#include "x64_cgfunc.h" +#include "becommon.h" +#include "mir_nodes.h" +#include "x64_call_conv.h" +#include "cg.h" + +namespace maplebe { +using namespace maple; + +uint32 X64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) { + /* instantiate a parm locator */ + X64CallConvImpl parmLocator(be); + uint32 sizeOfArgsToStkPass = 0; + size_t i = 0; + /* An indirect call's first operand is the invocation target */ + if (isIcall) { + ++i; + } + + aggCopySize = 0; + for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) { + BaseNode *opnd = stmt.Opnd(i); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(opnd->GetPrimType())]; + } else { + Opcode opndOpcode = opnd->GetOpCode(); + DEBUG_ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread"); + if (opndOpcode == OP_dread) { + DreadNode *dread = static_cast(opnd); + MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } + } + } else { + /* OP_iread */ + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + DEBUG_ASSERT(ty->GetKind() == kTypePointer, "expect pointer"); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } + } + } + } + CCLocInfo ploc; + aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { + continue; /* passed in register, so no effect on actual area */ + } + sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize()); + } + return sizeOfArgsToStkPass; +} + +void X64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const { + align = be.GetTypeAlign(typeIdx); + size = static_cast(be.GetTypeSize(typeIdx)); +} + +void X64MemLayout::LayoutVarargParams() { + uint32 nIntRegs = 0; + uint32 nFpRegs = 0; + X64CallConvImpl parmlocator(be); + CCLocInfo ploc; + MIRFunction *func = 
mirFunction;
+    if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) {
+        for (uint32 i = 0; i < func->GetFormalCount(); i++) {
+            if (i == 0) {
+                if (be.HasFuncReturnType(*func)) {
+                    TyIdx tidx = be.GetFuncReturnType(*func);
+                    if (be.GetTypeSize(tidx.GetIdx()) <= k16ByteSize) {
+                        continue;
+                    }
+                }
+            }
+            MIRType *ty = func->GetNthParamType(i);
+            parmlocator.LocateNextParm(*ty, ploc, i == 0, func);
+            if (ploc.reg0 != kRinvalid) {
+                /* The range here is R0 to R15. However, not all registers in the range are parameter registers.
+                 * If necessary later, you can add parameter register checks. */
+                if (ploc.reg0 >= R0 && ploc.reg0 <= R15) {
+                    nIntRegs++;
+                } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) {
+                    nFpRegs++;
+                }
+            }
+            if (ploc.reg1 != kRinvalid) {
+                if (ploc.reg1 >= R0 && ploc.reg1 <= R15) {
+                    nIntRegs++;
+                } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) {
+                    nFpRegs++;
+                }
+            }
+            if (ploc.reg2 != kRinvalid) {
+                if (ploc.reg2 >= R0 && ploc.reg2 <= R15) {
+                    nIntRegs++;
+                } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) {
+                    nFpRegs++;
+                }
+            }
+            if (ploc.reg3 != kRinvalid) {
+                if (ploc.reg3 >= R0 && ploc.reg3 <= R15) {
+                    nIntRegs++;
+                } else if (ploc.reg3 >= V0 && ploc.reg3 <= V7) {
+                    nFpRegs++;
+                }
+            }
+        }
+
+        SetSizeOfGRSaveArea((k6BitSize - nIntRegs) * GetPointerSize());
+        SetSizeOfVRSaveArea((k6BitSize - nFpRegs) * GetPointerSize() * k2ByteSize);
+    }
+}
+
+void X64MemLayout::LayoutFormalParams() {
+    X64CallConvImpl parmLocator(be);
+    CCLocInfo ploc;
+    for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
+        MIRSymbol *sym = mirFunction->GetFormal(i);
+        uint32 stIndex = sym->GetStIndex();
+        X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<X64SymbolAlloc>();
+        SetSymAllocInfo(stIndex, *symLoc);
+        if (i == 0) {
+            // The helper's name is not appropriate here: what it should determine is
+            // whether the function returns a structure smaller than 16 bytes. In that
+            // case the first formal is merely a structure-return placeholder with no
+            // practical significance.
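+            // For reference (SysV x86-64): a by-value struct return that does not fit in
+            // two registers, e.g. `struct S { char b[32]; };`, comes back through a hidden
+            // pointer passed in %rdi, which is what such a placeholder formal models.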
+ if (be.HasFuncReturnType(*mirFunction)) { + symLoc->SetMemSegment(GetSegArgsRegPassed()); + symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); + continue; + } + } + + MIRType *ty = mirFunction->GetNthParamType(i); + uint32 ptyIdx = ty->GetTypeIndex(); + parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); + uint32 size = 0; + uint32 align = 0; + if (ploc.reg0 != kRinvalid) { + if (!sym->IsPreg()) { + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsRegPassed()); + if (ty->GetPrimType() == PTY_agg && be.GetTypeSize(ptyIdx) > k4ByteSize) { + /* struct param aligned on 8 byte boundary unless it is small enough */ + align = GetPointerSize(); + } + segArgsRegPassed.SetSize(static_cast(RoundUp(segArgsRegPassed.GetSize(), align))); + symLoc->SetOffset(segArgsRegPassed.GetSize()); + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size); + } + } else { + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsStkPassed()); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), align))); + symLoc->SetOffset(segArgsStkPassed.GetSize()); + segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize()))); + } + } +} + +void X64MemLayout::LayoutLocalVariables() { + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); + if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + CHECK_FATAL(!symLoc->IsRegister(), "expect not register"); + + symLoc->SetMemSegment(segLocals); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + uint32 align = be.GetTypeAlign(tyIdx); + if (ty->GetPrimType() == PTY_agg && align < k8BitSize) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); + } + symLoc->SetOffset(segLocals.GetSize()); + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + } +} + +void X64MemLayout::AssignSpillLocationsToPseudoRegisters() { + MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab(); + + /* BUG: n_regs include index 0 which is not a valid preg index. 
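+     * The loop below therefore starts at i = 1 and leaves spillLocTable[0]
+     * unused; pseudo-register index 0 must never be spilled through it.
+    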
*/ + size_t nRegs = pregTab->Size(); + spillLocTable.resize(nRegs); + for (size_t i = 1; i < nRegs; ++i) { + PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segLocals); + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); + symLoc->SetOffset(segLocals.GetSize()); + MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType]; + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + spillLocTable[i] = symLoc; + } +} + +SymbolAlloc *X64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segSpillReg); + uint32 regSize = GetPointerSize(); + segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); + symLoc->SetOffset(segSpillReg.GetSize()); + segSpillReg.SetSize(segSpillReg.GetSize() + regSize); + SetSpillRegLocInfo(vrNum, *symLoc); + return symLoc; +} + +void X64MemLayout::LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize) { + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + AssignSpillLocationsToPseudoRegisters(); + } + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), GetPointerSize()))); +} + +void X64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) { + LayoutVarargParams(); + LayoutFormalParams(); + + // Need to be aligned ? + segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize())); + segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize())); + + /* allocate the local variables in the stack */ + LayoutLocalVariables(); + LayoutReturnRef(structCopySize, maxParmStackSize); + + // Need to adapt to the cc interface. + structCopySize = 0; + // Scenes with more than 6 parameters are not yet enabled. + maxParmStackSize = 0; + + cgFunc->SetUseFP(cgFunc->UseFP() || static_cast(StackFrameSize()) > kMaxPimm32); +} + +uint64 X64MemLayout::StackFrameSize() const { + uint64 total = locals().GetSize() + segArgsRegPassed.GetSize() + segArgsToStkPass.GetSize() + + segGrSaveArea.GetSize() + segVrSaveArea.GetSize() + segSpillReg.GetSize() + + cgFunc->GetFunction().GetFrameReseverdSlot(); // frame reserved slot + return RoundUp(total, stackPtrAlignment); +} + +int32 X64MemLayout::GetGRSaveAreaBaseLoc() { + int32 total = static_cast(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment)); + return total; +} + +int32 X64MemLayout::GetVRSaveAreaBaseLoc() { + int32 total = static_cast(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment) + + RoundUp(GetSizeOfVRSaveArea(), stackPtrAlignment)); + return total; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e3299646341ad308e93cf0ec53cb4a14dc06b28e --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp @@ -0,0 +1,161 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_optimize_common.h" +#include "x64_cgfunc.h" +#include "cgbb.h" + +namespace maplebe { +void X64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) { + Insn *jmpInsn = bb.GetLastInsn(); + if (bb.GetKind() == BB::kBBIgoto) { + CHECK_FATAL(targetOperand.IsLabel(), "NIY"); + CHECK_FATAL(false, "NIY"); + } + jmpInsn->SetOperand(x64::GetJumpTargetIdx(*jmpInsn), targetOperand); +} + +void X64InsnVisitor::ModifyJumpTarget(LabelIdx targetLabel, BB &bb) { + std::string lableName = ".L." + std::to_string(GetCGFunc()->GetUniqueID()) + + "__" + std::to_string(targetLabel); + ModifyJumpTarget(GetCGFunc()->GetOpndBuilder()->CreateLabel(lableName.c_str(), targetLabel), bb); +} + +void X64InsnVisitor::ModifyJumpTarget(BB &newTarget, BB &bb) { + ModifyJumpTarget(newTarget.GetLastInsn()->GetOperand( + x64::GetJumpTargetIdx(*newTarget.GetLastInsn())), bb); +} + +Insn *X64InsnVisitor::CloneInsn(Insn &originalInsn) { + MemPool *memPool = const_cast(CG::GetCurCGFunc()->GetMemoryPool()); + if (originalInsn.IsTargetInsn()) { + if (!originalInsn.IsVectorOp()) { + return memPool->Clone(originalInsn); + } else { + auto *insn = memPool->Clone(*static_cast(&originalInsn)); + insn->SetRegSpecList(static_cast(originalInsn).GetRegSpecList()); + return insn; + } + } else if (originalInsn.IsCfiInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } else if (originalInsn.IsDbgInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } + if (originalInsn.IsComment()) { + return memPool->Clone(originalInsn); + } + CHECK_FATAL(false, "Cannot clone"); + return nullptr; +} + +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label operand index from the given instruction. + * Note: MOP_jmp_m, MOP_jmp_r is a jump instruction, but the target is unknown at compile time. 
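+ * For example, `jne .L.<funcUniqueId>__<labelIdx>` carries its label operand at
+ * kInsnFirstOpnd, whereas the register-indirect `jmp *%rax` (MOP_jmp_r) has no
+ * label operand at all and fails the DEBUG_ASSERT below, returning 0.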
+ */ +LabelIdx X64InsnVisitor::GetJumpLabel(const Insn &insn) const { + uint32 operandIdx = x64::GetJumpTargetIdx(insn); + if (insn.GetOperand(operandIdx).IsLabelOpnd()) { + return static_cast(insn.GetOperand(operandIdx)).GetLabelIndex(); + } + DEBUG_ASSERT(false, "Operand is not label"); + return 0; +} + +bool X64InsnVisitor::IsCompareInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case MOP_cmpb_r_r: + case MOP_cmpb_m_r: + case MOP_cmpb_i_r: + case MOP_cmpb_r_m: + case MOP_cmpb_i_m: + case MOP_cmpw_r_r: + case MOP_cmpw_m_r: + case MOP_cmpw_i_r: + case MOP_cmpw_r_m: + case MOP_cmpw_i_m: + case MOP_cmpl_r_r: + case MOP_cmpl_m_r: + case MOP_cmpl_i_r: + case MOP_cmpl_r_m: + case MOP_cmpl_i_m: + case MOP_cmpq_r_r: + case MOP_cmpq_m_r: + case MOP_cmpq_i_r: + case MOP_cmpq_r_m: + case MOP_cmpq_i_m: + case MOP_testq_r_r: + return true; + default: + return false; + } +} + +bool X64InsnVisitor::IsCompareAndBranchInsn(const Insn &insn) const { + return false; +} + +bool X64InsnVisitor::IsAddOrSubInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case MOP_addb_r_r: + case MOP_addw_r_r: + case MOP_addl_r_r: + case MOP_addq_r_r: + case MOP_addb_m_r: + case MOP_addw_m_r: + case MOP_addl_m_r: + case MOP_addq_m_r: + case MOP_addb_i_r: + case MOP_addw_i_r: + case MOP_addl_i_r: + case MOP_addq_i_r: + case MOP_addb_r_m: + case MOP_addw_r_m: + case MOP_addl_r_m: + case MOP_addq_r_m: + case MOP_addb_i_m: + case MOP_addw_i_m: + case MOP_addl_i_m: + case MOP_addq_i_m: + case MOP_subb_r_r: + case MOP_subw_r_r: + case MOP_subl_r_r: + case MOP_subq_r_r: + case MOP_subb_m_r: + case MOP_subw_m_r: + case MOP_subl_m_r: + case MOP_subq_m_r: + case MOP_subb_i_r: + case MOP_subw_i_r: + case MOP_subl_i_r: + case MOP_subq_i_r: + case MOP_subb_r_m: + case MOP_subw_r_m: + case MOP_subl_r_m: + case MOP_subq_r_m: + case MOP_subb_i_m: + case MOP_subw_i_m: + case MOP_subl_i_m: + case MOP_subq_i_m: + return true; + default: + return false; + } +} + +RegOperand *X64InsnVisitor::CreateVregFromReg(const RegOperand &pReg) { + return &GetCGFunc()->GetOpndBuilder()->CreateVReg(pReg.GetRegisterNumber(), + pReg.GetSize(), pReg.GetRegisterType()); +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_peep.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5a0da2464dbdc3f945ec4a899a9088225938f847 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_peep.cpp @@ -0,0 +1,75 @@ + /* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "x64_peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#include "cg_option.h" +#include "x64_cg.h" + +namespace maplebe { +void X64CGPeepHole::Run() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (ssaInfo == nullptr) { + DoNormalOptimize(*bb, *insn); + } + } + } +} + +bool X64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { + CHECK_FATAL(false, "x64 does not support ssa optimize"); + return false; +} + +bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) { + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + return true; + } + return false; +} + +void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { + /* remove mov x0,x0 when it cast i32 to i64 */ + if (CheckCondition(insn)) { + bb.RemoveInsn(insn); + } +} + +void X64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn); + switch (thisMop) { + case MOP_movb_r_r: + case MOP_movw_r_r: + case MOP_movl_r_r: + case MOP_movq_r_r: { + manager->NormalPatternOpt(true); + break; + } + default: + break; + } + } +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_proepilog.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..05e3759a2c0853e94d4112195b9f082d146d454d --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_proepilog.cpp @@ -0,0 +1,201 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_proepilog.h" +#include "x64_memlayout.h" +#include "x64_isa.h" +#include "isel.h" +#include "x64_cg.h" + +namespace maplebe { +using namespace maple; +/* + * If a function without callee-saved register, and end with a function call, + * then transfer bl/blr to b/br. + * Return value: true if function do not need Prologue/Epilogue. false otherwise. 
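+ * (bl/blr are AArch64 mnemonics; the x86-64 analogue would be emitting `jmp f`
+ * instead of `call f; ret`. The implementation below is still a stub:
+ * TailCallOpt() always returns false and NeedProEpilog() always returns true,
+ * so every function keeps its prologue/epilogue.)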
+ */ +bool X64GenProEpilog::TailCallOpt() { + return false; +} + +bool X64GenProEpilog::NeedProEpilog() { + return true; +} +void X64GenProEpilog::GenerateCalleeSavedRegs(bool isPush) { + X64CGFunc &x64cgFunc = static_cast(cgFunc); + const auto &calleeSavedRegs = x64cgFunc.GetCalleeSavedRegs(); + if (calleeSavedRegs.empty()) { + return; + } + /* CalleeSave(0) = -(FrameSize + CalleeReg - ArgsStk) */ + X64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int64 offset = -(memLayout->StackFrameSize() + static_cast(cgFunc).SizeOfCalleeSaved() - + memLayout->SizeOfArgsToStackPass()); + RegOperand &baseReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + for (const auto ® : calleeSavedRegs) { + RegType regType = IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + uint32 regByteSize = IsGPRegister(reg) ? kIntregBytelen : kFpregBytelen; + uint32 regSize = regByteSize * kBitsPerByte; + DEBUG_ASSERT((regSize == k32BitSize || regSize == k64BitSize), "only supported 32/64-bits"); + RegOperand &calleeReg = cgFunc.GetOpndBuilder()->CreatePReg(reg, regSize, regType); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(baseReg, offset, regSize); + if (isPush) { + GeneratePushCalleeSavedRegs(calleeReg, memOpnd, regSize); + } else { + GeneratePopCalleeSavedRegs(calleeReg, memOpnd, regSize); + } + offset += regByteSize; + } +} + +void X64GenProEpilog::GeneratePushCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize) { + MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_r_m : x64::MOP_movq_r_m; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); +} + +void X64GenProEpilog::GeneratePopCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize) { + MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_m_r : x64::MOP_movq_m_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + copyInsn.AddOpndChain(memOpnd).AddOpndChain(regOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); +} + +void X64GenProEpilog::GeneratePushUnnamedVarargRegs() { + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + X64MemLayout *memlayout = static_cast(cgFunc.GetMemlayout()); + uint8 size = GetPointerSize(); + uint32 dataSizeBits = size * kBitsPerByte; + int64 offset = -memlayout->GetGRSaveAreaBaseLoc(); + if (memlayout->GetSizeOfGRSaveArea() % kX64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
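(The GR save area is a whole number of 8-byte slots, so—assuming the usual
+         16-byte kX64StackPtrAlignment—it can be off by one slot; this pad skips it.)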
Hole between VR and GR area */ + } + uint32 start_regno = k6BitSize - (memlayout->GetSizeOfGRSaveArea() / size); + DEBUG_ASSERT(start_regno <= k6BitSize, "Incorrect starting GR regno for GR Save Area"); + + /* Parameter registers in x86: %rdi, %rsi, %rdx, %rcx, %r8, %r9 */ + std::vector paramRegs = {RDI, RSI, RDX, RCX, R8, R9}; + for (uint32 i = start_regno; i < paramRegs.size(); i++) { + MOperator mMovrmOp = x64::MOP_movq_r_m; + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits); + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + RegOperand ®Opnd = cgFunc.GetOpndBuilder()->CreatePReg(paramRegs[i], k64BitSize, kRegTyInt); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); + offset += size; + } + + if (!CGOptions::UseGeneralRegOnly()) { + offset = -memlayout->GetVRSaveAreaBaseLoc(); + start_regno = k6BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize)); + DEBUG_ASSERT(start_regno <= k6BitSize, "Incorrect starting GR regno for VR Save Area"); + for (uint32 i = start_regno + static_cast(V0); i < static_cast(V6); i++) { + MOperator mMovrmOp = x64::MOP_movq_r_m; + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits); + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + RegOperand ®Opnd = cgFunc.GetOpndBuilder()->CreatePReg(static_cast(i), k64BitSize, kRegTyInt); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + + cgFunc.GetCurBB()->AppendInsn(copyInsn); + offset += (size * k2BitSize); + } + } + } +} + +void X64GenProEpilog::GenerateProlog(BB &bb) { + auto &x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = cgFunc.GetCurBB(); + x64CGFunc.GetDummyBB()->ClearInsns(); + x64CGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*x64CGFunc.GetDummyBB()); + + /* push %rbp */ + MOperator mPushrOp = x64::MOP_pushq_r; + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPushrOp, X64CG::kMd[mPushrOp]); + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + pushInsn.AddOpndChain(opndFpReg); + cgFunc.GetCurBB()->AppendInsn(pushInsn); + + /* mov %rsp, %rbp */ + MOperator mMovrrOp = x64::MOP_movq_r_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrrOp, X64CG::kMd[mMovrrOp]); + RegOperand &opndSpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt); + copyInsn.AddOpndChain(opndSpReg).AddOpndChain(opndFpReg); + cgFunc.GetCurBB()->AppendInsn(copyInsn); + + /* sub $framesize, %rsp */ + if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) { + MOperator mSubirOp = x64::MOP_subq_i_r; + Insn &subInsn = cgFunc.GetInsnBuilder()->BuildInsn(mSubirOp, X64CG::kMd[mSubirOp]); + auto *memLayout = static_cast(cgFunc.GetMemlayout()); + int64 trueFrameSize = memLayout->StackFrameSize() + + static_cast(cgFunc).SizeOfCalleeSaved(); + ImmOperand &opndImm = cgFunc.GetOpndBuilder()->CreateImm(k32BitSize, trueFrameSize); + subInsn.AddOpndChain(opndImm).AddOpndChain(opndSpReg); + cgFunc.GetCurBB()->AppendInsn(subInsn); + } + + GenerateCalleeSavedRegs(true); + GeneratePushUnnamedVarargRegs(); + + bb.InsertAtBeginning(*x64CGFunc.GetDummyBB()); + x64CGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +void 
X64GenProEpilog::GenerateEpilog(BB &bb) {
+    auto &x64CGFunc = static_cast<X64CGFunc&>(cgFunc);
+    BB *formerCurBB = cgFunc.GetCurBB();
+    x64CGFunc.GetDummyBB()->ClearInsns();
+    x64CGFunc.GetDummyBB()->SetIsProEpilog(true);
+    cgFunc.SetCurBB(*x64CGFunc.GetDummyBB());
+
+    GenerateCalleeSavedRegs(false);
+
+    if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) {
+        /*
+         * leave is equivalent to:
+         *   mov %rbp, %rsp
+         *   pop %rbp
+         */
+        MOperator mLeaveOp = x64::MOP_leaveq;
+        Insn &leaveInsn = cgFunc.GetInsnBuilder()->BuildInsn(mLeaveOp, X64CG::kMd[mLeaveOp]);
+        cgFunc.GetCurBB()->AppendInsn(leaveInsn);
+    } else {
+        /* pop %rbp */
+        MOperator mPopOp = x64::MOP_popq_r;
+        Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPopOp, X64CG::kMd[mPopOp]);
+        RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
+        popInsn.AddOpndChain(opndFpReg);
+        cgFunc.GetCurBB()->AppendInsn(popInsn);
+    }
+    /* ret */
+    MOperator mRetOp = x64::MOP_retq;
+    Insn &retInsn = cgFunc.GetInsnBuilder()->BuildInsn(mRetOp, X64CG::kMd[mRetOp]);
+    cgFunc.GetCurBB()->AppendInsn(retInsn);
+
+    bb.AppendBBInsns(*x64CGFunc.GetDummyBB());
+    x64CGFunc.GetDummyBB()->SetIsProEpilog(false);
+    cgFunc.SetCurBB(*formerCurBB);
+}
+
+void X64GenProEpilog::Run() {
+    GenerateProlog(*(cgFunc.GetFirstBB()));
+    GenerateEpilog(*(cgFunc.GetLastBB()));
+}
+} /* namespace maplebe */
diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_reaching.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_reaching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..011ace66cef03e6baad9144b6f434a32af34ea63
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_reaching.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "x64_reaching.h"
+#include "x64_cg.h"
+#include "insn.h"
+#include "isa.h"
+namespace maplebe {
+/* find insns using the register between startInsn and endInsn;
+* startInsn and endInsn must be in the same BB.
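+* Returns true if the scan stopped early because regNO was redefined, killed
+* across a call (caller-saved), or implicitly consumed by a div (RAX); all
+* uses found before that point are collected into regUseInsnSet.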
+*/
+bool X64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn,
+                                                  Insn *endInsn, InsnSet &regUseInsnSet) const {
+    bool findFinish = false;
+    if (startInsn == nullptr || endInsn == nullptr) {
+        return findFinish;
+    }
+    DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB");
+    for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        /* if insn is a call and regNO is a caller-saved register, regNO will not be used later */
+        if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) {
+            findFinish = true;
+        }
+
+        if (IsDiv(*insn) && regNO == x64::RAX) {
+            /* div insns use rax implicitly */
+            findFinish = true;
+        }
+
+        const InsnDesc *md = insn->GetDesc();
+        uint32 opndNum = insn->GetOperandSize();
+        for (uint32 i = 0; i < opndNum; ++i) {
+            Operand &opnd = insn->GetOperand(i);
+            /* handle def or def-use */
+            auto *regProp = md->opndMD[i];
+            if (regProp->IsDef() && opnd.IsRegister() &&
+                (static_cast<RegOperand&>(opnd).GetRegisterNumber() == regNO)) {
+                findFinish = true;
+            }
+
+            if (opnd.IsList()) {
+                auto &listOpnd = static_cast<ListOperand&>(opnd);
+                for (auto listElem : listOpnd.GetOperands()) {
+                    RegOperand *regOpnd = static_cast<RegOperand*>(listElem);
+                    DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand");
+                    if (regNO == regOpnd->GetRegisterNumber()) {
+                        (void)regUseInsnSet.insert(insn);
+                    }
+                }
+                continue;
+            }
+            if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) {
+                continue;
+            }
+
+            /* handle use */
+            if (opnd.IsMemoryAccessOperand()) {
+                auto &memOpnd = static_cast<MemOperand&>(opnd);
+                RegOperand *base = memOpnd.GetBaseRegister();
+                RegOperand *index = memOpnd.GetIndexRegister();
+                if ((base != nullptr && base->GetRegisterNumber() == regNO) ||
+                    (index != nullptr && index->GetRegisterNumber() == regNO)) {
+                    (void)regUseInsnSet.insert(insn);
+                }
+            } else if (opnd.IsConditionCode()) {
+                Operand &rflagOpnd = cgFunc->GetOrCreateRflag();
+                RegOperand &rflagReg = static_cast<RegOperand&>(rflagOpnd);
+                if (rflagReg.GetRegisterNumber() == regNO) {
+                    (void)regUseInsnSet.insert(insn);
+                }
+            } else if (opnd.IsRegister() && (static_cast<RegOperand&>(opnd).GetRegisterNumber() == regNO)) {
+                (void)regUseInsnSet.insert(insn);
+            }
+        }
+        if (findFinish) {
+            break;
+        }
+    }
+    return findFinish;
+}
+
+std::vector<Insn*> X64ReachingDefinition::FindRegDefBetweenInsnGlobal(uint32 regNO,
+    Insn *startInsn, Insn *endInsn) const {
+    CHECK_FATAL(false, "x64_reaching analysis not implemented yet!");
+    return {};
+}
+
+std::vector<Insn*> X64ReachingDefinition::FindMemDefBetweenInsn(uint32 offset,
+    const Insn *startInsn, Insn *endInsn) const {
+    CHECK_FATAL(false, "x64_reaching analysis not implemented yet!");
+    return {};
+}
+
+bool X64ReachingDefinition::FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB* movBB) const {
+    CHECK_FATAL(false, "x64_reaching analysis not implemented yet!");
+    return false;
+}
+
+bool X64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn,
+                                                  InsnSet &useInsnSet) const {
+    CHECK_FATAL(false, "x64_reaching analysis not implemented yet!");
+    return false;
+}
+
+bool X64ReachingDefinition::HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) {
+    CHECK_FATAL(false, "x64_reaching analysis not implemented yet!");
+    return false;
+}
+
+bool X64ReachingDefinition::DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO,
+                                                   std::vector<bool> &visitedBB) const {
+    CHECK_FATAL(false, "x64_reaching analysis not
implemented yet!"); + return false; +} + +InsnSet X64ReachingDefinition::FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +InsnSet X64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +InsnSet X64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +bool X64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::InitStartGen() { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::InitEhDefine(BB &bb) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllCallerSavedRegs(BB &bb, Insn &insn) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +bool X64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &startInsn, + const Insn &endInsn, regno_t regNO) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::AddRetPseudoInsn(BB &bb) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +bool X64ReachingDefinition::IsCallerSavedReg(uint32 regNO) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +int32 X64ReachingDefinition::GetStackSize() const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return 0; +}; + +void X64ReachingDefinition::AddRetPseudoInsns() { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +}; + +/* reg killed killed by call insn */ +bool X64ReachingDefinition::IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const { + return x64::IsCallerSaveReg((X64reg)regNO); +} + +bool X64ReachingDefinition::IsDiv(const Insn &insn) const { + MOperator 
mOp = insn.GetMachineOpcode(); + return (MOP_idivw_r <= mOp && mOp <= MOP_divq_m); +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_reg_info.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_reg_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fa45957371187dc488a2a3c624665e15ffda6618 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_reg_info.cpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "becommon.h" +#include "x64_cgfunc.h" +#include "x64_reg_info.h" + +namespace maplebe { +using namespace maple; +using namespace x64; +void X64RegInfo::Init() { + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + /* when yieldpoint is enabled, the RYP(R12) can not be used. */ + if (IsYieldPointReg(static_cast(regNO))) { + continue; + } + if (!x64::IsAvailableReg(static_cast(regNO))) { + continue; + } + if (x64::IsGPRegister(static_cast(regNO))) { + AddToIntRegs(regNO); + } else { + AddToFpRegs(regNO); + } + AddToAllRegs(regNO); + } + return; +} + +void X64RegInfo::SaveCalleeSavedReg(MapleSet savedRegs) { + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + for (auto reg: savedRegs) { + x64CGFunc->AddtoCalleeSaved(static_cast(reg)); + } +} + +bool X64RegInfo::IsSpecialReg(regno_t regno) const { + X64reg reg = static_cast(regno); + if ((reg == RBP) || (reg == RSP)) { + return true; + } + + /* when yieldpoint is enabled, the dedicated register(RYP) can not be allocated. */ + if (IsYieldPointReg(reg)) { + return true; + } + return false; +} + +bool X64RegInfo::IsCalleeSavedReg(regno_t regno) const { + return x64::IsCalleeSavedReg(static_cast(regno)); +} + +bool X64RegInfo::IsYieldPointReg(regno_t regno) const { + return false; +} + +bool X64RegInfo::IsUnconcernedReg(regno_t regNO) const { + /* when yieldpoint is enabled, the RYP(R12) can not be used. 
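+ Unconcerned registers are simply skipped by register allocation, i.e. no
+ live ranges are built for them.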
*/ + if (IsYieldPointReg(static_cast(regNO))) { + return true; + } + return false; +} + +bool X64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + return IsUnconcernedReg(regNO); +} + +void X64RegInfo::Fini() { +} + +ListOperand* X64RegInfo::CreateListOperand() { + CHECK_FATAL(false, "CreateListOperand, unsupported"); + return nullptr; +} + +Insn *X64RegInfo::BuildMovInstruction(Operand &opnd0, Operand &opnd1) { + CHECK_FATAL(false, "BuildMovInstruction, unsupported"); + return nullptr; +} + +RegOperand *X64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag) { + return &(GetCurrFunction()->GetOpndBuilder()->CreatePReg(regNO, size, kind)); +} + +Insn *X64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + X64MOP_t mOp = x64::MOP_begin; + switch (regSize) { + case k8BitSize: + mOp = x64::MOP_movb_r_m; + break; + case k16BitSize: + mOp = x64::MOP_movw_r_m; + break; + case k32BitSize: + mOp = x64::MOP_movl_r_m; + break; + case k64BitSize: + mOp = x64::MOP_movq_r_m; + break; + default: + CHECK_FATAL(false, "NIY"); + break; + } + Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(phyOpnd).AddOpndChain(memOpnd); + return &insn; +} + +Insn *X64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + X64MOP_t mOp = x64::MOP_begin; + switch (regSize) { + case k8BitSize: + mOp = x64::MOP_movb_m_r; + break; + case k16BitSize: + mOp = x64::MOP_movw_m_r; + break; + case k32BitSize: + mOp = x64::MOP_movl_m_r; + break; + case k64BitSize: + mOp = x64::MOP_movq_m_r; + break; + default: + CHECK_FATAL(false, "NIY"); + break; + } + Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(memOpnd).AddOpndChain(phyOpnd); + return &insn; +} + +Insn *X64RegInfo::BuildCommentInsn(const std::string &comment) { + CHECK_FATAL(false, "Comment Insn, unsupported"); + CommentOperand *commentOpnd = &(GetCurrFunction()->GetOpndBuilder()->CreateComment(comment)); + commentOpnd = nullptr; + return nullptr; +} + +void X64RegInfo::FreeSpillRegMem(regno_t vrNum) { + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + x64CGFunc->FreeSpillRegMem(vrNum); +} + +MemOperand *X64RegInfo::GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) { + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + return x64CGFunc->GetOrCreatSpillMem(vrNum, bitSize); +} + +MemOperand *X64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) { + isOutOfRange = false; + return memOpnd; +} + +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..33a888d17b64704c262cd929bed0b66849470fb7 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_standardize.h" +#include "x64_isa.h" +#include "x64_cg.h" +#include "insn.h" + +namespace maplebe { +#define DEFINE_MAPPING(ABSTRACT_IR, X64_MOP, ...) {ABSTRACT_IR, X64_MOP}, +std::unordered_map x64AbstractMapping = { +#include "x64_abstract_mapping.def" +}; + +static inline X64MOP_t GetMopFromAbstraceIRMop(MOperator mOp) { + auto iter = x64AbstractMapping.find(mOp); + if (iter == x64AbstractMapping.end()) { + CHECK_FATAL(false, "NIY mapping"); + } + CHECK_FATAL(iter->second != x64::MOP_begin, "NIY mapping"); + return iter->second; +} + +void X64Standardize::StdzMov(maplebe::Insn &insn) { + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + insn.CommuteOperands(kInsnFirstOpnd, kInsnSecondOpnd); +} + +void X64Standardize::StdzStrLdr(Insn &insn) { + StdzMov(insn); +} + +void X64Standardize::StdzBasicOp(Insn &insn) { + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + Operand &src2 = insn.GetOperand(kInsnThirdOpnd); + insn.CleanAllOperand(); + insn.AddOpndChain(src2).AddOpndChain(dest); +} + +void X64Standardize::StdzUnaryOp(Insn &insn) { + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + insn.CleanAllOperand(); + insn.AddOpndChain(dest); +} + +void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { + uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); + uint32 destSize = OpndDesSize; + uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + uint32 srcSize = OpndSrcSize; + switch (insn.GetMachineOpcode()) { + case abstract::MOP_zext_rr_64_8: + destSize = k32BitSize; + break; + case abstract::MOP_zext_rr_64_16: + destSize = k32BitSize; + break; + case abstract::MOP_zext_rr_64_32: + destSize = k32BitSize; + break; + default: + break; + } + MOperator directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + if (directlyMappingMop != abstract::MOP_undef) { + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); + RegOperand *src = static_cast(opnd0); + if (srcSize != OpndSrcSize) { + src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), + srcSize, src->GetRegisterType()); + } + Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); + RegOperand *dest = static_cast(opnd1); + if (destSize != OpndDesSize) { + dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), + destSize, dest->GetRegisterType()); + } + insn.CleanAllOperand(); + insn.AddOpndChain(*src).AddOpndChain(*dest); + } else { + CHECK_FATAL(false, "NIY mapping"); + } +} + +void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { + RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + /* count operand cvt -> PTY_u8 */ + if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { + countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), + 
GetPrimTypeBitSize(PTY_u8), countOpnd->GetRegisterType()); + } + /* copy count operand to cl(rcx) register */ + RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + X64MOP_t copyMop = x64::MOP_movb_r_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); + insn.GetBB()->InsertInsnBefore(insn, copyInsn); + /* shift OP */ + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + RegOperand &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + insn.CleanAllOperand(); + insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); +} + +} diff --git a/ecmascript/mapleall/maple_be/src/cg/yieldpoint.cpp b/ecmascript/mapleall/maple_be/src/cg/yieldpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d321743b7d0eca163b793b8fb274d2960ae160c3 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/cg/yieldpoint.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "yieldpoint.h" +#if TARGAARCH64 +#include "aarch64_yieldpoint.h" +#elif TARGRISCV64 +#include "riscv64_yieldpoint.h" +#endif +#if TARGARM32 +#include "arm32_yieldpoint.h" +#endif +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +bool CgYieldPointInsertion::PhaseRun(maplebe::CGFunc &f) { + YieldPointInsertion *yieldPoint = nullptr; +#if TARGAARCH64 || TARGRISCV64 + yieldPoint = GetPhaseAllocator()->New(f); +#endif +#if TARGARM32 + yieldPoint = GetPhaseAllocator()->New(f); +#endif + yieldPoint->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/ecmascript/mapleall/maple_be/src/litecg/litecg.cpp b/ecmascript/mapleall/maple_be/src/litecg/litecg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f373060121b6a051866d14c1104dae0667de5760 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/litecg/litecg.cpp @@ -0,0 +1,109 @@ +#include "litecg.h" +#include "mir_builder.h" +#include "cg_option.h" +#include "mad.h" +#include "cg.h" +#include "maple_phase_support.h" +#include "maple_phase.h" +#include "cg_phasemanager.h" +#include + +namespace maple { + +namespace litecg { + +using namespace maplebe; + +LiteCG::LiteCG(Module& mirModule) : module(mirModule) { + + // Create CGOption: set up default options + // TODO: should we make CGOptions local? + cgOptions = &CGOptions::GetInstance(); + cgOptions->EnableLiteCG(); + cgOptions->SetEmitFileType("obj"); // TODO: to kElf + // cgOptions->SetTarget(X86_64); + // cgOptions->SetDebug(); + cgOptions->SetQuiet(true); + //cgOptions->SetDefaultOptions(module); + + // module information prepare + std::string moduleName = module.GetFileName(); + GStrIdx fileNameStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(moduleName); + + // TODO: is this strictly required? 
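+    // Registers the module's file name as the "INFO_filename" entry of the
+    // module-level file-info table (what the .mpl parser would normally record).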
+    GStrIdx nameStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename");
+    module.PushFileInfoPair(MIRInfoPair(nameStrIdx, fileNameStrIdx.GetIdx()));
+    module.PushFileInfoIsString(true);
+
+    module.SetFlavor(kFlavorUnknown);  // TODO: need a new flavor
+    module.SetSrcLang(kSrcLangC);      // TODO: fix this
+    module.GetImportFiles().clear();
+
+    // Setup output file name
+    module.SetOutputFileName(moduleName + ".s");
+}
+
+LiteCG& LiteCG::SetOutputType(OutputType config) {
+    cgOptions->SetEmitFileType((config == kAsm) ? "asm" : "obj");
+    return *this;
+}
+
+LiteCG& LiteCG::SetTargetType(TargetType config) {
+    // TODO: update target support
+    // cgOptions->SetTarget(X86_64);
+    return *this;
+}
+
+LiteCG& LiteCG::SetDebugType(DebugType config) {
+    // TODO: fix the exposed debug options
+    // cgOptions->SetDebug(?);
+    return *this;
+}
+
+LiteCG& LiteCG::SetVerbose(InfoType config) {
+    cgOptions->SetQuiet(config == kQuiet);
+    return *this;
+}
+
+void LiteCG::DumpIRToFile(const std::string& fileName) {
+    module.DumpToFile(fileName);
+}
+
+LiteCG& LiteCG::SetupLiteCGEmitMemoryManager(void *codeSpace,
+    MemoryManagerAllocateDataSectionCallback dataSectionAllocator,
+    MemoryManagerSaveFunc2AddressInfoCallback funcAddressSaver) {
+    cgOptions->SetupEmitMemoryManager(codeSpace, dataSectionAllocator, funcAddressSaver);
+    return *this;
+}
+
+void LiteCG::DoCG() {
+    bool timePhases = false;
+    // MPLTimer timer;
+    // timer.Start();
+
+    Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel());
+
+    // TODO: not sure how to do this.
+    auto cgPhaseManager = std::make_unique<MemPool>(memPoolCtrler, "cg function phasemanager");
+    const MaplePhaseInfo *cgPMInfo = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&CgFuncPM::id);
+    auto *cgfuncPhaseManager = static_cast<CgFuncPM*>(cgPMInfo->GetConstructor()(cgPhaseManager.get()));
+    cgfuncPhaseManager->SetQuiet(CGOptions::IsQuiet());
+
+    if (timePhases) {
+        cgfuncPhaseManager->InitTimeHandler();
+    }
+
+    /* This is a specific workaround (needs refactoring) */
+    cgfuncPhaseManager->SetCGOptions(cgOptions);
+    (void) cgfuncPhaseManager->PhaseRun(module);
+
+    if (timePhases) {
+        cgfuncPhaseManager->DumpPhaseTime();
+    }
+    // timer.Stop();
+    // LogInfo::MapleLogger() << "Mplcg consumed " << timer.ElapsedMilliseconds() << "ms" << '\n';
+}
+
+} // namespace litecg
+
+} // namespace maple
diff --git a/ecmascript/mapleall/maple_be/src/litecg/litecg_test.cpp b/ecmascript/mapleall/maple_be/src/litecg/litecg_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2ec7b06531e936e08e169f38e13a33875e0d76d0
--- /dev/null
+++ b/ecmascript/mapleall/maple_be/src/litecg/litecg_test.cpp
@@ -0,0 +1,60 @@
+/*
+  A basic example of using the litecg & lmir builder APIs.
+
+  At this stage, it shows:
+  - the basic workflow of the litecg API.
+  - using the lmir builder API to construct the in-memory IR input to litecg.
+  - dumping that input IR to a text-format maple IR file.
+ */
+
+#include "litecg.h"
+
+using namespace maple::litecg;
+
+#define __ irBuilder->  // make the code look better
+
+void generateIR(LMIRBuilder* irBuilder) {
+    /* case 1: note that parameters are implicitly defined without returning a
+       Var handle, so they must be fetched with GetLocalVar.
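+       param2 is narrowed from i64 to i32 with Trunc before the add,
+       matching the (i32) cast below.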
+ + i32 function1(i32 param1, i64 param2) { + return param1 + (i32) param2; + } + */ + Function& function1 = __ DefineFunction("function1") + .Param(__ i32Type, "param1") + .Param(__ i64Type, "param2") + .Return(__ i32Type) + .Done(); + + __ SetCurFunc(function1); + + BB& bb = __ CreateBB(); + Stmt& retStmt = __ Return( + __ Add(__ i32Type, + __ Dread(__ GetLocalVar("param1")), + __ Trunc(__ i64Type, __ i32Type, + __ Dread(__ GetLocalVar("param2"))))); + __ AppendStmt(bb, retStmt); + __ AppendBB(bb); + + // TODO: to be complete + /* case 2 + + */ +} + +int main() +{ + LiteCG liteCG("lmirexample"); + // liteCG.SetTargetType(X86_64).SetOutputType(Asm); + + auto irBuilder = liteCG.GetIRBuilder(); + generateIR(&irBuilder); + + liteCG.DumpIRToFile("lmirexample.mpl"); + + // liteCG.doCG(); + + return 0; +} diff --git a/ecmascript/mapleall/maple_be/src/litecg/lmir_builder.cpp b/ecmascript/mapleall/maple_be/src/litecg/lmir_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0752790c6975e83b8daaabdf69c00a888fde2242 --- /dev/null +++ b/ecmascript/mapleall/maple_be/src/litecg/lmir_builder.cpp @@ -0,0 +1,698 @@ +#include "lmir_builder.h" +#include "mir_builder.h" + +namespace maple { +namespace litecg { + +// not exposed any longer +inline Type *GetPrimitiveType(PrimType type) { + return GlobalTables::GetTypeTable().GetPrimType(type); +} + +Module *CreateModuleWithName(const std::string &name) +{ + return new Module(name); +} + +void ReleaseModule(Module *module) +{ + delete module; +} + +void LiteCGSetDeoptBundleInfo(Stmt &callNode, const std::map &deoptBundleInfo) +{ + if (callNode.GetOpCode() == OP_call) { + static_cast(callNode).SetDeoptBundleInfo(deoptBundleInfo); + } else { + static_cast(callNode).SetDeoptBundleInfo(deoptBundleInfo); + } +} + +LMIRBuilder::LMIRBuilder(Module& module_) + : mirBuilder(*module_.GetMIRBuilder()), module(module_) { + i8Type = GetPrimitiveType(PTY_i8); + i16Type = GetPrimitiveType(PTY_i16); + i32Type = GetPrimitiveType(PTY_i32); + i64Type = GetPrimitiveType(PTY_i64); + i128Type = GetPrimitiveType(PTY_i128); + u1Type = GetPrimitiveType(PTY_u1); + u8Type = GetPrimitiveType(PTY_u8); + u16Type = GetPrimitiveType(PTY_u16); + u32Type = GetPrimitiveType(PTY_u32); + u64Type = GetPrimitiveType(PTY_u64); + u128Type = GetPrimitiveType(PTY_u128); + voidType = GetPrimitiveType(PTY_void); + f32Type = GetPrimitiveType(PTY_f32); + f64Type = GetPrimitiveType(PTY_f64); + + // builtin types: commonly used derived types + strType = CreatePtrType(u8Type); // u8PtrType + i64PtrType = CreatePtrType(i64Type); + i64RefType = CreateRefType(i64Type); +} + +void LMIRBuilder::DumpIRToFile(const std::string fileName) { + module.DumpToFile(fileName); +} + +LiteCGTypeKind LMIRBuilder::LiteCGGetTypeKind(Type *type) const { + switch(type->GetKind()) { + case MIRTypeKind::kTypeInvalid: + return kLiteCGTypeInvalid; + case MIRTypeKind::kTypeUnknown: + return kLiteCGTypeUnknown; + case MIRTypeKind::kTypeScalar: + return kLiteCGTypeScalar; + case MIRTypeKind::kTypeBitField: + return kLiteCGTypeBitField; + case MIRTypeKind::kTypeArray: + return kLiteCGTypeArray; + case MIRTypeKind::kTypeFArray: + return kLiteCGTypeFArray; + case MIRTypeKind::kTypeJArray: + return kLiteCGTypeJArray; + case MIRTypeKind::kTypeStruct: + return kLiteCGTypeStruct; + case MIRTypeKind::kTypeUnion: + return kLiteCGTypeUnion; + case MIRTypeKind::kTypeClass: + return kLiteCGTypeClass; + case MIRTypeKind::kTypeInterface: + return kLiteCGTypeInterface; + case MIRTypeKind::kTypeStructIncomplete: + return 
kLiteCGTypeStructIncomplete; + case MIRTypeKind::kTypeClassIncomplete: + return kLiteCGTypeClassIncomplete; + case MIRTypeKind::kTypeConstString: + return kLiteCGTypeConstString; + case MIRTypeKind::kTypeInterfaceIncomplete: + return kLiteCGTypeInterfaceIncomplete; + case MIRTypeKind::kTypePointer: + return kLiteCGTypePointer; + case MIRTypeKind::kTypeFunction: + return kLiteCGTypeFunction; + case MIRTypeKind::kTypeVoid: + return kLiteCGTypeVoid; + case MIRTypeKind::kTypeByName: + return kLiteCGTypeByName; + case MIRTypeKind::kTypeParam: + return kLiteCGTypeParam; + case MIRTypeKind::kTypeInstantVector: + return kLiteCGTypeInstantVector; + case MIRTypeKind::kTypeGenericInstant: + return kLiteCGTypeGenericInstant; + default: + return kLiteCGTypeUnknown; + } +} + +Type *LMIRBuilder::CreatePtrType(Type *mirType) { + auto type = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ptr); + return type; +} + +Type *LMIRBuilder::CreateRefType(Type *mirType) { + auto type = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ref); + return type; +} + +bool LMIRBuilder::IsHeapPointerType(Type *mirType) const { + return mirType->GetPrimType() == PTY_ref; +} + +ArrayType *LMIRBuilder::CreateArrayType(Type *elemType, std::vector& dimSize) { + auto type = GlobalTables::GetTypeTable().GetOrCreateArrayType(*elemType, + dimSize.size(), dimSize.data()); + return static_cast(type); +} + +Type *LMIRBuilder::CreateStructTypeInternal(const String& name, + std::vector>& fields_) { + FieldVector parentFields; // parentFields not used. + // TODO: not sure about the cost + FieldVector fields; + for (auto field : fields_) { + auto strIdx = mirBuilder.GetOrCreateStringIndex(field.first.data()); + fields.push_back(FieldPair(strIdx, + TyIdxFieldAttrPair(field.second->GetTypeIndex(), FieldAttrs()))); + } + auto type = GlobalTables::GetTypeTable().GetOrCreateStructType(name, fields, + parentFields, module); + return type; +} + +Type* LMIRBuilder::GetStructType(const String& name) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(name); + TyIdx typeIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(strIdx); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx); + return type; +} + +StructConst& LMIRBuilder::CreateStructConstInternal(StructType *type) { + return *module.GetMemPool()->New(module, *type); +} + +ArrayConst& LMIRBuilder::CreateArrayConstInternal(ArrayType *type) { + return *module.GetMemPool()->New(module, *type); +} + +FieldOffset LMIRBuilder::GetFieldOffset(StructType *structType, FieldId fieldId) { + // TODO: we should avoid access CG internals here + //return Globals::GetInstance()->GetBECommon()->GetFieldOffset(*structType, fieldId); + return std::pair(0, 0); +} + +Type *LMIRBuilder::CreateFuncType(std::vector params_, Type *retType, bool isVarg) { + std::vector params; + std::vector attrs; // not used so far + + for (const auto param: params_) { + params.push_back(param->GetTypeIndex()); + attrs.push_back(TypeAttrs()); + } + + auto type = GlobalTables::GetTypeTable().GetOrCreateFunctionType(retType->GetTypeIndex(), + params, attrs, isVarg); + return type; +} + +Type *LMIRBuilder::LiteCGGetPointedType(Type *type) { + if (type == nullptr || !type->IsMIRPtrType()) { + return nullptr; + } + return static_cast(type)->GetPointedFuncType(); +} + +std::vector LMIRBuilder::LiteCGGetFuncParamTypes(Type *type) { + std::vector ¶mTypeList = static_cast(type)->GetParamTypeList(); + std::vector paramTypes; + for (const auto paramType : 
paramTypeList) { + paramTypes.push_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(paramType)); + } + return paramTypes; +} + +Type *LMIRBuilder::LiteCGGetFuncReturnType(Type *type) { + if (type == nullptr || !type->IsMIRFuncType()) { + return nullptr; + } + TyIdx retTypeIndex = static_cast(type)->GetRetTyIdx(); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTypeIndex); +} + +// TODO: not sure it's FUNCATTR_local or FUNCATTR_static +static const FuncAttrKind FuncAttrMapTable[] = { + //FUNC_global, FUNC_weak, FUNC_internal + FUNCATTR_extern, FUNCATTR_weak, FUNCATTR_local +}; + +static const FuncAttrKind FuncConvAttrMapTable[] = { + //CCall, Web_Kit_JS_Call, GHC_Call + FUNCATTR_ccall, FUNCATTR_webkitjscall, FUNCATTR_ghcall +}; + +static const StmtAttrKind StmtConvAttrMapTable[] = { + STMTATTR_ccall, STMTATTR_webkitjscall, STMTATTR_ghcall +}; + +Function& LMIRBuilder::CreateFunctionInternal(const String& name, Type *retType, Params& params_, + bool isVargs, bool needBody, FuncAttr attr, ConvAttr convAttr) { + ArgVector params(module.GetMPAllocator().Adapter()); + for (auto param : params_) { + params.push_back(param); + } + auto& function = *mirBuilder.CreateFunction(name, *retType, params, isVargs, needBody); + // TODO: check for attr + function.SetAttr(FuncAttrMapTable[attr]); + function.SetAttr(FuncConvAttrMapTable[convAttr]); + // It defines a function, add to module + if (needBody) { + module.AddFunction(&function); + } + return function; +} + +Function* LMIRBuilder::GetFunc(const String& name) { + return mirBuilder.GetFunctionFromName(name); +} + +void LMIRBuilder::SetCurFunc(Function& function) { + module.SetCurFunction(&function); +} + +Function &LMIRBuilder::GetCurFunction() const { + return *module.CurFunction(); +} + +void LMIRBuilder::SetFuncFrameResverdSlot(int slot) { + module.CurFunction()->GetFuncAttrs().SetFrameResverdSlot(slot); +} + +void LMIRBuilder::SetFuncFramePointer(const String& val) { + module.CurFunction()->GetFuncAttrs().SetFramePointer(val); +} + +MIRPreg *LMIRBuilder::LiteCGGetPreg(Function& func, int32_t pRegNo) { + return func.GetPregItem(pRegNo); +} + +Expr LMIRBuilder::LiteCGGetPregFP(Function& func) { + return Regread(kSregFp); +} + +Expr LMIRBuilder::LiteCGGetPregSP() { + return Regread(kSregSp); +} + +// TODO: not sure it's FUNCATTR_local or FUNCATTR_static +static const AttrKind VarAttrMapTable[] = { + //VAR_external, VAR_weak, VAR_internal, VAR_global, VAR_readonly + ATTR_extern, ATTR_weak, ATTR_local, ATTR_extern, ATTR_readonly +}; + +Var& LMIRBuilder::CreateGlobalVar(Type *type, const String& name, GlobalVarAttr attr) { + Var *var = mirBuilder.GetOrCreateSymbol(type->GetTypeIndex(), name, kStVar, kScGlobal, + nullptr, kScopeGlobal, false); // sameType? 
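+    // Note: VarAttrMapTable above maps both VAR_external and VAR_global to
+    // ATTR_extern, so those two attrs currently yield the same symbol attribute.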
+    var->SetAttr(VarAttrMapTable[attr]);
+    return *var;
+}
+
+Var& LMIRBuilder::CreateGlobalVar(Type *type, const String& name, Const& init, GlobalVarAttr attr) {
+    Var& var = CreateGlobalVar(type, name, attr);
+    var.SetKonst(&init);
+    return var;
+}
+
+Var *LMIRBuilder::GetGlobalVar(const String& name) {
+    return mirBuilder.GetGlobalDecl(name);
+}
+
+Var& LMIRBuilder::CreateLocalVar(Type *type, const String& name) {
+    return *mirBuilder.GetOrCreateLocalDecl(name, *type);
+}
+
+Var *LMIRBuilder::GetLocalVar(const String& name) {
+    return mirBuilder.GetLocalDecl(name);
+}
+
+Var *LMIRBuilder::GetLocalVarFromExpr(Expr inExpr) {
+    auto *node = inExpr.GetNode();
+    if (!node || node->GetOpCode() != OP_dread) {
+        return nullptr;
+    }
+    return GetCurFunction().GetSymbolTabItem(static_cast<DreadNode*>(node)->GetStIdx().Idx(), true);
+}
+
+Var &LMIRBuilder::GetParam(Function& function, size_t index) const {
+    return *function.GetFormal(index);
+}
+
+Const& LMIRBuilder::CreateIntConst(Type *type, int64_t val) {
+    return *GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *type);
+}
+
+Const& LMIRBuilder::CreateFloatConst(float val) {
+    return *GlobalTables::GetFpConstTable().GetOrCreateFloatConst(val);
+}
+
+Const& LMIRBuilder::CreateDoubleConst(double val) {
+    return *GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(val);
+}
+
+Const& LMIRBuilder::CreateStrConst(const String& constStr) {
+    // TODO: fix the type for string const
+    return *module.GetMemPool()->New<MIRStrConst>(constStr, *strType);
+}
+
+BB& LMIRBuilder::CreateBB(bool needLabel) {
+    // TODO: not sure block-node is a correct representation
+    // create block statement in current function
+    BB& bb = *module.CurFuncCodeMemPool()->New<BB>();
+    if (needLabel) {
+        // generate an implicit label statement as the first statement
+        LabelIdx labelIdx = module.CurFunction()->GetLabelTab()->CreateLabel();
+        (void)module.CurFunction()->GetLabelTab()->AddToStringLabelMap(labelIdx);
+        auto *labelStmt = module.CurFuncCodeMemPool()->New<LabelNode>();
+        labelStmt->SetLabelIdx(labelIdx);
+        bb.AddStatement(labelStmt);
+    }
+    return bb;
+}
+
+void LMIRBuilder::AppendStmt(BB& bb, Stmt& stmt) {
+    bb.AddStatement(&stmt);
+}
+
+void LMIRBuilder::AppendStmtBeforeBranch(BB& bb, Stmt& stmt) {
+    bool inserted = false;
+    auto &nodes = bb.GetStmtNodes();
+    for (auto it = nodes.crbegin(); it != nodes.crend(); ++it) {
+        auto& node = *it;
+        if (!node.IsCondBr() && (node.GetOpCode() != OP_goto)) {
+            bb.InsertAfter(&node, &stmt);
+            inserted = true;
+            break;
+        }
+    }
+    CHECK_FATAL(inserted, "PreBB must have a non-jump stmt to insert the PhiVarAssign stmt.");
+}
+
+bool LMIRBuilder::IsEmptyBB(BB& bb) {
+    return bb.IsEmpty() || (bb.GetFirst() == bb.GetLast() && bb.GetFirst()->GetOpCode() == OP_label);
+}
+
+void LMIRBuilder::SetStmtCallConv(Stmt& stmt, ConvAttr convAttr) {
+    stmt.SetAttr(StmtConvAttrMapTable[convAttr]);
+}
+
+void LMIRBuilder::AppendBB(BB& bb) {
+    module.CurFunction()->GetBody()->AddStatement(&bb);
+}
+
+BB& LMIRBuilder::GetLastAppendedBB() {
+    BB *pb = dynamic_cast<BB*>(module.CurFunction()->GetBody()->GetLast());
+    return *pb;
+}
+
+LabelIdx GetBBLabelIdx(BB& bb) {
+    LabelNode *labelNode = dynamic_cast<LabelNode*>(bb.GetFirst());
+    DEBUG_ASSERT(labelNode != nullptr, "BB should have a label statement");
+
+    return labelNode->GetLabelIdx();
+}
+
+Stmt& LMIRBuilder::Goto(BB& dest) {
+    return *mirBuilder.CreateStmtGoto(OP_goto, GetBBLabelIdx(dest));
+}
+
+Stmt& LMIRBuilder::CondGoto(Var& cond, BB& target, bool inverseCond) {
+    auto opcode = inverseCond ?
OP_brtrue : OP_brfalse; + return *mirBuilder.CreateStmtCondGoto(Dread(cond).GetNode(), opcode, GetBBLabelIdx(target)); +} + +Stmt& LMIRBuilder::CondGoto(Expr cond, BB& target, bool inverseCond) { + auto opcode = inverseCond ? OP_brtrue : OP_brfalse; + return *mirBuilder.CreateStmtCondGoto(cond.GetNode(), opcode, GetBBLabelIdx(target)); +} + +// TODO: not ready yet +Stmt& LMIRBuilder::CreateSwitchInternal(Type *type, Expr cond, BB& defaultBB, + std::vector>& cases) { + CaseVector switchTable(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); +#if 0 + for (const std::pair casePair : cases) { + } +#endif + return *mirBuilder.CreateStmtSwitch(cond.GetNode(), GetBBLabelIdx(defaultBB), switchTable); +} + +Stmt& LMIRBuilder::Call(Function& func, Args& args_, Var *result) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const auto& arg : args_) { + args.emplace_back(arg.GetNode()); + } + + if (result == nullptr) { + return *mirBuilder.CreateStmtCall(func.GetPuidx(), args); + } else { + return *mirBuilder.CreateStmtCallAssigned(func.GetPuidx(), args, result); + } +} + +Stmt& LMIRBuilder::ICall(Expr funcAddr, Args& args_, Var *result) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + args.push_back(funcAddr.GetNode()); + for (const auto& arg : args_) { + args.emplace_back(arg.GetNode()); + } + + if (result == nullptr) { + return *mirBuilder.CreateStmtIcall(args); + } else { + return *mirBuilder.CreateStmtIcallAssigned(args, *result); + } +} + +Stmt& LMIRBuilder::IntrinsicCall(IntrinsicId func_, Args& args_, Var *result) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const auto& arg : args_) { + args.emplace_back(arg.GetNode()); + } + +// TODO: need to fix the type for IntrinsicId + auto func = static_cast(func_); + if (result == nullptr) { + return *mirBuilder.CreateStmtIntrinsicCall(func, args); + } else { + return *mirBuilder.CreateStmtIntrinsicCallAssigned(func, args, result); + } +} + +Stmt& LMIRBuilder::Return(Expr returnVal) { + return *mirBuilder.CreateStmtReturn(returnVal.GetNode()); +} + +Stmt& LMIRBuilder::Comment(std::string comment) { + return *mirBuilder.CreateStmtComment(comment); +} + +Stmt& LMIRBuilder::Dassign(Expr src, Var& var, FieldId fieldId) { + return *mirBuilder.CreateStmtDassign(var, fieldId, src.GetNode()); +} + +Stmt& LMIRBuilder::Iassign(Expr src, Expr addr, Type *baseType, FieldId fieldId) { + return *mirBuilder.CreateStmtIassign(*baseType, fieldId, addr.GetNode(), src.GetNode()); +} + +Expr LMIRBuilder::Dread(Var& var) { + return Expr(mirBuilder.CreateExprDread(var), var.GetType()); +} + +Expr LMIRBuilder::DreadWithField(Var& var, FieldId id) { + auto *type = var.GetType(); + CHECK_FATAL(type->IsStructType(), "DreadWithField: must be a struct type!"); + auto *fldType = static_cast(type)->GetFieldType(id); + return Expr(mirBuilder.CreateExprDread(*fldType, id, var), fldType); +} + +Expr LMIRBuilder::Iread(Type *type, Expr addr, Type *baseType, FieldId fieldId) { + return Expr(mirBuilder.CreateExprIread(*type, *baseType, fieldId, addr.GetNode()), type); +} + +PregIdx LMIRBuilder::CreatePreg(Type *mtype) { + if ((mtype->GetPrimType() != PTY_ptr) && (mtype->GetPrimType() != PTY_ref)) { + // primitive type + return GetCurFunction().GetPregTab()->CreatePreg(mtype->GetPrimType()); + } + return GetCurFunction().GetPregTab()->CreatePreg(mtype->GetPrimType(), mtype); +} + +Stmt& LMIRBuilder::Regassign(Expr src, PregIdx pregIdx) { + return 
*(mirBuilder.CreateStmtRegassign(src.GetType()->GetPrimType(), pregIdx, src.GetNode())); +} + +Expr LMIRBuilder::Regread(PregIdx pregIdx) { + MIRPreg *preg = GetCurFunction().GetPregTab()->PregFromPregIdx(pregIdx); + if (pregIdx < 0) { + // special register + return Expr(mirBuilder.CreateExprRegread(PTY_i64, pregIdx), i64Type); + } + if (preg->GetMIRType() != nullptr) { + return Expr(mirBuilder.CreateExprRegread(preg->GetPrimType(), pregIdx), preg->GetMIRType()); + } + // the type of value in reg is primitive type + Type *type = GetPrimitiveType(preg->GetPrimType()); + return Expr(mirBuilder.CreateExprRegread(preg->GetPrimType(), pregIdx), type); +} + +Expr LMIRBuilder::Addrof(Var& var) { + return Expr(mirBuilder.CreateAddrof(var), var.GetType()); +} + +Expr LMIRBuilder::ConstVal(Const& constVal) { + return Expr(mirBuilder.CreateConstval(&constVal), &constVal.GetType()); +} + +Expr LMIRBuilder::Not(Type *type, Expr src) { + // TODO: do we still need lnot? + return Expr(mirBuilder.CreateExprUnary(OP_bnot, *type, src.GetNode()), type); +} + +Expr LMIRBuilder::Sqrt(Type *type, Expr src) { + return Expr(mirBuilder.CreateExprUnary(OP_sqrt, *type, src.GetNode()), type); +} + +inline Expr CreateBinOpInternal(MIRBuilder& mirBuilder, Opcode op, + Type *type, Expr src1, Expr src2) { + // we don't check for type mismatch and insert type-conversion here + return Expr(mirBuilder.CreateExprBinary(op, *type, src1.GetNode(), src2.GetNode()), type); +} + +Expr LMIRBuilder::Add(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_add, type, src1, src2); +} + +Expr LMIRBuilder::Sub(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_sub, type, src1, src2); +} + +Expr LMIRBuilder::Mul(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_mul, type, src1, src2); +} + +Expr LMIRBuilder::UDiv(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_div, type, src1, src2); +} + +Expr LMIRBuilder::SDiv(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_div, type, src1, src2); +} + +Expr LMIRBuilder::URem(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_rem, type, src1, src2); +} + +Expr LMIRBuilder::SRem(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_rem, type, src1, src2); +} + +Expr LMIRBuilder::Shl(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_shl, type, src1, src2); +} + +Expr LMIRBuilder::LShr(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_lshr, type, src1, src2); +} + +Expr LMIRBuilder::AShr(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_ashr, type, src1, src2); +} + +Expr LMIRBuilder::And(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_band, type, src1, src2); +} + +Expr LMIRBuilder::Or(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_bior, type, src1, src2); +} + +Expr LMIRBuilder::Xor(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_bxor, type, src1, src2); +} + +Expr LMIRBuilder::ICmpEQ(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_eq, type, src1, src2); +} + +Expr LMIRBuilder::ICmpNE(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_ne, type, src1, src2); +} + +Expr LMIRBuilder::ICmpULT(Type *type, Expr src1, Expr src2) { + return 
CreateBinOpInternal(mirBuilder, OP_lt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpULE(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_le, type, src1, src2); +} + +Expr LMIRBuilder::ICmpUGT(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_gt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpUGE(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_ge, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSLT(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_lt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSLE(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_le, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSGT(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_gt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSGE(Type *type, Expr src1, Expr src2) { + return CreateBinOpInternal(mirBuilder, OP_ge, type, src1, src2); +} + +inline Expr CreateExprCompare(MIRBuilder& mirBuilder, Opcode op, + Type *type, Expr src1, Expr src2) { + // we don't check for type mismatch and insert type-conversion here + return Expr(mirBuilder.CreateExprCompare(op, *type, *src1.GetType(), src1.GetNode(), src2.GetNode()), type); +} + +Expr LMIRBuilder::ICmp(Type *type, Expr src1, Expr src2, IntCmpCondition cond) { + Opcode opCode = OP_eq; + switch(cond) { + case kEQ: + opCode = OP_eq; + break; + case kNE: + opCode = OP_ne; + break; + case kULT: + opCode = OP_lt; + break; + case kULE: + opCode = OP_le; + break; + case kUGT: + opCode = OP_gt; + break; + case kUGE: + opCode = OP_ge; + break; + case kSLT: + opCode = OP_lt; + break; + case kSLE: + opCode = OP_le; + break; + case kSGT: + opCode = OP_gt; + break; + case kSGE: + opCode = OP_ge; + break; + } + return CreateExprCompare(mirBuilder, opCode, type, src1, src2); +} + +Expr LMIRBuilder::Select(Type *type, Expr cond, Expr ifTrue, Expr ifFalse) { + return Expr(mirBuilder.CreateExprTernary(OP_select, *type, cond.GetNode(), + ifTrue.GetNode(), ifFalse.GetNode()), type); +} + +Expr LMIRBuilder::Trunc(Type *fromType, Type *toType, Expr opnd) { + return Expr(mirBuilder.CreateExprTypeCvt( + OP_cvt, toType->GetPrimType(), fromType->GetPrimType(), *opnd.GetNode()), toType); +} + +Expr LMIRBuilder::ZExt(Type *fromType, Type *toType, Expr opnd) { + return Expr(mirBuilder.CreateExprExtractbits(OP_zext, toType->GetPrimType(), + 0, GetPrimTypeActualBitSize(fromType->GetPrimType()), opnd.GetNode()), toType); +} + +Expr LMIRBuilder::Cvt(Type *fromType, Type *toType, Expr opnd) { + if (fromType->GetPrimType() != toType->GetPrimType()) { + return Expr(mirBuilder.CreateExprTypeCvt(OP_cvt, *toType, *fromType, opnd.GetNode()), toType); + } + return Expr(opnd.GetNode(), toType); +} + +Expr LMIRBuilder::SExt(Type *fromType, Type *toType, Expr opnd) { + return Expr(mirBuilder.CreateExprExtractbits(OP_sext, toType->GetPrimType(), 0, + GetPrimTypeActualBitSize(fromType->GetPrimType()), opnd.GetNode()), toType); +} +Expr LMIRBuilder::BitCast(Type *fromType, Type *toType, Expr opnd) { + return Expr(mirBuilder.CreateExprTypeCvt( + OP_cvt, toType->GetPrimType(), fromType->GetPrimType(), *opnd.GetNode()), toType); +} + +} // namespace litecg +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/BUILD.gn b/ecmascript/mapleall/maple_driver/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..c194a15cd95dc06dc49922a27c9f9e775c4c5b9c --- /dev/null +++ 
b/ecmascript/mapleall/maple_driver/BUILD.gn @@ -0,0 +1,137 @@ +# +#Copyright(c)[2020 - 2021] Huawei Technologies Co., Ltd.All rights reserved. +# +#OpenArkCompiler is licensed under Mulan PSL v2. +#You can use this software according to the terms and conditions of the Mulan PSL v2. +#You may obtain a copy of Mulan PSL v2 at: +# +#http: // license.coscl.org.cn/MulanPSL2 +# +#THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON - INFRINGEMENT, MERCHANTABILITY OR +#FIT FOR A PARTICULAR PURPOSE. +#See the Mulan PSL v2 for more details. +# +#configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] +#cflags_cc = [] +#cflags_cc += [ "-fPIC" ] + +include_directories = [ + "${MAPLEALL_ROOT}", + "${MAPLEALL_ROOT}/maple_be/include", + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLE_BUILD_OUTPUT}/common/target", + "${MAPLEALL_ROOT}/maple_be/include/cg/aarch64", + "${MAPLEALL_ROOT}/maple_be/include/be/aarch64", + "${MAPLEALL_ROOT}/maple_driver/defs", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", +] + +executable("maple") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = [ + "src/as_compiler.cpp", + "src/compiler.cpp", + "src/compiler_factory.cpp", + "src/dex2mpl_compiler.cpp", + "src/driver_runner.cpp", + "src/ipa_compiler.cpp", + "src/jbc2mpl_compiler.cpp", + "src/cpp2mpl_compiler.cpp", + "src/clang_compiler.cpp", + "src/ld_compiler.cpp", + "src/maple.cpp", + "src/maple_comb_compiler_wrapper.cpp", + "src/maple_comb_compiler.cpp", + "src/mpl_options.cpp", + "src/mplcg_compiler.cpp", + "src/hided_options.cpp", + ] + + include_dirs = include_directories + + deps = [ + ":libdriver_option", + ":libmaple_driver", + "${MAPLEALL_ROOT}/maple_be:libcg", + "${MAPLEALL_ROOT}/maple_be:libmplad", + "${MAPLEALL_ROOT}/maple_be:libmplbe", + "${MAPLEALL_ROOT}/maple_ipa:libmplipa", + "${MAPLEALL_ROOT}/maple_ir:libmplir", + "${MAPLEALL_ROOT}/maple_me:libmplme", + "${MAPLEALL_ROOT}/maple_me:libmplmewpo", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libsec_static", + ] +} + +static_library("libmaple_driver") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = [ "src/triple.cpp" ] + + include_dirs = [ "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include" + ] + + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +static_library("libdriver_option") { + sources = [ "src/driver_options.cpp" ] + + include_dirs = [ "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include" + ] + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +shared_library("libcgapi") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + if (TARGET == "x86_64") { + 
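+    # libcgapi (the shared-library entry point to the backend) is only built
+    # when TARGET is x86_64; other targets produce nothing from this template.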
include_dirs = include_directories + sources = [ + "src/x64/x64_api.cpp", + "src/compiler.cpp", + "src/mpl_options.cpp", + "src/mplcg_compiler.cpp", + "src/driver_runner.cpp", + ] + deps = [ + ":libdriver_option", + ":libmaple_driver", + "${MAPLEALL_ROOT}/maple_be:libcg", + "${MAPLEALL_ROOT}/maple_be:libmplad", + "${MAPLEALL_ROOT}/maple_be:libmplbe", + "${MAPLEALL_ROOT}/maple_ir:libmplir", + "${MAPLEALL_ROOT}/maple_ipa:libmplipa", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/maple_me:libmplme", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${MAPLEALL_ROOT}/maple_me:libmplmewpo", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libsec_static", + ] + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + } +} + diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_clang.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_clang.def new file mode 100644 index 0000000000000000000000000000000000000000..fb70797f01067addccfdeb08a9b7a1d6fd843524 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_clang.def @@ -0,0 +1 @@ +{"-emit-ast", "", false}, \ No newline at end of file diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_cpp2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_cpp2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..04bbc03d702e2d0ceb7b15a16077132a597ff6b5 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_cpp2mpl.def @@ -0,0 +1 @@ +{"--enable-variable-array", "", false}, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_dex2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_dex2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..2878491cab305fe95e99cca35ce2d8187761ccd0 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_dex2mpl.def @@ -0,0 +1,4 @@ +// option name, option value, append maple root path? +{ "-j100", "", false }, +{ "-litprofile", "out/target/product/maple_arm64/lib/codetricks/profile/meta.list", true }, +{ "-refine-catch", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_jbc2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_jbc2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..4d82bd2e54cfc7894a41d3c87013c5904f03c759 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_jbc2mpl.def @@ -0,0 +1,15 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? 
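The `*_options_*.def` tables added in this patch all share the row format documented above: option name, option value, and whether the value is resolved against the maple root. A minimal sketch of how such a table can be spliced into the driver, assuming a hypothetical `MplOption` aggregate (the real driver keeps its own option type in `mpl_options.h`):

```
#include <string>
#include <vector>

// Hypothetical mirror of the driver's option triple.
struct MplOption {
    std::string name;        // e.g. "--quiet"
    std::string value;       // e.g. a profile or list path
    bool appendMapleRoot;    // resolve value against the maple root dir?
};

// A .def file expands to comma-separated brace initializers, so it can be
// spliced straight into an initializer list:
static const std::vector<MplOption> kO0MeOptions = {
#include "O0_options_me.def"
};

// Rendering one row onto a command line (sketch):
std::string Render(const MplOption &opt, const std::string &mapleRoot) {
    std::string v = opt.appendMapleRoot ? mapleRoot + "/" + opt.value : opt.value;
    return v.empty() ? opt.name : opt.name + " " + v;
}
```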
diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_ld.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_ld.def new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_ld.def @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_me.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_me.def new file mode 100644 index 0000000000000000000000000000000000000000..ddb4ef6084bcae13795a469adaa0f751c1f6d481 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_me.def @@ -0,0 +1,16 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_mpl2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mpl2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..bda5ba0de368fd40763d76d96f8ce8585cafb599 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mpl2mpl.def @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "-regnativefunc", "", false }, +{ "--maplelinker", "", false }, +{ "--profile", "out/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list", true }, +{ "--maplelinker-nolocal", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_mpl2mpl_c.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mpl2mpl_c.def new file mode 100644 index 0000000000000000000000000000000000000000..c015aa6e5225dc457651ad1c1e5570cee881ba39 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mpl2mpl_c.def @@ -0,0 +1,16 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_mplcg.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mplcg.def new file mode 100644 index 0000000000000000000000000000000000000000..c92727c76559c1b50577a3aeb6a8e2b27fb7dce9 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mplcg.def @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "--no-pie", "", false }, +{ "--fpic", "", false }, +{ "--verbose-asm", "", false }, +{ "--maplelinker", "", false }, +{ "--duplicate_asm_list", "out/target/product/maple_arm64/lib/codetricks/asm/duplicateFunc.s", true }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O0_options_mplcg_c.def b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mplcg_c.def new file mode 100644 index 0000000000000000000000000000000000000000..93fedf3bcde7272bf66763e15e7327092e60c355 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O0_options_mplcg_c.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "--fpic", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_dex2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_dex2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..2878491cab305fe95e99cca35ce2d8187761ccd0 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_dex2mpl.def @@ -0,0 +1,4 @@ +// option name, option value, append maple root path? 
+{ "-j100", "", false }, +{ "-litprofile", "out/target/product/maple_arm64/lib/codetricks/profile/meta.list", true }, +{ "-refine-catch", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_me.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_me.def new file mode 100644 index 0000000000000000000000000000000000000000..47d61b942e8d94dc16a89820ec14811c9d4aa398 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_me.def @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--inlinefunclist", "out/target/product/maple_arm64/lib/codetricks/profile.pv/inline_funcs.list", true }, +{ "--no-nativeopt", "", false }, +{ "--no-ignoreipa", "", false }, +{ "--enable-ea", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_me_c.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_me_c.def new file mode 100644 index 0000000000000000000000000000000000000000..8ed5de607fd8829c860c032deeeba28d8d62b122 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_me_c.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_mpl2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mpl2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..b74863dcc076f8c50f6db0fba121730d11a6c773 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mpl2mpl.def @@ -0,0 +1,22 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? 
+{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--regnativefunc", "", false }, +{ "--no-nativeopt", "", false }, +{ "--maplelinker", "", false }, +{ "--profile", "out/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list", true }, +{ "--maplelinker-nolocal", "", false }, \ No newline at end of file diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_mpl2mpl_c.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mpl2mpl_c.def new file mode 100644 index 0000000000000000000000000000000000000000..83c259a8086271412158d2716f8bfe626c2259d0 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mpl2mpl_c.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, \ No newline at end of file diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplcg.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplcg.def new file mode 100644 index 0000000000000000000000000000000000000000..fc489f6b0e90e884a9fd48e09a88023994b3569b --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplcg.def @@ -0,0 +1,23 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--no-pie", "", false }, +{ "--verbose-asm", "", false }, +{ "--fpic", "", false }, +{ "--maplelinker", "", false }, +{ "--gen-c-macro-def", "", false }, +{ "--duplicate_asm_list", "out/target/product/maple_arm64/lib/codetricks/asm/duplicateFunc.s", true }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplcg_c.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplcg_c.def new file mode 100644 index 0000000000000000000000000000000000000000..b4cb79a23af2ab5b3423374163ed9e77ef27bb89 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplcg_c.def @@ -0,0 +1,18 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--fpic", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplipa.def b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplipa.def new file mode 100644 index 0000000000000000000000000000000000000000..1791acef4470a9f8718fc7644eb56a8d51f70110 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/O2_options_mplipa.def @@ -0,0 +1,3 @@ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "--effectipa", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/Os_options_me.def b/ecmascript/mapleall/maple_driver/defs/default/Os_options_me.def new file mode 100644 index 0000000000000000000000000000000000000000..020c5c8c83a6ac4e2191e75fd943ab86af17a7e3 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/Os_options_me.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-Os", "", false }, +{ "--quiet", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default/Os_options_mpl2mpl.def b/ecmascript/mapleall/maple_driver/defs/default/Os_options_mpl2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..55189dc380c24a08bdfa24e4b4a6882d441bcd9e --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/Os_options_mpl2mpl.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-Os", "", false }, +{ "--quiet", "", false }, \ No newline at end of file diff --git a/ecmascript/mapleall/maple_driver/defs/default/Os_options_mplcg.def b/ecmascript/mapleall/maple_driver/defs/default/Os_options_mplcg.def new file mode 100644 index 0000000000000000000000000000000000000000..197ad1f12714d1c2c4c7411435c6eff8f49bc232 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default/Os_options_mplcg.def @@ -0,0 +1,18 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-Os", "", false }, +{ "--quiet", "", false }, +{ "--fpic", "", false }, diff --git a/ecmascript/mapleall/maple_driver/defs/default_options.def b/ecmascript/mapleall/maple_driver/defs/default_options.def new file mode 100644 index 0000000000000000000000000000000000000000..9b8978ca917e949aa7fb5d0a50b814c866e5de39 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/defs/default_options.def @@ -0,0 +1,136 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifndef MAPLE_DRIVER_INCLUDE_DEFAULT_OPTIONS_H +#define MAPLE_DRIVER_INCLUDE_DEFAULT_OPTIONS_H + +namespace maple { +// O0 ME options +static MplOption kMeDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_me.def" +#else +#include "default/O0_options_me.def" +#endif +}; +// O2 ME options +static MplOption kMeDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_me.def" +#else +#include "default/O2_options_me.def" +#endif +}; +// Os ME options +static MplOption kMeDefaultOptionsOs[] = { +#include "default/Os_options_me.def" +}; +// O0 mpl2mpl options +static MplOption kMpl2MplDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_mpl2mpl.def" +#else +#include "default/O0_options_mpl2mpl.def" +#endif +}; +// O2 mpl2mpl options +static MplOption kMpl2MplDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_mpl2mpl.def" +#else +#include "default/O2_options_mpl2mpl.def" +#endif +}; +// Os mpl2mpl options +static MplOption kMpl2MplDefaultOptionsOs[] = { +#include "default/Os_options_mpl2mpl.def" +}; +// O0 mplcg options +static MplOption kMplcgDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_mplcg.def" +#else +#include "default/O0_options_mplcg.def" +#endif +}; +// O2 mplcg options +static MplOption kMplcgDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_mplcg.def" +#else +#include "default/O2_options_mplcg.def" +#endif +}; +// Os mplcg options +static MplOption kMplcgDefaultOptionsOs[] = { +#include "default/Os_options_mplcg.def" +}; +// O2 mplipa options +static MplOption kMplipaDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_mplipa.def" +#else +#include "default/O2_options_mplipa.def" +#endif +}; +// O0 dex2mpl options +static MplOption kDex2mplDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_dex2mpl.def" +#else +#include "default/O0_options_dex2mpl.def" +#endif +}; +// O2 dex2mpl options +static MplOption 
kDex2mplDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_dex2mpl.def" +#else +#include "default/O2_options_dex2mpl.def" +#endif +}; +// O0 mpl2mpl options for C language +static MplOption kMpl2MplDefaultOptionsO0ForC[] = { +#include "default/O0_options_mpl2mpl_c.def" +}; +// O2 mpl2mpl options for C language +static MplOption kMpl2MplDefaultOptionsO2ForC[] = { +#include "default/O2_options_mpl2mpl_c.def" +}; +// O0 mplcg options for C language +static MplOption kMplcgDefaultOptionsO0ForC[] = { +#include "default/O0_options_mplcg_c.def" +}; +// O2 ME options for C language +static MplOption kMeDefaultOptionsO2ForC[] = { +#include "default/O2_options_me_c.def" +}; +// O2 mplcg options for C language +static MplOption kMplcgDefaultOptionsO2ForC[] = { +#include "default/O2_options_mplcg_c.def" +}; +// O0 cpp2mpl options +static MplOption kCpp2MplDefaultOptionsForAst[] = { +#include "default/O0_options_cpp2mpl.def" +}; +// O0 clang options +static MplOption kClangDefaultOptions[] = { +#include "default/O0_options_clang.def" +}; +// O0 ld options +static MplOption kLdDefaultOptions[] = { +#include "default/O0_options_ld.def" +}; +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_DEFAULT_OPTIONS_H diff --git a/ecmascript/mapleall/maple_driver/include/compiler.h b/ecmascript/mapleall/maple_driver/include/compiler.h new file mode 100644 index 0000000000000000000000000000000000000000..b8917de6ab8bf3b5ee618e25a1d643aa2b617fe5 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/include/compiler.h @@ -0,0 +1,330 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLE_DRIVER_INCLUDE_COMPILER_H
+#define MAPLE_DRIVER_INCLUDE_COMPILER_H
+#include <memory>
+#include <unordered_set>
+#include "error_code.h"
+#include "mpl_options.h"
+#include "cg_option.h"
+#include "me_option.h"
+#include "option.h"
+#include "mir_module.h"
+#include "mir_parser.h"
+#include "driver_runner.h"
+#include "bin_mplt.h"
+
+namespace maple {
+const std::string kBinNameNone = "";
+const std::string kBinNameJbc2mpl = "jbc2mpl";
+const std::string kBinNameCpp2mpl = "hir2mpl";
+const std::string kBinNameClang = "clang";
+const std::string kBinNameDex2mpl = "dex2mpl";
+const std::string kBinNameMplipa = "mplipa";
+const std::string kBinNameMe = "me";
+const std::string kBinNameMpl2mpl = "mpl2mpl";
+const std::string kBinNameMplcg = "mplcg";
+const std::string kBinNameMapleComb = "maplecomb";
+const std::string kBinNameMapleCombWrp = "maplecombwrp";
+const std::string kMachine = "aarch64-";
+const std::string kVendor = "unknown-";
+const std::string kOperatingSystem = "linux-gnu-";
+const std::string kLdFlag = "ld";
+const std::string kGccFlag = "gcc";
+const std::string kGppFlag = "g++";
+const std::string kAsFlag = "as";
+const std::string kInputPhase = "input";
+const std::string kBinNameLd = kMachine + kOperatingSystem + kLdFlag;
+const std::string kBinNameAs = kMachine + kOperatingSystem + kAsFlag;
+const std::string kBinNameGcc = kMachine + kOperatingSystem + kGccFlag;
+const std::string kBinNameGpp = kMachine + kOperatingSystem + kGppFlag;
+
+constexpr char kGccBeIlp32SysrootPathEnv[] = "GCC_BIGEND_ILP32_SYSROOT_PATH";
+constexpr char kGccBeSysrootPathEnv[] = "GCC_BIGEND_SYSROOT_PATH";
+constexpr char kGccBePathEnv[] = "GCC_BIGEND_PATH";
+
+class Compiler {
+ public:
+  explicit Compiler(const std::string &name) : name(name) {}
+
+  virtual ~Compiler() = default;
+
+  virtual ErrorCode Compile(MplOptions &options, const Action &action,
+                            std::unique_ptr<MIRModule> &theModule);
+
+  virtual void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                                   std::vector<std::string> &tempFiles) const {}
+
+  virtual std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions, const Action &action) const {
+    return std::unordered_set<std::string>();
+  }
+
+  virtual void PrintCommand(const MplOptions&, const Action&) const {}
+
+ protected:
+  virtual std::string GetBinPath(const MplOptions &mplOptions) const;
+  virtual const std::string &GetBinName() const {
+    return kBinNameNone;
+  }
+
+  /* Default behaviour ToolName==BinName, But some tools have another behaviour:
+   * AsCompiler: ToolName=kAsFlag, BinName=kMachine + kOperatingSystem + kAsFlag
+   */
+  virtual const std::string &GetTool() const {
+    return GetBinName();
+  }
+
+  virtual std::string GetInputFileName(const MplOptions &options, const Action &action) const {
+    return action.GetInputFile();
+  }
+
+  virtual DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const {
+    return DefaultOption();
+  }
+
+  virtual void AppendOutputOption(std::vector<MplOption> &, const std::string &) const {
+    return;
+  }
+
+ private:
+  const std::string name;
+  std::vector<MplOption> MakeOption(const MplOptions &options,
+                                    const Action &action) const;
+  void AppendDefaultOptions(std::vector<MplOption> &finalOptions,
+                            const std::vector<MplOption> &defaultOptions,
+                            bool isDebug) const;
+  void AppendExtraOptions(std::vector<MplOption> &finalOptions, const MplOptions &options,
+                          bool isDebug, const Action &action) const;
+  void AppendInputsAsOptions(std::vector<MplOption> &finalOptions,
+                             const MplOptions &mplOptions, const Action &action) const;
+  void ReplaceOrInsertOption(std::vector<MplOption> &finalOptions,
+                             const std::string &key, const std::string &value) const;
+  std::vector<MplOption> MakeDefaultOptions(const MplOptions &options,
+                                            const Action &action) const;
+  int Exe(const MplOptions &mplOptions, const std::vector<MplOption> &options) const;
+  const std::string &GetName() const {
+    return name;
+  }
+};
+
+class Jbc2MplCompiler : public Compiler {
+ public:
+  explicit Jbc2MplCompiler(const std::string &name) : Compiler(name) {}
+
+  ~Jbc2MplCompiler() = default;
+
+ private:
+  const std::string &GetBinName() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+};
+
+class ClangCompiler : public Compiler {
+ public:
+  explicit ClangCompiler(const std::string &name) : Compiler(name) {}
+
+  ~ClangCompiler() = default;
+
+ private:
+  const std::string &GetBinName() const override;
+  std::string GetBinPath(const MplOptions &mplOptions) const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+  void AppendOutputOption(std::vector<MplOption> &finalOptions, const std::string &name) const override;
+};
+
+class ClangCompilerBeILP32 : public ClangCompiler {
+ public:
+  explicit ClangCompilerBeILP32(const std::string &name) : ClangCompiler(name) {}
+ private:
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+};
+
+class Cpp2MplCompiler : public Compiler {
+ public:
+  explicit Cpp2MplCompiler(const std::string &name) : Compiler(name) {}
+
+  ~Cpp2MplCompiler() = default;
+
+ private:
+  std::string GetBinPath(const MplOptions &mplOptions) const override;
+  const std::string &GetBinName() const override;
+  std::string GetInputFileName(const MplOptions &options, const Action &action) const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+  void AppendOutputOption(std::vector<MplOption> &finalOptions, const std::string &name) const override;
+};
+
+class Dex2MplCompiler : public Compiler {
+ public:
+  explicit Dex2MplCompiler(const std::string &name) : Compiler(name) {}
+
+  ~Dex2MplCompiler() = default;
+#ifdef INTERGRATE_DRIVER
+  ErrorCode Compile(MplOptions &options, const Action &action,
+                    std::unique_ptr<MIRModule> &theModule) override;
+#endif
+
+  void PrintCommand(const MplOptions &options, const Action &action) const override;
+
+ private:
+  const std::string &GetBinName() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+#ifdef INTERGRATE_DRIVER
+  void PostDex2Mpl(std::unique_ptr<MIRModule> &theModule) const;
+  bool MakeDex2mplOptions(const MplOptions &options);
+#endif
+};
+
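The subclasses above override only the hooks they need; `Compiler`'s private `MakeOption`/`Exe` machinery assembles and runs the final command line. As a hedged illustration (not part of this patch), a hypothetical new tool would slot in like this, where `FooCompiler` and `"foo2mpl"` are invented names:

```
// Illustrative sketch only; FooCompiler and "foo2mpl" are invented.
class FooCompiler : public Compiler {
 public:
  explicit FooCompiler(const std::string &name) : Compiler(name) {}
  ~FooCompiler() = default;

 private:
  // Resolved against GetBinPath() when the driver spawns the tool.
  const std::string &GetBinName() const override {
    static const std::string kBinNameFoo = "foo2mpl";
    return kBinNameFoo;
  }
  // Default flags the driver appends before any user-supplied options.
  DefaultOption GetDefaultOptions(const MplOptions&, const Action&) const override {
    DefaultOption opts;
    opts.mplOptions = std::make_unique<MplOption[]>(1);
    opts.mplOptions[0] = MplOption("--quiet", "");
    opts.length = 1;
    return opts;
  }
};
```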
+class IpaCompiler : public Compiler {
+ public:
+  explicit IpaCompiler(const std::string &name) : Compiler(name) {}
+
+  ~IpaCompiler() = default;
+
+ private:
+  const std::string &GetBinName() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  std::string GetInputFileName(const MplOptions &options, const Action &action) const override;
+};
+
+class MapleCombCompiler : public Compiler {
+ public:
+  explicit MapleCombCompiler(const std::string &name) : Compiler(name) {}
+
+  ~MapleCombCompiler() = default;
+
+  ErrorCode Compile(MplOptions &options, const Action &action,
+                    std::unique_ptr<MIRModule> &theModule) override;
+  void PrintCommand(const MplOptions &options, const Action &action) const override;
+  std::string GetInputFileName(const MplOptions &options, const Action &action) const override;
+
+ private:
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  ErrorCode MakeMeOptions(const MplOptions &options, DriverRunner &runner);
+  ErrorCode MakeMpl2MplOptions(const MplOptions &options, DriverRunner &runner);
+  std::string DecideOutExe(const MplOptions &options);
+  std::string GetStringOfSafetyOption() const;
+};
+
+class MplcgCompiler : public Compiler {
+ public:
+  explicit MplcgCompiler(const std::string &name) : Compiler(name) {}
+
+  ~MplcgCompiler() = default;
+  ErrorCode Compile(MplOptions &options, const Action &action,
+                    std::unique_ptr<MIRModule> &theModule) override;
+  ErrorCode CompileByText(MplOptions &options, const Action &action, std::unique_ptr<MIRModule> &theModule);
+  void PrintMplcgCommand(const MplOptions &options, const Action &action, const MIRModule &md) const;
+  void SetOutputFileName(const MplOptions &options, const Action &action, const MIRModule &md);
+  std::string GetInputFile(const MplOptions &options, const Action &action, const MIRModule *md) const;
+  std::string GetInputText(const MplOptions &options, const Action &action, const MIRModule *md) const;
+ private:
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  ErrorCode GetMplcgOptions(MplOptions &options, const Action &action, const MIRModule *theModule);
+  ErrorCode MakeCGOptions(const MplOptions &options);
+  const std::string &GetBinName() const override;
+  std::string baseName;
+  std::string outputFile;
+};
+
+class MapleCombCompilerWrp : public Compiler {
+ public:
+  explicit MapleCombCompilerWrp(const std::string &name) : Compiler(name) {}
+  ~MapleCombCompilerWrp() = default;
+
+  std::string GetInputFileName(const MplOptions &options, const Action &action) const override;
+
+ private:
+  std::string GetBinPath(const MplOptions &mplOptions) const override;
+  const std::string &GetBinName() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+};
+
+// Build .s to .o
+class AsCompiler : public Compiler {
+ public:
+  explicit AsCompiler(const std::string &name) : Compiler(name) {}
+
+  ~AsCompiler() = default;
+
+ private:
+  std::string GetBinPath(const MplOptions &mplOptions) const override;
+  const std::string &GetBinName() const override;
+  const std::string &GetTool() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  std::string GetInputFileName(const MplOptions &options, const Action &action) const override;
+  void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action,
+                           std::vector<std::string> &tempFiles) const override;
+  std::unordered_set<std::string> GetFinalOutputs(const MplOptions &mplOptions,
+                                                  const Action &action) const override;
+  void AppendOutputOption(std::vector<MplOption> &finalOptions, const std::string &name) const override;
+};
+
+class AsCompilerBeILP32 : public AsCompiler {
+ public:
+  explicit AsCompilerBeILP32(const std::string &name) : AsCompiler(name) {}
+ private:
+  std::string GetBinPath(const MplOptions &options) const override;
+  const std::string &GetBinName() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+};
+
+// Build .o to .so
+class LdCompiler : public Compiler {
+ public:
+  explicit LdCompiler(const std::string &name) : Compiler(name) {}
+
+  ~LdCompiler() = default;
+
+ private:
+  std::string GetBinPath(const MplOptions &mplOptions) const override;
+  const std::string &GetBinName() const override;
+  const std::string &GetTool() const override;
+  DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override;
+  std::string GetInputFileName(const MplOptions &options, const Action &action) const override;
+  void AppendOutputOption(std::vector<MplOption> &finalOptions, const std::string &name) const override;
+};
+
+class LdCompilerBeILP32 : public LdCompiler {
+ public:
+  explicit LdCompilerBeILP32(const std::string &name) : LdCompiler(name) {}
+ private:
+  std::string GetBinPath(const MplOptions &options) const override;
+  const std::string &GetBinName() const override;
+};
+
+} // namespace maple
+#endif // MAPLE_DRIVER_INCLUDE_COMPILER_H
diff --git a/ecmascript/mapleall/maple_driver/include/compiler_factory.h b/ecmascript/mapleall/maple_driver/include/compiler_factory.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c6bce8139d8450229f80a27357f02f4769968f7
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/include/compiler_factory.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_DRIVER_INCLUDE_COMPILER_FACTORY_H
+#define MAPLE_DRIVER_INCLUDE_COMPILER_FACTORY_H
+#include <unordered_map>
+#include "compiler.h"
+#include "error_code.h"
+#include "mir_module.h"
+#include "mir_parser.h"
+#include "triple.h"
+
+namespace maple {
+
+class Toolchain {
+  using SupportedCompilers = std::unordered_map<std::string, std::unique_ptr<Compiler>>;
+  SupportedCompilers compilers;
+
+ protected:
+  template <typename T>
+  void AddCompiler(const std::string &toolName) {
+    compilers.insert({toolName, std::make_unique<T>(toolName)});
+  }
+
+ public:
+  Compiler *Find(const std::string &toolName) {
+    auto it = compilers.find(toolName);
+    if (it != compilers.end()) {
+      return it->second.get();
+    }
+    return nullptr;
+  }
+
+  const SupportedCompilers &GetSupportedCompilers() const {
+    return compilers;
+  }
+
+  virtual ~Toolchain() = default;
+};
+
+class Aarch64Toolchain : public Toolchain {
+ public:
+  Aarch64Toolchain() {
+    AddCompiler<Jbc2MplCompiler>("jbc2mpl");
+    AddCompiler<Dex2MplCompiler>("dex2mpl");
+    AddCompiler<Cpp2MplCompiler>("hir2mpl");
+    AddCompiler<ClangCompiler>("clang");
+    AddCompiler<IpaCompiler>("mplipa");
+    AddCompiler<MapleCombCompiler>("me");
+    AddCompiler<MapleCombCompiler>("mpl2mpl");
+    AddCompiler<MplcgCompiler>("mplcg");
+    AddCompiler<MapleCombCompiler>("maplecomb");
+    AddCompiler<MapleCombCompilerWrp>("maplecombwrp");
+    AddCompiler<AsCompiler>("as");
+    AddCompiler<LdCompiler>("ld");
+  }
+};
+
+class Aarch64BeILP32Toolchain : public Toolchain {
+ public:
+  Aarch64BeILP32Toolchain() {
+    AddCompiler<Jbc2MplCompiler>("jbc2mpl");
+    AddCompiler<Dex2MplCompiler>("dex2mpl");
+    AddCompiler<Cpp2MplCompiler>("hir2mpl");
+    AddCompiler<ClangCompilerBeILP32>("clang");
+    AddCompiler<IpaCompiler>("mplipa");
+    AddCompiler<MapleCombCompiler>("me");
+    AddCompiler<MapleCombCompiler>("mpl2mpl");
+    AddCompiler<MplcgCompiler>("mplcg");
+    AddCompiler<MapleCombCompiler>("maplecomb");
+    AddCompiler<MapleCombCompilerWrp>("maplecombwrp");
+    AddCompiler<AsCompilerBeILP32>("as");
+    AddCompiler<LdCompilerBeILP32>("ld");
+  }
+};
+
+class CompilerFactory {
+ public:
+  static CompilerFactory &GetInstance();
+  CompilerFactory(const CompilerFactory&) = delete;
+  CompilerFactory(CompilerFactory&&) = delete;
+  CompilerFactory &operator=(const CompilerFactory&) = delete;
+  CompilerFactory &operator=(CompilerFactory&&) = delete;
+  ~CompilerFactory() = default;
+
+  ErrorCode Compile(MplOptions &mplOptions);
+  Toolchain *GetToolChain();
+
+ private:
+  CompilerFactory() = default;
+
+  ErrorCode Select(const MplOptions &mplOptions, std::vector<Action*> &selectedActions);
+  ErrorCode Select(Action &action, std::vector<Action*> &selectedActions);
+  ErrorCode DeleteTmpFiles(const MplOptions &mplOptions,
+                           const std::vector<std::string> &tempFiles) const;
+
+  bool compileFinished = false;
+  std::unique_ptr<MIRModule> theModule;
+  std::unique_ptr<Toolchain> toolchain;
+};
+} // namespace maple
+#endif // MAPLE_DRIVER_INCLUDE_COMPILER_FACTORY_H
diff --git a/ecmascript/mapleall/maple_driver/include/driver_options.h b/ecmascript/mapleall/maple_driver/include/driver_options.h
new file mode 100644
index 0000000000000000000000000000000000000000..4bca7c19bebab0b6fa0575036f0ac060cfd88985
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/include/driver_options.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#ifndef MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H
+#define MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H
+
+#include "cl_option.h"
+#include "cl_parser.h"
+
+#include <cstdint>
+
+static maplecl::OptionCategory &driverCategory = maplecl::CommandLine::GetCommandLine().defaultCategory;
+
+static maplecl::OptionCategory &clangCategory = maplecl::CommandLine::GetCommandLine().clangCategory;
+static maplecl::OptionCategory &hir2mplCategory = maplecl::CommandLine::GetCommandLine().hir2mplCategory;
+static maplecl::OptionCategory &mpl2mplCategory = maplecl::CommandLine::GetCommandLine().mpl2mplCategory;
+static maplecl::OptionCategory &meCategory = maplecl::CommandLine::GetCommandLine().meCategory;
+static maplecl::OptionCategory &cgCategory = maplecl::CommandLine::GetCommandLine().cgCategory;
+static maplecl::OptionCategory &asCategory = maplecl::CommandLine::GetCommandLine().asCategory;
+static maplecl::OptionCategory &ldCategory = maplecl::CommandLine::GetCommandLine().ldCategory;
+
+static maplecl::OptionCategory &dex2mplCategory = maplecl::CommandLine::GetCommandLine().dex2mplCategory;
+static maplecl::OptionCategory &jbc2mplCategory = maplecl::CommandLine::GetCommandLine().jbc2mplCategory;
+static maplecl::OptionCategory &ipaCategory = maplecl::CommandLine::GetCommandLine().ipaCategory;
+
+namespace opts {
+
+/* ##################### BOOL Options ############################################################### */
+
+extern maplecl::Option<bool> version;
+extern maplecl::Option<bool> ignoreUnkOpt;
+extern maplecl::Option<bool> o0;
+extern maplecl::Option<bool> o1;
+extern maplecl::Option<bool> o2;
+extern maplecl::Option<bool> os;
+extern maplecl::Option<bool> verify;
+extern maplecl::Option<bool> decoupleStatic;
+extern maplecl::Option<bool> bigendian;
+extern maplecl::Option<bool> gconly;
+extern maplecl::Option<bool> timePhase;
+extern maplecl::Option<bool> genMeMpl;
+extern maplecl::Option<bool> compileWOLink;
+extern maplecl::Option<bool> genVtable;
+extern maplecl::Option<bool> verbose;
+extern maplecl::Option<bool> debug;
+extern maplecl::Option<bool> withDwarf;
+extern maplecl::Option<bool> withIpa;
+extern maplecl::Option<bool> npeNoCheck;
+extern maplecl::Option<bool> npeStaticCheck;
+extern maplecl::Option<bool> npeDynamicCheck;
+extern maplecl::Option<bool> npeDynamicCheckSilent;
+extern maplecl::Option<bool> npeDynamicCheckAll;
+extern maplecl::Option<bool> boundaryNoCheck;
+extern maplecl::Option<bool> boundaryStaticCheck;
+extern maplecl::Option<bool> boundaryDynamicCheck;
+extern maplecl::Option<bool> boundaryDynamicCheckSilent;
+extern maplecl::Option<bool> safeRegionOption;
+extern maplecl::Option<bool> printDriverPhases;
+extern maplecl::Option<bool> ldStatic;
+extern maplecl::Option<bool> maplePhase;
+extern maplecl::Option<bool> genMapleBC;
+extern maplecl::Option<bool> genLMBC;
+extern maplecl::Option<bool> profileGen;
+extern maplecl::Option<bool> profileUse;
+
+/* ##################### STRING Options ############################################################### */
+
+extern maplecl::Option<std::string> help;
+extern maplecl::Option<std::string> infile;
+extern maplecl::Option<std::string> intext;
+extern maplecl::Option<std::string> inFileName;
+extern maplecl::Option<std::string> mplt;
+extern maplecl::Option<std::string> partO2;
+extern maplecl::List<std::string> jbc2mplOpt;
+extern maplecl::List<std::string> hir2mplOpt;
+extern maplecl::List<std::string> clangOpt;
+extern maplecl::List<std::string> asOpt;
+extern maplecl::List<std::string> ldOpt;
+extern maplecl::List<std::string> dex2mplOpt;
+extern maplecl::List<std::string> mplipaOpt;
+extern maplecl::List<std::string> mplcgOpt;
+extern maplecl::List<std::string> meOpt;
+extern maplecl::List<std::string> mpl2mplOpt;
+extern maplecl::Option<std::string> profile;
+extern maplecl::Option<std::string> run;
+extern maplecl::Option<std::string> optionOpt;
+extern maplecl::List<std::string> ldLib;
+extern maplecl::List<std::string> ldLibPath;
+extern maplecl::List<std::string> enableMacro;
+extern maplecl::List<std::string> disableMacro;
+extern maplecl::List<std::string> includeDir;
+extern maplecl::List<std::string> includeSystem;
+extern maplecl::Option<std::string> output;
+extern maplecl::Option<std::string> saveTempOpt;
+extern maplecl::Option<std::string> target;
+
+/* ##################### DIGITAL Options ############################################################### */
+
+extern maplecl::Option<uint32_t> helpLevel;
+
+/* #################################################################################################### */
+
+} /* opts */
+
+#endif /* MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H */
diff --git a/ecmascript/mapleall/maple_driver/include/driver_runner.h b/ecmascript/mapleall/maple_driver/include/driver_runner.h
new file mode 100644
index 0000000000000000000000000000000000000000..d22c16a92cfd6e911ac74486589db8901dd77e33
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/include/driver_runner.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_DRIVER_INCLUDE_DRIVER_RUNNER_H
+#define MAPLE_DRIVER_INCLUDE_DRIVER_RUNNER_H
+
+#include <memory>
+#include <string>
+#include <vector>
+#include "me_option.h"
+#include "module_phase_manager.h"
+#include "error_code.h"
+#include "cg.h"
+#include "cg_option.h"
+#include "cg_phasemanager.h"
+#include "maple_phase_manager.h"
+namespace maple {
+using namespace maplebe;
+
+extern const std::string mplCG;
+extern const std::string mpl2Mpl;
+extern const std::string mplME;
+
+class DriverRunner final {
+ public:
+  DriverRunner(MIRModule *theModule, const std::vector<std::string> &exeNames, InputFileType inpFileType,
+               const std::string &mpl2mplInput, const std::string &meInput, const std::string &actualInput,
+               bool dwarf, bool fileParsed = false, bool timePhases = false,
+               bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false)
+      : theModule(theModule),
+        exeNames(exeNames),
+        mpl2mplInput(mpl2mplInput),
+        meInput(meInput),
+        actualInput(actualInput),
+        withDwarf(dwarf),
+        fileParsed(fileParsed),
+        timePhases(timePhases),
+        genVtableImpl(genVtableImpl),
+        genMeMpl(genMeMpl),
+        genMapleBC(genMapleBC),
+        genLMBC(genLMBC),
+        inputFileType(inpFileType) {
+    auto lastDot = actualInput.find_last_of(".");
+    baseName = (lastDot == std::string::npos) ? actualInput : actualInput.substr(0, lastDot);
+  }
+
+  DriverRunner(MIRModule *theModule, const std::vector<std::string> &exeNames, InputFileType inpFileType,
+               const std::string &actualInput, bool dwarf, bool fileParsed = false, bool timePhases = false,
+               bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false)
+      : DriverRunner(theModule, exeNames, inpFileType, "", "", actualInput, dwarf,
+                     fileParsed, timePhases, genVtableImpl, genMeMpl, genMapleBC, genLMBC) {
+    auto lastDot = actualInput.find_last_of(".");
+    baseName = (lastDot == std::string::npos) ? actualInput : actualInput.substr(0, lastDot);
+  }
+
+  ~DriverRunner() = default;
+
+  ErrorCode Run();
+  void RunNewPM(const std::string &output, const std::string &vtableImplFile);
+  void ProcessCGPhase(const std::string &output, const std::string &originBaseName);
+  void SetCGInfo(CGOptions *cgOptions, const std::string &cgInput) {
+    this->cgOptions = cgOptions;
+    this->cgInput = cgInput;
+  }
+  ErrorCode ParseInput() const;
+  ErrorCode ParseSrcLang(MIRSrcLang &srcLang) const;
+  void SolveCrossModuleInJava(MIRParser &parser) const;
+  void SolveCrossModuleInC(MIRParser &parser) const;
+  void SetPrintOutExe(const std::string outExe) {
+    printOutExe = outExe;
+  }
+
+  void SetMpl2mplOptions(Options *options) {
+    mpl2mplOptions = options;
+  }
+
+  void SetMeOptions(MeOption *options) {
+    meOptions = options;
+  }
+
+ private:
+  std::string GetPostfix();
+  void ProcessMpl2mplAndMePhases(const std::string &output, const std::string &vtableImplFile);
+  CGOptions *cgOptions = nullptr;
+  std::string cgInput;
+  void InitProfile() const;
+  MIRModule *theModule;
+  std::vector<std::string> exeNames = {};
+  Options *mpl2mplOptions = nullptr;
+  std::string mpl2mplInput;
+  MeOption *meOptions = nullptr;
+  std::string meInput;
+  std::string actualInput;
+  bool withDwarf = false;
+  bool fileParsed = false;
+  bool timePhases = false;
+  bool genVtableImpl = false;
+  bool genMeMpl = false;
+  bool genMapleBC = false;
+  bool genLMBC = false;
+  std::string printOutExe = "";
+  std::string baseName;
+  std::string outputFile;
+  InputFileType inputFileType;
+};
+} // namespace maple
+
+#endif // MAPLE_DRIVER_INCLUDE_DRIVER_RUNNER_H
diff --git a/ecmascript/mapleall/maple_driver/include/mpl_options.h b/ecmascript/mapleall/maple_driver/include/mpl_options.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca4cf22d5d6d27d1e7202632ee5402e80b498d55
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/include/mpl_options.h
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H +#define MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "driver_options.h" +#include "error_code.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "mir_module.h" + +namespace maple { +enum InputFileType { + kFileTypeNone, + kFileTypeClass, + kFileTypeJar, + kFileTypeAst, + kFileTypeCpp, + kFileTypeC, + kFileTypeDex, + kFileTypeMpl, + kFileTypeVtableImplMpl, + kFileTypeS, + kFileTypeObj, + kFileTypeBpl, + kFileTypeMeMpl, + kFileTypeMbc, + kFileTypeLmbc, +}; + +enum OptimizationLevel { + kO0, + kO1, + kO2, + kCLangO0, + kCLangO2, +}; + +enum RunMode { + kAutoRun, + kCustomRun, + kUnkownRun +}; + +enum SafetyCheckMode { + kNoCheck, + kStaticCheck, + kDynamicCheck, + kDynamicCheckSilent +}; + +class Compiler; + +class InputInfo { +public: + InputInfo() {} + explicit InputInfo(const std::string &inputFile) + : inputFile(inputFile) { + inputFileType = GetInputFileType(inputFile); + + inputName = FileUtils::GetFileName(inputFile, true); + inputFolder = FileUtils::GetFileFolder(inputFile); + outputFolder = inputFolder; + outputName = FileUtils::GetFileName(inputFile, false); + fullOutput = outputFolder + outputName; + } + + ~InputInfo() = default; + static InputFileType GetInputFileType(const std::string &inputFile) { + InputFileType fileType = InputFileType::kFileTypeNone; + std::string extensionName = FileUtils::GetFileExtension(inputFile); + if (extensionName == "class") { + fileType = InputFileType::kFileTypeClass; + } + else if (extensionName == "dex") { + fileType = InputFileType::kFileTypeDex; + } + else if (extensionName == "c") { + fileType = InputFileType::kFileTypeC; + } + else if (extensionName == "cpp") { + fileType = InputFileType::kFileTypeCpp; + } + else if (extensionName == "ast") { + fileType = InputFileType::kFileTypeAst; + } + else if (extensionName == "jar") { + fileType = InputFileType::kFileTypeJar; + } + else if (extensionName == "mpl" || extensionName == "bpl") { + if (inputFile.find("VtableImpl") == std::string::npos) { + if (inputFile.find(".me.mpl") != std::string::npos) { + fileType = InputFileType::kFileTypeMeMpl; + } else { + fileType = extensionName == "mpl" ? 
InputFileType::kFileTypeMpl : InputFileType::kFileTypeBpl; + } + } else { + fileType = InputFileType::kFileTypeVtableImplMpl; + } + } else if (extensionName == "s") { + fileType = InputFileType::kFileTypeS; + } else if (extensionName == "o") { + fileType = InputFileType::kFileTypeObj; + } else if (extensionName == "mbc") { + fileType = InputFileType::kFileTypeMbc; + } else if (extensionName == "lmbc") { + fileType = InputFileType::kFileTypeLmbc; + } + + return fileType; + } + + void SetInputTextAndInputFileName(const std::string &inText, const std::string &inputFileName) { + inputText = inText; + inputFileType = GetInputFileType(inputFileName); + inputName = FileUtils::GetFileName(inputFileName, true); + inputFolder = FileUtils::GetFileFolder(inputFileName); + outputFolder = inputFolder; + outputName = FileUtils::GetFileName(inputFileName, false); + fullOutput = outputFolder + outputName; + } + + InputFileType GetInputFileType() const { + return inputFileType; + } + + const std::string &GetInputFile() const { + return inputFile; + } + + const std::string &GetInputText() const { + return inputText; + } + + const std::string &GetOutputFolder() const { + return outputFolder; + } + + const std::string &GetOutputName() const { + return outputName; + } + + const std::string &GetFullOutputName() const { + return fullOutput; + } + +private: + std::string inputFile = ""; + std::string inputText; + InputFileType inputFileType = InputFileType::kFileTypeNone; + + std::string inputName = ""; + std::string inputFolder = ""; + std::string outputName = ""; + std::string outputFolder = ""; + std::string fullOutput = ""; +}; + +class Action { +public: + Action(const std::string &tool, const InputInfo *const inputInfo) + : inputInfo(inputInfo), tool(tool) {} + + Action(const std::string &tool, const InputInfo *const inputInfo, + std::unique_ptr &inAction) + : inputInfo(inputInfo), tool(tool) { + inputActions.push_back(std::move(inAction)); + } + + Action(const std::string &tool, std::vector> &inActions, + const InputInfo *const inputInfo) + : inputInfo(inputInfo), tool(tool) { + for (auto &inAction : inActions) { + linkInputFiles.push_back(inAction->GetInputFile()); + } + + std::move(begin(inActions), end(inActions), std::back_inserter(inputActions)); + } + + ~Action() = default; + + const std::string &GetTool() const { + return tool; + } + + const std::string &GetInputText() const { + return inputInfo->GetInputText(); + } + + const std::string &GetInputFile() const { + return inputInfo->GetInputFile(); + } + + const std::string &GetOutputFolder() const { + return inputInfo->GetOutputFolder(); + } + + const std::string &GetOutputName() const { + return inputInfo->GetOutputName(); + } + + const std::string &GetFullOutputName() const { + return inputInfo->GetFullOutputName(); + } + + InputFileType GetInputFileType() const { + return inputInfo->GetInputFileType(); + } + + const std::vector &GetLinkFiles() const { + return linkInputFiles; + } + + const std::vector> &GetInputActions() const { + return inputActions; + } + + Compiler *GetCompiler() const { + return compilerTool; + } + + void SetCompiler(Compiler *compiler) { + compilerTool = compiler; + } + + bool IsItFirstRealAction() const { + /* First action is always "Input". + * But first real action will be a tool from kMapleCompilers. 
+ */ + if (inputActions.size() > 0 && inputActions[0]->tool == "input") { + return true; + } + return false; + } + +private: + const InputInfo *inputInfo; + + std::string tool = ""; + std::string exeFolder = ""; + std::vector linkInputFiles; + + Compiler *compilerTool = nullptr; + + /* This vector contains a links to previous actions in Action tree */ + std::vector> inputActions; +}; + +class MplOption { + public: + MplOption(){needRootPath = false;} + MplOption(const std::string &key, const std::string &value, bool needRootPath = false) + : key(key), + value(value), + needRootPath(needRootPath) { + CHECK_FATAL(!key.empty(), "MplOption got an empty key."); + } + + ~MplOption() = default; + + const std::string &GetKey() const { + return key; + } + + const std::string &GetValue() const { + return value; + } + + void SetValue(std::string val) { + value = val; + } + + void SetKey(const std::string &k) { + key = k; + } + + bool GetNeedRootPath() const { + return needRootPath; + } + + private: + // option key + std::string key; + // option value + std::string value; + bool needRootPath; +}; + +struct DefaultOption { + std::unique_ptr mplOptions; + uint32_t length = 0; +}; + +class MplOptions { + public: + MplOptions() = default; + using ExeOptMapType = std::unordered_map>; + MplOptions(const MplOptions &options) = delete; + MplOptions &operator=(const MplOptions &options) = delete; + ~MplOptions() = default; + + ErrorCode Parse(int argc, char **argv); + + int Parse(std::vector argvs); + + const ExeOptMapType &GetExeOptions() const { + return exeOptions; + } + + const std::vector &GetInputFiles() const { + return inputFiles; + } + + const std::string &GetExeFolder() const { + return exeFolder; + } + + const RunMode &GetRunMode() const { + return runMode; + } + + const std::vector &GetSaveFiles() const { + return saveFiles; + } + + const std::vector &GetRunningExes() const { + return runningExes; + } + + const std::vector &GetSelectedExes() const { + return selectedExes; + } + + bool HasSetGeneralRegOnly() const { + return generalRegOnly; + } + + SafetyCheckMode GetNpeCheckMode() const { + return npeCheckMode; + } + + SafetyCheckMode GetBoundaryCheckMode() const { + return boundaryCheckMode; + } + + const std::vector> &GetActions() const { + return rootActions; + } + + maplecl::OptionCategory *GetCategory(const std::string &tool) const; + ErrorCode AppendCombOptions(MIRSrcLang srcLang); + ErrorCode AppendMplcgOptions(MIRSrcLang srcLang); + std::string GetInputFileNameForPrint(const Action * const action) const; + void PrintCommand(const Action * const action); + void connectOptStr(std::string &optionStr, const std::string &exeName, bool &firstComb, std::string &runStr); + std::string GetCommonOptionsStr() const; + void PrintDetailCommand(const Action * const action, bool isBeforeParse); + inline void PrintDetailCommand(bool isBeforeParse) { + PrintDetailCommand(nullptr, isBeforeParse); + } + + private: + bool Init(const std::string &inputFile); + bool InitText(const std::string &input); + bool InitFileName(const std::string &inputFileName); + ErrorCode CheckInputFiles(); + ErrorCode HandleOptions(); + ErrorCode HandleInFileTextOptions(); + void HandleExtraOptions(); + ErrorCode HandleEarlyOptions(); + ErrorCode DecideRunningPhases(); + ErrorCode DecideRunningPhases(const std::vector &runExes); + std::unique_ptr DecideRunningPhasesByType(const InputInfo *const inputInfo, bool isMultipleFiles); + ErrorCode MFCreateActionByExe(const std::string &exe, std::unique_ptr ¤tAction, + const InputInfo *const 
inputInfo, bool &wasWrpCombCompilerCreated); + ErrorCode SFCreateActionByExe(const std::string &exe, std::unique_ptr ¤tAction, + const InputInfo *const inputInfo, bool &isCombCompiler); + InputInfo *AllocateInputInfo(const std::string &inputFile); + ErrorCode AppendDefaultOptions(const std::string &exeName, MplOption mplOptions[], unsigned int length); + void DumpAppendedOptions(const std::string &exeName, + const MplOption mplOptions[], unsigned int length) const; + void UpdateRunningExe(const std::string &args); + void UpdateExeOptions(const std::string &options, const std::string &tool); + ErrorCode UpdateExeOptions(const std::string &args); + void DumpActionTree(const Action &action, int indents) const; + void DumpActionTree() const; + + std::vector inputFiles; + std::string exeFolder; + RunMode runMode = RunMode::kUnkownRun; + std::vector saveFiles = {}; + std::vector runningExes = {}; + std::vector selectedExes = {}; + std::ostringstream printExtraOptStr; + + /* exeOptions is used to forward options to necessary tool. + * As example: --ld-opt="opt1 opt2" will be forwarded to linker */ + ExeOptMapType exeOptions; + + bool hasPrinted = false; + bool generalRegOnly = false; + SafetyCheckMode npeCheckMode = SafetyCheckMode::kNoCheck; + SafetyCheckMode boundaryCheckMode = SafetyCheckMode::kNoCheck; + bool needFile = true; + + std::vector> inputInfos; + std::vector> rootActions; +}; + +enum Level { + kLevelZero = 0, + kLevelOne = 1, + kLevelTwo = 2, + kLevelThree = 3, + kLevelFour = 4 +}; + +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H diff --git a/ecmascript/mapleall/maple_driver/include/safe_exe.h b/ecmascript/mapleall/maple_driver/include/safe_exe.h new file mode 100644 index 0000000000000000000000000000000000000000..eddb9f64654c0a0ddf0dfccd1a7a850ba4df7d8b --- /dev/null +++ b/ecmascript/mapleall/maple_driver/include/safe_exe.h @@ -0,0 +1,303 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_DRIVER_INCLUDE_SAFE_EXE_H +#define MAPLE_DRIVER_INCLUDE_SAFE_EXE_H + +/* To start a new process for dex2mpl/mplipa, we need sys/wait on unix-like systems to + * make it complete. 
diff --git a/ecmascript/mapleall/maple_driver/include/safe_exe.h b/ecmascript/mapleall/maple_driver/include/safe_exe.h
new file mode 100644
index 0000000000000000000000000000000000000000..eddb9f64654c0a0ddf0dfccd1a7a850ba4df7d8b
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/include/safe_exe.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_DRIVER_INCLUDE_SAFE_EXE_H
+#define MAPLE_DRIVER_INCLUDE_SAFE_EXE_H
+
+/* To start a new process for dex2mpl/mplipa, we need sys/wait on unix-like systems to
+ * make it complete. However, there is no sys/wait.h for MinGW, so we use CreateProcess
+ * from windows.h instead.
+ */
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/wait.h>
+#endif
+
+#include <string>
+#include <tuple>
+#include <vector>
+#include "error_code.h"
+#include "mpl_logging.h"
+#include "mpl_options.h"
+#include "string_utils.h"
+#include "securec.h"
+
+namespace maple {
+class SafeExe {
+ public:
+#ifndef _WIN32
+  static ErrorCode HandleCommand(const std::string &cmd, const std::string &args) {
+    std::vector<std::string> vectorArgs = ParseArgsVector(cmd, args);
+    // extra space for exe name and args
+    char **argv = new char *[vectorArgs.size() + 1];
+    // argv[0] is program name
+    // copy args
+    for (size_t j = 0; j < vectorArgs.size(); ++j) {
+      size_t strLength = vectorArgs[j].size();
+      argv[j] = new char[strLength + 1];
+      strncpy_s(argv[j], strLength + 1, vectorArgs[j].c_str(), strLength);
+      argv[j][strLength] = '\0';
+    }
+    // end of arguments sentinel is nullptr
+    argv[vectorArgs.size()] = nullptr;
+    pid_t pid = fork();
+    ErrorCode ret = kErrorNoError;
+    if (pid == 0) {
+      // child process
+      fflush(nullptr);
+      if (execv(cmd.c_str(), argv) < 0) {
+        for (size_t j = 0; j < vectorArgs.size(); ++j) {
+          delete [] argv[j];
+        }
+        delete [] argv;
+        exit(1);
+      }
+    } else {
+      // parent process
+      int status = -1;
+      waitpid(pid, &status, 0);
+      if (!WIFEXITED(status)) {
+        LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n';
+        ret = kErrorCompileFail;
+      } else if (WEXITSTATUS(status) != 0) {
+        LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n';
+        ret = kErrorCompileFail;
+      }
+    }
+
+    for (size_t j = 0; j < vectorArgs.size(); ++j) {
+      delete [] argv[j];
+    }
+    delete [] argv;
+    return ret;
+  }
+
+  static ErrorCode HandleCommand(const std::string &cmd,
+                                 const std::vector<MplOption> &options) {
+    size_t argIndex;
+    char **argv;
+    std::tie(argv, argIndex) = GenerateUnixArguments(cmd, options);
+
+    LogInfo::MapleLogger() << "Run: " << cmd;
+    for (auto &opt : options) {
+      LogInfo::MapleLogger() << " " << opt.GetKey() << " " << opt.GetValue();
+    }
+    LogInfo::MapleLogger() << "\n";
+
+    pid_t pid = fork();
+    ErrorCode ret = kErrorNoError;
+    if (pid == 0) {
+      // child process
+      fflush(nullptr);
+      if (execv(cmd.c_str(), argv) < 0) {
+        /* last argv[argIndex] is nullptr, so it's j < argIndex (NOT j <= argIndex) */
+        for (size_t j = 0; j < argIndex; ++j) {
+          delete [] argv[j];
+        }
+        delete [] argv;
+        exit(1);
+      }
+    } else {
+      // parent process
+      int status = -1;
+      waitpid(pid, &status, 0);
+      if (!WIFEXITED(status)) {
+        ret = kErrorCompileFail;
+      } else if (WEXITSTATUS(status) != 0) {
+        ret = kErrorCompileFail;
+      }
+
+      if (ret != kErrorNoError) {
+        LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: ";
+        for (auto &opt : options) {
+          LogInfo::MapleLogger() << opt.GetKey() << " " << opt.GetValue();
+        }
+        LogInfo::MapleLogger() << "\n";
+      }
+    }
+
+    /* last argv[argIndex] is nullptr, so it's j < argIndex (NOT j <= argIndex) */
+    for (size_t j = 0; j < argIndex; ++j) {
+      delete [] argv[j];
+    }
+    delete [] argv;
+    return ret;
+  }
+#else
+  static ErrorCode HandleCommand(const std::string &cmd, const std::string &args) {
+    ErrorCode ret = ErrorCode::kErrorNoError;
+
+    STARTUPINFO startInfo;
+    PROCESS_INFORMATION pInfo;
+    DWORD exitCode;
+
+    errno_t retSafe = memset_s(&startInfo, sizeof(STARTUPINFO), 0, sizeof(STARTUPINFO));
+    CHECK_FATAL(retSafe == EOK, "memset_s for StartUpInfo failed when HandleCommand");
+
+    startInfo.cb = sizeof(STARTUPINFO);
+
+    char* appName = strdup(cmd.c_str());
+    char* cmdLine = strdup(args.c_str());
+    CHECK_FATAL(appName != nullptr, "strdup for appName failed");
+    CHECK_FATAL(cmdLine != nullptr, "strdup for cmdLine failed");
+
+    bool success = CreateProcess(appName, cmdLine, NULL, NULL, FALSE,
+                                 NORMAL_PRIORITY_CLASS, NULL, NULL, &startInfo, &pInfo);
+    CHECK_FATAL(success != 0, "CreateProcess failed when HandleCommand");
+
+    WaitForSingleObject(pInfo.hProcess, INFINITE);
+    GetExitCodeProcess(pInfo.hProcess, &exitCode);
+
+    if (exitCode != 0) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args
+                             << " exitCode: " << exitCode << '\n';
+      ret = ErrorCode::kErrorCompileFail;
+    }
+
+    free(appName);
+    free(cmdLine);
+    appName = nullptr;
+    cmdLine = nullptr;
+    return ret;
+  }
+
+  static ErrorCode HandleCommand(const std::string &cmd,
+                                 const std::vector<MplOption> &options) {
+    ErrorCode ret = ErrorCode::kErrorNoError;
+
+    STARTUPINFO startInfo;
+    PROCESS_INFORMATION pInfo;
+    DWORD exitCode;
+
+    errno_t retSafe = memset_s(&startInfo, sizeof(STARTUPINFO), 0, sizeof(STARTUPINFO));
+    CHECK_FATAL(retSafe == EOK, "memset_s for StartUpInfo failed when HandleCommand");
+
+    startInfo.cb = sizeof(STARTUPINFO);
+    std::string argString;
+    for (auto &opt : options) {
+      argString += opt.GetKey() + " " + opt.GetValue() + " ";
+    }
+
+    char* appName = strdup(cmd.c_str());
+    char* cmdLine = strdup(argString.c_str());
+    CHECK_FATAL(appName != nullptr, "strdup for appName failed");
+    CHECK_FATAL(cmdLine != nullptr, "strdup for cmdLine failed");
+
+    bool success = CreateProcess(appName, cmdLine, NULL, NULL, FALSE,
+                                 NORMAL_PRIORITY_CLASS, NULL, NULL, &startInfo, &pInfo);
+    CHECK_FATAL(success != 0, "CreateProcess failed when HandleCommand");
+
+    WaitForSingleObject(pInfo.hProcess, INFINITE);
+    GetExitCodeProcess(pInfo.hProcess, &exitCode);
+
+    if (exitCode != 0) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << argString
+                             << " exitCode: " << exitCode << '\n';
+      ret = ErrorCode::kErrorCompileFail;
+    }
+
+    free(appName);
+    free(cmdLine);
+    appName = nullptr;
+    cmdLine = nullptr;
+    return ret;
+  }
+#endif
+
+  static ErrorCode Exe(const std::string &cmd, const std::string &args) {
+    LogInfo::MapleLogger() << "Starting: " << cmd << " " << args << '\n';
+    if (StringUtils::HasCommandInjectionChar(cmd) || StringUtils::HasCommandInjectionChar(args)) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n';
+      return kErrorCompileFail;
+    }
+    ErrorCode ret = HandleCommand(cmd, args);
+    return ret;
+  }
+
+  static ErrorCode Exe(const std::string &cmd,
+                       const std::vector<MplOption> &options) {
+    if (StringUtils::HasCommandInjectionChar(cmd)) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << '\n';
+      return kErrorCompileFail;
+    }
+    ErrorCode ret = HandleCommand(cmd, options);
+    return ret;
+  }
+
+ private:
+  static std::vector<std::string> ParseArgsVector(const std::string &cmd, const std::string &args) {
+    std::vector<std::string> tmpArgs;
+    StringUtils::Split(args, tmpArgs, ' ');
+    // remove ' ' in vector
+    for (auto iter = tmpArgs.begin(); iter != tmpArgs.end();) {
+      if (*iter == " " || *iter == "") {
+        iter = tmpArgs.erase(iter);
+      } else {
+        ++iter;
+      }
+    }
+    (void)tmpArgs.insert(tmpArgs.begin(), cmd);
+    return tmpArgs;
+  }
+
+  static std::tuple<char **, size_t> GenerateUnixArguments(const std::string &cmd,
+                                                           const std::vector<MplOption> &options) {
+    /* argSize=2, because we reserve the 1st arg for the exe binary and another arg for the last nullptr arg */
+    size_t argSize = 2;
+
+    /* Calculate how many args are needed.
+     * (* 2) is needed, because we have key and value arguments in each option
+     */
+    argSize += options.size() * 2;
+
+    /* extra space for exe name and args */
+    char **argv = new char *[argSize];
+
+    // argv[0] is program name
+    // copy args
+    auto cmdSize = cmd.size() + 1; // +1 for the NUL terminator
+    argv[0] = new char[cmdSize];
+    strncpy_s(argv[0], cmdSize, cmd.c_str(), cmdSize); // c_str() includes the NUL terminator
+
+    /* Allocate and fill all arguments */
+    size_t argIndex = 1; // first index is reserved for cmd, so it starts with 1
+    for (auto &opt : options) {
+      auto key = opt.GetKey();
+      auto val = opt.GetValue();
+      /* +1 for the NUL terminator */
+      auto keySize = key.size() + 1;
+      auto valSize = val.size() + 1;
+
+      if (keySize != 1) {
+        argv[argIndex] = new char[keySize];
+        strncpy_s(argv[argIndex], keySize, key.c_str(), keySize);
+        ++argIndex;
+      }
+
+      if (valSize != 1) {
+        argv[argIndex] = new char[valSize];
+        strncpy_s(argv[argIndex], valSize, val.c_str(), valSize);
+        ++argIndex;
+      }
+    }
+
+    // end of arguments sentinel is nullptr
+    argv[argIndex] = nullptr;
+
+    return std::make_tuple(argv, argIndex);
+  }
+};
+} // namespace maple
+#endif // MAPLE_DRIVER_INCLUDE_SAFE_EXE_H
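SafeExe's Unix path boils down to the classic fork/execv/waitpid pattern with the exit status decoded through WIFEXITED/WEXITSTATUS, exactly as in the header above. A standalone sketch of that pattern, with `/bin/ls` as a stand-in for a real tool binary:

```
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

// Standalone sketch of the fork/execv/waitpid pattern SafeExe::HandleCommand uses.
int RunTool(const char *path, char *const argv[]) {
  pid_t pid = fork();
  if (pid == 0) {            // child: replace this image with the tool
    fflush(nullptr);         // flush stdio before exec, as SafeExe does
    execv(path, argv);
    _exit(1);                // only reached if execv failed
  }
  int status = -1;
  waitpid(pid, &status, 0);  // parent: wait for the child and decode its status
  if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
    return -1;               // abnormal termination or nonzero exit code
  }
  return 0;
}

int main() {
  char *const argv[] = {const_cast<char *>("/bin/ls"), nullptr};
  return RunTool("/bin/ls", argv) == 0 ? 0 : 1;
}
```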
diff --git a/ecmascript/mapleall/maple_driver/include/triple.h b/ecmascript/mapleall/maple_driver/include/triple.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9ff25bf04fe83cf868fedb4d76399b7969cd86b
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/include/triple.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_TRIPLE_H
+#define MAPLE_TRIPLE_H
+
+#include <string>
+#include <string_view>
+
+#include
+#include
+#include
+
+namespace maple {
+
+class Triple {
+ public:
+  /* Currently, only aarch64 is supported */
+  enum ArchType {
+    UnknownArch,
+    aarch64,
+    aarch64_be,
+    LastArchType
+  };
+
+  /* Currently, only ILP32 and LP64 are supported */
+  enum EnvironmentType {
+    UnknownEnvironment,
+    GNU,
+    GNUILP32,
+    LastEnvironmentType
+  };
+
+  ArchType GetArch() const { return arch; }
+  EnvironmentType GetEnvironment() const { return environment; }
+
+  bool IsBigEndian() const {
+    return (GetArch() == ArchType::aarch64_be);
+  }
+
+  std::string Str() const;
+  std::string GetArchName() const;
+  std::string GetEnvironmentName() const;
+
+  static Triple &GetTriple() {
+    static Triple triple;
+    return triple;
+  }
+  Triple(const Triple &) = delete;
+  Triple &operator=(const Triple &) = delete;
+
+  void Init(const std::string &target);
+  void Init();
+
+ private:
+  std::string data;
+  ArchType arch;
+  EnvironmentType environment;
+
+  Triple()
+      : arch(UnknownArch), environment(UnknownEnvironment) {}
+
+  Triple::ArchType ParseArch(std::string_view archStr);
+  Triple::EnvironmentType ParseEnvironment(std::string_view environmentType);
+};
+
+} // namespace maple
+
+#endif /* MAPLE_TRIPLE_H */
diff --git a/ecmascript/mapleall/maple_driver/src/as_compiler.cpp b/ecmascript/mapleall/maple_driver/src/as_compiler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a254ba423783d9f7477be6090b5e0cb348f93290
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/src/as_compiler.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "compiler.h"
+#include "file_utils.h"
+#include "triple.h"
+#include "default_options.def"
+
+namespace maple {
+static const std::string kAarch64BeIlp32As = "aarch64_be-linux-gnuilp32-as";
+static const std::string kAarch64BeAs = "aarch64_be-linux-gnu-as";
+
+std::string AsCompilerBeILP32::GetBinPath(const MplOptions&) const {
+  std::string gccPath = FileUtils::SafeGetenv(kGccBePathEnv) + "/";
+  const std::string &gccTool = Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32 ?
+ kAarch64BeIlp32As : kAarch64BeAs; + std::string gccToolPath = gccPath + gccTool; + + if (!FileUtils::IsFileExists(gccToolPath)) { + LogInfo::MapleLogger(kLlErr) << kGccBePathEnv << " environment variable must be set as the path to " + << gccTool << "\n"; + CHECK_FATAL(false, "%s environment variable must be set as the path to %s\n", + kGccBePathEnv, gccTool.c_str()); + } + + return gccPath; +} + +const std::string &AsCompilerBeILP32::GetBinName() const { + if (Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + return kAarch64BeIlp32As; + } else { + return kAarch64BeAs; + } +} + +DefaultOption AsCompilerBeILP32::GetDefaultOptions(const MplOptions &options, const Action &action) const { + + auto &triple = Triple::GetTriple(); + if (triple.GetArch() != Triple::ArchType::aarch64_be || + triple.GetEnvironment() == Triple::EnvironmentType::UnknownEnvironment) { + CHECK_FATAL(false, "ClangCompilerBeILP32 supports only aarch64_be GNU/GNUILP32 targets\n"); + } + + uint32_t len = 1; // for -o option + if (triple.GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + ++len; // for -mabi=ilp32 + } + DefaultOption defaultOptions = { std::make_unique(len), len }; + + defaultOptions.mplOptions[0].SetKey("-o"); + defaultOptions.mplOptions[0].SetValue(action.GetFullOutputName() + ".o"); + + if (triple.GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + defaultOptions.mplOptions[1].SetKey("-mabi=ilp32"); + defaultOptions.mplOptions[1].SetValue(""); + } + + return defaultOptions; +} + +std::string AsCompiler::GetBinPath(const MplOptions&) const { +#ifdef ANDROID + return "prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/"; +#else + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/"; +#endif +} + +const std::string &AsCompiler::GetBinName() const { + return kBinNameAs; +} + +/* the tool name must be the same as exeName field in Descriptor structure */ +const std::string &AsCompiler::GetTool() const { + return kAsFlag; +} + +DefaultOption AsCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + + uint32_t len = 1; // for -o option + DefaultOption defaultOptions = { std::make_unique(len), len }; + + defaultOptions.mplOptions[0].SetKey("-o"); + defaultOptions.mplOptions[0].SetValue(action.GetFullOutputName() + ".o"); + + return defaultOptions; +} + +std::string AsCompiler::GetInputFileName(const MplOptions &options, const Action &action) const { + return action.GetFullOutputName() + ".s"; +} + +void AsCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".o"); +} + +std::unordered_set AsCompiler::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + auto finalOutputs = std::unordered_set(); + (void)finalOutputs.insert(action.GetFullOutputName() + ".o"); + return finalOutputs; +} + +void AsCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + finalOptions.emplace_back("-o", name); +} + +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/clang_compiler.cpp b/ecmascript/mapleall/maple_driver/src/clang_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..21954b27e5ae2c29888c8b8b5966388811f09873 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/clang_compiler.cpp @@ -0,0 +1,153 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include "compiler.h" +#include "file_utils.h" +#include "mpl_timer.h" +#include "triple.h" +#include "default_options.def" + +namespace maple { + +DefaultOption ClangCompilerBeILP32::GetDefaultOptions(const MplOptions &options, + const Action &action) const { + auto &triple = Triple::GetTriple(); + if (triple.GetArch() != Triple::ArchType::aarch64_be || + triple.GetEnvironment() == Triple::EnvironmentType::UnknownEnvironment) { + CHECK_FATAL(false, "ClangCompilerBeILP32 supports only aarch64_be GNU/GNUILP32 targets\n"); + } + + uint32_t additionalLen = 4; // -o and --target + uint32_t fullLen = (sizeof(kClangDefaultOptions) / sizeof(MplOption)) + additionalLen; + DefaultOption defaultOptions = { std::make_unique(fullLen), fullLen }; + + defaultOptions.mplOptions[0].SetKey("-o"); + defaultOptions.mplOptions[0].SetValue(action.GetFullOutputName() + ".ast"); + + defaultOptions.mplOptions[1].SetKey("-target"); + defaultOptions.mplOptions[1].SetValue(triple.Str()); + + if (triple.GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + defaultOptions.mplOptions[2].SetKey("--sysroot=" + FileUtils::SafeGetenv(kGccBeIlp32SysrootPathEnv)); + } else { + defaultOptions.mplOptions[2].SetKey("--sysroot=" + FileUtils::SafeGetenv(kGccBeSysrootPathEnv)); + } + defaultOptions.mplOptions[2].SetValue(""); + + defaultOptions.mplOptions[3].SetKey("-U__SIZEOF_INT128__"); + defaultOptions.mplOptions[3].SetValue(""); + + for (uint32_t i = additionalLen, j=0; i < fullLen; ++i,++j) { + defaultOptions.mplOptions[i] = kClangDefaultOptions[j]; + } + for (uint32_t i = additionalLen; i < fullLen; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + + return defaultOptions; +} + +std::string ClangCompiler::GetBinPath(const MplOptions&) const{ + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/"; +} + +const std::string &ClangCompiler::GetBinName() const { + return kBinNameClang; +} + +static uint32_t FillSpecialDefaulOpt(std::unique_ptr &opt, + const Action &action) { + uint32_t additionalLen = 1; // for -o option + + auto &triple = Triple::GetTriple(); + if (triple.GetArch() != Triple::ArchType::aarch64 || + triple.GetEnvironment() != Triple::EnvironmentType::GNU) { + CHECK_FATAL(false, "Use -target option to select another toolchain\n"); + } + + additionalLen += 3; // 3 options are filled below + opt = std::make_unique(additionalLen); + + opt[0].SetKey("-isystem"); + opt[0].SetValue(FileUtils::SafeGetenv(kMapleRoot) + + "/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include"); + + opt[1].SetKey("-isystem"); + opt[1].SetValue(FileUtils::SafeGetenv(kMapleRoot) + + "/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include"); + + opt[2].SetKey("-target"); + opt[2].SetValue(triple.Str()); + + /* Set last option as -o option */ + opt[additionalLen-1].SetKey("-o"); + opt[additionalLen-1].SetValue(action.GetFullOutputName() + 
".ast"); + + return additionalLen; +} + +DefaultOption ClangCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + DefaultOption defaultOptions; + uint32_t fullLen = 0; + uint32_t defaultLen = 0; + uint32_t additionalLen = 0; + std::unique_ptr additionalOptions; + + additionalLen = FillSpecialDefaulOpt(additionalOptions, action); + defaultLen = sizeof(kClangDefaultOptions) / sizeof(MplOption); + fullLen = defaultLen + additionalLen; + + defaultOptions = { std::make_unique(fullLen), fullLen }; + + for (uint32_t i = 0; i < defaultLen; ++i) { + defaultOptions.mplOptions[i] = kClangDefaultOptions[i]; + } + for (uint32_t defInd = defaultLen, additionalInd = 0; + additionalInd < additionalLen; ++additionalInd) { + defaultOptions.mplOptions[defInd++] = additionalOptions[additionalInd]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +void ClangCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".ast"); +} + +std::unordered_set ClangCompiler::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".ast"); + return finalOutputs; +} + +void ClangCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + finalOptions.emplace_back("-o", name); +} + +} diff --git a/ecmascript/mapleall/maple_driver/src/compiler.cpp b/ecmascript/mapleall/maple_driver/src/compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ab2fd3a64e59a46a68c6798ae2402f8861f59587 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/compiler.cpp @@ -0,0 +1,184 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "compiler.h" +#include +#include "driver_options.h" +#include "file_utils.h" +#include "safe_exe.h" +#include "mpl_timer.h" + +namespace maple { + +int Compiler::Exe(const MplOptions &mplOptions, + const std::vector &options) const { + std::ostringstream ostrStream; + ostrStream << GetBinPath(mplOptions) << GetBinName(); + std::string binPath = ostrStream.str(); + return SafeExe::Exe(binPath, options); +} + +std::string Compiler::GetBinPath(const MplOptions &mplOptions) const { +#ifdef MAPLE_PRODUCT_EXECUTABLE // build flag -DMAPLE_PRODUCT_EXECUTABLE + std::string binPath = std::string(MAPLE_PRODUCT_EXECUTABLE); + if (binPath.empty()) { + binPath = mplOptions.GetExeFolder(); + } else { + binPath = binPath + kFileSeperatorChar; + } +#else + std::string binPath = mplOptions.GetExeFolder(); +#endif + return binPath; +} + +ErrorCode Compiler::Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) { + MPLTimer timer = MPLTimer(); + LogInfo::MapleLogger() << "Starting " << GetName() << '\n'; + timer.Start(); + + std::vector generatedOptions = MakeOption(options, action); + if (generatedOptions.empty()) { + return kErrorInvalidParameter; + } + if (Exe(options, generatedOptions) != 0) { + return kErrorCompileFail; + } + timer.Stop(); + LogInfo::MapleLogger() << (GetName() + " consumed ") << timer.Elapsed() << "s\n"; + return kErrorNoError; +} + +std::vector Compiler::MakeOption(const MplOptions &options, + const Action &action) const { + std::vector finalOptions; + std::vector defaultOptions = MakeDefaultOptions(options, action); + + AppendInputsAsOptions(finalOptions, options, action); + AppendDefaultOptions(finalOptions, defaultOptions, opts::debug); + AppendExtraOptions(finalOptions, options, opts::debug, action); + + return finalOptions; +} + +void Compiler::AppendDefaultOptions(std::vector &finalOptions, + const std::vector &defaultOptions, + bool isDebug) const { + for (const auto &defaultIt : defaultOptions) { + finalOptions.push_back(defaultIt); + } + + if (isDebug) { + LogInfo::MapleLogger() << Compiler::GetName() << " Default Options: "; + for (const auto &defaultIt : defaultOptions) { + LogInfo::MapleLogger() << defaultIt.GetKey() << " " + << defaultIt.GetValue(); + } + LogInfo::MapleLogger() << '\n'; + } +} + +void Compiler::AppendExtraOptions(std::vector &finalOptions, const MplOptions &options, + bool isDebug, const Action &action) const { + const std::string &binName = GetTool(); + + if (isDebug) { + LogInfo::MapleLogger() << Compiler::GetName() << " Extra Options: "; + } + + /* Append options setting by: --run=binName --option="-opt1 -opt2" */ + auto &exeOptions = options.GetExeOptions(); + auto it = exeOptions.find(binName); + if (it != exeOptions.end()) { + for (auto &opt : it->second) { + finalOptions.emplace_back(opt, ""); + if (isDebug) { + LogInfo::MapleLogger() << opt << " "; + } + } + } + + maplecl::OptionCategory *category = options.GetCategory(binName); + DEBUG_ASSERT(category != nullptr, "Undefined tool: %s", binName.data()); + + /* Append options setting directly for special category. 
Example: --verbose */
+  for (const auto &opt : category->GetEnabledOptions()) {
+    for (const auto &val : opt->GetRawValues()) {
+      if (opt->GetEqualType() == maplecl::EqualType::kWithEqual) {
+        finalOptions.emplace_back(opt->GetName() + "=" + val, "");
+      } else {
+        finalOptions.emplace_back(opt->GetName(), val);
+      }
+
+      if (isDebug) {
+        LogInfo::MapleLogger() << opt->GetName() << " " << val << " ";
+      }
+    }
+  }
+
+  /* The output file cannot be specified for several last actions. For example:
+   * if the last actions are the assembly tool for 2 files (to get file1.o, file2.o),
+   * we cannot have one output name for them. */
+  if (opts::output.IsEnabledByUser() && options.GetActions().size() == 1) {
+    /* Set output file for last compilation tool */
+    if (&action == options.GetActions()[0].get()) {
+      /* the tool may not support "-o" for output option */
+      AppendOutputOption(finalOptions, opts::output.GetValue());
+    }
+  }
+
+  if (isDebug) {
+    LogInfo::MapleLogger() << '\n';
+  }
+}
+
+void Compiler::ReplaceOrInsertOption(std::vector<MplOption> &finalOptions,
+                                     const std::string &key, const std::string &value) const {
+  bool wasFound = false;
+  for (auto &opt : finalOptions) {
+    if (opt.GetKey() == key) {
+      opt.SetValue(value);
+      wasFound = true;
+    }
+  }
+
+  if (!wasFound) {
+    (void)finalOptions.emplace_back(MplOption(key, value));
+  }
+}
+
+void Compiler::AppendInputsAsOptions(std::vector<MplOption> &finalOptions,
+                                     const MplOptions &mplOptions, const Action &action) const {
+  std::vector<std::string> splittedInputFileNames;
+  std::string inputFileNames = GetInputFileName(mplOptions, action);
+  StringUtils::Split(inputFileNames, splittedInputFileNames, ' ');
+
+  for (auto &inputFileName : splittedInputFileNames) {
+    finalOptions.emplace_back(MplOption(inputFileName, ""));
+  }
+}
+
+std::vector<MplOption> Compiler::MakeDefaultOptions(const MplOptions &options,
+                                                    const Action &action) const {
+  DefaultOption rawDefaultOptions = GetDefaultOptions(options, action);
+  std::vector<MplOption> defaultOptions;
+  if (rawDefaultOptions.mplOptions != nullptr) {
+    for (uint32_t i = 0; i < rawDefaultOptions.length; ++i) {
+      defaultOptions.push_back(rawDefaultOptions.mplOptions[i]);
+    }
+  }
+  return defaultOptions;
+}
+} // namespace maple
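Note that `Compiler::ReplaceOrInsertOption` above overwrites every existing occurrence of a key rather than appending a duplicate, and only appends when the key is absent. A minimal sketch of those semantics, with `MplOption` simplified to a string pair since the real class needs the driver headers:

```
#include <string>
#include <utility>
#include <vector>

// Sketch of the replace-or-insert semantics of Compiler::ReplaceOrInsertOption,
// using a plain pair in place of MplOption.
using Opt = std::pair<std::string, std::string>;  // key, value

void ReplaceOrInsert(std::vector<Opt> &opts, const std::string &key,
                     const std::string &value) {
  bool wasFound = false;
  for (auto &opt : opts) {
    if (opt.first == key) {  // overwrite every existing occurrence of the key
      opt.second = value;
      wasFound = true;
    }
  }
  if (!wasFound) {
    opts.emplace_back(key, value);  // key absent: append it
  }
}

int main() {
  std::vector<Opt> opts = {{"-o", "a.out"}};
  ReplaceOrInsert(opts, "-o", "b.out");  // replaces the existing "-o a.out"
  ReplaceOrInsert(opts, "-g", "");       // no "-g" yet, so it is appended
  return (opts.size() == 2 && opts[0].second == "b.out") ? 0 : 1;
}
```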
diff --git a/ecmascript/mapleall/maple_driver/src/compiler_factory.cpp b/ecmascript/mapleall/maple_driver/src/compiler_factory.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..13c1ed939b60d535bae8392063226f75d8cfd325
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/src/compiler_factory.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "compiler_factory.h"
+#include
+#include "driver_options.h"
+#include "file_utils.h"
+#include "string_utils.h"
+#include "mpl_logging.h"
+
+using namespace maple;
+
+CompilerFactory &CompilerFactory::GetInstance() {
+  static CompilerFactory instance;
+  return instance;
+}
+
+ErrorCode CompilerFactory::DeleteTmpFiles(const MplOptions &mplOptions,
+                                          const std::vector<std::string> &tempFiles) const {
+  int ret = 0;
+  for (const std::string &tmpFile : tempFiles) {
+    bool isSave = false;
+    for (auto saveFile : mplOptions.GetSaveFiles()) {
+      if (!saveFile.empty() && std::regex_match(tmpFile, std::regex(StringUtils::Replace(saveFile, "*", ".*?")))) {
+        isSave = true;
+        break;
+      }
+    }
+
+    auto &inputs = mplOptions.GetInputFiles();
+    if (!isSave && (std::find(inputs.begin(), inputs.end(), tmpFile) == inputs.end())) {
+      bool isNeedRemove = true;
+      /* If we compile several files we can have several last Actions,
+       * so we need to NOT remove output files for each last Action.
+       */
+      for (auto &lastAction : mplOptions.GetActions()) {
+        auto finalOutputs = lastAction->GetCompiler()->GetFinalOutputs(mplOptions, *lastAction);
+        /* do not remove output files */
+        if (finalOutputs.find(tmpFile) != finalOutputs.end()) {
+          isNeedRemove = false;
+        }
+      }
+
+      if (isNeedRemove) {
+        FileUtils::Remove(tmpFile);
+      }
+    }
+  }
+  return ret == 0 ? kErrorNoError : kErrorFileNotFound;
+}
+
+Toolchain *CompilerFactory::GetToolChain() {
+  if (toolchain == nullptr) {
+    if (maple::Triple::GetTriple().GetArch() == Triple::ArchType::aarch64_be) {
+      toolchain = std::make_unique();
+    } else {
+      toolchain = std::make_unique();
+    }
+  }
+
+  return toolchain.get();
+}
+
+ErrorCode CompilerFactory::Select(Action &action, std::vector<Action *> &selectedActions) {
+  ErrorCode ret = kErrorNoError;
+
+  /* Traverse the Action tree recursively and select compilers in
+   * "from leaf (clang) to root (ld)" order */
+  for (const std::unique_ptr<Action> &a : action.GetInputActions()) {
+    if (a == nullptr) {
+      LogInfo::MapleLogger(kLlErr) << "Action is not Initialized\n";
+      return kErrorToolNotFound;
+    }
+
+    ret = Select(*a, selectedActions);
+    if (ret != kErrorNoError) {
+      return ret;
+    }
+  }
+
+  Toolchain *toolChain = GetToolChain();
+  if (toolChain == nullptr) {
+    LogInfo::MapleLogger(kLlErr) << "Wrong ToolChain\n";
+    return kErrorToolNotFound;
+  }
+  Compiler *compiler = toolChain->Find(action.GetTool());
+
+  if (compiler == nullptr) {
+    if (action.GetTool() != "input") {
+      LogInfo::MapleLogger(kLlErr) << "Fatal error: " << action.GetTool()
+                                   << " tool is not supported" << "\n";
+      LogInfo::MapleLogger(kLlErr) << "Supported Tools: ";
+
+      auto print = [](const auto &supportedComp) { std::cout << " " << supportedComp.first; };
+      std::for_each(toolChain->GetSupportedCompilers().begin(),
+                    toolChain->GetSupportedCompilers().end(), print);
+      LogInfo::MapleLogger(kLlErr) << "\n";
+
+      return kErrorToolNotFound;
+    }
+  } else {
+    action.SetCompiler(compiler);
+    selectedActions.push_back(&action);
+  }
+
+  return ret;
+}
+
+ErrorCode CompilerFactory::Select(const MplOptions &mplOptions, std::vector<Action *> &selectedActions) {
+  for (const std::unique_ptr<Action> &action : mplOptions.GetActions()) {
+    if (action == nullptr) {
+      LogInfo::MapleLogger(kLlErr) << "Action is not Initialized\n";
+      return kErrorToolNotFound;
+    }
+    ErrorCode ret = Select(*action, selectedActions);
+    if (ret != kErrorNoError) {
+      return ret;
+    }
+  }
+
+  return selectedActions.empty() ?
kErrorToolNotFound : kErrorNoError; +} + +ErrorCode CompilerFactory::Compile(MplOptions &mplOptions) { + if (compileFinished) { + LogInfo::MapleLogger() << + "Failed! Compilation has been completed in previous time and multi-instance compilation is not supported\n"; + return kErrorCompileFail; + } + + /* Actions owner is MplOption, so while MplOption is alive we can use raw pointers here */ + std::vector actions; + ErrorCode ret = Select(mplOptions, actions); + if (ret != kErrorNoError) { + return ret; + } + + for (auto *action : actions) { + if (action == nullptr) { + LogInfo::MapleLogger() << "Failed! Compiler is null." << "\n"; + return kErrorCompileFail; + } + + Compiler *compiler = action->GetCompiler(); + if (compiler == nullptr) { + return kErrorToolNotFound; + } + + ret = compiler->Compile(mplOptions, *action, this->theModule); + if (ret != kErrorNoError) { + return ret; + } + } + if (opts::debug) { + mplOptions.PrintDetailCommand(false); + } + // Compiler finished + compileFinished = true; + + if (!opts::saveTempOpt.IsEnabledByUser() || !mplOptions.GetSaveFiles().empty()) { + std::vector tmpFiles; + + for (auto *action : actions) { + action->GetCompiler()->GetTmpFilesToDelete(mplOptions, *action, tmpFiles); + } + + ret = DeleteTmpFiles(mplOptions, tmpFiles); + } + return ret; +} diff --git a/ecmascript/mapleall/maple_driver/src/cpp2mpl_compiler.cpp b/ecmascript/mapleall/maple_driver/src/cpp2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e92f4deeb3bbcd6569b23a974ed1027088fb5808 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/cpp2mpl_compiler.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "compiler.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "default_options.def" + +namespace maple { +std::string Cpp2MplCompiler::GetBinPath(const MplOptions &mplOptions) const{ + return FileUtils::SafeGetenv(kMapleRoot) + "/output/" + + FileUtils::SafeGetenv("MAPLE_BUILD_TYPE") + "/bin/"; +} + +const std::string &Cpp2MplCompiler::GetBinName() const { + return kBinNameCpp2mpl; +} + +std::string Cpp2MplCompiler::GetInputFileName(const MplOptions &options, const Action &action) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + // Get base file name + auto idx = action.GetOutputName().find(".ast"); + std::string outputName = action.GetOutputName(); + if (idx != std::string::npos) { + outputName = action.GetOutputName().substr(0, idx); + } + return action.GetOutputFolder() + outputName + ".ast"; +} + +DefaultOption Cpp2MplCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + uint32_t len = sizeof(kCpp2MplDefaultOptionsForAst) / sizeof(MplOption); + DefaultOption defaultOptions = { std::make_unique(len), len }; + + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kCpp2MplDefaultOptionsForAst[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +void Cpp2MplCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".mpl"); + tempFiles.push_back(action.GetFullOutputName() + ".mplt"); +} + +std::unordered_set Cpp2MplCompiler::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".mpl"); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mplt"); + return finalOutputs; +} + +void Cpp2MplCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + finalOptions.emplace_back("-o", name); +} + +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/dex2mpl_compiler.cpp b/ecmascript/mapleall/maple_driver/src/dex2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..23ddece8d3a071e2e2f7d268ee022aa396239d39 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/dex2mpl_compiler.cpp @@ -0,0 +1,262 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "compiler.h" +#include "default_options.def" +#include +#ifdef INTERGRATE_DRIVER +#include "dex2mpl_runner.h" +#include "mir_function.h" +#endif + +namespace maple { +const std::string &Dex2MplCompiler::GetBinName() const { + return kBinNameDex2mpl; +} + +DefaultOption Dex2MplCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + uint32_t len = 0; + MplOption *kDex2mplDefaultOptions = nullptr; + + if (opts::o0) { + len = sizeof(kDex2mplDefaultOptionsO0) / sizeof(MplOption); + kDex2mplDefaultOptions = kDex2mplDefaultOptionsO0; + } else if (opts::o2) { + len = sizeof(kDex2mplDefaultOptionsO2) / sizeof(MplOption); + kDex2mplDefaultOptions = kDex2mplDefaultOptionsO2; + } + + if (kDex2mplDefaultOptions == nullptr) { + return DefaultOption(); + } + + DefaultOption defaultOptions = { std::make_unique(len), len }; + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kDex2mplDefaultOptions[i]; + } + + for (unsigned int i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +void Dex2MplCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".mpl"); + tempFiles.push_back(action.GetFullOutputName() + ".mplt"); +} + +std::unordered_set Dex2MplCompiler::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + auto finalOutputs = std::unordered_set(); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mpl"); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mplt"); + return finalOutputs; +} + +#ifdef INTERGRATE_DRIVER +void Dex2MplCompiler::PostDex2Mpl(std::unique_ptr &theModule) const { + // for each function + for (auto *func : theModule->GetFunctionList()) { + if (func == nullptr) { + continue; + } + + MIRSymbolTable *symTab = func->GetSymTab(); + // for each symbol + for (size_t i = 0; i != symTab->GetSymbolTableSize(); ++i) { + MIRSymbol *currSymbol = symTab->GetSymbolFromStIdx(i); + if (currSymbol == nullptr) { + continue; + } + // (1) replace void ptr with void ref + if (theModule->IsJavaModule() && currSymbol->GetType() == GlobalTables::GetTypeTable().GetVoidPtr()) { + MIRType *voidRef = GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetVoid(), PTY_ref); + currSymbol->SetTyIdx(voidRef->GetTypeIndex()); + } + // (2) replace String ref with String ptr if symbol's name starts with "L_STR" + if (currSymbol->GetType()->GetKind() == kTypePointer && currSymbol->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(currSymbol->GetTyIdx()); + auto *ptrTy = static_cast(ty->CopyMIRTypeNode()); + DEBUG_ASSERT(ptrTy != nullptr, "null ptr check"); + ptrTy->SetPrimType(PTY_ptr); + TyIdx newTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(ptrTy); + delete ptrTy; + currSymbol->SetTyIdx(newTyIdx); + } + } + + // (3) reset pregIndex of pregTab if function has body + if (func->GetBody() != nullptr) { + uint32 maxPregNo = 0; + for (uint32 i = 0; i < func->GetFormalCount(); ++i) { + MIRSymbol *formalSt = func->GetFormal(i); + if (formalSt->IsPreg()) { + // no special register appears in the formals + uint32 pRegNo = static_cast(formalSt->GetPreg()->GetPregNo()); + if (pRegNo > maxPregNo) { + maxPregNo = 
pRegNo; + } + } + } + if (func->GetPregTab() == nullptr) { + continue; + } + func->GetPregTab()->SetIndex(maxPregNo + 1); + } + } + + // (4) fix unmatched MIRConst type of global symbols + for (size_t i = 0; i != GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (symbol == nullptr || !symbol->IsConst()) { + continue; + } + TyIdx stTyIdx = symbol->GetTyIdx(); + if (stTyIdx == 0) { + continue; + } + MIRType *stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stTyIdx); + MIRConst *mirConst = symbol->GetKonst(); + + if (mirConst == nullptr || mirConst->GetKind() != kConstInt) { + continue; + } + if (static_cast(mirConst)->GetValue() != 0) { + continue; + } + MIRType &valueType = mirConst->GetType(); + if (valueType.GetTypeIndex() != stTyIdx) { + auto *newIntConst = theModule->GetMemPool()->New(0, *stType); + symbol->SetValue({newIntConst}); + } + } + + // (5) remove type attr `rcunowned` of local symbol in rclocalunowned function specified by dex2mpl + for (auto *func : theModule->GetFunctionList()) { + if (func == nullptr || !func->GetAttr(FUNCATTR_rclocalunowned)) { + continue; + } + MIRSymbolTable *symTab = func->GetSymTab(); + for (size_t i = 0; i != symTab->GetSymbolTableSize(); ++i) { + MIRSymbol *symbol = symTab->GetSymbolFromStIdx(i); + if (symbol == nullptr) { + continue; + } + if (symbol->GetAttr(ATTR_rcunowned)) { + symbol->ResetAttr(ATTR_rcunowned); + } + } + } + + // 1: MIRStructType::isImported has different meaning for dex2mpl and binary mplt importer. + // for dex2mpl, `isImported` means whether a type is imported from mplt file instead of dex file, so all types from + // mplt are marked imported. But for binary mplt importer, `isImported` means whether a type is loaded successfully, + // so only complete types are marked imported. + // The workaround is to reset `isImported` according to the completeness of a type. 
+ for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if (type == nullptr) { + continue; + } + MIRTypeKind typeKind = type->GetKind(); + if (typeKind == kTypeStructIncomplete || typeKind == kTypeClassIncomplete || typeKind == kTypeInterfaceIncomplete) { + auto *structType = static_cast(type); + structType->SetIsImported(false); + } else if (typeKind == kTypeClass || typeKind == kTypeInterface) { + auto *structType = static_cast(type); + structType->SetIsImported(true); + } + } +} +#endif + +void Dex2MplCompiler::PrintCommand(const MplOptions &options, const Action &action) const { + std::string runStr = "--run="; + std::string optionStr = "--option=\""; + std::string connectSym = ""; + if (options.GetExeOptions().find(kBinNameDex2mpl) != options.GetExeOptions().end()) { + runStr += "dex2mpl"; + auto inputDex2mplOptions = options.GetExeOptions().find(kBinNameDex2mpl); + for (auto &opt : inputDex2mplOptions->second) { + optionStr += " --" + opt; + } + } + optionStr += "\""; + LogInfo::MapleLogger() << "Starting:" << options.GetExeFolder() << "maple " << runStr << " " << optionStr + << " --infile " << GetInputFileName(options, action) << '\n'; +} + +#ifdef INTERGRATE_DRIVER +bool Dex2MplCompiler::MakeDex2mplOptions(const MplOptions &options) { + Dex2mplOptions &dex2mplOptions = Dex2mplOptions::GetInstance(); + dex2mplOptions.LoadDefault(); + auto it = options.GetExeOptions().find(kBinNameDex2mpl); + if (it == options.GetExeOptions().end()) { + LogInfo::MapleLogger() << "no dex2mpl input options\n"; + return false; + } + bool result = dex2mplOptions.SolveOptions(it->second, options.HasSetDebugFlag()); + if (result == false) { + LogInfo::MapleLogger() << "Meet error dex2mpl options\n"; + return false; + } + return true; +} + +ErrorCode Dex2MplCompiler::Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) { + Dex2mplOptions &dex2mplOptions = Dex2mplOptions::GetInstance(); + bool result = MakeDex2mplOptions(options); + if (!result) { + return ErrorCode::kErrorCompileFail; + } + // .dex + std::string dexFileName = action.GetInputFile(); + theModule = std::make_unique(dexFileName); + + const auto &runningExes = options.GetRunningExes(); + bool isDex2mplFinalExe = (runningExes[runningExes.size() - 1] == kBinNameDex2mpl); + std::unique_ptr dex2mpl = std::make_unique(dexFileName, dex2mplOptions, + std::move(theModule), options.HasSetSaveTmps(), isDex2mplFinalExe); + if (dex2mpl == nullptr) { + ERR(kLncErr, "new Dex2mplRunner failed."); + return ErrorCode::kErrorCompileFail; + } + LogInfo::MapleLogger() << "Starting dex2mpl" << '\n'; + int ret = dex2mpl->Init(); + if (ret != 0) { + return ErrorCode::kErrorCompileFail; + } + ret = dex2mpl->Run(); + if (ret != 0) { + ERR(kLncErr, "(ToIDEUser)dex2mpl failed."); + return ErrorCode::kErrorCompileFail; + } + // Check that whether the kBinNameDex2mpl is the final compiler + // If not, we need to call PostDex2Mpl() to deal with some differences in theModule to + // adapt to the needs of maplecomb + if (runningExes[runningExes.size() - 1] != kBinNameDex2mpl) { + dex2mpl->MoveMirModule(theModule); + PostDex2Mpl(theModule); + } + PrintCommand(options, action); + return ErrorCode::kErrorNoError; +} +#endif +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/driver_options.cpp b/ecmascript/mapleall/maple_driver/src/driver_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e33f291e83a57c0ee03430c1727ea3425934ff8f --- /dev/null +++ 
b/ecmascript/mapleall/maple_driver/src/driver_options.cpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "driver_options.h" +#include "cl_option.h" + +namespace opts { + +/* ##################### BOOL Options ############################################################### */ + +maplecl::Option version({"--version", "-v"}, + " --version [command] \tPrint version and exit.\n", + {driverCategory}); + +maplecl::Option ignoreUnkOpt({"--ignore-unknown-options"}, + " --ignore-unknown-options \tIgnore unknown compilation options\n", + {driverCategory}); + +maplecl::Option o0({"--O0", "-O0"}, + " -O0 \tNo optimization.\n", + {driverCategory}); + +maplecl::Option o1({"--O1", "-O1"}, + " -O1 \tDo some optimization.\n", + {driverCategory}); + +maplecl::Option o2({"--O2", "-O2"}, + " -O2 \tDo more optimization. (Default)\n", + {driverCategory}); + +maplecl::Option os({"--Os", "-Os"}, + " -Os \tOptimize for size, based on O2.\n", + {driverCategory, hir2mplCategory}); + +maplecl::Option verify({"--verify"}, + " --verify \tVerify mpl file\n", + {driverCategory, dex2mplCategory, mpl2mplCategory}); + +maplecl::Option decoupleStatic({"--decouple-static", "-decouple-static"}, + " --decouple-static \tDecouple the static method and field\n" + " --no-decouple-static \tDon't decouple the static method and field\n", + {driverCategory, dex2mplCategory, meCategory, mpl2mplCategory}, + maplecl::DisableWith("--no-decouple-static")); + +maplecl::Option gconly({"--gconly", "-gconly"}, + " --gconly \tMake gconly is enable\n" + " --no-gconly \tDon't make gconly is enable\n", + {driverCategory, dex2mplCategory, meCategory, + mpl2mplCategory, cgCategory}, + maplecl::DisableWith("--no-gconly")); + +maplecl::Option timePhase({"-time-phases"}, + " -time-phases \tTiming phases and print percentages\n", + {driverCategory}); + +maplecl::Option genMeMpl({"--genmempl"}, + " --genmempl \tGenerate me.mpl file\n", + {driverCategory}); + +maplecl::Option compileWOLink({"-c"}, + " -c \tCompile the source files without linking\n", + {driverCategory}); + +maplecl::Option genVtable({"--genVtableImpl"}, + " --genVtableImpl \tGenerate VtableImpl.mpl file\n", + {driverCategory}); + +maplecl::Option verbose({"-verbose"}, + " -verbose \tPrint informations\n", + {driverCategory, jbc2mplCategory, hir2mplCategory, + meCategory, mpl2mplCategory, cgCategory}); + +maplecl::Option debug({"--debug"}, + " --debug \tPrint debug info.\n", + {driverCategory}); + +maplecl::Option withDwarf({"-g"}, + " --debug \tPrint debug info.\n", + {driverCategory}); + +maplecl::Option withIpa({"--with-ipa"}, + " --with-ipa \tRun IPA when building\n" + " --no-with-ipa \n", + {driverCategory}, + maplecl::DisableWith("--no-with-ipa")); + +maplecl::Option npeNoCheck({"--no-npe-check"}, + " --no-npe-check \tDisable null pointer check (Default)\n", + {driverCategory}); + +maplecl::Option npeStaticCheck({"--npe-check-static"}, + " --npe-check-static \tEnable null pointer static check only\n", + {driverCategory}); + 
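Several of the boolean options above use maplecl::DisableWith to register a negated spelling (for example --gconly / --no-gconly) that writes the opposite value into the same flag. The maplecl parser itself is not part of this patch, so the sketch below is only a toy model of that enable/disable pairing:

```
#include <string>
#include <unordered_map>
#include <utility>

// Toy model of the enable/disable pairing that maplecl::DisableWith registers
// (e.g. --gconly / --no-gconly); the real maplecl parser is not shown in this patch.
struct BoolFlag {
  bool value = false;
  bool setByUser = false;
};

class FlagTable {
 public:
  void Register(const std::string &on, const std::string &off, BoolFlag &flag) {
    table_[on] = {&flag, true};
    table_[off] = {&flag, false};  // the DisableWith spelling clears the flag
  }
  bool Parse(const std::string &arg) {
    auto it = table_.find(arg);
    if (it == table_.end()) {
      return false;  // unknown option
    }
    it->second.first->value = it->second.second;
    it->second.first->setByUser = true;  // mirrors IsEnabledByUser()
    return true;
  }

 private:
  std::unordered_map<std::string, std::pair<BoolFlag *, bool>> table_;
};

int main() {
  BoolFlag gconly;
  FlagTable flags;
  flags.Register("--gconly", "--no-gconly", gconly);
  flags.Parse("--no-gconly");
  return (gconly.setByUser && !gconly.value) ? 0 : 1;
}
```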
+maplecl::Option npeDynamicCheck({"--npe-check-dynamic"}, + " --npe-check-dynamic \tEnable null " + "pointer dynamic check with static warning\n", + {driverCategory}); + +maplecl::Option npeDynamicCheckSilent({"--npe-check-dynamic-silent"}, + " --npe-check-dynamic-silent \tEnable null pointer dynamic " + "without static warning\n", + {driverCategory}); + +maplecl::Option npeDynamicCheckAll({"--npe-check-dynamic-all"}, + " --npe-check-dynamic-all \tKeep dynamic check before dereference, " + "used with --npe-check-dynamic* options\n", + {driverCategory}); + +maplecl::Option boundaryNoCheck({"--no-boundary-check"}, + " --no-boundary-check \tDisable boundary check (Default)\n", + {driverCategory}); + +maplecl::Option boundaryStaticCheck({"--boundary-check-static"}, + " --boundary-check-static \tEnable boundary static check\n", + {driverCategory}); + +maplecl::Option boundaryDynamicCheck({"--boundary-check-dynamic"}, + " --boundary-check-dynamic \tEnable boundary dynamic check " + "with static warning\n", + {driverCategory}); + +maplecl::Option boundaryDynamicCheckSilent({"--boundary-check-dynamic-silent"}, + " --boundary-check-dynamic-silent \tEnable boundary dynamic " + "check without static warning\n", + {driverCategory}); + +maplecl::Option safeRegionOption({"--safe-region"}, + " --safe-region \tEnable safe region\n", + {driverCategory}); + +maplecl::Option printDriverPhases({"--print-driver-phases"}, + " --print-driver-phases \tPrint Driver Phases\n", + {driverCategory}); + +maplecl::Option ldStatic({"-static", "--static"}, + " -static \tForce the linker to link a program statically\n", + {driverCategory, ldCategory}); + +maplecl::Option maplePhase({"--maple-phase"}, + " --maple-phase \tRun maple phase only\n --no-maple-phase\n", + {driverCategory}, + maplecl::DisableWith("--maple-toolchain"), + maplecl::Init(true)); + +maplecl::Option genMapleBC({"--genmaplebc"}, + " --genmaplebc \tGenerate .mbc file\n", + {driverCategory}); + +maplecl::Option genLMBC({"--genlmbc"}, + " --genlmbc \tGenerate .lmbc file\n", + {driverCategory, mpl2mplCategory}); + +maplecl::Option profileGen({"--profileGen"}, + " --profileGen \tGenerate profile data for static languages\n", + {driverCategory, meCategory, mpl2mplCategory, cgCategory}); + +maplecl::Option profileUse({"--profileUse"}, + " --profileUse \tOptimize static languages with profile data\n", + {driverCategory, mpl2mplCategory}); + +/* ##################### STRING Options ############################################################### */ +maplecl::Option help({"--help", "-h"}, + " --help \tPrint help\n", + {driverCategory}, + maplecl::optionalValue); + +maplecl::Option infile({"--infile"}, + " --infile file1,file2,file3 \tInput files.\n", + {driverCategory}); + +maplecl::Option intext({"--intext"}, + " --infile=file content \tInput file content.\n", + {driverCategory}); + +maplecl::Option inFileName({"--inFileName"}, + " --inFileName=filename \tInput file name .\n", + {driverCategory}); + +maplecl::Option mplt({"--mplt", "-mplt"}, + " --mplt=file1,file2,file3 \tImport mplt files.\n", + {driverCategory, dex2mplCategory, jbc2mplCategory}); + +maplecl::Option partO2({"--partO2"}, + " --partO2 \tSet func list for O2\n", + {driverCategory}); + +maplecl::List jbc2mplOpt({"--jbc2mpl-opt"}, + " --jbc2mpl-opt \tSet options for jbc2mpl\n", + {driverCategory}); + +maplecl::List hir2mplOpt({"--hir2mpl-opt"}, + " --hir2mpl-opt \tSet options for hir2mpl\n", + {driverCategory}); + +maplecl::List clangOpt({"--clang-opt"}, + " --clang-opt \tSet options for clang as AST 
generator\n", + {driverCategory}); + +maplecl::List asOpt({"--as-opt"}, + " --as-opt \tSet options for as\n", + {driverCategory}); + +maplecl::List ldOpt({"--ld-opt"}, + " --ld-opt \tSet options for ld\n", + {driverCategory}); + +maplecl::List dex2mplOpt({"--dex2mpl-opt"}, + " --dex2mpl-opt \tSet options for dex2mpl\n", + {driverCategory}); + +maplecl::List mplipaOpt({"--mplipa-opt"}, + " --mplipa-opt \tSet options for mplipa\n", + {driverCategory}); + +maplecl::List mplcgOpt({"--mplcg-opt"}, + " --mplcg-opt \tSet options for mplcg\n", + {driverCategory}); + +maplecl::List meOpt({"--me-opt"}, + " --me-opt \tSet options for me\n", + {driverCategory}); + +maplecl::List mpl2mplOpt({"--mpl2mpl-opt"}, + " --mpl2mpl-opt \tSet options for mpl2mpl\n", + {driverCategory}); + +maplecl::Option profile({"--profile"}, + " --profile \tFor PGO optimization\n" + " \t--profile=list_file\n", + {driverCategory, dex2mplCategory, mpl2mplCategory, cgCategory}); + +maplecl::Option run({"--run"}, + " --run=cmd1:cmd2 \tThe name of executables that are going\n" + " \tto execute. IN SEQUENCE.\n" + " \tSeparated by \":\".Available exe names:\n" + " \tjbc2mpl, me, mpl2mpl, mplcg\n" + " \tInput file must match the tool can\n" + " \thandle\n", + {driverCategory}); + +maplecl::Option optionOpt({"--option"}, + " --option=\"opt1:opt2\" \tOptions for each executable,\n" + " \tseparated by \":\".\n" + " \tThe sequence must match the sequence in\n" + " \t--run.\n", + {driverCategory}); + +maplecl::List ldLib({"-l"}, + " -l \tLinks with a library file\n", + {driverCategory, ldCategory}, + maplecl::joinedValue); + +maplecl::List ldLibPath({"-L"}, + " -L \tAdd directory to library search path\n", + {driverCategory, ldCategory}, + maplecl::joinedValue); + +maplecl::List enableMacro({"-D"}, + " -D = \tDefine to " + "(or 1 if omitted)\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::List disableMacro({"-U"}, + " -U \tUndefine macro \n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::List includeDir({"-I"}, + " -I \tAdd directory to include search path\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::List includeSystem({"--isystem"}, + " -isystem \tAdd directory to SYSTEM include search path\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::Option output({"-o"}, + " -o \tPlace the output into \n", + {driverCategory}, + maplecl::Init("a.out")); + +maplecl::Option saveTempOpt({"--save-temps"}, + " --save-temps \tDo not delete intermediate files.\n" + " \t--save-temps Save all intermediate files.\n" + " \t--save-temps=file1,file2,file3 Save the\n" + " \ttarget files.\n", + {driverCategory}, + maplecl::optionalValue); + +maplecl::Option target({"--target", "-target"}, + " --target= \tDescribe target platform\n" + " \t\t\t\tExample: --target=aarch64-gnu or --target=aarch64_be-gnuilp32\n", + {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); + +/* ##################### DIGITAL Options ############################################################### */ + +maplecl::Option helpLevel({"--level"}, + " --level=NUM \tPrint the help info of specified level.\n" + " \tNUM=0: All options (Default)\n" + " \tNUM=1: Product options\n" + " \tNUM=2: Experimental options\n" + " \tNUM=3: Debug options\n", + {driverCategory}); + +/* #################################################################################################### */ + +} /* namespace opts */ diff --git a/ecmascript/mapleall/maple_driver/src/driver_runner.cpp 
b/ecmascript/mapleall/maple_driver/src/driver_runner.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c901cc076b05a1b94d7be2af10919b85fa62ed87 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/driver_runner.cpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "driver_runner.h" +#include +#include +#include "mpl_timer.h" +#include "mir_function.h" +#include "mir_parser.h" +#include "file_utils.h" +#include "constantfold.h" +#include "lower.h" +#include "me_phase_manager.h" +#include "lfo_loop_vec.h" +#include "seqvec.h" + +using namespace maplebe; + +#define JAVALANG (theModule->IsJavaModule()) +#define CLANG (theModule->IsCModule()) + +#define CHECK_MODULE(errorCode...) \ + do { \ + if (theModule == nullptr) { \ + LogInfo::MapleLogger() << "Fatal error: the module is null" << '\n'; \ + return errorCode; \ + } \ + } while (0) + +#define RELEASE(pointer) \ + do { \ + if (pointer != nullptr) { \ + delete pointer; \ + pointer = nullptr; \ + } \ + } while (0) + +#define ADD_PHASE(name, condition) \ + if ((condition)) { \ + phases.push_back(std::string(name)); \ + } + +#define ADD_EXTRA_PHASE(name, timephases, timeStart) \ + if (timephases) { \ + auto duration = std::chrono::system_clock::now() - (timeStart); \ + extraPhasesTime.emplace_back(std::chrono::duration_cast(duration).count()); \ + extraPhasesName.emplace_back(name); \ + } + +namespace maple { +const std::string kMplCg = "mplcg"; +const std::string kMpl2mpl = "mpl2mpl"; +const std::string kMplMe = "me"; + +enum OptLevel { + kLevelO0, + kLevelO1, + kLevelO2 +}; + +ErrorCode DriverRunner::Run() { + CHECK_MODULE(kErrorExit); + + if (exeNames.empty()) { + LogInfo::MapleLogger() << "Fatal error: no exe specified" << '\n'; + return kErrorExit; + } + std::string originBaseName = baseName; + outputFile = baseName; + outputFile.append(GetPostfix()); + if (mpl2mplOptions != nullptr || meOptions != nullptr) { + std::string vtableImplFile = originBaseName; + std::string postFix = ""; + if (theModule->GetSrcLang() == kSrcLangC) { + postFix = ".me"; + } else { + postFix = ".VtableImpl"; + } + vtableImplFile.append(postFix + ".mpl"); + originBaseName.append(postFix); + ProcessMpl2mplAndMePhases(outputFile, vtableImplFile); + } + return kErrorNoError; +} + +std::string DriverRunner::GetPostfix() { + if (printOutExe == kMplMe) { + return ".me.mpl"; + } + if (printOutExe == kMpl2mpl) { + return ".VtableImpl.mpl"; + } + if (printOutExe == kMplCg) { + if (theModule->GetSrcLang() == kSrcLangC) { + return ".s"; + } else { + return ".VtableImpl.s"; + } + } + return ""; +} + +// trim both leading and trailing space and tab +static void TrimString(std::string &str) { + size_t pos = str.find_first_not_of(kSpaceTabStr); + if (pos != std::string::npos) { + str = str.substr(pos); + } else { + str.clear(); + } + pos = str.find_last_not_of(kSpaceTabStr); + if (pos != std::string::npos) { + str = str.substr(0, pos + 1); + } +} + +void 
DriverRunner::SolveCrossModuleInJava(MIRParser &parser) const { + if (MeOption::optLevel < kLevelO2 || Options::lazyBinding || + Options::skipPhase == "inline" || Options::buildApp != 0 || + !Options::useInline || !Options::useCrossModuleInline) { + return; + } + std::string originBaseName = baseName; + // read in optimized mpl routines + const MapleVector &inputMplt = theModule->GetImportedMplt(); + auto it = inputMplt.cbegin(); + for (++it; it != inputMplt.cend(); ++it) { + const std::string &curStr = *it; + auto lastDotInner = curStr.find_last_of("."); + std::string tmp = (lastDotInner == std::string::npos) ? curStr : curStr.substr(0, lastDotInner); + if (tmp.find("framework") != std::string::npos && originBaseName.find("framework") != std::string::npos) { + continue; + } + // Skip the import file + if (tmp.find(FileUtils::GetFileName(originBaseName, true)) != std::string::npos) { + continue; + } + size_t index = curStr.rfind("."); + CHECK_FATAL(index != std::string::npos, "can not find ."); + + std::string inputInline = curStr.substr(0, index + 1) + "mplt_inline"; + std::ifstream optFile(inputInline); + if (!optFile.is_open()) { + continue; + } + + LogInfo::MapleLogger() << "Starting parse " << inputInline << '\n'; + bool parsed = parser.ParseInlineFuncBody(optFile); + if (!parsed) { + parser.EmitError(actualInput); + } + optFile.close(); + } +} + +void DriverRunner::SolveCrossModuleInC(MIRParser &parser) const { + if (MeOption::optLevel < kLevelO2 || !Options::useInline || + !Options::useCrossModuleInline || Options::skipPhase == "inline" || + Options::importFileList == "") { + return; + } + char absPath[PATH_MAX]; + if (theModule->GetFileName().size() > PATH_MAX || realpath(theModule->GetFileName().c_str(), absPath) == nullptr) { + CHECK_FATAL(false, "invalid file path"); + } + std::ifstream infile(Options::importFileList); + if (!infile.is_open()) { + LogInfo::MapleLogger(kLlErr) << "Cannot open importfilelist file " << Options::importFileList << '\n'; + } + LogInfo::MapleLogger() << "[CROSS_MODULE] read importfile from list: " << Options::importFileList << '\n'; + std::string input; + while (getline(infile, input)) { + TrimString(input); + if (input.empty() || input.find(absPath) != std::string::npos) { + continue; + } + std::ifstream optFile(input); + if (!optFile.is_open()) { + abort(); + } + LogInfo::MapleLogger() << "Starting parse " << input << '\n'; + bool parsed = parser.ParseInlineFuncBody(optFile); + if (!parsed) { + parser.EmitError(actualInput); + } + optFile.close(); + } + infile.close(); +} + +ErrorCode DriverRunner::ParseInput() const { + CHECK_MODULE(kErrorExit); + LogInfo::MapleLogger() << "Starting parse input" << '\n'; + MPLTimer timer; + timer.Start(); + MIRParser parser(*theModule); + ErrorCode ret = kErrorNoError; + if (!fileParsed) { + if (inputFileType != kFileTypeBpl) { + bool parsed = parser.ParseMIR(0, 0, false, true); + if (!parsed) { + ret = kErrorExit; + parser.EmitError(actualInput); + } + } else { + BinaryMplImport binMplt(*theModule); + binMplt.SetImported(false); + std::string modid = theModule->GetFileName(); + bool imported = binMplt.Import(modid, true); + if (!imported) { + ret = kErrorExit; + LogInfo::MapleLogger() << "Cannot open .bpl file: %s" << modid << '\n'; + } + } + } + if (CLANG) { + SolveCrossModuleInC(parser); + } + timer.Stop(); + LogInfo::MapleLogger() << "Parse consumed " << timer.Elapsed() << "s" << '\n'; + return ret; +} + +ErrorCode DriverRunner::ParseSrcLang(MIRSrcLang &srcLang) const { + ErrorCode ret = kErrorNoError; + if 
(inputFileType != kFileTypeBpl) { + MIRParser parser(*theModule); + bool parsed = parser.ParseSrcLang(srcLang); + if (!parsed) { + ret = kErrorExit; + parser.EmitError(actualInput); + } + } else { + BinaryMplImport binMplt(*theModule); + std::string modid = theModule->GetFileName(); + bool imported = binMplt.ImportForSrcLang(modid, srcLang); + if (!imported) { + ret = kErrorExit; + LogInfo::MapleLogger() << "Cannot open .bpl file: %s" << modid << '\n'; + } + } + return ret; +} + +void DriverRunner::RunNewPM(const std::string &output, const std::string &vtableImplFile) { + LogInfo::MapleLogger() << "Processing maplecomb in new phasemanager" << '\n'; + auto PMMemPool = std::make_unique(memPoolCtrler, "PM module mempool"); + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&MEBETopLevelManager::id); + auto *topLevelPhaseManager = static_cast(curPhase->GetConstructor()(PMMemPool.get())); + topLevelPhaseManager->SetRunMpl2Mpl(mpl2mplOptions != nullptr); + topLevelPhaseManager->SetRunMe(meOptions != nullptr); + topLevelPhaseManager->SetQuiet(Options::quiet); + if (timePhases) { + topLevelPhaseManager->InitTimeHandler(); + } + MeFuncPM::genMeMpl = genMeMpl; + MeFuncPM::genMapleBC = genMapleBC; + MeFuncPM::genLMBC = genLMBC; + MeFuncPM::timePhases = timePhases; + MPLTimer timer; + timer.Start(); + topLevelPhaseManager->DoPhasesPopulate(*theModule); + topLevelPhaseManager->Run(*theModule); + if (timePhases) { + topLevelPhaseManager->DumpPhaseTime(); + } + // emit after module phase + if (printOutExe == kMpl2mpl || printOutExe == kMplMe) { + theModule->Emit(output); + } else if (genVtableImpl || Options::emitVtableImpl) { + theModule->Emit(vtableImplFile); + } + PMMemPool.reset(); + timer.Stop(); + LogInfo::MapleLogger() << "maplecomb consumed " << timer.Elapsed() << "s" << '\n'; + // dump vectorized loop counter here + { + LogInfo::MapleLogger() << "\n" << LoopVectorization::vectorizedLoop << " loop vectorized\n"; + LogInfo::MapleLogger() << "\n" << SeqVectorize::seqVecStores << " sequencestores vectorized\n"; + LogInfo::MapleLogger() << "\n" << LfoUnrollOneLoop::countOfLoopsUnrolled << " loops unrolled\n"; + } +} + +void DriverRunner::ProcessMpl2mplAndMePhases(const std::string &output, const std::string &vtableImplFile) { + CHECK_MODULE(); + theMIRModule = theModule; + if (mpl2mplOptions != nullptr || meOptions != nullptr) { + // multi-thread is not supported for now. 
+ MeOption::threads = 1; + // main entry of newpm for me&mpl2mpl + RunNewPM(output, vtableImplFile); + } + if (withDwarf && !theModule->IsWithDbgInfo()) { + LogInfo::MapleLogger() << "set up debug info " << '\n'; + theMIRModule->GetDbgInfo()->BuildDebugInfo(); + } +} + +void DriverRunner::ProcessCGPhase(const std::string &output, const std::string &originBaseName) { + CHECK_MODULE(); + theMIRModule = theModule; + if (withDwarf && !theModule->IsWithDbgInfo()) { + LogInfo::MapleLogger() << "set up debug info " << '\n'; + theMIRModule->GetDbgInfo()->BuildDebugInfo(); + } + if (cgOptions == nullptr) { + return; + } + LogInfo::MapleLogger() << "Processing mplcg in new phaseManager" << '\n'; + MPLTimer timer; + timer.Start(); + theModule->SetBaseName(originBaseName); + theModule->SetOutputFileName(output); + cgOptions->SetDefaultOptions(*theModule); + if (timePhases) { + CGOptions::EnableTimePhases(); + } + Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel()); + MAD mad; + Globals::GetInstance()->SetMAD(mad); + + auto cgPhaseManager = std::make_unique(memPoolCtrler, "cg function phasemanager"); + const MaplePhaseInfo *cgPMInfo = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&CgFuncPM::id); + auto *cgfuncPhaseManager = static_cast(cgPMInfo->GetConstructor()(cgPhaseManager.get())); + cgfuncPhaseManager->SetQuiet(CGOptions::IsQuiet()); + if (timePhases) { + cgfuncPhaseManager->InitTimeHandler(); + } + /* It is a specifc work around (need refactor) */ + cgfuncPhaseManager->SetCGOptions(cgOptions); + (void) cgfuncPhaseManager->PhaseRun(*theModule); + if (timePhases) { + cgfuncPhaseManager->DumpPhaseTime(); + } + timer.Stop(); + theMIRModule->ReleasePragmaMemPool(); + LogInfo::MapleLogger() << "Mplcg consumed " << timer.ElapsedMilliseconds() << "ms" << '\n'; +} + +void DriverRunner::InitProfile() const { + if (!cgOptions->IsProfileDataEmpty()) { + uint32 dexNameIdx = theModule->GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + const std::string &dexName = GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(dexNameIdx)); + bool deCompressSucc = theModule->GetProfile().DeCompress(CGOptions::GetProfileData(), dexName); + if (!deCompressSucc) { + LogInfo::MapleLogger() << "WARN: DeCompress() " << CGOptions::GetProfileData() << "failed in mplcg()\n"; + } + } +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/hided_options.cpp b/ecmascript/mapleall/maple_driver/src/hided_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e105ff4079197c11888d9bd7fde43c054ab525dc --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/hided_options.cpp @@ -0,0 +1,161 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
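The driver_runner.cpp added above threads a single ErrorCode through every stage and stops at the first failure: ParseInput, then me/mpl2mpl via RunNewPM, then mplcg via ProcessCGPhase. That control flow reduces to the minimal sketch below; the RunPipeline helper and the stage lambdas are hypothetical illustrations of the pattern, not part of the patch:

```
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for maple::ErrorCode.
enum ErrorCode { kErrorNoError = 0, kErrorExit = 1 };

// Runs stages in order and stops at the first failure, mirroring how
// DriverRunner::Run short-circuits on any result other than kErrorNoError.
ErrorCode RunPipeline(
    const std::vector<std::pair<std::string, std::function<ErrorCode()>>> &stages) {
  for (const auto &stage : stages) {
    std::cout << "Starting " << stage.first << '\n';
    ErrorCode ret = stage.second();
    if (ret != kErrorNoError) {
      std::cout << stage.first << " failed\n";
      return ret;  // the first failing stage aborts the rest of the pipeline
    }
  }
  return kErrorNoError;
}

int main() {
  return RunPipeline({
      {"parse", [] { return kErrorNoError; }},
      {"me/mpl2mpl", [] { return kErrorNoError; }},
      {"mplcg", [] { return kErrorNoError; }},
  });
}
```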
+ */ + +#include "driver_options.h" +#include "cl_option.h" + +#include +#include + +namespace opts { + +maplecl::Option<bool> MD({"-MD"}, + " -MD \tWrite a depfile containing user and system headers\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<std::string> MT({"-MT"}, + " -MT <value> \tSpecify name of main file output in depfile\n", + {driverCategory, clangCategory}, maplecl::hide, maplecl::joinedValue); + +maplecl::Option<std::string> MF({"-MF"}, + " -MF <file> \tWrite depfile output from -MD, -M to <file>\n", + {driverCategory, clangCategory}, maplecl::hide, maplecl::joinedValue); + +/* Should we use std option in hir2mpl ??? */ +maplecl::Option<std::string> std({"-std"}, + " -std \tIgnored\n", + {driverCategory, clangCategory}); + +/* ##################### Warnings Options ############################################################### */ + +maplecl::Option<bool> wUnusedMacro({"-Wunused-macros"}, + " -Wunused-macros \twarning: macro is not used\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wBadFunctionCast({"-Wbad-function-cast"}, + " -Wbad-function-cast \twarning: " + "cast from function call of type A to non-matching type B\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wStrictPrototypes({"-Wstrict-prototypes"}, + " -Wstrict-prototypes \twarning: " + "Warn if a function is declared or defined without specifying the argument types\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wUndef({"-Wundef"}, + " -Wundef \twarning: " + "Warn if an undefined identifier is evaluated in an #if directive. " + "Such identifiers are replaced with zero\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wCastQual({"-Wcast-qual"}, + " -Wcast-qual \twarning: " + "Warn whenever a pointer is cast so as to remove a type qualifier from the target type. " + "For example, warn if a const char * is cast to an ordinary char *\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wMissingFieldInitializers({"-Wmissing-field-initializers"}, + " -Wmissing-field-initializers\twarning: " + "Warn if a structure's initializer has some fields missing\n", + {driverCategory, clangCategory}, maplecl::hide, + maplecl::DisableWith("-Wno-missing-field-initializers")); + +maplecl::Option<bool> wUnusedParameter({"-Wunused-parameter"}, + " -Wunused-parameter \twarning: " + "Warn whenever a function parameter is unused aside from its declaration\n", + {driverCategory, clangCategory}, maplecl::hide, + maplecl::DisableWith("-Wno-unused-parameter")); + +maplecl::Option<bool> wAll({"-Wall"}, + " -Wall \tThis enables all the warnings about constructions " + "that some users consider questionable\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wExtra({"-Wextra"}, + " -Wextra \tEnable some extra warning flags that are not enabled by -Wall\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wWriteStrings({"-Wwrite-strings"}, + " -Wwrite-strings \tWhen compiling C, give string constants the type " + "const char[length] so that copying the address of one into " + "a non-const char * pointer produces a warning\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wVla({"-Wvla"}, + " -Wvla \tWarn if a variable-length array is used in the code\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wFormatSecurity({"-Wformat-security"}, + " -Wformat-security \tWarn about uses of format " + "functions that represent possible security problems\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wShadow({"-Wshadow"}, + " -Wshadow \tWarn whenever a local variable " + "or type declaration shadows another variable\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wTypeLimits({"-Wtype-limits"}, + " -Wtype-limits \tWarn if a comparison is always true or always " + "false due to the limited range of the data type\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wSignCompare({"-Wsign-compare"}, + " -Wsign-compare \tWarn when a comparison between signed and " + "unsigned values could produce an incorrect result when the signed value is converted " + "to unsigned\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wShiftNegativeValue({"-Wshift-negative-value"}, + " -Wshift-negative-value \tWarn if left " + "shifting a negative value\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wPointerArith({"-Wpointer-arith"}, + " -Wpointer-arith \tWarn about anything that depends on the " + "\"size of\" a function type or of void\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wIgnoredQualifiers({"-Wignored-qualifiers"}, + " -Wignored-qualifiers \tWarn if the return type of a " + "function has a type qualifier such as const\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wFormat({"-Wformat"}, + " -Wformat \tCheck calls to printf and scanf, etc., " + "to make sure that the arguments supplied have types appropriate " + "to the format string specified\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wFloatEqual({"-Wfloat-equal"}, + " -Wfloat-equal \tWarn if floating-point values are used " + "in equality comparisons\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wDateTime({"-Wdate-time"}, + " -Wdate-time \tWarn when macros __TIME__, __DATE__ or __TIMESTAMP__ " + "are encountered as they might prevent bit-wise-identical reproducible compilations\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wImplicitFallthrough({"-Wimplicit-fallthrough"}, + " -Wimplicit-fallthrough \tWarn when a switch case falls through\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option<bool> wShiftOverflow({"-Wshift-overflow"}, + " -Wshift-overflow \tWarn about left shift overflows\n", + {driverCategory, clangCategory}, maplecl::hide, + maplecl::DisableWith("-Wno-shift-overflow")); + +} /* namespace opts */ diff --git a/ecmascript/mapleall/maple_driver/src/ipa_compiler.cpp b/ecmascript/mapleall/maple_driver/src/ipa_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dfce2041082643b3d4b88ca6f2ec071dc889ac0d --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/ipa_compiler.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "default_options.def" + +namespace maple { +const std::string &IpaCompiler::GetBinName() const { + return kBinNameMplipa; +} + +DefaultOption IpaCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + uint32_t len = 0; + MplOption *kMplipaDefaultOptions = nullptr; + + if (opts::o2) { + len = sizeof(kMplipaDefaultOptionsO2) / sizeof(MplOption); + kMplipaDefaultOptions = kMplipaDefaultOptionsO2; + } + + if (kMplipaDefaultOptions == nullptr) { + return DefaultOption(); + } + + DefaultOption defaultOptions = { std::make_unique<MplOption[]>(len), len }; + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kMplipaDefaultOptions[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +std::string IpaCompiler::GetInputFileName(const MplOptions &options, const Action &action) const { + return action.GetFullOutputName() + ".mpl"; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/jbc2mpl_compiler.cpp b/ecmascript/mapleall/maple_driver/src/jbc2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5eb1b154fc38571160594f99353dc6850ec0a512 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/jbc2mpl_compiler.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2.
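Several of the hidden warning flags above register a negative spelling through maplecl::DisableWith (for example -Wshift-overflow / -Wno-shift-overflow). A rough, self-contained sketch of that enable/disable mechanic follows; the MiniFlag class is a hypothetical stand-in for maplecl::Option, and the "last spelling on the command line wins" rule is an assumption based on the usual driver convention:

```
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for an option declared with maplecl::DisableWith():
// the positive spelling turns the flag on, the registered negative
// spelling turns it off, and the last occurrence wins.
class MiniFlag {
 public:
  MiniFlag(std::string enable, std::string disable)
      : enableName(std::move(enable)), disableName(std::move(disable)) {}

  void Consume(const std::string &arg) {
    if (arg == enableName) value = true;
    if (arg == disableName) value = false;
  }

  bool IsEnabled() const { return value; }

 private:
  std::string enableName;
  std::string disableName;
  bool value = false;
};

int main() {
  MiniFlag shiftOverflow("-Wshift-overflow", "-Wno-shift-overflow");
  std::vector<std::string> args = {"-Wshift-overflow", "-Wno-shift-overflow"};
  for (const auto &a : args) shiftOverflow.Consume(a);
  std::cout << std::boolalpha << shiftOverflow.IsEnabled() << '\n';  // false: -Wno-... came last
}
```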
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "default_options.def" + +namespace maple { +const std::string &Jbc2MplCompiler::GetBinName() const { + return kBinNameJbc2mpl; +} + +DefaultOption Jbc2MplCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + return DefaultOption(); +} + +void Jbc2MplCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".mpl"); + tempFiles.push_back(action.GetFullOutputName() + ".mplt"); +} + +std::unordered_set Jbc2MplCompiler::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".mpl"); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mplt"); + return finalOutputs; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/ld_compiler.cpp b/ecmascript/mapleall/maple_driver/src/ld_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9320eb04052d811b73275a05ca10e91c15fb28c --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/ld_compiler.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "file_utils.h" +#include "triple.h" +#include "default_options.def" + +namespace maple { + +static const std::string kAarch64BeIlp32Gcc = "aarch64_be-linux-gnuilp32-gcc"; +static const std::string kAarch64BeGcc = "aarch64_be-linux-gnu-gcc"; + +std::string LdCompilerBeILP32::GetBinPath(const MplOptions&) const { + std::string gccPath = FileUtils::SafeGetenv(kGccBePathEnv) + "/"; + const std::string &gccTool = Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32 ? 
+ kAarch64BeIlp32Gcc : kAarch64BeGcc; + std::string gccToolPath = gccPath + gccTool; + + if (!FileUtils::IsFileExists(gccToolPath)) { + LogInfo::MapleLogger(kLlErr) << kGccBePathEnv << " environment variable must be set as the path to " + << gccTool << "\n"; + CHECK_FATAL(false, "%s environment variable must be set as the path to %s\n", + kGccBePathEnv, gccTool.c_str()); + } + + return gccPath; +} + +const std::string &LdCompilerBeILP32::GetBinName() const { + if (Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + return kAarch64BeIlp32Gcc; + } else { + return kAarch64BeGcc; + } +} + +std::string LdCompiler::GetBinPath(const MplOptions&) const { +#ifdef ANDROID + return "prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/"; +#else + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/"; +#endif +} + +// Required to use ld instead of gcc; ld will be implemented later +const std::string &LdCompiler::GetBinName() const { + return kBinNameGcc; +} + +/* the tool name must be the same as exeName field in Descriptor structure */ +const std::string &LdCompiler::GetTool() const { + return kLdFlag; +} + +DefaultOption LdCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + uint32_t len = sizeof(kLdDefaultOptions) / sizeof(MplOption); + DefaultOption defaultOptions = { std::make_unique(len), len }; + + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kLdDefaultOptions[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +std::string LdCompiler::GetInputFileName(const MplOptions &options, const Action &action) const { + std::string files; + + bool isFirstEntry = true; + for (const auto &file : action.GetLinkFiles()) { + /* Split Input files with " "; (except first entry) */ + if (isFirstEntry == true) { + isFirstEntry = false; + } else { + files += " "; + } + + files += StringUtils::GetStrBeforeLast(file, ".") + ".o"; + } + return files; +} + +void LdCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + finalOptions.emplace_back("-o", name); +} + +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/maple.cpp b/ecmascript/mapleall/maple_driver/src/maple.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3f1e43f4d751423b73799aa3b3dcc95b62214e7d --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/maple.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
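LdCompiler::GetInputFileName, shown just above, rewrites every link input to its .o form and joins the results with single spaces. Assuming StringUtils::GetStrBeforeLast returns the substring before the last occurrence of the separator (an assumption; only its call site appears in the patch), the behaviour reduces to this sketch:

```
#include <iostream>
#include <string>
#include <vector>

// Assumed behaviour of StringUtils::GetStrBeforeLast: everything before the
// last occurrence of delim, or the whole string if delim is absent.
std::string GetStrBeforeLast(const std::string &s, const std::string &delim) {
  size_t pos = s.rfind(delim);
  return pos == std::string::npos ? s : s.substr(0, pos);
}

// Mirrors the join loop in LdCompiler::GetInputFileName: space-separated,
// with no leading space before the first entry.
std::string BuildLinkInputs(const std::vector<std::string> &linkFiles) {
  std::string files;
  bool first = true;
  for (const auto &file : linkFiles) {
    if (!first) files += " ";
    first = false;
    files += GetStrBeforeLast(file, ".") + ".o";
  }
  return files;
}

int main() {
  std::cout << BuildLinkInputs({"a.mpl", "dir/b.VtableImpl.s"}) << '\n';
  // prints: a.o dir/b.VtableImpl.o
}
```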
+ */ +#include "compiler_factory.h" +#include "error_code.h" +#include "mpl_options.h" +#include "mpl_logging.h" + +using namespace maple; + +int main(int argc, char **argv) { + MplOptions mplOptions; + int ret = static_cast(mplOptions.Parse(argc, argv)); + if (ret == kErrorNoError) { + ret = CompilerFactory::GetInstance().Compile(mplOptions); + } + PrintErrorMessage(ret); + return ret; +} diff --git a/ecmascript/mapleall/maple_driver/src/maple_comb_compiler.cpp b/ecmascript/mapleall/maple_driver/src/maple_comb_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..edefcc0d64eeaf9d8bfd425c5bee820eec42fdb6 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/maple_comb_compiler.cpp @@ -0,0 +1,269 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "driver_options.h" +#include "string_utils.h" +#include "mpl_logging.h" +#include "driver_runner.h" +#include "inline.h" +#include "me_phase_manager.h" +#include "constantfold.h" + +namespace maple { + +std::string MapleCombCompiler::GetInputFileName(const MplOptions &options, const Action &action) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + if (action.GetInputFileType() == InputFileType::kFileTypeVtableImplMpl) { + return action.GetFullOutputName() + ".VtableImpl.mpl"; + } + if (action.GetInputFileType() == InputFileType::kFileTypeBpl) { + return action.GetFullOutputName() + ".bpl"; + } + return action.GetFullOutputName() + ".mpl"; +} + +void MapleCombCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + std::string filePath; + filePath = action.GetFullOutputName() + ".data.muid"; + tempFiles.push_back(filePath); + filePath = action.GetFullOutputName() + ".func.muid"; + tempFiles.push_back(filePath); + for (auto iter = tempFiles.begin(); iter != tempFiles.end();) { + std::ifstream infile; + infile.open(*iter); + if (infile.fail()) { + iter = tempFiles.erase(iter); + } else { + ++iter; + } + infile.close(); + } +} + +std::unordered_set MapleCombCompiler::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".VtableImpl.mpl"); + return finalOutputs; +} + +void MapleCombCompiler::PrintCommand(const MplOptions &options, const Action &action) const { + std::string runStr = "--run="; + std::ostringstream optionStr; + optionStr << "--option=\""; + std::string connectSym = ""; + bool firstComb = false; + if (options.GetExeOptions().find(kBinNameMe) != options.GetExeOptions().end()) { + runStr += "me"; + auto it = options.GetExeOptions().find(kBinNameMe); + for (auto &opt : it->second) { + optionStr << " " << opt; + } + firstComb = true; + } + if (options.GetExeOptions().find(kBinNameMpl2mpl) != options.GetExeOptions().end()) { + if (firstComb) { + runStr += ":mpl2mpl"; + optionStr << ":"; + } else { + runStr += "mpl2mpl"; + } + auto it = 
options.GetExeOptions().find(kBinNameMpl2mpl); + for (auto &opt : it->second) { + optionStr << " " << opt; + } + } + + std::string driverOptions = options.GetCommonOptionsStr(); + + optionStr << "\""; + LogInfo::MapleLogger() << "Starting:" << options.GetExeFolder() << "maple " << runStr << " " + << optionStr.str() << " " << driverOptions << GetInputFileName(options, action) << '\n'; +} + +std::string MapleCombCompiler::GetStringOfSafetyOption() const { + std::string safetyOptionStr = ""; + if (MeOption::safeRegionMode == true) { + safetyOptionStr += "safe-region "; + } + if (MeOption::isNpeCheckAll == true) { + safetyOptionStr += "npe-check-dynamic-all "; + } + switch (MeOption::npeCheckMode) { + case kStaticCheck: + safetyOptionStr += "npe-check-static "; + break; + case kDynamicCheck: + safetyOptionStr += "npe-check-dynamic "; + break; + case kDynamicCheckSilent: + safetyOptionStr += "npe-check-dynamic-silent "; + break; + default: + break; + } + switch (MeOption::boundaryCheckMode) { + case kStaticCheck: + safetyOptionStr += "boundary-check-static "; + break; + case kDynamicCheck: + safetyOptionStr += "boundary-check-dynamic "; + break; + case kDynamicCheckSilent: + safetyOptionStr += "boundary-check-dynamic-silent "; + break; + default: + break; + } + return safetyOptionStr; +} + +ErrorCode MapleCombCompiler::MakeMeOptions(const MplOptions &options, DriverRunner &runner) { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMe); + if (it == options.GetRunningExes().end()) { + return kErrorNoError; + } + + auto itOpt = options.GetExeOptions().find(kBinNameMe); + if (itOpt != options.GetExeOptions().end()) { + const auto &meExeOpts = itOpt->second; + const std::deque strMeOptions(meExeOpts.begin(), meExeOpts.end()); + (void)maplecl::CommandLine::GetCommandLine().HandleInputArgs(strMeOptions, meCategory); + } + + bool result = MeOption::GetInstance().SolveOptions(opts::debug); + if (result == false) { + LogInfo::MapleLogger() << "Meet error me options\n"; + return kErrorCompileFail; + } + MeOption::generalRegOnly = options.HasSetGeneralRegOnly(); + MeOption::npeCheckMode = options.GetNpeCheckMode(); + MeOption::isNpeCheckAll = opts::npeDynamicCheckAll; + MeOption::boundaryCheckMode = options.GetBoundaryCheckMode(); + MeOption::safeRegionMode = opts::safeRegionOption; + if (MeOption::optLevel == 0) { + std::string safetyOptionStr = GetStringOfSafetyOption(); + if (!safetyOptionStr.empty()) { + safetyOptionStr.erase(safetyOptionStr.end() - 1); + WARN(kLncWarn, "warning: The safety option %s must be used in conjunction with O2 mode", + safetyOptionStr.c_str()); + } + } + + // Set me options for driver runner + runner.SetMeOptions(&MeOption::GetInstance()); + return kErrorNoError; +} + +ErrorCode MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, DriverRunner &runner) { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMpl2mpl); + if (it == options.GetRunningExes().end()) { + return kErrorNoError; + } + + auto itOpt = options.GetExeOptions().find(kBinNameMpl2mpl); + if (itOpt != options.GetExeOptions().end()) { + const auto &mpl2mplExeOpts = itOpt->second; + const std::deque strMpl2mplOptions(mpl2mplExeOpts.begin(), mpl2mplExeOpts.end()); + (void)maplecl::CommandLine::GetCommandLine().HandleInputArgs(strMpl2mplOptions, mpl2mplCategory); + } + + auto &mpl2mplOption = Options::GetInstance(); + bool result = mpl2mplOption.SolveOptions(opts::debug); + if (result == false) { + 
LogInfo::MapleLogger() << "Meet error mpl2mpl options\n"; + return kErrorCompileFail; + } + // Set mpl2mpl options for driver runner + runner.SetMpl2mplOptions(&Options::GetInstance()); + return kErrorNoError; +} + +std::string MapleCombCompiler::DecideOutExe(const MplOptions &options) { + std::string printOutExe = ""; + auto &selectExes = options.GetSelectedExes(); + if (selectExes[selectExes.size() - 1] == kBinNameMapleComb) { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMpl2mpl); + if (it != options.GetRunningExes().end()) { + printOutExe = kBinNameMpl2mpl; + return printOutExe; + } + it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMe); + if (it != options.GetRunningExes().end()) { + printOutExe = kBinNameMe; + return printOutExe; + } + } + return selectExes[selectExes.size() - 1]; +} + +ErrorCode MapleCombCompiler::Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) { + std::string fileName = GetInputFileName(options, action); + bool fileParsed = true; + if (theModule == nullptr) { + theModule = std::make_unique(fileName); + fileParsed = false; + } + options.PrintCommand(&action); + LogInfo::MapleLogger() << "Starting maplecomb\n"; + theModule->InitPartO2List(opts::partO2); + DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName, + fileName, fileName, opts::withDwarf, fileParsed, + opts::timePhase, opts::genVtable, + opts::genMeMpl, opts::genMapleBC, + opts::genLMBC); + ErrorCode ret = kErrorNoError; + + MIRParser parser(*theModule); + MIRSrcLang srcLang = kSrcLangUnknown; + ret = runner.ParseSrcLang(srcLang); + if (ret != kErrorNoError) { + return ret; + } + theModule->SetSrcLang(srcLang); + + // Add running phases and default options according to the srcLang (only for auto mode) + ret = options.AppendCombOptions(theModule->GetSrcLang()); + if (ret != kErrorNoError) { + return ret; + } + + ret = MakeMeOptions(options, runner); + if (ret != kErrorNoError) { + return ret; + } + ret = MakeMpl2MplOptions(options, runner); + if (ret != kErrorNoError) { + return ret; + } + runner.SetPrintOutExe(DecideOutExe(options)); + + // Parse the input file + ret = runner.ParseInput(); + if (ret != kErrorNoError) { + return ret; + } + + if (opts::debug) { + PrintCommand(options, action); + } + ErrorCode nErr = runner.Run(); + return nErr; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp b/ecmascript/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fb5aaeb7e7d754a2515573e1d32d6e2cbc056322 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
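MapleCombCompiler::DecideOutExe above gives mpl2mpl precedence over me whenever the final selected executable is the combined compiler. That precedence can be isolated into a small sketch; the literal tool names stand in for kBinNameMapleComb, kBinNameMpl2mpl and kBinNameMe, and the free function is a hypothetical simplification of the real method, which reads MplOptions:

```
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Mirrors DecideOutExe's precedence: if the final selected tool is the
// combined compiler, report mpl2mpl when it runs, else me; otherwise
// report the final selected tool itself.
std::string DecideOutExe(const std::vector<std::string> &selectedExes,
                         const std::vector<std::string> &runningExes) {
  auto runs = [&runningExes](const std::string &exe) {
    return std::find(runningExes.begin(), runningExes.end(), exe) != runningExes.end();
  };
  if (selectedExes.back() == "maplecomb") {
    if (runs("mpl2mpl")) return "mpl2mpl";
    if (runs("me")) return "me";
  }
  return selectedExes.back();
}

int main() {
  std::cout << DecideOutExe({"maplecomb"}, {"me", "mpl2mpl"}) << '\n';  // mpl2mpl wins over me
}
```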
+ */ +#include "compiler.h" +#include "types_def.h" +#include + +namespace maple { + +// FixMe +static const std::string kTmpBin = "maple"; + +const std::string &MapleCombCompilerWrp::GetBinName() const { + return kTmpBin; +} + +std::string MapleCombCompilerWrp::GetBinPath(const MplOptions &mplOptions) const { + return FileUtils::SafeGetenv(kMapleRoot) + "/output/" + + FileUtils::SafeGetenv("MAPLE_BUILD_TYPE") + "/bin/"; +} + +DefaultOption MapleCombCompilerWrp::GetDefaultOptions(const MplOptions &options, const Action &action) const { + /* need to add --maple-phase option to run only maple phase. + * linker will be called as separated step (AsCompiler). + */ + opts::maplePhase.SetValue(true); + + /* opts::infile must be cleared because we should run compilation for each file separately. + * Separated input file are set in Actions. + */ + opts::infile.Clear(); + + return DefaultOption(); +} + +std::string MapleCombCompilerWrp::GetInputFileName(const MplOptions &options, const Action &action) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + + InputFileType fileType = action.GetInputFileType(); + auto fullOutput = action.GetFullOutputName(); + if (fileType == InputFileType::kFileTypeVtableImplMpl) { + return fullOutput + ".VtableImpl.mpl"; + } + if (fileType == InputFileType::kFileTypeBpl) { + return fullOutput + ".bpl"; + } + return fullOutput + ".mpl"; +} + +void MapleCombCompilerWrp::GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".s"); +} + +std::unordered_set MapleCombCompilerWrp::GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".s"); + return finalOutputs; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/mpl_options.cpp b/ecmascript/mapleall/maple_driver/src/mpl_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1749242be4f43fe304bde71621577723846ec8ef --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/mpl_options.cpp @@ -0,0 +1,961 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mpl_options.h" +#include +#include +#include +#include +#include "compiler.h" +#include "compiler_factory.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "string_utils.h" +#include "version.h" +#include "default_options.def" +#include "me_option.h" +#include "option.h" +#include "cg_option.h" +#include "driver_options.h" +#include "triple.h" + + +namespace maple { +using namespace maplebe; + +/* tool -> OptionCategory map: ld -> ldCategory, me -> meCategory and etc... 
*/ +static std::unordered_map exeCategories = + { + {"maple", &driverCategory}, + {maple::kBinNameClang, &clangCategory}, + {maple::kBinNameCpp2mpl, &hir2mplCategory}, + {maple::kBinNameMpl2mpl, &mpl2mplCategory}, + {maple::kBinNameMe, &meCategory}, + {maple::kBinNameMplcg, &cgCategory}, + {maple::kAsFlag, &asCategory}, + {maple::kLdFlag, &ldCategory}, + {maple::kBinNameDex2mpl, &dex2mplCategory}, + {maple::kBinNameJbc2mpl, &jbc2mplCategory}, + {maple::kBinNameMplipa, &ipaCategory} + }; + +#ifdef ANDROID +const std::string kMapleDriverVersion = "MapleDriver " + std::to_string(Version::GetMajorVersion()) + "." + + std::to_string(Version::GetMinorVersion()) + " 20190929"; +#else +const std::string kMapleDriverVersion = "Maple Version : " + Version::GetVersionStr(); +#endif + +const std::vector kMapleCompilers = { "jbc2mpl", "hir2mpl", + "dex2mpl", "mplipa", "as", "ld", + "me", "mpl2mpl", "mplcg", "clang"}; + +ErrorCode MplOptions::Parse(int argc, char **argv) { + (void)maplecl::CommandLine::GetCommandLine().Parse(argc, argv); + exeFolder = FileUtils::GetFileFolder(FileUtils::GetExecutable()); + + // We should recognize O0, O2 and run options firstly to decide the real options + ErrorCode ret = HandleEarlyOptions(); + if (ret != kErrorNoError) { + return ret; + } + + /* Check whether the input files were valid */ + ret = CheckInputFiles(); + if (ret != kErrorNoError) { + return ret; + } + + // Decide runningExes for default options(O0, O2) by input files + if (runMode != RunMode::kCustomRun) { + ret = DecideRunningPhases(); + if (ret != kErrorNoError) { + return ret; + } + } else { // kCustomRun + /* kCustomRun run mode is set if --run=tool1:tool2 option is used. + * This Option is parsed on DecideRunType step. DecideRunType fills runningExes vector. + * DecideRunningPhases(runningExes) creates ActionsTree in kCustomRun mode. + * Maybe we can create Actions tree in DecideRunType in order to not use runningExes? + */ + ret = DecideRunningPhases(runningExes); + if (ret != kErrorNoError) { + return ret; + } + } + + ret = HandleOptions(); + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +int MplOptions::Parse(std::vector argvs) { + maplecl::CommandLine::GetCommandLine().Parse(argvs); + exeFolder = FileUtils::GetFileFolder(FileUtils::GetExecutable()); + + // We should recognize O0, O2 and run options firstly to decide the real options + ErrorCode ret = HandleEarlyOptions(); + if (ret != kErrorNoError) { + return ret; + } + + ret = HandleInFileTextOptions(); + if (ret != kErrorNoError) { + return ret; + } + + if (needFile) { + /* Check whether the input files were valid */ + ret = CheckInputFiles(); + if (ret != kErrorNoError) { + return ret; + } + } + + // Decide runningExes for default options(O0, O2) by input files + if (runMode != RunMode::kCustomRun) { + ret = DecideRunningPhases(); + if (ret != kErrorNoError) { + return ret; + } + } else { // kCustomRun + /* kCustomRun run mode is set if --run=tool1:tool2 option is used. + * This Option is parsed on DecideRunType step. DecideRunType fills runningExes vector. + * DecideRunningPhases(runningExes) creates ActionsTree in kCustomRun mode. + * Maybe we can create Actions tree in DecideRunType in order to not use runningExes? 
+ */ + ret = DecideRunningPhases(runningExes); + if (ret != kErrorNoError) { + return ret; + } + } + + ret = HandleOptions(); + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +ErrorCode MplOptions::HandleOptions() { + if (opts::output.IsEnabledByUser() && GetActions().size() > 1) { + LogInfo::MapleLogger(kLlErr) << "Cannot specify -o when generating multiple outputs\n"; + return kErrorInvalidParameter; + } + + if (opts::saveTempOpt.IsEnabledByUser()) { + opts::genMeMpl.SetValue(true); + opts::genVtable.SetValue(true); + StringUtils::Split(opts::saveTempOpt, saveFiles, ','); + } + + if (opts::target.IsEnabledByUser()) { + Triple::GetTriple().Init(opts::target.GetValue()); + } else { + Triple::GetTriple().Init(); + } + + if (!opts::safeRegionOption) { + if (opts::npeNoCheck) { + npeCheckMode = SafetyCheckMode::kNoCheck; + } + + if (opts::npeStaticCheck) { + npeCheckMode = SafetyCheckMode::kStaticCheck; + } + + if (opts::boundaryNoCheck) { + boundaryCheckMode = SafetyCheckMode::kNoCheck; + } + + if (opts::boundaryStaticCheck) { + boundaryCheckMode = SafetyCheckMode::kStaticCheck; + } + } else { /* safeRegionOption is enabled */ + npeCheckMode = SafetyCheckMode::kDynamicCheck; + boundaryCheckMode = SafetyCheckMode::kDynamicCheck; + } + + if (opts::npeDynamicCheck) { + npeCheckMode = SafetyCheckMode::kDynamicCheck; + } + + if (opts::npeDynamicCheckSilent) { + npeCheckMode = SafetyCheckMode::kDynamicCheckSilent; + } + + if (opts::boundaryDynamicCheck) { + boundaryCheckMode = SafetyCheckMode::kDynamicCheck; + } + + if (opts::boundaryDynamicCheckSilent) { + boundaryCheckMode = SafetyCheckMode::kDynamicCheckSilent; + } + + HandleExtraOptions(); + + return kErrorNoError; +} + +ErrorCode MplOptions::HandleEarlyOptions() { + if (opts::version) { + LogInfo::MapleLogger() << kMapleDriverVersion << "\n"; + + /* exit, if only one "version" option is set.
Else: continue compilation */ + if (driverCategory.GetEnabledOptions().size() == 1) { + return kErrorExitHelp; + } + } + + if (opts::printDriverPhases) { + DumpActionTree(); + return kErrorExitHelp; + } + + if (opts::help.IsEnabledByUser()) { + if (auto it = exeCategories.find(opts::help.GetValue()); it != exeCategories.end()) { + maplecl::CommandLine::GetCommandLine().HelpPrinter(*it->second); + } else { + maple::LogInfo::MapleLogger() << "USAGE: maple [options]\n\n" + " Example 1: /maple --run=me:mpl2mpl:mplcg " + "--option=\"[MEOPT]:[MPL2MPLOPT]:[MPLCGOPT]\"\n" + " --mplt=MPLTPATH inputFile.mpl\n" + " Example 2: /maple -O2 --mplt=mpltPath inputFile.dex\n\n" + "==============================\n" + " Options:\n"; + maplecl::CommandLine::GetCommandLine().HelpPrinter(); + } + return kErrorExitHelp; + } + + if (opts::o0.IsEnabledByUser() || + opts::o1.IsEnabledByUser() || + opts::o2.IsEnabledByUser() || + opts::os.IsEnabledByUser()) { + + if (opts::run.IsEnabledByUser()) { + /* -Ox and --run should not appear at the same time */ + LogInfo::MapleLogger(kLlErr) << "Cannot set auto mode and run mode at the same time!\n"; + return kErrorInvalidParameter; + } else { + runMode = RunMode::kAutoRun; + } + } else if (opts::run.IsEnabledByUser()) { + runMode = RunMode::kCustomRun; + + UpdateRunningExe(opts::run); + if (!opts::optionOpt.GetValue().empty()) { + if (UpdateExeOptions(opts::optionOpt) != kErrorNoError) { + return kErrorInvalidParameter; + } + } + } else { + runMode = RunMode::kAutoRun; + opts::o0.SetValue(true); // enable default -O0 + } + + return kErrorNoError; +} + +void MplOptions::HandleExtraOptions() { + for (const auto &val : opts::clangOpt.GetValues()) { + UpdateExeOptions(val, kBinNameClang); + } + + for (const auto &val : opts::hir2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameCpp2mpl); + } + + for (const auto &val : opts::mpl2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMpl2mpl); + printExtraOptStr << " --mpl2mpl-opt=" << "\"" << val << "\""; + } + + for (const auto &val : opts::meOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMe); + printExtraOptStr << " --me-opt=" << "\"" << val << "\""; + } + + for (const auto &val : opts::mplcgOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMplcg); + printExtraOptStr << " --mplcg-opt=" << "\"" << val << "\""; + } + + for (const auto &val : opts::asOpt.GetValues()) { + UpdateExeOptions(val, kAsFlag); + } + + for (const auto &val : opts::ldOpt.GetValues()) { + UpdateExeOptions(val, kLdFlag); + } + + for (const auto &val : opts::dex2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameDex2mpl); + } + + for (const auto &val : opts::jbc2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameJbc2mpl); + } + + for (const auto &val : opts::mplipaOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMplipa); + } + + // A workaround to pass --general-reg-only from the cg options to global options + auto it = exeOptions.find(kBinNameMplcg); + if (it != exeOptions.end()) { + for (const auto &opt : it->second) { + if (opt == "--general-reg-only") { + generalRegOnly = true; + break; + } + } + } +} + +ErrorCode MplOptions::HandleInFileTextOptions() { + if (opts::intext.GetValue() == "" && opts::inFileName.GetValue() == "") { + return kErrorNoError; + } + if (opts::intext.GetValue() != "" && opts::inFileName.GetValue() != "") { + InputInfo *inputInfo = new InputInfo(); + inputInfo->SetInputTextAndInputFileName(opts::intext.GetValue().c_str(), opts::inFileName.GetValue().c_str()); + needFile = false; + 
inputInfos.push_back(std::unique_ptr(inputInfo)); + return kErrorNoError; + } + return kErrorInitFail; +} + +std::unique_ptr MplOptions::DecideRunningPhasesByType(const InputInfo *const inputInfo, + bool isMultipleFiles) { + InputFileType inputFileType = inputInfo->GetInputFileType(); + std::unique_ptr currentAction = std::make_unique(kInputPhase, inputInfo); + std::unique_ptr newAction; + + bool isNeedMapleComb = true; + bool isNeedMplcg = true; + bool isNeedAs = true; + switch (inputFileType) { + case InputFileType::kFileTypeC: + case InputFileType::kFileTypeCpp: + UpdateRunningExe(kBinNameClang); + newAction = std::make_unique(kBinNameClang, inputInfo, currentAction); + currentAction = std::move(newAction); + [[clang::fallthrough]]; + case InputFileType::kFileTypeAst: + UpdateRunningExe(kBinNameCpp2mpl); + newAction = std::make_unique(kBinNameCpp2mpl, inputInfo, currentAction); + currentAction = std::move(newAction); + break; + case InputFileType::kFileTypeJar: + // fall-through + case InputFileType::kFileTypeClass: + UpdateRunningExe(kBinNameJbc2mpl); + newAction = std::make_unique(kBinNameJbc2mpl, inputInfo, currentAction); + currentAction = std::move(newAction); + isNeedAs = false; + break; + case InputFileType::kFileTypeDex: + UpdateRunningExe(kBinNameDex2mpl); + newAction = std::make_unique(kBinNameDex2mpl, inputInfo, currentAction); + currentAction = std::move(newAction); + isNeedAs = false; + break; + case InputFileType::kFileTypeMpl: + break; + case InputFileType::kFileTypeMeMpl: + case InputFileType::kFileTypeVtableImplMpl: + isNeedMapleComb = false; + break; + case InputFileType::kFileTypeS: + isNeedMplcg = false; + isNeedMapleComb = false; + break; + case InputFileType::kFileTypeBpl: + break; + case InputFileType::kFileTypeObj: + isNeedMplcg = false; + isNeedMapleComb = false; + isNeedAs = false; + break; + case InputFileType::kFileTypeNone: + return nullptr; + break; + default: + return nullptr; + break; + } + + if (opts::maplePhase == true) { + isNeedAs = false; + } + + if (isNeedMapleComb) { + if (isMultipleFiles) { + selectedExes.push_back(kBinNameMapleCombWrp); + newAction = std::make_unique(kBinNameMapleCombWrp, inputInfo, currentAction); + currentAction = std::move(newAction); + } else { + selectedExes.push_back(kBinNameMapleComb); + newAction = std::make_unique(kBinNameMapleComb, inputInfo, currentAction); + currentAction = std::move(newAction); + } + } + if (isNeedMplcg && !isMultipleFiles) { + selectedExes.push_back(kBinNameMplcg); + runningExes.push_back(kBinNameMplcg); + newAction = std::make_unique(kBinNameMplcg, inputInfo, currentAction); + currentAction = std::move(newAction); + } + + if (isNeedAs == true) { + UpdateRunningExe(kAsFlag); + newAction = std::make_unique(kAsFlag, inputInfo, currentAction); + currentAction = std::move(newAction); + } + + if (!opts::compileWOLink) { + UpdateRunningExe(kLdFlag); + /* "Linking step" Action can have several inputActions. + * Each inputAction links to previous Actions to create the action tree. + * For linking step, inputActions are all assembly actions. + * Linking step Action is created outside this function because + * we must create all assembly actions (for all input files) before. 
+ */ + } + + return currentAction; +} + +ErrorCode MplOptions::DecideRunningPhases() { + ErrorCode ret = kErrorNoError; + std::vector<std::unique_ptr<Action>> linkActions; + std::unique_ptr<Action> lastAction; + bool isMultipleFiles = (inputInfos.size() > 1); + + for (auto &inputInfo : inputInfos) { + CHECK_FATAL(inputInfo != nullptr, "InputInfo must be created!!"); + + lastAction = DecideRunningPhasesByType(inputInfo.get(), isMultipleFiles); + + /* TODO: add a message interface for a clean exit on compilation errors and use it here instead of CHECK_FATAL. */ + CHECK_FATAL(lastAction != nullptr, "Incorrect input file type: %s", + inputInfo->GetInputFile().c_str()); + + if ((lastAction->GetTool() == kAsFlag && !opts::compileWOLink) || + lastAction->GetTool() == kInputPhase) { + /* 1. For the linking step, inputActions are all assembly actions; + * 2. If we try to link with the maple driver, inputActions are all kInputPhase objects. + */ + linkActions.push_back(std::move(lastAction)); + } else { + rootActions.push_back(std::move(lastAction)); + } + } + + if (!linkActions.empty()) { + /* "a.out" is the default output file name - fix if it's needed */ + auto currentAction = std::make_unique<Action>(kLdFlag, linkActions, AllocateInputInfo("a.out")); + rootActions.push_back(std::move(currentAction)); + } + + return ret; +} + +ErrorCode MplOptions::MFCreateActionByExe(const std::string &exe, std::unique_ptr<Action> &currentAction, + const InputInfo *const inputInfo, bool &wasWrpCombCompilerCreated) { + ErrorCode ret = kErrorNoError; + + if (exe == kBinNameMe || exe == kBinNameMpl2mpl || exe == kBinNameMplcg) { + if (wasWrpCombCompilerCreated == false) { + auto newAction = std::make_unique<Action>(kBinNameMapleCombWrp, inputInfo, currentAction); + currentAction = std::move(newAction); + wasWrpCombCompilerCreated = true; + } else { + return ret; + } + } else { + auto newAction = std::make_unique<Action>(exe, inputInfo, currentAction); + currentAction = std::move(newAction); + } + + return ret; +} + +ErrorCode MplOptions::SFCreateActionByExe(const std::string &exe, std::unique_ptr<Action> &currentAction, + const InputInfo *const inputInfo, bool &isCombCompiler) { + ErrorCode ret = kErrorNoError; + + if (exe == kBinNameMe || exe == kBinNameMpl2mpl) { + if (isCombCompiler == false) { + auto newAction = std::make_unique<Action>(kBinNameMapleComb, inputInfo, currentAction); + currentAction = std::move(newAction); + isCombCompiler = true; + } else { + return ret; + } + } else { + auto newAction = std::make_unique<Action>(exe, inputInfo, currentAction); + currentAction = std::move(newAction); + } + + return ret; +} + +ErrorCode MplOptions::DecideRunningPhases(const std::vector<std::string> &runExes) { + ErrorCode ret = kErrorNoError; + + bool isMultipleFiles = (inputInfos.size() > 1); + + for (auto &inputInfo : inputInfos) { + CHECK_FATAL(inputInfo != nullptr, "InputInfo must be created!!"); + /* MplOptions owns all InputInfos and stays alive for the whole compilation, + * so we can use a raw pointer inside an Action.
+ */ + const InputInfo *const rawInputInfo = inputInfo.get(); + + bool isCombCompiler = false; + bool wasWrpCombCompilerCreated = false; + + auto currentAction = std::make_unique(kInputPhase, inputInfo.get()); + + for (const auto &exe : runExes) { + if (isMultipleFiles == true) { + ret = MFCreateActionByExe(exe, currentAction, rawInputInfo, wasWrpCombCompilerCreated); + if (ret != kErrorNoError) { + return ret; + } + } else { + ret = SFCreateActionByExe(exe, currentAction, rawInputInfo, isCombCompiler); + if (ret != kErrorNoError) { + return ret; + } + } + } + + rootActions.push_back(std::move(currentAction)); + } + + return ret; +} + +void MplOptions::DumpActionTree() const { + for (auto &rNode : rootActions) { + DumpActionTree(*rNode, 0); + } +} + +void MplOptions::DumpActionTree(const Action &action, int indents) const { + for (const std::unique_ptr &a : action.GetInputActions()) { + DumpActionTree(*a, indents + 1); + } + + if (indents != 0) { + LogInfo::MapleLogger() << "|"; + /* print indents */ + for (int i = 0; i < indents; ++i) { + LogInfo::MapleLogger() << "-"; + } + } + + if (action.GetTool() == kInputPhase) { + LogInfo::MapleLogger() << action.GetTool() << " " << action.GetInputFile() << '\n'; + } else { + LogInfo::MapleLogger() << action.GetTool() << '\n'; + } +} + +std::string MplOptions::GetCommonOptionsStr() const { + std::string driverOptions; + static const std::vector extraExclude = { &opts::run, + &opts::optionOpt, + &opts::infile, + &opts::mpl2mplOpt, &opts::meOpt, &opts::mplcgOpt, + &opts::o0, &opts::o1, &opts::o2, &opts::os }; + + for (auto const &opt : driverCategory.GetEnabledOptions()) { + if (!(std::find(std::begin(extraExclude), std::end(extraExclude), opt) != std::end(extraExclude))) { + for (const auto &val : opt->GetRawValues()) { + if (!val.empty()) { + driverOptions += opt->GetName() + " " + val + " "; + } else { + driverOptions += opt->GetName() + " "; + } + } + } + } + + return driverOptions; +} + +InputInfo *MplOptions::AllocateInputInfo(const std::string &inputFile) { + auto inputInfo = std::make_unique(inputFile); + InputInfo *ret = inputInfo.get(); + + inputInfos.push_back(std::move(inputInfo)); + + /* inputInfo continue to exist in inputInfos vector of unique_ptr so we can return raw pointer */ + return ret; +} + +ErrorCode MplOptions::CheckInputFiles() { + auto &badArgs = maplecl::CommandLine::GetCommandLine().badCLArgs; + + /* Set input files with --infile="file1 file2" option */ + if (opts::infile.IsEnabledByUser()) { + if (StringUtils::Trim(opts::infile).empty()) { + return kErrorFileNotFound; + } + + std::vector splitsInputFiles; + StringUtils::Split(opts::infile, splitsInputFiles, ','); + + /* inputInfo describes each input file for driver */ + for (auto &inFile : splitsInputFiles) { + if (FileUtils::IsFileExists(inFile)) { + inputFiles.push_back(inFile); + inputInfos.push_back(std::make_unique(inFile)); + } else { + LogInfo::MapleLogger(kLlErr) << "File does not exist: " << inFile << "\n"; + return kErrorFileNotFound; + } + } + } + + /* Set input files directly: maple file1 file2 */ + for (auto &arg : badArgs) { + if (FileUtils::IsFileExists(arg.first)) { + inputFiles.push_back(arg.first); + inputInfos.push_back(std::make_unique(arg.first)); + } else { + LogInfo::MapleLogger(kLlErr) << "Unknown option or non-existent input file: " << arg.first << "\n"; + if (!opts::ignoreUnkOpt) { + return kErrorInvalidParameter; + } + } + } + + if (inputFiles.empty()) { + return kErrorFileNotFound; + } + + return kErrorNoError; +} + +ErrorCode 
MplOptions::AppendCombOptions(MIRSrcLang srcLang) { + ErrorCode ret = kErrorNoError; + if (runMode == RunMode::kCustomRun) { + return ret; + } + + if (opts::o0) { + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsO0, sizeof(kMeDefaultOptionsO0) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO0, + sizeof(kMpl2MplDefaultOptionsO0) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO0ForC, + sizeof(kMpl2MplDefaultOptionsO0ForC) / sizeof(MplOption)); + } + } else if (opts::o2) { + if (opts::withIpa) { + UpdateRunningExe(kBinNameMplipa); + } + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsO2, + sizeof(kMeDefaultOptionsO2) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO2, + sizeof(kMpl2MplDefaultOptionsO2) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsO2ForC, + sizeof(kMeDefaultOptionsO2ForC) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO2ForC, + sizeof(kMpl2MplDefaultOptionsO2ForC) / sizeof(MplOption)); + } + } else if (opts::os) { + if (srcLang == kSrcLangJava) { + return kErrorNotImplement; + } + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsOs, + sizeof(kMeDefaultOptionsOs) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsOs, + sizeof(kMpl2MplDefaultOptionsOs) / sizeof(MplOption)); + } + + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +ErrorCode MplOptions::AppendMplcgOptions(MIRSrcLang srcLang) { + ErrorCode ret = kErrorNoError; + if (runMode == RunMode::kCustomRun) { + return ret; + } + if (opts::o0) { + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO0, + sizeof(kMplcgDefaultOptionsO0) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO0ForC, + sizeof(kMplcgDefaultOptionsO0ForC) / sizeof(MplOption)); + } + } else if (opts::o2) { + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO2, + sizeof(kMplcgDefaultOptionsO2) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO2ForC, + sizeof(kMplcgDefaultOptionsO2ForC) / sizeof(MplOption)); + } + } else if (opts::os) { + if (srcLang == kSrcLangJava) { + return kErrorNotImplement; + } + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsOs, + sizeof(kMplcgDefaultOptionsOs) / sizeof(MplOption)); + } + + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +void MplOptions::DumpAppendedOptions(const std::string &exeName, + const MplOption mplOptions[], unsigned int length) const { + LogInfo::MapleLogger() << exeName << " Default Options: "; + for (size_t i = 0; i < length; ++i) { + LogInfo::MapleLogger() << mplOptions[i].GetKey() << " " + << mplOptions[i].GetValue() << " "; + } + LogInfo::MapleLogger() << "\n"; + + LogInfo::MapleLogger() << exeName << " Extra Options: "; + auto it = exeOptions.find(exeName); + if (it != exeOptions.end()) { + for (auto &opt : it->second) { + LogInfo::MapleLogger() << opt << " "; + } + } + + LogInfo::MapleLogger() << "\n"; +} + +ErrorCode 
+ErrorCode MplOptions::AppendDefaultOptions(const std::string &exeName,
+                                           MplOption mplOptions[], unsigned int length) {
+  if (opts::debug) {
+    DumpAppendedOptions(exeName, mplOptions, length);
+  }
+
+  for (unsigned int i = 0; i < length; ++i) {
+    mplOptions[i].SetValue(FileUtils::AppendMapleRootIfNeeded(mplOptions[i].GetNeedRootPath(),
+                                                              mplOptions[i].GetValue(), GetExeFolder()));
+    auto &key = mplOptions[i].GetKey();
+    auto &val = mplOptions[i].GetValue();
+
+    if (!val.empty()) {
+      exeOptions[exeName].push_front(val);
+    }
+    if (!key.empty()) {
+      exeOptions[exeName].push_front(key);
+    }
+  }
+
+  auto iter = std::find(runningExes.begin(), runningExes.end(), exeName);
+  if (iter == runningExes.end()) {
+    runningExes.push_back(exeName);
+  }
+  return kErrorNoError;
+}
+
+void MplOptions::UpdateExeOptions(const std::string &options, const std::string &tool) {
+  std::vector<std::string> splittedOptions;
+  StringUtils::Split(options, splittedOptions, ' ');
+
+  auto &toolOptions = exeOptions[tool]; // generates an empty entry if it does not exist
+  for (auto &opt : splittedOptions) {
+    if (!opt.empty()) {
+      toolOptions.push_back(opt);
+    }
+  }
+}
+
+ErrorCode MplOptions::UpdateExeOptions(const std::string &args) {
+  std::vector<std::string> options;
+  StringUtils::Split(args, options, ':');
+
+  /* The number of tools and the number of option groups for them must be the same */
+  if (options.size() != runningExes.size()) {
+    LogInfo::MapleLogger(kLlErr) << "The --run and --option are not matched, please check them."
+                                 << "(Too many or too few)\n";
+    return kErrorInvalidParameter;
+  }
+
+  auto tool = runningExes.begin();
+  for (auto &opt : options) {
+    UpdateExeOptions(opt, *tool);
+    ++tool;
+  }
+
+  return kErrorNoError;
+}
+
+maplecl::OptionCategory *MplOptions::GetCategory(const std::string &tool) const {
+  auto it = exeCategories.find(tool);
+  if (it == exeCategories.end()) {
+    return nullptr;
+  }
+
+  return it->second;
+}
+
+void MplOptions::UpdateRunningExe(const std::string &args) {
+  std::vector<std::string> results;
+  StringUtils::Split(args, results, ':');
+  for (size_t i = 0; i < results.size(); ++i) {
+    auto iter = std::find(runningExes.begin(), runningExes.end(), results[i]);
+    if (iter == runningExes.end()) {
+      runningExes.push_back(results[i]);
+      selectedExes.push_back(results[i]);
+    }
+  }
+}
+
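+/*
+ * Pairing sketch (tool options illustrative): after UpdateRunningExe("me:mplcg"),
+ * UpdateExeOptions("--quiet: --verbose-asm") attaches "--quiet" to me and
+ * "--verbose-asm" to mplcg; a mismatched number of ':'-separated groups fails
+ * with kErrorInvalidParameter.
+ */
+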
LogInfo::MapleLogger() << "Starting:" << exeFolder << "maple" << optionStr.str() + << printExtraOptStr.str() << " " << driverOptions << inputs << '\n'; + } + if (runMode == RunMode::kCustomRun) { + PrintDetailCommand(action, true); + } + hasPrinted = true; +} + +void MplOptions::connectOptStr(std::string &optionStr, const std::string &exeName, bool &firstComb, + std::string &runStr) { + std::string connectSym = ""; + if (exeOptions.find(exeName) != exeOptions.end()) { + if (!firstComb) { + runStr += (":" + exeName); + optionStr += ":"; + } else { + runStr += exeName; + firstComb = false; + } + auto it = exeOptions.find(exeName); + for (const auto &opt : it->second) { + optionStr += (" " + opt); + } + } +} + +void MplOptions::PrintDetailCommand(const Action * const action, bool isBeforeParse) { + if (exeOptions.find(kBinNameMe) == exeOptions.end() && exeOptions.find(kBinNameMpl2mpl) == exeOptions.end() + && exeOptions.find(kBinNameMplcg) == exeOptions.end()) { + return; + } + std::string runStr = "--run="; + std::string optionStr; + optionStr += "--option=\""; + bool firstComb = true; + connectOptStr(optionStr, kBinNameMe, firstComb, runStr); + connectOptStr(optionStr, kBinNameMpl2mpl, firstComb, runStr); + connectOptStr(optionStr, kBinNameMplcg, firstComb, runStr); + optionStr += "\""; + + std::string driverOptions = GetCommonOptionsStr(); + auto inputs = GetInputFileNameForPrint(action); + + if (isBeforeParse) { + LogInfo::MapleLogger() << "Starting:" << exeFolder << "maple " << runStr << " " << optionStr << " " + << printExtraOptStr.str() << " " << driverOptions << inputs << '\n'; + } else { + LogInfo::MapleLogger() << "Finished:" << exeFolder << "maple " << runStr << " " << optionStr << " " + << driverOptions << inputs << '\n'; + } +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_driver/src/mplcg_compiler.cpp b/ecmascript/mapleall/maple_driver/src/mplcg_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..347127fde7a7c5827f37ed82f1170314b3542bc2 --- /dev/null +++ b/ecmascript/mapleall/maple_driver/src/mplcg_compiler.cpp @@ -0,0 +1,301 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+} // namespace maple
diff --git a/ecmascript/mapleall/maple_driver/src/mplcg_compiler.cpp b/ecmascript/mapleall/maple_driver/src/mplcg_compiler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..347127fde7a7c5827f37ed82f1170314b3542bc2
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/src/mplcg_compiler.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "compiler.h"
+#include "driver_options.h"
+#include "default_options.def"
+#include "mpl_logging.h"
+#include "mpl_timer.h"
+#include "driver_runner.h"
+
+namespace maple {
+using namespace maplebe;
+
+DefaultOption MplcgCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const {
+  uint32_t len = 0;
+  MplOption *kMplcgDefaultOptions = nullptr;
+
+  if (opts::o0) {
+    len = sizeof(kMplcgDefaultOptionsO0) / sizeof(MplOption);
+    kMplcgDefaultOptions = kMplcgDefaultOptionsO0;
+  } else if (opts::o2) {
+    len = sizeof(kMplcgDefaultOptionsO2) / sizeof(MplOption);
+    kMplcgDefaultOptions = kMplcgDefaultOptionsO2;
+  }
+
+  if (kMplcgDefaultOptions == nullptr) {
+    return DefaultOption();
+  }
+
+  DefaultOption defaultOptions = { std::make_unique<MplOption[]>(len), len };
+  for (uint32_t i = 0; i < len; ++i) {
+    defaultOptions.mplOptions[i] = kMplcgDefaultOptions[i];
+  }
+
+  for (uint32_t i = 0; i < defaultOptions.length; ++i) {
+    defaultOptions.mplOptions[i].SetValue(
+        FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(),
+                                           defaultOptions.mplOptions[i].GetValue(),
+                                           options.GetExeFolder()));
+  }
+  return defaultOptions;
+}
+
+const std::string &MplcgCompiler::GetBinName() const {
+  return kBinNameMplcg;
+}
+
+std::string MplcgCompiler::GetInputFile(const MplOptions &options, const Action &action, const MIRModule *md) const {
+  if (action.IsItFirstRealAction()) {
+    return action.GetInputFile();
+  }
+  // Get the base file name
+  auto idx = action.GetOutputName().find(".VtableImpl");
+  std::string outputName = action.GetOutputName();
+  if (idx != std::string::npos) {
+    outputName = action.GetOutputName().substr(0, idx);
+  }
+  if (md != nullptr && md->GetSrcLang() == kSrcLangC) {
+    return action.GetOutputFolder() + outputName + ".me.mpl";
+  }
+  return action.GetOutputFolder() + outputName + ".VtableImpl.mpl";
+}
+
+std::string MplcgCompiler::GetInputText(const MplOptions &, const Action &action,
+                                        const MIRModule *md) const {
+  if (action.IsItFirstRealAction()) {
+    return action.GetInputText();
+  }
+  // TODO: get the base file text
+  return "";
+}
+
+void MplcgCompiler::SetOutputFileName(const MplOptions &options, const Action &action, const MIRModule &md) {
+  if (md.GetSrcLang() == kSrcLangC) {
+    baseName = action.GetFullOutputName();
+  } else {
+    baseName = action.GetOutputFolder() + FileUtils::GetFileName(GetInputFile(options, action, &md), false);
+  }
+  outputFile = baseName + ".s";
+}
+
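+/*
+ * Naming sketch (paths hypothetical): outputFile is always baseName + ".s"; for
+ * a C module baseName is the action's full output name (out/foo -> out/foo.s),
+ * otherwise it is rebuilt from the input file's name in the output folder.
+ */
+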
+void MplcgCompiler::PrintMplcgCommand(const MplOptions &options, const Action &action,
+                                      const MIRModule &md) const {
+  std::string runStr = "--run=";
+  std::string optionStr = "--option=\"";
+  if (options.GetExeOptions().find(kBinNameMplcg) != options.GetExeOptions().end()) {
+    runStr += "mplcg";
+    auto it = options.GetExeOptions().find(kBinNameMplcg);
+    if (it == options.GetExeOptions().end()) {
+      return;
+    }
+    for (auto &opt : it->second) {
+      optionStr += (" " + opt);
+    }
+  }
+  optionStr += "\"";
+
+  std::string driverOptions = options.GetCommonOptionsStr();
+
+  LogInfo::MapleLogger() << "Starting:" << options.GetExeFolder() << "maple " << runStr << " " << optionStr << " "
+                         << driverOptions << "--infile " << GetInputFile(options, action, &md) << '\n';
+}
+
+ErrorCode MplcgCompiler::MakeCGOptions(const MplOptions &options) {
+  auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMplcg);
+  if (it == options.GetRunningExes().end()) {
+    return kErrorNoError;
+  }
+  CGOptions &cgOption = CGOptions::GetInstance();
+  cgOption.SetOption(CGOptions::kDefaultOptions);
+#if DEBUG
+  /* for convenience, .loc is generated by default for the debug maple compiler */
+  cgOption.SetOption(CGOptions::kWithLoc);
+#endif
+  /* use maple flags to set cg flags */
+  if (opts::withDwarf) {
+    cgOption.SetOption(CGOptions::kWithDwarf);
+  }
+  cgOption.SetGenerateFlags(CGOptions::kDefaultGflags);
+
+  auto itOpt = options.GetExeOptions().find(kBinNameMplcg);
+  if (itOpt != options.GetExeOptions().end()) {
+    const auto &cgExeOpts = itOpt->second;
+    const std::deque<std::string_view> strCgOptions(cgExeOpts.begin(), cgExeOpts.end());
+    (void)maplecl::CommandLine::GetCommandLine().HandleInputArgs(strCgOptions, cgCategory);
+  }
+
+  bool result = cgOption.SolveOptions(opts::debug);
+  if (!result) {
+    LogInfo::MapleLogger() << "Meet error mplcg options\n";
+    return kErrorCompileFail;
+  }
+  return kErrorNoError;
+}
+
+ErrorCode MplcgCompiler::GetMplcgOptions(MplOptions &options, const Action &action,
+                                         const MIRModule *theModule) {
+  ErrorCode ret;
+  if (options.GetRunMode() == RunMode::kAutoRun) {
+    if (theModule == nullptr) {
+      std::string fileName = GetInputFile(options, action, theModule);
+      MIRModule module(fileName);
+      std::unique_ptr<MIRParser> theParser;
+      theParser.reset(new MIRParser(module));
+      MIRSrcLang srcLang = kSrcLangUnknown;
+      bool parsed = theParser->ParseSrcLang(srcLang);
+      if (!parsed) {
+        return kErrorCompileFail;
+      }
+      ret = options.AppendMplcgOptions(srcLang);
+      if (ret != kErrorNoError) {
+        return kErrorCompileFail;
+      }
+    } else {
+      ret = options.AppendMplcgOptions(theModule->GetSrcLang());
+      if (ret != kErrorNoError) {
+        return kErrorCompileFail;
+      }
+    }
+  }
+
+  ret = MakeCGOptions(options);
+  return ret;
+}
+
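+/*
+ * Option flow above: in custom --run/--option mode the mplcg group parsed by
+ * MakeCGOptions() comes straight from the user, while in RunMode::kAutoRun
+ * GetMplcgOptions() first appends the per-O-level defaults for the module's
+ * source language.
+ */
+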
+ErrorCode MplcgCompiler::Compile(MplOptions &options, const Action &action,
+                                 std::unique_ptr<MIRModule> &theModule) {
+  ErrorCode ret = GetMplcgOptions(options, action, theModule.get());
+  if (ret != kErrorNoError) {
+    return kErrorCompileFail;
+  }
+  CGOptions &cgOption = CGOptions::GetInstance();
+  std::string fileName = GetInputFile(options, action, theModule.get());
+  bool fileRead = true;
+  if (theModule == nullptr) {
+    MPLTimer timer;
+    timer.Start();
+    fileRead = false;
+    theModule = std::make_unique<MIRModule>(fileName);
+    theModule->SetWithMe(
+        std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(),
+                  kBinNameMe) != options.GetRunningExes().end());
+    if (action.GetInputFileType() != InputFileType::kFileTypeBpl &&
+        action.GetInputFileType() != InputFileType::kFileTypeMbc &&
+        action.GetInputFileType() != InputFileType::kFileTypeLmbc) {
+      std::unique_ptr<MIRParser> theParser;
+      theParser.reset(new MIRParser(*theModule));
+      bool parsed = theParser->ParseMIR(0, cgOption.GetParserOption());
+      if (parsed) {
+        if (!CGOptions::IsQuiet() && theParser->GetWarning().size()) {
+          theParser->EmitWarning(fileName);
+        }
+      } else {
+        if (theParser != nullptr) {
+          theParser->EmitError(fileName);
+        }
+        return kErrorCompileFail;
+      }
+    } else {
+      BinaryMplImport binMplt(*theModule);
+      binMplt.SetImported(false);
+      std::string modid = theModule->GetFileName();
+      bool imported = binMplt.Import(modid, true);
+      if (!imported) {
+        return kErrorCompileFail;
+      }
+    }
+    timer.Stop();
+    LogInfo::MapleLogger() << "Mplcg Parser consumed " << timer.ElapsedMilliseconds() << "ms\n";
+  }
+  SetOutputFileName(options, action, *theModule);
+  theModule->SetInputFileName(fileName);
+  LogInfo::MapleLogger() << "Starting mplcg\n";
+  DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName,
+                      opts::withDwarf, fileRead, opts::timePhase);
+  if (opts::debug) {
+    PrintMplcgCommand(options, action, *theModule);
+  }
+  runner.SetPrintOutExe(kBinNameMplcg);
+  runner.SetCGInfo(&cgOption, fileName);
+  runner.ProcessCGPhase(outputFile, baseName);
+  return kErrorNoError;
+}
+
+ErrorCode MplcgCompiler::CompileByText(MplOptions &options, const Action &action,
+                                       std::unique_ptr<MIRModule> &theModule) {
+  ErrorCode ret = GetMplcgOptions(options, action, theModule.get());
+  if (ret != kErrorNoError) {
+    return kErrorCompileFail;
+  }
+  CGOptions &cgOption = CGOptions::GetInstance();
+
+  std::string fileText = GetInputText(options, action, theModule.get());
+  CHECK_FATAL(fileText != "", "file text is empty");
+  std::string fileName = GetInputFile(options, action, theModule.get());
+  bool fileRead = true;
+  if (theModule == nullptr) {
+    MPLTimer timer;
+    timer.Start();
+    fileRead = false;
+
+    theModule = std::make_unique<MIRModule>(fileName);
+    theModule->SetFileText(fileText);
+    theModule->SetWithMe(
+        std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(),
+                  kBinNameMe) != options.GetRunningExes().end());
+    if (action.GetInputFileType() != InputFileType::kFileTypeBpl &&
+        action.GetInputFileType() != InputFileType::kFileTypeMbc &&
+        action.GetInputFileType() != InputFileType::kFileTypeLmbc) {
+      std::unique_ptr<MIRParser> theParser;
+      theParser.reset(new MIRParser(*theModule));
+      bool parsed = theParser->ParseMIR(0, cgOption.GetParserOption());
+      if (parsed) {
+        if (!CGOptions::IsQuiet() && theParser->GetWarning().size()) {
+          theParser->EmitWarning(fileName);
+        }
+      } else {
+        if (theParser != nullptr) {
+          theParser->EmitError(fileName);
+        }
+        return kErrorCompileFail;
+      }
+    } else {
+      BinaryMplImport binMplt(*theModule);
+      binMplt.SetImported(false);
+      std::string modid = theModule->GetFileName();
+      bool imported = binMplt.Import(modid, true);
+      if (!imported) {
+        return kErrorCompileFail;
+      }
+    }
+    timer.Stop();
+    LogInfo::MapleLogger() << "Mplcg Parser consumed " << timer.ElapsedMilliseconds() << "ms\n";
+  }
+  SetOutputFileName(options, action, *theModule);
+  theModule->SetInputFileName(fileName);
+  LogInfo::MapleLogger() << "Starting mplcg\n";
+  DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName,
+                      opts::withDwarf, fileRead, opts::timePhase);
+  if (opts::debug) {
+    PrintMplcgCommand(options, action, *theModule);
+  }
+  runner.SetPrintOutExe(kBinNameMplcg);
+  runner.SetCGInfo(&cgOption, fileName);
+  runner.ProcessCGPhase(outputFile, baseName);
+  return kErrorNoError;
+}
+
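+/*
+ * Compile() reads MIR from a file; CompileByText() takes the module text
+ * directly (e.g. via the --intext plumbing in the LiteCG entry points added
+ * later in this patch). Both finish in DriverRunner::ProcessCGPhase(),
+ * emitting <baseName>.s.
+ */
+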
+} // namespace maple
diff --git a/ecmascript/mapleall/maple_driver/src/triple.cpp b/ecmascript/mapleall/maple_driver/src/triple.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7cbdca38194292e09fc213e6b776d38777633f92
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/src/triple.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#include "triple.h"
+#include "driver_options.h"
+
+namespace opts {
+
+maplecl::Option<bool> bigendian({"-Be", "--Be", "--BigEndian", "-be", "--be", "-mbig-endian"},
+                                " --BigEndian/-Be \tUsing BigEndian\n"
+                                " --no-BigEndian \tUsing LittleEndian\n",
+                                {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory},
+                                maplecl::DisableWith("--no-BigEndian"));
+
+maplecl::Option<bool> ilp32({"--ilp32", "-ilp32", "--arm64-ilp32"},
+                            " --ilp32 \tarm64 with a 32-bit ABI instead of a 64-bit ABI\n",
+                            {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory});
+
+maplecl::Option<std::string> mabi({"-mabi"},
+                                  " -mabi= \tSpecify integer and floating-point calling convention\n",
+                                  {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory});
+
+}
+
+namespace maple {
+
+Triple::ArchType Triple::ParseArch(std::string_view archStr) {
+  if (maple::utils::Contains({"aarch64", "aarch64_le"}, archStr)) {
+    return Triple::ArchType::aarch64;
+  } else if (maple::utils::Contains({"aarch64_be"}, archStr)) {
+    return Triple::ArchType::aarch64_be;
+  }
+
+  // Currently Triple supports only aarch64
+  return Triple::UnknownArch;
+}
+
+Triple::EnvironmentType Triple::ParseEnvironment(std::string_view archStr) {
+  if (maple::utils::Contains({"ilp32", "gnu_ilp32", "gnuilp32"}, archStr)) {
+    return Triple::EnvironmentType::GNUILP32;
+  } else if (maple::utils::Contains({"gnu"}, archStr)) {
+    return Triple::EnvironmentType::GNU;
+  }
+
+  // Currently Triple supports only ilp32 and the default gnu/LP64 ABI
+  return Triple::UnknownEnvironment;
+}
+
+void Triple::Init() {
+  /* Currently Triple is used only to configure aarch64: be/le, ILP32/LP64.
+   * Other architectures (TARGX86_64, TARGX86, TARGARM32, TARGVM) are configured with the compiler build config */
+#if TARGAARCH64
+  arch = (opts::bigendian) ? Triple::ArchType::aarch64_be : Triple::ArchType::aarch64;
+  environment = (opts::ilp32) ? Triple::EnvironmentType::GNUILP32 : Triple::EnvironmentType::GNU;
+
+  if (opts::mabi.IsEnabledByUser()) {
+    auto tmpEnvironment = ParseEnvironment(opts::mabi.GetValue());
+    if (tmpEnvironment != Triple::UnknownEnvironment) {
+      environment = tmpEnvironment;
+    }
+  }
+#endif
+}
+
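+/*
+ * Parsing sketch: Init("aarch64_be-none-linux-gnu") keeps arch = aarch64_be and
+ * picks environment = GNU from the trailing component; a component such as
+ * gnu_ilp32 would select GNUILP32 instead.
+ */
+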
+void Triple::Init(const std::string &target) {
+  data = target;
+
+  /* Currently Triple is used only to configure aarch64: be/le, ILP32/LP64.
+   * Other architectures (TARGX86_64, TARGX86, TARGARM32, TARGVM) are configured with the compiler build config */
+#if TARGAARCH64
+  Init();
+
+  std::vector<std::string_view> components;
+  maple::StringUtils::SplitSV(data, components, '-');
+  if (components.size() == 0) { // at least 1 component must be present
+    return;
+  }
+
+  auto tmpArch = ParseArch(components[0]); // do not overwrite the arch setting made by opts::bigendian
+  if (tmpArch == Triple::UnknownArch) {
+    return;
+  }
+  arch = tmpArch;
+
+  /* Try to check the environment in the option.
+   * For example, it can be: aarch64-none-linux-gnu or aarch64-linux-gnu or aarch64-gnu, where gnu is the environment */
+  for (size_t i = 1; i < components.size(); ++i) {
+    auto tmpEnvironment = ParseEnvironment(components[i]);
+    if (tmpEnvironment != Triple::UnknownEnvironment) {
+      environment = tmpEnvironment;
+      break;
+    }
+  }
+#endif
+}
+
+std::string Triple::GetArchName() const {
+  switch (arch) {
+    case ArchType::aarch64_be: return "aarch64_be";
+    case ArchType::aarch64: return "aarch64";
+    default: DEBUG_ASSERT(false, "Unknown Architecture Type\n");
+  }
+  return "";
+}
+
+std::string Triple::GetEnvironmentName() const {
+  switch (environment) {
+    case EnvironmentType::GNUILP32: return "gnu_ilp32";
+    case EnvironmentType::GNU: return "gnu";
+    default: DEBUG_ASSERT(false, "Unknown Environment Type\n");
+  }
+  return "";
+}
+
+std::string Triple::Str() const {
+  if (!data.empty()) {
+    return data;
+  }
+
+  if (GetArch() != ArchType::UnknownArch &&
+      GetEnvironment() != Triple::EnvironmentType::UnknownEnvironment) {
+    /* only the linux platform is supported, so "-linux-" is hardcoded */
+    return GetArchName() + "-linux-" + GetEnvironmentName();
+  }
+
+  CHECK_FATAL(false, "Only aarch64/aarch64_be GNU/GNUILP32 targets are supported\n");
+  return data;
+}
+
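+/*
+ * Round-trip sketch: with no explicit target string, an aarch64 + GNUILP32
+ * configuration is rendered by Str() as "aarch64-linux-gnu_ilp32".
+ */
+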
+} // namespace maple
diff --git a/ecmascript/mapleall/maple_driver/src/x64/x64_api.cpp b/ecmascript/mapleall/maple_driver/src/x64/x64_api.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d1a5bc3003ae8ee6020ad2b988fffaef9e40b90b
--- /dev/null
+++ b/ecmascript/mapleall/maple_driver/src/x64/x64_api.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include <string>
+#include <vector>
+#include "compiler.h"
+#include "string_utils.h"
+
+using namespace maple;
+
+int LiteCG(std::string inputText, std::string option, std::vector<std::string> globalOption) {
+  MplOptions mplOptions;
+  std::string inText = "--intext=" + inputText;
+  std::string optionStr = "--option=" + option;
+  std::string mplcg = "--run=mplcg";
+  std::vector<std::string> argvs = {mplcg, optionStr, inText};
+  argvs.insert(argvs.end(), globalOption.begin(), globalOption.end());
+  int res = mplOptions.Parse(argvs);
+  std::unique_ptr<MIRModule> theModule;
+  for (const std::unique_ptr<Action> &action : mplOptions.GetActions()) {
+    MplcgCompiler mplcgCompiler("mplcg");
+    mplcgCompiler.CompileByText(mplOptions, *action, theModule);
+  }
+  return res;
+}
+
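+/*
+ * Call sketch (arguments illustrative): LiteCG(mirText, "--verbose-asm", {})
+ * wraps the text as --intext=..., forces --run=mplcg, and drives
+ * CompileByText() over the parsed actions.
+ */
+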
+int LiteCG(std::unique_ptr<MIRModule> theModule, std::string cgOption, std::vector<std::string> globalOption) {
+  MplOptions mplOptions;
+  std::string optionStr = "--option=" + cgOption;
+  std::string mplcg = "--run=mplcg";
+  std::vector<std::string> argvs = {mplcg, optionStr};
+  argvs.insert(argvs.end(), globalOption.begin(), globalOption.end());
+  int res = mplOptions.Parse(argvs);
+  for (const std::unique_ptr<Action> &action : mplOptions.GetActions()) {
+    MplcgCompiler mplcgCompiler("mplcg");
+    mplcgCompiler.CompileByText(mplOptions, *action, theModule);
+  }
+  return res;
+}
diff --git a/ecmascript/mapleall/maple_ipa/BUILD.gn b/ecmascript/mapleall/maple_ipa/BUILD.gn
new file mode 100755
index 0000000000000000000000000000000000000000..af85b5e3c032d8a9b5a362d3011eee98c2f8a202
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/BUILD.gn
@@ -0,0 +1,50 @@
+#
+# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+#
+# OpenArkCompiler is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+# FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+#
+include_directories = [
+  "${MAPLEALL_ROOT}/maple_ir/include",
+  "${MAPLEALL_ROOT}/mempool/include",
+  "${MAPLEALL_ROOT}/maple_util/include",
+  "${MAPLEALL_ROOT}/maple_me/include",
+  "${MAPLEALL_ROOT}/maple_ipa/include",
+  "${MAPLEALL_ROOT}/maple_ipa/include/old",
+  "${MAPLEALL_ROOT}/mpl2mpl/include",
+  "${MAPLEALL_ROOT}/maple_phase/include",
+  "${MAPLEALL_ROOT}/maple_driver/include",
+  "${THIRD_PARTY_ROOT}/bounds_checking_function/include",
+]
+
+src_libmplipa = [
+  "src/old/ipa_escape_analysis.cpp",
+  "src/old/do_ipa_escape_analysis.cpp",
+  "src/old/ea_connection_graph.cpp",
+  "src/old/ipa_option.cpp",
+  "src/ipa_side_effect.cpp",
+  "src/ipa_phase_manager.cpp",
+  "src/prop_return_null.cpp",
+  "src/prop_parameter_type.cpp",
+  "src/ipa_collect.cpp",
+  "src/ipa_clone.cpp",
+]
+
+#configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
+
+static_library("libmplipa") {
+  configs = []
+  configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
+#  cflags_cc += [ "-fPIC" ]
+  sources = src_libmplipa
+  include_dirs = include_directories
+  output_dir = "${root_out_dir}/lib/${HOST_ARCH}"
+}
diff --git a/ecmascript/mapleall/maple_ipa/include/func_desc.def b/ecmascript/mapleall/maple_ipa/include/func_desc.def
new file mode 100644
index 0000000000000000000000000000000000000000..08089847ae06b1c062b4bf412c9a4357211cb331
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/include/func_desc.def
@@ -0,0 +1,536 @@
+ /*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +//assert.h +{"assert", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"static_assert", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, + +//fenv.h +{"feclearexcept", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fetestexcept", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"feraiseexcept", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fegetexceptflag", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"fesetexceptflag", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"fesetround", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fegetround", {FI::kPure, RI::kNoAlias, {}}}, +{"fegetenv", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fesetenv", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"feholdexcept", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"feupdateenv", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, + +//ctype.h +{"isalnum", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isalpha", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"islower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isxdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iscntrl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isgraph", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isspace", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isblank", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isprint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ispunct", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tolower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"toupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atof", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"atoi", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"atol", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"atoll", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"strtol", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoll", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoul", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoull", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtof", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"strtod", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"strtold", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"strtoimax", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoumax", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strcpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"strcpy_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"strncpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strncpy_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strcat", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, 
PI::kReadMemoryOnly}}}, +{"strcat_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"strncat", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strncat_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strxfrm", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strdup", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"strndup", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strlen", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"strnlen_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strcmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strncmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strcoll", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strchr", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strrchr", {FI::kPure, RI::kUnknown, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strspn", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strcspn", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strpbrk", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strstr", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strtok", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strtok_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"memchr", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"memcmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memset", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"memset_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"memcpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memcpy_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memmove", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memmove_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memccpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"strerror", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"strerror_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"strerrorlen_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, + +//complex.h +{"CMPLXF", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"CMPLX", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"CMPLXL", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"crealf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creall", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cimagf", 
{FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cimag", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cimagl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cabsf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cabsl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cargf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"carg", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cargl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"conjf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"conj", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"conjl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cprojf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cproj", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cprojl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"crealf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creall", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cexpf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cexp", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cexpl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"clogf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"clog", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"clogl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cpowf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"cpow", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"cpowl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"csqrtf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csqrt", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csqrtl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccoshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccoshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinhf", 
{FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacoshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacoshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, + +//local.h +{"setlocale", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"localeconv", {FI::kUnknown, RI::kNoAlias, {}}}, + +//math.h +{"abs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"labs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"imaxabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"div", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ldiv", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"lldiv", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"imaxdiv", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabsf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabsl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabs32", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabs64", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabs128", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fmodf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmod", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmodl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remainderf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remainder", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remainderl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remquo", {FI::kUnknown, RI::kAliasParam2, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"remquof", {FI::kUnknown, RI::kAliasParam2, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"remquol", {FI::kUnknown, RI::kAliasParam2, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fma", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmaf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmaxf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmax", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmaxl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fminf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fminl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fdimf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fdim", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fdiml", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nanf", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan", 
{FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nanl", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan32", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan64", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan128", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"exp", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"exp2", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"exp2f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"exp2l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expm1", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expm1f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expm1l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log10", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log10f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log10l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log2", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log2f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log2l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log1p", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log1pf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log1pl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"pow", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"powf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"powl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"sqrt", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sqrtf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sqrtl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cbrt", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cbrtf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cbrtl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"hypot", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"hypotf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"hypotl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"sin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atan2", {FI::kConst, RI::kNoAlias, 
{PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"atan2f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"atan2l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"sinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"coshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"coshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acoshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acoshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erff", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfc", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfcf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfcl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tgamma", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tgammaf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tgammal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lgamma", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lgammaf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lgammal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ceil", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ceilf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ceill", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"floor", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"floorf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"floorl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"trunc", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"truncf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"truncl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"round", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"roundf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"roundl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lround", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lroundf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lroundl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llround", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llroundf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llroundl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nearbyint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nearbyintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nearbyintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"rint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"rintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"rintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lrint", 
{FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lrintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lrintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llrint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llrintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llrintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"frexp", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"frexpf", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"frexpl", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"ldexp", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ldexpf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ldexpl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"modf", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"modff", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"modfl", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"scalbn", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalbnf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalbnl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalbln", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalblnf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalblnl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ilogb", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ilogbf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ilogbl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logb", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logbf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logbl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nextafter", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nextafterf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nextafterl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nexttoward", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nexttowardf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nexttowardl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"copysign", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"copysignf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"copysignl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fpclassify", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isfinite", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isnan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isnormal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"signbit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isgreater", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"isgreaterequal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"isless", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"islessequal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"islessgreater", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, 
PI::kReadSelfOnly}}}, +{"isunordered", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, + +//setjmp.h +{"abort", {FI::kConst, RI::kNoAlias, {}}}, +{"exit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"quick_exit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"_Exit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atexit", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"at_quick_exit", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"system", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"getenv", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"getenv_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"signal", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"raise", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"setjmp", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"longjmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, + +//stdarg.h +{"va_start", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"va_arg", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"va_copy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"va_end", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, + +//stdlib.h +{"malloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"calloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"realloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"free", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"aligned_alloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"mblen", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"mbtowc", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"wctomb", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"wctomb_s", {FI::kUnknown, RI::kAliasParam1, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"mbstowcs", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"mbstowcs_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"mbsinit", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"btowc", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"wctob", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"mbrlen", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"mbrtowc", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcrtomb", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcrtomb_s", {FI::kUnknown, RI::kAliasParam1, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"mbsrtowcs", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"mbsrtowcs_s", {FI::kUnknown, RI::kAliasParam1, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcsrtombs", {FI::kUnknown, RI::kAliasParam0, 
{PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcsrtombs_s", {FI::kUnknown, RI::kAliasParam1, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, + +//wctype.h +{"iswalnum", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswalpha", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswlower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswxdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswcntrl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswgraph", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswspace", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswblank", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswprint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswpunct", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswctype", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"towlower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"towupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"towctrans", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, + +//time.h +{"difftime", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"time", {FI::kPure, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"timespec_get", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"timespec_getes", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"asctime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"asctime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"asctime_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"ctime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"ctime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"ctime_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"strftime", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"wcsftime", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"gmtime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"gmtime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"gmtime_s", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"localtime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"localtime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"localtime_s", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"mktime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, + +//stdalign.h +{"offsetof", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, + +//stdio.h +{"fopen", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fopen_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"freopen", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"freopen_s", {FI::kUnknown, 
RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fclose", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fflush", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"setbuf", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"setvbuf", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fwide", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"fread", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"fwrite", {FI::kUnknown, RI::kAliasParam3, {PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fgetc", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"getc", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fgets", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fputc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"putc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fputs", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"getchar", {FI::kUnknown, RI::kNoAlias, {}}}, +{"gets", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"gets_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"putchar", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"puts", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"ungetc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fgetwc", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"getwc", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fgetws", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"fputwc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"putwc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fputws", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"getwchar", {FI::kUnknown, RI::kNoAlias, {}}}, +{"putwchar", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ungetwc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"scanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"sscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"scanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"sscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"printf", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, 
PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"sprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"snprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"printf_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"sprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"snprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"wscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fwscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"swscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"wscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fwscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"swscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"wprintf", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fwprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"swprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"wprintf_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fwprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"swprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"snwprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, 
PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"ftell", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fgetpos", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fseek", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fsetpos", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"rewind", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"clearerr", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"feof", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"ferror", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"perror", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"remove", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"rename", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"tmpnam", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"tmpnam_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, + +//for java +{"MCC_GetOrInsertLiteral", {FI::kPure, RI::kUnknown, {PI::kReadMemoryOnly}}} diff --git a/ecmascript/mapleall/maple_ipa/include/ipa_clone.h b/ecmascript/mapleall/maple_ipa/include/ipa_clone.h new file mode 100644 index 0000000000000000000000000000000000000000..6b60f3d873cdf82aae5dd4621aea485dabf13b7d --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/ipa_clone.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IPA_INCLUDE_IPACLONE_H +#define MAPLE_IPA_INCLUDE_IPACLONE_H +#include "mir_module.h" +#include "mir_function.h" +#include "mir_builder.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "class_hierarchy_phase.h" +#include "me_ir.h" +#include "maple_phase_manager.h" +namespace maple { +constexpr uint32 kNumOfImpExprUpper = 64; +class IpaClone : public AnalysisResult { + public: + IpaClone(MIRModule *mod, MemPool *memPool, MIRBuilder &builder) + : AnalysisResult(memPool), mirModule(mod), allocator(memPool), mirBuilder(builder), curFunc(nullptr) {} + ~IpaClone() { + mirModule = nullptr; + curFunc = nullptr; + } + + static MIRSymbol *IpaCloneLocalSymbol(const MIRSymbol &oldSym, const MIRFunction &newFunc); + static void IpaCloneSymbols(MIRFunction &newFunc, const MIRFunction &oldFunc); + static void IpaCloneLabels(MIRFunction &newFunc, const MIRFunction &oldFunc); + static void IpaClonePregTable(MIRFunction &newFunc, const MIRFunction &oldFunc); + MIRFunction *IpaCloneFunction(MIRFunction &originalFunction, const std::string &fullName) const; + MIRFunction *IpaCloneFunctionWithFreq(MIRFunction &originalFunction, + const std::string &fullName, int64_t callSiteFreq) const; + void DoIpaClone(); + void InitParams(); + void CopyFuncInfo(MIRFunction &originalFunction, MIRFunction &newFunc) const; + void IpaCloneArgument(MIRFunction &originalFunction, ArgVector &argument) const; + void RemoveUnneedParameter(MIRFunction *newFunc, uint32 paramIndex, int64_t value); + void DecideCloneFunction(std::vector &result, uint32 paramIndex, std::map> &evalMap); + void ReplaceIfCondtion(MIRFunction *newFunc, std::vector &result, uint64_t res); + void EvalCompareResult(std::vector &result, std::map> &evalMap, + std::map> &summary, uint32 index); + void EvalImportantExpression(MIRFunction *func, std::vector &result); + bool CheckCostModel(MIRFunction *newFunc, uint32 paramIndex, std::vector &calleeValue, uint32 impSize); + void ComupteValue(const IntVal& value, const IntVal& paramValue, CompareNode *cond, uint64_t &bitRes); + void CloneNoImportantExpressFunction(MIRFunction *func, uint32 paramIndex); + void ModifyParameterSideEffect(MIRFunction *newFunc, uint32 paramIndex); + + private: + MIRModule *mirModule; + MapleAllocator allocator; + MIRBuilder &mirBuilder; + MIRFunction *curFunc; + uint32 numOfCloneVersions = 0; + uint32 numOfImpExprLowBound = 0; + uint32 numOfImpExprHighBound = 0; + uint32 numOfCallSiteLowBound = 0; + uint32 numOfCallSiteUpBound = 0; + uint32 numOfConstpropValue = 0; +}; +MAPLE_MODULE_PHASE_DECLARE_BEGIN(M2MIpaClone) + IpaClone *GetResult() { + return cl; + } + IpaClone *cl = nullptr; +OVERRIDE_DEPENDENCE +MAPLE_MODULE_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_IPACLONE_H diff --git a/ecmascript/mapleall/maple_ipa/include/ipa_collect.h b/ecmascript/mapleall/maple_ipa/include/ipa_collect.h new file mode 100644 index 0000000000000000000000000000000000000000..7af1748f1eb155d32796c9febc228d1c1bcbcb45 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/ipa_collect.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
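ipa_clone.h above declares the constant-propagation cloning pass: when enough call sites pass the same constant for a parameter, DoIpaClone produces a specialized copy (IpaCloneFunction), redirects those call sites, and lets RemoveUnneedParameter and the if-condition rewriting simplify the clone. A simplified sketch of the call-site-count gate follows; the CallSite/EvalMap types are hypothetical, and in the real pass the bounds come from InitParams.

```
#include <cstdint>
#include <map>
#include <vector>

struct CallSite { uint32_t callerPuIdx; uint32_t stmtId; };  // hypothetical

// For one parameter index: which constant values arrive from which call sites.
using EvalMap = std::map<int64_t, std::vector<CallSite>>;

// Keep only constants whose call-site count falls inside the configured
// bounds, and never emit more than numOfCloneVersions clones per function.
std::vector<int64_t> PickCloneValues(const EvalMap &evalMap,
                                     uint32_t numOfCloneVersions,
                                     uint32_t callSiteLowBound,
                                     uint32_t callSiteUpBound) {
  std::vector<int64_t> picked;
  for (const auto &[value, sites] : evalMap) {
    if (picked.size() >= numOfCloneVersions) {
      break;  // clone-count budget exhausted
    }
    if (sites.size() >= callSiteLowBound && sites.size() <= callSiteUpBound) {
      picked.push_back(value);  // worth a clone specialized on `value`
    }
  }
  return picked;
}
```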
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_COLLECT_IPA_INFO_H +#define MAPLE_IPA_INCLUDE_COLLECT_IPA_INFO_H +#include "mir_nodes.h" +#include "mir_builder.h" +#include "call_graph.h" +#include "me_ir.h" +#include "me_irmap.h" +#include "dominance.h" +#include "class_hierarchy.h" +#include "module_phase_manager.h" +#include "maple_phase.h" +#include "ipa_phase_manager.h" +#include "mir_module.h" +namespace maple { +union ParamValue { + bool valueBool; + int64_t valueInt; + float valueFloat; + double valueDouble; +}; + +enum valueType { + kBool, + kInt, + kFloat, + kDouble, +}; + +class CollectIpaInfo { + public: + CollectIpaInfo(MIRModule &mod, AnalysisDataManager &dataMap) + : module(mod), builder(*mod.GetMIRBuilder()), + dataMap(dataMap), curFunc(nullptr) {} + virtual ~CollectIpaInfo() = default; + void runOnScc(maple::SCCNode &scc); + void UpdateCaleeParaAboutFloat(MeStmt &meStmt, float paramValue, uint32 index, CallerSummary &summary); + void UpdateCaleeParaAboutDouble(MeStmt &meStmt, double paramValue, uint32 index, CallerSummary &summary); + void UpdateCaleeParaAboutInt(MeStmt &meStmt, int64_t paramValue, uint32 index, CallerSummary &summary); + bool IsConstKindValue(MeExpr *expr); + bool CheckImpExprStmt(const MeStmt &meStmt); + bool CollectImportantExpression(const MeStmt &meStmt, uint32 &index); + void TraversalMeStmt(MeStmt &meStmt); + bool IsParameterOrUseParameter(const VarMeExpr *varExpr, uint32 &index); + void Perform(const MeFunction &func); + + private: + MIRModule &module; + MIRBuilder &builder; + AnalysisDataManager &dataMap; + MIRFunction *curFunc; +}; +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCCollectIpaInfo, maple::SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_COLLECT_IPA_INFO_H diff --git a/ecmascript/mapleall/maple_ipa/include/ipa_phase_manager.h b/ecmascript/mapleall/maple_ipa/include/ipa_phase_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..ba3f67727273ddd2975e7f0e61e475438e1f78dc --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/ipa_phase_manager.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
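The ParamValue/valueType pair in ipa_collect.h above is a hand-rolled tagged union for recording constant actuals of different types at call sites; UpdateCaleeParaAboutInt and its float/double siblings each fill one arm. A minimal usage sketch: the union and enum are copied from the header, while the TaggedParam wrapper and helper are hypothetical.

```
#include <cstdint>

union ParamValue {
  bool valueBool;
  int64_t valueInt;
  float valueFloat;
  double valueDouble;
};

enum valueType { kBool, kInt, kFloat, kDouble };

struct TaggedParam {     // hypothetical pairing of value and tag
  ParamValue value;
  valueType type;
};

// Record an int constant seen at a call site, as UpdateCaleeParaAboutInt
// would; only the union member matching the tag may be read back later.
TaggedParam MakeIntParam(int64_t v) {
  TaggedParam p;
  p.value.valueInt = v;
  p.type = kInt;
  return p;
}
```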
+ */ +#ifndef MAPLE_IPA_INCLUDE_IPA_PHASE_MANAGER_H +#define MAPLE_IPA_INCLUDE_IPA_PHASE_MANAGER_H +#include +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "mir_module.h" +#include "mir_function.h" +#include "me_phase_manager.h" +#include "ipa_side_effect.h" +#include "prop_return_null.h" +#include "prop_parameter_type.h" +#include "ipa_collect.h" + +namespace maple { +/* ==== new phase manager ==== */ +class IpaSccPM : public SccPM { + public: + explicit IpaSccPM(MemPool *memPool) : SccPM(memPool, &id) {} + bool PhaseRun(MIRModule &m) override; + PHASECONSTRUCTOR(IpaSccPM); + ~IpaSccPM() override {} + std::string PhaseName() const override; + private: + void GetAnalysisDependence(AnalysisDep &aDep) const override; + virtual void DoPhasesPopulate(const MIRModule &mirModule); +}; + +class SCCPrepare : public MapleSccPhase>, public MaplePhaseManager { + public: + explicit SCCPrepare(MemPool *mp) : MapleSccPhase>(&id, mp), MaplePhaseManager(*mp) {} + ~SCCPrepare() override = default; + std::string PhaseName() const override; + PHASECONSTRUCTOR(SCCPrepare); + bool PhaseRun(SCCNode &f) override; + void Dump(const MeFunction &f, const std::string phaseName); + AnalysisDataManager *GetResult() { + return result; + } + private: + AnalysisDataManager *result = nullptr; +}; + +class SCCEmit : public MapleSccPhase>, public MaplePhaseManager { + public: + explicit SCCEmit(MemPool *mp) : MapleSccPhase>(&id, mp), MaplePhaseManager(*mp) {} + ~SCCEmit() override = default; + std::string PhaseName() const override; + PHASECONSTRUCTOR(SCCEmit); + bool PhaseRun(SCCNode &f) override; + void Dump(const MeFunction &f, const std::string phaseName); + private: + void GetAnalysisDependence(maple::AnalysisDep &aDep) const override; +}; + +class SCCProfile : public MapleSccPhase>, public MaplePhaseManager { + public: + explicit SCCProfile(MemPool *mp) : MapleSccPhase>(&id, mp), MaplePhaseManager(*mp) {} + ~SCCProfile() override { + result = nullptr; + } + std::string PhaseName() const override; + PHASECONSTRUCTOR(SCCProfile); + bool PhaseRun(SCCNode &f) override; + AnalysisDataManager *GetResult() { + return result; + } + private: + AnalysisDataManager *result = nullptr; +}; +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_IPA_PHASE_MANAGER_H diff --git a/ecmascript/mapleall/maple_ipa/include/ipa_side_effect.h b/ecmascript/mapleall/maple_ipa/include/ipa_side_effect.h new file mode 100644 index 0000000000000000000000000000000000000000..d98b9fb62f63a60ea9f17f215f660ed69c281a24 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/ipa_side_effect.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
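ipa_phase_manager.h above wires the interprocedural passes as SCC phases: SCCPrepare sets up per-function analyses, the real work (side effect, return-null and parameter-type propagation, info collection) runs per call-graph SCC, and SCCEmit writes results back. Processing per SCC lets a caller read finished callee summaries. The sketch below shows that ordering under one stated assumption: the SCC list is topological with callers first, as call-graph condensations typically are, so reverse iteration visits callees first.

```
#include <functional>
#include <vector>

struct Func { int id = 0; };
struct Scc { std::vector<Func*> funcs; };    // one call-graph SCC
struct Summary { bool defGlobal = false; };  // e.g. what SCCSideEffect records

// Visit SCCs callee-first so every summary a caller reads already exists.
void RunSccPhase(const std::vector<Scc> &sccsCallersFirst,
                 const std::function<Summary(const Scc &)> &phase,
                 std::vector<Summary> &summaries) {
  for (auto it = sccsCallersFirst.rbegin(); it != sccsCallersFirst.rend(); ++it) {
    summaries.push_back(phase(*it));
  }
}
```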
+ */ +#ifndef MAPLE_IPA_INCLUDE_IPASIDEEFFECT_H +#define MAPLE_IPA_INCLUDE_IPASIDEEFFECT_H +#include "me_phase_manager.h" +#include "ipa_phase_manager.h" + +namespace maple { +class SideEffect { + public: + SideEffect(MeFunction *meFunc, Dominance *dom, AliasClass *alias, CallGraph *cg) + : meFunc(meFunc), dom(dom), alias(alias), callGraph(cg) { + defGlobal = false; + defArg = false; + useGlobal = false; + vstsValueAliasWithFormal.resize(std::min(meFunc->GetMirFunc()->GetFormalCount(), kMaxParamCount)); + } + ~SideEffect() { + alias = nullptr; + dom = nullptr; + meFunc = nullptr; + curFuncDesc = nullptr; + } + bool Perform(MeFunction &f); + static const FuncDesc &GetFuncDesc(MeFunction &f); + static const FuncDesc &GetFuncDesc(MIRFunction &f); + static const std::map &GetWhiteList(); + + private: + void DealWithOperand(MeExpr *expr); + void DealWithOst(OStIdx ostIdx); + void DealWithStmt(MeStmt &stmt); + void PropAllInfoFromCallee(const MeStmt &call, MIRFunction &callee); + void PropParamInfoFromCallee(const MeStmt &call, MIRFunction &callee); + void PropInfoFromOpnd(MeExpr &opnd, const PI &calleeParamInfo); + void ParamInfoUpdater(size_t vstIdx, const PI &calleeParamInfo); + void DealWithOst(const OriginalSt *ost); + void DealWithReturn(const RetMeStmt &retMeStmt); + void AnalysisFormalOst(); + void SolveVarArgs(MeFunction &f); + void CollectFormalOst(MeFunction &f); + void CollectAllLevelOst(size_t vstIdx, std::set &result); + + std::set> analysisLater; + std::vector> vstsValueAliasWithFormal; + MeFunction *meFunc = nullptr; + FuncDesc *curFuncDesc = nullptr; + Dominance *dom = nullptr; + AliasClass *alias = nullptr; + CallGraph *callGraph = nullptr; + + bool defGlobal = false; + bool defArg = false; + bool useGlobal = false; +}; + +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCSideEffect, SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_IPASIDEEFFECT_H diff --git a/ecmascript/mapleall/maple_ipa/include/old/do_ipa_escape_analysis.h b/ecmascript/mapleall/maple_ipa/include/old/do_ipa_escape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..877e39ac198032b8094e4a7553a4ebfc34b596f2 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/old/do_ipa_escape_analysis.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
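ipa_side_effect.h above computes, per function, whether globals are read or written and whether pointer arguments are written (the defGlobal/useGlobal/defArg flags), pulling callee behavior into the caller at each call via PropAllInfoFromCallee and PropParamInfoFromCallee. The merge is a monotone flag join, which is what makes iterating an SCC to a fixpoint terminate. A minimal sketch with a simplified stand-in for the real FuncDesc:

```
struct EffectBits {          // simplified stand-in for the real FuncDesc
  bool defGlobal = false;    // writes some global
  bool useGlobal = false;    // reads some global
  bool defArg = false;       // writes through a pointer parameter
};

// Flags only move from false to true, so the SCC fixpoint iteration ends.
void JoinCalleeIntoCaller(EffectBits &caller, const EffectBits &callee) {
  caller.defGlobal |= callee.defGlobal;
  caller.useGlobal |= callee.useGlobal;
  // defArg is deliberately not joined wholesale: the callee writes *its own*
  // arguments, and that taints the caller only when the caller forwards one
  // of its parameters -- the per-operand case PropParamInfoFromCallee handles.
}
```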
+ */ +#ifndef INCLUDE_MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H +#define INCLUDE_MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H +#include "mir_parser.h" +#include "mir_function.h" +#include "me_function.h" +#include "opcode_info.h" +#include "mir_builder.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "call_graph.h" +#include "mir_nodes.h" +#include "me_ir.h" +#include "me_irmap.h" +#include "ipa_escape_analysis.h" +#include "me_loop_analysis.h" + +namespace maple { +#ifdef NOT_USED +class DoIpaEA : public MeFuncPhase { + public: + explicit DoIpaEA(MePhaseID id) : MeFuncPhase(id) {} + ~DoIpaEA() = default; + AnalysisResult *Run(MeFunction*, MeFuncResultMgr*, ModuleResultMgr*) override; + std::string PhaseName() const override { + return "ipaea"; + } +}; + +class DoIpaEAOpt : public MeFuncPhase { + public: + explicit DoIpaEAOpt(MePhaseID id) : MeFuncPhase(id) {} + ~DoIpaEAOpt() = default; + AnalysisResult *Run(MeFunction*, MeFuncResultMgr*, ModuleResultMgr*) override; + std::string PhaseName() const override { + return "ipaeaopt"; + } +}; +#endif +} +#endif // INCLUDE_MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H diff --git a/ecmascript/mapleall/maple_ipa/include/old/ea_connection_graph.h b/ecmascript/mapleall/maple_ipa/include/old/ea_connection_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..5dec11bca8057df0be7692e7117a54eab97a59bf --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/old/ea_connection_graph.h @@ -0,0 +1,697 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
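do_ipa_escape_analysis.h above keeps the pre-phase-manager wrappers under NOT_USED: in the old scheme a pass was a MeFuncPhase whose Run received the function plus result managers, with PhaseName as its registry key. A hypothetical mirror of that shape, with stand-in types since maple's real base classes are not reproduced here:

```
#include <string>

struct MeFunction;      // stand-in forward declarations
struct AnalysisResult;

class LegacyFuncPhase {  // hypothetical mirror of the old MeFuncPhase shape
 public:
  virtual ~LegacyFuncPhase() = default;
  virtual AnalysisResult *Run(MeFunction *func) = 0;  // old entry point
  virtual std::string PhaseName() const = 0;          // registry key
};

class DoIpaEALike : public LegacyFuncPhase {
 public:
  AnalysisResult *Run(MeFunction * /*func*/) override {
    return nullptr;  // the NOT_USED wrappers above are compiled out entirely
  }
  std::string PhaseName() const override {
    return "ipaea";
  }
};
```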
+ */ +#ifndef MAPLEIPA_INCLUDE_ESCAPEANALYSIS_H +#define MAPLEIPA_INCLUDE_ESCAPEANALYSIS_H +#include +#include +#include +#include "call_graph.h" +#include "me_ir.h" +#include "irmap.h" + +namespace maple { +enum NodeKind { + kObejectNode, + kReferenceNode, + kActualNode, + kFieldNode, + kPointerNode +}; + +enum EAStatus { + kNoEscape, + kReturnEscape, + kArgumentEscape, + kGlobalEscape +}; + +const inline std::string EscapeName(EAStatus esc) { + switch (esc) { + case kNoEscape: + return "NoEsc"; + case kReturnEscape: + return "RetEsc"; + case kArgumentEscape: + return "ArgEsc"; + case kGlobalEscape: + return "GlobalEsc"; + default: + return ""; + } +} + +class Location { + public: + Location(const std::string &modName, uint32 fileId, uint32 lineId) + : modName(modName), + fileId(fileId), + lineId(lineId) {}; + ~Location() = default; + + const std::string &GetModName() const { + return modName; + } + + uint32 GetFileId() const { + return fileId; + } + + uint32 GetLineId() const { + return lineId; + } + + private: + std::string modName; + uint32 fileId; + uint32 lineId; +}; + +class EACGBaseNode; +class EACGObjectNode; +class EACGFieldNode; +class EACGRefNode; +class EACGActualNode; +class EACGPointerNode; + +class EAConnectionGraph { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGBaseNode; + friend class EACGObjectNode; + friend class EACGFieldNode; + friend class EACGRefNode; + friend class EACGPointerNode; + // If import is false, need init globalNode. + EAConnectionGraph(MIRModule *m, MapleAllocator *allocator, const GStrIdx &funcName, bool import = false) + : mirModule(m), + alloc(allocator), + nodes(allocator->Adapter()), + expr2Nodes(allocator->Adapter()), + funcArgNodes(allocator->Adapter()), + callSite2Nodes(allocator->Adapter()), + funcStIdx(funcName), + hasUpdated(false), + needConv(false), + imported(import), + exprIdMax(0), + globalObj(nullptr), + globalRef(nullptr), + globalField(nullptr) {}; + ~EAConnectionGraph() = default; + + EACGObjectNode *CreateObjectNode(MeExpr *expr, EAStatus initialEas, bool isPh, TyIdx tyIdx); + EACGRefNode *CreateReferenceNode(MeExpr *expr, EAStatus initialEas, bool isStatic); + EACGActualNode *CreateActualNode(EAStatus initialEas, bool isReurtn, bool isPh, uint8 argIdx, + uint32 callSiteInfo); + EACGFieldNode *CreateFieldNode(MeExpr *expr, EAStatus initialEas, FieldID fId, EACGObjectNode *belongTo, bool isPh); + EACGPointerNode *CreatePointerNode(MeExpr *expr, EAStatus initialEas, int inderictL); + EACGBaseNode *GetCGNodeFromExpr(MeExpr *me); + EACGFieldNode *GetOrCreateFieldNodeFromIdx(EACGObjectNode &obj, int32 fieldID); + EACGActualNode *GetReturnNode() const; + const MapleVector *GetFuncArgNodeVector() const; + void TouchCallSite(uint32 callSiteInfo); + MapleVector *GetCallSiteArgNodeVector(uint32 callSite); + bool ExprCanBeOptimized(MeExpr &expr); + + bool CGHasUpdated() const { + return hasUpdated; + } + + void UnSetCGUpdateFlag() { + hasUpdated = false; + } + + void SetCGHasUpdated() { + hasUpdated = true; + } + + void SetExprIdMax(int max) { + exprIdMax = max; + } + + void SetNeedConservation() { + needConv = true; + } + + bool GetNeedConservation() const { + return needConv; + } + + GStrIdx GetFuncNameStrIdx() const { + return funcStIdx; + } + + EACGObjectNode *GetGlobalObject() { + return globalObj; + } + + const EACGObjectNode *GetGlobalObject() const { + return globalObj; + } + + EACGRefNode *GetGlobalReference() { + return globalRef; + } + + const EACGRefNode *GetGlobalReference() const { 
+ return globalRef; + } + + const MapleVector &GetNodes() const { + return nodes; + } + + void ResizeNodes(size_t size, EACGBaseNode *val) { + nodes.resize(size, val); + } + + EACGBaseNode *GetNode(uint32 idx) const { + CHECK_FATAL(idx < nodes.size(), "array check fail"); + return nodes[idx]; + } + + void SetNodeAt(size_t index, EACGBaseNode *val) { + nodes[index] = val; + } + + const MapleVector &GetFuncArgNodes() const { + return funcArgNodes; + } + + const MapleMap*> &GetCallSite2Nodes() const { + return callSite2Nodes; + } + + void InitGlobalNode(); + void AddMaps2Object(EACGObjectNode *caller, EACGObjectNode *callee); + void UpdateExprOfNode(EACGBaseNode &node, MeExpr *me); + void UpdateExprOfGlobalRef(MeExpr *me); + void PropogateEAStatus(); + bool MergeCG(MapleVector &caller, const MapleVector *callee); + void TrimGlobalNode() const; + void UpdateEACGFromCaller(const MapleVector &callerCallSiteArg, + const MapleVector &calleeFuncArg); + void DumpDotFile(const IRMap *irMap, bool dumpPt, MapleVector *dumpVec = nullptr); + void DeleteEACG() const; + void RestoreStatus(bool old); + void CountObjEAStatus() const; + + const std::string &GetFunctionName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx); + } + + private: + MIRModule *mirModule; + MapleAllocator *alloc; + MapleVector nodes; + MapleMap*> expr2Nodes; + // this vector contain func arg nodes first in declaration order and the last is return node + MapleVector funcArgNodes; + MapleMap*> callSite2Nodes; + GStrIdx funcStIdx; + bool hasUpdated; + bool needConv; + bool imported; + int exprIdMax; + EACGObjectNode *globalObj; + EACGRefNode *globalRef; + EACGFieldNode *globalField; + // this is used as a tmp varible for merge cg + std::map> callee2Caller; + void CheckArgNodeOrder(MapleVector &funcArgV); + void UpdateCallerNodes(const MapleVector &caller, const MapleVector &callee); + void UpdateCallerRetNode(MapleVector &caller, const MapleVector &callee); + void UpdateCallerEdges(); + void UpdateCallerEdgesInternal(EACGObjectNode *node1, int32 fieldID, EACGObjectNode *node2); + void UpdateNodes(const EACGBaseNode &actualInCallee, EACGBaseNode &actualInCaller, bool firstTime); + void UpdateCallerWithCallee(EACGObjectNode &objInCaller, const EACGObjectNode &objInCallee, bool firstTime); + + void SetCGUpdateFlag() { + hasUpdated = true; + } +}; + +class EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGObjectNode; + friend class EACGFieldNode; + friend class EACGActualNode; + friend class EACGRefNode; + friend class EACGPointerNode; + friend class EAConnectionGraph; + + EACGBaseNode(MIRModule *m, MapleAllocator *a, NodeKind nk, EAConnectionGraph *ec) + : locInfo(nullptr), mirModule(m), alloc(a), kind(nk), meExpr(nullptr), eaStatus(kNoEscape), id(0), eaCG(ec) {} + + EACGBaseNode(MIRModule *m, MapleAllocator *a, NodeKind nk, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, + int i) + : locInfo(nullptr), mirModule(m), alloc(a), kind(nk), meExpr(expr), eaStatus(initialEas), id(i), eaCG(&ec) { + ec.SetCGUpdateFlag(); + } + + virtual ~EACGBaseNode() = default; + + virtual bool IsFieldNode() const { + return kind == kFieldNode; + } + + virtual bool IsObjectNode() const { + return kind == kObejectNode; + } + + virtual bool IsReferenceNode() const { + return kind == kReferenceNode; + } + + virtual bool IsActualNode() const { + return kind == kActualNode; + } + + virtual bool IsPointerNode() const { + return kind == kPointerNode; + } + + virtual const 
MeExpr *GetMeExpr() const { + return meExpr; + } + + virtual void SetMeExpr(MeExpr &newExpr) { + if (IsFieldNode() && newExpr.GetMeOp() != kMeOpIvar && newExpr.GetMeOp() != kMeOpOp) { + CHECK_FATAL(false, "must be kMeOpIvar or kMeOpOp"); + } else if (IsReferenceNode() == true && newExpr.GetMeOp() != kMeOpVar && newExpr.GetMeOp() != kMeOpReg && + newExpr.GetMeOp() != kMeOpAddrof && newExpr.GetMeOp() != kMeOpConststr) { + CHECK_FATAL(false, "must be kMeOpVar, kMeOpReg, kMeOpAddrof or kMeOpConststr"); + } + meExpr = &newExpr; + } + + const std::set &GetPointsToSet() const { + CHECK_FATAL(!IsPointerNode(), "must be pointer node"); + return pointsTo; + }; + + virtual bool AddOutNode(EACGBaseNode &newOut); + + virtual EAStatus GetEAStatus() const { + return eaStatus; + } + + virtual const std::set &GetInSet() const { + return in; + } + + virtual void InsertInSet(EACGBaseNode *val) { + (void)in.insert(val); + } + + virtual const std::set &GetOutSet() const { + CHECK_FATAL(IsActualNode(), "must be actual node"); + return out; + } + + virtual void InsertOutSet(EACGBaseNode *val) { + (void)out.insert(val); + } + + virtual bool UpdateEAStatus(EAStatus newEas) { + if (newEas > eaStatus) { + eaStatus = newEas; + PropagateEAStatusForNode(this); + eaCG->SetCGUpdateFlag(); + return true; + } + return false; + } + + bool IsBelongTo(const EAConnectionGraph *cg) const { + return this->eaCG == cg; + } + + const EAConnectionGraph *GetEACG() const { + return eaCG; + } + + EAConnectionGraph *GetEACG() { + return eaCG; + } + + void SetEACG(EAConnectionGraph *cg) { + this->eaCG = cg; + } + + void SetID(int setId) { + this->id = static_cast(setId); + } + + bool CanIgnoreRC() const; + + protected: + Location *locInfo; + MIRModule *mirModule; + MapleAllocator *alloc; + NodeKind kind; + MeExpr *meExpr; + EAStatus eaStatus; + size_t id; + // OBJ<->Field will not in following Set + std::set in; + std::set out; + std::set pointsTo; + EAConnectionGraph *eaCG; + + virtual void CheckAllConnectionInNodes(); + virtual std::string GetName(const IRMap *irMap) const; + virtual void DumpDotFile(std::ostream&, std::map&, bool, const IRMap *irMap = nullptr) = 0; + virtual void PropagateEAStatusForNode(const EACGBaseNode *subRoot) const; + virtual void GetNodeFormatInDot(std::string &label, std::string &color) const; + virtual bool UpdatePointsTo(const std::set &cPointsTo); + + virtual void SetEAStatus(EAStatus status) { + this->eaStatus = status; + } + + virtual NodeKind GetNodeKind() const { + return kind; + } + + private: + virtual bool ReplaceByGlobalNode() { + CHECK_FATAL(false, "impossible"); + return false; + } +}; + +class EACGPointerNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kPointerNode, ec), indirectLevel(0) {} + + EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + int indirectL) + : EACGBaseNode(md, alloc, kPointerNode, ec, expr, initialEas, i), indirectLevel(indirectL) {}; + ~EACGPointerNode() = default; + + void SetLocation(Location *loc) { + this->locInfo = loc; + } + + int GetIndirectLevel() const { + return indirectLevel; + } + + bool AddOutNode(EACGBaseNode &newOut) override { + if (indirectLevel == 1) { + CHECK_FATAL(!newOut.IsPointerNode(), "must be pointer node"); + (void)pointingTo.insert(&newOut); + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + } else { + 
pointingTo.insert(&newOut); + CHECK_FATAL(pointingTo.size() == 1, "the size must be one"); + CHECK_FATAL(newOut.IsPointerNode(), "must be pointer node"); + CHECK_FATAL((indirectLevel - static_cast(newOut).GetIndirectLevel()) == 1, "must be one"); + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + } + return false; + } + + const std::set &GetPointingTo() const { + return pointingTo; + } + + bool UpdatePointsTo(const std::set&) override { + CHECK_FATAL(false, "impossible to update PointsTo"); + return true; + }; + + void PropagateEAStatusForNode(const EACGBaseNode*) const override { + CHECK_FATAL(false, "impossible to propagate EA status for node"); + } + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + void CheckAllConnectionInNodes() override {} + + private: + int indirectLevel; + std::set pointingTo; + bool ReplaceByGlobalNode() override { + CHECK_FATAL(false, "impossible to replace by global node"); + return true; + } +}; + +class EACGObjectNode : public EACGBaseNode { + public: + friend class EACGFieldNode; + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGObjectNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kObejectNode, ec), rcOperations(0), ignorRC(false), isPhantom(false) {} + + EACGObjectNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + bool isPh) + : EACGBaseNode(md, alloc, kObejectNode, ec, expr, initialEas, i), rcOperations(0), ignorRC(false), + isPhantom(isPh) { + (void)pointsBy.insert(this); + (void)pointsTo.insert(this); + }; + ~EACGObjectNode() = default; + bool IsPhantom() const { + return isPhantom == true; + }; + + void SetLocation(Location *loc) { + this->locInfo = loc; + } + + const std::map &GetFieldNodeMap() const { + return fieldNodes; + } + + EACGFieldNode *GetFieldNodeFromIdx(FieldID fId) { + if (fieldNodes.find(-1) != fieldNodes.end()) { // -1 expresses global + return fieldNodes[-1]; + } + if (fieldNodes.find(fId) == fieldNodes.end()) { + return nullptr; + } + return fieldNodes[fId]; + } + + bool AddOutNode(EACGBaseNode &newOut) override; + bool UpdatePointsTo(const std::set&) override { + CHECK_FATAL(false, "impossible"); + return true; + }; + + bool IsPointedByFieldNode() const; + void PropagateEAStatusForNode(const EACGBaseNode *subRoot) const override; + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + void CheckAllConnectionInNodes() override; + + void Insert2PointsBy(EACGBaseNode *node) { + (void)pointsBy.insert(node); + } + + void EraseNodeFromPointsBy(EACGBaseNode *node) { + pointsBy.erase(node); + } + + void IncresRCOperations() { + ++rcOperations; + } + + void IncresRCOperations(int num) { + rcOperations += num; + } + + int GetRCOperations() const { + return rcOperations; + } + + bool GetIgnorRC() const { + return ignorRC; + } + + void SetIgnorRC(bool ignore) { + ignorRC = ignore; + } + + private: + std::set pointsBy; + int rcOperations; + bool ignorRC; + bool isPhantom; + std::map fieldNodes; + bool ReplaceByGlobalNode() override; +}; + +class EACGRefNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGRefNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kReferenceNode, ec), isStaticField(false), sym(nullptr), version(0) {} + + EACGRefNode(MIRModule *md, MapleAllocator *alloc, 
EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + bool isS = false) + : EACGBaseNode(md, alloc, kReferenceNode, ec, expr, initialEas, i), + isStaticField(isS), + sym(nullptr), + version(0) {}; + ~EACGRefNode() = default; + bool IsStaticRef() const { + return isStaticField; + }; + void SetSymbolAndVersion(MIRSymbol *mirSym, int versionIdx) { + if (sym != nullptr) { + CHECK_FATAL(sym == mirSym, "must be sym"); + CHECK_FATAL(versionIdx == version, "must be version "); + } + sym = mirSym; + version = versionIdx; + }; + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr); + + private: + bool isStaticField; + MIRSymbol *sym; + int version; + bool ReplaceByGlobalNode(); +}; +class EACGFieldNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGObjectNode; + EACGFieldNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kFieldNode, ec), + fieldID(0), + isPhantom(false), + sym(nullptr), + version(0), + mirFieldId(0) {} + + EACGFieldNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + FieldID fId, EACGObjectNode *bt, bool isPh) + : EACGBaseNode(md, alloc, kFieldNode, ec, expr, initialEas, i), + fieldID(fId), + isPhantom(isPh), + sym(nullptr), + version(0), + mirFieldId(0) { + bt->fieldNodes[fieldID] = this; + (void)belongsTo.insert(bt); + }; + + ~EACGFieldNode() = default; + + FieldID GetFieldID() const { + return fieldID; + }; + + void SetFieldID(FieldID id) { + fieldID = id; + } + + bool IsPhantom() const { + return isPhantom; + } + + const std::set &GetBelongsToObj() const { + return belongsTo; + } + + void AddBelongTo(EACGObjectNode *newObj) { + (void)belongsTo.insert(newObj); + } + + void SetSymbolAndVersion(MIRSymbol *mirSym, int versionIdx, FieldID fID) { + if (sym != nullptr) { + CHECK_FATAL(sym == mirSym, "must be mirSym"); + CHECK_FATAL(version == versionIdx, "must be version"); + CHECK_FATAL(mirFieldId == fID, "must be mir FieldId"); + } + sym = mirSym; + version = versionIdx; + mirFieldId = fID; + }; + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr); + + private: + FieldID fieldID; + std::set belongsTo; + bool isPhantom; + MIRSymbol *sym; + int version; + FieldID mirFieldId; + bool ReplaceByGlobalNode(); +}; + +class EACGActualNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGActualNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kActualNode, ec), isReturn(false), isPhantom(false), argIdx(0), callSiteInfo(0) {}; + EACGActualNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + bool isR, bool isPh, uint8 aI, uint32 callSite) + : EACGBaseNode(md, alloc, kActualNode, ec, expr, initialEas, i), + isReturn(isR), + isPhantom(isPh), + argIdx(aI), + callSiteInfo(callSite) {}; + ~EACGActualNode() = default; + + bool IsReturn() const { + return isReturn; + }; + + bool IsPhantom() const { + return isPhantom; + }; + + uint32 GetArgIndex() const { + return argIdx; + }; + + uint32 GetCallSite() const { + return callSiteInfo; + } + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr); + + private: + bool isReturn; + bool isPhantom; + uint8 argIdx; + uint32 callSiteInfo; + bool ReplaceByGlobalNode(); +}; +} // namespace 
maple +#endif diff --git a/ecmascript/mapleall/maple_ipa/include/old/ipa_escape_analysis.h b/ecmascript/mapleall/maple_ipa/include/old/ipa_escape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..6f89215363e5d9c31f7f980c0fd1ec2a9f4fc0f7 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/old/ipa_escape_analysis.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H +#define MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H + +#include +#include "class_hierarchy.h" +#include "call_graph.h" +#include "irmap.h" +#include "me_function.h" +#include "ea_connection_graph.h" +#include "intrinsics.h" + +namespace maple { +class IPAEscapeAnalysis { + public: + static constexpr int kCalleeCandidateLimit = 600; + static constexpr int kFuncInSCCLimit = 200000; + static constexpr int kSCCConvergenceLimit = 20; + static constexpr int kCalleeNodeLimit = 100000; + static constexpr int kRCOperLB = 0; + static constexpr bool kDebug = false; + + IPAEscapeAnalysis(KlassHierarchy *khTmp, IRMap *irMapTmp, MeFunction *funcTmp, MemPool *mp, CallGraph *pcgTmp) + : kh(khTmp), + irMap(irMapTmp), + ssaTab(&irMap->GetSSATab()), + mirModule(&irMapTmp->GetMIRModule()), + func(funcTmp), + eaCG(func->GetMirFunc()->GetEACG()), + pcg(pcgTmp), + allocator(mp), + cgChangedInSCC(false), + tempCount(0), + retVar(nullptr) {} + ~IPAEscapeAnalysis() = default; + void ConstructConnGraph(); + void DoOptimization(); + + private: + TyIdx GetAggElemType(const MIRType &aggregate) const; + bool IsSpecialEscapedObj(const MeExpr &alloc) const; + EACGRefNode *GetOrCreateCGRefNodeForVar(VarMeExpr &var, bool createObjNode = false); + EACGRefNode *GetOrCreateCGRefNodeForAddrof(AddrofMeExpr &var, bool createObjNode = false); + EACGRefNode *GetOrCreateCGRefNodeForReg(RegMeExpr ®, bool createObjNode = false); + EACGRefNode *GetOrCreateCGRefNodeForVarOrReg(MeExpr &var, bool createObjNode = false); + void GetArrayBaseNodeForReg(std::vector &nodes, RegMeExpr ®Var, MeStmt &stmt); + void GetOrCreateCGFieldNodeForIvar(std::vector &fieldNodes, IvarMeExpr &ivar, MeStmt &stmt, + bool createObjNode); + void GetOrCreateCGFieldNodeForIAddrof(std::vector &fieldNodes, OpMeExpr &expr, MeStmt &stmt, + bool createObjNode); + EACGObjectNode *GetOrCreateCGObjNode(MeExpr *expr, const MeStmt *stmt = nullptr, EAStatus easOfPhanObj = kNoEscape); + void GetCGNodeForMeExpr(std::vector &nodes, MeExpr &expr, MeStmt &stmt, bool createObjNode); + void CollectDefStmtForReg(std::set &visited, std::set &defStmts, RegMeExpr ®Var); + void UpdateEscConnGraphWithStmt(MeStmt &stmt); + void UpdateEscConnGraphWithPhi(const BB &bb); + void HandleParaAtFuncEntry(); + void HandleParaAtCallSite(uint32 callInfo, CallMeStmt &call); + void HandleSingleCallee(CallMeStmt &callMeStmt); + bool HandleSpecialCallee(CallMeStmt *callMeStmt); + void HandleMultiCallees(const CallMeStmt &callMeStmt); + EAConnectionGraph *GetEAConnectionGraph(MIRFunction &function) const; + void 
ProcessNoAndRetEscObj(); + void ProcessRetStmt(); + VarMeExpr *CreateEATempVarWithName(const std::string &name); + VarMeExpr *CreateEATempVar(); + VarMeExpr *GetOrCreateEARetTempVar(); + VarMeExpr *CreateEATempVarMeExpr(OriginalSt &ost); + OriginalSt *CreateEATempOstWithName(const std::string &name); + OriginalSt *CreateEATempOst(); + OriginalSt *CreateEARetTempOst(); + VarMeExpr *GetOrCreateEARetTempVarMeExpr(OriginalSt &ost); + void CountObjRCOperations(); + void DeleteRedundantRC(); + + KlassHierarchy *kh; + IRMap *irMap; + SSATab *ssaTab; + MIRModule *mirModule; + MeFunction *func; + EAConnectionGraph *eaCG; + CallGraph *pcg; + MapleAllocator allocator; + bool cgChangedInSCC; + uint32 tempCount; + std::vector noAndRetEscObj; + VarMeExpr *retVar; + std::vector noAndRetEscOst; + std::vector gcStmts; +}; +} // namespace maple +#endif diff --git a/ecmascript/mapleall/maple_ipa/include/old/ipa_option.h b/ecmascript/mapleall/maple_ipa/include/old/ipa_option.h new file mode 100644 index 0000000000000000000000000000000000000000..ac1b85c42d70f052fe739c96f45eb299fcce6b9c --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/old/ipa_option.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_OPTION_H +#define MAPLE_IPA_OPTION_H +#include +#include +#include +#include "mir_parser.h" +#include "opcode_info.h" +#include "option.h" +#include "inline.h" +#include "bin_mpl_export.h" +#include "me_phase_manager.h" + +namespace maple { +class IpaOption { + public: + static IpaOption &GetInstance(); + + ~IpaOption() = default; + + bool SolveOptions() const; + + bool ParseCmdline(int argc, char **argv, std::vector &fileNames); + + private: + IpaOption() = default; +}; + +class MeFuncPM1 : public MeFuncPM { + public: + explicit MeFuncPM1(MemPool *memPool) : MeFuncPM(memPool) { + SetPhaseID(&MeFuncPM1::id); + } + PHASECONSTRUCTOR(MeFuncPM1); + std::string PhaseName() const override; + ~MeFuncPM1() override {} + + private: + void GetAnalysisDependence(AnalysisDep &aDep) const override; + void DoPhasesPopulate(const MIRModule &m) override; +}; + +class MeFuncPM2 : public MeFuncPM { + public: + explicit MeFuncPM2(MemPool *memPool) : MeFuncPM(memPool) { + SetPhaseID(&MeFuncPM2::id); + } + PHASECONSTRUCTOR(MeFuncPM2); + std::string PhaseName() const override; + ~MeFuncPM2() override {} + + private: + void GetAnalysisDependence(AnalysisDep &aDep) const override; + void DoPhasesPopulate(const MIRModule &m) override; +}; +} // namespace maple +#endif // MAPLE_IPA_OPTION_H \ No newline at end of file diff --git a/ecmascript/mapleall/maple_ipa/include/old/mrt_info.def b/ecmascript/mapleall/maple_ipa/include/old/mrt_info.def new file mode 100644 index 0000000000000000000000000000000000000000..e8bd87cf7da66941db95af8c79fab90745d78d47 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/old/mrt_info.def @@ -0,0 +1,639 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
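The escape-analysis headers above hinge on the EAStatus ordering in ea_connection_graph.h: kNoEscape < kReturnEscape < kArgumentEscape < kGlobalEscape, and EACGBaseNode::UpdateEAStatus only ever raises a node's status before propagating it, so the connection-graph walk terminates. A self-contained sketch of that monotone update, with Node as a stand-in for EACGBaseNode:

```
#include <set>

enum EAStatus { kNoEscape, kReturnEscape, kArgumentEscape, kGlobalEscape };

struct Node {
  EAStatus eaStatus = kNoEscape;
  std::set<Node*> out;  // points-to / deferred edges
};

// Mirrors UpdateEAStatus: raise the status, then push it to everything
// reachable, as PropagateEAStatusForNode does. Because a node that is
// already at least `newEas` returns false without recursing, cycles in
// the connection graph cannot cause infinite recursion.
bool UpdateEAStatus(Node &n, EAStatus newEas) {
  if (newEas <= n.eaStatus) {
    return false;  // already at least this escaped
  }
  n.eaStatus = newEas;
  for (Node *succ : n.out) {
    UpdateEAStatus(*succ, newEas);
  }
  return true;
}
```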
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// pure, defArg, defGlobal, retGlobal, throwEh, retArg, defPrivate, name +{ true, false, false, false, false, false, false, "MCC_GetOrInsertLiteral"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend_StringString"}, +{ true, false, false, false, false, false, false, "MCC_CStrToJStr"}, +{ true, false, false, false, false, false, false, "MCC_String_Equals_NotallCompress"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend_StringInt"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend_StringJcharString"}, +{ true, false, false, false, true, false, false, "MCC_ThrowStringIndexOutOfBoundsException"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_charAt__I"}, +{ false, true, false, false, true, false, false, "Native_java_lang_String_getCharsNoCheck__II_3CI"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_toCharArray__"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_fastSubstring__II"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_compareTo__Ljava_lang_String_2"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_intern__"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_doReplace__CC"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_concat__Ljava_lang_String_2"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_fastIndexOf__II"}, +{ true, false, false, false, true, false, false, "Native_java_lang_Object_clone_Ljava_lang_Object__"}, +{ false, false, false, true, true, false, false, "Native_Thread_currentThread"}, + +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisProxy_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetTypeName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7Ccast_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPrimitiveClass_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetComponentType_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredField_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetGenericInterfaces_7C_28_29ALjava_2Flang_2Freflect_2FType_3B"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Flang_2FClass_3B_7CisInstance_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CforName_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CforName_7C_28Ljava_2Flang_2FString_3BZLjava_2Flang_2FClassLoader_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAssignableFrom_7C_28Ljava_2Flang_2FClass_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetModifiers_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetConstructor_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetConstructors_7C_28_29ALjava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisPrimitive_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisArray_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInterfaces_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSimpleName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSuperclass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethods_7C_28_29ALjava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructors_7C_28_29ALjava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructor_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisInterface_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetCanonicalName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisEnum_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAnnotation_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAnnotations_7C_28_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAnnotationsByType_7C_28Ljava_2Flang_2FClass_3B_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredAnnotation_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredAnnotations_7C_28_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Flang_2FClass_3B_7CcannotCastMsg_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CclassForName_7C_28Ljava_2Flang_2FString_3BZLjava_2Flang_2FClassLoader_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CclassNameImpliesTopLevel_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CfindInterfaceMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicMethodRecursive_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetConstructor0_7C_28ALjava_2Flang_2FClass_3BI_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructorInternal_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructorsInternal_7C_28Z_29ALjava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethodInternal_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingConstructorNative_7C_28_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingMethodNative_7C_28_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInnerClassFlags_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInnerClassName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInterfacesInternal_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3BZ_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetNameNative_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicDeclaredFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicFieldRecursive_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicFieldsRecursive_7C_28Ljava_2Futil_2FList_3B_29V"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicMethodsInternal_7C_28Ljava_2Futil_2FList_3B_29V"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethodsUnchecked_7C_28Z_29ALjava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSignatureAnnotation_7C_28_29ALjava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSignatureAttribute_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, 
false, false, false, "Ljava_2Flang_2FClass_3B_7CisDeclaredAnnotationPresent_7C_28Ljava_2Flang_2FClass_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisLocalOrAnonymousClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisLocalClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAnonymousClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CresolveName_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CasSubclass_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CdesiredAssertionStatus_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAccessFlags_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingClass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetClasses_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredClasses_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredFieldsUnchecked_7C_28Z_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaringClass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingConstructor_7C_28_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingMethod_7C_28_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnumConstants_7C_28_29ALjava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnumConstantsShared_7C_28_29ALjava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetField_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetGenericSuperclass_7C_28_29Ljava_2Flang_2Freflect_2FType_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInstanceMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetMethods_7C_28_29ALjava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPackage_7C_28_29Ljava_2Flang_2FPackage_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPackageName_24_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetProtectionDomain_7C_28_29Ljava_2Fsecurity_2FProtectionDomain_3B"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSigners_7C_28_29ALjava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisMemberClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetTypeParameters_7C_28_29ALjava_2Flang_2Freflect_2FTypeVariable_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAnnotation_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAnnotationPresent_7C_28Ljava_2Flang_2FClass_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisFinalizable_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisSynthetic_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CtoGenericString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Clog10_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csignum_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CaddExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CmultiplyExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorDiv_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorMod_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CsubtractExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CtoIntExact_7C_28J_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CmultiplyExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CaddExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CsubtractExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csqrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CcopySign_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CgetExponent_7C_28D_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28FF_29F"}, +{ true, false, false, false, 
false, false, false, "Ljava_2Flang_2FMath_3B_7Cfloor_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ctan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cacos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ccos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Casin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Catan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Catan2_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cpow_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cround_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Clog_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cceil_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CpowerOfTwoD_7C_28I_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CIEEEremainder_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ccbrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CcopySign_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ccosh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CdecrementExact_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CdecrementExact_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cexp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cexpm1_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorDiv_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorMod_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CgetExponent_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Chypot_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CincrementExact_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CincrementExact_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Clog1p_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnegateExact_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnegateExact_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextAfter_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextAfter_7C_28FD_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextDown_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextDown_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextUp_7C_28D_29D"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextUp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CpowerOfTwoF_7C_28I_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Crint_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cround_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cscalb_7C_28DI_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cscalb_7C_28FI_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csignum_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csinh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ctanh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CtoDegrees_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CtoRadians_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Culp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Culp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Clog_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csqrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CIEEEremainder_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cacos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CaddExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CaddExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Casin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Catan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Catan2_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ccbrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cceil_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorOrCeil_7C_28DDDD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CcopySign_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CcopySign_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ccos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ccosh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cexp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cexpm1_7C_28D_29D"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cfloor_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorDiv_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorDiv_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorMod_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorMod_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CgetExponent_7C_28D_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CgetExponent_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Chypot_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Clog10_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Clog1p_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CmultiplyExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CmultiplyExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextAfter_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextAfter_7C_28FD_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextDown_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextDown_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextUp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextUp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cpow_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Crint_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cround_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cround_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cscalb_7C_28DI_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cscalb_7C_28FI_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csignum_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csignum_7C_28F_29F"}, +{ 
true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csinh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CsubtractExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CsubtractExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ctan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ctanh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CtoDegrees_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CtoIntExact_7C_28J_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CtoRadians_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Culp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Culp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Ccompare_7C_28DD_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CdoubleToLongBits_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ChashCode_7C_28D_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CvalueOf_7C_28D_29Ljava_2Flang_2FDouble_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ClongBitsToDouble_7C_28J_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisNaN_7C_28D_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisInfinite_7C_28D_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CtoString_7C_28D_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CdoubleValue_7C_28_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CparseDouble_7C_28Ljava_2Flang_2FString_3B_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CdoubleToRawLongBits_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisFinite_7C_28D_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Cmax_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Cmin_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Csum_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CtoHexString_7C_28D_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CvalueOf_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FDouble_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CbyteValue_7C_28_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CcompareTo_7C_28Ljava_2Flang_2FDouble_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, 
false, false, false, "Ljava_2Flang_2FDouble_3B_7CfloatValue_7C_28_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CintValue_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisInfinite_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisNaN_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ClongValue_7C_28_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CshortValue_7C_28_29S"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CfloatToIntBits_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CintBitsToFloat_7C_28I_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisNaN_7C_28F_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CtoString_7C_28F_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CvalueOf_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FFloat_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CfloatValue_7C_28_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CfloatToRawIntBits_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CparseFloat_7C_28Ljava_2Flang_2FString_3B_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisFinite_7C_28F_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisInfinite_7C_28F_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CvalueOf_7C_28F_29Ljava_2Flang_2FFloat_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Ccompare_7C_28FF_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7ChashCode_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Cmax_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Cmin_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Csum_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CtoHexString_7C_28F_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CbyteValue_7C_28_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CcompareTo_7C_28Ljava_2Flang_2FFloat_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CdoubleValue_7C_28_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CintValue_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisInfinite_7C_28_29Z"}, +{ 
true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisNaN_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7ClongValue_7C_28_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CshortValue_7C_28_29S"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Cdigit_7C_28CI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Ccompare_7C_28CC_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CvalueOf_7C_28C_29Ljava_2Flang_2FCharacter_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcharCount_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAt_7C_28Ljava_2Flang_2FCharSequence_3BI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisHighSurrogate_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowSurrogate_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoCodePoint_7C_28CC_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAt_7C_28ACI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAtImpl_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAt_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBefore_7C_28Ljava_2Flang_2FCharSequence_3BI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBefore_7C_28ACI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBeforeImpl_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBefore_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointCount_7C_28Ljava_2Flang_2FCharSequence_3BII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointCount_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointCountImpl_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Cdigit_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CdigitImpl_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CforDigit_7C_28II_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetDirectionality_7C_28C_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetDirectionality_7C_28I_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetType_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetDirectionalityImpl_7C_28I_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetName_7C_28I_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisValidCodePoint_7C_28I_29Z"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNameImpl_7C_28I_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNumericValue_7C_28C_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNumericValue_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNumericValueImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetType_7C_28C_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetTypeImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ChashCode_7C_28C_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ChighSurrogate_7C_28I_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisAlphabetic_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisAlphabeticImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisBmpCodePoint_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDefined_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDefined_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDefinedImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDigit_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDigit_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDigitImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisISOControl_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisISOControl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdentifierIgnorable_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdentifierIgnorable_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdentifierIgnorableImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdeographic_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdeographicImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierPart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierPart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierStart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierStart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaLetter_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaLetterOrDigit_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetter_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Flang_2FCharacter_3B_7CisLetter_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterOrDigit_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterOrDigit_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterOrDigitImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowerCase_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowerCase_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowerCaseImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisMirrored_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisMirrored_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisMirroredImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpace_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpaceChar_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpaceChar_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpaceCharImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSupplementaryCodePoint_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSurrogate_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSurrogatePair_7C_28CC_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisTitleCase_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisTitleCase_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisTitleCaseImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierPart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierPart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierPartImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierStart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierStart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierStartImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUpperCase_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUpperCase_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUpperCaseImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisWhitespace_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisWhitespace_7C_28I_29Z"}, +{ true, false, 
false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisWhitespaceImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ClowSurrogate_7C_28I_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CoffsetByCodePoints_7C_28Ljava_2Flang_2FCharSequence_3BII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CoffsetByCodePoints_7C_28ACIIII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CoffsetByCodePointsImpl_7C_28ACIIII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CreverseBytes_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoChars_7C_28I_29AC"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoLowerCase_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoLowerCase_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoLowerCaseImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoString_7C_28C_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoTitleCase_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoTitleCase_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoTitleCaseImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoUpperCase_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoUpperCase_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoUpperCaseImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcharValue_7C_28_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcompareTo_7C_28Ljava_2Flang_2FCharacter_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarSystem_7C_28I_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarDate_7C_28_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_24Date_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarSystem_7C_28J_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetJulianCalendar_7C_28_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarSystem_7C_28Lsun_2Futil_2Fcalendar_2FBaseCalendar_24Date_3B_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Futil_2FDate_3B_7CgetMillisOf_7C_28Ljava_2Futil_2FDate_3B_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetAnnotationNative_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetNameInternal_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetSignatureAnnotation_7C_28_29ALjava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetSignatureAttribute_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDeclaringClass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetType_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetAnnotation_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetAnnotationsByType_7C_28Ljava_2Flang_2FClass_3B_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetArtField_7C_28_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetBoolean_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetByte_7C_28Ljava_2Flang_2FObject_3B_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetChar_7C_28Ljava_2Flang_2FObject_3B_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDeclaredAnnotations_7C_28_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDexFieldIndex_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDouble_7C_28Ljava_2Flang_2FObject_3B_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetFloat_7C_28Ljava_2Flang_2FObject_3B_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetGenericType_7C_28_29Ljava_2Flang_2Freflect_2FType_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetInt_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetLong_7C_28Ljava_2Flang_2FObject_3B_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetModifiers_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetOffset_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetShort_7C_28Ljava_2Flang_2FObject_3B_29S"}, +{ true, false, false, false, false, false, false, 
"Landroid_2Ficu_2Futil_2FCalendar_3B_7CgetRegionForCalendar_7C_28Landroid_2Ficu_2Futil_2FULocale_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FCalendar_3B_7CgetType_7C_28_29Ljava_2Flang_2FString_3B"}, + +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopy__Ljava_lang_Object_2ILjava_lang_Object_2II"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyByteUnchecked___3BI_3BII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyBooleanUnchecked___3ZI_3ZII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyShortUnchecked___3SI_3SII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyLongUnchecked___3JI_3JII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyIntUnchecked___3II_3III"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyFloatUnchecked___3FI_3FII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyDoubleUnchecked___3DI_3DII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyCharUnchecked___3CI_3CII"}, + +{ false, false, false, true, true, false, false, "Ljava_2Flang_2FThread_3B_7CcurrentThread_7C_28_29Ljava_2Flang_2FThread_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FStringFactory_3B_7CnewStringFromChars_7C_28IIAC_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CfastSubstring_7C_28II_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CcompareTo_7C_28Ljava_2Flang_2FString_3B_29I"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CgetCharsNoCheck_7C_28IIACI_29V"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FStringFactory_3B_7CnewStringFromBytes_7C_28ABIII_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FStringFactory_3B_7CnewStringFromString_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CtoCharArray_7C_28_29AC"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7Cconcat_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CfastSubstring_7C_28II_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7Cintern_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CdoReplace_7C_28CC_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CfastIndexOf_7C_28II_29I"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7Carraycopy_7C_28Ljava_2Flang_2FObject_3BILjava_2Flang_2FObject_3BII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyCharUnchecked_7C_28ACIACII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyByteUnchecked_7C_28ABIABII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyShortUnchecked_7C_28ASIASII_29V"}, +{ false, true, false, false, true, false, false, 
"Ljava_2Flang_2FSystem_3B_7CarraycopyIntUnchecked_7C_28AIIAIII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyLongUnchecked_7C_28AJIAJII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyFloatUnchecked_7C_28AFIAFII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyDoubleUnchecked_7C_28ADIADII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyBooleanUnchecked_7C_28AZIAZII_29V"}, + +{ true, false, false, false, true, false, false, "Native_java_lang_StringFactory_newStringFromChars__II_3C"}, +{ true, false, false, false, true, false, false, "Native_java_lang_StringFactory_newStringFromString__Ljava_lang_String_2"}, +{ true, false, false, false, true, false, false, "Native_java_lang_StringFactory_newStringFromBytes___3BIII"}, + +{ false, false, false, true, false, false, false, "Ljava_2Flang_2FThrowable_3B_7CnativeFillInStackTrace_7C_28_29Ljava_2Flang_2FObject_3B"}, + +{ false, true, false, false, false, false, true, "Ljava_2Futil_2FMap_3B_7Cput_7C_28Ljava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ false, true, false, false, false, false, true, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapObject_7C_28Ljava_2Flang_2FObject_3BJLjava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FObject_3B_7CidentityHashCodeNative_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FString_3B_7CcharAt_7C_28I_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FProperties_3B_7CgetProperty_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FProperties_3B_7CgetProperty_7C_28Ljava_2Flang_2FString_3BLjava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, + +{ true, false, false, false, true, false, true, "Lsun_2Fnet_2FNetProperties_3B_7Cget_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, true, "Lsun_2Fnet_2FNetProperties_3B_7CgetBoolean_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FBoolean_3B"}, +{ true, false, false, false, true, false, true, "Lsun_2Fnet_2FNetProperties_3B_7CgetInteger_7C_28Ljava_2Flang_2FString_3BI_29Ljava_2Flang_2FInteger_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2Fref_2FReference_3B_7CgetReferent_7C_28_29Ljava_2Flang_2FObject_3B", }, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CnanoTime_7C_28_29J"}, +{ true, false, false, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cgettid_7C_28_29I"}, +{ true, false, false, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cgetpid_7C_28_29I"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CcurrentTimeMillis_7C_28_29J"}, +{ true, false, false, false, true, false, false, "Lsun_2Futil_2Flocale_2FBaseLocale_24Cache_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FString_3B_7Cformat_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FCalendar_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FCalendar_3B_29Z"}, +{ true, false, false, false, false, false, false, 
"Landroid_2Ficu_2Futil_2FGregorianCalendar_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FCalendar_3B_29Z"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FAnnualTimeZoneRule_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FTimeZoneRule_3B_29Z"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FGregorianCalendar_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FCalendar_3B_29Z"}, + +{ true, false, false, false, true, false, false, "Landroid_2Fview_2FSurface_3B_7CnativeIsValid_7C_28J_29Z"}, +{ true, false, false, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CgetObjectVolatile_7C_28Ljava_2Flang_2FObject_3BJ_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeHoldsLock_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, true, false, false, "Landroid_2Futil_2FLog_3B_7Clogger__entry__max__payload__native_7C_28_29I"}, +{ true, false, false, false, true, false, false, "Landroid_2Fos_2FSystemProperties_3B_7Cnative__get__int_7C_28Ljava_2Flang_2FString_3BI_29I"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeGetStatus_7C_28Z_29I"}, +{ false, true, false, false, true, false, false, "Ljava_2Ftime_2Fformat_2FDateTimeFormatterBuilder_24ReducedPrinterParser_3B_7Clambda_24setValue_240_24DateTimeFormatterBuilder_24ReducedPrinterParser_7C_28Ljava_2Ftime_2Fformat_2FDateTimeParseContext_3BJIILjava_2Ftime_2Fchrono_2FChronology_3B_29V"}, + +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FMessageQueue_3B_7CnativePollOnce_7C_28JI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FTrace_3B_7CnativeTraceBegin_7C_28JLjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FLog_3B_7Cprintln__native_7C_28IILjava_2Flang_2FString_3BLjava_2Flang_2FString_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FSystemProperties_3B_7Cnative__add__change__callback_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CreadProcFile_7C_28Ljava_2Flang_2FString_3BAIALjava_2Flang_2FString_3BAJAF_29Z"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cstat_7C_28Ljava_2Flang_2FString_3B_29Landroid_2Fsystem_2FStructStat_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FFileInputStream_3B_7Copen0_7C_28Ljava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetGidForName_7C_28Ljava_2Flang_2FString_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FEventLog_3B_7CwriteEvent_7C_28IALjava_2Flang_2FObject_3B_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FObject_3B_7CinternalClone_7C_28_29Ljava_2Flang_2FObject_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CwriteBytes_7C_28Ljava_2Fio_2FFileDescriptor_3BLjava_2Flang_2FObject_3BII_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetPidsForCommands_7C_28ALjava_2Flang_2FString_3B_29AI"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7CcreateFileExclusively0_7C_28Ljava_2Flang_2FString_3B_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7Clist0_7C_28Ljava_2Fio_2FFile_3B_29ALjava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, 
"Llibcore_2Fio_2FLinux_3B_7Cclose_7C_28Ljava_2Fio_2FFileDescriptor_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FAsynchronousCloseMonitor_3B_7CsignalBlockedThreads_7C_28Ljava_2Fio_2FFileDescriptor_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CreadBytes_7C_28Ljava_2Fio_2FFileDescriptor_3BLjava_2Flang_2FObject_3BII_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CsetThreadStrictModePolicy_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FMatcher_3B_7CfindNextImpl_7C_28JAI_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FMatcher_3B_7CsetInputImpl_7C_28JLjava_2Flang_2FString_3BII_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Ficu_2FNativeConverter_3B_7CsetCallbackDecode_7C_28JIILjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Copen_7C_28Ljava_2Flang_2FString_3BII_29Ljava_2Fio_2FFileDescriptor_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinderProxy_3B_7CtransactNative_7C_28ILandroid_2Fos_2FParcel_3BLandroid_2Fos_2FParcel_3BI_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CrestoreCallingIdentity_7C_28J_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fnet_2FLocalSocketImpl_3B_7Creadba__native_7C_28ABIILjava_2Fio_2FFileDescriptor_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetPids_7C_28Ljava_2Flang_2FString_3BAI_29AI"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FTrace_3B_7CnativeAsyncTraceBegin_7C_28JLjava_2Flang_2FString_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FTrace_3B_7CnativeAsyncTraceEnd_7C_28JLjava_2Flang_2FString_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeAppendFrom_7C_28JJII_29J"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FFileDescriptor_3B_7CisSocket_7C_28I_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CsetProcessGroup_7C_28II_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CsetThreadPriority_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeEnforceInterface_7C_28JLjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteFloat_7C_28JF_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CioctlInetAddress_7C_28Ljava_2Fio_2FFileDescriptor_3BILjava_2Flang_2FString_3B_29Ljava_2Fnet_2FInetAddress_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FObject_3B_7CidentityHashCodeNative_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CgetCallingPid_7C_28_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CflushPendingCommands_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapLong_7C_28Ljava_2Flang_2FObject_3BJJJ_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CgetCallingUid_7C_28_29I"}, +{ false, false, true, false, 
true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteFileDescriptor_7C_28JLjava_2Fio_2FFileDescriptor_3B_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeReadStrongBinder_7C_28J_29Landroid_2Fos_2FIBinder_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinderProxy_3B_7ClinkToDeath_7C_28Landroid_2Fos_2FIBinder_24DeathRecipient_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeReadLong_7C_28J_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteLong_7C_28JJ_29V"}, +{ false, false, true, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CputObject_7C_28Ljava_2Flang_2FObject_3BJLjava_2Flang_2FObject_3B_29V"}, +{ false, false, true, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CputOrderedObject_7C_28Ljava_2Flang_2FObject_3BJLjava_2Flang_2FObject_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeUnmarshall_7C_28JABII_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurfaceControl_3B_7CnativeIsRogSupport_7C_28_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7CcheckAccess0_7C_28Ljava_2Fio_2FFile_3BI_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FMemoryIntArray_3B_7CnativeGet_7C_28IJI_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FObject_3B_7CnotifyAll_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FEventLog_3B_7CwriteEvent_7C_28II_29I"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CgetsockoptLinger_7C_28Ljava_2Fio_2FFileDescriptor_3BII_29Landroid_2Fsystem_2FStructLinger_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7Ccanonicalize0_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FRegion_3B_7CnativeOp_7C_28JJJI_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FMemoryIntArray_3B_7CnativeSet_7C_28IJII_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7Crename0_7C_28Ljava_2Fio_2FFile_3BLjava_2Fio_2FFile_3B_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FClass_3B_7CclassForName_7C_28Ljava_2Flang_2FString_3BZLjava_2Flang_2FClassLoader_3B_29Ljava_2Flang_2FClass_3B"}, +{ false, false, true, false, true, false, false, "Ldalvik_2Fsystem_2FVMRuntime_3B_7CnewUnpaddedArray_7C_28Ljava_2Flang_2FClass_3BI_29Ljava_2Flang_2FObject_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cchmod_7C_28Ljava_2Flang_2FString_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FUEventObserver_3B_7CnativeSetup_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurfaceControl_3B_7CnativeSetActiveConfig_7C_28Landroid_2Fos_2FIBinder_3BI_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FFileDescriptor_3B_7Csync_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FMatrix_3B_7CnGetValues_7C_28JAF_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FBaseCanvas_3B_7CnDrawColor_7C_28JII_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FRegion_3B_7CnativeSetRect_7C_28JIIII_29Z"}, +{ false, false, true, false, true, false, false, 
"Landroid_2Fos_2FDebug_3B_7CgetPss_7C_28IAJAJ_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fcontent_2Fres_2FXmlBlock_3B_7CnativeNext_7C_28J_29I"}, +{ false, false, true, false, true, false, false, "Lsun_2Fnio_2Fch_2FFileDispatcherImpl_3B_7Cread0_7C_28Ljava_2Fio_2FFileDescriptor_3BJI_29I"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cfstat_7C_28Ljava_2Fio_2FFileDescriptor_3B_29Landroid_2Fsystem_2FStructStat_3B"}, +{ false, false, true, false, true, false, false, "Lsun_2Fnio_2Ffs_2FUnixNativeDispatcher_3B_7Copen0_7C_28JII_29I"}, +{ false, false, true, false, true, false, false, "Llibcore_2Futil_2FNativeAllocationRegistry_3B_7CapplyFreeFunction_7C_28JJ_29V"}, +{ false, false, true, false, true, false, false, "Ldalvik_2Fsystem_2FVMRuntime_3B_7CregisterNativeFree_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThread_3B_7Cinterrupted_7C_28_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeCreateByteArray_7C_28J_29AB"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurfaceControl_3B_7CnativeGetContentFrameStats_7C_28JLandroid_2Fview_2FWindowContentFrameStats_3B_29Z"}, +{ false, false, true, false, true, false, false, "Lsun_2Fnio_2Fch_2FFileDispatcherImpl_3B_7Cwrite0_7C_28Ljava_2Fio_2FFileDescriptor_3BJI_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeInterrupt_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeSetPriority_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FRuntime_3B_7CnativeExit_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cstrerror_7C_28I_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fcontent_2Fres_2FXmlBlock_3B_7CnativeDestroy_7C_28J_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetThreadPriority_7C_28I_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FFileObserver_24ObserverThread_3B_7Cinit_7C_28_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurface_3B_7CnativeSyncFrameInfo_7C_28JJ_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Ficu_2FICU_3B_7CgetCurrencyCode_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FSystemProperties_3B_7Cnative__get_7C_28Ljava_2Flang_2FString_3BLjava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Ldalvik_2Fsystem_2FVMRuntime_3B_7Cproperties_7C_28_29ALjava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FMatcher_3B_7CopenImpl_7C_28J_29J"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FPattern_3B_7CcompileImpl_7C_28Ljava_2Flang_2FString_3BI_29J"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThrowable_3B_7CnativeFillInStackTrace_7C_28_29Ljava_2Flang_2FObject_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Ficu_2FNativeConverter_3B_7CopenConverter_7C_28Ljava_2Flang_2FString_3B_29J"}, +{ false, false, true, false, true, false, false, 
"Landroid_2Fview_2FDisplayEventReceiver_3B_7CnativeInit_7C_28Ljava_2Flang_2Fref_2FWeakReference_3BLandroid_2Fos_2FMessageQueue_3BI_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fcontent_2Fres_2FStringBlock_3B_7CnativeGetString_7C_28JI_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Futil_2FCharsetUtils_3B_7CtoUtf8Bytes_7C_28Ljava_2Flang_2FString_3BII_29AB"}, diff --git a/ecmascript/mapleall/maple_ipa/include/prop_parameter_type.h b/ecmascript/mapleall/maple_ipa/include/prop_parameter_type.h new file mode 100644 index 0000000000000000000000000000000000000000..2376369f9cc7e3ec685508e72afcc2d341428aec --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/prop_parameter_type.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_PROP_PARAM_TYPE_H +#define MAPLE_IPA_INCLUDE_PROP_PARAM_TYPE_H +#include "mir_nodes.h" +#include "mir_builder.h" +#include "call_graph.h" +#include "me_ir.h" +#include "me_irmap.h" +#include "dominance.h" +#include "class_hierarchy.h" +#include "module_phase_manager.h" +#include "maple_phase.h" +#include "ipa_phase_manager.h" +namespace maple { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-private-field" + +class PropParamType { + public: + PropParamType(MemPool &memPool, MapleAllocator &alloc, MIRModule &mod, CallGraph &cg, AnalysisDataManager &dataMap) + : memPool(memPool), alloc(alloc), module(mod), builder(*mod.GetMIRBuilder()), + cg(cg), dataMap(dataMap), curFunc(nullptr), debug(false) {} + virtual ~PropParamType() = default; + bool CheckOpndZero(const MeExpr *expr); + void ResolveCallStmt(MeStmt &meStmt); + void InsertNullCheck(CallMeStmt &callStmt, const std::string &funcName, uint32 index, MeExpr &receiver); + bool CheckCondtionStmt(const MeStmt &meStmt); + void ResolveIreadExpr(MeExpr &expr); + void TraversalMeStmt(MeStmt &meStmt); + void runOnScc(maple::SCCNode &scc); + void Prop(MIRFunction &func); + + private: + MemPool &memPool; + MapleAllocator &alloc; + MIRModule &module; + MIRBuilder &builder; + CallGraph &cg; + AnalysisDataManager &dataMap; + std::map formalMapLocal; + MIRFunction *curFunc; + bool debug; +}; +#pragma clang diagnostic pop +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCPropParamType, maple::SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_PROP_PARAM_TYPE_H diff --git a/ecmascript/mapleall/maple_ipa/include/prop_return_null.h b/ecmascript/mapleall/maple_ipa/include/prop_return_null.h new file mode 100644 index 0000000000000000000000000000000000000000..d4f64e7f1390116f31265ce704020760e13b4fdf --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/include/prop_return_null.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IPA_INCLUDE_PROP_RETURN_ATTR_H
+#define MAPLE_IPA_INCLUDE_PROP_RETURN_ATTR_H
+#include "mir_nodes.h"
+#include "mir_builder.h"
+#include "call_graph.h"
+#include "me_ir.h"
+#include "me_irmap.h"
+#include "dominance.h"
+#include "class_hierarchy.h"
+#include "module_phase_manager.h"
+#include "maple_phase.h"
+#include "ipa_phase_manager.h"
+namespace maple {
+class PropReturnAttr {
+ public:
+  PropReturnAttr(MemPool &memPool, MapleAllocator &alloc, MIRModule &mod, CallGraph &cg, AnalysisDataManager &dataMap)
+      : memPool(memPool), alloc(alloc), module(mod), builder(*mod.GetMIRBuilder()),
+        cg(cg), dataMap(dataMap), inferredRetTyIdx(0),
+        retTy(kNotSeen), maybeNull(true), debug(false) {}
+  virtual ~PropReturnAttr() = default;
+  TyIdx GetInferredTyIdx(MeExpr &expr) const;
+  void InsertNullCheck(const CallMeStmt &callStmt, MeExpr &receiver) const;
+  void PropVarInferredType(VarMeExpr &varMeExpr) const;
+  void PropIvarInferredType(IvarMeExpr &ivar) const;
+  void VisitVarPhiNode(MePhiNode &varPhi);
+  void VisitMeExpr(MeExpr *meExpr) const;
+  void ReturnTyIdxInferring(const RetMeStmt &retMeStmt);
+  void TraversalMeStmt(MeStmt &meStmt);
+  void TraversalBB(BB *bb);
+  void Perform(MeFunction &func);
+  void Initialize(maple::SCCNode<CGNode> &scc);
+  void Prop(maple::SCCNode<CGNode> &scc);
+  bool PhaseRun(maple::SCCNode<CGNode> &scc);
+
+ private:
+  MemPool &memPool;
+  MapleAllocator &alloc;
+  MIRModule &module;
+  MIRBuilder &builder;
+  CallGraph &cg;
+  AnalysisDataManager &dataMap;
+  TyIdx inferredRetTyIdx;
+  enum TagRetTyIdx {
+    kNotSeen,
+    kSeen,
+    kFailed
+  } retTy;
+  bool maybeNull;
+  bool debug;
+};
+
+MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCPropReturnAttr, maple::SCCNode<CGNode>)
+OVERRIDE_DEPENDENCE
+MAPLE_SCC_PHASE_DECLARE_END
+} // namespace maple
+#endif // MAPLE_IPA_INCLUDE_PROP_RETURN_ATTR_H
diff --git a/ecmascript/mapleall/maple_ipa/src/ipa_clone.cpp b/ecmascript/mapleall/maple_ipa/src/ipa_clone.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f6ce4c55cdcce92b30cda0113e3398cb84634b5b
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/src/ipa_clone.cpp
@@ -0,0 +1,537 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "ipa_clone.h"
+#include "clone.h"
+#include "mir_symbol.h"
+#include "func_desc.h"
+#include "inline.h"
+
+// For some funcs, when we can ignore their return values, we clone a new func of
+// them without return values. We configure a list to save these funcs and clone
+// them at the very beginning, so that the clones can also enjoy the optimizations
+// that follow. This mainly covers cloning the func body (including labels, symbols,
+// arguments, etc.) and updating the new func's information.
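+//
+// A rough illustration of the intent (hypothetical code, names invented here,
+// not part of this pass):
+//   int getFlag(int *out) { *out = 1; return 2; }
+//   (void)getFlag(&x);                 // return value ignored at this call site
+// could be cloned along the lines of
+//   static void getFlag.noret(int *out) { *out = 1; }
+// with such call sites retargeted to the clone, which later phases then
+// optimize like any other function.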
+namespace maple {
+void IpaClone::InitParams() {
+  if (Options::optForSize) {
+    numOfCloneVersions = 2;
+    numOfImpExprLowBound = 2;
+    numOfImpExprHighBound = 5;
+    numOfCallSiteLowBound = 2;
+    numOfCallSiteUpBound = 10;
+    numOfConstpropValue = 2;
+  } else {
+    numOfCloneVersions = Options::numOfCloneVersions;
+    numOfImpExprLowBound = Options::numOfImpExprLowBound;
+    numOfImpExprHighBound = Options::numOfImpExprHighBound;
+    numOfCallSiteLowBound = Options::numOfCallSiteLowBound;
+    numOfCallSiteUpBound = Options::numOfCallSiteUpBound;
+    numOfConstpropValue = Options::numOfConstpropValue;
+  }
+}
+
+MIRSymbol *IpaClone::IpaCloneLocalSymbol(const MIRSymbol &oldSym, const MIRFunction &newFunc) {
+  MemPool *newMP = newFunc.GetDataMemPool();
+  MIRSymbol *newSym = newMP->New<MIRSymbol>(oldSym);
+  if (oldSym.GetSKind() == kStConst) {
+    newSym->SetKonst(oldSym.GetKonst()->Clone(*newMP));
+  } else if (oldSym.GetSKind() == kStPreg) {
+    newSym->SetPreg(newMP->New<MIRPreg>(*oldSym.GetPreg()));
+  } else if (oldSym.GetSKind() == kStFunc) {
+    CHECK_FATAL(false, "%s has unexpected local func symbol", oldSym.GetName().c_str());
+  }
+  return newSym;
+}
+
+void IpaClone::IpaCloneSymbols(MIRFunction &newFunc, const MIRFunction &oldFunc) {
+  size_t symTabSize = oldFunc.GetSymbolTabSize();
+  for (size_t i = oldFunc.GetFormalCount() + 1; i < symTabSize; ++i) {
+    MIRSymbol *sym = oldFunc.GetSymbolTabItem(static_cast<uint32>(i));
+    if (sym == nullptr) {
+      continue;
+    }
+    MIRSymbol *newSym = IpaCloneLocalSymbol(*sym, newFunc);
+    if (!newFunc.GetSymTab()->AddStOutside(newSym)) {
+      CHECK_FATAL(false, "%s already existed in func %s", sym->GetName().c_str(), newFunc.GetName().c_str());
+    }
+  }
+}
+
+void IpaClone::IpaCloneLabels(MIRFunction &newFunc, const MIRFunction &oldFunc) {
+  size_t labelTabSize = oldFunc.GetLabelTab()->GetLabelTableSize();
+  for (size_t i = 1; i < labelTabSize; ++i) {
+    GStrIdx strIdx = oldFunc.GetLabelTab()->GetSymbolFromStIdx(static_cast<uint32>(i));
+    (void)newFunc.GetLabelTab()->AddLabel(strIdx);
+  }
+}
+
+void IpaClone::IpaClonePregTable(MIRFunction &newFunc, const MIRFunction &oldFunc) {
+  newFunc.AllocPregTab();
+  size_t pregTableSize = oldFunc.GetPregTab()->Size();
+  MIRPregTable *newPregTable = newFunc.GetPregTab();
+  for (size_t i = 0; i < pregTableSize; ++i) {
+    MIRPreg *temp = const_cast<MIRPreg*>(oldFunc.GetPregTab()->GetPregTableItem(static_cast<uint32>(i)));
+    if (temp != nullptr) {
+      PregIdx id = newPregTable->CreatePreg(temp->GetPrimType(), temp->GetMIRType());
+      MIRPreg *newPreg = newPregTable->PregFromPregIdx(id);
+      if (newPreg == nullptr || newPreg->GetPregNo() != temp->GetPregNo()) {
+        DEBUG_ASSERT(false, "The cloned pregNo isn't consistent");
+      }
+    }
+  }
+}
+
+// IpaClone a function.
+MIRFunction *IpaClone::IpaCloneFunction(MIRFunction &originalFunction, const std::string &fullName) const {
+  MapleAllocator cgAlloc(originalFunction.GetDataMemPool());
+  ArgVector argument(cgAlloc.Adapter());
+  IpaCloneArgument(originalFunction, argument);
+  MIRType *retType = originalFunction.GetReturnType();
+  MIRFunction *newFunc =
+      mirBuilder.CreateFunction(fullName, *retType, argument, false, originalFunction.GetBody() != nullptr);
+  CHECK_FATAL(newFunc != nullptr, "create cloned function failed");
+  mirBuilder.GetMirModule().AddFunction(newFunc);
+  newFunc->SetFlag(originalFunction.GetFlag());
+  newFunc->SetSrcPosition(originalFunction.GetSrcPosition());
+  newFunc->SetFuncAttrs(originalFunction.GetFuncAttrs());
+  newFunc->SetBaseClassFuncNames(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName));
+  newFunc->GetFuncSymbol()->SetAppearsInCode(true);
+  newFunc->SetPuidxOrigin(newFunc->GetPuidx());
+  if (originalFunction.GetBody() != nullptr) {
+    CopyFuncInfo(originalFunction, *newFunc);
+    newFunc->SetBody(
+        originalFunction.GetBody()->CloneTree(newFunc->GetCodeMempoolAllocator()));
+    IpaCloneSymbols(*newFunc, originalFunction);
+    IpaCloneLabels(*newFunc, originalFunction);
+    IpaClonePregTable(*newFunc, originalFunction);
+  }
+  newFunc->SetFuncDesc(originalFunction.GetFuncDesc());
+  // All the cloned functions cannot be accessed from other translation units.
+  newFunc->SetAttr(FUNCATTR_static);
+  return newFunc;
+}
+
+MIRFunction *IpaClone::IpaCloneFunctionWithFreq(MIRFunction &originalFunction,
+                                                const std::string &fullName, int64_t callSiteFreq) const {
+  MapleAllocator cgAlloc(originalFunction.GetDataMemPool());
+  ArgVector argument(cgAlloc.Adapter());
+  IpaCloneArgument(originalFunction, argument);
+  MIRType *retType = originalFunction.GetReturnType();
+  MIRFunction *newFunc =
+      mirBuilder.CreateFunction(fullName, *retType, argument, false, originalFunction.GetBody() != nullptr);
+  CHECK_FATAL(newFunc != nullptr, "create cloned function failed");
+  mirBuilder.GetMirModule().AddFunction(newFunc);
+  newFunc->SetFlag(originalFunction.GetFlag());
+  newFunc->SetSrcPosition(originalFunction.GetSrcPosition());
+  newFunc->SetFuncAttrs(originalFunction.GetFuncAttrs());
+  newFunc->SetBaseClassFuncNames(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName));
+  newFunc->GetFuncSymbol()->SetAppearsInCode(true);
+  newFunc->SetPuidxOrigin(newFunc->GetPuidx());
+  GcovFuncInfo *origProfData = originalFunction.GetFuncProfData();
+  auto *moduleMp = mirBuilder.GetMirModule().GetMemPool();
+  GcovFuncInfo *newProfData = moduleMp->New<GcovFuncInfo>(&mirBuilder.GetMirModule().GetMPAllocator(),
+      newFunc->GetPuidx(), 0, 0); // skip checksum information
+  newFunc->SetFuncProfData(newProfData);
+  newProfData->SetFuncFrequency(callSiteFreq);
+  newProfData->SetFuncRealFrequency(callSiteFreq);
+  // The original function needs to update its frequency by the real entry value:
+  // update the real remaining frequency.
+  origProfData->SetFuncRealFrequency(origProfData->GetFuncRealFrequency() - callSiteFreq);
+  if (originalFunction.GetBody() != nullptr) {
+    CopyFuncInfo(originalFunction, *newFunc);
+    BlockNode *newbody = originalFunction.GetBody()->CloneTreeWithFreqs(newFunc->GetCodeMempoolAllocator(),
+        newProfData->GetStmtFreqs(), origProfData->GetStmtFreqs(),
+        static_cast<uint64_t>(callSiteFreq), /* numer */
+        static_cast<uint64_t>(origProfData->GetFuncFrequency()), /* denom */
+        (kKeepOrigFreq | kUpdateFreqbyScale));
+    newFunc->SetBody(newbody);
+    IpaCloneSymbols(*newFunc, originalFunction);
+    IpaCloneLabels(*newFunc, originalFunction);
+    IpaClonePregTable(*newFunc, originalFunction);
+  }
+  newFunc->SetFuncDesc(originalFunction.GetFuncDesc());
+  // All the cloned functions cannot be accessed from other translation units.
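+  // (Presumably the point of FUNCATTR_static below: internal linkage keeps other
+  // translation units from binding to a clone whose signature we just rewrote.)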
+  newFunc->SetAttr(FUNCATTR_static);
+  return newFunc;
+}
+
+void IpaClone::IpaCloneArgument(MIRFunction &originalFunction, ArgVector &argument) const {
+  for (size_t i = 0; i < originalFunction.GetFormalCount(); ++i) {
+    auto &formalName = originalFunction.GetFormalName(i);
+    argument.push_back(ArgPair(formalName, originalFunction.GetNthParamType(i)));
+  }
+}
+
+void IpaClone::CopyFuncInfo(MIRFunction &originalFunction, MIRFunction &newFunc) const {
+  const auto &funcNameIdx = newFunc.GetBaseFuncNameStrIdx();
+  const auto &fullNameIdx = newFunc.GetNameStrIdx();
+  const auto &classNameIdx = newFunc.GetBaseClassNameStrIdx();
+  const static auto &metaFullNameIdx = mirBuilder.GetOrCreateStringIndex(kFullNameStr);
+  const static auto &metaClassNameIdx = mirBuilder.GetOrCreateStringIndex(kClassNameStr);
+  const static auto &metaFuncNameIdx = mirBuilder.GetOrCreateStringIndex(kFuncNameStr);
+  MIRInfoVector &fnInfo = originalFunction.GetInfoVector();
+  const MapleVector<bool> &infoIsString = originalFunction.InfoIsString();
+  size_t size = fnInfo.size();
+  for (size_t i = 0; i < size; ++i) {
+    if (fnInfo[i].first == metaFullNameIdx) {
+      newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, fullNameIdx));
+    } else if (fnInfo[i].first == metaFuncNameIdx) {
+      newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, funcNameIdx));
+    } else if (fnInfo[i].first == metaClassNameIdx) {
+      newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, classNameIdx));
+    } else {
+      newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, fnInfo[i].second));
+    }
+    newFunc.PushbackIsString(infoIsString[i]);
+  }
+}
+
+bool IpaClone::CheckCostModel(MIRFunction *newFunc, uint32 paramIndex, std::vector<int64_t> &calleeValue,
+                              uint32 impSize) {
+  if (impSize >= numOfImpExprHighBound) {
+    return true;
+  }
+  auto &calleeInfo = mirModule->GetCalleeParamAboutInt();
+  CalleePair keyPair(curFunc->GetPuidx(), paramIndex);
+  uint32 callSiteSize = 0;
+  for (auto &value : calleeValue) {
+    callSiteSize += static_cast<uint32>(calleeInfo[keyPair][value].size());
+  }
+  if (callSiteSize >= numOfCallSiteUpBound) {
+    return true;
+  }
+  if (callSiteSize < numOfCallSiteLowBound || impSize < numOfImpExprLowBound) {
+    return false;
+  }
+  // Later: we will consider the body size.
+  return true;
+}
+
+void IpaClone::ReplaceIfCondtion(MIRFunction *newFunc, std::vector<ImpExpr> &result, uint64_t res) {
+  DEBUG_ASSERT(newFunc != nullptr, "null ptr check");
+  MemPool *currentFunMp = newFunc->GetCodeMempool();
+  auto elemPrimType = PTY_u8;
+  MIRType *type = GlobalTables::GetTypeTable().GetPrimType(elemPrimType);
+  MIRConst *constVal = nullptr;
+  for (int32 index = static_cast<int32>(result.size()) - 1; index >= 0; --index) {
+    uint32 stmtId = result[static_cast<size_t>(index)].GetStmtId();
+    StmtNode *newReplace = newFunc->GetStmtNodeFromMeId(stmtId);
+    DEBUG_ASSERT(newReplace != nullptr, "null ptr check");
+    if (newReplace->GetOpCode() != OP_if && newReplace->GetOpCode() != OP_brtrue &&
+        newReplace->GetOpCode() != OP_brfalse) {
+      DEBUG_ASSERT(false, "ERROR: can't find the replace statement");
+    }
+    IfStmtNode *IfstmtNode = static_cast<IfStmtNode*>(newReplace);
+    constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast<uint64>(res & 0x1), *type);
+    res >>= 1;
+    ConstvalNode *constNode = currentFunMp->New<ConstvalNode>(constVal->GetType().GetPrimType(), constVal);
+    IfstmtNode->SetOpnd(constNode, 0);
+  }
+  return;
+}
+
+void IpaClone::ModifyParameterSideEffect(MIRFunction *newFunc, uint32 paramIndex) {
+  DEBUG_ASSERT(newFunc != nullptr, "null ptr check");
+  auto &desc = newFunc->GetFuncDesc();
+  if (paramIndex >= kMaxParamCount) {
+    return;
+  }
+  for (size_t idx = paramIndex; idx < kMaxParamCount - 1; ++idx) {
+    desc.SetParamInfo(idx, desc.GetParamInfo(idx + 1));
+  }
+  desc.SetParamInfo(kMaxParamCount - 1, PI::kUnknown);
+  return;
+}
+
+void IpaClone::RemoveUnneedParameter(MIRFunction *newFunc, uint32 paramIndex, int64_t value) {
+  DEBUG_ASSERT(newFunc != nullptr, "null ptr check");
+  if (newFunc->GetBody() != nullptr) {
+    MemPool *newFuncMP = newFunc->GetCodeMempool();
+    // Create the const value.
+    MIRType *type = GlobalTables::GetTypeTable().GetPrimType(PTY_i64);
+    MIRIntConst *constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(value, *type);
+    ConstvalNode *constNode = newFuncMP->New<ConstvalNode>(constVal->GetType().GetPrimType(), constVal);
+    // Create the dassign statement.
+    DassignNode *dass = newFuncMP->New<DassignNode>();
+    MIRSymbol *sym = newFunc->GetFormal(paramIndex);
+    dass->SetStIdx(sym->GetStIdx());
+    dass->SetOpnd(constNode, 0);
+    dass->SetFieldID(0);
+    // Insert this dassign statement into the body.
+    newFunc->GetBody()->InsertFirst(dass);
+    // Remove the unneeded function parameter.
+    auto &formalVec = newFunc->GetFormalDefVec();
+    for (size_t i = paramIndex; i < newFunc->GetFormalCount() - 1; ++i) {
+      formalVec[i] = formalVec[i + 1];
+    }
+    formalVec.resize(formalVec.size() - 1);
+    sym->SetStorageClass(kScAuto);
+    // Fix the paramTypeList && paramTypeAttrs.
+    MIRFuncType *funcType = newFunc->GetMIRFuncType();
+    std::vector<TyIdx> paramTypeList;
+    std::vector<TypeAttrs> paramTypeAttrsList;
+    for (size_t i = 0; i < newFunc->GetParamTypes().size(); i++) {
+      if (i != paramIndex) {
+        paramTypeList.push_back(funcType->GetParamTypeList()[i]);
+        paramTypeAttrsList.push_back(funcType->GetParamAttrsList()[i]);
+      }
+    }
+    MIRSymbol *funcSymbol = newFunc->GetFuncSymbol();
+    DEBUG_ASSERT(funcSymbol != nullptr, "null ptr check");
+    funcSymbol->SetTyIdx(GlobalTables::GetTypeTable().GetOrCreateFunctionType(funcType->GetRetTyIdx(), paramTypeList,
+        paramTypeAttrsList, funcType->IsVarargs(), funcType->GetRetAttrs())->GetTypeIndex());
+    auto *newFuncType = static_cast<MIRFuncType*>(funcSymbol->GetType());
+    newFunc->SetMIRFuncType(newFuncType);
+    // Modify the parameter side effect.
+    ModifyParameterSideEffect(newFunc, paramIndex);
+  }
+  return;
+}
+
+// Clone Function steps:
+// 1. clone the function && replace the condition
+// 2. modify the call site and update the call graph
+void IpaClone::DecideCloneFunction(std::vector<ImpExpr> &result, uint32 paramIndex,
+                                   std::map<uint64_t, std::vector<int64_t>> &evalMap) {
+  uint32 puidx = curFunc->GetPuidx();
+  CalleePair keyPair(puidx, paramIndex);
+  auto &calleeInfo = mirModule->GetCalleeParamAboutInt();
+  uint32 index = 0;
+  for (auto &eval : evalMap) {
+    uint64_t evalValue = eval.first;
+    std::vector<int64_t> calleeValue = eval.second;
+    if (!CheckCostModel(curFunc, paramIndex, calleeValue, static_cast<uint32>(result.size()))) {
+      continue;
+    }
+    if (index > numOfCloneVersions) {
+      break;
+    }
+    std::string newFuncName = curFunc->GetName() + ".clone." + std::to_string(index++);
+    MInline::ConvertPStaticToFStatic(*curFunc);
+    MIRFunction *newFunc = nullptr;
+    if (Options::profileUse && curFunc->GetFuncProfData()) {
+      int64_t clonedSiteFreqs = 0;
+      for (auto &value : calleeValue) {
+        for (auto &callSite : calleeInfo[keyPair][value]) {
+          MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx());
+          uint32 stmtId = callSite.GetStmtId();
+          CallNode *oldCallNode = static_cast<CallNode*>(callerFunc->GetStmtNodeFromMeId(stmtId));
+          if (oldCallNode == nullptr) {
+            continue;
+          }
+          int64_t callsiteFreq = callerFunc->GetFuncProfData()->GetStmtFreq(stmtId);
+          CHECK_FATAL(callsiteFreq >= 0, "sanity check");
+          clonedSiteFreqs += callsiteFreq;
+        }
+      }
+      newFunc = IpaCloneFunctionWithFreq(*curFunc, newFuncName, clonedSiteFreqs);
+    } else {
+      newFunc = IpaCloneFunction(*curFunc, newFuncName);
+    }
+    ReplaceIfCondtion(newFunc, result, evalValue);
+    for (auto &value : calleeValue) {
+      bool optCallerParam = false;
+      if (calleeValue.size() == 1) {
+        optCallerParam = true;
+        // If calleeValue holds just one value, we can add a dassign stmt.
+        RemoveUnneedParameter(newFunc, paramIndex, value);
+      }
+      for (auto &callSite : calleeInfo[keyPair][value]) {
+        MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx());
+        uint32 stmtId = callSite.GetStmtId();
+        CallNode *oldCallNode = static_cast<CallNode*>(callerFunc->GetStmtNodeFromMeId(stmtId));
+        if (oldCallNode == nullptr) {
+          continue;
+        }
+        oldCallNode->SetPUIdx(newFunc->GetPuidx());
+        if (optCallerParam) {
+          for (size_t i = paramIndex; i < oldCallNode->GetNopndSize() - 1; ++i) {
+            oldCallNode->SetNOpndAt(i, oldCallNode->GetNopndAt(i + 1));
+          }
+          oldCallNode->GetNopnd().resize(static_cast<size_t>(oldCallNode->GetNumOpnds() - 1));
+          oldCallNode->SetNumOpnds(static_cast<uint8>(oldCallNode->GetNumOpnds() - 1));
+        }
+      }
+    }
+  }
+}
+
+void IpaClone::ComupteValue(const IntVal &value, const IntVal &paramValue, CompareNode *cond, uint64_t &bitRes) {
+  if (cond->GetOpCode() == OP_gt) {
+    bitRes = (value > paramValue) | (bitRes << 1);
+  } else if (cond->GetOpCode() == OP_eq) {
+    bitRes = (value == paramValue) | (bitRes << 1);
+  } else if (cond->GetOpCode() == OP_lt) {
+    bitRes = (value < paramValue) | (bitRes << 1);
+  } else if (cond->GetOpCode() == OP_ge) {
+    bitRes = (value >= paramValue) | (bitRes << 1);
+  } else if (cond->GetOpCode() == OP_le) {
+    bitRes = (value <= paramValue) | (bitRes << 1);
+  } else if (cond->GetOpCode() == OP_ne) {
+    bitRes = (value != paramValue) | (bitRes << 1);
+  }
+}
+
+void IpaClone::EvalCompareResult(std::vector<ImpExpr> &result, std::map<uint64_t, std::vector<int64_t>> &evalMap,
+                                 std::map<int64, std::vector<CallerSummary>> &summary, uint32 index) {
+  for (auto &it : summary) {
+    int64 value = it.first;
+    uint64_t bitRes = 0;
+    bool runFlag = false;
+    for (auto &expr : result) {
+      StmtNode *stmt = curFunc->GetStmtNodeFromMeId(expr.GetStmtId());
+      if (stmt == nullptr || expr.GetParamIndex() != index) {
+        continue;
+      }
+      runFlag = true;
+      IfStmtNode *ifStmt = static_cast<IfStmtNode*>(stmt);
+      CompareNode *cond = static_cast<CompareNode*>(ifStmt->Opnd(0));
+      if (cond->Opnd(0)->GetOpCode() == OP_intrinsicop &&
+          static_cast<IntrinsicopNode*>(cond->Opnd(0))->GetIntrinsic() == INTRN_C___builtin_expect) {
+        cond = static_cast<CompareNode*>(static_cast<IntrinsicopNode*>(cond->Opnd(0))->Opnd(0));
+      }
+      PrimType primType = cond->GetOpndType();
+      BaseNode *opnd1 = cond->Opnd(1);
+      ConstvalNode *constNode = static_cast<ConstvalNode*>(opnd1);
+      MIRIntConst *constVal = safe_cast<MIRIntConst>(constNode->GetConstVal());
+      DEBUG_ASSERT(constVal, "invalid const type");
+      if (primType != PTY_i64 && primType != PTY_u64 &&
+          primType != PTY_i32 && primType != PTY_u32 &&
+          primType != PTY_i16 && primType != PTY_u16 && primType != PTY_i8 && primType != PTY_u8) {
+        runFlag = false;
+        break;
+      }
+      IntVal paramValue = { constVal->GetValue(), primType };
+      IntVal newValue = { static_cast<uint64>(value), primType };
+      ComupteValue(newValue, paramValue, cond, bitRes);
+    }
+    if (runFlag) {
+      evalMap[bitRes].emplace_back(value);
+    }
+  }
+  return;
+}
+
+void IpaClone::EvalImportantExpression(MIRFunction *func, std::vector<ImpExpr> &result) {
+  int paramSize = static_cast<int>(func->GetFormalCount());
+  uint32 puidx = func->GetPuidx();
+  auto &calleeInfo = mirModule->GetCalleeParamAboutInt();
+  for (int index = 0; index < paramSize; ++index) {
+    CalleePair keyPair(puidx, index);
+    if (calleeInfo.find(keyPair) == calleeInfo.end()) {
+      continue;
+    }
+    std::map<uint64_t, std::vector<int64_t>> evalMap;
+    EvalCompareResult(result, evalMap, calleeInfo[keyPair], static_cast<uint32>(index));
+    // Later: for now we just consider one parameter's important expressions.
+    std::vector<ImpExpr> filterRes;
+    if (!evalMap.empty()) {
+      for (auto &expr : result) {
+        if (expr.GetParamIndex() == static_cast<uint32>(index) &&
+            func->GetStmtNodeFromMeId(expr.GetStmtId()) != nullptr) {
+          filterRes.emplace_back(expr);
+          // Resolve at most kNumOfImpExprUpper important expressions.
+          if (filterRes.size() > kNumOfImpExprUpper) {
+            break;
+          }
+        }
+      }
+      DecideCloneFunction(filterRes, static_cast<uint32>(index), evalMap);
+      return;
+    }
+  }
+}
+
+void IpaClone::CloneNoImportantExpressFunction(MIRFunction *func, uint32 paramIndex) {
+  uint32 puidx = curFunc->GetPuidx();
+  CalleePair keyPair(puidx, paramIndex);
+  auto &calleeInfo = mirModule->GetCalleeParamAboutInt();
+  std::string newFuncName = func->GetName() + ".constprop." + std::to_string(paramIndex);
+  MInline::ConvertPStaticToFStatic(*func);
+  MIRFunction *newFunc = nullptr;
+  if (Options::profileUse && func->GetFuncProfData()) {
+    int64_t clonedSiteFreqs = 0;
+    int64_t value = calleeInfo[keyPair].begin()->first;
+    for (auto &callSite : calleeInfo[keyPair][value]) {
+      MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx());
+      uint32 stmtId = callSite.GetStmtId();
+      CallNode *oldCallNode = static_cast<CallNode*>(callerFunc->GetStmtNodeFromMeId(stmtId));
+      if (oldCallNode == nullptr) {
+        continue;
+      }
+      int64_t callsiteFreq = callerFunc->GetFuncProfData()->GetStmtFreq(stmtId);
+      CHECK_FATAL(callsiteFreq >= 0, "sanity check");
+      clonedSiteFreqs += callsiteFreq;
+    }
+    newFunc = IpaCloneFunctionWithFreq(*func, newFuncName, clonedSiteFreqs);
+  } else {
+    newFunc = IpaCloneFunction(*func, newFuncName);
+  }
+  int64_t value = calleeInfo[keyPair].begin()->first;
+  RemoveUnneedParameter(newFunc, paramIndex, value);
+  for (auto &callSite : calleeInfo[keyPair][value]) {
+    MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx());
+    uint32 stmtId = callSite.GetStmtId();
+    CallNode *oldCallNode = static_cast<CallNode*>(callerFunc->GetStmtNodeFromMeId(stmtId));
+    if (oldCallNode == nullptr) {
+      continue;
+    }
+    oldCallNode->SetPUIdx(newFunc->GetPuidx());
+    for (size_t i = paramIndex; i < oldCallNode->GetNopndSize() - 1; ++i) {
+      oldCallNode->SetNOpndAt(i, oldCallNode->GetNopndAt(i + 1));
+    }
+    oldCallNode->GetNopnd().resize(static_cast<size_t>(oldCallNode->GetNumOpnds() - 1));
+    oldCallNode->SetNumOpnds(static_cast<uint8>(oldCallNode->GetNumOpnds() - 1));
+  }
+}
+
+void IpaClone::DoIpaClone() {
+  InitParams();
+  for (uint32 i = 0; i < GlobalTables::GetFunctionTable().GetFuncTable().size(); ++i) {
+    MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(i);
+    if (func == nullptr) {
+      continue;
+    }
+    curFunc = func;
+    std::map<uint32, std::vector<ImpExpr>> &funcImportantExpr = mirModule->GetFuncImportantExpr();
+    if (funcImportantExpr.find(func->GetPuidx()) != funcImportantExpr.end()) {
+      EvalImportantExpression(func, funcImportantExpr[func->GetPuidx()]);
+    } else {
+      auto &calleeInfo = mirModule->GetCalleeParamAboutInt();
+      for (uint index = 0; index < func->GetFormalCount(); ++index) {
+        CalleePair keyPair(func->GetPuidx(), index);
+        if (calleeInfo.find(keyPair) != calleeInfo.end() && calleeInfo[keyPair].size() == 1 &&
+            (calleeInfo[keyPair].begin())->second.size() > numOfConstpropValue) {
+          CloneNoImportantExpressFunction(func, index);
+          break;
+        }
+      }
+    }
+  }
+}
+
+void M2MIpaClone::GetAnalysisDependence(AnalysisDep &aDep) const {
+  aDep.AddRequired<M2MCallGraph>();
+  aDep.PreservedAllExcept<M2MCallGraph>();
+}
+
+bool M2MIpaClone::PhaseRun(maple::MIRModule &m) {
+  maple::MIRBuilder dexMirBuilder(&m);
+  cl = GetPhaseAllocator()->New<IpaClone>(&m, GetPhaseMemPool(), dexMirBuilder);
+  cl->DoIpaClone();
+  GetAnalysisInfoHook()->ForceEraseAnalysisPhase(m.GetUniqueID(), &M2MCallGraph::id);
+  (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase(&M2MCallGraph::id, m);
+  return true;
+}
+} // namespace maple
diff --git a/ecmascript/mapleall/maple_ipa/src/ipa_collect.cpp b/ecmascript/mapleall/maple_ipa/src/ipa_collect.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..074fcc9b24b48e428378091d481cfeea9339aa8c
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/src/ipa_collect.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "call_graph.h"
+#include "maple_phase.h"
+#include "option.h"
+#include "string_utils.h"
+#include "mir_function.h"
+#include "me_dominance.h"
+#include "ipa_collect.h"
+
+
+namespace maple {
+void CollectIpaInfo::UpdateCaleeParaAboutFloat(MeStmt &meStmt, float paramValue, uint32 index,
+                                               CallerSummary &summary) {
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  MIRFunction &called = callMeStmt->GetTargetFunction();
+  CalleePair calleeKey(called.GetPuidx(), index);
+  std::map<CalleePair, std::map<float, std::vector<CallerSummary>>> &calleeParamAboutFloat =
+      module.GetCalleeParamAboutFloat();
+  calleeParamAboutFloat[calleeKey][paramValue].emplace_back(summary);
+}
+
+void CollectIpaInfo::UpdateCaleeParaAboutDouble(MeStmt &meStmt, double paramValue, uint32 index,
+                                                CallerSummary &summary) {
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  MIRFunction &called = callMeStmt->GetTargetFunction();
+  CalleePair calleeKey(called.GetPuidx(), index);
+  std::map<CalleePair, std::map<double, std::vector<CallerSummary>>> &calleeParamAboutDouble =
+      module.GetCalleeParamAboutDouble();
+  calleeParamAboutDouble[calleeKey][paramValue].emplace_back(summary);
+}
+
+void CollectIpaInfo::UpdateCaleeParaAboutInt(MeStmt &meStmt, int64_t paramValue, uint32 index,
+                                             CallerSummary &summary) {
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  MIRFunction &called = callMeStmt->GetTargetFunction();
+  CalleePair calleeKey(called.GetPuidx(), index);
+  std::map<CalleePair, std::map<int64_t, std::vector<CallerSummary>>> &calleeParamAboutInt =
+      module.GetCalleeParamAboutInt();
+  calleeParamAboutInt[calleeKey][paramValue].emplace_back(summary);
+}
+
+bool CollectIpaInfo::IsConstKindValue(MeExpr *expr) {
+  if (expr->GetMeOp() != kMeOpConst) {
+    return false;
+  }
+  MIRConst *constV = static_cast<ConstMeExpr*>(expr)->GetConstVal();
+  return constV->GetKind() == kConstInt || constV->GetKind() == kConstFloatConst ||
+         constV->GetKind() == kConstDoubleConst;
+}
+
+bool CollectIpaInfo::CheckImpExprStmt(const MeStmt &meStmt) {
+  auto *node = meStmt.GetOpnd(0);
+  return IsConstKindValue(node->GetOpnd(0)) || IsConstKindValue(node->GetOpnd(1));
+}
+
+bool CollectIpaInfo::IsParameterOrUseParameter(const VarMeExpr *varExpr, uint32 &index) {
+  OriginalSt *sym = varExpr->GetOst();
+  MIRSymbol *paramSym = sym->GetMIRSymbol();
+  if (sym->IsFormal() && sym->GetIndirectLev() == 0 && varExpr->IsDefByNo() && !varExpr->IsVolatile()) {
+    for (uint32 i = 0; i < curFunc->GetFormalCount(); i++) {
+      MIRSymbol *formalSt = curFunc->GetFormal(i);
+      if (formalSt != nullptr && paramSym->GetNameStrIdx() == formalSt->GetNameStrIdx()) {
+        index = i;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+// For now we just resolve two cases; we will collect more cases in the future.
+bool CollectIpaInfo::CollectImportantExpression(const MeStmt &meStmt, uint32 &index) {
+  auto *opnd = meStmt.GetOpnd(0);
+  if (opnd->GetOp() == OP_eq || opnd->GetOp() == OP_ne || opnd->GetOp() == OP_gt ||
+      opnd->GetOp() == OP_ge || opnd->GetOp() == OP_lt || opnd->GetOp() == OP_le) {
+    if (CheckImpExprStmt(meStmt)) {
+      auto subOpnd0 = opnd->GetOpnd(0);
+      auto subOpnd1 = opnd->GetOpnd(1);
+      MeExpr *expr = IsConstKindValue(subOpnd0) ? subOpnd1 : subOpnd0;
+      if (expr->GetOp() == OP_dread) {
+        if (IsParameterOrUseParameter(static_cast<VarMeExpr*>(expr), index)) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+void CollectIpaInfo::TraversalMeStmt(MeStmt &meStmt) {
+  Opcode op = meStmt.GetOp();
+  if (meStmt.GetOp() == OP_brfalse || meStmt.GetOp() == OP_brtrue) {
+    uint32 index = 0;
+    if (CollectImportantExpression(meStmt, index)) {
+      ImpExpr imp(meStmt.GetMeStmtId(), index);
+      module.GetFuncImportantExpr()[curFunc->GetPuidx()].emplace_back(imp);
+      return;
+    }
+  }
+  if (op != OP_callassigned && op != OP_call) {
+    return;
+  }
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  MIRFunction &called = callMeStmt->GetTargetFunction();
+  if (called.IsExtern() || called.IsVarargs()) {
+    return;
+  }
+  for (uint32 i = 0; i < callMeStmt->NumMeStmtOpnds() && i < called.GetFormalCount(); ++i) {
+    if (callMeStmt->GetOpnd(i)->GetMeOp() == kMeOpConst) {
+      ConstMeExpr *constExpr = static_cast<ConstMeExpr*>(callMeStmt->GetOpnd(i));
+      MIRSymbol *formalSt = called.GetFormal(i);
+      // For some varargs we can't get the actual type.
+      if (formalSt == nullptr) {
+        continue;
+      }
+      if (constExpr->GetConstVal()->GetKind() == kConstInt) {
+        if (IsPrimitiveInteger(formalSt->GetType()->GetPrimType())) {
+          CallerSummary summary(curFunc->GetPuidx(), callMeStmt->GetMeStmtId());
+          auto *intConst = safe_cast<MIRIntConst>(constExpr->GetConstVal());
+          IntVal value = { intConst->GetValue(), formalSt->GetType()->GetPrimType() };
+          UpdateCaleeParaAboutInt(meStmt, value.GetExtValue(), i, summary);
+        }
+      } else if (constExpr->GetConstVal()->GetKind() == kConstFloatConst) {
+        if (IsPrimitiveFloat(formalSt->GetType()->GetPrimType())) {
+          CallerSummary summary(curFunc->GetPuidx(), callMeStmt->GetMeStmtId());
+          auto *floatConst = safe_cast<MIRFloatConst>(constExpr->GetConstVal());
+          UpdateCaleeParaAboutFloat(meStmt, floatConst->GetValue(), i, summary);
+        }
+      } else if (constExpr->GetConstVal()->GetKind() == kConstDoubleConst) {
+        if (formalSt->GetType()->GetPrimType() == PTY_f64) {
+          CallerSummary summary(curFunc->GetPuidx(), callMeStmt->GetMeStmtId());
+          auto *doubleConst = safe_cast<MIRDoubleConst>(constExpr->GetConstVal());
+          UpdateCaleeParaAboutDouble(meStmt, doubleConst->GetValue(), i, summary);
+        }
+      }
+    }
+  }
+}
+
+void CollectIpaInfo::Perform(const MeFunction &func) {
+  // Pre-order traverse the dominance tree, so that each def is traversed
+  // before its uses.
+  Dominance *dom = static_cast<MEDominance*>(dataMap.GetVaildAnalysisPhase(func.GetUniqueID(),
+      &MEDominance::id))->GetResult();
+  for (auto *bb : dom->GetReversePostOrder()) {
+    if (bb == nullptr) {
+      return;
+    }
+    // Traverse the stmts.
+    for (auto &meStmt : bb->GetMeStmts()) {
+      TraversalMeStmt(meStmt);
+    }
+  }
+}
+
+void CollectIpaInfo::runOnScc(maple::SCCNode<CGNode> &scc) {
+  for (auto *cgNode : scc.GetNodes()) {
+    MIRFunction *func = cgNode->GetMIRFunction();
+    curFunc = func;
+    MeFunction *meFunc = func->GetMeFunc();
+    Perform(*meFunc);
+  }
+}
+
+void SCCCollectIpaInfo::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<SCCPrepare>();
+  aDep.SetPreservedAll();
+}
+
+bool SCCCollectIpaInfo::PhaseRun(maple::SCCNode<CGNode> &scc) {
+  MIRModule *m = ((scc.GetNodes()[0])->GetMIRFunction())->GetModule();
+  AnalysisDataManager *dataMap = GET_ANALYSIS(SCCPrepare, scc);
+  CollectIpaInfo collect(*m, *dataMap);
+  collect.runOnScc(scc);
+  return true;
+}
+}
diff --git a/ecmascript/mapleall/maple_ipa/src/ipa_phase_manager.cpp b/ecmascript/mapleall/maple_ipa/src/ipa_phase_manager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c5168234255c647790cd613333890b686b1cd7bc
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/src/ipa_phase_manager.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_phase_manager.h" +#include "pme_emit.h" +#include "gcov_parser.h" + +#define JAVALANG (mirModule.IsJavaModule()) +#define CLANG (mirModule.IsCModule()) + +namespace maple { +bool IpaSccPM::PhaseRun(MIRModule &m) { + if (theMIRModule->HasPartO2List()) { + return false; + } + SetQuiet(true); + bool oldProp = MeOption::propDuringBuild; + bool oldMerge = MeOption::mergeStmts; + bool oldLayout = MeOption::layoutWithPredict; + MeOption::mergeStmts = false; + MeOption::propDuringBuild = false; + MeOption::layoutWithPredict = false; + DoPhasesPopulate(m); + bool changed = false; + auto admMempool = AllocateMemPoolInPhaseManager("Ipa Phase Manager's Analysis Data Manager mempool"); + auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); + CallGraph *cg = GET_ANALYSIS(M2MCallGraph, m); + // Need reverse sccV + const MapleVector*> &topVec = cg->GetSCCTopVec(); + for (MapleVector*>::const_reverse_iterator it = topVec.rbegin(); it != topVec.rend(); ++it) { + if (!IsQuiet()) { + LogInfo::MapleLogger() << ">>>>>>>>>>> Optimizing SCC ---\n"; + (*it)->Dump(); + } + auto meFuncMP = std::make_unique(memPoolCtrler, "maple_ipa per-scc mempool"); + auto meFuncStackMP = std::make_unique(memPoolCtrler, ""); + bool runScc = false; + for (auto *cgNode : (*it)->GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + runScc = true; + m.SetCurFunction(func); + MemPool *versMP = new ThreadLocalMemPool(memPoolCtrler, "first verst mempool"); + MeFunction &meFunc = *(meFuncMP->New(&m, func, meFuncMP.get(), *meFuncStackMP, versMP, "unknown")); + func->SetMeFunc(&meFunc); + meFunc.PartialInit(); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Preparing Function for scc phase < " << func->GetName() << " > ---\n"; + } + meFunc.IPAPrepare(); + } + if (!runScc) { + continue; + } + for (size_t i = 0; i < phasesSequence.size(); ++i) { + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Run scc " << (curPhase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << curPhase->PhaseName() << " ]---\n"; + } + changed |= RunAnalysisPhase>, SCCNode>( + *curPhase, *serialADM, **it); + } + serialADM->EraseAllAnalysisPhase(); + } + MeOption::mergeStmts = oldMerge; + MeOption::propDuringBuild = oldProp; + MeOption::layoutWithPredict = oldLayout; + return changed; +} + +void IpaSccPM::DoPhasesPopulate(const MIRModule &mirModule) { + (void)mirModule; + if (Options::profileGen) { + AddPhase("sccprofile", true); + } else { + AddPhase("sccprepare", true); + AddPhase("prop_param_type", MeOption::npeCheckMode != SafetyCheckMode::kNoCheck); + AddPhase("prop_return_attr", MeOption::npeCheckMode != SafetyCheckMode::kNoCheck); + AddPhase("collect_ipa_info", true); + AddPhase("sccsideeffect", Options::sideEffect); + AddPhase("sccemit", true); + } +} + +void IpaSccPM::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); + aDep.AddPreserved(); + if (Options::profileUse) { + aDep.AddRequired(); + aDep.AddPreserved(); + } +} + +void SCCPrepare::Dump(const MeFunction &f, const std::string phaseName) { + if (Options::dumpIPA && (f.GetName() == Options::dumpFunc || f.GetName() == "*")) { + LogInfo::MapleLogger() << ">>>>> Dump after " << phaseName << " <<<<<\n"; + f.Dump(false); + LogInfo::MapleLogger() << ">>>>> Dump after End <<<<<\n\n"; + } +} + +bool SCCPrepare::PhaseRun(SCCNode &scc) { + SetQuiet(true); + AddPhase("mecfgbuild", true); + if (Options::profileUse) { + AddPhase("splitcriticaledge", true); + AddPhase("profileUse", true); + } + AddPhase("ssatab", true); + AddPhase("aliasclass", true); + AddPhase("ssa", true); + AddPhase("irmapbuild", true); + AddPhase("hprop", true); + + // Not like other phasemanager which use temp mempool to hold analysis results generated from the sub phases. + // Here we use GetManagerMemPool which lives longer than this phase(manager) itself to hold all the analysis result. + // So the following phase can access the result in this phase. + result = GetManagerMemPool()->New(*GetPhaseMemPool()); + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + MIRModule &m = *func->GetModule(); + m.SetCurFunction(func); + MeFunction &meFunc = *func->GetMeFunc(); + for (size_t i = 0; i < phasesSequence.size(); ++i) { + const MaplePhaseInfo *phase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << " >> Prepare " << (phase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << phase->PhaseName() << " ] <<\n"; + } + if (phase->IsAnalysis()) { + (void)RunAnalysisPhase(*phase, *result, meFunc, 1); + } else { + (void)RunTransformPhase(*phase, *result, meFunc, 1); + } + Dump(meFunc,phase->PhaseName()); + } + } + return false; +} + +void SCCEmit::Dump(const MeFunction &f, const std::string phaseName) { + if (Options::dumpIPA && (f.GetName() == Options::dumpFunc || f.GetName() == "*")) { + LogInfo::MapleLogger() << ">>>>> Dump after " << phaseName << " <<<<<\n"; + f.DumpFunctionNoSSA(); + LogInfo::MapleLogger() << ">>>>> Dump after End <<<<<\n\n"; + } +} + +bool SCCEmit::PhaseRun(SCCNode &scc) { + SetQuiet(true); + auto *map = GET_ANALYSIS(SCCPrepare, scc); + if (map == nullptr) { + return false; + } + auto admMempool = AllocateMemPoolInPhaseManager("Ipa Phase Manager's Analysis Data Manager mempool"); + auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); + serialADM->CopyAnalysisResultFrom(*map); + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->GetBody() == nullptr) { + continue; + } + MIRModule &m = *func->GetModule(); + m.SetCurFunction(func); + const MaplePhaseInfo *phase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&MEPreMeEmission::id); + if (!IsQuiet()) { + LogInfo::MapleLogger() << " ---call " << (phase->IsAnalysis() ? "analysis" : "transform") + << " Phase [ " << phase->PhaseName() << " ]---\n"; + } + (void)RunAnalysisPhase(*phase, *serialADM, *func->GetMeFunc()); + Dump(*func->GetMeFunc(), phase->PhaseName()); + delete func->GetMeFunc()->GetPmeMempool(); + func->GetMeFunc()->SetPmeMempool(nullptr); + } + serialADM->EraseAllAnalysisPhase(); + return false; +} + +void SCCEmit::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} + +bool SCCProfile::PhaseRun(SCCNode &scc) { + SetQuiet(true); + AddPhase("mecfgbuild", true); + if (Options::profileGen) { + AddPhase("splitcriticaledge", true); + AddPhase("profileGen", true); + } + AddPhase("emitforipa", true); + // Not like other phasemanager which use temp mempool to hold analysis results generated from the sub phases. + // Here we use GetManagerMemPool which lives longer than this phase(manager) itself to hold all the analysis result. + // So the following phase can access the result in this phase. + result = GetManagerMemPool()->New(*GetPhaseMemPool()); + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + MIRModule &m = *func->GetModule(); + m.SetCurFunction(func); + MeFunction &meFunc = *func->GetMeFunc(); + for (size_t i = 0; i < phasesSequence.size(); ++i) { + const MaplePhaseInfo *phase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << " >> Prepare " << (phase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << phase->PhaseName() << " ] <<\n"; + } + if (phase->IsAnalysis()) { + (void)RunAnalysisPhase(*phase, *result, meFunc, 1); + } else { + (void)RunTransformPhase(*phase, *result, meFunc, 1); + } + } + } + return false; +} + +MAPLE_ANALYSIS_PHASE_REGISTER(SCCPrepare, sccprepare) +MAPLE_ANALYSIS_PHASE_REGISTER(SCCProfile, sccprofile) +MAPLE_ANALYSIS_PHASE_REGISTER(SCCCollectIpaInfo, collect_ipa_info); +MAPLE_ANALYSIS_PHASE_REGISTER(SCCPropReturnAttr, prop_return_attr); +MAPLE_TRANSFORM_PHASE_REGISTER(SCCPropParamType, prop_param_type); +MAPLE_ANALYSIS_PHASE_REGISTER(SCCSideEffect, sccsideeffect) +MAPLE_ANALYSIS_PHASE_REGISTER(SCCEmit, sccemit) +} // namespace maple diff --git a/ecmascript/mapleall/maple_ipa/src/ipa_side_effect.cpp b/ecmascript/mapleall/maple_ipa/src/ipa_side_effect.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f913b2489c1b48913f5b1d88089e5d080c2c15c2 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/src/ipa_side_effect.cpp @@ -0,0 +1,442 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_side_effect.h" +#include "func_desc.h" +namespace maple { +const std::map whiteList = { +#include "func_desc.def" +}; + +const FuncDesc &SideEffect::GetFuncDesc(MeFunction &f) { + return SideEffect::GetFuncDesc(*f.GetMirFunc()); +} + +const FuncDesc &SideEffect::GetFuncDesc(MIRFunction &f) { + auto it = whiteList.find(f.GetName()); + if (it != whiteList.end()) { + return it->second; + } + return f.GetFuncDesc(); +} + +const std::map &SideEffect::GetWhiteList() { + return whiteList; +} + +void SideEffect::ParamInfoUpdater(size_t vstIdx, const PI &calleeParamInfo) { + for (size_t callerFormalIdx = 0; callerFormalIdx < vstsValueAliasWithFormal.size(); ++callerFormalIdx) { + auto &formalValueAlias = vstsValueAliasWithFormal[callerFormalIdx]; + if (formalValueAlias.find(vstIdx) != formalValueAlias.end()) { + curFuncDesc->SetParamInfoNoBetterThan(callerFormalIdx, calleeParamInfo); + } + } +} + +void SideEffect::PropInfoFromOpnd(MeExpr &opnd, const PI &calleeParamInfo) { + MeExpr &base = opnd.GetAddrExprBase(); + OriginalSt *ost = nullptr; + switch (base.GetMeOp()) { + case kMeOpVar: { + auto &dread = static_cast(base); + ost = dread.GetOst(); + for (auto vstIdx : ost->GetVersionsIndices()) { + ParamInfoUpdater(vstIdx, calleeParamInfo); + } + break; + } + case kMeOpAddrof: { + AddrofMeExpr &addrofMeExpr = static_cast(base); + // As in CollectFormalOst, this is conservative to make sure it's right. + // For example: + // void callee(int *p) : write memory that p points to. + // call callee(&x) : this will modify x but we prop info of 'write memory' to x. 
+      ost = meFunc->GetMeSSATab()->GetOriginalStFromID(addrofMeExpr.GetOstIdx());
+      DEBUG_ASSERT(ost != nullptr, "null ptr check");
+      for (auto vstIdx : ost->GetVersionsIndices()) {
+        ParamInfoUpdater(vstIdx, calleeParamInfo);
+      }
+      break;
+    }
+    case kMeOpOp: {
+      if (base.GetOp() == OP_select) {
+        PropInfoFromOpnd(*base.GetOpnd(kSecondOpnd), calleeParamInfo);
+        PropInfoFromOpnd(*base.GetOpnd(kThirdOpnd), calleeParamInfo);
+      }
+      break;
+    }
+    default:
+      break;
+  }
+}
+
+void SideEffect::PropParamInfoFromCallee(const MeStmt &call, MIRFunction &callee) {
+  const FuncDesc &desc = callee.GetFuncDesc();
+  size_t skipFirstOpnd = kOpcodeInfo.IsICall(call.GetOp()) ? 1 : 0;
+  for (size_t formalIdx = 0; formalIdx < callee.GetFormalCount(); ++formalIdx) {
+    MeExpr *opnd = call.GetOpnd(formalIdx + skipFirstOpnd);
+    PropInfoFromOpnd(*opnd, desc.GetParamInfo(formalIdx));
+  }
+}
+
+void SideEffect::PropAllInfoFromCallee(const MeStmt &call, MIRFunction &callee) {
+  const FuncDesc &desc = callee.GetFuncDesc();
+  if (!desc.IsPure() && !desc.IsConst()) {
+    curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+  }
+  if (desc.IsPure()) {
+    curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure);
+  }
+  if (desc.IsConst()) {
+    curFuncDesc->SetFuncInfoNoBetterThan(FI::kConst);
+  }
+  PropParamInfoFromCallee(call, callee);
+}
+
+void SideEffect::DealWithStmt(MeStmt &stmt) {
+  for (size_t i = 0; i < stmt.NumMeStmtOpnds(); ++i) {
+    DealWithOperand(stmt.GetOpnd(i));
+  }
+  RetMeStmt *ret = safe_cast<RetMeStmt>(&stmt);
+  if (ret != nullptr) {
+    DealWithReturn(*ret);
+  }
+  CallMeStmt *call = safe_cast<CallMeStmt>(&stmt);
+  if (call != nullptr) {
+    MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx());
+    PropAllInfoFromCallee(*call, *calleeFunc);
+  }
+  IcallMeStmt *icall = safe_cast<IcallMeStmt>(&stmt);
+  if (icall != nullptr) {
+    MIRFunction *mirFunc = meFunc->GetMirFunc();
+    CGNode *icallCGNode = callGraph->GetCGNode(mirFunc);
+    CallInfo callInfo(stmt.GetMeStmtId());
+    auto &callees = icallCGNode->GetCallee();
+    auto it = callees.find(&callInfo);
+    if (it == callees.end() || it->second->empty()) {
+      // No candidates found; process conservatively.
+      for (size_t formalIdx = 1; formalIdx < icall->NumMeStmtOpnds(); ++formalIdx) {
+        PropInfoFromOpnd(*icall->GetOpnd(formalIdx), PI::kUnknown);
+      }
+    } else {
+      for (auto *cgNode : *it->second) {
+        MIRFunction *calleeFunc = cgNode->GetMIRFunction();
+        PropAllInfoFromCallee(*icall, *calleeFunc);
+      }
+    }
+  }
+  if (stmt.GetMuList() == nullptr) {
+    return;
+  }
+  // This may cause some kWriteMemoryOnly to be regarded as kReadWriteMemory.
+  // Example: the {a.f = b} muList on a return stmt will regard param a as used.
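+  // (Hedged illustration: for a hypothetical 'return a.f', the mu of 'a' on the
+  // return stmt makes param 'a' count as used/read here, even if the function
+  // otherwise only wrote to it.)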
+  for (auto &mu : *stmt.GetMuList()) {
+    DealWithOst(mu.first);
+  }
+}
+
+void SideEffect::DealWithOst(OStIdx ostIdx) {
+  OriginalSt *ost = meFunc->GetMeSSATab()->GetSymbolOriginalStFromID(ostIdx);
+  DealWithOst(ost);
+}
+
+void SideEffect::DealWithOst(const OriginalSt *ost) {
+  if (ost == nullptr) {
+    return;
+  }
+  for (auto &pair : analysisLater) {
+    if (pair.first == ost) {
+      curFuncDesc->SetParamInfoNoBetterThan(pair.second, PI::kReadWriteMemory);
+      return;
+    }
+  }
+}
+
+void SideEffect::DealWithOperand(MeExpr *expr) {
+  if (expr == nullptr) {
+    return;
+  }
+  for (uint32 i = 0; i < expr->GetNumOpnds(); ++i) {
+    DealWithOperand(expr->GetOpnd(i));
+  }
+  switch (expr->GetMeOp()) {
+    case kMeOpVar: {
+      ScalarMeExpr *dread = static_cast<ScalarMeExpr*>(expr);
+      OriginalSt *ost = dread->GetOst();
+      DealWithOst(ost);
+      break;
+    }
+    case kMeOpIvar: {
+      auto *base = static_cast<IvarMeExpr*>(expr)->GetBase();
+      if (base->GetMeOp() == kMeOpVar) {
+        ScalarMeExpr *dread = static_cast<ScalarMeExpr*>(base);
+        DealWithOst(dread->GetOst());
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return;
+}
+
+void SideEffect::DealWithReturn(const RetMeStmt &retMeStmt) {
+  if (retMeStmt.NumMeStmtOpnds() == 0) {
+    return;
+  }
+  MeExpr *ret = retMeStmt.GetOpnd(0);
+  if (ret->GetPrimType() == PTY_agg) {
+    curFuncDesc->SetReturnInfo(RI::kUnknown);
+    return;
+  }
+  if (!IsAddress(ret->GetPrimType())) {
+    return;
+  }
+  if (ret->GetType() != nullptr && ret->GetType()->IsMIRPtrType()) {
+    auto *ptrType = static_cast<MIRPtrType*>(ret->GetType());
+    if (ptrType->GetPointedType()->GetPrimType() == PTY_agg) {
+      curFuncDesc->SetReturnInfo(RI::kUnknown);
+      return;
+    }
+  }
+  OriginalSt *retOst = nullptr;
+  size_t vstIdxOfRet = 0;
+  if (ret->IsScalar()) {
+    retOst = static_cast<ScalarMeExpr*>(ret)->GetOst();
+    vstIdxOfRet = static_cast<ScalarMeExpr*>(ret)->GetVstIdx();
+  } else if (ret->GetMeOp() == kMeOpIvar) {
+    auto *base = static_cast<IvarMeExpr*>(ret)->GetBase();
+    if (base->IsScalar()) {
+      retOst = static_cast<ScalarMeExpr*>(base)->GetOst();
+      vstIdxOfRet = static_cast<ScalarMeExpr*>(base)->GetVstIdx();
+    }
+  }
+  if (retOst == nullptr) {
+    return;
+  }
+  if (retOst->IsFormal()) {
+    curFuncDesc->SetReturnInfo(RI::kUnknown);
+    return;
+  }
+  std::set<size_t> result;
+  alias->GetValueAliasSetOfVst(vstIdxOfRet, result);
+  for (auto valueAliasVstIdx : result) {
+    auto *meExpr = meFunc->GetIRMap()->GetVerst2MeExprTableItem(static_cast<uint32>(valueAliasVstIdx));
+    // If the meExpr of valueAliasVstIdx was not created in IRMap, it cannot occur in the HSSA MeFunction.
+    if (meExpr == nullptr) {
+      continue;
+    }
+    OriginalSt *aliasOst = nullptr;
+    if (meExpr->GetMeOp() == kMeOpAddrof) {
+      auto ostIdx = static_cast<AddrofMeExpr*>(meExpr)->GetOstIdx();
+      aliasOst = meFunc->GetMeSSATab()->GetOriginalStFromID(ostIdx);
+    } else if (meExpr->IsScalar()) {
+      aliasOst = static_cast<ScalarMeExpr*>(meExpr)->GetOst();
+    } else {
+      CHECK_FATAL(false, "not supported meExpr");
+    }
+    DEBUG_ASSERT(aliasOst != nullptr, "null ptr check");
+    if (aliasOst->IsFormal()) {
+      curFuncDesc->SetReturnInfo(RI::kUnknown);
+    }
+  }
+}
+
+void SideEffect::SolveVarArgs(MeFunction &f) {
+  MIRFunction *func = f.GetMirFunc();
+  if (func->IsVarargs()) {
+    for (size_t i = func->GetFormalCount(); i < kMaxParamCount; ++i) {
+      curFuncDesc->SetParamInfoNoBetterThan(i, PI::kUnknown);
+    }
+    curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+  }
+}
+
+void SideEffect::CollectAllLevelOst(size_t vstIdx, std::set<size_t> &result) {
+  result.insert(vstIdx);
+  auto *nextLevelOsts = meFunc->GetMeSSATab()->GetNextLevelOsts(vstIdx);
+  if (nextLevelOsts == nullptr) {
+    return;
+  }
+  for (auto *nlOst : *nextLevelOsts) {
+    for (auto vstIdOfNextLevelOst : nlOst->GetVersionsIndices()) {
+
+
+void SideEffect::SolveVarArgs(MeFunction &f) {
+    MIRFunction *func = f.GetMirFunc();
+    if (func->IsVarargs()) {
+        for (size_t i = func->GetFormalCount(); i < kMaxParamCount; ++i) {
+            curFuncDesc->SetParamInfoNoBetterThan(i, PI::kUnknown);
+        }
+        curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+    }
+}
+
+void SideEffect::CollectAllLevelOst(size_t vstIdx, std::set<size_t> &result) {
+    result.insert(vstIdx);
+    auto *nextLevelOsts = meFunc->GetMeSSATab()->GetNextLevelOsts(vstIdx);
+    if (nextLevelOsts == nullptr) {
+        return;
+    }
+    for (auto *nlOst : *nextLevelOsts) {
+        for (auto vstIdOfNextLevelOst : nlOst->GetVersionsIndices()) {
+            CollectAllLevelOst(vstIdOfNextLevelOst, result);
+        }
+    }
+}
+
+void SideEffect::CollectFormalOst(MeFunction &f) {
+    MIRFunction *func = f.GetMirFunc();
+    for (auto *ost : f.GetMeSSATab()->GetOriginalStTable().GetOriginalStVector()) {
+        if (ost == nullptr) {
+            continue;
+        }
+        if (!ost->IsLocal()) {
+            curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure);
+            if (ost->GetVersionsIndices().size() > 1) {
+                curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+            }
+        }
+        if (ost->IsFormal() && ost->GetIndirectLev() == 0) {
+            auto idx = func->GetFormalIndex(ost->GetMIRSymbol());
+            if (idx >= kMaxParamCount) {
+                continue;
+            }
+            // Put the level -1 ost into the set as well, so the result stays conservative:
+            // when all vstsValueAliasWithFormal are solved later, every ost in the set is
+            // treated as if it were at level 0.
+            std::set<size_t> vstValueAliasFormal;
+            if (ost->IsAddressTaken()) {
+                CollectAllLevelOst(ost->GetPointerVstIdx(), vstsValueAliasWithFormal[idx]);
+                alias->GetValueAliasSetOfVst(ost->GetPointerVstIdx(), vstValueAliasFormal);
+            }
+            CollectAllLevelOst(ost->GetZeroVersionIndex(), vstsValueAliasWithFormal[idx]);
+            alias->GetValueAliasSetOfVst(ost->GetZeroVersionIndex(), vstValueAliasFormal);
+            for (size_t vstIdx : vstValueAliasFormal) {
+                auto *meExpr = meFunc->GetIRMap()->GetVerst2MeExprTableItem(static_cast<uint32>(vstIdx));
+                if (meExpr == nullptr || meExpr->GetMeOp() == kMeOpAddrof) {
+                    // No corresponding ScalarMeExpr has been created in the IRMap for vstIdx.
+                    CollectAllLevelOst(vstIdx, vstsValueAliasWithFormal[idx]);
+                    continue;
+                }
+                CHECK_FATAL(meExpr->IsScalar(), "not supported MeExpr type");
+                CHECK_FATAL(static_cast<ScalarMeExpr *>(meExpr)->GetVstIdx() == vstIdx,
+                            "VersionSt index must be equal");
+                auto *aliasOst = static_cast<ScalarMeExpr *>(meExpr)->GetOst();
+                if (aliasOst != ost) {
+                    for (auto vstIdxOfAliasOst : aliasOst->GetVersionsIndices()) {
+                        CollectAllLevelOst(vstIdxOfAliasOst, vstsValueAliasWithFormal[idx]);
+                    }
+                }
+            }
+        }
+    }
+}
+
+void SideEffect::AnalysisFormalOst() {
+    for (size_t formalIndex = 0; formalIndex < vstsValueAliasWithFormal.size(); ++formalIndex) {
+        for (size_t vstIdx : vstsValueAliasWithFormal[formalIndex]) {
+            curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadSelfOnly);
+            auto *meExpr = meFunc->GetIRMap()->GetVerst2MeExprTableItem(static_cast<uint32>(vstIdx));
+            if (meExpr == nullptr) {
+                continue;
+            }
+            if (meExpr->GetMeOp() == kMeOpAddrof) {
+                curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kUnknown);
+                curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+                continue;
+            }
+            CHECK_FATAL(meExpr->IsScalar(), "must be me scalar");
+            auto *ost = static_cast<ScalarMeExpr *>(meExpr)->GetOst();
+            if (ost->GetIndirectLev() == 0 && ost->GetVersionsIndices().size() == 1) {
+                curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadSelfOnly);
+                continue;
+            }
+            if (ost->GetIndirectLev() == 1) {
+                if (ost->GetVersionsIndices().size() == 1) {
+                    curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadMemoryOnly);
+                    curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure);
+                } else {
+                    analysisLater.insert(std::make_pair(ost, formalIndex));
+                    curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kWriteMemoryOnly);
+                    curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+                }
+                continue;
+            }
+            if (ost->GetIndirectLev() > 1) {
+                if (ost->GetVersionsIndices().size() == 1) {
+                    curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadMemoryOnly);
+                    curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure);
+                } else {
+                    curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kUnknown);
+                    curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown);
+                }
+            }
+        }
+    }
+}
+
+bool 
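`SetParamInfoNoBetterThan` and `SetFuncInfoNoBetterThan` enforce the same monotone ordering for parameters and whole functions. The sketch below makes that concrete under an assumed ordering; the real `PI` enum in func_desc.h may order its members differently, only the degrade-only update is the point.

```cpp
#include <cstdio>

// Hypothetical ordering of parameter-info states, strongest first.
enum class PI {
    kReadSelfOnly = 0,
    kReadMemoryOnly = 1,
    kWriteMemoryOnly = 2,
    kReadWriteMemory = 3,
    kUnknown = 4
};

struct ParamDescModel {
    PI info = PI::kReadSelfOnly;
    void SetParamInfoNoBetterThan(PI pi) {
        if (static_cast<int>(pi) > static_cast<int>(info)) {
            info = pi;  // only ever move toward weaker knowledge
        }
    }
};

int main() {
    ParamDescModel p;
    p.SetParamInfoNoBetterThan(PI::kReadMemoryOnly);  // an indirect-level-1 read is observed
    p.SetParamInfoNoBetterThan(PI::kReadSelfOnly);    // ignored: it would improve the state
    std::printf("final state: %d\n", static_cast<int>(p.info));  // prints 1 (kReadMemoryOnly)
    return 0;
}
```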
SideEffect::Perform(MeFunction &f) {
+    MIRFunction *func = f.GetMirFunc();
+    curFuncDesc = &func->GetFuncDesc();
+    FuncDesc oldDesc = *curFuncDesc;
+    if (func->GetFuncDesc().IsConfiged()) {
+        return false;
+    }
+    SolveVarArgs(f);
+    CollectFormalOst(f);
+    AnalysisFormalOst();
+    for (auto *bb : dom->GetReversePostOrder()) {
+        for (auto &stmt : bb->GetMeStmts()) {
+            DealWithStmt(stmt);
+        }
+    }
+    return !curFuncDesc->Equals(oldDesc);
+}
+
+bool SCCSideEffect::PhaseRun(SCCNode<CGNode> &scc) {
+    for (CGNode *node : scc.GetNodes()) {
+        MIRFunction *func = node->GetMIRFunction();
+        if (func != nullptr && !func->GetFuncDesc().IsConfiged()) {
+            func->InitFuncDescToBest();
+            func->GetFuncDesc().SetReturnInfo(RI::kUnknown);
+            if (func->GetParamSize() > kMaxParamCount) {
+                func->GetFuncDesc().SetFuncInfoNoBetterThan(FI::kUnknown);
+            }
+        }
+    }
+    bool changed = true;
+    while (changed) {
+        changed = false;
+        auto *map = GET_ANALYSIS(SCCPrepare, scc);
+        for (CGNode *node : scc.GetNodes()) {
+            MIRFunction *func = node->GetMIRFunction();
+            if (func == nullptr) {
+                continue;
+            }
+            MeFunction *meFunc = func->GetMeFunc();
+            if (meFunc == nullptr || meFunc->GetCfg()->NumBBs() == 0) {
+                continue;
+            }
+            auto *phase = map->GetVaildAnalysisPhase(meFunc->GetUniqueID(), &MEDominance::id);
+            Dominance *dom = static_cast<MEDominance *>(phase)->GetResult();
+            phase = map->GetVaildAnalysisPhase(meFunc->GetUniqueID(), &MEAliasClass::id);
+            AliasClass *alias = static_cast<MEAliasClass *>(phase)->GetResult();
+            phase = map->GetVaildAnalysisPhase(meFunc->GetUniqueID(), &MESSATab::id);
+            SSATab *meSSATab = static_cast<MESSATab *>(phase)->GetResult();
+            CHECK_FATAL(meSSATab == meFunc->GetMeSSATab(), "IPA_PM may be wrong.");
+            MaplePhase *it = GetAnalysisInfoHook()->GetOverIRAnalyisData<M2MCallGraph, MIRModule>(*func->GetModule());
+            CallGraph *cg = static_cast<M2MCallGraph *>(it)->GetResult();
+            SideEffect se(meFunc, dom, alias, cg);
+            changed |= se.Perform(*meFunc);
+        }
+    }
+    if (Options::dumpIPA) {
+        for (CGNode *node : scc.GetNodes()) {
+            MIRFunction *func = node->GetMIRFunction();
+            FuncDesc &desc = func->GetFuncDesc();
+            std::cout << "funcid: " << func->GetPuidx() << " funcName: " << func->GetName() << ":\n";
+            desc.Dump();
+        }
+    }
+    return false;
+}
+
+void SCCSideEffect::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+    aDep.AddRequired<SCCPrepare>();
+}
+}  // namespace maple
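`PhaseRun` re-runs `Perform` over every function in the SCC until one full pass changes no summary, which terminates because each summary can only degrade in a finite lattice. A toy fixed-point loop with the same shape, using plain `int` summaries instead of `FuncDesc`, is sketched below.

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Toy fixed-point iteration over an SCC of three mutually dependent functions.
// Each "summary" is an int that can only grow, capped at 3, so the loop must stop.
int main() {
    std::vector<int> desc = {0, 0, 0};  // one summary per SCC member
    auto transfer = [&](size_t i) {
        // a function's summary depends on its callee's summary inside the SCC
        int next = std::min(desc[(i + 1) % desc.size()] + 1, 3);
        if (next != desc[i]) {
            desc[i] = next;
            return true;  // summary changed, another pass is needed
        }
        return false;
    };
    bool changed = true;
    int passes = 0;
    while (changed) {
        changed = false;
        for (size_t i = 0; i < desc.size(); ++i) {
            changed |= transfer(i);
        }
        ++passes;
    }
    std::printf("stable after %d passes: %d %d %d\n", passes, desc[0], desc[1], desc[2]);
    return 0;
}
```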
diff --git a/ecmascript/mapleall/maple_ipa/src/old/do_ipa_escape_analysis.cpp b/ecmascript/mapleall/maple_ipa/src/old/do_ipa_escape_analysis.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..43cb079d1ba0ccdb1ad63ed53d80e4d5af80defe
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/src/old/do_ipa_escape_analysis.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "do_ipa_escape_analysis.h"
+#include <iostream>
+
+namespace maple {
+#ifdef NOT_USED
+AnalysisResult *DoIpaEA::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *mrm) {
+    if (func == nullptr) {
+        return nullptr;
+    }
+    MIRFunction *mirFunc = func->GetMirFunc();
+    const std::map<GStrIdx, EAConnectionGraph *> &summaryMap = mirFunc->GetModule()->GetEASummary();
+    if (!mirFunc->GetModule()->IsInIPA() && summaryMap.size() == 0) {
+        return nullptr;
+    }
+    CHECK_FATAL(mrm != nullptr, "Needs module result manager for ipa");
+    KlassHierarchy *kh = static_cast<KlassHierarchy *>(mrm->GetAnalysisResult(MoPhase_CHA, &func->GetMIRModule()));
+    CHECK_FATAL(kh != nullptr, "KlassHierarchy phase has problem");
+    MeIRMap *irMap = static_cast<MeIRMap *>(m->GetAnalysisResult(MeFuncPhase_IRMAPBUILD, func));
+    CHECK_FATAL(irMap != nullptr, "irMap phase has problem");
+    CallGraph *pcg = nullptr;
+    if (mirFunc->GetModule()->IsInIPA()) {
+        pcg = static_cast<CallGraph *>(mrm->GetAnalysisResult(MoPhase_CALLGRAPH_ANALYSIS, &func->GetMIRModule()));
+    }
+    MemPool *eaMemPool = memPoolCtrler.NewMemPool(PhaseName(), false /* isLocalPool */);
+    mirFunc->GetModule()->SetCurFunction(mirFunc);
+    if (IPAEscapeAnalysis::kDebug) {
+        LogInfo::MapleLogger() << "=======IPAEA BEGIN======== " << mirFunc->GetName() << std::endl;
+    }
+    IPAEscapeAnalysis ipaEA(kh, irMap, func, eaMemPool, pcg);
+    ipaEA.ConstructConnGraph();
+    func->GetMirFunc()->GetEACG()->TrimGlobalNode();
+    if (!mirFunc->GetModule()->IsInIPA()) {
+        auto it = summaryMap.find(func->GetMirFunc()->GetNameStrIdx());
+        if (it != summaryMap.end() && it->second != nullptr) {
+            it->second->DeleteEACG();
+        }
+    }
+    if (!mirFunc->GetModule()->IsInIPA() && IPAEscapeAnalysis::kDebug) {
+        func->GetMirFunc()->GetEACG()->CountObjEAStatus();
+    }
+    if (IPAEscapeAnalysis::kDebug) {
+        LogInfo::MapleLogger() << "=======IPAEA END========" << mirFunc->GetName() << std::endl;
+    }
+    delete eaMemPool;
+    return nullptr;
+}
+
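Outside the IPA phase, `DoIpaEA::Run` drops a function's stale connection-graph summary once a fresh graph has been built. The stand-alone sketch below mirrors only that bookkeeping; `ConnGraphStub`, the string key, and the `inIPA` flag are hypothetical stand-ins for the real `EAConnectionGraph`, `GStrIdx`, and module state.

```cpp
#include <cstdio>
#include <map>
#include <memory>
#include <string>

// Stand-in for EAConnectionGraph: only the lifecycle matters here.
struct ConnGraphStub {
    bool trimmed = false;
    void TrimGlobalNode() { trimmed = true; }
};

int main() {
    // Stand-in for the module-level EA summary map.
    std::map<std::string, std::unique_ptr<ConnGraphStub>> eaSummary;
    eaSummary["foo"] = std::make_unique<ConnGraphStub>();

    bool inIPA = false;        // mirrors mirFunc->GetModule()->IsInIPA()
    ConnGraphStub rebuilt;     // graph just rebuilt by ConstructConnGraph()
    rebuilt.TrimGlobalNode();  // GlobalEscape nodes are collapsed first

    if (!inIPA) {
        auto it = eaSummary.find("foo");
        if (it != eaSummary.end() && it->second != nullptr) {
            it->second.reset();  // corresponds to DeleteEACG() on the stale summary
        }
    }
    std::printf("stale summary cleared: %s\n", eaSummary["foo"] == nullptr ? "yes" : "no");
    return 0;
}
```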
+AnalysisResult *DoIpaEAOpt::Run(MeFunction *func, MeFuncResultMgr *mgr, ModuleResultMgr *mrm) {
+    if (func == nullptr) {
+        return nullptr;
+    }
+    MIRFunction *mirFunc = func->GetMirFunc();
+    const std::map<GStrIdx, EAConnectionGraph *> &summaryMap = mirFunc->GetModule()->GetEASummary();
+    if (!mirFunc->GetModule()->IsInIPA() && summaryMap.size() == 0) {
+        return nullptr;
+    }
+    CHECK_FATAL(mrm != nullptr, "Needs module result manager for ipa");
+    KlassHierarchy *kh = static_cast<KlassHierarchy *>(mrm->GetAnalysisResult(MoPhase_CHA, &func->GetMIRModule()));
+    CHECK_FATAL(kh != nullptr, "KlassHierarchy phase has problem");
+    MeIRMap *irMap = static_cast<MeIRMap *>(mgr->GetAnalysisResult(MeFuncPhase_IRMAPBUILD, func));
+    CHECK_FATAL(irMap != nullptr, "irMap phase has problem");
+    mgr->InvalidAnalysisResult(MeFuncPhase_MELOOP, func);
+    IdentifyLoops *meLoop = static_cast<IdentifyLoops *>(mgr->GetAnalysisResult(MeFuncPhase_MELOOP, func));
+    CHECK_FATAL(meLoop != nullptr, "meLoop phase has problem");
+    meLoop->MarkBB();
+    CallGraph *pcg = nullptr;
+    if (mirFunc->GetModule()->IsInIPA()) {
+        pcg = static_cast<CallGraph *>(mrm->GetAnalysisResult(MoPhase_CALLGRAPH_ANALYSIS, &func->GetMIRModule()));
+    }
+    MemPool *eaMemPool = memPoolCtrler.NewMemPool(PhaseName(), false /* isLocalPool */);
+    mirFunc->GetModule()->SetCurFunction(mirFunc);
+    if (IPAEscapeAnalysis::kDebug) {
+        LogInfo::MapleLogger() << "=======IPAEAOPT BEGIN======== " << mirFunc->GetName() << std::endl;
+    }
+    IPAEscapeAnalysis ipaEA(kh, irMap, func, eaMemPool, pcg);
+    ipaEA.DoOptimization();
+    if (IPAEscapeAnalysis::kDebug) {
+        LogInfo::MapleLogger() << "=======IPAEAOPT END========" << mirFunc->GetName() << std::endl;
+    }
+    delete eaMemPool;
+    return nullptr;
+}
+#endif
+}  // namespace maple
diff --git a/ecmascript/mapleall/maple_ipa/src/old/ea_connection_graph.cpp b/ecmascript/mapleall/maple_ipa/src/old/ea_connection_graph.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..765f10c8ed894bf9fc0051b05d420dce3458a9be
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/src/old/ea_connection_graph.cpp
@@ -0,0 +1,1059 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "ea_connection_graph.h"
+
+namespace maple {
+constexpr maple::uint32 kInvalid = 0xffffffff;
+void EACGBaseNode::CheckAllConnectionInNodes() {
+#ifdef DEBUG
+    for (EACGBaseNode *inNode : in) {
+        ASSERT_NOT_NULL(eaCG->nodes[inNode->id - 1]);
+        DEBUG_ASSERT(eaCG->nodes[inNode->id - 1] == inNode, "must be inNode");
+    }
+    for (EACGBaseNode *outNode : out) {
+        ASSERT_NOT_NULL(eaCG->nodes[outNode->id - 1]);
+        DEBUG_ASSERT(eaCG->nodes[outNode->id - 1] == outNode, "must be outNode");
+    }
+    for (EACGObjectNode *obj : pointsTo) {
+        ASSERT_NOT_NULL(eaCG->nodes[obj->id - 1]);
+        DEBUG_ASSERT(eaCG->nodes[obj->id - 1] == obj, "must be obj");
+    }
+    if (IsFieldNode()) {
+        for (EACGObjectNode *obj : static_cast<EACGFieldNode *>(this)->GetBelongsToObj()) {
+            ASSERT_NOT_NULL(eaCG->nodes[obj->id - 1]);
+            DEBUG_ASSERT(eaCG->nodes[obj->id - 1] == obj, "must be obj");
+        }
+    }
+#endif
+}
+
+bool EACGBaseNode::AddOutNode(EACGBaseNode &newOut) {
+    if (out.find(&newOut) != out.end()) {
+        return false;
+    }
+    bool newIsLocal = newOut.UpdateEAStatus(eaStatus);
+    if (eaStatus == kGlobalEscape && pointsTo.size() > 0) {
+        if (newIsLocal) {
+            eaCG->SetCGUpdateFlag();
+        }
+        return newIsLocal;
+    }
+    (void)out.insert(&newOut);
+    (void)newOut.in.insert(this);
+    DEBUG_ASSERT(newOut.pointsTo.size() != 0, "must be greater than zero");
+    bool hasChanged = UpdatePointsTo(newOut.pointsTo);
+    eaCG->SetCGUpdateFlag();
+    return hasChanged;
+}
+
+void EACGBaseNode::PropagateEAStatusForNode(const EACGBaseNode *subRoot) const {
+    for (EACGBaseNode *outNode : out) {
+        (void)outNode->UpdateEAStatus(eaStatus);
+    }
+}
+
+std::string EACGBaseNode::GetName(const IRMap *irMap) const {
+    std::string name;
+    if (irMap == nullptr || meExpr == nullptr) {
+        name += std::to_string(id);
+    } else {
+        name += std::to_string(id);
+        name += "\\n";
+        if (meExpr->GetMeOp() == kMeOpVar) {
+            VarMeExpr *varMeExpr = static_cast<VarMeExpr *>(meExpr);
+            const MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol();
+            name += ((sym->GetStIdx().IsGlobal() ? "$" : "%") + sym->GetName() + "\\nmx" +
+                     std::to_string(meExpr->GetExprID()) + " (field)" + std::to_string(varMeExpr->GetFieldID()));
+        } else if (meExpr->GetMeOp() == kMeOpIvar) {
+            IvarMeExpr *ivarMeExpr = static_cast<IvarMeExpr *>(meExpr);
+            MeExpr *base = ivarMeExpr->GetBase();
+            VarMeExpr *varMeExpr = nullptr;
+            if (base->GetMeOp() == kMeOpVar) {
+                varMeExpr = static_cast<VarMeExpr *>(base);
+            } else {
+                name += std::to_string(id);
+                return name;
+            }
+            const MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol();
+            name += (std::string("base :") + (sym->GetStIdx().IsGlobal() ? 
"$" : "%") + sym->GetName() + "\\nmx" + + std::to_string(meExpr->GetExprID()) + " (field)" + std::to_string(ivarMeExpr->GetFieldID())); + } else if (meExpr->GetOp() == OP_gcmalloc || meExpr->GetOp() == OP_gcmallocjarray) { + name += "mx" + std::to_string(meExpr->GetExprID()); + } + } + return name; +} + +bool EACGBaseNode::UpdatePointsTo(const std::set &cPointsTo) { + size_t oldPtSize = pointsTo.size(); + pointsTo.insert(cPointsTo.begin(), cPointsTo.end()); + if (oldPtSize == pointsTo.size()) { + return false; + } + for (EACGObjectNode *pt : pointsTo) { + pt->Insert2PointsBy(this); + } + for (EACGBaseNode *pred : in) { + (void)pred->UpdatePointsTo(pointsTo); + } + return true; +} + +void EACGBaseNode::GetNodeFormatInDot(std::string &label, std::string &color) const { + switch (GetEAStatus()) { + case kNoEscape: + label += "NoEscape"; + color = "darkgreen"; + break; + case kArgumentEscape: + label += "ArgEscape"; + color = "brown"; + break; + case kReturnEscape: + label += "RetEscape"; + color = "orange"; + break; + case kGlobalEscape: + label += "GlobalEscape"; + color = "red"; + break; + } +} + +bool EACGBaseNode::CanIgnoreRC() const { + for (auto obj : pointsTo) { + if (!obj->GetIgnorRC()) { + return false; + } + } + return true; +} + +void EACGObjectNode::CheckAllConnectionInNodes() { +#ifdef DEBUG + for (EACGBaseNode *inNode : in) { + ASSERT_NOT_NULL(eaCG->nodes[inNode->id - 1]); + DEBUG_ASSERT(eaCG->nodes[inNode->id - 1] == inNode, "must be inNode"); + } + for (EACGBaseNode *outNode : out) { + ASSERT_NOT_NULL(eaCG->nodes[outNode->id - 1]); + DEBUG_ASSERT(eaCG->nodes[outNode->id - 1] == outNode, "must be outNode"); + } + for (EACGBaseNode *pBy : pointsBy) { + ASSERT_NOT_NULL(eaCG->nodes[pBy->id - 1]); + DEBUG_ASSERT(eaCG->nodes[pBy->id - 1] == pBy, "must be pBy"); + } + for (auto fieldPair : fieldNodes) { + EACGFieldNode *field = fieldPair.second; + DEBUG_ASSERT(field->fieldID == fieldPair.first, "must be fieldPair.first"); + ASSERT_NOT_NULL(eaCG->nodes[field->id - 1]); + DEBUG_ASSERT(eaCG->nodes[field->id - 1] == field, "must be filed"); + } +#endif +} + +bool EACGObjectNode::IsPointedByFieldNode() const { + for (EACGBaseNode *pBy : pointsBy) { + if (pBy->IsFieldNode()) { + return true; + } + } + return false; +} + +bool EACGObjectNode::AddOutNode(EACGBaseNode &newOut) { + DEBUG_ASSERT(newOut.IsFieldNode(), "must be fieldNode"); + EACGFieldNode *field = static_cast(&newOut); + fieldNodes[field->GetFieldID()] = field; + (void)newOut.UpdateEAStatus(eaStatus); + field->AddBelongTo(this); + return true; +} + +bool EACGObjectNode::ReplaceByGlobalNode() { + DEBUG_ASSERT(out.size() == 0, "must be zero"); + for (EACGBaseNode *node : pointsBy) { + node->pointsTo.erase(this); + (void)node->pointsTo.insert(eaCG->GetGlobalObject()); + } + pointsBy.clear(); + for (EACGBaseNode *inNode : in) { + (void)inNode->out.erase(this); + (void)inNode->out.insert(eaCG->GetGlobalObject()); + } + in.clear(); + for (auto fieldPair : fieldNodes) { + EACGFieldNode *field = fieldPair.second; + field->belongsTo.erase(this); + } + fieldNodes.clear(); + if (meExpr != nullptr) { + eaCG->expr2Nodes[meExpr]->clear(); + eaCG->expr2Nodes[meExpr]->insert(eaCG->GetGlobalObject()); + } + DEBUG_ASSERT(eaCG->nodes[id - 1] == this, "must be"); + eaCG->nodes[id - 1] = nullptr; + return true; +} + +void EACGObjectNode::PropagateEAStatusForNode(const EACGBaseNode *subRoot) const { + for (auto fieldNodePair : fieldNodes) { + EACGFieldNode *field = fieldNodePair.second; + (void)field->UpdateEAStatus(eaStatus); + } +} + +void 
EACGObjectNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + " Object\\n"; + std::string color; + GetNodeFormatInDot(label, color); + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + fout << name << " [shape=box, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << "];\n"; + for (auto fieldPair : fieldNodes) { + EACGBaseNode *field = fieldPair.second; + fout << name << "->" << field->GetName(nullptr) << ";" << "\n"; + } + for (auto fieldPair : fieldNodes) { + EACGBaseNode *field = fieldPair.second; + field->DumpDotFile(fout, dumped, dumpPt, irMap); + } +} + +void EACGRefNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + " Reference\\n"; + if (IsStaticRef()) { + label += "Static\\n"; + } + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << "];" << "\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";" << "\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";" << "\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGRefNode::ReplaceByGlobalNode() { + for (EACGBaseNode *inNode : in) { + DEBUG_ASSERT(inNode->id > 3, "must be greater than three"); // the least valid idx is 3 + (void)inNode->out.erase(this); + (void)inNode->out.insert(eaCG->GetGlobalReference()); + } + in.clear(); + for (EACGBaseNode *outNode : out) { + (void)outNode->in.erase(this); + } + out.clear(); + for (EACGObjectNode *base : pointsTo) { + base->EraseNodeFromPointsBy(this); + } + pointsTo.clear(); + if (meExpr != nullptr) { + eaCG->expr2Nodes[meExpr]->clear(); + eaCG->expr2Nodes[meExpr]->insert(eaCG->GetGlobalReference()); + } + DEBUG_ASSERT(eaCG->nodes[id - 1] == this, "must be this"); + eaCG->nodes[id - 1] = nullptr; + return true; +} + +void EACGPointerNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + "\\nPointer Indirect Level : " + std::to_string(indirectLevel) + "\\n"; + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << "];" << "\n"; + for (EACGBaseNode *outNode : out) { + fout << name << "->" << outNode->GetName(nullptr) << " [style =\"dotted\", color = \"blue\"];" << "\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } +} + +void EACGActualNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + if (IsReturn()) { + label = GetName(irMap) + "\\nRet Idx : " + 
std::to_string(GetArgIndex()) + "\\n";
+    } else {
+        label = GetName(irMap) + "\\nArg Idx : " + std::to_string(GetArgIndex()) +
+                " Call Site : " + std::to_string(GetCallSite()) + "\\n";
+    }
+    std::string style;
+    if (IsPhantom()) {
+        style = "dotted";
+    } else {
+        style = "bold";
+    }
+    std::string color;
+    GetNodeFormatInDot(label, color);
+    fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << "];\n";
+    if (dumpPt) {
+        for (auto obj : pointsTo) {
+            fout << name << "->" << obj->GetName(nullptr) << ";\n";
+        }
+        for (auto obj : pointsTo) {
+            obj->DumpDotFile(fout, dumped, dumpPt, irMap);
+        }
+    } else {
+        for (auto outNode : out) {
+            std::string edgeStyle;
+            if (!outNode->IsObjectNode()) {
+                edgeStyle = " [style =\"dotted\"]";
+            }
+            fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";\n";
+        }
+        for (auto outNode : out) {
+            outNode->DumpDotFile(fout, dumped, dumpPt, irMap);
+        }
+    }
+}
+
+bool EACGActualNode::ReplaceByGlobalNode() {
+    DEBUG_ASSERT(callSiteInfo == kInvalid, "must be invalid");
+    DEBUG_ASSERT(out.size() == 1, "the size of out must be one");
+    DEBUG_ASSERT(pointsTo.size() == 1, "the size of pointsTo must be one");
+    for (EACGBaseNode *inNode : in) {
+        inNode->out.erase(this);
+    }
+    in.clear();
+    return false;
+}
+
+void EACGFieldNode::DumpDotFile(std::ostream &fout, std::map<EACGBaseNode *, bool> &dumped, bool dumpPt,
+                                const IRMap *irMap) {
+    if (dumped[this]) {
+        return;
+    }
+    dumped[this] = true;
+    std::string name = GetName(nullptr);
+    std::string label;
+    label = GetName(irMap) + "\\nFIdx : " + std::to_string(GetFieldID()) + "\\n";
+    std::string color;
+    GetNodeFormatInDot(label, color);
+    std::string style;
+    if (IsPhantom()) {
+        style = "dotted";
+    } else {
+        style = "bold";
+    }
+    fout << name << " [shape=circle, label=\"" << label << "\", fontcolor=" << color << ", style=" << style <<
+        ", margin=0];\n";
+    if (dumpPt) {
+        for (auto obj : pointsTo) {
+            fout << name << "->" << obj->GetName(nullptr) << ";\n";
+        }
+        for (auto obj : pointsTo) {
+            obj->DumpDotFile(fout, dumped, dumpPt, irMap);
+        }
+    } else {
+        for (auto outNode : out) {
+            std::string edgeStyle;
+            if (!outNode->IsObjectNode()) {
+                edgeStyle = " [style =\"dotted\"]";
+            }
+            fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";\n";
+        }
+        for (auto outNode : out) {
+            outNode->DumpDotFile(fout, dumped, dumpPt, irMap);
+        }
+    }
+}
+
+bool EACGFieldNode::ReplaceByGlobalNode() {
+    for (EACGObjectNode *obj : pointsTo) {
+        obj->pointsBy.erase(this);
+    }
+    pointsTo.clear();
+    (void)pointsTo.insert(eaCG->GetGlobalObject());
+    for (EACGBaseNode *outNode : out) {
+        outNode->in.erase(this);
+    }
+    out.clear();
+    (void)out.insert(eaCG->GetGlobalObject());
+    bool canDelete = true;
+    std::set<EACGObjectNode *> tmp = belongsTo;
+    for (EACGObjectNode *obj : tmp) {
+        if (obj->GetEAStatus() != kGlobalEscape) {
+            canDelete = false;
+        } else {
+            belongsTo.erase(obj);
+        }
+    }
+    if (canDelete) {
+        DEBUG_ASSERT(eaCG->nodes[id - 1] == this, "must be this");
+        eaCG->nodes[id - 1] = nullptr;
+        for (EACGBaseNode *inNode : in) {
+            DEBUG_ASSERT(!inNode->IsObjectNode(), "must not be an object node");
+            inNode->out.erase(this);
+            (void)inNode->out.insert(eaCG->globalField);
+        }
+        for (auto exprPair : eaCG->expr2Nodes) {
+            size_t eraseSize = exprPair.second->erase(this);
+            if (eraseSize != 0 && exprPair.first->GetMeOp() != kMeOpIvar && exprPair.first->GetMeOp() != kMeOpOp) {
+                DEBUG_ASSERT(false, "must be kMeOpIvar or kMeOpOp");
+            }
+            if (exprPair.second->size() == 0) {
+                exprPair.second->insert(eaCG->globalField);
+            }
+        }
+        in.clear();
+        return true;
+    }
+    return false;
+}
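All of the `ReplaceByGlobalNode` overrides follow one redirection pattern: once a node is GlobalEscape, its incident edges are rewired onto a singleton global sentinel and the node itself becomes removable. A minimal sketch of that rewiring with stand-in types (not the real `EACGBaseNode` hierarchy) follows.

```cpp
#include <cstdio>
#include <set>

// Stand-in node: just in/out adjacency sets.
struct Node {
    std::set<Node *> in, out;
};

// Redirect every predecessor of `victim` to `global`, then detach `victim`.
void ReplaceByGlobal(Node &victim, Node &global) {
    for (Node *pred : victim.in) {
        pred->out.erase(&victim);
        pred->out.insert(&global);  // predecessors now reach the global sentinel
    }
    victim.in.clear();
    for (Node *succ : victim.out) {
        succ->in.erase(&victim);    // successors forget the victim
    }
    victim.out.clear();
}

int main() {
    Node a, victim, global;
    a.out.insert(&victim);
    victim.in.insert(&a);
    ReplaceByGlobal(victim, global);
    std::printf("a -> global: %s\n", a.out.count(&global) != 0 ? "yes" : "no");
    return 0;
}
```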
+
+void EAConnectionGraph::DeleteEACG() const {
+    for (EACGBaseNode *node : nodes) {
+        if (node == nullptr) {
+            continue;
+        }
+        delete node;
+    }
+}
+
+void EAConnectionGraph::TrimGlobalNode() const {
+    for (EACGBaseNode *node : nodes) {
+        if (node == nullptr) {
+            continue;
+        }
+        constexpr int leastIdx = 3;
+        if (node->id <= leastIdx) {
+            continue;
+        }
+        bool canDelete = false;
+        if (node->GetEAStatus() == kGlobalEscape) {
+            canDelete = node->ReplaceByGlobalNode();
+        }
+#ifdef DEBUG
+        node->CheckAllConnectionInNodes();
+#endif
+        if (canDelete) {
+            delete node;
+        }
+    }
+}
+
+void EAConnectionGraph::InitGlobalNode() {
+    globalObj = CreateObjectNode(nullptr, kNoEscape, true, TyIdx(0));
+    globalRef = CreateReferenceNode(nullptr, kNoEscape, true);
+    (void)globalRef->AddOutNode(*globalObj);
+    (void)globalRef->AddOutNode(*globalRef);
+    globalField = CreateFieldNode(nullptr, kNoEscape, -1, globalObj, true);  // -1 denotes the global field
+    (void)globalField->AddOutNode(*globalObj);
+    (void)globalField->AddOutNode(*globalRef);
+    (void)globalField->AddOutNode(*globalField);
+    (void)globalRef->AddOutNode(*globalField);
+    globalObj->eaStatus = kGlobalEscape;
+    globalField->eaStatus = kGlobalEscape;
+    globalRef->eaStatus = kGlobalEscape;
+}
+
+EACGObjectNode *EAConnectionGraph::CreateObjectNode(MeExpr *expr, EAStatus initialEas, bool isPh, TyIdx tyIdx) {
+    EACGObjectNode *newObjNode =
+        new (std::nothrow) EACGObjectNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isPh);
+    ASSERT_NOT_NULL(newObjNode);
+    nodes.push_back(newObjNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newObjNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+    }
+    return newObjNode;
+}
+
+EACGPointerNode *EAConnectionGraph::CreatePointerNode(MeExpr *expr, EAStatus initialEas, int indirectLevel) {
+    EACGPointerNode *newPointerNode =
+        new (std::nothrow) EACGPointerNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, indirectLevel);
+    ASSERT_NOT_NULL(newPointerNode);
+    nodes.push_back(newPointerNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newPointerNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+    }
+    return newPointerNode;
+}
+
+EACGRefNode *EAConnectionGraph::CreateReferenceNode(MeExpr *expr, EAStatus initialEas, bool isStatic) {
+    EACGRefNode *newRefNode =
+        new (std::nothrow) EACGRefNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isStatic);
+    ASSERT_NOT_NULL(newRefNode);
+    nodes.push_back(newRefNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newRefNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+        if (expr->GetMeOp() != kMeOpVar && expr->GetMeOp() != kMeOpAddrof &&
+            expr->GetMeOp() != kMeOpReg && expr->GetMeOp() != kMeOpOp) {
+            DEBUG_ASSERT(false, "must be kMeOpVar, kMeOpAddrof, kMeOpReg or kMeOpOp");
+        }
+    }
+    return newRefNode;
+}
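The `Create*Node` factories share one registration discipline: the first node created for a `MeExpr` allocates its `expr2Nodes` slot, and creating a second non-field node for the same expression is treated as a bug. The sketch below models that invariant with plain `std::` containers instead of the Maple allocator types; `Expr` and `Node` are stand-ins.

```cpp
#include <cassert>
#include <map>
#include <memory>
#include <set>
#include <vector>

struct Expr {};
struct Node {};

std::map<Expr *, std::set<Node *>> expr2Nodes;   // stand-in for the MapleMap member
std::vector<std::unique_ptr<Node>> nodes;        // owning node table

Node *CreateNodeFor(Expr *expr) {
    nodes.push_back(std::make_unique<Node>());
    Node *n = nodes.back().get();
    if (expr != nullptr) {
        // Mirrors DEBUG_ASSERT(false, "must find expr") on double registration.
        assert(expr2Nodes.find(expr) == expr2Nodes.end() && "expr already has a node");
        expr2Nodes[expr].insert(n);
    }
    return n;
}

int main() {
    Expr e;
    Node *n = CreateNodeFor(&e);
    assert(expr2Nodes[&e].count(n) == 1);  // lookup finds exactly the created node
    return 0;
}
```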
+
+void EAConnectionGraph::TouchCallSite(uint32 callSiteInfo) {
+    CHECK_FATAL(callSite2Nodes.find(callSiteInfo) != callSite2Nodes.end(), "find failed");
+    if (callSite2Nodes[callSiteInfo] == nullptr) {
+        MapleVector<EACGBaseNode *> *tmp = alloc->GetMemPool()->New<MapleVector<EACGBaseNode *>>(alloc->Adapter());
+        callSite2Nodes[callSiteInfo] = tmp;
+    }
+}
+
+EACGActualNode *EAConnectionGraph::CreateActualNode(EAStatus initialEas, bool isReturn, bool isPh,
+                                                    uint8 argIdx, uint32 callSiteInfo) {
+    MeExpr *expr = nullptr;
+    DEBUG_ASSERT(isPh, "must be phantom");
+    DEBUG_ASSERT(callSiteInfo != 0, "must not be zero");
+    EACGActualNode *newActNode = new (std::nothrow) EACGActualNode(
+        mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isReturn, isPh, argIdx, callSiteInfo);
+    ASSERT_NOT_NULL(newActNode);
+    nodes.push_back(newActNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newActNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+    }
+    if (callSiteInfo != kInvalid) {
+        DEBUG_ASSERT(callSite2Nodes[callSiteInfo] != nullptr, "must be touched before");
+        callSite2Nodes[callSiteInfo]->push_back(newActNode);
+#ifdef DEBUG
+        CheckArgNodeOrder(*callSite2Nodes[callSiteInfo]);
+#endif
+    } else {
+        funcArgNodes.push_back(newActNode);
+    }
+    return newActNode;
+}
+
+EACGFieldNode *EAConnectionGraph::CreateFieldNode(MeExpr *expr, EAStatus initialEas, FieldID fId,
+                                                  EACGObjectNode *belongTo, bool isPh) {
+    EACGFieldNode *newFieldNode = new (std::nothrow) EACGFieldNode(
+        mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, fId, belongTo, isPh);
+    ASSERT_NOT_NULL(newFieldNode);
+    nodes.push_back(newFieldNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+        }
+        expr2Nodes[expr]->insert(newFieldNode);
+        if (expr->GetMeOp() != kMeOpIvar && expr->GetMeOp() != kMeOpOp) {
+            DEBUG_ASSERT(false, "must be kMeOpIvar or kMeOpOp");
+        }
+    }
+    return newFieldNode;
+}
+
+EACGBaseNode *EAConnectionGraph::GetCGNodeFromExpr(MeExpr *me) {
+    if (expr2Nodes.find(me) == expr2Nodes.end()) {
+        return nullptr;
+    }
+    return *(expr2Nodes[me]->begin());
+}
+
+void EAConnectionGraph::UpdateExprOfNode(EACGBaseNode &node, MeExpr *me) {
+    if (expr2Nodes.find(me) == expr2Nodes.end()) {
+        expr2Nodes[me] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+        expr2Nodes[me]->insert(&node);
+    } else {
+        if (node.IsFieldNode()) {
+            expr2Nodes[me]->insert(&node);
+        } else {
+            if (expr2Nodes[me]->find(&node) == expr2Nodes[me]->end()) {
+                CHECK_FATAL(false, "must be field node");
+            }
+        }
+    }
+    node.SetMeExpr(*me);
+}
+
+void EAConnectionGraph::UpdateExprOfGlobalRef(MeExpr *me) {
+    UpdateExprOfNode(*globalRef, me);
+}
+
+EACGActualNode *EAConnectionGraph::GetReturnNode() const {
+    if (funcArgNodes.size() == 0) {
+        return nullptr;
+    }
+    EACGActualNode *ret = static_cast<EACGActualNode *>(funcArgNodes[funcArgNodes.size() - 1]);
+    if (ret->IsReturn()) {
+        return ret;
+    }
+    return nullptr;
+}
+#ifdef DEBUG
+void EAConnectionGraph::CheckArgNodeOrder(MapleVector<EACGBaseNode *> &funcArgV) {
+    uint8 preIndex = 0;
+    for (size_t i = 0; i < funcArgV.size(); ++i) {
+        DEBUG_ASSERT(funcArgV[i]->IsActualNode(), "must be ActualNode");
+        EACGActualNode *actNode = static_cast<EACGActualNode *>(funcArgV[i]);
+        if (i == funcArgV.size() - 1) {
+            if (actNode->IsReturn()) {
+                continue;
+            }
+            DEBUG_ASSERT(actNode->GetArgIndex() >= preIndex, "must be no less than preIndex");
+        } else {
+            DEBUG_ASSERT(!actNode->IsReturn(), "must not be return");
+            DEBUG_ASSERT(actNode->GetArgIndex() >= preIndex, "must be no less than preIndex");
+        }
+        preIndex = actNode->GetArgIndex();
+    }
+}
+#endif +bool EAConnectionGraph::ExprCanBeOptimized(MeExpr &expr) { + if (expr2Nodes.find(&expr) == expr2Nodes.end()) { + MeExpr *rhs = nullptr; + if (expr.GetMeOp() == kMeOpVar) { + DEBUG_ASSERT(static_cast(&expr)->GetDefBy() == kDefByStmt, "must be kDefByStmt"); + DEBUG_ASSERT(static_cast(&expr)->GetDefStmt()->GetOp() == OP_dassign, "must be OP_dassign"); + MeStmt *defStmt = static_cast(&expr)->GetDefStmt(); + DassignMeStmt *dassignStmt = static_cast(defStmt); + rhs = dassignStmt->GetRHS(); + } else if (expr.GetMeOp() == kMeOpReg) { + DEBUG_ASSERT(static_cast(&expr)->GetDefBy() == kDefByStmt, "must be kDefByStmt"); + DEBUG_ASSERT(static_cast(&expr)->GetDefStmt()->GetOp() == OP_regassign, "must be OP_regassign"); + MeStmt *defStmt = static_cast(&expr)->GetDefStmt(); + AssignMeStmt *regassignStmt = static_cast(defStmt); + rhs = regassignStmt->GetRHS(); + } else { + CHECK_FATAL(false, "impossible"); + } + DEBUG_ASSERT(expr2Nodes.find(rhs) != expr2Nodes.end(), "impossible"); + expr = *rhs; + } + MapleSet &nodesTmp = *expr2Nodes[&expr]; + + for (EACGBaseNode *node : nodesTmp) { + for (EACGObjectNode *obj : node->GetPointsToSet()) { + if (obj->GetEAStatus() != kNoEscape && obj->GetEAStatus() != kReturnEscape) { + return false; + } + } + } + return true; +} + +MapleVector *EAConnectionGraph::GetCallSiteArgNodeVector(uint32 callSite) { + CHECK_FATAL(callSite2Nodes.find(callSite) != callSite2Nodes.end(), "find failed"); + ASSERT_NOT_NULL(callSite2Nodes[callSite]); + return callSite2Nodes[callSite]; +} + +// if we have scc of connection graph, it will be more efficient. +void EAConnectionGraph::PropogateEAStatus() { + bool oldStatus = CGHasUpdated(); + do { + UnSetCGUpdateFlag(); + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + if (node->IsObjectNode()) { + EACGObjectNode *obj = static_cast(node); + for (auto fieldPair : obj->GetFieldNodeMap()) { + EACGBaseNode *field = fieldPair.second; + (void)field->UpdateEAStatus(obj->GetEAStatus()); + } + } else { + for (EACGBaseNode *pointsToNode : node->GetPointsToSet()) { + (void)pointsToNode->UpdateEAStatus(node->GetEAStatus()); + } + } + } + DEBUG_ASSERT(!CGHasUpdated(), "must be Updated"); + } while (CGHasUpdated()); + RestoreStatus(oldStatus); +} + +const MapleVector *EAConnectionGraph::GetFuncArgNodeVector() const { + return &funcArgNodes; +} + +// this func is called from callee context +void EAConnectionGraph::UpdateEACGFromCaller(const MapleVector &callerCallSiteArg, + const MapleVector &calleeFuncArg) { + DEBUG_ASSERT(abs(static_cast(callerCallSiteArg.size()) - static_cast(calleeFuncArg.size())) <= 1, "greater than"); + + UnSetCGUpdateFlag(); + for (uint32 i = 0; i < callerCallSiteArg.size(); ++i) { + EACGBaseNode *callerNode = callerCallSiteArg[i]; + ASSERT_NOT_NULL(callerNode); + DEBUG_ASSERT(callerNode->IsActualNode(), "must be ActualNode"); + if ((i == callerCallSiteArg.size() - 1) && static_cast(callerNode)->IsReturn()) { + continue; + } + bool hasGlobalEA = false; + for (EACGObjectNode *obj : callerNode->GetPointsToSet()) { + if (obj->GetEAStatus() == kGlobalEscape) { + hasGlobalEA = true; + break; + } + } + if (hasGlobalEA) { + EACGBaseNode *calleeNode = (calleeFuncArg)[i]; + for (EACGObjectNode *obj : calleeNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + } + } + if (CGHasUpdated()) { + PropogateEAStatus(); + } + TrimGlobalNode(); +} + +void EAConnectionGraph::DumpDotFile(const IRMap *irMap, bool dumpPt, MapleVector *dumpVec) { + if (dumpVec == nullptr) { + dumpVec = &nodes; + } + 
std::filebuf fb; + std::string outFile = GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx) + "-connectiongraph.dot"; + fb.open(outFile, std::ios::trunc | std::ios::out); + CHECK_FATAL(fb.is_open(), "open file failed"); + std::ostream cgDotFile(&fb); + cgDotFile << "digraph connectiongraph{\n"; + std::map dumped; + for (auto node : nodes) { + dumped[node] = false; + } + for (EACGBaseNode *node : *dumpVec) { + if (node == nullptr) { + continue; + } + if (dumped[node]) { + continue; + } + node->DumpDotFile(cgDotFile, dumped, dumpPt, irMap); + dumped[node] = true; + } + cgDotFile << "}\n"; + fb.close(); +} + +void EAConnectionGraph::CountObjEAStatus() const { + int sum = 0; + int eaCount[4]; // There are four EAStatus. + for (size_t i = 0; i < 4; ++i) { + eaCount[i] = 0; + } + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + + if (node->IsObjectNode()) { + EACGObjectNode *objNode = static_cast(node); + if (!objNode->IsPhantom()) { + CHECK_FATAL(objNode->locInfo != nullptr, "Impossible"); + MIRType *type = nullptr; + const MeExpr *expr = objNode->GetMeExpr(); + CHECK_FATAL(expr != nullptr, "Impossible"); + if (expr->GetOp() == OP_gcmalloc || expr->GetOp() == OP_gcpermalloc) { + TyIdx tyIdx = static_cast(expr)->GetTyIdx(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } else { + TyIdx tyIdx = static_cast(expr)->GetTyIdx(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } + LogInfo::MapleLogger() << "[LOCATION] [" << objNode->locInfo->GetModName() << " " << + objNode->locInfo->GetFileId() << " " << objNode->locInfo->GetLineId() << " " << + EscapeName(objNode->GetEAStatus()) << " " << expr->GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << "]\n"; + ++sum; + ++eaCount[node->GetEAStatus()]; + } + } + } + LogInfo::MapleLogger() << "[gcmalloc object statistics] " << + GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx) << " " << + "Gcmallocs: " << sum << " " << "NoEscape: " << eaCount[kNoEscape] << " " << + "RetEscape: " << eaCount[kReturnEscape] << " " << "ArgEscape: " << eaCount[kArgumentEscape] << " " << + "GlobalEscape: " << eaCount[kGlobalEscape] << "\n"; +} + +void EAConnectionGraph::RestoreStatus(bool old) { + if (old) { + SetCGHasUpdated(); + } else { + UnSetCGUpdateFlag(); + } +} + +// Update caller's ConnectionGraph using callee's summary information. +// If the callee's summary is not found, we just mark all the pointsTo nodes of caller's actual node to GlobalEscape. +// Otherwise, we do these steps: +// +// 1, update caller nodes using callee's summary, new node might be added into caller's CG in this step. +// +// 2, update caller edges using callee's summary, new points-to edge might be added into caller's CG in this step. 
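+// A size mismatch of exactly one between the caller and callee vectors is tolerated because
+// the trailing actual node is the optional return slot: e.g. the caller records
+// {arg0, arg1, ret} while the summary of a void callee only records {arg0, arg1}.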
+bool EAConnectionGraph::MergeCG(MapleVector &caller, const MapleVector *callee) { + TrimGlobalNode(); + bool cgChanged = false; + bool oldStatus = CGHasUpdated(); + UnSetCGUpdateFlag(); + if (callee == nullptr) { + for (EACGBaseNode *actualInCaller : caller) { + for (EACGObjectNode *p : actualInCaller->GetPointsToSet()) { + (void)p->UpdateEAStatus(EAStatus::kGlobalEscape); + } + } + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + TrimGlobalNode(); + return cgChanged; + } + size_t callerSize = caller.size(); + size_t calleeSize = callee->size(); + if (callerSize > calleeSize) { + DEBUG_ASSERT((callerSize - calleeSize) <= 1, "must be one in EAConnectionGraph::MergeCG()"); + } else { + DEBUG_ASSERT((calleeSize - callerSize) <= 1, "must be one in EAConnectionGraph::MergeCG()"); + } + if (callerSize == 0 || calleeSize == 0) { + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + return cgChanged; + } + if ((callerSize != calleeSize) && + (callerSize != calleeSize + 1 || static_cast(callee->back())->IsReturn()) && + (callerSize != calleeSize - 1 || !static_cast(callee->back())->IsReturn())) { + DEBUG_ASSERT(false, "Impossible"); + } + + callee2Caller.clear(); + UpdateCallerNodes(caller, *callee); + UpdateCallerEdges(); + UpdateCallerRetNode(caller, *callee); + callee2Caller.clear(); + + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + TrimGlobalNode(); + return cgChanged; +} + +void EAConnectionGraph::AddMaps2Object(EACGObjectNode *caller, EACGObjectNode *callee) { + if (callee2Caller.find(callee) == callee2Caller.end()) { + std::set callerSet; + callee2Caller[callee] = callerSet; + } + (void)callee2Caller[callee].insert(caller); +} + +void EAConnectionGraph::UpdateCallerRetNode(MapleVector &caller, + const MapleVector &callee) { + EACGActualNode *lastInCaller = static_cast(caller.back()); + EACGActualNode *lastInCallee = static_cast(callee.back()); + if (!lastInCaller->IsReturn()) { + return; + } + CHECK_FATAL(lastInCaller->GetOutSet().size() == 1, "Impossible"); + for (EACGBaseNode *callerRetNode : lastInCaller->GetOutSet()) { + for (EACGObjectNode *calleeRetNode : lastInCallee->GetPointsToSet()) { + for (EACGObjectNode *objInCaller : callee2Caller[calleeRetNode]) { + auto pointsToSet = callerRetNode->GetPointsToSet(); + if (pointsToSet.find(objInCaller) == pointsToSet.end()) { + (void)callerRetNode->AddOutNode(*objInCaller); + } + } + } + } +} + +// Update caller node by adding some nodes which are mapped from callee. +void EAConnectionGraph::UpdateCallerNodes(const MapleVector &caller, + const MapleVector &callee) { + const size_t callerSize = caller.size(); + const size_t calleeSize = callee.size(); + const size_t actualCount = ((callerSize < calleeSize) ? callerSize : calleeSize); + bool firstTime = true; + + for (size_t i = 0; i < actualCount; ++i) { + EACGBaseNode *actualInCaller = caller.at(i); + EACGBaseNode *actualInCallee = callee.at(i); + UpdateNodes(*actualInCallee, *actualInCaller, firstTime); + } +} + +// Update caller edges using information from callee. 
+void EAConnectionGraph::UpdateCallerEdges() { + std::set set; + for (auto pair : callee2Caller) { + (void)set.insert(pair.first); + } + for (EACGObjectNode *p : set) { + for (auto tempPair : p->GetFieldNodeMap()) { + int32 fieldID = tempPair.first; + EACGBaseNode *fieldNode = tempPair.second; + for (EACGObjectNode *q : fieldNode->GetPointsToSet()) { + UpdateCallerEdgesInternal(p, fieldID, q); + } + } + } +} + +// Update caller edges using information of given ObjectNode from callee. +void EAConnectionGraph::UpdateCallerEdgesInternal(EACGObjectNode *node1, int32 fieldID, EACGObjectNode *node2) { + CHECK_FATAL(callee2Caller.find(node1) != callee2Caller.end(), "find failed"); + CHECK_FATAL(callee2Caller.find(node2) != callee2Caller.end(), "find failed"); + for (EACGObjectNode *p1 : callee2Caller[node1]) { + for (EACGObjectNode *q1 : callee2Caller[node2]) { + EACGFieldNode *fieldNode = p1->GetFieldNodeFromIdx(fieldID); + if (fieldNode == nullptr) { + CHECK_NULL_FATAL(node1); + fieldNode = node1->GetFieldNodeFromIdx(fieldID); + CHECK_FATAL(fieldNode != nullptr, "fieldNode must not be nullptr because we have handled it before!"); + CHECK_FATAL(fieldNode->IsBelongTo(this), "must be belong to this"); + (void)p1->AddOutNode(*fieldNode); + } + (void)fieldNode->AddOutNode(*q1); + } + } +} + +void EAConnectionGraph::UpdateNodes(const EACGBaseNode &actualInCallee, EACGBaseNode &actualInCaller, bool firstTime) { + DEBUG_ASSERT(actualInCallee.GetPointsToSet().size() > 0, "actualInCallee->GetPointsToSet().size() must gt 0!"); + for (EACGObjectNode *objInCallee : actualInCallee.GetPointsToSet()) { + if (actualInCaller.GetPointsToSet().size() == 0) { + std::set &mapsTo = callee2Caller[objInCallee]; + if (mapsTo.size() > 0) { + for (EACGObjectNode *temp : mapsTo) { + (void)actualInCaller.AddOutNode(*temp); + } + } else if (objInCallee->IsBelongTo(this)) { + DEBUG_ASSERT(false, "must be belong to this"); + } else { + EACGObjectNode *phantom = CreateObjectNode(nullptr, actualInCaller.GetEAStatus(), true, TyIdx(0)); + (void)actualInCaller.AddOutNode(*phantom); + AddMaps2Object(phantom, objInCallee); + UpdateCallerWithCallee(*phantom, *objInCallee, firstTime); + } + } else { + for (EACGObjectNode *objInCaller : actualInCaller.GetPointsToSet()) { + std::set &mapsTo = callee2Caller[objInCallee]; + if (mapsTo.find(objInCaller) == mapsTo.end()) { + AddMaps2Object(objInCaller, objInCallee); + UpdateCallerWithCallee(*objInCaller, *objInCallee, firstTime); + } + } + } + } +} + +// The escape state of the nodes in MapsTo(which is the object node in caller) is marked +// GlobalEscape if the escape state of object node in callee is GlobalEscape. +// Otherwise, the escape state of the caller nodes is not affected. +void EAConnectionGraph::UpdateCallerWithCallee(EACGObjectNode &objInCaller, const EACGObjectNode &objInCallee, + bool firstTime) { + if (objInCallee.GetEAStatus() == EAStatus::kGlobalEscape) { + (void)objInCaller.UpdateEAStatus(EAStatus::kGlobalEscape); + } + + // At this moment, a node in caller is mapped to the corresponding node in callee, + // we need make sure that all the field nodes also exist in caller. If not, + // we create both the field node and the phantom object node it should point to for the caller. 
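+    // Borrowing the callee's field node directly is only legal when it belongs to this
+    // graph; otherwise GetOrCreateFieldNodeFromIdx creates a phantom field on demand.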
+ for (auto tempPair : objInCallee.GetFieldNodeMap()) { + EACGFieldNode *fieldInCaller = objInCaller.GetFieldNodeFromIdx(tempPair.first); + EACGFieldNode *fieldInCallee = tempPair.second; + if (fieldInCaller == nullptr && fieldInCallee->IsBelongTo(this)) { + (void)objInCaller.AddOutNode(*fieldInCallee); + } + fieldInCaller = GetOrCreateFieldNodeFromIdx(objInCaller, tempPair.first); + UpdateNodes(*fieldInCallee, *fieldInCaller, firstTime); + } +} + +EACGFieldNode *EAConnectionGraph::GetOrCreateFieldNodeFromIdx(EACGObjectNode &obj, int32 fieldID) { + EACGFieldNode *ret = obj.GetFieldNodeFromIdx(fieldID); + if (ret == nullptr) { + // this node is always phantom + ret = CreateFieldNode(nullptr, obj.GetEAStatus(), fieldID, &obj, true); + } + return ret; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ipa/src/old/ipa_escape_analysis.cpp b/ecmascript/mapleall/maple_ipa/src/old/ipa_escape_analysis.cpp new file mode 100644 index 0000000000000000000000000000000000000000..756c3d84b20221267c7bd07fa87faf4355ba2e32 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/src/old/ipa_escape_analysis.cpp @@ -0,0 +1,1614 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_escape_analysis.h" +#include "me_cfg.h" +#include + +namespace maple { +constexpr maple::uint32 kInvalid = 0xffffffff; +static bool IsExprRefOrPtr(const MeExpr &expr) { + return expr.GetPrimType() == PTY_ref || expr.GetPrimType() == PTY_ptr; +} + +static bool IsTypeRefOrPtr(PrimType type) { + return type == PTY_ref || type == PTY_ptr; +} + +static bool IsGlobal(const SSATab &ssaTab, const VarMeExpr &expr) { + const OriginalSt *symOst = ssaTab.GetOriginalStFromID(expr.GetOstIdx()); + DEBUG_ASSERT(symOst != nullptr, "null ptr check"); + if (symOst->GetMIRSymbol()->GetStIdx().IsGlobal()) { + return true; + } + return false; +} + +static bool IsGlobal(const SSATab &ssaTab, const AddrofMeExpr &expr) { + const OriginalSt *symOst = ssaTab.GetOriginalStFromID(expr.GetOstIdx()); + DEBUG_ASSERT(symOst != nullptr, "null ptr check"); + if (symOst->GetMIRSymbol()->GetStIdx().IsGlobal()) { + return true; + } + return false; +} + +static bool IsZeroConst(const VarMeExpr *expr) { + if (expr == nullptr) { + return false; + } + if (expr->GetDefBy() != kDefByStmt) { + return false; + } + MeStmt *stmt = expr->GetDefStmt(); + if (stmt->GetOp() != OP_dassign) { + return false; + } + DassignMeStmt *dasgn = static_cast(stmt); + if (dasgn->GetRHS()->GetMeOp() != kMeOpConst) { + return false; + } + ConstMeExpr *constExpr = static_cast(dasgn->GetRHS()); + if (constExpr->GetConstVal()->GetKind() == kConstInt && constExpr->GetConstVal()->IsZero()) { + return true; + } + return false; +} + +static bool StartWith(const std::string &str, const std::string &head) { + return str.compare(0, head.size(), head) == 0; +} + +static bool IsVirtualVar(const SSATab &ssaTab, const VarMeExpr &expr) { + const OriginalSt *ost = ssaTab.GetOriginalStFromID(expr.GetOstIdx()); + DEBUG_ASSERT(ost != nullptr, "null ptr check"); + return 
ost->GetIndirectLev() > 0; +} + +static bool IsInWhiteList(const MIRFunction &func) { + std::vector whiteList = { + "MCC_Reflect_Check_Casting_Array", + "MCC_Reflect_Check_Casting_NoArray", + "MCC_ThrowStringIndexOutOfBoundsException", + "MCC_ArrayMap_String_Int_clear", + "MCC_ArrayMap_String_Int_put", + "MCC_ArrayMap_String_Int_getOrDefault", + "MCC_ArrayMap_String_Int_size", + "MCC_ThrowSecurityException", + "MCC_String_Equals_NotallCompress", + "memcmpMpl", + "Native_java_lang_String_compareTo__Ljava_lang_String_2", + "Native_java_lang_String_getCharsNoCheck__II_3CI", + "Native_java_lang_String_toCharArray__", + "Native_java_lang_System_arraycopyBooleanUnchecked___3ZI_3ZII", + "Native_java_lang_System_arraycopyByteUnchecked___3BI_3BII", + "Native_java_lang_System_arraycopyCharUnchecked___3CI_3CII", + "Native_java_lang_System_arraycopyDoubleUnchecked___3DI_3DII", + "Native_java_lang_System_arraycopyFloatUnchecked___3FI_3FII", + "Native_java_lang_System_arraycopyIntUnchecked___3II_3III", + "Native_java_lang_System_arraycopy__Ljava_lang_Object_2ILjava_lang_Object_2II", + "Native_java_lang_System_arraycopyLongUnchecked___3JI_3JII", + "Native_java_lang_System_arraycopyShortUnchecked___3SI_3SII", + "getpriority", + "setpriority" + }; + for (std::string name : whiteList) { + if (func.GetName() == name) { + // close all the whitelist + return false; + } + } + return false; +} + +static bool IsNoSideEffect(CallMeStmt &call) { + CallMeStmt &callAssign = utils::ToRef(&call); + MIRFunction &mirFunc = callAssign.GetTargetFunction(); + if (IsInWhiteList(mirFunc)) { + return true; + } + // Non-nullptr means it has return value + CHECK_FATAL(callAssign.GetMustDefList() != nullptr, "Impossible"); + if (callAssign.GetMustDefList()->size() == 1) { + if (callAssign.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + callAssign.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "NYI"); + } + if (IsExprRefOrPtr(*callAssign.GetMustDefListItem(0).GetLHS())) { + return false; + } + } + + const MapleVector &opnds = call.GetOpnds(); + const size_t size = opnds.size(); + for (size_t i = 0; i < size; ++i) { + if (IsExprRefOrPtr(*opnds[i])) { + return false; + } + } + return true; +} + +static bool IsRegAssignStmtForClassMeta(const AssignMeStmt ®Assign) { + MeExpr &rhs = utils::ToRef(regAssign.GetRHS()); + if (rhs.GetOp() == OP_add) { + return true; + } + + if (instance_of(rhs)) { + IvarMeExpr &ivar = static_cast(rhs); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx()); + MIRPtrType &ptrType = utils::ToRef(safe_cast(type)); + if (ptrType.GetPointedType()->GetName() == "__class_meta__" || + (ptrType.GetPointedType()->GetName() == "Ljava_2Flang_2FObject_3B" && ivar.GetFieldID() == 1)) { + return true; + } + } + return false; +} + +TyIdx IPAEscapeAnalysis::GetAggElemType(const MIRType &aggregate) const { + switch (aggregate.GetKind()) { + case kTypePointer: { + const MIRPtrType *pointType = static_cast(&aggregate); + const MIRType *pointedType = pointType->GetPointedType(); + switch (pointedType->GetKind()) { + case kTypeClass: + case kTypeScalar: + return pointedType->GetTypeIndex(); + case kTypePointer: + case kTypeJArray: + return GetAggElemType(*pointedType); + default: + return TyIdx(0); + } + } + case kTypeJArray: { + const MIRJarrayType *arrType = static_cast(&aggregate); + const MIRType *elemType = arrType->GetElemType(); + CHECK_NULL_FATAL(elemType); + switch (elemType->GetKind()) { + case kTypeScalar: + return elemType->GetTypeIndex(); + case 
kTypePointer: + case kTypeJArray: + return GetAggElemType(*elemType); + default: // Not sure what type is + return TyIdx(0); + } + } + default: + CHECK_FATAL(false, "Should not reach here"); + return TyIdx(0); // to eliminate compilation warning + } +} + +// check whether the newly allocated object implements Runnable, Throwable, extends Reference or has a finalizer +bool IPAEscapeAnalysis::IsSpecialEscapedObj(const MeExpr &alloc) const { + if (alloc.GetOp() == OP_gcpermalloc || alloc.GetOp() == OP_gcpermallocjarray) { + return true; + } + TyIdx tyIdx; + const static TyIdx runnableInterface = kh->GetKlassFromLiteral("Ljava_2Flang_2FRunnable_3B")->GetTypeIdx(); + if (alloc.GetOp() == OP_gcmalloc) { + tyIdx = static_cast(&alloc)->GetTyIdx(); + } else { + CHECK_FATAL(alloc.GetOp() == OP_gcmallocjarray, "must be OP_gcmallocjarray"); + MIRType *arrType = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(&alloc)->GetTyIdx()); + tyIdx = GetAggElemType(*arrType); + if (tyIdx == TyIdx(0)) { + return true; // deal as escape + } + } + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (type->GetKind() == kTypeScalar) { + return false; + } + Klass *klass = kh->GetKlassFromTyIdx(tyIdx); + CHECK_FATAL(klass, "impossible"); + for (Klass *inter : klass->GetImplInterfaces()) { + if (inter->GetTypeIdx() == runnableInterface) { + return true; + } + } + if (klass->HasFinalizer() || klass->IsExceptionKlass()) { + return true; + } + + // check subclass of Reference class, such as WeakReference, PhantomReference, SoftReference and Cleaner + const static Klass *referenceKlass = kh->GetKlassFromLiteral("Ljava_2Flang_2Fref_2FReference_3B"); + if (kh->IsSuperKlass(referenceKlass, klass)) { + return true; + } + return false; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForReg(RegMeExpr ®, bool createObjNode) { + EACGBaseNode *node = eaCG->GetCGNodeFromExpr(®); + EACGRefNode *refNode = nullptr; + if (node == nullptr) { + refNode = eaCG->CreateReferenceNode(®, kNoEscape, false); + cgChangedInSCC = true; + } else { + refNode = static_cast(node); + } + if (node == nullptr && createObjNode) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(nullptr, nullptr, refNode->GetEAStatus()); + (void)refNode->AddOutNode(*objNode); + } + return refNode; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForAddrof(AddrofMeExpr &var, bool createObjNode) { + if (IsGlobal(*ssaTab, var)) { + eaCG->UpdateExprOfGlobalRef(&var); + return eaCG->GetGlobalReference(); + } + EACGBaseNode *node = eaCG->GetCGNodeFromExpr(&var); + EACGRefNode *refNode = nullptr; + if (node == nullptr) { + refNode = eaCG->CreateReferenceNode(&var, kNoEscape, false); + cgChangedInSCC = true; + } else { + refNode = static_cast(node); + } + if (node == nullptr && createObjNode) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(nullptr, nullptr, refNode->GetEAStatus()); + (void)refNode->AddOutNode(*objNode); + } + return refNode; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForVar(VarMeExpr &var, bool createObjNode) { + if (IsGlobal(*ssaTab, var)) { + eaCG->UpdateExprOfGlobalRef(&var); + return eaCG->GetGlobalReference(); + } + EACGBaseNode *node = eaCG->GetCGNodeFromExpr(&var); + EACGRefNode *refNode = nullptr; + if (node == nullptr) { + refNode = eaCG->CreateReferenceNode(&var, kNoEscape, false); + cgChangedInSCC = true; + } else { + refNode = static_cast(node); + } + if (node == nullptr && createObjNode) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(nullptr, nullptr, refNode->GetEAStatus()); + 
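+        // A newly created reference node must point at something: give it a phantom
+        // object that inherits the reference's current escape status.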
(void)refNode->AddOutNode(*objNode); + } + return refNode; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForVarOrReg(MeExpr &var, bool createObjNode) { + if (var.GetMeOp() == kMeOpVar) { + return GetOrCreateCGRefNodeForVar(static_cast(var), createObjNode); + } else if (var.GetMeOp() == kMeOpReg) { + return GetOrCreateCGRefNodeForReg(static_cast(var), createObjNode); + } + CHECK_FATAL(false, "Impossible"); + return nullptr; +} + +static FieldID GetBaseFieldId(const KlassHierarchy &kh, const TyIdx &tyIdx, FieldID fieldId) { + FieldID ret = fieldId; + Klass *klass = kh.GetKlassFromTyIdx(tyIdx); + CHECK_FATAL(klass != nullptr, "Impossible"); + Klass *super = klass->GetSuperKlass(); + if (super == nullptr) { + return ret; + } + MIRStructType *structType = super->GetMIRStructType(); + TyIdx typeIdx = structType->GetFieldTyIdx(fieldId - 1); + while (typeIdx != 0u) { + --ret; + klass = super; + super = klass->GetSuperKlass(); + if (super == nullptr) { + return ret; + } + structType = super->GetMIRStructType(); + typeIdx = structType->GetFieldTyIdx(ret - 1); + } + return ret; +} + +void IPAEscapeAnalysis::GetOrCreateCGFieldNodeForIvar(std::vector &fieldNodes, IvarMeExpr &ivar, + MeStmt &stmt, bool createObjNode) { + MeExpr *base = ivar.GetBase(); + FieldID fieldId = ivar.GetFieldID(); + std::vector baseNodes; + if (base->GetMeOp() == kMeOpReg && fieldId == 0) { + GetArrayBaseNodeForReg(baseNodes, static_cast(*base), stmt); + } else { + GetCGNodeForMeExpr(baseNodes, *base, stmt, true); + } + bool ifHandled = (eaCG->GetCGNodeFromExpr(&ivar) != nullptr); + if (ivar.GetFieldID() != 0) { + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx())); + TyIdx tyIdx = ptrType->GetPointedTyIdx(); + fieldId = GetBaseFieldId(*kh, tyIdx, ivar.GetFieldID()); + } + for (const auto &baseNode : baseNodes) { + for (const auto &objNode : baseNode->GetPointsToSet()) { + EACGFieldNode *fieldNode = objNode->GetFieldNodeFromIdx(fieldId); + if (!ifHandled && fieldNode != nullptr) { + eaCG->UpdateExprOfNode(*fieldNode, &ivar); + } else if (!ifHandled && fieldNode == nullptr) { + fieldNode = eaCG->CreateFieldNode(&ivar, objNode->GetEAStatus(), fieldId, objNode, false); + cgChangedInSCC = true; + if (createObjNode) { + EACGObjectNode *phanObjNode = GetOrCreateCGObjNode(nullptr); + (void)fieldNode->AddOutNode(*phanObjNode); + } + } + if (fieldNode != nullptr) { + fieldNodes.push_back(fieldNode); + } + } + } +} + +void IPAEscapeAnalysis::GetOrCreateCGFieldNodeForIAddrof(std::vector &fieldNodes, OpMeExpr &expr, + MeStmt &stmt, bool createObjNode) { + MeExpr *base = expr.GetOpnd(0); + FieldID fieldId = expr.GetFieldID(); + std::vector baseNodes; + if (base->GetMeOp() == kMeOpReg && fieldId == 0) { + GetArrayBaseNodeForReg(baseNodes, static_cast(*base), stmt); + } else { + GetCGNodeForMeExpr(baseNodes, *base, stmt, true); + } + bool ifHandled = (eaCG->GetCGNodeFromExpr(&expr) != nullptr); + if (expr.GetFieldID() != 0) { + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx())); + TyIdx tyIdx = ptrType->GetPointedTyIdx(); + fieldId = GetBaseFieldId(*kh, tyIdx, expr.GetFieldID()); + } + for (const auto &baseNode : baseNodes) { + for (const auto &objNode : baseNode->GetPointsToSet()) { + EACGFieldNode *fieldNode = objNode->GetFieldNodeFromIdx(fieldId); + if (!ifHandled && fieldNode != nullptr) { + eaCG->UpdateExprOfNode(*fieldNode, &expr); + } else if (!ifHandled && fieldNode == nullptr) { + fieldNode = eaCG->CreateFieldNode(&expr, 
objNode->GetEAStatus(), fieldId, objNode, false); + cgChangedInSCC = true; + if (createObjNode) { + EACGObjectNode *phanObjNode = GetOrCreateCGObjNode(nullptr); + (void)fieldNode->AddOutNode(*phanObjNode); + } + } + if (fieldNode != nullptr) { + fieldNodes.push_back(fieldNode); + } + } + } +} + +EACGObjectNode *IPAEscapeAnalysis::GetOrCreateCGObjNode(MeExpr *expr, const MeStmt *stmt, EAStatus easOfPhanObj) { + EAStatus eas = kNoEscape; + TyIdx tyIdx; + Location *location = nullptr; + bool isPhantom; + if (expr != nullptr) { + EACGBaseNode *cgNode = eaCG->GetCGNodeFromExpr(expr); + if (cgNode != nullptr) { + CHECK_FATAL(cgNode->IsObjectNode(), "should be object"); + EACGObjectNode *objNode = static_cast(cgNode); + return objNode; + } + if (expr->IsGcmalloc()) { + CHECK_FATAL(stmt != nullptr, "Impossible"); + location = mirModule->GetMemPool()->New(mirModule->GetFileName(), stmt->GetSrcPosition().FileNum(), + stmt->GetSrcPosition().LineNum()); + isPhantom = false; + if (IsSpecialEscapedObj(*expr)) { + eas = kGlobalEscape; + } + if (expr->GetOp() == OP_gcmalloc || expr->GetOp() == OP_gcpermalloc) { + tyIdx = static_cast(expr)->GetTyIdx(); + } else { + tyIdx = static_cast(expr)->GetTyIdx(); + } + } else { + isPhantom = true; + eas = easOfPhanObj; + tyIdx = kInitTyIdx; + } + } else { // null alloc means creating phantom object + isPhantom = true; + eas = easOfPhanObj; + tyIdx = kInitTyIdx; + } + if (eas == kGlobalEscape) { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), expr); + return eaCG->GetGlobalObject(); + } + cgChangedInSCC = true; + EACGObjectNode *objectNode = eaCG->CreateObjectNode(expr, eas, isPhantom, tyIdx); + if (location != nullptr) { + objectNode->SetLocation(location); + } + return objectNode; +} + +void IPAEscapeAnalysis::CollectDefStmtForReg(std::set &visited, std::set &defStmts, + RegMeExpr ®Var) { + if (regVar.GetDefBy() == kDefByStmt) { + AssignMeStmt *regAssignStmt = static_cast(regVar.GetDefStmt()); + (void)defStmts.insert(regAssignStmt); + } else if (regVar.GetDefBy() == kDefByPhi) { + if (visited.find(®Var) == visited.end()) { + (void)visited.insert(®Var); + MePhiNode ®PhiNode = regVar.GetDefPhi(); + for (auto ® : regPhiNode.GetOpnds()) { + CollectDefStmtForReg(visited, defStmts, static_cast(*reg)); + } + } + } else { + CHECK_FATAL(false, "not kDefByStmt or kDefByPhi"); + } +} + +void IPAEscapeAnalysis::GetArrayBaseNodeForReg(std::vector &nodes, RegMeExpr ®Var, MeStmt &stmt) { + std::set defStmts; + std::set visited; + CollectDefStmtForReg(visited, defStmts, regVar); + for (auto ®AssignStmt : defStmts) { + MeExpr *rhs = regAssignStmt->GetRHS(); + CHECK_FATAL(rhs != nullptr, "Impossible"); + CHECK_FATAL(rhs->GetOp() == OP_array, "Impossible, funcName: %s", func->GetName().c_str()); + NaryMeExpr *array = static_cast(rhs); + CHECK_FATAL(array->GetOpnds().size() > 0, "access array->GetOpnds() failed"); + MeExpr *base = array->GetOpnd(0); + std::vector baseNodes; + GetCGNodeForMeExpr(baseNodes, *base, stmt, true); + for (auto baseNode : baseNodes) { + nodes.push_back(baseNode); + } + } +} + +void IPAEscapeAnalysis::GetCGNodeForMeExpr(std::vector &nodes, MeExpr &expr, MeStmt &stmt, + bool createObjNode) { + if (expr.GetMeOp() == kMeOpVar) { + VarMeExpr *var = static_cast(&expr); + EACGRefNode *refNode = GetOrCreateCGRefNodeForVar(*var, createObjNode); + nodes.push_back(refNode); + } else if (expr.GetMeOp() == kMeOpIvar) { + IvarMeExpr *ivar = static_cast(&expr); + GetOrCreateCGFieldNodeForIvar(nodes, *ivar, stmt, createObjNode); + } else if (expr.IsGcmalloc()) { + 
EACGObjectNode *objNode = GetOrCreateCGObjNode(&expr, &stmt); + nodes.push_back(objNode); + } else if (expr.GetMeOp() == kMeOpReg) { + RegMeExpr *regVar = static_cast(&expr); + if (regVar->GetRegIdx() < 0) { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + EACGObjectNode *objNode = eaCG->GetGlobalObject(); + nodes.push_back(objNode); + } else { + if (regVar->GetDefBy() != kDefByStmt && regVar->GetDefBy() != kDefByMustDef) { + CHECK_FATAL(false, "impossible"); + } + EACGRefNode *refNode = GetOrCreateCGRefNodeForReg(*regVar, createObjNode); + nodes.push_back(refNode); + } + } else if (expr.GetMeOp() == kMeOpOp && (expr.GetOp() == OP_retype || expr.GetOp() == OP_cvt)) { + MeExpr *retypeRhs = (static_cast(&expr))->GetOpnd(0); + if (IsExprRefOrPtr(*retypeRhs)) { + GetCGNodeForMeExpr(nodes, *retypeRhs, stmt, createObjNode); + } else { + EACGObjectNode *objNode = nullptr; + VarMeExpr *var = static_cast(retypeRhs); + if (IsZeroConst(var)) { + objNode = GetOrCreateCGObjNode(&expr, nullptr, kNoEscape); + } else { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + objNode = eaCG->GetGlobalObject(); + } + nodes.push_back(objNode); + } + } else if (expr.GetMeOp() == kMeOpOp && expr.GetOp() == OP_select) { + OpMeExpr *opMeExpr = static_cast(&expr); + EACGBaseNode *refNode = eaCG->GetCGNodeFromExpr(opMeExpr); + if (refNode == nullptr) { + refNode = eaCG->CreateReferenceNode(opMeExpr, kNoEscape, false); + for (size_t i = 1; i < 3; ++i) { // OP_select expr has three operands. + std::vector opndNodes; + GetCGNodeForMeExpr(opndNodes, *opMeExpr->GetOpnd(i), stmt, true); + for (auto opndNode : opndNodes) { + (void)refNode->AddOutNode(*opndNode); + } + } + } + nodes.push_back(refNode); + } else if (expr.GetMeOp() == kMeOpAddrof && expr.GetOp() == OP_addrof) { + AddrofMeExpr *var = static_cast(&expr); + EACGRefNode *refNode = GetOrCreateCGRefNodeForAddrof(*var, createObjNode); + nodes.push_back(refNode); + } else if (expr.GetMeOp() == kMeOpOp && expr.GetOp() == OP_iaddrof) { + OpMeExpr *opExpr = static_cast(&expr); + GetOrCreateCGFieldNodeForIAddrof(nodes, *opExpr, stmt, createObjNode); + } else if (expr.GetMeOp() == kMeOpNary && + (expr.GetOp() == OP_intrinsicopwithtype || expr.GetOp() == OP_intrinsicop)) { + NaryMeExpr *naryMeExpr = static_cast(&expr); + if (naryMeExpr->GetIntrinsic() == INTRN_JAVA_CONST_CLASS) { + // get some class's "Class", metadata + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + EACGObjectNode *objNode = eaCG->GetGlobalObject(); + nodes.push_back(objNode); + } else if (naryMeExpr->GetIntrinsic() == INTRN_JAVA_MERGE) { + CHECK_FATAL(naryMeExpr->GetOpnds().size() == 1, "must have one opnd"); + MeExpr *opnd = naryMeExpr->GetOpnd(0); + if (IsExprRefOrPtr(*opnd)) { + GetCGNodeForMeExpr(nodes, *opnd, stmt, createObjNode); + } else { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + EACGObjectNode *objNode = eaCG->GetGlobalObject(); + nodes.push_back(objNode); + } + } else { + stmt.Dump(irMap); + CHECK_FATAL(false, "NYI"); + } + } else if (expr.GetMeOp() == kMeOpNary && expr.GetOp() == OP_array) { + NaryMeExpr *array = static_cast(&expr); + CHECK_FATAL(array->GetOpnds().size() > 0, "access array->GetOpnds() failed"); + MeExpr *arrayBase = array->GetOpnd(0); + GetCGNodeForMeExpr(nodes, *arrayBase, stmt, createObjNode); + } else if (expr.GetMeOp() == kMeOpConst) { + ConstMeExpr *constExpr = static_cast(&expr); + EACGObjectNode *objNode = nullptr; + if (constExpr->GetConstVal()->GetKind() == kConstInt && constExpr->IsZero()) { + objNode = 
GetOrCreateCGObjNode(&expr, nullptr, kNoEscape); + } else { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + objNode = eaCG->GetGlobalObject(); + } + nodes.push_back(objNode); + } else if (expr.GetMeOp() == kMeOpConststr) { + nodes.push_back(eaCG->GetGlobalReference()); + eaCG->UpdateExprOfGlobalRef(&expr); + } else { + stmt.Dump(irMap); + CHECK_FATAL(false, "NYI funcName: %s", func->GetName().c_str()); + } +} + +void IPAEscapeAnalysis::UpdateEscConnGraphWithStmt(MeStmt &stmt) { + switch (stmt.GetOp()) { + case OP_dassign: { + DassignMeStmt *dasgn = static_cast(&stmt); + if (!IsExprRefOrPtr(*dasgn->GetLHS())) { + break; + } + CHECK_FATAL(IsExprRefOrPtr(*dasgn->GetRHS()), "type mis-match"); + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*static_cast(dasgn->GetVarLHS()), false); + + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *dasgn->GetRHS(), stmt, true); + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + break; + } + case OP_iassign: { + IassignMeStmt *iasgn = static_cast(&stmt); + if (!IsExprRefOrPtr(*iasgn->GetLHSVal())) { + break; + } + CHECK_FATAL(IsExprRefOrPtr(*iasgn->GetRHS()), "type mis-match"); + // get or create field nodes for lhs (may need to create a phantom object node) + std::vector lhsNodes; + GetOrCreateCGFieldNodeForIvar(lhsNodes, *iasgn->GetLHSVal(), stmt, false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *iasgn->GetRHS(), stmt, true); + for (const auto &lhsNode : lhsNodes) { + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + } + break; + } + case OP_maydassign: { + MaydassignMeStmt *mdass = static_cast(&stmt); + CHECK_FATAL(mdass->GetChiList() != nullptr, "Impossible"); + if (mdass->GetChiList()->empty() || !IsExprRefOrPtr(*mdass->GetRHS())) { + break; + } + for (std::pair it : *mdass->GetChiList()) { + ChiMeNode *chi = it.second; + CHECK_FATAL(IsExprRefOrPtr(*chi->GetLHS()), "type mis-match"); + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*static_cast(chi->GetLHS()), false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *mdass->GetRHS(), stmt, true); + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + } + break; + } + case OP_regassign: { + AssignMeStmt *regasgn = static_cast(&stmt); + CHECK_FATAL(regasgn->GetLHS() != nullptr, "Impossible"); + CHECK_FATAL(regasgn->GetRHS() != nullptr, "Impossible"); + if (!IsExprRefOrPtr(*regasgn->GetLHS())) { + break; + } + CHECK_FATAL(IsExprRefOrPtr(*regasgn->GetRHS()), "type mis-match"); + if (IsRegAssignStmtForClassMeta(*regasgn) || regasgn->GetRHS()->GetOp() == OP_array) { + break; + } + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForReg(*regasgn->GetLHS(), false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *regasgn->GetRHS(), stmt, true); + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? 
true : cgChangedInSCC); + } + break; + } + case OP_throw: { + ThrowMeStmt *throwStmt = static_cast(&stmt); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *throwStmt->GetOpnd(), stmt, true); + for (const auto &node : nodes) { + for (const auto &objNode : node->GetPointsToSet()) { + if (objNode->GetEAStatus() != kGlobalEscape) { + (void)objNode->UpdateEAStatus(kGlobalEscape); + cgChangedInSCC = true; + } + } + } + break; + } + case OP_return: { + RetMeStmt *retMeStmt = static_cast(&stmt); + EACGActualNode *retNode = eaCG->GetReturnNode(); + MIRFunction *mirFunc = func->GetMirFunc(); + if (!IsTypeRefOrPtr(mirFunc->GetReturnType()->GetPrimType())) { + break; + } + if (retNode == nullptr && retMeStmt->GetOpnds().size() > 0) { + retNode = eaCG->CreateActualNode(kReturnEscape, true, true, + static_cast(mirFunc->GetFormalCount()), kInvalid); + cgChangedInSCC = true; + } + for (const auto &expr : retMeStmt->GetOpnds()) { + if (!IsExprRefOrPtr(*expr)) { + continue; + } + if (expr->GetMeOp() != kMeOpVar && expr->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "should be"); + } + EACGRefNode *refNode = GetOrCreateCGRefNodeForVarOrReg(*expr, true); + cgChangedInSCC = (retNode->AddOutNode(*refNode) ? true : cgChangedInSCC); + } + break; + } + case OP_icall: + case OP_customcall: + case OP_polymorphiccall: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_xintrinsiccall: + case OP_icallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: + case OP_xintrinsiccallassigned: { + CHECK_FATAL(false, "NYI"); + break; + } + case OP_intrinsiccall: { + IntrinsiccallMeStmt *intrn = static_cast(&stmt); + if (intrn->GetIntrinsic() != INTRN_MPL_CLEANUP_LOCALREFVARS && + intrn->GetIntrinsic() != INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP && + intrn->GetIntrinsic() != INTRN_MCCSetPermanent && + intrn->GetIntrinsic() != INTRN_MCCIncRef && + intrn->GetIntrinsic() != INTRN_MCCDecRef && + intrn->GetIntrinsic() != INTRN_MCCIncDecRef && + intrn->GetIntrinsic() != INTRN_MCCDecRefReset && + intrn->GetIntrinsic() != INTRN_MCCIncDecRefReset && + intrn->GetIntrinsic() != INTRN_MCCWrite && + intrn->GetIntrinsic() != INTRN_MCCWriteNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteReferent && + intrn->GetIntrinsic() != INTRN_MCCWriteS && + intrn->GetIntrinsic() != INTRN_MCCWriteSNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteSNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteSNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteSVol && + intrn->GetIntrinsic() != INTRN_MCCWriteSVolNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteSVolNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteSVolNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteVol && + intrn->GetIntrinsic() != INTRN_MCCWriteVolNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteVolNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteVolNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteVolWeak && + intrn->GetIntrinsic() != INTRN_MCCWriteWeak && + intrn->GetIntrinsic() != INTRN_MCCDecRefResetPair) { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrn->GetIntrinsic(), func->GetName().c_str()); + } + + if (intrn->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS || + intrn->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP || + intrn->GetIntrinsic() == INTRN_MCCSetPermanent || + intrn->GetIntrinsic() == INTRN_MCCIncRef || + intrn->GetIntrinsic() == INTRN_MCCDecRef || + 
intrn->GetIntrinsic() == INTRN_MCCIncDecRef || + intrn->GetIntrinsic() == INTRN_MCCDecRefReset || + intrn->GetIntrinsic() == INTRN_MCCIncDecRefReset || + intrn->GetIntrinsic() == INTRN_MCCWriteReferent || + intrn->GetIntrinsic() == INTRN_MCCDecRefResetPair) { + break; + } + + CHECK_FATAL(intrn->GetOpnds().size() > 1, "must be"); + const size_t opndIdx = 2; + MeExpr *lhs = intrn->GetOpnd(intrn->NumMeStmtOpnds() - opndIdx); + MeExpr *rhs = intrn->GetOpnds().back(); + std::vector lhsNodes; + GetCGNodeForMeExpr(lhsNodes, *lhs, stmt, false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *rhs, stmt, true); + for (auto lhsNode : lhsNodes) { + for (auto rhsNode : rhsNodes) { + (void)lhsNode->AddOutNode(*rhsNode); + } + } + break; + } + case OP_call: + case OP_callassigned: + case OP_superclasscallassigned: + case OP_interfaceicallassigned: + case OP_interfacecallassigned: + case OP_virtualicallassigned: + case OP_virtualcallassigned: { + CallMeStmt *callMeStmt = static_cast(&stmt); + MIRFunction &mirFunc = callMeStmt->GetTargetFunction(); + uint32 callInfo = callMeStmt->GetStmtID(); + if (callInfo == 0) { + if (mirFunc.GetName() != "MCC_SetObjectPermanent" && mirFunc.GetName() != "MCC_DecRef_NaiveRCFast") { + CHECK_FATAL(false, "funcName: %s", mirFunc.GetName().c_str()); + } + break; + } + eaCG->TouchCallSite(callInfo); + + // If a function has no reference parameter or return value, then skip it. + if (IsNoSideEffect(*callMeStmt)) { + HandleParaAtCallSite(callInfo, *callMeStmt); + break; + } + + HandleParaAtCallSite(callInfo, *callMeStmt); + if (stmt.GetOp() == OP_call || stmt.GetOp() == OP_callassigned || stmt.GetOp() == OP_superclasscallassigned) { + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[INVOKE] call func " << mirFunc.GetName() << "\n"; + } + HandleSingleCallee(*callMeStmt); + } else { + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[INVOKE] vcall func " << mirFunc.GetName() << "\n"; + } + HandleMultiCallees(*callMeStmt); + } + break; + } + // mainly for JAVA_CLINIT_CHECK + case OP_intrinsiccallwithtype: { + IntrinsiccallMeStmt *intrnMestmt = static_cast(&stmt); + if (intrnMestmt->GetIntrinsic() != INTRN_JAVA_CLINIT_CHECK && + intrnMestmt->GetIntrinsic() != INTRN_JAVA_CHECK_CAST) { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrnMestmt->GetIntrinsic(), func->GetName().c_str()); + } + // 1. INTRN_JAVA_CLINIT_CHECK: Because all the operations in clinit are to initialize the static field + // of the Class, this will not affect the escape status of any reference or object node. + // 2. INTRN_JAVA_CHECK_CAST: When mephase precheckcast is enabled, this will happen, we only hava to solve + // the next dassign stmt. + break; + } + // mainly for JAVA_ARRAY_FILL and JAVA_POLYMORPHIC_CALL + case OP_intrinsiccallassigned: { + IntrinsiccallMeStmt *intrnMestmt = static_cast(&stmt); + if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_POLYMORPHIC_CALL) { + // this intrinsiccall is MethodHandle.invoke, it is a native method. + const MapleVector &opnds = intrnMestmt->GetOpnds(); + const size_t size = opnds.size(); + for (size_t i = 0; i < size; ++i) { + MeExpr *var = opnds[i]; + // we only solve reference node. 
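The INTRN_JAVA_POLYMORPHIC_CALL branch above pushes every object reachable from an argument to kGlobalEscape, because MethodHandle.invoke is native and the analysis cannot look inside it. The escape states behave like a small lattice in which UpdateEAStatus only ever moves a node upward; a hedged sketch of that update rule (the relative ordering of kReturnEscape and kArgumentEscape below is my assumption, not taken from the patch):

```cpp
// Escape states as a totally ordered lattice; higher means "escapes more".
enum EAStatus { kNoEscape = 0, kReturnEscape = 1, kArgumentEscape = 2, kGlobalEscape = 3 };

// Returns true when the status actually changed, so a caller can set a
// flag like cgChangedInSCC and re-run the SCC until nothing moves.
bool UpdateEAStatus(EAStatus &current, EAStatus incoming) {
  if (incoming > current) {  // monotone: never lowers an escape state
    current = incoming;
    return true;
  }
  return false;
}
```

Monotonicity is what guarantees the re-analysis loop over an SCC terminates: each node can only climb the lattice a bounded number of times.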
+ if (!IsExprRefOrPtr(*var)) { + continue; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, *intrnMestmt, true); + for (auto realArgNode : nodes) { + for (EACGObjectNode *obj : realArgNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + } + } + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() <= 1, "Impossible"); + if (intrnMestmt->GetMustDefList()->size() == 0) { + break; + } + if (intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "impossible"); + } + if (!IsExprRefOrPtr(*intrnMestmt->GetMustDefListItem(0).GetLHS())) { + break; + } + EACGRefNode *realRetNode = GetOrCreateCGRefNodeForVarOrReg(*intrnMestmt->GetMustDefListItem(0).GetLHS(), true); + for (EACGObjectNode *obj : realRetNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + break; + } else if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_ARRAY_FILL) { + // JAVA_ARRAY_FILL can be skipped. + break; + } else { + if (intrnMestmt->GetIntrinsic() != INTRN_MCCIncRef && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRef && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRefS && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRefSVol && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRefVol && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadWeak && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadWeakVol) { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrnMestmt->GetIntrinsic(), func->GetName().c_str()); + } + + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() == 1, "Impossible"); + if (intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "impossible"); + } + + if (!IsExprRefOrPtr(*intrnMestmt->GetMustDefListItem(0).GetLHS())) { + break; + } + EACGRefNode *retNode = GetOrCreateCGRefNodeForVarOrReg(*intrnMestmt->GetMustDefListItem(0).GetLHS(), false); + MeExpr *rhs = intrnMestmt->GetOpnds().back(); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *rhs, stmt, true); + for (auto rhsNode : rhsNodes) { + (void)retNode->AddOutNode(*rhsNode); + } + break; + } + } + // mainly for JAVA_CHECK_CAST and JAVA_FILL_NEW_ARRAY + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallMeStmt *intrnMestmt = static_cast(&stmt); + if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_CHECK_CAST) { + // We regard this as dassign + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() <= 1, "Impossible"); + if (intrnMestmt->GetMustDefList()->size() == 0) { + break; + } + CHECK_FATAL(intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() == kMeOpVar, "must be kMeOpVar"); + VarMeExpr *lhs = static_cast(intrnMestmt->GetMustDefListItem(0).GetLHS()); + if (!IsExprRefOrPtr(*lhs)) { + break; + } + CHECK_FATAL(intrnMestmt->GetOpnds().size() == 1, "Impossible"); + CHECK_FATAL(intrnMestmt->GetOpnd(0)->GetMeOp() == kMeOpVar, "must be kMeOpVar"); + VarMeExpr *rhs = static_cast(intrnMestmt->GetOpnd(0)); + CHECK_FATAL(IsExprRefOrPtr(*rhs), "type mis-match"); + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*lhs, false); + EACGRefNode *rhsNode = GetOrCreateCGRefNodeForVar(*rhs, true); + (void)lhsNode->AddOutNode(*rhsNode); + } else if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_FILL_NEW_ARRAY) { + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() == 1, "Impossible"); + CHECK_FATAL(intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() == kMeOpVar, "must be kMeOpVar"); + VarMeExpr *lhs = 
static_cast(intrnMestmt->GetMustDefListItem(0).GetLHS()); + if (!IsExprRefOrPtr(*lhs)) { + break; + } + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*lhs, true); + CHECK_FATAL(intrnMestmt->GetOpnds().size() >= 1, "Impossible"); + for (MeExpr *expr : intrnMestmt->GetOpnds()) { + CHECK_FATAL(expr->GetMeOp() == kMeOpVar, "Impossible"); + VarMeExpr *rhs = static_cast(expr); + if (!IsExprRefOrPtr(*rhs)) { + continue; + } + EACGRefNode *rhsNode = GetOrCreateCGRefNodeForVar(*rhs, true); + for (const auto &objNode : lhsNode->GetPointsToSet()) { + // for array case, only one field node represents all elements + EACGFieldNode *fieldNode = objNode->GetFieldNodeFromIdx(0); + if (fieldNode == nullptr) { + fieldNode = eaCG->CreateFieldNode(nullptr, objNode->GetEAStatus(), 0, objNode, true); + } + (void)fieldNode->AddOutNode(*rhsNode); + } + } + } else { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrnMestmt->GetIntrinsic(), func->GetName().c_str()); + } + break; + } + + default: + break; + } +} + +EAConnectionGraph *IPAEscapeAnalysis::GetEAConnectionGraph(MIRFunction &function) const { + if (function.GetEACG() != nullptr) { + return function.GetEACG(); + } + const std::map &summaryMap = mirModule->GetEASummary(); + GStrIdx nameStrIdx = function.GetNameStrIdx(); + auto it = summaryMap.find(nameStrIdx); + if (it != summaryMap.end() && it->second != nullptr) { + return it->second; + } + return nullptr; +} + +void IPAEscapeAnalysis::HandleParaAtCallSite(uint32 callInfo, CallMeStmt &call) { + MapleVector *argVector = eaCG->GetCallSiteArgNodeVector(callInfo); + if (argVector != nullptr && argVector->size() > 0) { + // We have handled this callsite before, skip it. + return; + } + const MapleVector &opnds = call.GetOpnds(); + const uint32 size = static_cast(opnds.size()); + + bool isOptIcall = (call.GetOp() == OP_interfaceicallassigned || call.GetOp() == OP_virtualicallassigned); + uint32 firstParmIdx = (isOptIcall ? 1 : 0); + + for (uint32 i = firstParmIdx; i < size; ++i) { + MeExpr *var = opnds[i]; + // we only solve reference node. + if (!IsExprRefOrPtr(*var) || var->GetOp() == OP_add) { + continue; + } + // for func(u, v), we assume that there exists assignment: a1 = u; a2 = v; + // a1, a2 are phantomArgNode and u, v are realArgNode, we add edge from a1 to u, etc. + EACGActualNode *phantomArgNode = + eaCG->CreateActualNode(kNoEscape, false, true, static_cast(i), callInfo); + // node for u, v. + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, call, true); + for (auto realArgNode : nodes) { + (void)phantomArgNode->AddOutNode(*realArgNode); + } + } + // Non-nullptr means it has return value + CHECK_FATAL(call.GetMustDefList() != nullptr, "Impossible"); + if (call.GetMustDefList()->size() == 1) { + if (call.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + call.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "NYI"); + } + if (!IsExprRefOrPtr(*call.GetMustDefListItem(0).GetLHS())) { + return; + } + // for x = func(u, v), we assume that there exists assignment: r = x; + // r is a phantom return node, and x is the real return node, we add edge from r to x. 
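The comments above spell out the callsite model used by HandleParaAtCallSite: for x = func(u, v), phantom actuals a1 and a2 plus a phantom return r are created, with edges a1 -> u, a2 -> v, and r -> x, so that a callee summary can later be merged against this vector. A self-contained sketch of that wiring, with hypothetical node types rather than the real EACGActualNode interface:

```cpp
#include <memory>
#include <vector>

// Hypothetical graph node: just out-edges.
struct Node { std::vector<Node*> out; };

// Owns the phantom nodes it creates; phantoms are listed in argument order,
// with the phantom return node (if any) last, like the callsite arg vector.
struct CallSiteSummary {
  std::vector<std::unique_ptr<Node>> phantoms;
};

CallSiteSummary WireCallSite(const std::vector<Node*> &realArgs, Node *realRet) {
  CallSiteSummary summary;
  for (Node *arg : realArgs) {
    auto phantom = std::make_unique<Node>();  // a_i for the implied "a_i = u"
    phantom->out.push_back(arg);              // edge a_i -> real argument
    summary.phantoms.push_back(std::move(phantom));
  }
  if (realRet != nullptr) {
    auto phantomRet = std::make_unique<Node>();  // r for the implied "r = x"
    phantomRet->out.push_back(realRet);
    summary.phantoms.push_back(std::move(phantomRet));
  }
  return summary;
}
```

Keeping the phantoms in a stable order is what lets MergeCG later match caller-side actuals against the callee summary positionally.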
+ EACGActualNode *phantomRetNode = + eaCG->CreateActualNode(kNoEscape, true, true, static_cast(size), callInfo); + // node for x + EACGRefNode *realRetNode = GetOrCreateCGRefNodeForVarOrReg(*call.GetMustDefListItem(0).GetLHS(), true); + (void)phantomRetNode->AddOutNode(*realRetNode); + } +} + +void IPAEscapeAnalysis::HandleSingleCallee(CallMeStmt &callMeStmt) { + uint32 callInfoId = callMeStmt.GetStmtID(); + MIRFunction &calleeCandidate = callMeStmt.GetTargetFunction(); + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] ready to merge func " << calleeCandidate.GetName() << "\n"; + } + if (calleeCandidate.IsAbstract()) { + CHECK_FATAL(false, "Impossible"); + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] skip to merge func because it is abstract." << "\n"; + } + return; + } + + EAConnectionGraph *calleeSummary = GetEAConnectionGraph(calleeCandidate); + + if (!mirModule->IsInIPA()) { + // This phase is in maplecomb, we need handle single callee differently + if (calleeSummary == nullptr) { + if (!calleeCandidate.IsNative() && !calleeCandidate.IsEmpty()) { + CHECK_FATAL(false, "Impossible"); + } + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } else { + MapleVector *caller = eaCG->GetCallSiteArgNodeVector(callInfoId); + const MapleVector *callee = calleeSummary->GetFuncArgNodeVector(); + bool changedAfterMerge = eaCG->MergeCG(*caller, callee); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } + return; + } + + CGNode *callerNode = pcg->GetCGNode(func->GetMirFunc()); + CHECK_FATAL(callerNode != nullptr, "Impossible, funcName: %s", func->GetName().c_str()); + CGNode *calleeNode = pcg->GetCGNode(&calleeCandidate); + CHECK_FATAL(calleeNode != nullptr, "Impossible, funcName: %s", calleeCandidate.GetName().c_str()); + if (calleeNode->GetSCCNode() == callerNode->GetSCCNode() && + (eaCG->GetNeedConservation() || + callerNode->GetSCCNode()->GetNodes().size() > IPAEscapeAnalysis::kFuncInSCCLimit)) { + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] skip to merge func because NeedConservation." << "\n"; + } + return; + } + if (calleeSummary == nullptr && calleeCandidate.GetBody() != nullptr && !calleeCandidate.IsNative()) { + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] skip to merge func because this is first loop in scc." 
<< "\n"; + } + return; + } + if (calleeSummary == nullptr) { + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } else { + MapleVector *caller = eaCG->GetCallSiteArgNodeVector(callInfoId); + const MapleVector *callee = calleeSummary->GetFuncArgNodeVector(); + bool changedAfterMerge = eaCG->MergeCG(*caller, callee); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } +} + +void IPAEscapeAnalysis::HandleMultiCallees(const CallMeStmt &callMeStmt) { + uint32 callInfoId = callMeStmt.GetStmtID(); + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } +} + +void IPAEscapeAnalysis::UpdateEscConnGraphWithPhi(const BB &bb) { + const MapleMap &mePhiList = bb.GetMePhiList(); + for (auto it = mePhiList.begin(); it != mePhiList.end(); ++it) { + MePhiNode *phiNode = it->second; + auto *lhs = phiNode->GetLHS(); + if (lhs->GetMeOp() != kMeOpVar) { + continue; + } + if (!IsExprRefOrPtr(*lhs) || phiNode->GetOpnds().empty() || + IsVirtualVar(*ssaTab, static_cast(*lhs))) { + continue; + } + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(static_cast(*lhs), false); + for (auto itt = phiNode->GetOpnds().begin(); itt != phiNode->GetOpnds().end(); ++itt) { + auto *var = static_cast(*itt); + EACGRefNode *rhsNode = GetOrCreateCGRefNodeForVar(*var, true); + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + } +} + +void IPAEscapeAnalysis::HandleParaAtFuncEntry() { + if (!mirModule->IsInIPA()) { + CHECK_FATAL(eaCG == nullptr, "Impossible"); + } + + if (eaCG != nullptr) { + return; + } + MIRFunction *mirFunc = func->GetMirFunc(); + eaCG = mirModule->GetMemPool()->New( + mirModule, &mirModule->GetMPAllocator(), mirFunc->GetNameStrIdx()); + eaCG->InitGlobalNode(); + OriginalStTable &ostTab = ssaTab->GetOriginalStTable(); + // create actual node for formal parameter + for (size_t i = 0; i < mirFunc->GetFormalCount(); ++i) { + MIRSymbol *mirSt = mirFunc->GetFormal(i); + OriginalSt *ost = ostTab.FindOrCreateSymbolOriginalSt(*mirSt, mirFunc->GetPuidx(), 0); + VarMeExpr *formal = irMap->GetOrCreateZeroVersionVarMeExpr(*ost); + if (IsExprRefOrPtr(*formal)) { + EACGActualNode *actualNode = + eaCG->CreateActualNode(kArgumentEscape, false, true, static_cast(i), kInvalid); + EACGObjectNode *objNode = eaCG->CreateObjectNode(nullptr, kNoEscape, true, kInitTyIdx); + (void)actualNode->AddOutNode(*objNode); + EACGRefNode *formalNode = eaCG->CreateReferenceNode(formal, kNoEscape, false); + (void)formalNode->AddOutNode(*actualNode); + } + } +} + +void IPAEscapeAnalysis::ConstructConnGraph() { + HandleParaAtFuncEntry(); + auto cfg = func->GetCfg(); + cfg->BuildSCC(); + const MapleVector &sccTopologicalVec = cfg->GetSccTopologicalVec(); + for (size_t i = 0; i < sccTopologicalVec.size(); ++i) { + SCCOfBBs *scc = sccTopologicalVec[i]; + CHECK_FATAL(scc != nullptr, "nullptr check"); + if (scc->GetBBs().size() > 1) { + cfg->BBTopologicalSort(*scc); + } + cgChangedInSCC = true; + bool analyzeAgain = true; + while (analyzeAgain) { + analyzeAgain = false; + cgChangedInSCC = false; + for (BB *bb : scc->GetBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB()) { + continue; + } + UpdateEscConnGraphWithPhi(*bb); + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + UpdateEscConnGraphWithStmt(*stmt); + } + } + if (scc->HasCycle() && 
cgChangedInSCC) { + analyzeAgain = true; + } + } + } + eaCG->PropogateEAStatus(); + func->GetMirFunc()->SetEACG(eaCG); +} + +void IPAEscapeAnalysis::DoOptimization() { + CountObjRCOperations(); + ProcessNoAndRetEscObj(); + ProcessRetStmt(); + DeleteRedundantRC(); +} + +VarMeExpr *IPAEscapeAnalysis::CreateEATempVarWithName(const std::string &name) { + const auto &strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + VarMeExpr *var = irMap->CreateNewVar(strIdx, PTY_ref, false); + return var; +} + +OriginalSt *IPAEscapeAnalysis::CreateEATempOst() { + std::string name = std::string("__EATemp__").append(std::to_string(++tempCount)); + return CreateEATempOstWithName(name); +} + +OriginalSt *IPAEscapeAnalysis::CreateEARetTempOst() { + std::string name = std::string("__EARetTemp__"); + return CreateEATempOstWithName(name); +} + +OriginalSt *IPAEscapeAnalysis::CreateEATempOstWithName(const std::string &name) { + MIRSymbol *symbol = func->GetMIRModule().GetMIRBuilder()->CreateLocalDecl(name, + *GlobalTables::GetTypeTable().GetRef()); + OriginalSt *ost = ssaTab->CreateSymbolOriginalSt(*symbol, func->GetMirFunc()->GetPuidx(), 0); + ost->SetZeroVersionIndex(irMap->GetVerst2MeExprTable().size()); + irMap->GetVerst2MeExprTable().push_back(nullptr); + ost->PushbackVersionsIndices(ost->GetZeroVersionIndex()); + return ost; +} + +VarMeExpr *IPAEscapeAnalysis::CreateEATempVarMeExpr(OriginalSt &ost) { + VarMeExpr *var = irMap->CreateVarMeExprVersion(&ost); + return var; +} + +VarMeExpr *IPAEscapeAnalysis::GetOrCreateEARetTempVarMeExpr(OriginalSt &ost) { + if (retVar != nullptr) { + return retVar; + } + retVar = CreateEATempVarMeExpr(ost); + return retVar; +} + +VarMeExpr *IPAEscapeAnalysis::CreateEATempVar() { + std::string name = std::string("__EATemp__").append(std::to_string(++tempCount)); + return CreateEATempVarWithName(name); +} + +VarMeExpr *IPAEscapeAnalysis::GetOrCreateEARetTempVar() { + if (retVar != nullptr) { + return retVar; + } + std::string name = std::string("__EARetTemp__"); + retVar = CreateEATempVarWithName(name); + return retVar; +} + +void IPAEscapeAnalysis::ProcessNoAndRetEscObj() { + MeCFG *cfg = func->GetCfg(); + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr || + bb->GetAttributes(kBBAttrIsInLoopForEA)) { + continue; + } + + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + if (stmt->GetOp() == OP_dassign || stmt->GetOp() == OP_regassign || stmt->GetOp() == OP_iassign || + stmt->GetOp() == OP_maydassign) { + MeExpr *rhs = stmt->GetRHS(); + CHECK_FATAL(rhs != nullptr, "nullptr check"); + if (!rhs->IsGcmalloc()) { + continue; + } + CHECK_FATAL(func->GetMirFunc()->GetEACG() != nullptr, "Impossible"); + EACGBaseNode *node = func->GetMirFunc()->GetEACG()->GetCGNodeFromExpr(rhs); + CHECK_FATAL(node != nullptr, "nullptr check"); + CHECK_FATAL(node->IsObjectNode(), "impossible"); + EAStatus eaStatus = node->GetEAStatus(); + if ((eaStatus == kNoEscape) && (!static_cast(node)->IsPointedByFieldNode()) && + (static_cast(node)->GetRCOperations() >= kRCOperLB)) { + static_cast(node)->SetIgnorRC(true); + gcStmts.push_back(stmt); + OriginalSt *ost = CreateEATempOst(); + noAndRetEscOst.push_back(ost); + } + } + } + } + if (noAndRetEscOst.size() == 0) { + return; + } + BB *firstBB = cfg->GetFirstBB(); + CHECK_FATAL(firstBB != nullptr, "Impossible"); + for (size_t i = 0; i < noAndRetEscOst.size(); ++i) { + OriginalSt *ost = noAndRetEscOst[i]; + MeStmt *stmt = 
gcStmts.at(i); + BB *curBB = stmt->GetBB(); + VarMeExpr *initVar = CreateEATempVarMeExpr(*ost); + MeExpr *zeroExpr = irMap->CreateIntConstMeExpr(0, PTY_ref); + DassignMeStmt *initStmt = static_cast(irMap->CreateAssignMeStmt(*initVar, *zeroExpr, *firstBB)); + firstBB->AddMeStmtFirst(initStmt); + + VarMeExpr *var = CreateEATempVarMeExpr(*ost); + noAndRetEscObj.push_back(var); + ScalarMeExpr *lhs = stmt->GetLHS(); + CHECK_FATAL(lhs != nullptr, "nullptr check"); + DassignMeStmt *newStmt = static_cast(irMap->CreateAssignMeStmt(*var, *lhs, *curBB)); + curBB->InsertMeStmtAfter(stmt, newStmt); + IntrinsiccallMeStmt *meStmt = irMap->NewInPool(OP_intrinsiccall, INTRN_MCCSetObjectPermanent); + meStmt->PushBackOpnd(var); + curBB->InsertMeStmtAfter(newStmt, meStmt); + } +} + +void IPAEscapeAnalysis::ProcessRetStmt() { + if (noAndRetEscObj.size() == 0) { + return; + } + MeCFG *cfg = func->GetCfg(); + BB *firstBB = cfg->GetFirstBB(); + OriginalSt *ost = CreateEARetTempOst(); + VarMeExpr *initVar = CreateEATempVarMeExpr(*ost); + MeExpr *zeroExpr = irMap->CreateIntConstMeExpr(0, PTY_ref); + DassignMeStmt *newStmt = static_cast(irMap->CreateAssignMeStmt(*initVar, *zeroExpr, *firstBB)); + DEBUG_ASSERT(firstBB != nullptr, "null ptr check"); + firstBB->AddMeStmtFirst(newStmt); + + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr) { + continue; + } + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + if (stmt->GetOp() == OP_return) { + RetMeStmt *retMeStmt = static_cast(stmt); + CHECK_FATAL(retMeStmt->GetOpnds().size() <= 1, "must less than one"); + VarMeExpr *var = GetOrCreateEARetTempVarMeExpr(*ost); + for (const auto &expr : retMeStmt->GetOpnds()) { + if (IsExprRefOrPtr(*expr)) { + DassignMeStmt *newStmtTmp = static_cast(irMap->CreateAssignMeStmt(*var, *expr, *bb)); + bb->InsertMeStmtBefore(stmt, newStmtTmp); + } + } + IntrinsiccallMeStmt *meStmt = irMap->NewInPool( + OP_intrinsiccall, INTRN_MPL_CLEANUP_NORETESCOBJS); + meStmt->PushBackOpnd(var); + for (auto opnd : noAndRetEscObj) { + meStmt->PushBackOpnd(opnd); + } + bb->InsertMeStmtBefore(stmt, meStmt); + } + } + } +} + +void IPAEscapeAnalysis::CountObjRCOperations() { + MeCFG *cfg = func->GetCfg(); + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr) { + continue; + } + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + switch (stmt->GetOp()) { + case OP_intrinsiccall: { + IntrinsiccallMeStmt *intrn = static_cast(stmt); + switch (intrn->GetIntrinsic()) { + case INTRN_MCCIncRef: + case INTRN_MCCIncDecRef: + case INTRN_MCCIncDecRefReset: { + CHECK_FATAL(eaCG->GetCGNodeFromExpr(intrn->GetOpnd(0)) != nullptr, "nullptr check"); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *intrn->GetOpnd(0), *intrn, false); + for (auto refNode : nodes) { + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + } + break; + } + case INTRN_MCCWrite: + case INTRN_MCCWriteNoDec: + case INTRN_MCCWriteS: + case INTRN_MCCWriteSNoDec: + case INTRN_MCCWriteSVol: + case INTRN_MCCWriteSVolNoDec: + case INTRN_MCCWriteVol: + case INTRN_MCCWriteVolNoDec: + case INTRN_MCCWriteVolWeak: + case INTRN_MCCWriteWeak: { + CHECK_FATAL(eaCG->GetCGNodeFromExpr(intrn->GetOpnds().back()) != nullptr, "nullptr check"); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *intrn->GetOpnds().back(), *intrn, false); + for (auto 
refNode : nodes) { + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + } + break; + } + default: + break; + } + break; + } + case OP_intrinsiccallassigned: { + IntrinsiccallMeStmt *intrn = static_cast(stmt); + switch (intrn->GetIntrinsic()) { + case INTRN_MCCIncRef: + case INTRN_MCCLoadRef: + case INTRN_MCCLoadRefS: + case INTRN_MCCLoadRefSVol: + case INTRN_MCCLoadRefVol: + case INTRN_MCCLoadWeak: + case INTRN_MCCLoadWeakVol: { + CHECK_FATAL(intrn->GetMustDefList()->size() == 1, "Impossible"); + if (intrn->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + intrn->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "must be kMeOpVar or kMeOpReg"); + } + + if (!IsExprRefOrPtr(*intrn->GetMustDefListItem(0).GetLHS())) { + break; + } + EACGBaseNode *refNode = eaCG->GetCGNodeFromExpr(intrn->GetMustDefListItem(0).GetLHS()); + CHECK_FATAL(refNode != nullptr, "nullptr check"); + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + break; + } + default: + break; + } + break; + } + case OP_call: + case OP_callassigned: + case OP_superclasscallassigned: { + CallMeStmt *callMeStmt = static_cast(stmt); + + // If a function has no reference parameter or return value, then skip it. + if (IsNoSideEffect(*callMeStmt)) { + break; + } + MIRFunction &calleeCandidate = callMeStmt->GetTargetFunction(); + std::string fName = calleeCandidate.GetName(); + if (fName == "MCC_GetOrInsertLiteral" || + fName == "MCC_GetCurrentClassLoader" || + fName == "Native_Thread_currentThread" || + fName == "Native_java_lang_StringFactory_newStringFromBytes___3BIII" || + fName == "Native_java_lang_StringFactory_newStringFromChars__II_3C" || + fName == "Native_java_lang_StringFactory_newStringFromString__Ljava_lang_String_2" || + fName == "Native_java_lang_String_intern__" || + fName == "MCC_StringAppend" || + fName == "MCC_StringAppend_StringInt" || + fName == "MCC_StringAppend_StringJcharString" || + fName == "MCC_StringAppend_StringString") { + break; + } + const MapleVector &opnds = callMeStmt->GetOpnds(); + const size_t size = opnds.size(); + + bool isOptIcall = + (callMeStmt->GetOp() == OP_interfaceicallassigned || callMeStmt->GetOp() == OP_virtualicallassigned); + size_t firstParmIdx = (isOptIcall ? 1 : 0); + + bool isSpecialCall = false; + if (fName == "Native_java_lang_Object_clone_Ljava_lang_Object__" || + fName == "Native_java_lang_String_concat__Ljava_lang_String_2" || + fName == + "Ljava_2Flang_2FAbstractStringBuilder_3B_7CappendCLONEDignoreret_7C_28Ljava_2Flang_2FString_3B_29V" || + StartWith(fName, "Ljava_2Flang_2FAbstractStringBuilder_3B_7Cappend_7C") || + StartWith(fName, "Ljava_2Flang_2FStringBuilder_3B_7Cappend_7C")) { + CallMeStmt *call = static_cast(callMeStmt); + CHECK_FATAL(call->GetMustDefList() != nullptr, "funcName: %s", fName.c_str()); + CHECK_FATAL(call->GetMustDefList()->size() <= 1, "funcName: %s", fName.c_str()); + if (call->GetMustDefList() != nullptr && call->GetMustDefList()->size() == 0) { + break; + } + isSpecialCall = true; + } + + for (size_t i = firstParmIdx; i < size; ++i) { + MeExpr *var = opnds[i]; + // we only solve reference node. 
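CountObjRCOperations boils down to one rule, applied per RC-intrinsic operand and per call argument: bump a counter on every object in the operand's points-to set, then treat non-escaping objects whose count reaches kRCOperLB as candidates for the permanent-object rewrite in ProcessNoAndRetEscObj. Schematically (kRCOperLB's actual value is not visible in the patch; 2 below is a placeholder):

```cpp
#include <vector>

// Illustrative object record; not the Maple EACGObjectNode.
struct Obj {
  int rcOps = 0;
  bool ignoreRC = false;
};

constexpr int kRCOperLB = 2;  // assumed threshold, the real value is not shown

// Every RC operation on a reference charges each object it may point to.
void CountRCOperand(const std::vector<Obj*> &pointsTo) {
  for (Obj *obj : pointsTo) {
    ++obj->rcOps;
  }
}

// A non-escaping object with enough RC traffic is worth making permanent,
// matching the kNoEscape / !IsPointedByFieldNode / >= kRCOperLB test above.
bool IsPermanentCandidate(const Obj &obj, bool noEscape, bool pointedByField) {
  return noEscape && !pointedByField && obj.rcOps >= kRCOperLB;
}
```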
+ if (!IsExprRefOrPtr(*var) || var->GetOp() == OP_add) { + continue; + } + CHECK_NULL_FATAL(eaCG->GetCGNodeFromExpr(var)); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, *callMeStmt, false); + CHECK_FATAL(nodes.size() > 0, "the size must not be zero"); + for (EACGBaseNode *refNode : nodes) { + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + } + if (isSpecialCall) { + break; + } + } + break; + } + case OP_intrinsiccallwithtypeassigned: { + CHECK_FATAL(false, "must not be OP_intrinsiccallwithtypeassigned"); + break; + } + default: + break; + } + } + } + + for (EACGBaseNode *node : eaCG->GetNodes()) { + if (node == nullptr || !node->IsObjectNode()) { + continue; + } + EACGObjectNode *obj = static_cast(node); + if (obj->IsPhantom()) { + continue; + } + if (obj->IsPointedByFieldNode()) { + obj->IncresRCOperations(kRCOperLB); + } + } +} + +void IPAEscapeAnalysis::DeleteRedundantRC() { + MeCFG *cfg = func->GetCfg(); + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr) { + continue; + } + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + if (stmt->GetOp() == OP_intrinsiccall) { + IntrinsiccallMeStmt *intrn = static_cast(stmt); + switch (intrn->GetIntrinsic()) { + case INTRN_MCCIncRef: + case INTRN_MCCDecRef: + case INTRN_MCCIncDecRef: { + bool canRemoveStmt = true; + for (auto expr : intrn->GetOpnds()) { + if (eaCG->GetCGNodeFromExpr(expr) == nullptr) { + canRemoveStmt = false; + break; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *expr, *intrn, false); + for (auto node : nodes) { + if (!node->CanIgnoreRC()) { + canRemoveStmt = false; + break; + } + } + if (!canRemoveStmt) { + break; + } + } + if (canRemoveStmt) { + bb->RemoveMeStmt(stmt); + } + break; + } + case INTRN_MCCIncDecRefReset: + case INTRN_MCCDecRefReset: { + bool canRemoveStmt = true; + for (auto expr : intrn->GetOpnds()) { + if (expr->GetMeOp() != kMeOpAddrof) { + if (eaCG->GetCGNodeFromExpr(expr) == nullptr) { + canRemoveStmt = false; + break; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *expr, *intrn, false); + for (auto node : nodes) { + if (!node->CanIgnoreRC()) { + canRemoveStmt = false; + break; + } + } + if (!canRemoveStmt) { + break; + } + } else { + AddrofMeExpr *addrof = static_cast(expr); + const OriginalSt *ost = ssaTab->GetOriginalStFromID(addrof->GetOstIdx()); + DEBUG_ASSERT(ost != nullptr, "null ptr check"); + for (auto index : ost->GetVersionsIndices()) { + if (ost->IsFormal()) { + canRemoveStmt = false; + break; + } + if (index == ost->GetZeroVersionIndex()) { + continue; + } + MeExpr *var = irMap->GetMeExprByVerID(static_cast(index)); + if (var == nullptr) { + continue; + } + if (eaCG->GetCGNodeFromExpr(var) == nullptr) { + canRemoveStmt = false; + break; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, *intrn, false); + for (auto node : nodes) { + if (!node->CanIgnoreRC()) { + canRemoveStmt = false; + break; + } + } + } + if (!canRemoveStmt) { + break; + } + } + } + if (canRemoveStmt) { + bb->RemoveMeStmt(stmt); + } + break; + } + default: + break; + } + } + } + } +} +} diff --git a/ecmascript/mapleall/maple_ipa/src/old/ipa_option.cpp b/ecmascript/mapleall/maple_ipa/src/old/ipa_option.cpp new file mode 100644 index 0000000000000000000000000000000000000000..01759c638d6ce09447bd6c26efb2fe9d3290819f --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/src/old/ipa_option.cpp @@ -0,0 +1,98 @@ +/* + * Copyright (c) [2022] Huawei 
Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "driver_options.h"
+#include "file_utils.h"
+#include "ipa_option.h"
+#include "mpl_logging.h"
+#include "triple.h"
+
+namespace maple {
+
+namespace opts::ipa {
+  maplecl::Option<bool> help({"--help", "-h"},
+      " -h --help \tPrint usage and exit. Available command names:\n",
+      {ipaCategory});
+
+  maplecl::Option<bool> o1({"--O1", "-O1"},
+      " --O1 \tEnable basic inlining\n",
+      {ipaCategory});
+
+  maplecl::Option<bool> o2({"--O2", "-O2"},
+      " --O2 \tEnable greedy inlining\n",
+      {ipaCategory});
+
+  maplecl::Option<bool> effectipa({"--effectipa", "-effectipa"},
+      " --effectipa \tEnable method side-effect analysis for ipa\n",
+      {ipaCategory});
+
+  maplecl::Option<std::string> inlinefunclist({"--inlinefunclist", "-inlinefunclist"},
+      " --inlinefunclist= \tInlining related configuration\n",
+      {ipaCategory});
+
+  maplecl::Option<bool> quiet({"--quiet", "-quiet"},
+      " --quiet \tDisable debug info output\n",
+      {ipaCategory});
+}
+
+IpaOption &IpaOption::GetInstance() {
+  static IpaOption instance;
+  return instance;
+}
+
+bool IpaOption::SolveOptions() const {
+  if (::opts::target.IsEnabledByUser()) {
+    Triple::GetTriple().Init(::opts::target.GetValue());
+  } else {
+    Triple::GetTriple().Init();
+  }
+
+  if (opts::ipa::help.IsEnabledByUser()) {
+    maplecl::CommandLine::GetCommandLine().HelpPrinter(ipaCategory);
+    return false;
+  }
+
+  if (opts::ipa::quiet.IsEnabledByUser()) {
+    MeOption::quiet = true;
+    Options::quiet = true;
+  }
+
+  maplecl::CopyIfEnabled(MeOption::inlineFuncList, opts::ipa::inlinefunclist);
+
+  return true;
+}
+
+bool IpaOption::ParseCmdline(int argc, char **argv, std::vector<std::string> &fileNames) {
+  // Default value
+  MeOption::inlineFuncList = "";
+
+  (void)maplecl::CommandLine::GetCommandLine().Parse(argc, static_cast<char **>(argv), ipaCategory);
+  bool result = SolveOptions();
+  if (!result) {
+    return false;
+  }
+
+  auto &badArgs = maplecl::CommandLine::GetCommandLine().badCLArgs;
+  for (auto &arg : badArgs) {
+    if (FileUtils::IsFileExists(arg.first)) {
+      fileNames.push_back(arg.first);
+    } else {
+      return false;
+    }
+  }
+
+  return true;
+}
+} // namespace maple
+
diff --git a/ecmascript/mapleall/maple_ipa/src/prop_parameter_type.cpp b/ecmascript/mapleall/maple_ipa/src/prop_parameter_type.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6ed281b7dc7c84487269e0ce8416a49c2a151573
--- /dev/null
+++ b/ecmascript/mapleall/maple_ipa/src/prop_parameter_type.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "call_graph.h"
+#include "maple_phase.h"
+#include "option.h"
+#include "string_utils.h"
+#include "mir_function.h"
+#include "prop_parameter_type.h"
+#include "me_dominance.h"
+
+namespace maple {
+bool PropParamType::CheckOpndZero(const MeExpr *expr) {
+  if (expr->GetMeOp() == kMeOpConst &&
+      static_cast<const ConstMeExpr*>(expr)->IsZero()) {
+    return true;
+  }
+  return false;
+}
+
+bool PropParamType::CheckCondtionStmt(const MeStmt &meStmt) {
+  auto *node = meStmt.GetOpnd(0);
+  auto subOpnd0 = node->GetOpnd(0);
+  auto subOpnd1 = node->GetOpnd(1);
+  return CheckOpndZero(subOpnd0) || CheckOpndZero(subOpnd1);
+}
+
+void PropParamType::ResolveIreadExpr(MeExpr &expr) {
+  switch (expr.GetMeOp()) {
+    case kMeOpIvar: {
+      auto *ivarMeExpr = static_cast<IvarMeExpr*>(&expr);
+      const MeExpr *base = ivarMeExpr->GetBase();
+      if (base->GetMeOp() == kMeOpNary && base->GetOp() == OP_array) {
+        base = base->GetOpnd(0);
+      }
+      if (base->GetMeOp() == kMeOpVar) {
+        const VarMeExpr *varMeExpr = static_cast<const VarMeExpr*>(base);
+        MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol();
+        if (sym->IsFormal() && formalMapLocal[sym] != PointerAttr::kPointerNull) {
+          formalMapLocal[sym] = PointerAttr::kPointerNoNull;
+        }
+      }
+      break;
+    }
+    default: {
+      for (uint32 i = 0; i < expr.GetNumOpnds(); ++i) {
+        auto *subExpr = expr.GetOpnd(i);
+        ResolveIreadExpr(*subExpr);
+      }
+    }
+  }
+}
+
+void PropParamType::InsertNullCheck(CallMeStmt &callStmt, const std::string &funcName,
+                                    uint32 index, MeExpr &receiver) {
+  auto *irMap = curFunc->GetMeFunc()->GetIRMap();
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName);
+  CallAssertNonnullMeStmt *nullCheck = irMap->New<CallAssertNonnullMeStmt>(OP_callassertnonnull,
+      stridx, index, builder.GetCurrentFunction()->GetNameStrIdx());
+  nullCheck->SetBB(callStmt.GetBB());
+  nullCheck->SetSrcPos(callStmt.GetSrcPosition());
+  nullCheck->SetMeStmtOpndValue(&receiver);
+  callStmt.GetBB()->InsertMeStmtBefore(&callStmt, nullCheck);
+}
+
+void PropParamType::ResolveCallStmt(MeStmt &meStmt) {
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  PUIdx puidx = callMeStmt->GetPUIdx();
+  MIRFunction *calledFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puidx);
+
+  // If the parameter is passed through a function call, conservatively assume it may be null.
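Before the body of ResolveCallStmt continues, it is worth pinning down the three-state attribute the pass maintains per pointer formal: a dereference (ResolveIreadExpr) refines an undecided formal to kPointerNoNull, while a comparison against zero or an escape through a call pins it at kPointerNull, and kPointerNull is never overwritten. A compact model of those update rules (the enum names come from the patch; the helper functions are hypothetical):

```cpp
#include <map>
#include <string>

enum class PointerAttr { kPointerUndeiced, kPointerNoNull, kPointerNull };

using FormalMap = std::map<std::string, PointerAttr>;

// A dereference proves the formal was non-null on this path.
void OnDereference(FormalMap &m, const std::string &formal) {
  auto it = m.find(formal);
  if (it != m.end() && it->second != PointerAttr::kPointerNull) {
    it->second = PointerAttr::kPointerNoNull;
  }
}

// An explicit comparison against null (or escaping into an unknown call)
// means the caller may legitimately pass null.
void OnNullCheck(FormalMap &m, const std::string &formal) {
  m[formal] = PointerAttr::kPointerNull;  // sticky: overrides kPointerNoNull
}
```

Making kPointerNull sticky is the conservative choice: one observed null check outweighs any number of dereferences on other paths.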
+  for (auto map : *meStmt.GetMuList()) {
+    OStIdx idx = map.first;
+    SSATab *ssaTab = static_cast<MESSATab*>(dataMap.GetVaildAnalysisPhase(curFunc->GetMeFunc()->GetUniqueID(),
+        &MESSATab::id))->GetResult();
+    OriginalSt *ostTemp = ssaTab->GetSymbolOriginalStFromID(idx);
+    DEBUG_ASSERT(ostTemp != nullptr, "null ptr check");
+    MIRSymbol *tempSymbol = ostTemp->GetMIRSymbol();
+    if (tempSymbol != nullptr && tempSymbol->IsFormal() && curFunc->GetParamNonull(tempSymbol) !=
+        PointerAttr::kPointerNoNull) {
+      formalMapLocal[tempSymbol] = PointerAttr::kPointerNull;
+    }
+  }
+  // Insert the assert stmt and analyze the parameters of curFunc.
+  if (calledFunc->IsExtern() || calledFunc->IsEmpty()) {
+    return;
+  }
+  for (uint32 i = 0; i < calledFunc->GetFormalCount(); i++) {
+    MIRSymbol *formalSt = calledFunc->GetFormal(i);
+    if (formalSt->GetType()->GetKind() == kTypePointer) {
+      if (calledFunc->CheckParamNullType(formalSt) &&
+          calledFunc->GetParamNonull(formalSt) == PointerAttr::kPointerNoNull) {
+        InsertNullCheck(*callMeStmt, calledFunc->GetName(), i, *callMeStmt->GetOpnd(i));
+        MIRSymbol *calledFuncFormalSt = calledFunc->GetFormal(i);
+        if (calledFuncFormalSt->IsFormal() && curFunc->GetParamNonull(calledFuncFormalSt) != PointerAttr::kPointerNull) {
+          formalMapLocal[calledFuncFormalSt] = PointerAttr::kPointerNoNull;
+        }
+      }
+    }
+  }
+}
+
+void PropParamType::TraversalMeStmt(MeStmt &meStmt) {
+  if (meStmt.GetOp() == OP_brfalse || meStmt.GetOp() == OP_brtrue) {
+    auto *opnd = meStmt.GetOpnd(0);
+    if (opnd->GetOp() != OP_eq && opnd->GetOp() != OP_ne && opnd->GetOp() != OP_gt && opnd->GetOp() != OP_ge) {
+      for (uint32 i = 0; i < meStmt.NumMeStmtOpnds(); ++i) {
+        auto *expr = meStmt.GetOpnd(i);
+        ResolveIreadExpr(*expr);
+      }
+      return;
+    }
+    if (CheckCondtionStmt(meStmt)) {
+      auto subOpnd0 = opnd->GetOpnd(0);
+      auto subOpnd1 = opnd->GetOpnd(1);
+      MeExpr *expr = CheckOpndZero(subOpnd0) ?
subOpnd1 : subOpnd0; + if (expr->GetOp() == OP_dread) { + VarMeExpr *varExpr = static_cast(expr); + MIRSymbol *sym = varExpr->GetOst()->GetMIRSymbol(); + if (sym->IsFormal()) { + formalMapLocal[sym] = PointerAttr::kPointerNull; + return; + } + if (meStmt.GetMuList() == nullptr) { + return; + } + for (auto map : *meStmt.GetMuList()) { + OStIdx idx = map.first; + SSATab *ssaTab = static_cast(dataMap.GetVaildAnalysisPhase(curFunc->GetMeFunc()->GetUniqueID(), + &MESSATab::id))->GetResult(); + DEBUG_ASSERT(ssaTab != nullptr, "null ptr check"); + OriginalSt *ostTemp = ssaTab->GetSymbolOriginalStFromID(idx); + DEBUG_ASSERT(ostTemp != nullptr, "null ptr check"); + MIRSymbol *tempSymbol = ostTemp->GetMIRSymbol(); + if (tempSymbol->IsFormal()) { + formalMapLocal[tempSymbol] = PointerAttr::kPointerNull; + return; + } + } + } + } + } else if (meStmt.GetOp() == OP_callassigned || meStmt.GetOp() == OP_call) { + ResolveCallStmt(meStmt); + } else { + for (uint32 i = 0; i < meStmt.NumMeStmtOpnds(); ++i) { + auto *expr = meStmt.GetOpnd(i); + ResolveIreadExpr(*expr); + } + } +} + +void PropParamType::runOnScc(maple::SCCNode &scc) { + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + formalMapLocal.clear(); + curFunc = func; + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + MIRSymbol *formalSt = func->GetFormal(i); + if (formalSt->GetType()->GetKind() != kTypePointer) { + continue; + } + if (formalSt->GetAttr(ATTR_nonnull)) { + func->SetParamNonull(formalSt, PointerAttr::kPointerNoNull); + formalMapLocal[formalSt] = PointerAttr::kPointerNoNull; + } + } + Prop(*func); + for (auto it = formalMapLocal.begin(); it != formalMapLocal.end(); ++it) { + func->SetParamNonull(it->first, it->second); + if (it->second == PointerAttr::kPointerNoNull) { + static_cast(it->first)->SetAttr(ATTR_nonnull); + uint32 index = func->GetFormalIndex(it->first); + if (index != 0xffffffff) { + FormalDef &formalDef = const_cast(func->GetFormalDefAt(index)); + formalDef.formalAttrs.SetAttr(ATTR_nonnull); + } + } + } + } +} + +void PropParamType::Prop(MIRFunction &func) { + for (uint32 i = 0; i < func.GetFormalCount(); i++) { + MIRSymbol *formalSt = func.GetFormal(i); + if (formalSt->GetType()->GetKind() == kTypePointer) { + formalMapLocal[formalSt] = PointerAttr::kPointerUndeiced; + } + } + Dominance *dom = static_cast(dataMap.GetVaildAnalysisPhase(curFunc->GetMeFunc()->GetUniqueID(), + &MEDominance::id))->GetResult(); + for (auto *bb : dom->GetReversePostOrder()) { + if (bb == nullptr) { + return; + } + // traversal on stmt + for (auto &meStmt : bb->GetMeStmts()) { + TraversalMeStmt(meStmt); + } + } +} + +void SCCPropParamType::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.SetPreservedAll(); +} + +bool SCCPropParamType::PhaseRun(maple::SCCNode &scc) { + MIRModule *m = ((scc.GetNodes()[0])->GetMIRFunction())->GetModule(); + auto *memPool = GetPhaseMemPool(); + MapleAllocator alloc = MapleAllocator(memPool); + MaplePhase *it = GetAnalysisInfoHook()->GetOverIRAnalyisData(*m); + CallGraph *cg = static_cast(it)->GetResult(); + CHECK_FATAL(cg != nullptr, "Expecting a valid CallGraph, found nullptr"); + AnalysisDataManager *dataMap = GET_ANALYSIS(SCCPrepare, scc); + PropParamType prop(*memPool, alloc, *m, *cg, *dataMap); + prop.runOnScc(scc); + return true; +} +} diff --git a/ecmascript/mapleall/maple_ipa/src/prop_return_null.cpp b/ecmascript/mapleall/maple_ipa/src/prop_return_null.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..d1c132a002af031b6449d80861e6600b67ac11c1 --- /dev/null +++ b/ecmascript/mapleall/maple_ipa/src/prop_return_null.cpp @@ -0,0 +1,484 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "call_graph.h" +#include "maple_phase.h" +#include "maple_phase.h" +#include "option.h" +#include "string_utils.h" +#include "mir_function.h" +#include "prop_return_null.h" +#include "me_dominance.h" + +namespace maple { +static bool MaybeNull(const MeExpr &expr) { + if (expr.GetMeOp() == kMeOpVar) { + return static_cast(&expr)->GetMaybeNull(); + } else if (expr.GetMeOp() == kMeOpIvar) { + return static_cast(&expr)->GetMaybeNull(); + } else if (expr.GetOp() == OP_retype) { + MeExpr *retypeRHS = (static_cast(&expr))->GetOpnd(0); + return MaybeNull(*retypeRHS); + } + return true; +} + +TyIdx PropReturnAttr::GetInferredTyIdx(MeExpr &expr) const { + if (expr.GetMeOp() == kMeOpVar) { + auto *varMeExpr = static_cast(&expr); + if (varMeExpr->GetInferredTyIdx() == 0u) { + // If varMeExpr->inferredTyIdx has not been set, we can double check + // if it is coming from a static final field + const OriginalSt *ost = varMeExpr->GetOst(); + const MIRSymbol *mirSym = ost->GetMIRSymbol(); + if (mirSym->IsStatic() && mirSym->IsFinal() && mirSym->GetInferredTyIdx() != kInitTyIdx && + mirSym->GetInferredTyIdx() != kNoneTyIdx) { + varMeExpr->SetInferredTyIdx(mirSym->GetInferredTyIdx()); + } + if (mirSym->GetType()->GetKind() == kTypePointer) { + MIRType *pointedType = (static_cast(mirSym->GetType()))->GetPointedType(); + if (pointedType->GetKind() == kTypeClass) { + if ((static_cast(pointedType))->IsFinal()) { + varMeExpr->SetInferredTyIdx(pointedType->GetTypeIndex()); + } + } + } + } + return varMeExpr->GetInferredTyIdx(); + } else if (expr.GetMeOp() == kMeOpIvar) { + return static_cast(&expr)->GetInferredTyIdx(); + } else if (expr.GetOp() == OP_retype) { + MeExpr *retypeRHS = (static_cast(&expr))->GetOpnd(0); + return GetInferredTyIdx(*retypeRHS); + } + return TyIdx(0); +} + +void PropReturnAttr::PropVarInferredType(VarMeExpr &varMeExpr) const { + if (varMeExpr.GetDefBy() == kDefByStmt) { + DassignMeStmt &defStmt = utils::ToRef(safe_cast(varMeExpr.GetDefStmt())); + MeExpr *rhs = defStmt.GetRHS(); + if (rhs->GetOp() == OP_gcmalloc) { + varMeExpr.SetInferredTyIdx(static_cast(rhs)->GetTyIdx()); + varMeExpr.SetMaybeNull(false); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(varMeExpr.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << varMeExpr.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else if (rhs->GetOp() == OP_gcmallocjarray) { + varMeExpr.SetInferredTyIdx(static_cast(rhs)->GetTyIdx()); + varMeExpr.SetMaybeNull(false); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(varMeExpr.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << 
varMeExpr.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else if (!MaybeNull(*rhs)) { + varMeExpr.SetMaybeNull(false); + } else { + TyIdx tyIdx = GetInferredTyIdx(*rhs); + if (tyIdx != 0u) { + varMeExpr.SetInferredTyIdx(tyIdx); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(varMeExpr.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << varMeExpr.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } + } + } else if (varMeExpr.GetDefBy() == kDefByPhi) { + if (PropReturnAttr::debug) { + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] " << "Def by phi " << '\n'; + } + } +} + +void PropReturnAttr::PropIvarInferredType(IvarMeExpr &ivar) const { + IassignMeStmt *defStmt = ivar.GetDefStmt(); + if (defStmt == nullptr) { + return; + } + MeExpr *rhs = defStmt->GetRHS(); + CHECK_NULL_FATAL(rhs); + if (rhs->GetOp() == OP_gcmalloc) { + ivar.GetInferredTyIdx() = static_cast(rhs)->GetTyIdx(); + ivar.SetMaybeNull(false); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << ivar.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else if (rhs->GetOp() == OP_gcmallocjarray) { + ivar.GetInferredTyIdx() = static_cast(rhs)->GetTyIdx(); + ivar.SetMaybeNull(false); + } else if (!MaybeNull(*rhs)) { + ivar.SetMaybeNull(false); + } else { + TyIdx tyIdx = GetInferredTyIdx(*rhs); + if (tyIdx != 0u) { + ivar.SetInferredTyidx(tyIdx); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << ivar.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } + } +} + +void PropReturnAttr::VisitVarPhiNode(MePhiNode &varPhi) { + MapleVector opnds = varPhi.GetOpnds(); + auto *lhs = varPhi.GetLHS(); + // RegPhiNode cases NYI + if (lhs == nullptr || lhs->GetMeOp() != kMeOpVar) { + return; + } + VarMeExpr *lhsVar = static_cast(varPhi.GetLHS()); + for (size_t i = 0; i < opnds.size(); ++i) { + VarMeExpr *opnd = static_cast(opnds[i]); + PropVarInferredType(*opnd); + if (MaybeNull(*opnd)) { + return; + } + } + lhsVar->SetMaybeNull(false); + return; +} + +void PropReturnAttr::VisitMeExpr(MeExpr *meExpr) const { + if (meExpr == nullptr) { + return; + } + MeExprOp meOp = meExpr->GetMeOp(); + switch (meOp) { + case kMeOpVar: { + auto *varExpr = static_cast(meExpr); + PropVarInferredType(*varExpr); + break; + } + case kMeOpReg: + break; + case kMeOpIvar: { + auto *iVar = static_cast(meExpr); + PropIvarInferredType(*iVar); + break; + } + case kMeOpOp: { + auto *meOpExpr = static_cast(meExpr); + for (uint32 i = 0; i < kOperandNumTernary; ++i) { + VisitMeExpr(meOpExpr->GetOpnd(i)); + } + break; + } + case kMeOpNary: { + auto *naryMeExpr = static_cast(meExpr); + for (MeExpr *opnd : naryMeExpr->GetOpnds()) { + VisitMeExpr(opnd); + } + break; + } + case kMeOpAddrof: + case kMeOpAddroffunc: + case kMeOpAddroflabel: + case kMeOpGcmalloc: + case kMeOpConst: + case kMeOpConststr: + case kMeOpConststr16: + case kMeOpSizeoftype: + case kMeOpFieldsDist: + break; + default: + CHECK_FATAL(false, "MeOP NIY"); + break; + } +} + +void PropReturnAttr::ReturnTyIdxInferring(const RetMeStmt &retMeStmt) { + const MapleVector &opnds = retMeStmt.GetOpnds(); + 
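+ // Meet over all return sites: the inferred return type survives only if
+ // every return statement agrees on it, and maybeNull becomes (and stays)
+ // true as soon as any returned value may be null.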
CHECK_FATAL(opnds.size() <= 1, "Assume at most one return value for now"); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + TyIdx tyIdx = GetInferredTyIdx(*opnd); + if (retTy == kNotSeen) { + // seen the first return stmt + retTy = kSeen; + inferredRetTyIdx = tyIdx; + if (!MaybeNull(*opnd)) { + maybeNull = false; + } + } else if (retTy == kSeen) { + // has seen a nonnull return before; check whether they agree + if (inferredRetTyIdx != tyIdx) { + retTy = kFailed; + inferredRetTyIdx = TyIdx(0); // not agreed, cleared. + } + if (MaybeNull(*opnd) || maybeNull) { + maybeNull = true; // some return value may be null; keep maybeNull set. + } + } + } +} + +void PropReturnAttr::TraversalMeStmt(MeStmt &meStmt) { + Opcode op = meStmt.GetOp(); + switch (op) { + case OP_dassign: { + auto *varMeStmt = static_cast(&meStmt); + VisitMeExpr(varMeStmt->GetRHS()); + break; + } + case OP_regassign: { + auto *regMeStmt = static_cast(&meStmt); + VisitMeExpr(regMeStmt->GetRHS()); + break; + } + case OP_maydassign: { + auto *maydStmt = static_cast(&meStmt); + VisitMeExpr(maydStmt->GetRHS()); + break; + } + case OP_iassign: { + auto *ivarStmt = static_cast(&meStmt); + VisitMeExpr(ivarStmt->GetRHS()); + break; + } + case OP_syncenter: + case OP_syncexit: { + auto *syncMeStmt = static_cast(&meStmt); + const MapleVector &opnds = syncMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_throw: { + auto *thrMeStmt = static_cast(&meStmt); + VisitMeExpr(thrMeStmt->GetOpnd()); + break; + } + case OP_assertnonnull: + case OP_eval: + case OP_igoto: + case OP_free: { + auto *unaryStmt = static_cast(&meStmt); + VisitMeExpr(unaryStmt->GetOpnd()); + break; + } + case OP_asm: + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: { + auto *callMeStmt = static_cast(&meStmt); + const MapleVector &opnds = callMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_icall: + case OP_icallassigned: { + auto *icallMeStmt = static_cast(&meStmt); + const MapleVector &opnds = icallMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_intrinsiccallwithtype: + case OP_intrinsiccall: + case OP_xintrinsiccall: + case OP_intrinsiccallwithtypeassigned: + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + auto *intrinCallStmt = static_cast(&meStmt); + const MapleVector &opnds = intrinCallStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_brtrue: + case OP_brfalse: { + auto *condGotoStmt = static_cast(&meStmt); + VisitMeExpr(condGotoStmt->GetOpnd()); + break; + } + case OP_switch: { + auto *switchStmt = static_cast(&meStmt); + VisitMeExpr(switchStmt->GetOpnd()); + break; + } + case OP_return: { + auto *retMeStmt = static_cast(&meStmt); + const MapleVector &opnds = retMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + ReturnTyIdxInferring(*retMeStmt); + 
break; + } + CASE_OP_ASSERT_BOUNDARY { + auto *assMeStmt = static_cast(&meStmt); + VisitMeExpr(assMeStmt->GetOpnd(0)); + VisitMeExpr(assMeStmt->GetOpnd(1)); + break; + } + case OP_jstry: + case OP_jscatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_try: + case OP_catch: + case OP_goto: + case OP_gosub: + case OP_retsub: + case OP_comment: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstoreload: + case OP_membarstorestore: + case OP_callassertnonnull: + break; + default: + CHECK_FATAL(false, "unexpected stmt or NYI"); + } + if (meStmt.GetOp() != OP_callassigned) { + return; + } + MapleVector *mustDefList = meStmt.GetMustDefList(); + if (mustDefList->empty()) { + return; + } + MeExpr *meLHS = mustDefList->front().GetLHS(); + if (meLHS->GetMeOp() != kMeOpVar) { + return; + } + auto *lhsVar = static_cast(meLHS); + auto *callMeStmt = static_cast(&meStmt); + MIRFunction &called = callMeStmt->GetTargetFunction(); + if (called.GetAttr(FUNCATTR_nonnull)) { + lhsVar->SetMaybeNull(false); + } +} + +void PropReturnAttr::TraversalBB(BB *bb) { + if (bb == nullptr) { + return; + } + // traversal var phi nodes + MapleMap &mePhiList = bb->GetMePhiList(); + for (auto it = mePhiList.begin(); it != mePhiList.end(); ++it) { + MePhiNode *phiMeNode = it->second; + if (phiMeNode == nullptr || phiMeNode->GetLHS()->GetMeOp() != kMeOpVar) { + continue; + } + VisitVarPhiNode(*phiMeNode); + } + // traversal reg phi nodes (NYI) + // traversal on stmt + for (auto &meStmt : bb->GetMeStmts()) { + TraversalMeStmt(meStmt); + } +} + +void PropReturnAttr::Perform(MeFunction &func) { + // Pre-order traverse the dominance tree, so that each def is traversed + // before its use + std::vector<bool> bbVisited(func.GetCfg()->GetAllBBs().size(), false); + Dominance *dom = static_cast(dataMap.GetVaildAnalysisPhase(func.GetUniqueID(), + &MEDominance::id))->GetResult(); + for (auto *bb : dom->GetReversePostOrder()) { + TraversalBB(bb); + } + MIRFunction *mirFunc = func.GetMirFunc(); + if (mirFunc == nullptr) { + return; + } + if (retTy == kSeen && !maybeNull) { + mirFunc->SetRetrunAttrKind(kPointerNoNull); + mirFunc->SetAttr(FUNCATTR_nonnull); + } +} + +void PropReturnAttr::Initialize(maple::SCCNode &scc) { + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + if (func->GetAttr(FUNCATTR_nonnull)) { + func->SetRetrunAttrKind(kPointerNoNull); + } + } +} + +void PropReturnAttr::Prop(maple::SCCNode &scc) { + for (auto *cgNode : scc.GetNodes()) { + retTy = kNotSeen; + maybeNull = true; + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty() || func->GetReturnType()->GetKind() != kTypePointer) { + continue; + } + if (func->GetRetrunAttrKind() == PointerAttr::kPointerNoNull) { + continue; + } + MeFunction *meFunc = func->GetMeFunc(); + Perform(*meFunc); + } +} + +void SCCPropReturnAttr::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} + +bool SCCPropReturnAttr::PhaseRun(maple::SCCNode &scc) { + MIRModule *m = ((scc.GetNodes()[0])->GetMIRFunction())->GetModule(); + auto *memPool = GetPhaseMemPool(); + MapleAllocator alloc = MapleAllocator(memPool); + MaplePhase *it = GetAnalysisInfoHook()->GetTopLevelAnalyisData(*m); + CallGraph *cg = static_cast(it)->GetResult(); + CHECK_FATAL(cg != nullptr, "Expecting a valid CallGraph, found nullptr"); + AnalysisDataManager *dataMap = GET_ANALYSIS(SCCPrepare, scc); + PropReturnAttr prop(*memPool, alloc, *m, *cg, *dataMap); + prop.Initialize(scc);
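+ // Initialize() seeds kPointerNoNull for functions already attributed
+ // FUNCATTR_nonnull; Prop() then infers the return attribute for every other
+ // pointer-returning function in this SCC.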
+ prop.Prop(scc); + return true; +} +} diff --git a/ecmascript/mapleall/maple_ir/BUILD.gn b/ecmascript/mapleall/maple_ir/BUILD.gn new file mode 100755 index 0000000000000000000000000000000000000000..1a540a07287409cc04bfcb5c986fc18d3268662a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/BUILD.gn @@ -0,0 +1,115 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +include_directories = [ + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_phase/include", +] + +src_libmplir = [ + "src/global_tables.cpp", + "src/intrinsics.cpp", + "src/lexer.cpp", + "src/mir_symbol_builder.cpp", + "src/mir_builder.cpp", + "src/mir_const.cpp", + "src/mir_scope.cpp", + "src/mir_function.cpp", + "src/mir_lower.cpp", + "src/mir_module.cpp", + "src/verification.cpp", + "src/verify_annotation.cpp", + "src/verify_mark.cpp", + "src/mir_nodes.cpp", + "src/mir_symbol.cpp", + "src/mir_type.cpp", + "src/opcode_info.cpp", + "src/option.cpp", + "src/mpl2mpl_options.cpp", + "src/parser.cpp", + "src/mir_parser.cpp", + "src/mir_pragma.cpp", + "src/printing.cpp", + "src/bin_func_import.cpp", + "src/bin_func_export.cpp", + "src/bin_mpl_import.cpp", + "src/bin_mpl_export.cpp", + "src/debug_info.cpp", + "src/debug_info_util.cpp", + "${MAPLEALL_ROOT}/maple_ipa/src/old/ea_connection_graph.cpp", +] + +src_irbuild = [ "src/driver.cpp" ] + +#configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +#cflags_cc += [ "-DSTORE_BACK_VTABLE_REF_AFTER_LOAD=1" ] + +static_library("libmplir") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + #cflags_cc += [ "-fPIC" ] + sources = src_libmplir + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + deps = [ + "${MAPLEALL_ROOT}/maple_driver:libdriver_option", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline" + ] +} + +executable("irbuild") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_irbuild + include_dirs = include_directories + deps = [ + ":libmplir", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libsec_static", + ] +} + +src_mpldbg = [ "src/mpl_dbg.cpp" ] + +executable("mpldbg") { + configs = [] + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_mpldbg + include_dirs = include_directories + deps = [ + ":libmplir", + "${MAPLEALL_ROOT}/maple_phase:libmplphase",
"${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libsec_static", + ] +} diff --git a/ecmascript/mapleall/maple_ir/include/all_attributes.def b/ecmascript/mapleall/maple_ir/include/all_attributes.def new file mode 100644 index 0000000000000000000000000000000000000000..525cf7364d1340d81611cc5b35a387f203be3de2 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/all_attributes.def @@ -0,0 +1,123 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* all possible attribute names from typeattrs.def, funcattrs.def and fieldattrs.def */ + ATTR(const) + ATTR(final) + ATTR(generic) + ATTR(implicit) + ATTR(private) + ATTR(protected) + ATTR(public) + ATTR(static) + ATTR(synthetic) + ATTR(used) + ATTR(hiddenapiblack) + ATTR(hiddenapigrey) +#ifdef FUNC_ATTR + ATTR(bridge) + ATTR(constructor) + ATTR(critical_native) + ATTR(declared_synchronized) + ATTR(default) + ATTR(destructor) + ATTR(delete) + ATTR(fast_native) + ATTR(inline) + ATTR(always_inline) + ATTR(noinline) + ATTR(native) + ATTR(strict) + ATTR(varargs) + ATTR(virtual) + ATTR(nosideeffect) + ATTR(pure) + ATTR(noexcept) + ATTR(nodefargeffect) + ATTR(nodefeffect) + ATTR(noretglobal) + ATTR(nothrow_exception) + ATTR(noretarg) + ATTR(noprivate_defeffect) + ATTR(ipaseen) + ATTR(rclocalunowned) + ATTR(callersensitive) + ATTR(weakref) + ATTR(safed) + ATTR(unsafed) + ATTR(noreturn) +#endif +#if defined(FUNC_ATTR) || defined(TYPE_ATTR) + ATTR(abstract) + ATTR(extern) + ATTR(interface) + ATTR(local) + ATTR(optimized) + ATTR(synchronized) + ATTR(weak) +#endif +#if defined(TYPE_ATTR) || defined(FIELD_ATTR) +#include "memory_order_attrs.def" + ATTR(enum) + ATTR(restrict) + ATTR(transient) + ATTR(volatile) + ATTR(rcunowned) + ATTR(rcweak) + ATTR(final_boundary_size) + ATTR(tls_static) + ATTR(tls_dynamic) +#endif +#ifdef TYPE_ATTR + ATTR(annotation) + ATTR(readonly) + ATTR(verified) + ATTR(localrefvar) + ATTR(rcunownedthis) + ATTR(incomplete_array) + ATTR(may_alias) + ATTR(static_init_zero) +#endif +#ifdef FUNC_ATTR + ATTR(firstarg_return) + ATTR(called_once) +#endif +#ifdef STMT_ATTR + ATTR(insaferegion) +#endif + ATTR(oneelem_simd) + ATTR(nonnull) + ATTR(section) + ATTR(asmattr) +#if defined(FUNC_ATTR) && !defined(NOCONTENT_ATTR) + ATTR(alias) + ATTR(constructor_priority) + ATTR(destructor_priority) +#endif +#if (defined(TYPE_ATTR) || defined(FIELD_ATTR)) && !defined(NOCONTENT_ATTR) + ATTR(pack) +#endif +#ifdef FUNC_ATTR + ATTR(initialization) + ATTR(termination) +#endif +#if (defined(FUNC_ATTR) || defined(STMT_ATTR)) + ATTR(ccall) + ATTR(webkitjscall) + ATTR(ghcall) +#endif +#if defined(FUNC_ATTR) && !defined(NOCONTENT_ATTR) + ATTR(frame_pointer) + ATTR(frame_reserved_slots) +#endif \ No newline at end of file diff --git a/ecmascript/mapleall/maple_ir/include/bin_mir_file.h b/ecmascript/mapleall/maple_ir/include/bin_mir_file.h new file mode 100644 index 
0000000000000000000000000000000000000000..2cc010c8d37c9911083903289b6064051bda0507 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/bin_mir_file.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_BIN_MIR_FILE_H +#define MAPLE_IR_INCLUDE_BIN_MIR_FILE_H +#include <string> +#include "types_def.h" + +namespace maple { +const std::string kBinMirFileID = "HWCMPL"; // for magic in file header +constexpr uint8 kVersionMajor = 0; // experimental version +constexpr uint8 kVersionMinor = 1; +constexpr int kMagicSize = 7; + +enum BinMirFileType { + kMjsvmFileTypeCmplV1, + kMjsvmFileTypeCmpl, // kCmpl v2 is the release version of + kMjsvmFileTypeUnknown +}; + +inline uint8 MakeVersionNum(uint8 major, uint8 minor) { + uint8 mj = major & 0x0Fu; + uint8 mn = minor & 0x0Fu; + constexpr uint8 shiftNum = 4; + return (mj << shiftNum) | mn; +} + +// file header for binary format kMmpl, 8B in total +// Note the header is different from the specification +struct BinMIRFileHeader { + char magic[kMagicSize]; // “HWCMPL”, or "HWLOS_" + uint8 segNum; // number of segments (e.g. one raw IR file is a segment unit) + uint8 type; // enum of type of VM file (e.g. MapleIR, TE) + uint8 version; // version of IR format (should be major.minor) +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MIR_FILE_H diff --git a/ecmascript/mapleall/maple_ir/include/bin_mpl_export.h b/ecmascript/mapleall/maple_ir/include/bin_mpl_export.h new file mode 100644 index 0000000000000000000000000000000000000000..918c8e04eb27ba68f311a2074462f156bf0f3bc8 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/bin_mpl_export.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H +#define MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_function.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "ea_connection_graph.h" + +namespace maple { +enum : uint8 { + kBinString = 1, + kBinUsrString = kBinString, + kBinInitConst = 2, + kBinSymbol = 3, + kBinFunction = 4, + kBinCallinfo = 5, + kBinKindTypeScalar = 6, + kBinKindTypeByName = 7, + kBinKindTypePointer = 8, + kBinKindTypeFArray = 9, + kBinKindTypeJarray = 10, + kBinKindTypeArray = 11, + kBinKindTypeFunction = 12, + kBinKindTypeParam = 13, + kBinKindTypeInstantVector = 14, + kBinKindTypeGenericInstant = 15, + kBinKindTypeBitField = 16, + kBinKindTypeStruct = 17, // for kTypeStruct, kTypeStructIncomplete and kTypeUnion + kBinKindTypeClass = 18, // for kTypeClass, and kTypeClassIncomplete + kBinKindTypeInterface = 19, // for kTypeInterface, and kTypeInterfaceIncomplete + kBinKindConstInt = 20, + kBinKindConstAddrof = 21, + kBinKindConstAddrofFunc = 22, + kBinKindConstStr = 23, + kBinKindConstStr16 = 24, + kBinKindConstFloat = 25, + kBinKindConstDouble = 26, + kBinKindConstAgg = 27, + kBinKindConstSt = 28, + kBinContentStart = 29, + kBinStrStart = 30, + kBinTypeStart = 31, + kBinCgStart = 32, + kBinSeStart = 33, + kBinFinish = 34, + kStartMethod = 35, + kBinEaCgNode = 36, + kBinEaCgActNode = 37, + kBinEaCgFieldNode = 38, + kBinEaCgRefNode = 39, + kBinEaCgObjNode = 40, + kBinEaCgStart = 41, + kBinEaStart = 42, + kBinNodeBlock = 43, +// kBinOpStatement : 44, +// kBinOpExpression : 45, + kBinReturnvals = 46, + kBinTypeTabStart = 47, + kBinSymStart = 48, + kBinSymTabStart = 49, + kBinFuncIdInfoStart = 50, + kBinFormalStart = 51, + kBinPreg = 52, + kBinSpecialReg = 53, + kBinLabel = 54, + kBinTypenameStart = 55, + kBinHeaderStart = 56, + kBinAliasMapStart = 57, +// kBinKindTypeViaTypename : 58, +// kBinKindSymViaSymname : 59, +// kBinKindFuncViaSymname : 60, + kBinFunctionBodyStart = 61, + kBinFormalWordsTypeTagged = 62, + kBinFormalWordsRefCounted = 63, + kBinLocalWordsTypeTagged = 64, + kBinLocalWordsRefCounter = 65, + kBinKindConstAddrofLabel = 66, + kBinKindConstAddrofLocal = 67, +}; + +// this value is used to check wether a file is a binary mplt file +constexpr int32 kMpltMagicNumber = 0xC0FFEE; +class BinaryMplExport { + public: + explicit BinaryMplExport(MIRModule &md); + virtual ~BinaryMplExport() = default; + + void Export(const std::string &fname, std::unordered_set *dumpFuncSet); + void WriteNum(int64 x); + void Write(uint8 b); + void OutputType(TyIdx tyIdx); + void WriteFunctionBodyField(uint64 contentIdx, std::unordered_set *dumpFuncSet); + void OutputConst(MIRConst *constVal); + void OutputConstBase(const MIRConst &constVal); + void OutputTypeBase(const MIRType &type); + void OutputTypePairs(const MIRInstantVectorType &type); + void OutputStr(const GStrIdx &gstr); + void OutputUsrStr(UStrIdx ustr); + void OutputTypeAttrs(const TypeAttrs &ta); + void OutputPragmaElement(const MIRPragmaElement &e); + void OutputPragma(const MIRPragma &p); + void OutputFieldPair(const FieldPair &fp); + void OutputMethodPair(const MethodPair &memPool); + void OutputFieldsOfStruct(const FieldVector &fields); + void OutputMethodsOfStruct(const MethodVector &methods); + void OutputStructTypeData(const MIRStructType &type); + void OutputImplementedInterfaces(const std::vector &interfaces); + void OutputInfoIsString(const std::vector &infoIsString); + void OutputInfo(const std::vector &info, const std::vector &infoIsString); + 
void OutputPragmaVec(const std::vector &pragmaVec); + void OutputClassTypeData(const MIRClassType &type); + void OutputSymbol(MIRSymbol *sym); + void OutputFunction(PUIdx puIdx); + void OutputInterfaceTypeData(const MIRInterfaceType &type); + void OutputSrcPos(const SrcPosition &pos); + void OutputAliasMap(MapleMap &aliasVarMap); + void OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString); + void OutputFuncIdInfo(MIRFunction *func); + void OutputLocalSymbol(MIRSymbol *sym); + void OutputPreg(MIRPreg *preg); + void OutputLabel(LabelIdx lidx); + void OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab); + void OutputFormalsStIdx(MIRFunction *func); + void OutputFuncViaSym(PUIdx puIdx); + void OutputExpression(BaseNode *e); + void OutputBaseNode(const BaseNode *b); + void OutputReturnValues(const CallReturnVector *retv); + void OutputBlockNode(BlockNode *block); + + const MIRModule &GetMIRModule() const { + return mod; + } + + bool not2mplt; // this export is not to an mplt file + MIRFunction *curFunc = nullptr; + + private: + using CallSite = std::pair; + void WriteEaField(const CallGraph &cg); + void WriteEaCgField(EAConnectionGraph *eaCg); + void OutEaCgNode(EACGBaseNode &node); + void OutEaCgBaseNode(const EACGBaseNode &node, bool firstPart); + void OutEaCgFieldNode(EACGFieldNode &field); + void OutEaCgRefNode(const EACGRefNode &ref); + void OutEaCgActNode(const EACGActualNode &act); + void OutEaCgObjNode(EACGObjectNode &obj); + void WriteCgField(uint64 contentIdx, const CallGraph *cg); + void WriteSeField(); + void OutputCallInfo(CallInfo &callInfo); + void WriteContentField4mplt(int fieldNum, uint64 *fieldStartP); + void WriteContentField4nonmplt(int fieldNum, uint64 *fieldStartP); + void WriteContentField4nonJava(int fieldNum, uint64 *fieldStartP); + void WriteStrField(uint64 contentIdx); + void WriteHeaderField(uint64 contentIdx); + void WriteTypeField(uint64 contentIdx, bool useClassList = true); + void Init(); + void WriteSymField(uint64 contentIdx); + void WriteInt(int32 x); + uint8 Read(); + int32 ReadInt(); + void WriteInt64(int64 x); + void WriteAsciiStr(const std::string &str); + void Fixup(size_t i, int32 x); + void DumpBuf(const std::string &name); + void AppendAt(const std::string &name, int32 offset); + void ExpandFourBuffSize(); + + MIRModule &mod; + size_t bufI = 0; + std::vector buf; + std::unordered_map gStrMark; + std::unordered_map funcMark; + std::string importFileName; + std::unordered_map uStrMark; + std::unordered_map symMark; + std::unordered_map typMark; + std::unordered_map localSymMark; + std::unordered_map localPregMark; + std::unordered_map labelMark; + friend class UpdateMplt; + std::unordered_map callInfoMark; + std::map *func2SEMap = nullptr; + std::unordered_map eaNodeMark; + bool inIPA = false; + static int typeMarkOffset; // offset of mark (tag in binmplimport) resulting from duplicated function +}; + +class UpdateMplt { + public: + UpdateMplt() = default; + ~UpdateMplt() = default; + class ManualSideEffect { + public: + ManualSideEffect(std::string name, bool p, bool u, bool d, bool o, bool e) + : funcName(name), pure(p), defArg(u), def(d), object(o), exception(e) {}; + virtual ~ManualSideEffect() = default; + + const std::string &GetFuncName() const { + return funcName; + } + + bool GetPure() const { + return pure; + } + + bool GetDefArg() const { + return defArg; + } + + bool GetDef() const { + return def; + } + + bool GetObject() const { + return object; + } + + bool GetException() const { + return 
exception; + } + + bool GetPrivateUse() const { + return privateUse; + } + + bool GetPrivateDef() const { + return privateDef; + } + + private: + std::string funcName; + bool pure; + bool defArg; + bool def; + bool object; + bool exception; + bool privateUse = false; + bool privateDef = false; + }; + void UpdateCgField(BinaryMplt &binMplt, const CallGraph &cg); +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H diff --git a/ecmascript/mapleall/maple_ir/include/bin_mpl_import.h b/ecmascript/mapleall/maple_ir/include/bin_mpl_import.h new file mode 100644 index 0000000000000000000000000000000000000000..ee7f909f9bb91203f9609623b361b2a19ba9baff --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/bin_mpl_import.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H +#define MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "mir_builder.h" +#include "ea_connection_graph.h" +namespace maple { +class BinaryMplImport { + public: + explicit BinaryMplImport(MIRModule &md) : mod(md), mirBuilder(&md) {} + BinaryMplImport &operator=(const BinaryMplImport&) = delete; + BinaryMplImport(const BinaryMplImport&) = delete; + + virtual ~BinaryMplImport() { + for (MIRStructType *structPtr : tmpStruct) { + delete structPtr; + } + for (MIRClassType *classPtr : tmpClass) { + delete classPtr; + } + for (MIRInterfaceType *interfacePtr : tmpInterface) { + delete interfacePtr; + } + } + + uint64 GetBufI() const { + return bufI; + } + void SetBufI(uint64 bufIVal) { + bufI = bufIVal; + } + + bool IsBufEmpty() const { + return buf.empty(); + } + size_t GetBufSize() const { + return buf.size(); + } + + int32 GetContent(int64 key) const { + return content.at(key); + } + + void SetImported(bool importedVal) { + imported = importedVal; + } + + bool Import(const std::string &modid, bool readSymbols = false, bool readSe = false); + bool ImportForSrcLang(const std::string &modid, MIRSrcLang &srcLang); + MIRSymbol *GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mclass, MIRStorageClass sclass, + MIRFunction *func, uint8 scpID); + int32 ReadInt(); + int64 ReadNum(); + private: + void ReadContentField(); + void ReadStrField(); + void ReadHeaderField(); + void ReadTypeField(); + void ReadSymField(); + void ReadSymTabField(); + void ReadCgField(); + EAConnectionGraph *ReadEaCgField(); + void ReadEaField(); + EACGBaseNode &InEaCgNode(EAConnectionGraph &newEaCg); + void InEaCgBaseNode(EACGBaseNode &base, EAConnectionGraph &newEaCg, bool firstPart); + void InEaCgActNode(EACGActualNode &actual); + void InEaCgFieldNode(EACGFieldNode &field, EAConnectionGraph &newEaCg); + void InEaCgObjNode(EACGObjectNode &obj, EAConnectionGraph &newEaCg); + void InEaCgRefNode(EACGRefNode &ref); + CallInfo *ImportCallInfo(); + void MergeDuplicated(PUIdx methodPuidx, std::vector &targetSet, std::vector &newSet); + void ReadSeField(); + void 
Jump2NextField(); + void Reset(); + void SkipTotalSize(); + void ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize); + MIRType &InsertInTypeTables(MIRType &ptype); + void InsertInHashTable(MIRType &ptype); + void SetupEHRootType(); + void UpdateMethodSymbols(); + void ImportConstBase(MIRConstKind &kind, MIRTypePtr &type); + MIRConst *ImportConst(MIRFunction *func); + GStrIdx ImportStr(); + UStrIdx ImportUsrStr(); + MIRType *CreateMirType(MIRTypeKind kind, GStrIdx strIdx, int64 tag) const; + MIRGenericInstantType *CreateMirGenericInstantType(GStrIdx strIdx) const; + MIRBitFieldType *CreateBitFieldType(uint8 fieldsize, PrimType pt, GStrIdx strIdx) const; + void CompleteAggInfo(TyIdx tyIdx); + TyIdx ImportType(bool forPointedType = false); + TyIdx ImportTypeNonJava(); + void ImportTypeBase(PrimType &primType, GStrIdx &strIdx, bool &nameIsLocal); + void InSymTypeTable(); + void ImportTypePairs(std::vector &insVecType); + TypeAttrs ImportTypeAttrs(); + MIRPragmaElement *ImportPragmaElement(); + MIRPragma *ImportPragma(); + void ImportFieldPair(FieldPair &fp); + void ImportMethodPair(MethodPair &memPool); + void ImportMethodsOfStructType(MethodVector &methods); + void ImportStructTypeData(MIRStructType &type); + void ImportInterfacesOfClassType(std::vector &interfaces); + void ImportInfoIsStringOfStructType(MIRStructType &type); + void ImportInfoOfStructType(MIRStructType &type); + void ImportPragmaOfStructType(MIRStructType &type); + void SetClassTyidxOfMethods(MIRStructType &type); + void ImportClassTypeData(MIRClassType &type); + void ImportInterfaceTypeData(MIRInterfaceType &type); + PUIdx ImportFunction(); + MIRSymbol *InSymbol(MIRFunction *func); + void ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString); + void ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab); + void ImportFuncIdInfo(MIRFunction *func); + MIRSymbol *ImportLocalSymbol(MIRFunction *func); + PregIdx ImportPreg(MIRFunction *func); + LabelIdx ImportLabel(MIRFunction *func); + void ImportFormalsStIdx(MIRFunction *func); + void ImportAliasMap(MIRFunction *func); + void ImportSrcPos(SrcPosition &pos); + void ImportBaseNode(Opcode &o, PrimType &typ); + PUIdx ImportFuncViaSym(MIRFunction *func); + BaseNode *ImportExpression(MIRFunction *func); + void ImportReturnValues(MIRFunction *func, CallReturnVector *retv); + BlockNode *ImportBlockNode(MIRFunction *fn); + void ReadFunctionBodyField(); + void ReadFileAt(const std::string &modid, int32 offset); + uint8 Read(); + int64 ReadInt64(); + void ReadAsciiStr(std::string &str); + int32 GetIPAFileIndex(std::string &name); + + bool inCG = false; + bool inIPA = false; + bool imported = true; // used only by irbuild to convert to ascii + bool importingFromMplt = false; // decided based on magic number + uint64 bufI = 0; + std::vector buf; + std::map content; + MIRModule &mod; + MIRBuilder mirBuilder; + std::vector gStrTab; + std::vector uStrTab; + std::vector tmpStruct; + std::vector tmpClass; + std::vector tmpInterface; + std::vector typTab; + std::vector funcTab; + std::vector symTab; + std::vector localSymTab; + std::vector localPregTab; + std::vector localLabelTab; + std::vector callInfoTab; + std::vector eaCgTab; + std::vector methodSymbols; + std::vector definedLabels; + std::string importFileName; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H diff --git a/ecmascript/mapleall/maple_ir/include/bin_mplt.h b/ecmascript/mapleall/maple_ir/include/bin_mplt.h new file mode 100644 index 
0000000000000000000000000000000000000000..e3e8359c1f9280775c68c1ae3c8b657172afdc7a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/bin_mplt.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_BIN_MPLT_H +#define MAPLE_IR_INCLUDE_BIN_MPLT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "bin_mpl_export.h" +#include "bin_mpl_import.h" + +namespace maple { +class BinaryMplt { + public: + + explicit BinaryMplt(MIRModule &md) : mirModule(md), binImport(md), binExport(md) {} + + virtual ~BinaryMplt() = default; + + void Export(const std::string &suffix, std::unordered_set *dumpFuncSet = nullptr) { + binExport.Export(suffix, dumpFuncSet); + } + + bool Import(const std::string &modID, bool readCG = false, bool readSE = false) { + importFileName = modID; + return binImport.Import(modID, readCG, readSE); + } + + const MIRModule &GetMod() const { + return mirModule; + } + + BinaryMplImport &GetBinImport() { + return binImport; + } + + BinaryMplExport &GetBinExport() { + return binExport; + } + + std::string &GetImportFileName() { + return importFileName; + } + + void SetImportFileName(const std::string &fileName) { + importFileName = fileName; + } + + private: + MIRModule &mirModule; + BinaryMplImport binImport; + BinaryMplExport binExport; + std::string importFileName; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPLT_H diff --git a/ecmascript/mapleall/maple_ir/include/binary_op.def b/ecmascript/mapleall/maple_ir/include/binary_op.def new file mode 100644 index 0000000000000000000000000000000000000000..a4a3104c76aab3caf44328daab5f90844877eff4 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/binary_op.def @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +BINARYOP(add) +BINARYOP(ashr) +BINARYOP(band) +BINARYOP(bior) +BINARYOP(bxor) +BINARYOP(cand) +BINARYOP(cior) +BINARYOP(cmp) +BINARYOP(cmpl) +BINARYOP(cmpg) +BINARYOP(div) +BINARYOP(eq) +BINARYOP(gt) +BINARYOP(land) +BINARYOP(lior) +BINARYOP(le) +BINARYOP(lshr) +BINARYOP(lt) +BINARYOP(max) +BINARYOP(min) +BINARYOP(mul) +BINARYOP(ne) +BINARYOP(ge) +BINARYOP(rem) +BINARYOP(shl) +BINARYOP(ror) +BINARYOP(sub) +BINARYOP(CG_array_elem_add) + diff --git a/ecmascript/mapleall/maple_ir/include/cfg_primitive_types.h b/ecmascript/mapleall/maple_ir/include/cfg_primitive_types.h new file mode 100644 index 0000000000000000000000000000000000000000..bf86a4fb1aeae267f07c7203a9748edb5e3073ab --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/cfg_primitive_types.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H +#define MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H + +namespace maple { + uint8 GetPointerSize(); // Circular include dependency with mir_type.h + +// Declaration of enum PrimType +#define LOAD_ALGO_PRIMARY_TYPE +enum PrimType { + PTY_begin, // PrimType begin +#define PRIMTYPE(P) PTY_##P, +#include "prim_types.def" + PTY_end, // PrimType end +#undef PRIMTYPE +}; + +constexpr PrimType kPtyInvalid = PTY_begin; +// just for test, no primitive type for derived SIMD types to be defined +constexpr PrimType kPtyDerived = PTY_end; + +struct PrimitiveTypeProperty { + PrimType type; + + PrimitiveTypeProperty(PrimType type, bool isInteger, bool isUnsigned, + bool isAddress, bool isFloat, bool isPointer, + bool isSimple, bool isDynamic, bool isDynamicAny, + bool isDynamicNone, bool isVector) : + type(type), isInteger(isInteger), isUnsigned(isUnsigned), + isAddress(isAddress), isFloat(isFloat), isPointer(isPointer), + isSimple(isSimple), isDynamic(isDynamic), isDynamicAny(isDynamicAny), + isDynamicNone(isDynamicNone), isVector(isVector) {} + + bool IsInteger() const { return isInteger; } + bool IsUnsigned() const { return isUnsigned; } + + bool IsAddress() const { + if (type == PTY_u64 || type == PTY_u32) { + if ((type == PTY_u64 && GetPointerSize() == 8) || + (type == PTY_u32 && GetPointerSize() == 4)) { + return true; + } else { + return false; + } + } else { + return isAddress; + } + } + + bool IsFloat() const { return isFloat; } + + bool IsPointer() const { + if (type == PTY_u64 || type == PTY_u32) { + if ((type == PTY_u64 && GetPointerSize() == 8) || + (type == PTY_u32 && GetPointerSize() == 4)) { + return true; + } else { + return false; + } + } else { + return isPointer; + } + } + + bool IsSimple() const { return isSimple; } + bool IsDynamic() const { return isDynamic; } + bool IsDynamicAny() const { return isDynamicAny; } + bool IsDynamicNone() const { return isDynamicNone; } + bool IsVector() const { return isVector; } + +private: + bool isInteger; + bool isUnsigned; + bool isAddress; + bool isFloat; + bool isPointer; + bool isSimple; + bool isDynamic; + bool isDynamicAny; + bool isDynamicNone; + bool 
isVector; +}; + +const PrimitiveTypeProperty &GetPrimitiveTypeProperty(PrimType pType); +} // namespace maple +#endif // MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H diff --git a/ecmascript/mapleall/maple_ir/include/cmpl.h b/ecmascript/mapleall/maple_ir/include/cmpl.h new file mode 100644 index 0000000000000000000000000000000000000000..0bd39ba97b8c6eb3ab435cfd6a0727c85bef0ff0 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/cmpl.h @@ -0,0 +1,349 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// This define the lowest level MAPLE IR data structures that are compatible +// with both the C++ and C coding environments of MAPLE +#ifndef MAPLE_INCLUDE_VM_CMPL_V2 +#define MAPLE_INCLUDE_VM_CMPL_V2 +// Still need constant value from MIR +#include +#include "mir_config.h" +#include "types_def.h" +#include "opcodes.h" +#include "prim_types.h" +#include "intrinsics.h" +#include "mir_module.h" + +namespace maple { +extern char appArray[]; +constexpr uint32 kTwoBitVectors = 2; +struct MirFuncT { // 28B + uint16 frameSize; + uint16 upFormalSize; + uint16 moduleID; + uint32 funcSize; // size of code in words + uint8 *formalWordsTypetagged; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP + N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsTypetagged; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP - N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(frameSize) + uint8 *formalWordsRefCounted; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsRefCounted; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(frameSize) + // uint16 numlabels; // removed. 
label table size + // StmtNode **lbl2stmt; // lbl2stmt table, removed; + // the first statement immediately follow MirFuncT + // since it starts with expression, BaseNodeT* is returned + void *FirstInst() const { + return reinterpret_cast(const_cast(this)) + sizeof(MirFuncT); + } + + // there are 4 bitvectors that follow the function code + uint32 FuncCodeSize() const { + return funcSize - (kTwoBitVectors * BlockSize2BitVectorSize(upFormalSize)) - + (kTwoBitVectors * BlockSize2BitVectorSize(frameSize)); + } +}; + +struct MirModuleT { + public: + MIRFlavor flavor; // should be kCmpl + MIRSrcLang srcLang; // the source language + uint16 id; + uint32 globalMemSize; // size of storage space for all global variables + uint8 *globalBlkMap; // the memory map of the block containing all the + // globals, for specifying static initializations + uint8 *globalWordsTypetagged; // bit vector where the Nth bit tells whether + // the Nth word in globalBlkMap has typetag; + // if yes, the typetag is the N+1th word; the + // bitvector's size is given by + // BlockSize2BitvectorSize(globalMemSize) + uint8 *globalWordsRefCounted; // bit vector where the Nth bit tells whether + // the Nth word points to a reference-counted + // dynamic memory block; the bitvector's size + // is given by BlockSize2BitvectorSize(globalMemSize) + PUIdx mainFuncID; // the entry function; 0 if no main function + uint32 numFuncs; // because puIdx 0 is reserved, numFuncs is also the highest puIdx + MirFuncT **funcs; // list of all funcs in the module. +#if 1 // the js2mpl buld always set HAVE_MMAP to 1 // binmir file mmap info + int binMirImageFd; // file handle for mmap +#endif // HAVE_MMAP + void *binMirImageStart; // binimage memory start + uint32 binMirImageLength; // binimage memory size + MirFuncT *FuncFromPuIdx(PUIdx puIdx) const { + MIR_ASSERT(puIdx <= numFuncs); // puIdx starts from 1 + return funcs[puIdx - 1]; + } + + MirModuleT() = default; + ~MirModuleT() = default; + MirFuncT *MainFunc() const { + return (mainFuncID == 0) ? static_cast(nullptr) : FuncFromPuIdx(mainFuncID); + } + + void SetCurFunction(MirFuncT *f) { + curFunction = f; + } + + MirFuncT *GetCurFunction() const { + return curFunction; + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + private: + MirFuncT *curFunction = nullptr; +}; + +// At this stage, MirConstT don't need all information in MIRConst +// Note: only be used within Constval node: +// Warning: it's different from full feature MIR. +// only support 32bit int const (lower 32bit). higher 32bit are tags +union MirIntConstT { + int64 value; + uint32 val[2]; // ARM target load/store 2 32bit val instead of 1 64bit +}; + +// currently in VM, only intconst are used. +using MirConstT = MirIntConstT; +// +// It's a stacking of POD data structure to allow precise memory layout +// control and emulate the inheritance relationship of corresponding C++ +// data structures to keep the interface consistent (as much as possible). +// +// Rule: +// 1. base struct should be the first member (to allow safe pointer casting) +// 2. each node (just ops, no data) should be of either 4B or 8B. +// 3. casting the node to proper base type to access base type's fields. +// +// Current memory layout of nodes follows the postfix notation: +// Each operand instruction is positioned immediately before its parent or +// next operand. Memory layout of sub-expressions tree is done recursively. +// E.g. the code for (a + b) contains 3 instructions, starting with the READ a, +// READ b, and then followed by ADD. 
+// For (a + (b - c)), it is: +// +// READ a +// READ b +// READ c +// SUB +// ADD +// +// BaseNodeT is an abstraction of expression. +struct BaseNodeT { // 4B + Opcode op; + PrimType ptyp; + uint8 typeFlag; // a flag to speed up type related operations in the VM + uint8 numOpnds; // only used for N-ary operators, switch and rangegoto + // operands immediately before each node + virtual size_t NumOpnds() const { + if (op == OP_switch || op == OP_rangegoto) { + return 1; + } + return numOpnds; + } + + virtual uint8 GetNumOpnds() const { + return numOpnds; + } + virtual void SetNumOpnds(uint8 num) { + numOpnds = num; + } + + virtual Opcode GetOpCode() const { + return op; + } + + virtual void SetOpCode(Opcode o) { + op = o; + } + + virtual PrimType GetPrimType() const { + return ptyp; + } + + virtual void SetPrimType(PrimType type) { + ptyp = type; + } + + BaseNodeT() : op(OP_undef), ptyp(kPtyInvalid), typeFlag(0), numOpnds(0) {} + + virtual ~BaseNodeT() = default; +}; + +// typeFlag is a 8bit flag to provide short-cut information for its +// associated PrimType, because many type related information extraction +// is not very lightweight. +// Here is the convention: +// | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | +// dyn f i sc c (log2(size)) +// +// bit 0 - bit 3 is for type size information. (now not used in VM?) +// bit 0-2 represents the size of concrete types (not void/aggregate) +// it's the result of log2 operation on the real size to fit in 3 bits. +// which has the following correspondence: +// | 2 | 1 | 0 | type size (in Bytes) +// 0 0 0 1 +// 0 0 1 2 +// 0 1 0 4 +// 0 1 1 8 +// 1 0 0 16 +// +// bit 3 is the flag of "concrete types", i.e., types we know the type +// details. +// when it's 1, the bit0-2 size are valid +// when it's 0, the size of the type is 0, and bit0-2 are meaningless. +// +// bit 4 is for scalar types (1 if it's a scalar type) +// bit 5 is for integer types (1 if it's an integer type) +// bit 6 is for floating types (1 if it's a floating type) +// bit 7 is for dynamic types (1 if it's a dynamic type) +// +// refer to mirtypes.h/mirtypes.cpp in maple_ir directory for more information. 
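+// Illustrative sketch (not part of this patch): per the layout documented
+// above, a concrete type's byte size can be recovered from bits 0-2 once
+// bit 3 says the size bits are valid; the kTypeflag* masks used here are
+// the constants defined just below.
+//   inline uint32 TypeFlagToSize(uint8 typeFlag) {        // hypothetical helper
+//     return ((typeFlag & kTypeflagConcreteMask) != 0)    // bit 3: concrete?
+//                ? (1u << (typeFlag & kTypeflagSizeMask)) // 2^(bits 0-2) bytes
+//                : 0u;                                    // void/aggregate: 0
+//   }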
+const int32 kTypeflagZero = 0x00; +const int32 kTypeflagDynMask = 0x80; +const int32 kTypeflagFloatMask = 0x40; +const int32 kTypeflagIntergerMask = 0x20; +const int32 kTypeflagScalarMask = 0x10; +const int32 kTypeflagConcreteMask = 0x08; +const int32 kTypeflagSizeMask = 0x07; +const int32 kTypeflagDynFloatMask = (kTypeflagDynMask | kTypeflagFloatMask); +const int32 kTypeflagDynIntergerMask = (kTypeflagDynMask | kTypeflagIntergerMask); +inline bool IsDynType(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynMask) != kTypeflagZero); +} + +inline bool IsDynFloat(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynFloatMask) == kTypeflagDynFloatMask); +} + +inline bool IsDynInteger(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynIntergerMask) == kTypeflagDynIntergerMask); +} + +// IsFloat means "is statically floating types", i.e., float, but not dynamic +inline bool IsFloat(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynFloatMask) == kTypeflagFloatMask); +} + +inline bool IsScalarType(uint8 typeFlag) { + return ((typeFlag & kTypeflagScalarMask) != kTypeflagZero); +} + +inline Opcode GetOpcode(const BaseNodeT &nodePtr) { + return nodePtr.op; +} + +inline PrimType GetPrimType(const BaseNodeT &nodePtr) { + return nodePtr.ptyp; +} + +inline uint32 GetOperandsNum(const BaseNodeT &nodePtr) { + return nodePtr.numOpnds; +} + +using UnaryNodeT = BaseNodeT; // alias +struct TypecvtNodeT : public BaseNodeT { // 8B + PrimType fromPTyp; + uint8 fromTypeFlag; // a flag to speed up type related operations + uint8 padding[2]; + PrimType FromType() const { + return fromPTyp; + } +}; + +struct ExtractbitsNodeT : public BaseNodeT { // 8B + uint8 bOffset; + uint8 bSize; + uint16 padding; +}; + +struct IreadoffNodeT : public BaseNodeT { // 8B + int32 offset; +}; + +using BinaryNodeT = BaseNodeT; +// Add expression types to compare node, to +// facilitate the evaluation of postorder stored kCmpl +// Note: the two operands should have the same type if they're +// not dynamic types +struct CompareNodeT : public BaseNodeT { // 8B + PrimType opndType; // type of operands. + uint8 opndTypeFlag; // typeFlag of opntype. + uint8 padding[2]; // every compare node has two opnds. +}; + +using TernaryNodeT = BaseNodeT; +using NaryNodeT = BaseNodeT; +// need to guarantee MIRIntrinsicID is 4B +// Note: this is not supported by c++0x +struct IntrinsicopNodeT : public BaseNodeT { // 8B + MIRIntrinsicID intrinsic; +}; + +struct ConstvalNodeT : public BaseNodeT { // 4B + 8B const value + MirConstT *Constval() const { + auto *tempPtr = const_cast(this); + return (reinterpret_cast(reinterpret_cast(tempPtr) + sizeof(ConstvalNodeT))); + } +}; + +// full MIR exported a pointer to MirConstT +inline MirConstT *GetConstval(const ConstvalNodeT &node) { + return node.Constval(); +} + +// SizeoftypeNode shouldn't be seen here +// ArrayNode shouldn't be seen here +struct AddrofNodeT : public BaseNodeT { // 12B + StIdx stIdx; + FieldID fieldID; +}; + +using DreadNodeT = AddrofNodeT; // same shape. 
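+// Illustrative sketch (not part of this patch): with the postfix layout, a
+// constant's payload is stored immediately after its ConstvalNodeT, so it can
+// be read through the accessor defined above:
+//   int64 ReadConst(const ConstvalNodeT &node) {
+//     MirConstT *c = GetConstval(node); // points just past the 4B node header
+//     return c->value;                  // low 32 bits hold the int const
+//   }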
+struct AddroffuncNodeT : public BaseNodeT { // 8B + PUIdx puIdx; // 32bit now +}; + +struct RegreadNodeT : public BaseNodeT { // 8B + PregIdx regIdx; // 32bit, negative if special register +}; + +struct AddroflabelNodeT : public BaseNodeT { // 8B + uint32 offset; +}; +} // namespace maple +#endif // MAPLE_INCLUDE_VM_CMPL_V2 diff --git a/ecmascript/mapleall/maple_ir/include/debug_info.h b/ecmascript/mapleall/maple_ir/include/debug_info.h new file mode 100644 index 0000000000000000000000000000000000000000..abba88962e36e30fd4e2748cf2c624c2214ee222 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/debug_info.h @@ -0,0 +1,758 @@ +/* + * Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#ifndef MAPLE_IR_INCLUDE_DBG_INFO_H +#define MAPLE_IR_INCLUDE_DBG_INFO_H +#include + +#include "mpl_logging.h" +#include "types_def.h" +#include "prim_types.h" +#include "mir_nodes.h" +#include "mir_scope.h" +#include "namemangler.h" +#include "lexer.h" +#include "dwarf.h" + +namespace maple { +// for more color code: http://ascii-table.com/ansi-escape-sequences.php +#define RESET "\x1B[0m" +#define BOLD "\x1B[1m" +#define RED "\x1B[31m" +#define GRN "\x1B[32m" +#define YEL "\x1B[33m" + +const uint32 kDbgDefaultVal = 0xdeadbeef; +#define HEX(val) std::hex << "0x" << val << std::dec + +class MIRModule; +class MIRType; +class MIRSymbol; +class MIRSymbolTable; +class MIRTypeNameTable; +class DBGBuilder; +class DBGCompileMsgInfo; +class MIRLexer; + +// for compiletime warnings +class DBGLine { + public: + DBGLine(uint32 lnum, const char *l) : lineNum(lnum), codeLine(l) {} + virtual ~DBGLine() {} + + void Dump() { + LogInfo::MapleLogger() << "LINE: " << lineNum << " " << codeLine << std::endl; + } + + private: + uint32 lineNum; + const char *codeLine; +}; + +#define MAXLINELEN 4096 + +class DBGCompileMsgInfo { + public: + DBGCompileMsgInfo(); + virtual ~DBGCompileMsgInfo() {} + void ClearLine(uint32 n); + void SetErrPos(uint32 lnum, uint32 cnum); + void UpdateMsg(uint32 lnum, const char *line); + void EmitMsg(); + + private: + uint32 startLine; // mod 3 + uint32 errLNum; + uint32 errCNum; + uint32 errPos; + uint32 lineNum[3]; + uint8 codeLine[3][MAXLINELEN]; // 3 round-robin line buffers +}; + +enum DBGDieKind { kDwTag, kDwAt, kDwOp, kDwAte, kDwForm, kDwCfa }; + +typedef uint32 DwTag; // for DW_TAG_* +typedef uint32 DwAt; // for DW_AT_* +typedef uint32 DwOp; // for DW_OP_* +typedef uint32 DwAte; // for DW_ATE_* +typedef uint32 DwForm; // for DW_FORM_* +typedef uint32 DwCfa; // for DW_CFA_* + +class DBGDieAttr; + +class DBGExpr { + public: + explicit DBGExpr(MIRModule *m) : dwOp(0), value(kDbgDefaultVal), opnds(m->GetMPAllocator().Adapter()) {} + + DBGExpr(MIRModule *m, DwOp op) : dwOp(op), value(kDbgDefaultVal), opnds(m->GetMPAllocator().Adapter()) {} + + virtual ~DBGExpr() {} + + void AddOpnd(uint64 val) { + opnds.push_back(val); + } + + int GetVal() const { + return value; + } + + void SetVal(int v) { + value = v; + } + + DwOp GetDwOp() const { + 
return dwOp; + } + + void SetDwOp(DwOp op) { + dwOp = op; + } + + MapleVector &GetOpnd() { + return opnds; + } + + size_t GetOpndSize() const { + return opnds.size(); + } + + void Clear() { + return opnds.clear(); + } + + private: + DwOp dwOp; + // for local var fboffset, global var strIdx + int value; + MapleVector opnds; +}; + +class DBGExprLoc { + public: + explicit DBGExprLoc(MIRModule *m) : module(m), exprVec(m->GetMPAllocator().Adapter()), symLoc(nullptr) { + simpLoc = m->GetMemPool()->New(module); + } + + DBGExprLoc(MIRModule *m, DwOp op) : module(m), exprVec(m->GetMPAllocator().Adapter()), symLoc(nullptr) { + simpLoc = m->GetMemPool()->New(module, op); + } + + virtual ~DBGExprLoc() {} + + bool IsSimp() const { + return (exprVec.size() == 0 && simpLoc->GetVal() != static_cast(kDbgDefaultVal)); + } + + int GetFboffset() const { + return simpLoc->GetVal(); + } + + void SetFboffset(int offset) { + simpLoc->SetVal(offset); + } + + int GetGvarStridx() const { + return simpLoc->GetVal(); + } + + void SetGvarStridx(int idx) { + simpLoc->SetVal(idx); + } + + DwOp GetOp() const { + return simpLoc->GetDwOp(); + } + + uint32 GetSize() const { + return static_cast(simpLoc->GetOpndSize()); + } + + void ClearOpnd() { + simpLoc->Clear(); + } + + void AddSimpLocOpnd(uint64 val) { + simpLoc->AddOpnd(val); + } + + DBGExpr *GetSimpLoc() const { + return simpLoc; + } + + void *GetSymLoc() { + return symLoc; + } + + void SetSymLoc(void *loc) { + symLoc = loc; + } + + void Dump(); + + private: + MIRModule *module; + DBGExpr *simpLoc; + MapleVector exprVec; + void *symLoc; +}; + +class DBGDieAttr { + public: + size_t SizeOf(DBGDieAttr *attr); + explicit DBGDieAttr(DBGDieKind k) : dieKind(k), dwAttr(DW_AT_deleted), dwForm(DW_FORM_GNU_strp_alt) { + value.u = kDbgDefaultVal; + } + + virtual ~DBGDieAttr() {} + + void AddSimpLocOpnd(uint64 val) { + value.ptr->AddSimpLocOpnd(val); + } + + void ClearSimpLocOpnd() { + value.ptr->ClearOpnd(); + } + + void Dump(int indent); + + DBGDieKind GetKind() const { + return dieKind; + } + + void SetKind(DBGDieKind kind) { + dieKind = kind; + } + + DwAt GetDwAt() const { + return dwAttr; + } + + void SetDwAt(DwAt at) { + dwAttr = at; + } + + DwForm GetDwForm() const { + return dwForm; + } + + void SetDwForm(DwForm form) { + dwForm = form; + } + + int32 GetI() const { + return value.i; + } + + void SetI(int32 val) { + value.i = val; + } + + uint32 GetId() const { + return value.id; + } + + void SetId(uint32 val) { + value.id = val; + } + + int64 GetJ() const { + return value.j; + } + + void SetJ(int64 val) { + value.j = val; + } + + uint64 GetU() const { + return value.u; + } + + void SetU(uint64 val) { + value.u = val; + } + + float GetF() const { + return value.f; + } + + void SetF(float val) { + value.f = val; + } + + double GetD() const { + return value.d; + } + + void SetD(double val) { + value.d = val; + } + + DBGExprLoc *GetPtr() { + return value.ptr; + } + + void SetPtr(DBGExprLoc *val) { + value.ptr = val; + } + + private: + DBGDieKind dieKind; + DwAt dwAttr; + DwForm dwForm; // type for the attribute value + union { + int32 i; + uint32 id; // dieId when dwForm is of DW_FORM_ref + // strIdx when dwForm is of DW_FORM_string + int64 j; + uint64 u; + float f; + double d; + + DBGExprLoc *ptr; + } value; +}; + +class DBGDie { + public: + DBGDie(MIRModule *m, DwTag tag); + virtual ~DBGDie() {} + void AddAttr(DBGDieAttr *attr); + void AddSubVec(DBGDie *die); + + DBGDieAttr *AddAttr(DwAt attr, DwForm form, uint64 val); + DBGDieAttr *AddSimpLocAttr(DwAt at, DwForm form, 
uint64 val); + DBGDieAttr *AddGlobalLocAttr(DwAt at, DwForm form, uint64 val); + DBGDieAttr *AddFrmBaseAttr(DwAt at, DwForm form); + DBGExprLoc *GetExprLoc(); + bool SetAttr(DwAt attr, uint64 val); + bool SetAttr(DwAt attr, int64 val); + bool SetAttr(DwAt attr, uint32 val); + bool SetAttr(DwAt attr, int32 val); + bool SetAttr(DwAt attr, float val); + bool SetAttr(DwAt attr, double val); + bool SetSimpLocAttr(DwAt attr, int64 val); + bool SetAttr(DwAt attr, DBGExprLoc *ptr); + void ResetParentDie(); + void Dump(int indent); + + uint32 GetId() const { + return id; + } + + void SetId(uint32 val) { + id = val; + } + + DwTag GetTag() const { + return tag; + } + + void SetTag(DwTag val) { + tag = val; + } + + bool GetWithChildren() const { + return withChildren; + } + + void SetWithChildren(bool val) { + withChildren = val; + } + + DBGDie *GetParent() const { + return parent; + } + + void SetParent(DBGDie *val) { + parent = val; + } + + DBGDie *GetSibling() const { + return sibling; + } + + void SetSibling(DBGDie *val) { + sibling = val; + } + + DBGDie *GetFirstChild() const { + return firstChild; + } + + void SetFirstChild(DBGDie *val) { + firstChild = val; + } + + uint32 GetAbbrevId() const { + return abbrevId; + } + + void SetAbbrevId(uint32 val) { + abbrevId = val; + } + + uint32 GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(uint32 val) { + tyIdx = val; + } + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 val) { + offset = val; + } + + uint32 GetSize() const { + return size; + } + + void SetSize(uint32 val) { + size = val; + } + + const MapleVector &GetAttrVec() const { + return attrVec; + } + + MapleVector &GetAttrVec() { + return attrVec; + } + + const MapleVector &GetSubDieVec() const { + return subDieVec; + } + + MapleVector &GetSubDieVec() { + return subDieVec; + } + + uint32 GetSubDieVecSize() const { + return static_cast(subDieVec.size()); + } + + DBGDie *GetSubDieVecAt(uint32 i) const { + return subDieVec[i]; + } + + private: + MIRModule *module; + DwTag tag; + uint32 id; // starts from 1 which is root die compUnit + bool withChildren; + DBGDie *parent; + DBGDie *sibling; + DBGDie *firstChild; + uint32 abbrevId; // id in .debug_abbrev + uint32 tyIdx; // for type TAG + uint32 offset; // Dwarf CU relative offset + uint32 size; // DIE Size in .debug_info + MapleVector attrVec; + MapleVector subDieVec; +}; + +class DBGAbbrevEntry { + public: + DBGAbbrevEntry(MIRModule *m, DBGDie *die); + virtual ~DBGAbbrevEntry() {} + bool Equalto(DBGAbbrevEntry *entry); + void Dump(int indent); + + DwTag GetTag() const { + return tag; + } + + void SetTag(DwTag val) { + tag = val; + } + + uint32 GetAbbrevId() const { + return abbrevId; + } + + void SetAbbrevId(uint32 val) { + abbrevId = val; + } + + bool GetWithChildren() const { + return withChildren; + } + + void SetWithChildren(bool val) { + withChildren = val; + } + + MapleVector &GetAttrPairs() { + return attrPairs; + } + + private: + DwTag tag; + uint32 abbrevId; + bool withChildren; + MapleVector attrPairs; // kDwAt kDwForm pairs +}; + +class DBGAbbrevEntryVec { + public: + DBGAbbrevEntryVec(MIRModule *m, DwTag tag) : tag(tag), entryVec(m->GetMPAllocator().Adapter()) {} + + virtual ~DBGAbbrevEntryVec() {} + + uint32 GetId(MapleVector &attrs); + void Dump(int indent); + + DwTag GetTag() const { + return tag; + } + + void SetTag(DwTag val) { + tag = val; + } + + const MapleVector &GetEntryvec() const { + return entryVec; + } + + MapleVector &GetEntryvec() { + return entryVec; + } + + private: + DwTag tag; + 
MapleVector entryVec; +}; + +class DebugInfo { + public: + DebugInfo(MIRModule *m) + : module(m), + compUnit(nullptr), + dummyTypeDie(nullptr), + lexer(nullptr), + maxId(1), + builder(nullptr), + mplSrcIdx(0), + debugInfoLength(0), + curFunction(nullptr), + compileMsg(nullptr), + parentDieStack(m->GetMPAllocator().Adapter()), + idDieMap(std::less(), m->GetMPAllocator().Adapter()), + abbrevVec(m->GetMPAllocator().Adapter()), + tagAbbrevMap(std::less(), m->GetMPAllocator().Adapter()), + tyIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + stridxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + funcDefStrIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + typeDefTyIdxMap(std::less(), m->GetMPAllocator().Adapter()), + pointedPointerMap(std::less(), m->GetMPAllocator().Adapter()), + funcLstrIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + funcLstrIdxLabIdxMap(std::less(), m->GetMPAllocator().Adapter()), + strps(std::less(), m->GetMPAllocator().Adapter()) { + /* valid entry starting from index 1 as abbrevid starting from 1 as well */ + abbrevVec.push_back(nullptr); + InitMsg(); + varPtrPrefix = std::string(namemangler::kPtrPrefixStr); + } + + virtual ~DebugInfo() {} + + void InitMsg() { + compileMsg = module->GetMemPool()->New(); + } + + void UpdateMsg(uint32 lnum, const char *line) { + compileMsg->UpdateMsg(lnum, line); + } + + void SetErrPos(uint32 lnum, uint32 cnum) { + compileMsg->SetErrPos(lnum, cnum); + } + + void EmitMsg() { + compileMsg->EmitMsg(); + } + + DBGDie *GetDie(uint32 id) { + return idDieMap[id]; + } + + DBGDie *GetDummyTypeDie() { + return dummyTypeDie; + } + + DBGDie *GetDie(const MIRFunction *func); + + void Init(); + void Finish(); + void SetupCU(); + void BuildDebugInfo(); + void Dump(int indent); + + // build tree to populate withChildren, sibling, firstChild + // also insert DW_AT_sibling attributes when needed + void BuildDieTree(); + + // replace type idx with die id in DW_AT_type attributes + void FillTypeAttrWithDieId(); + + void BuildAbbrev(); + uint32 GetAbbrevId(DBGAbbrevEntryVec *, DBGAbbrevEntry *); + + void SetLocalDie(GStrIdx strIdx, const DBGDie *die); + void SetLocalDie(MIRFunction *func, GStrIdx strIdx, const DBGDie *die); + DBGDie *GetLocalDie(GStrIdx strIdx); + DBGDie *GetLocalDie(MIRFunction *func, GStrIdx strIdx); + + LabelIdx GetLabelIdx(GStrIdx strIdx); + LabelIdx GetLabelIdx(MIRFunction *func, GStrIdx strIdx); + void SetLabelIdx(GStrIdx strIdx, LabelIdx idx); + void SetLabelIdx(MIRFunction *func, GStrIdx strIdx, LabelIdx idx); + + uint32 GetMaxId() const { + return maxId; + } + + uint32 GetIncMaxId() { + return maxId++; + } + + DBGDie *GetIdDieMapAt(uint32 i) { + return idDieMap[i]; + } + + void SetIdDieMap(uint32 i, DBGDie *die) { + idDieMap[i] = die; + } + + size_t GetParentDieSize() const { + return parentDieStack.size(); + } + + DBGDie *GetParentDie() { + return parentDieStack.top(); + } + + void PushParentDie(DBGDie *die) { + parentDieStack.push(die); + } + + void PopParentDie() { + parentDieStack.pop(); + } + + void ResetParentDie() { + parentDieStack.clear(); + parentDieStack.push(compUnit); + } + + void AddStrps(uint32 val) { + strps.insert(val); + } + + MapleSet &GetStrps() { + return strps; + } + + uint32 GetDebugInfoLength() const { + return debugInfoLength; + } + + MapleVector &GetAbbrevVec() { + return abbrevVec; + } + + DBGDie *GetCompUnit() const { + return compUnit; + } + + MIRFunction *GetCurFunction() { + return curFunction; + } + + void SetCurFunction(MIRFunction *func) { + curFunction = func; + } + + 
void SetTyidxDieIdMap(const TyIdx tyIdx, const DBGDie *die) { + tyIdxDieIdMap[tyIdx.GetIdx()] = die->GetId(); + } + + DBGDieAttr *CreateAttr(DwAt attr, DwForm form, uint64 val); + + DBGDie *CreateVarDie(MIRSymbol *sym); + DBGDie *CreateVarDie(MIRSymbol *sym, GStrIdx strIdx); // use alt name + DBGDie *CreateFormalParaDie(MIRFunction *func, MIRType *type, MIRSymbol *sym); + DBGDie *CreateFieldDie(maple::FieldPair pair, uint32 lnum); + DBGDie *CreateBitfieldDie(const MIRBitFieldType *type, GStrIdx idx, uint32 prevBits); + DBGDie *CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *type, bool update = false); + DBGDie *CreateClassTypeDie(GStrIdx strIdx, const MIRClassType *type); + DBGDie *CreateInterfaceTypeDie(GStrIdx strIdx, const MIRInterfaceType *type); + DBGDie *CreatePointedFuncTypeDie(MIRFuncType *func); + + DBGDie *GetOrCreateLabelDie(LabelIdx labid); + DBGDie *GetOrCreateTypeAttrDie(MIRSymbol *sym); + DBGDie *GetOrCreateConstTypeDie(TypeAttrs attr, DBGDie *typedie); + DBGDie *GetOrCreateVolatileTypeDie(TypeAttrs attr, DBGDie *typedie); + DBGDie *GetOrCreateFuncDeclDie(MIRFunction *func); + DBGDie *GetOrCreateFuncDefDie(MIRFunction *func, uint32 lnum); + DBGDie *GetOrCreatePrimTypeDie(MIRType *ty); + DBGDie *GetOrCreateTypeDie(MIRType *type); + DBGDie *GetOrCreatePointTypeDie(const MIRPtrType *type); + DBGDie *GetOrCreateArrayTypeDie(const MIRArrayType *type); + DBGDie *GetOrCreateStructTypeDie(const MIRType *type); + + void AddAliasDies(MapleMap &aliasMap); + void AddScopeDie(MIRScope *scope); + + // Functions for calculating the size and offset of each DW_TAG_xxx and DW_AT_xxx + void ComputeSizeAndOffsets(); + void ComputeSizeAndOffset(DBGDie *die, uint32 &offset); + + private: + MIRModule *module; + DBGDie *compUnit; // root die: compilation unit + DBGDie *dummyTypeDie; // workaround for unknown types + MIRLexer *lexer; + uint32 maxId; + DBGBuilder *builder; + GStrIdx mplSrcIdx; + uint32 debugInfoLength; + MIRFunction *curFunction; + + // for compilation messages + DBGCompileMsgInfo *compileMsg; + + MapleStack parentDieStack; + MapleMap idDieMap; + MapleVector abbrevVec; // valid entry starting from index 1 + MapleMap tagAbbrevMap; + + // to be used when derived type references a base type die + MapleMap tyIdxDieIdMap; + MapleMap stridxDieIdMap; + MapleMap funcDefStrIdxDieIdMap; + MapleMap typeDefTyIdxMap; // prevtyIdxtypidx_map + MapleMap pointedPointerMap; + MapleMap> funcLstrIdxDieIdMap; + MapleMap> funcLstrIdxLabIdxMap; + MapleSet strps; + std::string varPtrPrefix; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_DBG_INFO_H diff --git a/ecmascript/mapleall/maple_ir/include/dex2mpl/dexintrinsic.def b/ecmascript/mapleall/maple_ir/include/dex2mpl/dexintrinsic.def new file mode 100644 index 0000000000000000000000000000000000000000..9a338a28ca7828552a3d3d3c66bc250923ffc84b --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/dex2mpl/dexintrinsic.def @@ -0,0 +1,17 @@ +DEF_MIR_INTRINSIC(JAVA_INTERFACE_CALL,\ + "__dex_interface_call", INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_PRINT,\ + "printf", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_SGET,\ + "__dex_clinit_check_sget", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_SPUT,\ + "__dex__clinit_check_sput", INTRNISJAVA, kArgTyVoid, kArgTyDynany, 
kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_NEW,\ + "__dex_clinit_check_new", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_STR_TO_JSTR,\ + "__dex_str_to_jstr", INTRNISJAVA, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +// __dex_random is used to generate a random value used in callback cfg +DEF_MIR_INTRINSIC(JAVA_RANDOM,\ + "__dex_random", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_THROW_CLASSCAST,\ + "MCC_ThrowClassCastException", INTRNISJAVA | INTRNNEVERRETURN, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) diff --git a/ecmascript/mapleall/maple_ir/include/dwarf.def b/ecmascript/mapleall/maple_ir/include/dwarf.def new file mode 100644 index 0000000000000000000000000000000000000000..cc9ff7f5878ba09bb159a7ae46642f3a65eb328e --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/dwarf.def @@ -0,0 +1,163 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + +// TODO: Add other DW-based macros. +#if !( \ + defined DW_TAG || defined DW_AT || defined DW_FORM || \ + defined DW_OP || defined DW_LANG || defined DW_ATE) +#error "Missing definition of DW*" +#endif + +#ifndef DW_TAG +#define DW_TAG(ID, NAME) +#endif + +#ifndef DW_AT +#define DW_AT(ID, NAME) +#endif + +#ifndef DW_FORM +#define DW_FORM(ID, NAME) +#endif + +#ifndef DW_OP +#define DW_OP(ID, NAME) +#endif + +#ifndef DW_LANG +#define DW_LANG(ID, NAME, LOWER_BOUND) +#endif + +#ifndef DW_ATE +#define DW_ATE(ID, NAME) +#endif + +// Tag +DW_TAG(0x0000, null) +DW_TAG(0x0001, array_type) +DW_TAG(0x0002, class_type) +DW_TAG(0x0004, enumeration_type) +DW_TAG(0x0005, formal_parameter) +DW_TAG(0x000a, label) +DW_TAG(0x000b, lexical_block) +DW_TAG(0x000d, member) +DW_TAG(0x000f, pointer_type) +DW_TAG(0x0011, compile_unit) +DW_TAG(0x0013, structure_type) +DW_TAG(0x0015, subroutine_type) +DW_TAG(0x0016, typedef) +DW_TAG(0x0017, union_type) +DW_TAG(0x0018, unspecified_parameters) +DW_TAG(0x001c, inheritance) +DW_TAG(0x0021, subrange_type) +DW_TAG(0x0024, base_type) +DW_TAG(0x0026, const_type) +DW_TAG(0x0028, enumerator) +DW_TAG(0x002e, subprogram) +DW_TAG(0x0034, variable) +DW_TAG(0x0035, volatile_type) +// New in DWARF v3 +DW_TAG(0x0038, interface_type) + +// Attributes +DW_AT(0x01, sibling) +DW_AT(0x02, location) +DW_AT(0x03, name) +DW_AT(0x0b, byte_size) +DW_AT(0x0c, bit_offset) +DW_AT(0x0d, bit_size) +DW_AT(0x10, stmt_list) +DW_AT(0x11, low_pc) +DW_AT(0x12, high_pc) +DW_AT(0x13, language) +DW_AT(0x1b, comp_dir) +DW_AT(0x1c, const_value) +DW_AT(0x25, producer) +DW_AT(0x27, prototyped) +DW_AT(0x2f, upper_bound) +DW_AT(0x32, accessibility) +DW_AT(0x38, data_member_location) +DW_AT(0x39, decl_column) +DW_AT(0x3a, decl_file) +DW_AT(0x3b, decl_line) +DW_AT(0x3e, encoding) +DW_AT(0x3f, external) 
+DW_AT(0x40, frame_base)
+DW_AT(0x47, specification)
+DW_AT(0x49, type)
+// New in DWARF v3
+DW_AT(0x64, object_pointer)
+// New in DWARF v5
+DW_AT(0x8a, deleted)
+// Vendor extensions
+DW_AT(0x2116, GNU_all_tail_call_sites)
+
+// Attribute form encodings
+DW_FORM(0x01, addr)
+DW_FORM(0x05, data2)
+DW_FORM(0x06, data4)
+DW_FORM(0x07, data8)
+DW_FORM(0x08, string)
+DW_FORM(0x0b, data1)
+DW_FORM(0x0c, flag)
+DW_FORM(0x0e, strp)
+DW_FORM(0x10, ref_addr)
+DW_FORM(0x11, ref1)
+DW_FORM(0x12, ref2)
+DW_FORM(0x13, ref4)
+DW_FORM(0x14, ref8)
+// New in DWARF v4
+DW_FORM(0x17, sec_offset)
+DW_FORM(0x18, exprloc)
+DW_FORM(0x19, flag_present)
+// This was defined out of sequence.
+DW_FORM(0x20, ref_sig8)
+// Alternate debug sections proposal (output of "dwz" tool).
+DW_FORM(0x1f20, GNU_ref_alt)
+DW_FORM(0x1f21, GNU_strp_alt)
+
+// DWARF Expression operators.
+DW_OP(0x03, addr)
+DW_OP(0x70, breg0)
+DW_OP(0x71, breg1)
+DW_OP(0x72, breg2)
+DW_OP(0x73, breg3)
+DW_OP(0x74, breg4)
+DW_OP(0x75, breg5)
+DW_OP(0x76, breg6)
+DW_OP(0x77, breg7)
+DW_OP(0x91, fbreg)
+// New in DWARF v3
+DW_OP(0x9c, call_frame_cfa)
+
+// DWARF languages.
+DW_LANG(0x000c, C99, 0)
+
+// DWARF attribute type encodings.
+DW_ATE(0x01, address)
+DW_ATE(0x02, boolean)
+DW_ATE(0x03, complex_float)
+DW_ATE(0x04, float)
+DW_ATE(0x05, signed)
+DW_ATE(0x06, signed_char)
+DW_ATE(0x07, unsigned)
+DW_ATE(0x08, unsigned_char)
+
+#undef DW_TAG
+#undef DW_AT
+#undef DW_FORM
+#undef DW_OP
+#undef DW_LANG
+#undef DW_ATE
diff --git a/ecmascript/mapleall/maple_ir/include/dwarf.h b/ecmascript/mapleall/maple_ir/include/dwarf.h
new file mode 100644
index 0000000000000000000000000000000000000000..b46c2d9dd3a936affb9cbcb4bdf606e716a803e2
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/dwarf.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) [2022] Futurewei Technologies, Inc. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan Permissive Software License v2.
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0.
+ * You may obtain a copy of MulanPSL - 2.0 at:
+ *
+ *   https://opensource.org/licenses/MulanPSL-2.0
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the MulanPSL - 2.0 for more details.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_DWARF_H
+#define MAPLE_IR_INCLUDE_DWARF_H
+
+#include <cstdint>
+
+enum Tag : uint16_t {
+#define DW_TAG(ID, NAME) DW_TAG_##NAME = (ID),
+#include "dwarf.def"
+  DW_TAG_lo_user = 0x4080,
+  DW_TAG_hi_user = 0xffff,
+  DW_TAG_user_base = 0x1000
+};
+
+enum Attribute : uint16_t {
+#define DW_AT(ID, NAME) DW_AT_##NAME = (ID),
+#include "dwarf.def"
+  DW_AT_lo_user = 0x2000,
+  DW_AT_hi_user = 0x3fff,
+};
+
+enum Form : uint16_t {
+#define DW_FORM(ID, NAME) DW_FORM_##NAME = (ID),
+#include "dwarf.def"
+  DW_FORM_lo_user = 0x1f00,
+};
+
+enum LocationAtom {
+#define DW_OP(ID, NAME) DW_OP_##NAME = (ID),
+#include "dwarf.def"
+  DW_OP_lo_user = 0xe0,
+  DW_OP_hi_user = 0xff,
+};
+
+enum TypeKind : uint8_t {
+#define DW_ATE(ID, NAME) DW_ATE_##NAME = (ID),
+#include "dwarf.def"
+  DW_ATE_lo_user = 0x80,
+  DW_ATE_hi_user = 0xff,
+  DW_ATE_void = 0x20
+};
+
+enum AccessAttribute {
+  DW_ACCESS_public = 0x01,
+  DW_ACCESS_protected = 0x02,
+  DW_ACCESS_private = 0x03
+};
+
+enum SourceLanguage {
+#define DW_LANG(ID, NAME, LOWER_BOUND) DW_LANG_##NAME = (ID),
+#include "dwarf.def"
+  DW_LANG_lo_user = 0x8000,
+  DW_LANG_hi_user = 0xffff
+};
+
+#endif
diff --git a/ecmascript/mapleall/maple_ir/include/func_desc.h b/ecmascript/mapleall/maple_ir/include/func_desc.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9c81301d174d8e52a6095543d32d537b285ccf7
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/func_desc.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co.,Ltd. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *   http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_FUNC_DESC_H
+#define MAPLE_IR_INCLUDE_FUNC_DESC_H
+#include "mpl_logging.h"
+namespace maple {
+
+enum class FI {
+  kUnknown = 0,
+  kPure,  // means this function will not modify any global memory.
+  kConst, // means this function will not read/modify any global memory.
+};
+
+static std::string kFIStr[] = {
+  "kUnknown", "kPure", "kConst"
+};
+
+enum class RI {
+  kUnknown = 0, // for ptr value, don't know anything.
+  kNoAlias,     // for ptr value, no alias with any other ptr when this method is returned. As in malloc.
+  kAliasParam0, // for ptr value, it may alias with first param. As in memcpy.
+  kAliasParam1,
+  kAliasParam2,
+  kAliasParam3,
+  kAliasParam4,
+  kAliasParam5,
+};
+
+static std::string kRIStr[] = {
+  "kUnknown",
+  "kNoAlias",
+  "kAliasParam0",
+  "kAliasParam1",
+  "kAliasParam2",
+  "kAliasParam3",
+  "kAliasParam4",
+  "kAliasParam5"
+};
+
+enum class PI {
+  kUnknown = 0,     // for ptr param, may read/write every level memory.
+  kReadWriteMemory, // for ptr param, only read & write the memory it points to.
+  kWriteMemoryOnly, // for ptr param, only write the memory it points to.
+  kReadMemoryOnly,  // for ptr param, only read the memory it points to.
+  kReadSelfOnly,    // for ptr param, only read the ptr itself, do not dereference.
+  kUnused,          // this param is not used in this function.
+};
+
+static std::string kPIStr[] = {
+  "kUnknown",
+  "kReadWriteMemory",
+  "kWriteMemoryOnly",
+  "kReadMemoryOnly",
+  "kReadSelfOnly",
+  "kUnused"
+};
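The FI/RI/PI enums above are deliberately ordered from the weakest fact (kUnknown = 0) to the strongest guarantee, which is what lets the SetFuncInfoNoBetterThan/SetParamInfoNoBetterThan setters in FuncDesc below cap an optimistic assumption with a plain numeric minimum. A minimal standalone sketch of that capping rule (the NoBetterThan helper is illustrative only, not the project's API):

#include <algorithm>

enum class FI { kUnknown = 0, kPure, kConst };

// Keep whichever claim is weaker: capping can only downgrade, never upgrade.
static FI NoBetterThan(FI current, FI cap) {
  return static_cast<FI>(std::min(static_cast<int>(current), static_cast<int>(cap)));
}

int main() {
  FI f = FI::kConst;               // optimistic start, as in InitToBest()
  f = NoBetterThan(f, FI::kPure);  // downgraded to kPure
  f = NoBetterThan(f, FI::kConst); // stays kPure: a later stronger cap cannot restore it
  return static_cast<int>(f);      // 1
}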
+
+// most functions have fewer than 6 parameters.
+const size_t kMaxParamCount = 6;
+struct FuncDesc {
+  FI funcInfo{};
+  RI returnInfo{};
+  PI paramInfo[kMaxParamCount]{};
+  bool configed = false;
+
+  void InitToBest() {
+    funcInfo = FI::kConst;
+    returnInfo = RI::kNoAlias;
+    for (size_t idx = 0; idx < kMaxParamCount; ++idx) {
+      paramInfo[idx] = PI::kUnused;
+    }
+  }
+
+  bool Equals(const FuncDesc &desc) const {
+    if (funcInfo != desc.funcInfo) {
+      return false;
+    }
+    if (returnInfo != desc.returnInfo) {
+      return false;
+    }
+    for (size_t idx = 0; idx < kMaxParamCount; ++idx) {
+      if (paramInfo[idx] != desc.paramInfo[idx]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  bool IsConfiged() const {
+    return configed;
+  }
+
+  void SetConfiged() {
+    configed = true;
+  }
+
+  bool IsConst() const {
+    return funcInfo == FI::kConst;
+  }
+
+  bool IsPure() const {
+    return funcInfo == FI::kPure;
+  }
+
+  bool IsReturnNoAlias() const {
+    return returnInfo == RI::kNoAlias;
+  }
+
+  bool IsReturnAlias() const {
+    return returnInfo >= RI::kAliasParam0;
+  }
+
+  size_t EnumToIndex(const RI &ri) const {
+    switch (ri) {
+      case RI::kAliasParam0: return 0;
+      case RI::kAliasParam1: return 1;
+      case RI::kAliasParam2: return 2;
+      case RI::kAliasParam3: return 3;
+      case RI::kAliasParam4: return 4;
+      case RI::kAliasParam5: return 5;
+      default: {
+        CHECK_FATAL(false, "Impossible.");
+      }
+    }
+  }
+
+  size_t ReturnParamX() const {
+    CHECK_FATAL(returnInfo >= RI::kAliasParam0, "Impossible.");
+    return EnumToIndex(returnInfo);
+  }
+
+  const PI GetParamInfo(size_t index) const {
+    return paramInfo[index];
+  }
+
+  bool IsArgReadSelfOnly(size_t index) const {
+    if (index >= kMaxParamCount) {
+      return false;
+    }
+    return paramInfo[index] == PI::kReadSelfOnly;
+  }
+
+  bool IsArgReadMemoryOnly(size_t index) const {
+    if (index >= kMaxParamCount) {
+      return false;
+    }
+    return paramInfo[index] == PI::kReadMemoryOnly;
+  }
+
+  bool IsArgWriteMemoryOnly(size_t index) const {
+    if (index >= kMaxParamCount) {
+      return false;
+    }
+    return paramInfo[index] == PI::kWriteMemoryOnly;
+  }
+
+  bool IsArgUnused(size_t index) const {
+    if (index >= kMaxParamCount) {
+      return false;
+    }
+    return paramInfo[index] == PI::kUnused;
+  }
+
+  void SetFuncInfo(const FI fi) {
+    funcInfo = fi;
+  }
+
+  void SetFuncInfoNoBetterThan(const FI fi) {
+    auto oldValue = static_cast<size_t>(funcInfo);
+    auto newValue = static_cast<size_t>(fi);
+    if (newValue < oldValue) {
+      SetFuncInfo(static_cast<FI>(newValue));
+    }
+  }
+
+  void SetReturnInfo(const RI ri) {
+    returnInfo = ri;
+  }
+
+  void SetParamInfo(const size_t idx, const PI pi) {
+    if (idx >= kMaxParamCount) {
+      return;
+    }
+    paramInfo[idx] = pi;
+  }
+
+  void SetParamInfoNoBetterThan(const size_t idx, const PI pi) {
+    size_t oldValue = static_cast<size_t>(paramInfo[idx]);
+    size_t newValue = static_cast<size_t>(pi);
+    if (newValue < oldValue) {
+      SetParamInfo(idx, static_cast<PI>(newValue));
+    }
+  }
+
+  void Dump(size_t numParam = kMaxParamCount) {
+    auto dumpCount = numParam > kMaxParamCount ?
kMaxParamCount : numParam; + LogInfo::MapleLogger() << kFIStr[static_cast(funcInfo)] + << " " << kRIStr[static_cast(returnInfo)]; + for (size_t i = 0; i < dumpCount; ++i) { + LogInfo::MapleLogger() << " " << kPIStr[static_cast(paramInfo[i])]; + } + LogInfo::MapleLogger() << "\n"; + } +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_FUNC_DESC_H diff --git a/ecmascript/mapleall/maple_ir/include/global_tables.h b/ecmascript/mapleall/maple_ir/include/global_tables.h new file mode 100644 index 0000000000000000000000000000000000000000..917cf40737f77fa926816f790f7421bdef11929d --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/global_tables.h @@ -0,0 +1,875 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_GLOBAL_TABLES_H +#define MAPLE_IR_INCLUDE_GLOBAL_TABLES_H +#include +#include +#include +#include +#include +#include "thread_env.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "types_def.h" +#include "prim_types.h" +#include "mir_module.h" +#include "namemangler.h" +#include "mir_type.h" +#include "mir_const.h" + +namespace maple { +using TyIdxFieldAttrPair = std::pair; +using FieldPair = std::pair; +using FieldVector = std::vector; + +class BinaryMplImport; // circular dependency exists, no other choice + +// to facilitate the use of unordered_map +class TyIdxHash { + public: + std::size_t operator()(const TyIdx &tyIdx) const { + return std::hash{}(tyIdx); + } +}; + +// to facilitate the use of unordered_map +class GStrIdxHash { + public: + std::size_t operator()(const GStrIdx &gStrIdx) const { + return std::hash{}(gStrIdx); + } +}; + +// to facilitate the use of unordered_map +class UStrIdxHash { + public: + std::size_t operator()(const UStrIdx &uStrIdx) const { + return std::hash{}(uStrIdx); + } +}; + +class IntConstKey { + friend class IntConstHash; + friend class IntConstCmp; + public: + IntConstKey(int64 v, TyIdx tyIdx) : val(v), tyIdx(tyIdx) {} + virtual ~IntConstKey() {} + private: + int64 val; + TyIdx tyIdx; +}; + +class IntConstHash { + public: + std::size_t operator() (const IntConstKey &key) const { + return std::hash{}(key.val) ^ (std::hash{}(static_cast(key.tyIdx)) << 1); + } +}; + +class IntConstCmp { + public: + bool operator() (const IntConstKey &lkey, const IntConstKey &rkey) const { + return lkey.val == rkey.val && lkey.tyIdx == rkey.tyIdx; + } +}; + +class TypeTable { + friend BinaryMplImport; + public: + static MIRType *voidPtrType; + + TypeTable(); + TypeTable(const TypeTable&) = delete; + TypeTable &operator=(const TypeTable&) = delete; + ~TypeTable(); + + std::vector &GetTypeTable() { + return typeTable; + } + + const std::vector &GetTypeTable() const { + return typeTable; + } + + auto &GetTypeHashTable() const { + return typeHashTable; + } + + auto &GetPtrTypeMap() const { + return ptrTypeMap; + } + + auto &GetRefTypeMap() const { + return refTypeMap; + } + + MIRType *GetTypeFromTyIdx(TyIdx tyIdx) { + return const_cast(const_cast(this)->GetTypeFromTyIdx(tyIdx)); + } + const MIRType 
*GetTypeFromTyIdx(TyIdx tyIdx) const { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + return typeTable.at(tyIdx); + } + + MIRType *GetTypeFromTyIdx(uint32 index) const { + CHECK_FATAL(index < typeTable.size(), "array index out of range"); + return typeTable.at(index); + } + + PrimType GetPrimTypeFromTyIdx(const TyIdx &tyIdx) const { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + return typeTable.at(tyIdx)->GetPrimType(); + } + + void SetTypeWithTyIdx(const TyIdx &tyIdx, MIRType &type); + MIRType *GetOrCreateMIRTypeNode(MIRType &ptype); + + TyIdx GetOrCreateMIRType(MIRType *pType) { + return GetOrCreateMIRTypeNode(*pType)->GetTypeIndex(); + } + + uint32 GetTypeTableSize() const { + return static_cast(typeTable.size()); + } + + // Get primtive types. + MIRType *GetPrimType(PrimType primType) const { + DEBUG_ASSERT(primType < typeTable.size(), "array index out of range"); + return typeTable.at(primType); + } + + MIRType *GetFloat() const { + DEBUG_ASSERT(PTY_f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f32); + } + + MIRType *GetDouble() const { + DEBUG_ASSERT(PTY_f64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f64); + } + + MIRType *GetFloat128() const { + DEBUG_ASSERT(PTY_f128 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f128); + } + + MIRType *GetUInt1() const { + DEBUG_ASSERT(PTY_u1 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u1); + } + + MIRType *GetUInt8() const { + DEBUG_ASSERT(PTY_u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u8); + } + + MIRType *GetInt8() const { + DEBUG_ASSERT(PTY_i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i8); + } + + MIRType *GetUInt16() const { + DEBUG_ASSERT(PTY_u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u16); + } + + MIRType *GetInt16() const { + DEBUG_ASSERT(PTY_i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i16); + } + + MIRType *GetInt32() const { + DEBUG_ASSERT(PTY_i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i32); + } + + MIRType *GetUInt32() const { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + + MIRType *GetInt64() const { + DEBUG_ASSERT(PTY_i64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i64); + } + + MIRType *GetUInt64() const { + DEBUG_ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + + MIRType *GetPtr() const { + DEBUG_ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ptr); + } + +#ifdef USE_ARM32_MACRO + MIRType *GetUIntType() const { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + + MIRType *GetPtrType() const { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } +#else + MIRType *GetUIntType() const { + DEBUG_ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + + MIRType *GetPtrType() const { + DEBUG_ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ptr); + } +#endif + +#ifdef USE_32BIT_REF + MIRType *GetCompactPtr() const { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return 
typeTable.at(PTY_u32); + } + +#else + MIRType *GetCompactPtr() const { + DEBUG_ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + +#endif + MIRType *GetRef() const { + DEBUG_ASSERT(PTY_ref < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ref); + } + + MIRType *GetAddr32() const { + DEBUG_ASSERT(PTY_a32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_a32); + } + + MIRType *GetAddr64() const { + DEBUG_ASSERT(PTY_a64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_a64); + } + + MIRType *GetVoid() const { + DEBUG_ASSERT(PTY_void < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_void); + } + +#ifdef DYNAMICLANG + MIRType *GetDynundef() const { + DEBUG_ASSERT(PTY_dynundef < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynundef); + } + + MIRType *GetDynany() const { + DEBUG_ASSERT(PTY_dynany < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynany); + } + + MIRType *GetDyni32() const { + DEBUG_ASSERT(PTY_dyni32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dyni32); + } + + MIRType *GetDynf64() const { + DEBUG_ASSERT(PTY_dynf64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynf64); + } + + MIRType *GetDynf32() const { + DEBUG_ASSERT(PTY_dynf32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynf32); + } + + MIRType *GetDynstr() const { + DEBUG_ASSERT(PTY_dynstr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynstr); + } + + MIRType *GetDynobj() const { + DEBUG_ASSERT(PTY_dynobj < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynobj); + } + + MIRType *GetDynbool() const { + DEBUG_ASSERT(PTY_dynbool < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynbool); + } + +#endif + MIRType *GetUnknown() const { + DEBUG_ASSERT(PTY_unknown < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_unknown); + } + // vector type + MIRType *GetV4Int32() const { + DEBUG_ASSERT(PTY_v4i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4i32); + } + + MIRType *GetV2Int32() const { + DEBUG_ASSERT(PTY_v2i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2i32); + } + + MIRType *GetV4UInt32() const { + DEBUG_ASSERT(PTY_v4u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4u32); + } + MIRType *GetV2UInt32() const { + DEBUG_ASSERT(PTY_v2u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2u32); + } + + MIRType *GetV4Int16() const { + DEBUG_ASSERT(PTY_v4i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4i16); + } + MIRType *GetV8Int16() const { + DEBUG_ASSERT(PTY_v8i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8i16); + } + + MIRType *GetV4UInt16() const { + DEBUG_ASSERT(PTY_v4u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4u16); + } + MIRType *GetV8UInt16() const { + DEBUG_ASSERT(PTY_v8u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8u16); + } + + MIRType *GetV8Int8() const { + DEBUG_ASSERT(PTY_v8i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8i8); + } + MIRType *GetV16Int8() const { + DEBUG_ASSERT(PTY_v16i8 < typeTable.size(), "array index out of range"); + return 
typeTable.at(PTY_v16i8); + } + + MIRType *GetV8UInt8() const { + DEBUG_ASSERT(PTY_v8u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8u8); + } + MIRType *GetV16UInt8() const { + DEBUG_ASSERT(PTY_v16u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v16u8); + } + MIRType *GetV2Int64() const { + DEBUG_ASSERT(PTY_v2i64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2i64); + } + MIRType *GetV2UInt64() const { + DEBUG_ASSERT(PTY_v2u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2u64); + } + + MIRType *GetV2Float32() const { + DEBUG_ASSERT(PTY_v2f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2f32); + } + MIRType *GetV4Float32() const { + DEBUG_ASSERT(PTY_v4f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4f32); + } + MIRType *GetV2Float64() const { + DEBUG_ASSERT(PTY_v2f64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2f64); + } + + // Get or Create derived types. + MIRType *GetOrCreatePointerType(const TyIdx &pointedTyIdx, PrimType primType = PTY_ptr, + const TypeAttrs &attrs = TypeAttrs()); + MIRType *GetOrCreatePointerType(const MIRType &pointTo, PrimType primType = PTY_ptr, + const TypeAttrs &attrs = TypeAttrs()); + const MIRType *GetPointedTypeIfApplicable(MIRType &type) const; + MIRType *GetPointedTypeIfApplicable(MIRType &type); + MIRType *GetVoidPtr() const { + DEBUG_ASSERT(voidPtrType != nullptr, "voidPtrType should not be null"); + return voidPtrType; + } + + void UpdateMIRType(const MIRType &pType, const TyIdx tyIdx); + MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint8 dim, const uint32 *sizeArray, + const TypeAttrs &attrs = TypeAttrs()); + // For one dimention array + MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs = TypeAttrs()); + MIRType *GetOrCreateFarrayType(const MIRType &elem); + MIRType *GetOrCreateJarrayType(const MIRType &elem); + MIRType *GetOrCreateFunctionType(const TyIdx&, const std::vector&, const std::vector&, + bool isVarg = false, const TypeAttrs &retAttrs = TypeAttrs()); + MIRType *GetOrCreateStructType(const std::string &name, const FieldVector &fields, const FieldVector &prntFields, + MIRModule &module) { + return GetOrCreateStructOrUnion(name, fields, prntFields, module); + } + + MIRType *GetOrCreateUnionType(const std::string &name, const FieldVector &fields, const FieldVector &parentFields, + MIRModule &module) { + return GetOrCreateStructOrUnion(name, fields, parentFields, module, false); + } + + MIRType *GetOrCreateClassType(const std::string &name, MIRModule &module) { + return GetOrCreateClassOrInterface(name, module, true); + } + + MIRType *GetOrCreateInterfaceType(const std::string &name, MIRModule &module) { + return GetOrCreateClassOrInterface(name, module, false); + } + + void PushIntoFieldVector(FieldVector &fields, const std::string &name, const MIRType &type); + void AddFieldToStructType(MIRStructType &structType, const std::string &fieldName, const MIRType &fieldType); + + TyIdx lastDefaultTyIdx; + private: + using MIRTypePtr = MIRType*; + struct Hash { + size_t operator()(const MIRTypePtr &ty) const { + return ty->GetHashIndex(); + } + }; + + struct Equal { + bool operator()(const MIRTypePtr &tx, const MIRTypePtr &ty) const { + return tx->EqualTo(*ty); + } + }; + + // create an entry in typeTable for the type node + MIRType *CreateType(const MIRType &oldType) { + MIRType *newType = 
oldType.CopyMIRTypeNode();
+    newType->SetTypeIndex(TyIdx(typeTable.size()));
+    typeTable.push_back(newType);
+    return newType;
+  }
+
+  void PushNull() { typeTable.push_back(nullptr); }
+  void PopBack() { typeTable.pop_back(); }
+
+  void CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, bool isObject, bool isIncomplete);
+  MIRType *CreateAndUpdateMirTypeNode(MIRType &pType);
+  MIRType *GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, const FieldVector &printFields,
+                                    MIRModule &module, bool forStruct = true,
+                                    const TypeAttrs &attrs = TypeAttrs());
+  MIRType *GetOrCreateClassOrInterface(const std::string &name, MIRModule &module, bool forClass);
+
+  MIRType *CreateMirType(uint32 primTypeIdx) const;
+  void PutToHashTable(MIRType *mirType);
+
+  std::unordered_set<MIRTypePtr, Hash, Equal> typeHashTable;
+  std::unordered_map<TyIdx, TyIdx, TyIdxHash> ptrTypeMap;
+  std::unordered_map<TyIdx, TyIdx, TyIdxHash> refTypeMap;
+  std::vector<MIRType*> typeTable;
+  mutable std::shared_timed_mutex mtx;
+};
+
+class StrPtrHash {
+ public:
+  size_t operator()(const std::string *str) const {
+    return std::hash<std::string>{}(*str);
+  }
+
+  size_t operator()(const std::u16string *str) const {
+    return std::hash<std::u16string>{}(*str);
+  }
+};
+
+class StrPtrEqual {
+ public:
+  bool operator()(const std::string *str1, const std::string *str2) const {
+    return *str1 == *str2;
+  }
+
+  bool operator()(const std::u16string *str1, const std::u16string *str2) const {
+    return *str1 == *str2;
+  }
+};
+
+// T can be std::string or std::u16string
+// U can be GStrIdx, UStrIdx, or U16StrIdx
+template <typename T, typename U>
+class StringTable {
+ public:
+  StringTable() = default;
+  StringTable(const StringTable&) = delete;
+  StringTable &operator=(const StringTable&) = delete;
+
+  ~StringTable() {
+    stringTableMap.clear();
+    for (auto it : stringTable) {
+      delete it;
+    }
+  }
+
+  void Init() {
+    // initialize 0th entry of stringTable with an empty string
+    T *ptr = new T;
+    stringTable.push_back(ptr);
+  }
+
+  U GetStrIdxFromName(const T &str) const {
+    if (ThreadEnv::IsMeParallel()) {
+      std::shared_lock<std::shared_timed_mutex> lock(mtx);
+      auto it = stringTableMap.find(&str);
+      if (it == stringTableMap.end()) {
+        return U(0);
+      }
+      return it->second;
+    }
+    auto it = stringTableMap.find(&str);
+    if (it == stringTableMap.end()) {
+      return U(0);
+    }
+    return it->second;
+  }
+
+  U GetOrCreateStrIdxFromName(const T &str) {
+    U strIdx = GetStrIdxFromName(str);
+    if (strIdx == 0u) {
+      if (ThreadEnv::IsMeParallel()) {
+        std::unique_lock<std::shared_timed_mutex> lock(mtx);
+        strIdx.reset(stringTable.size());
+        T *newStr = new T(str);
+        stringTable.push_back(newStr);
+        stringTableMap[newStr] = strIdx;
+        return strIdx;
+      }
+      strIdx.reset(stringTable.size());
+      T *newStr = new T(str);
+      stringTable.push_back(newStr);
+      stringTableMap[newStr] = strIdx;
+    }
+    return strIdx;
+  }
+
+  size_t StringTableSize() const {
+    if (ThreadEnv::IsMeParallel()) {
+      std::shared_lock<std::shared_timed_mutex> lock(mtx);
+      return stringTable.size();
+    }
+    return stringTable.size();
+  }
+
+  const T &GetStringFromStrIdx(U strIdx) const {
+    if (ThreadEnv::IsMeParallel()) {
+      std::shared_lock<std::shared_timed_mutex> lock(mtx);
+      DEBUG_ASSERT(strIdx < stringTable.size(), "array index out of range");
+      return *stringTable[strIdx];
+    }
+    DEBUG_ASSERT(strIdx < stringTable.size(), "array index out of range");
+    return *stringTable[strIdx];
+  }
+
+  const T &GetStringFromStrIdx(uint32 idx) const {
+    DEBUG_ASSERT(idx < stringTable.size(), "array index out of range");
+    return *stringTable[idx];
+  }
+
+ private:
+  std::vector<T*> stringTable; // index is uint32
+  std::unordered_map<const T*, U, StrPtrHash, StrPtrEqual> stringTableMap;
+  mutable std::shared_timed_mutex mtx;
+};
+
+class 
FPConstTable { + public: + FPConstTable(const FPConstTable &p) = delete; + FPConstTable &operator=(const FPConstTable &p) = delete; + ~FPConstTable(); + + // get the const from floatConstTable or create a new one + MIRFloatConst *GetOrCreateFloatConst(float fval); + // get the const from doubleConstTable or create a new one + MIRDoubleConst *GetOrCreateDoubleConst(double fval); + + static std::unique_ptr Create() { + auto p = std::unique_ptr(new FPConstTable()); + p->PostInit(); + return p; + } + + private: + FPConstTable() : floatConstTable(), doubleConstTable() {}; + void PostInit(); + MIRFloatConst *DoGetOrCreateFloatConst(float); + MIRDoubleConst *DoGetOrCreateDoubleConst(double); + MIRFloatConst *DoGetOrCreateFloatConstThreadSafe(float); + MIRDoubleConst *DoGetOrCreateDoubleConstThreadSafe(double); + std::shared_timed_mutex floatMtx; + std::shared_timed_mutex doubleMtx; + std::unordered_map floatConstTable; // map float const value to the table; + std::unordered_map doubleConstTable; // map double const value to the table; + MIRFloatConst *nanFloatConst = nullptr; + MIRFloatConst *infFloatConst = nullptr; + MIRFloatConst *minusInfFloatConst = nullptr; + MIRFloatConst *minusZeroFloatConst = nullptr; + MIRDoubleConst *nanDoubleConst = nullptr; + MIRDoubleConst *infDoubleConst = nullptr; + MIRDoubleConst *minusInfDoubleConst = nullptr; + MIRDoubleConst *minusZeroDoubleConst = nullptr; +}; + +class IntConstTable { + public: + IntConstTable(const IntConstTable &p) = delete; + IntConstTable &operator=(const IntConstTable &p) = delete; + ~IntConstTable(); + + MIRIntConst *GetOrCreateIntConst(const IntVal &val, MIRType &type); + MIRIntConst *GetOrCreateIntConst(uint64 val, MIRType &type); + + static std::unique_ptr Create() { + auto p = std::unique_ptr(new IntConstTable()); + return p; + } + + private: + IntConstTable() = default; + MIRIntConst *DoGetOrCreateIntConst(uint64 val, MIRType &type); + MIRIntConst *DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type); + std::shared_timed_mutex mtx; + std::unordered_map intConstTable; +}; + +// STypeNameTable is only used to store class and interface types. +// Each module maintains its own MIRTypeNameTable. 
+class STypeNameTable { + public: + STypeNameTable() = default; + virtual ~STypeNameTable() = default; + + const std::unordered_map &GetGStridxToTyidxMap() const { + return gStrIdxToTyIdxMap; + } + + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const { + const auto it = gStrIdxToTyIdxMap.find(idx); + if (it == gStrIdxToTyIdxMap.cend()) { + return TyIdx(0); + } + return it->second; + } + + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) { + gStrIdxToTyIdxMap[gStrIdx] = tyIdx; + } + + private: + std::unordered_map gStrIdxToTyIdxMap; +}; + +class FunctionTable { + public: + FunctionTable() { + funcTable.push_back(nullptr); + } // puIdx 0 is reserved + + virtual ~FunctionTable() = default; + + std::vector &GetFuncTable() { + return funcTable; + } + + MIRFunction *GetFunctionFromPuidx(PUIdx pIdx) const { + CHECK_FATAL(pIdx < funcTable.size(), "Invalid puIdx"); + return funcTable.at(pIdx); + } + + void SetFunctionItem(uint32 pIdx, MIRFunction *func) { + CHECK_FATAL(pIdx < funcTable.size(), "Invalid puIdx"); + funcTable[pIdx] = func; + } + + private: + std::vector funcTable; // index is PUIdx +}; + +class GSymbolTable { + public: + GSymbolTable(); + GSymbolTable(const GSymbolTable&) = delete; + GSymbolTable &operator=(const GSymbolTable&) = delete; + ~GSymbolTable(); + + MIRModule *GetModule() { + return module; + } + + void SetModule(MIRModule *m) { + module = m; + } + + bool IsValidIdx(size_t idx) const { + return idx < symbolTable.size(); + } + + MIRSymbol *GetSymbolFromStidx(uint32 idx, bool checkFirst = false) const { + if (checkFirst && idx >= symbolTable.size()) { + return nullptr; + } + DEBUG_ASSERT(IsValidIdx(idx), "symbol table index out of range"); + return symbolTable[idx]; + } + + void SetStrIdxStIdxMap(GStrIdx strIdx, StIdx stIdx) { + strIdxToStIdxMap[strIdx] = stIdx; + } + + StIdx GetStIdxFromStrIdx(GStrIdx idx) const { + const auto it = strIdxToStIdxMap.find(idx); + if (it == strIdxToStIdxMap.cend()) { + return StIdx(); + } + return it->second; + } + + MIRSymbol *GetSymbolFromStrIdx(GStrIdx idx, bool checkFirst = false) const { + return GetSymbolFromStidx(GetStIdxFromStrIdx(idx).Idx(), checkFirst); + } + + auto &GetTable() { + return symbolTable; + } + + size_t GetSymbolTableSize() const { + return symbolTable.size(); + } + + MIRSymbol *GetSymbol(uint32 idx) const { + DEBUG_ASSERT(idx < symbolTable.size(), "array index out of range"); + return symbolTable.at(idx); + } + + MIRSymbol *CreateSymbol(uint8 scopeID); + bool AddToStringSymbolMap(const MIRSymbol &st); + bool RemoveFromStringSymbolMap(const MIRSymbol &st); + void Dump(bool isLocal, int32 indent = 0) const; + + private: + MIRModule *module = nullptr; + // hash table mapping string index to st index + std::unordered_map strIdxToStIdxMap; + std::vector symbolTable; // map symbol idx to symbol node +}; + +class ConstPool { + public: + std::unordered_map &GetConstU16StringPool() { + return constU16StringPool; + } + + void InsertConstPool(GStrIdx strIdx, MIRConst *cst) { + (void)constMap.emplace(strIdx, cst); + } + + MIRConst *GetConstFromPool(GStrIdx strIdx) { + return constMap[strIdx]; + } + + void PutLiteralNameAsImported(GStrIdx gIdx) { + (void)importedLiteralNames.insert(gIdx); + } + + bool LookUpLiteralNameFromImported(GStrIdx gIdx) { + return importedLiteralNames.find(gIdx) != importedLiteralNames.end(); + } + + protected: + std::unordered_map constMap; + std::set importedLiteralNames; + + private: + std::unordered_map constU16StringPool; +}; + +class GlobalTables { + public: + static GlobalTables &GetGlobalTables(); + + 
static StringTable &GetStrTable() { + return globalTables.gStringTable; + } + + static StringTable &GetUStrTable() { + return globalTables.uStrTable; + } + + static StringTable &GetU16StrTable() { + return globalTables.u16StringTable; + } + + static TypeTable &GetTypeTable() { + return globalTables.typeTable; + } + + static FPConstTable &GetFpConstTable() { + return *(globalTables.fpConstTablePtr); + } + + static STypeNameTable &GetTypeNameTable() { + return globalTables.typeNameTable; + } + + static FunctionTable &GetFunctionTable() { + return globalTables.functionTable; + } + + static GSymbolTable &GetGsymTable() { + return globalTables.gSymbolTable; + } + + static ConstPool &GetConstPool() { + return globalTables.constPool; + } + + static IntConstTable &GetIntConstTable() { + return *(globalTables.intConstTablePtr); + } + + GlobalTables(const GlobalTables &globalTables) = delete; + GlobalTables(const GlobalTables &&globalTables) = delete; + GlobalTables &operator=(const GlobalTables &globalTables) = delete; + GlobalTables &operator=(const GlobalTables &&globalTables) = delete; + + private: + GlobalTables() : fpConstTablePtr(FPConstTable::Create()), + intConstTablePtr(IntConstTable::Create()) { + gStringTable.Init(); + uStrTable.Init(); + u16StringTable.Init(); + } + virtual ~GlobalTables() = default; + static GlobalTables globalTables; + + TypeTable typeTable; + STypeNameTable typeNameTable; + FunctionTable functionTable; + GSymbolTable gSymbolTable; + ConstPool constPool; + std::unique_ptr fpConstTablePtr; + std::unique_ptr intConstTablePtr; + StringTable gStringTable; + StringTable uStrTable; + StringTable u16StringTable; +}; + +inline MIRType &GetTypeFromTyIdx(TyIdx idx) { + return *(GlobalTables::GetTypeTable().GetTypeFromTyIdx(idx)); +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_GLOBAL_TABLES_H diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_c.def b/ecmascript/mapleall/maple_ir/include/intrinsic_c.def new file mode 100644 index 0000000000000000000000000000000000000000..7e4121bfea389164e1aa535cf03525fdff2e6726 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsic_c.def @@ -0,0 +1,313 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) + +DEF_MIR_INTRINSIC(C_strcmp,\ + "strcmp", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(C_strncmp,\ + "strncmp", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C_strcpy,\ + "strcpy", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(C_strncpy,\ + "strncpy", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_strlen,\ + "strlen", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyU64, kArgTyPtr) +DEF_MIR_INTRINSIC(C_strchr,\ + "strchr", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyPtr, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C_strrchr,\ + "strrchr", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyPtr, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C_memcmp,\ + "memcmp", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_memcpy,\ + "memcpy", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_memmove,\ + "memmove", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_memset,\ + "memset", 0, kArgTyVoid, kArgTyPtr, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_acosf,\ + "acosf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_asinf,\ + "asinf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_atanf,\ + "atanf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_cosf,\ + "cosf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_coshf,\ + "coshf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_expf,\ + "expf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_logf,\ + "logf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_log10f,\ + "log10f", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_sinf,\ + "sinf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_sinhf,\ + "sinhf", INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_acos,\ + "acos", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_asin,\ + "asin", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_atan,\ + "atan", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_cos,\ + "cos", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_cosh,\ + "cosh", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_exp,\ + "exp", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_log,\ + "log", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_log10,\ + "log10", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_sin,\ + "sin", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_sinh,\ + "sinh", INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_ffs,\ + "ffs", INTRNISPURE, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C_va_start,\ + "sinh", INTRNISPURE | INTRNISSPECIAL, kArgTyVoid, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C_constant_p,\ + "sinh", 0, kArgTyI32, kArgTyDynany) +DEF_MIR_INTRINSIC(C_clz32,\ + "sinh", INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_clz64,\ + "sinh", INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_ctz32,\ + "sinh", INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_ctz64,\ + "sinh", INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_popcount32,\ + "popcount32", INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_popcount64,\ + "popcount64", INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_parity32,\ + "parity32", INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_parity64,\ + "parity64", INTRNISPURE, kArgTyI32, kArgTyU64) 
+DEF_MIR_INTRINSIC(C_clrsb32,\ + "clrsb32", INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_clrsb64,\ + "clrsb64", INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_isaligned,\ + "isaligned", INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_alignup,\ + "alignup", INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_aligndown,\ + "aligndown", INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_rev16_2,\ + "rev16", INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, kArgTyI16) +DEF_MIR_INTRINSIC(C_rev_4,\ + "rev", INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C_rev_8,\ + "rev", INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(C_stack_save,\ + "stack_save", INTRNISPURE | INTRNISSPECIAL, kArgTyPtr) +DEF_MIR_INTRINSIC(C_stack_restore,\ + "stack_restore", INTRNISPURE | INTRNISSPECIAL, kArgTyPtr) +// sync +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_1,\ + "__sync_add_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_2,\ + "__sync_add_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_4,\ + "__sync_add_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_8,\ + "__sync_add_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_1,\ + "__sync_sub_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_2,\ + "__sync_sub_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_4,\ + "__sync_sub_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_8,\ + "__sync_sub_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_1,\ + "__sync_fetch_and_add_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_2,\ + "__sync_fetch_and_add_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_4,\ + "__sync_fetch_and_add_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_8,\ + "__sync_fetch_and_add_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_1,\ + "__sync_fetch_and_sub_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_2,\ + "__sync_fetch_and_sub_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_4,\ + "__sync_fetch_and_sub_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_8,\ + "__sync_fetch_and_sub_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_1,\ + "__sync_bool_compare_and_swap_1", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU8, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_2,\ + "__sync_bool_compare_and_swap_2", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU16, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_4,\ + "__sync_bool_compare_and_swap_4", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU32, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_8,\ + "__sync_bool_compare_and_swap_8", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU64, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_1,\ + "__sync_val_compare_and_swap_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8, kArgTyU8) 
+DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_2,\ + "__sync_val_compare_and_swap_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_4,\ + "__sync_val_compare_and_swap_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_8,\ + "__sync_val_compare_and_swap_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_1,\ + "__sync_lock_test_and_set_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_2,\ + "__sync_lock_test_and_set_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_4,\ + "__sync_lock_test_and_set_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_8,\ + "__sync_lock_test_and_set_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_lock_release_8,\ + "__sync_lock_release_8", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_4,\ + "__sync_lock_release_4", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_2,\ + "__sync_lock_release_2", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_1,\ + "__sync_lock_release_1", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_1,\ + "__sync_fetch_and_and_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_2,\ + "__sync_fetch_and_and_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_4,\ + "__sync_fetch_and_and_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_8,\ + "__sync_fetch_and_and_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_1,\ + "__sync_fetch_and_or_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_2,\ + "__sync_fetch_and_or_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_4,\ + "__sync_fetch_and_or_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_8,\ + "__sync_fetch_and_or_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_1,\ + "__sync_fetch_and_xor_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_2,\ + "__sync_fetch_and_xor_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_4,\ + "__sync_fetch_and_xor_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_8,\ + "__sync_fetch_and_xor_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_1,\ + "__sync_fetch_and_nand_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_2,\ + "__sync_fetch_and_nand_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_4,\ + "__sync_fetch_and_nand_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_8,\ + "__sync_fetch_and_nand_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_1,\ + "__sync_and_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_2,\ + "__sync_and_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) 
+DEF_MIR_INTRINSIC(C___sync_and_and_fetch_4,\ + "__sync_and_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_8,\ + "__sync_and_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_1,\ + "__sync_or_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_2,\ + "__sync_or_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_4,\ + "__sync_or_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_8,\ + "__sync_or_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_1,\ + "__sync_xor_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_2,\ + "__sync_xor_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_4,\ + "__sync_xor_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_8,\ + "__sync_xor_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_1,\ + "__sync_nand_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_2,\ + "__sync_nand_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_4,\ + "__sync_nand_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_8,\ + "__sync_nand_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_synchronize,\ + "__sync_synchronize", INTRNATOMIC, kArgTyUndef) + +DEF_MIR_INTRINSIC(C__builtin_return_address,\ + "__builtin_return_address", INTRNISPURE, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C__builtin_extract_return_addr,\ + "__builtin_extract_return_addr", INTRNISPURE, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(C___builtin_expect,\ + "__builtin_expect", INTRNISPURE, kArgTyI32, kArgTyI32, kArgTyI32) + +// atomic +DEF_MIR_INTRINSIC(C___atomic_load_n,\ + "__atomic_load_n", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_load,\ + "__atomic_load", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_store_n,\ + "__atomic_store_n", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_store,\ + "__atomic_store", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_exchange_n,\ + "__atomic_exchange_n", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_exchange,\ + "__atomic_exchange", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_add_fetch,\ + "__atomic_add_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_sub_fetch,\ + "__atomic_sub_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_and_fetch,\ + "__atomic_and_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_xor_fetch,\ + "__atomic_xor_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_or_fetch,\ + "__atomic_or_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_nand_fetch,\ + 
"__atomic_nand_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_add,\ + "__atomic_fetch_add", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_sub,\ + "__atomic_fetch_sub", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_and,\ + "__atomic_fetch_and", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_xor,\ + "__atomic_fetch_xor", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_or,\ + "__atomic_fetch_or", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_nand,\ + "__atomic_fetch_nand", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_test_and_set,\ + "__atomic_test_and_set", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_clear,\ + "__atomic_clear", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_thread_fence,\ + "__atomic_thread_fence", INTRNATOMIC, kArgTyVoid, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_signal_fence,\ + "__atomic_signal_fence", INTRNATOMIC, kArgTyVoid, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_always_lock_free,\ + "__atomic_always_lock_free", INTRNATOMIC, kArgTyU1, kArgTyU64, kArgTyPtr) +DEF_MIR_INTRINSIC(C___atomic_is_lock_free,\ + "__atomic_is_lock_free", INTRNATOMIC, kArgTyU1, kArgTyU64, kArgTyPtr) +DEF_MIR_INTRINSIC(C___atomic_compare_exchange_n,\ + "__atomic_compare_exchange_n", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyDynany, kArgTyU1, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_compare_exchange,\ + "__atomic_compare_exchange", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU1, kArgTyI32, kArgTyI32) diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_dai.def b/ecmascript/mapleall/maple_ir/include/intrinsic_dai.def new file mode 100644 index 0000000000000000000000000000000000000000..28c97ddfb22fad1643490dc94c00bafa6c5e4a8f --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsic_dai.def @@ -0,0 +1,20 @@ +DEF_MIR_INTRINSIC(MCC_DeferredConstClass,\ + "MCC_DeferredConstClass", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredInstanceOf,\ + "MCC_DeferredInstanceOf", INTRNISJAVA, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredCheckCast,\ + "MCC_DeferredCheckCast", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredNewInstance,\ + "MCC_DeferredNewInstance", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredNewArray,\ + "MCC_DeferredNewArray", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyI32) +DEF_MIR_INTRINSIC(MCC_DeferredFillNewArray,\ + "MCC_DeferredFillNewArray", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyI32, kArgTyDynany, kArgTyDynany) +DEF_MIR_INTRINSIC(MCC_DeferredLoadField,\ + "MCC_DeferredLoadField", INTRNISJAVA, kArgTyDynany, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredStoreField,\ + "MCC_DeferredStoreField", INTRNISJAVA, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredInvoke,\ + "MCC_DeferredInvoke", INTRNISJAVA, kArgTyDynany, kArgTyRef, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) 
+DEF_MIR_INTRINSIC(MCC_DeferredClinitCheck,\ + "MCC_DeferredClinitCheck", INTRNISJAVA, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_java.def b/ecmascript/mapleall/maple_ir/include/intrinsic_java.def new file mode 100644 index 0000000000000000000000000000000000000000..717c6ea664013853d2dc313422ff72084410f040 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsic_java.def @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JAVA_ARRAY_LENGTH,\ + "__java_array_length", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_ARRAY_FILL,\ + "__java_array_fill", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_FILL_NEW_ARRAY,\ + "__java_fill_new_array", INTRNISJAVA, kArgTyRef, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CHECK_CAST,\ + "__java_check_cast", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CONST_CLASS,\ + "__java_const_class", INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_INSTANCE_OF,\ + "__java_instance_of", INTRNISJAVA, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_ISASSIGNABLEFROM,\ + "__java_isAssignableFrom", INTRNISJAVA, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_MERGE,\ + "__java_merge", INTRNISJAVA, kArgTyPtr, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK,\ + "__java_clinit_check", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_POLYMORPHIC_CALL,\ + "__java_polymorphic_call", INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_THROW_ARITHMETIC,\ + "MCC_ThrowArithmeticException", INTRNISJAVA | INTRNNEVERRETURN, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_GET_CLASS,\ + "MCC_GetClass", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) \ No newline at end of file diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_js.def b/ecmascript/mapleall/maple_ir/include/intrinsic_js.def new file mode 100644 index 0000000000000000000000000000000000000000..9cf7649522b5b84a67a2f7485d7dde16b539e332 --- /dev/null +++ 
b/ecmascript/mapleall/maple_ir/include/intrinsic_js.def @@ -0,0 +1,124 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JS_INIT_CONTEXT,\ + "__js_init_context", INTRNISJS, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_REQUIRE,\ + "__js_require", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_BIOBJECT,\ + "__jsobj_get_or_create_builtin", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_BISTRING,\ + "__jsstr_get_builtin", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_THIS,\ + "__jsop_this", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_ADD,\ + "__jsop_add", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(ADD_WITH_OVERFLOW,\ + "__add_with_overflow", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(SUB_WITH_OVERFLOW,\ + "__sub_with_overflow", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MUL_WITH_OVERFLOW,\ + "__mul_with_overflow", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_CONCAT,\ + "__jsstr_concat_2", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_STRICTEQ,\ + "__jsop_stricteq", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSSTR_STRICTEQ,\ + "__jsstr_equal", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_STRICTNE,\ + "__jsop_strictne", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSSTR_STRICTNE,\ + "__jsstr_ne", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTySimplestr, 
kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INSTANCEOF,\ + "__jsop_instanceof", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_IN,\ + "__jsop_in", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_OR,\ + "__jsop_or", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_AND,\ + "__jsop_and", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_TYPEOF,\ + "__jsop_typeof", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW,\ + "__js_new", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyPtr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_STRING,\ + "__js_ToString", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSSTR_LENGTH,\ + "__jsstr_get_length", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_BOOLEAN,\ + "__js_ToBoolean", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NUMBER,\ + "__js_ToNumber", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_INT32,\ + "__js_ToInt32", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_PRINT,\ + "__jsop_print", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_ERROR,\ + "__js_error", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNNEVERRETURN, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_EVAL,\ + "__js_eval", kIntrnUndef, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_ICALL,\ + "__js_icall", INTRNISJS | INTRNRETURNSTRUCT, kArgTyDynany, kArgTyA32, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_CALL, + "__jsop_call", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_CCALL,\ + "__jsop_ccall", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_NEW, + "__jsop_new", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_SETTIMEOUT, + "__js_setTimeout", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyI32, kArgTyUndef, 
kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_SETCYCLEHEADER,\ + "__js_setCycleHeader", INTRNISJS, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_OBJECT_0,\ + "__js_new_obj_obj_0", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_OBJECT_1,\ + "__js_new_obj_obj_1", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_SETPROP,\ + "__jsop_setprop", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_GETPROP,\ + "__jsop_getprop", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_DELPROP,\ + "__jsop_delprop", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_SETPROP_BY_NAME,\ + "__jsop_setprop_by_name", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTySimplestr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_GETPROP_BY_NAME,\ + "__jsop_getprop_by_name", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_SETPROP_BY_INDEX,\ + "__jsop_setprop_by_index", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyU32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_GETPROP_BY_INDEX,\ + "__jsop_getprop_by_index", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INITPROP_BY_NAME,\ + "__jsop_initprop", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INITPROP_GETTER,\ + "__jsop_initprop_getter", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INITPROP_SETTER,\ + "__jsop_initprop_setter", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_FUNCTION,\ + "__js_new_function", INTRNISJS, kArgTyDynany, kArgTyPtr, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_ARR_ELEMS,\ + "__js_new_arr_elems", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_ARR_LENGTH,\ + "__js_new_arr_length", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_LENGTH,\ + "__jsop_length", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_NEW_ITERATOR,\ + "__jsop_valueto_iterator", INTRNISJS, kArgTyPtr, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_NEXT_ITERATOR,\ + "__jsop_iterator_next", INTRNISJS, kArgTyDynany, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_MORE_ITERATOR,\ + "__jsop_more_iterator", INTRNISJS, kArgTyU32, kArgTyPtr, 
kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_ADDSYSEVENTLISTENER,\ + "__js_add_sysevent_listener", INTRNISJS, kArgTyU32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_js_eng.def b/ecmascript/mapleall/maple_ir/include/intrinsic_js_eng.def new file mode 100644 index 0000000000000000000000000000000000000000..f21cda4431815f7e09e1c8d50c04dd45e69ec3a9 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsic_js_eng.def @@ -0,0 +1,34 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JS_GET_ARGUMENTOBJECT,\ + "__jsobj_get_or_create_argument", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_ERROR_OBJECT,\ + "__jsobj_get_or_create_error", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_EVALERROR_OBJECT,\ + "__jsobj_get_or_create_evalError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_RANGEERROR_OBJECT,\ + "__jsobj_get_or_create_rangeError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_REFERENCEERROR_OBJECT,\ + "__jsobj_get_or_create_referenceError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_SYNTAXERROR_OBJECT,\ + "__jsobj_get_or_create_syntaxError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_TYPEERROR_OBJECT,\ + "__jsobj_get_or_create_typeError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_URIERROR_OBJECT,\ + "__jsobj_get_or_create_uriError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_ASSERTVALUE, + "__jsop_assert_value", INTRNISJS, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) \ No newline at end of file diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_op.h b/ecmascript/mapleall/maple_ir/include/intrinsic_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f5f8326b3e5964c8abc0893ed87cad4ae966a6d4 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsic_op.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_INTRINSIC_OP_H +#define MAPLE_IR_INCLUDE_INTRINSIC_OP_H + +namespace maple { +enum MIRIntrinsicID { +#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) INTRN_##STR, +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_INTRINSIC_OP_H diff --git a/ecmascript/mapleall/maple_ir/include/intrinsic_vector.def b/ecmascript/mapleall/maple_ir/include/intrinsic_vector.def new file mode 100644 index 0000000000000000000000000000000000000000..84a9cf253f06049e03e4db8c2f22cabcc7623808 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsic_vector.def @@ -0,0 +1,1227 @@ +/* + * Copyright (c) [2021] Futurewei Technologies, Inc. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the + * MulanPSL - 2.0. You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY + * KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO + * NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the + * MulanPSL - 2.0 for more details. + */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, +// ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) + +// vecTy vector_abs(vecTy src) +// Create a vector by getting the absolute value of the elements in src. 
+DEF_MIR_INTRINSIC(vector_abs_v8i8, "vector_abs_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_abs_v4i16, "vector_abs_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_abs_v2i32, "vector_abs_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_abs_v1i64, "vector_abs_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_abs_v2f32, "vector_abs_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32) +DEF_MIR_INTRINSIC(vector_abs_v1f64, "vector_abs_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_abs_v16i8, "vector_abs_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_abs_v8i16, "vector_abs_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_abs_v4i32, "vector_abs_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_abs_v2i64, "vector_abs_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_abs_v4f32, "vector_abs_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_abs_v2f64, "vector_abs_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64) + +// vecTy2 vector_addl_low(vecTy1 src1, vecTy1 src2) +// Add each element of the first source vector to the corresponding element +// of the second source vector and put the widened result into the +// destination vector. +DEF_MIR_INTRINSIC(vector_addl_low_v8i8, "vector_addl_low_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_addl_low_v4i16, "vector_addl_low_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_addl_low_v2i32, "vector_addl_low_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_addl_low_v8u8, "vector_addl_low_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_addl_low_v4u16, "vector_addl_low_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_addl_low_v2u32, "vector_addl_low_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) + +// vecTy2 vector_addl_high(vecTy1 src1, vecTy1 src2) +// Add each element of the upper half of the first source vector to the +// corresponding element of the upper half of the second source vector and +// put the widened result into the destination vector.
+DEF_MIR_INTRINSIC(vector_addl_high_v8i8, "vector_addl_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_addl_high_v4i16, "vector_addl_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_addl_high_v2i32, "vector_addl_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_addl_high_v8u8, "vector_addl_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_addl_high_v4u16, "vector_addl_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_addl_high_v2u32, "vector_addl_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32, kArgTyV4U32) + +// vecTy2 vector_addw_low(vecTy2 src1, vecTy1 src2) +// Add each element of the wide first source vector to the widened +// corresponding element of the second source vector and put the result +// into the destination vector. +DEF_MIR_INTRINSIC(vector_addw_low_v8i8, "vector_addw_low_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_addw_low_v4i16, "vector_addw_low_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_addw_low_v2i32, "vector_addw_low_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_addw_low_v8u8, "vector_addw_low_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_addw_low_v4u16, "vector_addw_low_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_addw_low_v2u32, "vector_addw_low_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U32) + +// vecTy2 vector_addw_high(vecTy2 src1, vecTy1 src2) +// Add each element of the wide first source vector to the widened +// corresponding element of the upper half of the second source vector and +// put the result into the destination vector. +DEF_MIR_INTRINSIC(vector_addw_high_v8i8, "vector_addw_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_addw_high_v4i16, "vector_addw_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_addw_high_v2i32, "vector_addw_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_addw_high_v8u8, "vector_addw_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_addw_high_v4u16, "vector_addw_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_addw_high_v2u32, "vector_addw_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) + +// vecTy vector_from_scalar(scalarTy value) +// Create a vector by repeating the scalar value for each element in the +// vector.
+DEF_MIR_INTRINSIC(vector_from_scalar_v2i64, "vector_from_scalar_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyI64) +DEF_MIR_INTRINSIC(vector_from_scalar_v4i32, "vector_from_scalar_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyI32) +DEF_MIR_INTRINSIC(vector_from_scalar_v8i16, "vector_from_scalar_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyI16) +DEF_MIR_INTRINSIC(vector_from_scalar_v16i8, "vector_from_scalar_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyI8) +DEF_MIR_INTRINSIC(vector_from_scalar_v2u64, "vector_from_scalar_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyU64) +DEF_MIR_INTRINSIC(vector_from_scalar_v4u32, "vector_from_scalar_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyU32) +DEF_MIR_INTRINSIC(vector_from_scalar_v8u16, "vector_from_scalar_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyU16) +DEF_MIR_INTRINSIC(vector_from_scalar_v16u8, "vector_from_scalar_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyU8) +DEF_MIR_INTRINSIC(vector_from_scalar_v2f64, "vector_from_scalar_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyF64) +DEF_MIR_INTRINSIC(vector_from_scalar_v4f32, "vector_from_scalar_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyF32) +DEF_MIR_INTRINSIC(vector_from_scalar_v1i64, "vector_from_scalar_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyI64) +DEF_MIR_INTRINSIC(vector_from_scalar_v2i32, "vector_from_scalar_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyI32) +DEF_MIR_INTRINSIC(vector_from_scalar_v4i16, "vector_from_scalar_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyI16) +DEF_MIR_INTRINSIC(vector_from_scalar_v8i8, "vector_from_scalar_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyI8) +DEF_MIR_INTRINSIC(vector_from_scalar_v1u64, "vector_from_scalar_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyU64) +DEF_MIR_INTRINSIC(vector_from_scalar_v2u32, "vector_from_scalar_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyU32) +DEF_MIR_INTRINSIC(vector_from_scalar_v4u16, "vector_from_scalar_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyU16) +DEF_MIR_INTRINSIC(vector_from_scalar_v8u8, "vector_from_scalar_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyU8) +DEF_MIR_INTRINSIC(vector_from_scalar_v1f64, "vector_from_scalar_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyF64) +DEF_MIR_INTRINSIC(vector_from_scalar_v2f32, "vector_from_scalar_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyF32) + +// vecTy2 vector_labssub_low(vecTy1 src1, vecTy1 src2) +// Create a widened vector by getting the abs value of the subtracted arguments.
+DEF_MIR_INTRINSIC(vector_labssub_low_v8i8, "vector_labssub_low_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_labssub_low_v4i16, "vector_labssub_low_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_labssub_low_v2i32, "vector_labssub_low_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_labssub_low_v8u8, "vector_labssub_low_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_labssub_low_v4u16, "vector_labssub_low_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_labssub_low_v2u32, "vector_labssub_low_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) + +// vecTy2 vector_labssub_high(vecTy1 src1, vecTy1 src2) +// Create a widened vector by getting the abs value of the subtracted high args. +DEF_MIR_INTRINSIC(vector_labssub_high_v8i8, "vector_labssub_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_labssub_high_v4i16, "vector_labssub_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_labssub_high_v2i32, "vector_labssub_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_labssub_high_v8u8, "vector_labssub_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_labssub_high_v4u16, "vector_labssub_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_labssub_high_v2u32, "vector_labssub_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32, kArgTyV4U32) + +// vecTy2 vector_madd(vecTy2 accum, vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2, then accumulate into accum. +// Elements of vecTy2 are twice as long as elements of vecTy1. +DEF_MIR_INTRINSIC(vector_madd_v2i32, "vector_madd_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_madd_v4i16, "vector_madd_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_madd_v8i8, "vector_madd_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_madd_v2u32, "vector_madd_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_madd_v4u16, "vector_madd_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_madd_v8u8, "vector_madd_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U8, kArgTyV8U8) + +// vecTy2 vector_mull_low(vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2. Elements of vecTy2 are twice as +// long as elements of vecTy1.
+DEF_MIR_INTRINSIC(vector_mull_low_v2i32, "vector_mull_low_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_mull_low_v4i16, "vector_mull_low_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_mull_low_v8i8, "vector_mull_low_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_mull_low_v2u32, "vector_mull_low_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_mull_low_v4u16, "vector_mull_low_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_mull_low_v8u8, "vector_mull_low_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) + +// vecTy2 vector_mull_high(vecTy1 src1, vecTy1 src2) +// Multiply the upper elements of src1 and src2. Elements of vecTy2 are twice +// as long as elements of vecTy1. +DEF_MIR_INTRINSIC(vector_mull_high_v2i32, "vector_mull_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_mull_high_v4i16, "vector_mull_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_mull_high_v8i8, "vector_mull_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_mull_high_v2u32, "vector_mull_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_mull_high_v4u16, "vector_mull_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_mull_high_v8u8, "vector_mull_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) + +// vecTy vector_merge(vecTy src1, vecTy src2, int n) +// Create a vector by concatenating the high elements of src1, starting +// with the nth element, followed by the low elements of src2. 
+DEF_MIR_INTRINSIC(vector_merge_v2i64, "vector_merge_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4i32, "vector_merge_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8i16, "vector_merge_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v16i8, "vector_merge_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8, kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2u64, "vector_merge_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4u32, "vector_merge_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8u16, "vector_merge_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v16u8, "vector_merge_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8, kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2f64, "vector_merge_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64, kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4f32, "vector_merge_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32, kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1i64, "vector_merge_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2i32, "vector_merge_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4i16, "vector_merge_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8i8, "vector_merge_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8, kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1u64, "vector_merge_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2u32, "vector_merge_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4u16, "vector_merge_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8u8, "vector_merge_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8, kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1f64, "vector_merge_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64, kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2f32, "vector_merge_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32, kArgTyV2F32, kArgTyI32) + +// vecTy2 vector_get_low(vecTy1 src) +// Create a vector from the low part of the source vector. 
+DEF_MIR_INTRINSIC(vector_get_low_v2i64, "vector_get_low_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_get_low_v4i32, "vector_get_low_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_get_low_v8i16, "vector_get_low_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_get_low_v16i8, "vector_get_low_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_get_low_v2u64, "vector_get_low_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_get_low_v4u32, "vector_get_low_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_get_low_v8u16, "vector_get_low_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_get_low_v16u8, "vector_get_low_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_get_low_v2f64, "vector_get_low_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_get_low_v4f32, "vector_get_low_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV4F32) + +// vecTy2 vector_get_high(vecTy1 src) +// Create a vector from the high part of the source vector. +DEF_MIR_INTRINSIC(vector_get_high_v2i64, "vector_get_high_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_get_high_v4i32, "vector_get_high_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_get_high_v8i16, "vector_get_high_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_get_high_v16i8, "vector_get_high_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_get_high_v2u64, "vector_get_high_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_get_high_v4u32, "vector_get_high_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_get_high_v8u16, "vector_get_high_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_get_high_v16u8, "vector_get_high_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_get_high_v2f64, "vector_get_high_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_get_high_v4f32, "vector_get_high_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV4F32) + +// scalarTy vector_get_element(vecTy src, int n) +// Get the nth element of the source vector.
+DEF_MIR_INTRINSIC(vector_get_element_v2i64, "vector_get_element_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4i32, "vector_get_element_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8i16, "vector_get_element_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v16i8, "vector_get_element_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2u64, "vector_get_element_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4u32, "vector_get_element_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8u16, "vector_get_element_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v16u8, "vector_get_element_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2f64, "vector_get_element_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4f32, "vector_get_element_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1i64, "vector_get_element_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2i32, "vector_get_element_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4i16, "vector_get_element_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8i8, "vector_get_element_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1u64, "vector_get_element_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2u32, "vector_get_element_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4u16, "vector_get_element_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8u8, "vector_get_element_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1f64, "vector_get_element_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2f32, "vector_get_element_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV2F32, kArgTyI32) + +// vecTy vector_set_element(ScalarTy value, VecTy vec, int n) +// Set the nth element of the source vector to value. 
+DEF_MIR_INTRINSIC(vector_set_element_v2i64, "vector_set_element_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyI64, kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4i32, "vector_set_element_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyI32, kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8i16, "vector_set_element_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyI16, kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v16i8, "vector_set_element_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyI8, kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2u64, "vector_set_element_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyU64, kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4u32, "vector_set_element_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyU32, kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8u16, "vector_set_element_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyU16, kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v16u8, "vector_set_element_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyU8, kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2f64, "vector_set_element_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyF64, kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4f32, "vector_set_element_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyF32, kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1i64, "vector_set_element_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyI64, kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2i32, "vector_set_element_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyI32, kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4i16, "vector_set_element_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyI16, kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8i8, "vector_set_element_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyI8, kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1u64, "vector_set_element_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyU64, kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2u32, "vector_set_element_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyU32, kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4u16, "vector_set_element_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyU16, kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8u8, "vector_set_element_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyU8, kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1f64, "vector_set_element_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyF64, kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2f32, "vector_set_element_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyF32, kArgTyV2F32, kArgTyI32) + +// vecTy2 vector_widen_low(vecTy1 src) +// Widen each element of the 64-bit 
argument to double the size of the +// original width into a 128-bit destination vector. +DEF_MIR_INTRINSIC(vector_widen_low_v2i32, "vector_widen_low_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_widen_low_v4i16, "vector_widen_low_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_widen_low_v8i8, "vector_widen_low_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_widen_low_v2u32, "vector_widen_low_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_widen_low_v4u16, "vector_widen_low_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_widen_low_v8u8, "vector_widen_low_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8) + +// vecTy2 vector_widen_high(vecTy1 src) +// Widen each upper element of the 128-bit source vector to double the size +// of the original width into a 128-bit destination vector. +DEF_MIR_INTRINSIC(vector_widen_high_v2i32, "vector_widen_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_widen_high_v4i16, "vector_widen_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_widen_high_v8i8, "vector_widen_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_widen_high_v2u32, "vector_widen_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_widen_high_v4u16, "vector_widen_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_widen_high_v8u8, "vector_widen_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8) + +// vecTy2 vector_narrow_low(vecTy1 src) +// Narrow each element of the 128-bit source vector to half of the original width, +// then write it to the lower half of the destination vector. +DEF_MIR_INTRINSIC(vector_narrow_low_v2i64, "vector_narrow_low_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_narrow_low_v4i32, "vector_narrow_low_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_narrow_low_v8i16, "vector_narrow_low_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_narrow_low_v2u64, "vector_narrow_low_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_narrow_low_v4u32, "vector_narrow_low_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_narrow_low_v8u16, "vector_narrow_low_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U16) + +// vecTy2 vector_narrow_high(vecTy1 src) +// Narrow each element of the upper source vector to half of the original width, +// then concatenate it with the first 64-bit arg into a 128-bit destination vector.
+DEF_MIR_INTRINSIC(vector_narrow_high_v2i64, "vector_narrow_high_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV2I32, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_narrow_high_v4i32, "vector_narrow_high_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV4I16, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_narrow_high_v8i16, "vector_narrow_high_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV8I8, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_narrow_high_v2u64, "vector_narrow_high_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV2U32, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_narrow_high_v4u32, "vector_narrow_high_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV4U16, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_narrow_high_v8u16, "vector_narrow_high_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV8U8, kArgTyV8U16) + +// vecTy vector_pairwise_adalp(vecTy src1, vecTy2 src2) +// Pairwise add of src2 then accumulate into src1 as dest +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8i8, "vector_pairwise_adalp_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4i16, "vector_pairwise_adalp_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v2i32, "vector_pairwise_adalp_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8u8, "vector_pairwise_adalp_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4u16, "vector_pairwise_adalp_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v2u32, "vector_pairwise_adalp_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v16i8, "vector_pairwise_adalp_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8i16, "vector_pairwise_adalp_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4i32, "vector_pairwise_adalp_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v16u8, "vector_pairwise_adalp_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8u16, "vector_pairwise_adalp_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4u32, "vector_pairwise_adalp_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) + +// vecTy2 vector_pairwise_add(vecTy1 src) +// Add pairs of elements from the source vector and put the result into the +// destination vector, whose element size is twice and the number of +// elements is half of the source vector type. 
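The pairwise intrinsics pair up adjacent lanes: vector_pairwise_add widens each pair sum into half as many lanes at twice the width, and vector_pairwise_adalp additionally accumulates those sums into the first operand, as the v4i16 accumulator of the v8i8 variant above indicates. A minimal scalar sketch with invented helper names; the vector_pairwise_add definitions follow.

```
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// vector_pairwise_add_v8i8: add adjacent pairs of lanes, widening each sum
// to 16 bits (half as many lanes at twice the width).
std::array<int16_t, 4> PairwiseAddV8I8(const std::array<int8_t, 8> &src) {
    std::array<int16_t, 4> dst{};
    for (size_t i = 0; i < 4; ++i) {
        dst[i] = static_cast<int16_t>(src[2 * i] + src[2 * i + 1]);
    }
    return dst;
}

// vector_pairwise_adalp_v8i8: the accumulating variant; the widened pairwise
// sums of src are added into the lanes of the accumulator.
std::array<int16_t, 4> PairwiseAdalpV8I8(std::array<int16_t, 4> acc,
                                         const std::array<int8_t, 8> &src) {
    const auto sums = PairwiseAddV8I8(src);
    for (size_t i = 0; i < 4; ++i) {
        acc[i] = static_cast<int16_t>(acc[i] + sums[i]);
    }
    return acc;
}

int main() {
    std::array<int8_t, 8> v = {1, 2, 3, 4, 5, 6, 7, 8};
    auto p = PairwiseAddV8I8(v);
    assert(p[0] == 3 && p[3] == 15);
    auto a = PairwiseAdalpV8I8({10, 10, 10, 10}, v);
    assert(a[0] == 13 && a[3] == 25);
    return 0;
}
```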
+DEF_MIR_INTRINSIC(vector_pairwise_add_v4i32, "vector_pairwise_add_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8i16, "vector_pairwise_add_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v16i8, "vector_pairwise_add_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4u32, "vector_pairwise_add_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8u16, "vector_pairwise_add_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v16u8, "vector_pairwise_add_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v2i32, "vector_pairwise_add_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4i16, "vector_pairwise_add_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8i8, "vector_pairwise_add_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v2u32, "vector_pairwise_add_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4u16, "vector_pairwise_add_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8u8, "vector_pairwise_add_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U8) + +// vecTy vector_reverse(vecTy src) +// Create a vector by reversing the order of the elements in src. 
+DEF_MIR_INTRINSIC(vector_reverse_v2i64, "vector_reverse_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_reverse_v4i32, "vector_reverse_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_reverse_v8i16, "vector_reverse_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_reverse_v16i8, "vector_reverse_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse_v2u64, "vector_reverse_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_reverse_v4u32, "vector_reverse_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_reverse_v8u16, "vector_reverse_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_reverse_v16u8, "vector_reverse_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse_v2f64, "vector_reverse_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_reverse_v4f32, "vector_reverse_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_reverse_v1i64, "vector_reverse_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_reverse_v2i32, "vector_reverse_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_reverse_v4i16, "vector_reverse_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_reverse_v8i8, "vector_reverse_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_reverse_v1u64, "vector_reverse_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_reverse_v2u32, "vector_reverse_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_reverse_v4u16, "vector_reverse_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_reverse_v8u8, "vector_reverse_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse_v1f64, "vector_reverse_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_reverse_v2f32, "vector_reverse_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32) + +// vector_reverse16 with 8-bit elements +DEF_MIR_INTRINSIC(vector_reverse16_v16u8, "vector_reverse16_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse16_v16i8, "vector_reverse16_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse16_v8u8, "vector_reverse16_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse16_v8i8, "vector_reverse16_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) + +// vector_reverse64 with 8-bit elements 
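The reverse16/reverse64 group headers are terse. Assuming they follow the AArch64 REV16/REV64 pattern, where elements are reversed within each 16-bit or 64-bit container rather than across the whole register, a scalar sketch looks like this; the vector_reverse64 definitions follow.

```
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed vector_reverseN semantics: treat the vector as a sequence of N-bit
// containers and reverse the elements inside each container.
template <size_t GroupBytes, size_t N>
std::array<uint8_t, N> ReverseWithin(const std::array<uint8_t, N> &src) {
    static_assert(N % GroupBytes == 0, "vector must split into whole groups");
    std::array<uint8_t, N> dst{};
    for (size_t g = 0; g < N; g += GroupBytes) {
        for (size_t i = 0; i < GroupBytes; ++i) {
            dst[g + i] = src[g + GroupBytes - 1 - i];
        }
    }
    return dst;
}

int main() {
    std::array<uint8_t, 8> v = {0, 1, 2, 3, 4, 5, 6, 7};
    auto r16 = ReverseWithin<2>(v);  // vector_reverse16_v8u8: swap byte pairs
    assert(r16[0] == 1 && r16[1] == 0);
    auto r64 = ReverseWithin<8>(v);  // vector_reverse64_v8u8: reverse all 8
    assert(r64[0] == 7 && r64[7] == 0);
    return 0;
}
```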
+DEF_MIR_INTRINSIC(vector_reverse64_v16u8, "vector_reverse64_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse64_v16i8, "vector_reverse64_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse64_v8u8, "vector_reverse64_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse64_v8i8, "vector_reverse64_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) + +// vector_reverse64 with 16-bit elements +DEF_MIR_INTRINSIC(vector_reverse64_v8u16, "vector_reverse64_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_reverse64_v8i16, "vector_reverse64_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_reverse64_v4u16, "vector_reverse64_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_reverse64_v4i16, "vector_reverse64_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) + +// vector_reverse64 with 32-bit elements +DEF_MIR_INTRINSIC(vector_reverse64_v4u32, "vector_reverse64_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_reverse64_v4i32, "vector_reverse64_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_reverse64_v2u32, "vector_reverse64_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_reverse64_v2i32, "vector_reverse64_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) + +// vecTy2 vector_shift_narrow_low(vecTy1 src, const int n) +// Shift each element in the vector right by n, narrow each element to half +// of the original width (truncating), then write the result to the lower +// half of the destination vector. +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v2i64, "vector_shr_narrow_low_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v4i32, "vector_shr_narrow_low_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v8i16, "vector_shr_narrow_low_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v2u64, "vector_shr_narrow_low_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v4u32, "vector_shr_narrow_low_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v8u16, "vector_shr_narrow_low_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U16, kArgTyI32) + +// scalarTy vector_sum(vecTy src) +// Sum all of the elements in the vector into a scalar. 
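vector_sum is an across-vector reduction whose result has the element type (the v16i8 variant returns kArgTyI8), which suggests the addition is modular at the element width. A scalar sketch with an invented helper name; the definitions follow.

```
#include <array>
#include <cassert>
#include <cstdint>

// Scalar model of vector_sum_v16i8: reduce all lanes with element-width
// (wrapping) addition, since the result type is the element type.
int8_t SumV16I8(const std::array<int8_t, 16> &src) {
    int8_t acc = 0;
    for (int8_t lane : src) {
        acc = static_cast<int8_t>(acc + lane);  // modular at 8 bits
    }
    return acc;
}

int main() {
    std::array<int8_t, 16> v{};
    v.fill(1);
    assert(SumV16I8(v) == 16);
    return 0;
}
```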
+DEF_MIR_INTRINSIC(vector_sum_v2i64, "vector_sum_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_sum_v4i32, "vector_sum_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_sum_v8i16, "vector_sum_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_sum_v16i8, "vector_sum_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_sum_v2u64, "vector_sum_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_sum_v4u32, "vector_sum_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_sum_v8u16, "vector_sum_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_sum_v16u8, "vector_sum_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_sum_v2f64, "vector_sum_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_sum_v4f32, "vector_sum_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_sum_v1i64, "vector_sum_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_sum_v2i32, "vector_sum_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_sum_v4i16, "vector_sum_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_sum_v8i8, "vector_sum_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_sum_v1u64, "vector_sum_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_sum_v2u32, "vector_sum_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_sum_v4u16, "vector_sum_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_sum_v8u8, "vector_sum_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_sum_v1f64, "vector_sum_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_sum_v2f32, "vector_sum_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV2F32) + +// vecTy table_lookup(vecTy tbl, vecTy idx) +// Performs a table vector lookup. 
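The lookup presumably follows the AArch64 TBL convention for the byte-element forms: each index lane selects an element of the table, and an out-of-range index yields zero. A sketch under that assumption, with an invented helper name; the definitions follow.

```
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed vector_table_lookup_v8u8 semantics (AArch64 TBL convention):
// per-lane indexed select, out-of-range indices produce zero.
std::array<uint8_t, 8> TableLookupV8U8(const std::array<uint8_t, 8> &tbl,
                                       const std::array<uint8_t, 8> &idx) {
    std::array<uint8_t, 8> dst{};
    for (size_t i = 0; i < 8; ++i) {
        dst[i] = idx[i] < tbl.size() ? tbl[idx[i]] : 0;
    }
    return dst;
}

int main() {
    std::array<uint8_t, 8> tbl = {10, 11, 12, 13, 14, 15, 16, 17};
    std::array<uint8_t, 8> idx = {7, 0, 3, 200, 1, 1, 6, 2};
    auto r = TableLookupV8U8(tbl, idx);
    assert(r[0] == 17 && r[3] == 0);  // index 200 is out of range, gives 0
    return 0;
}
```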
+DEF_MIR_INTRINSIC(vector_table_lookup_v2i64, "vector_table_lookup_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4i32, "vector_table_lookup_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_table_lookup_v8i16, "vector_table_lookup_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_table_lookup_v16i8, "vector_table_lookup_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_table_lookup_v2u64, "vector_table_lookup_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4u32, "vector_table_lookup_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_table_lookup_v8u16, "vector_table_lookup_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_table_lookup_v16u8, "vector_table_lookup_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_table_lookup_v2f64, "vector_table_lookup_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64, kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4f32, "vector_table_lookup_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32, kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_table_lookup_v1i64, "vector_table_lookup_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2i32, "vector_table_lookup_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_table_lookup_v4i16, "vector_table_lookup_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_table_lookup_v8i8, "vector_table_lookup_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_table_lookup_v1u64, "vector_table_lookup_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2u32, "vector_table_lookup_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_table_lookup_v4u16, "vector_table_lookup_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_table_lookup_v8u8, "vector_table_lookup_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_table_lookup_v1f64, "vector_table_lookup_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64, kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2f32, "vector_table_lookup_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32, kArgTyV2F32) + +// vecArrTy vector_zip(vecTy a, vecTy b) +// Interleave the upper half of elements from a and b into the destination +// vector. 
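vector_zip returns an aggregate (kArgTyAgg), which fits the NEON vzip pattern: interleaving two N-lane vectors produces 2N lanes split across a pair of result vectors, with even output lanes drawn from the first source and odd lanes from the second. A sketch under that assumption, with an invented result struct; the definitions follow.

```
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-in for the kArgTyAgg result: a pair of 8-lane vectors.
struct ZipResultV8U8 {
    std::array<uint8_t, 8> val[2];
};

ZipResultV8U8 ZipV8U8(const std::array<uint8_t, 8> &a,
                      const std::array<uint8_t, 8> &b) {
    ZipResultV8U8 r{};
    for (size_t i = 0; i < 8; ++i) {
        // Lane pair (a[i], b[i]) lands in the first result vector for the
        // low half of the inputs and in the second for the high half.
        r.val[i / 4][(2 * i) % 8] = a[i];
        r.val[i / 4][(2 * i) % 8 + 1] = b[i];
    }
    return r;
}

int main() {
    std::array<uint8_t, 8> a = {0, 1, 2, 3, 4, 5, 6, 7};
    std::array<uint8_t, 8> b = {10, 11, 12, 13, 14, 15, 16, 17};
    auto z = ZipV8U8(a, b);
    assert(z.val[0][0] == 0 && z.val[0][1] == 10);  // a0, b0 lead
    assert(z.val[1][6] == 7 && z.val[1][7] == 17);  // a7, b7 finish
    return 0;
}
```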
+DEF_MIR_INTRINSIC(vector_zip_v2i32, "vector_zip_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_zip_v4i16, "vector_zip_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_zip_v8i8, "vector_zip_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_zip_v2u32, "vector_zip_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_zip_v4u16, "vector_zip_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_zip_v8u8, "vector_zip_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_zip_v2f32, "vector_zip_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2F32, kArgTyV2F32) + +// vecTy vector_load(scalarTy *ptr) +// Load the elements pointed to by ptr into a vector. +DEF_MIR_INTRINSIC(vector_load_v2i64, "vector_load_v2i64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4i32, "vector_load_v4i32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8i16, "vector_load_v8i16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v16i8, "vector_load_v16i8", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2u64, "vector_load_v2u64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4u32, "vector_load_v4u32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8u16, "vector_load_v8u16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v16u8, "vector_load_v16u8", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2f64, "vector_load_v2f64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4f32, "vector_load_v4f32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1i64, "vector_load_v1i64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2i32, "vector_load_v2i32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4i16, "vector_load_v4i16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8i8, "vector_load_v8i8", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1u64, "vector_load_v1u64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2u32, "vector_load_v2u32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4u16, "vector_load_v4u16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4U16, + 
kArgTyPtr)
+DEF_MIR_INTRINSIC(vector_load_v8u8, "vector_load_v8u8",
+                  INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyPtr)
+DEF_MIR_INTRINSIC(vector_load_v1f64, "vector_load_v1f64",
+                  INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1F64,
+                  kArgTyPtr)
+DEF_MIR_INTRINSIC(vector_load_v2f32, "vector_load_v2f32",
+                  INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2F32,
+                  kArgTyPtr)
+
+// void vector_store(scalarTy *ptr, vecTy src)
+// Store the elements from src into the memory pointed to by ptr.
+DEF_MIR_INTRINSIC(vector_store_v2i64, "vector_store_v2i64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2I64)
+DEF_MIR_INTRINSIC(vector_store_v4i32, "vector_store_v4i32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_store_v8i16, "vector_store_v8i16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_store_v16i8, "vector_store_v16i8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_store_v2u64, "vector_store_v2u64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2U64)
+DEF_MIR_INTRINSIC(vector_store_v4u32, "vector_store_v4u32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_store_v8u16, "vector_store_v8u16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_store_v16u8, "vector_store_v16u8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_store_v2f64, "vector_store_v2f64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2F64)
+DEF_MIR_INTRINSIC(vector_store_v4f32, "vector_store_v4f32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4F32)
+DEF_MIR_INTRINSIC(vector_store_v1i64, "vector_store_v1i64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV1I64)
+DEF_MIR_INTRINSIC(vector_store_v2i32, "vector_store_v2i32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_store_v4i16, "vector_store_v4i16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_store_v8i8, "vector_store_v8i8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_store_v1u64, "vector_store_v1u64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV1U64)
+DEF_MIR_INTRINSIC(vector_store_v2u32, "vector_store_v2u32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2U32)
+DEF_MIR_INTRINSIC(vector_store_v4u16, "vector_store_v4u16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_store_v8u8, "vector_store_v8u8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_store_v1f64, "vector_store_v1f64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV1F64)
+DEF_MIR_INTRINSIC(vector_store_v2f32, "vector_store_v2f32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2F32)
+
+// vecTy vector_subl_low(vecTy src1, vecTy src2)
+// Subtract each element of the second source vector from the corresponding
+// element of the first, widening the result into the destination vector.
+DEF_MIR_INTRINSIC(vector_subl_low_v8i8, "vector_subl_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I8, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_subl_low_v4i16, "vector_subl_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I16, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_subl_low_v2i32, "vector_subl_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I32, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_subl_low_v8u8, "vector_subl_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U8, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_subl_low_v4u16, "vector_subl_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U16, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_subl_low_v2u32, "vector_subl_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U32, kArgTyV2U32)
+
+// vecTy vector_subl_high(vecTy src1, vecTy src2)
+// Subtract the upper-half elements of the second 128-bit source vector from
+// those of the first, widening the result into the destination vector.
+DEF_MIR_INTRINSIC(vector_subl_high_v8i8, "vector_subl_high_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV16I8, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_subl_high_v4i16, "vector_subl_high_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV8I16, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_subl_high_v2i32, "vector_subl_high_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV4I32, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_subl_high_v8u8, "vector_subl_high_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV16U8, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_subl_high_v4u16, "vector_subl_high_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV8U16, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_subl_high_v2u32, "vector_subl_high_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV4U32, kArgTyV4U32)
+
+// vecTy vector_subw_low(vecTy src1, vecTy src2)
+// Subtract each element of the narrow second source vector, widened, from
+// the corresponding element of the wide first source vector.
+DEF_MIR_INTRINSIC(vector_subw_low_v8i8, "vector_subw_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_subw_low_v4i16, "vector_subw_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_subw_low_v2i32, "vector_subw_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_subw_low_v8u8, "vector_subw_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_subw_low_v4u16, "vector_subw_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_subw_low_v2u32, "vector_subw_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U64, kArgTyV2U32)
+
+// vecTy vector_subw_high(vecTy src1, vecTy src2)
+// Subtract each upper-half element of the second source vector, widened,
+// from the corresponding element of the wide first source vector.
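The subl/subw split mirrors NEON vsubl/vsubw: subl widens both narrow sources before subtracting, while subw keeps the first source at the wide width and widens only the second, which is why the subw signatures take one wide and one narrow operand. A scalar sketch of the two low-half variants with invented helper names; the vector_subw_high definitions follow.

```
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// vector_subl_low_v2i32: widen(a[i]) - widen(b[i]), both sources narrow.
std::array<int64_t, 2> SublLowV2I32(const std::array<int32_t, 2> &a,
                                    const std::array<int32_t, 2> &b) {
    std::array<int64_t, 2> dst{};
    for (size_t i = 0; i < 2; ++i) {
        dst[i] = static_cast<int64_t>(a[i]) - b[i];
    }
    return dst;
}

// vector_subw_low_v2i32: a[i] - widen(b[i]), first source already wide.
std::array<int64_t, 2> SubwLowV2I32(const std::array<int64_t, 2> &a,
                                    const std::array<int32_t, 2> &b) {
    std::array<int64_t, 2> dst{};
    for (size_t i = 0; i < 2; ++i) {
        dst[i] = a[i] - b[i];
    }
    return dst;
}

int main() {
    std::array<int32_t, 2> a32 = {INT32_MIN, 5};
    std::array<int32_t, 2> b32 = {1, 2};
    auto l = SublLowV2I32(a32, b32);
    assert(l[0] == static_cast<int64_t>(INT32_MIN) - 1);  // widened, no wrap
    std::array<int64_t, 2> a64 = {100, 200};
    auto w = SubwLowV2I32(a64, b32);
    assert(w[0] == 99 && w[1] == 198);
    return 0;
}
```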
+DEF_MIR_INTRINSIC(vector_subw_high_v8i8, "vector_subw_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_subw_high_v4i16, "vector_subw_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_subw_high_v2i32, "vector_subw_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_subw_high_v8u8, "vector_subw_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_subw_high_v4u16, "vector_subw_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_subw_high_v2u32, "vector_subw_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) diff --git a/ecmascript/mapleall/maple_ir/include/intrinsics.def b/ecmascript/mapleall/maple_ir/include/intrinsics.def new file mode 100644 index 0000000000000000000000000000000000000000..fc588ed36ced654f6bfebf2f1705e2520a63eb72 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsics.def @@ -0,0 +1,154 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(UNDEFINED,\ + nullptr, kIntrnUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(DEX_ATOMIC_INC,\ + "__dex_ainc", kIntrnIsAtomic, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(DEX_ATOMIC_DEC,\ + "__dex_adec", kIntrnIsAtomic, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_ATOMIC_EXCHANGE_PTR,\ + "__mpl_atomic_exchange_ptr", kIntrnIsAtomic, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLINIT_CHECK,\ + "__mpl_clinit_check", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_PROF_COUNTER_INC,\ + "__mpl_prof_counter_inc", INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyVoid, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLEAR_STACK,\ + "__mpl_clear_stack", kIntrnUndef, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_GET_VTAB_FUNC,\ + "MCC_getFuncPtrFromVtab", kIntrnUndef, kArgTyA64, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_STATIC_OFFSET_TAB,\ + "__mpl_read_static_offset", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY,\ + "__mpl_const_offset", INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY2,\ + "__mpl_const_offset2", INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_LAZY,\ + "__mpl_const_offset_lazy", INTRNNOSIDEEFFECT, kArgTyA32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_VTAB_LAZY,\ + "__mpl_const_offset_vtab_lazy", INTRNISPURE, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_FIELD_LAZY,\ + "__mpl_const_offset_field_lazy", INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_BOUNDARY_CHECK,\ + "", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyU1, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_ARRAYCLASS_CACHE_ENTRY,\ + "__mpl_const_arrayclass_cache", kIntrnUndef, kArgTyPtr, kArgTyU32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) + +// start of RC Intrinsics with one parameters +DEF_MIR_INTRINSIC(MCCSetPermanent,\ + "MCC_SetObjectPermanent", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncRef,\ + "MCC_IncRef_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCDecRef,\ + "MCC_DecRef_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCDecRefReset,\ + "MCC_ClearLocalStackRef", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCLoadRefSVol,\ + "MCC_LoadVolatileStaticField", INTRNISRC | 
INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRefS,\ + "MCC_LoadRefStatic", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCSetObjectPermanent,\ + "MCC_SetObjectPermanent", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +// start of RC Intrinsics with two parameters +DEF_MIR_INTRINSIC(MCCCheck,\ + "MCC_CheckRefCount", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyU32) +DEF_MIR_INTRINSIC(MCCCheckArrayStore,\ + "MCC_Reflect_Check_Arraystore", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncDecRef,\ + "MCC_IncDecRef_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncDecRefReset,\ + "MCC_IncDecRefReset", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCDecRefResetPair,\ + "MCC_DecRefResetPair", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCLoadWeakVol,\ + "MCC_LoadVolatileWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadWeak,\ + "MCC_LoadWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRef,\ + "MCC_LoadRefField_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRefVol,\ + "MCC_LoadVolatileField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteReferent,\ + "MCC_WriteReferent", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoInc,\ + "MCC_WriteVolatileStaticFieldNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoDec,\ + "MCC_WriteVolatileStaticFieldNoDec", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoRC,\ + "MCC_WriteVolatileStaticFieldNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVol,\ + "MCC_WriteVolatileStaticField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoInc,\ + "MCC_WriteRefFieldStaticNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoDec,\ + "MCC_WriteRefFieldStaticNoDec", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoRC,\ + "MCC_WriteRefFieldStaticNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteS,\ + "MCC_WriteRefFieldStatic", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) + +// start of RC intrinsics with three parameters +DEF_MIR_INTRINSIC(MCCWriteVolNoInc,\ + "MCC_WriteVolatileFieldNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolNoDec,\ + "MCC_WriteVolatileFieldNoDec", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolNoRC,\ + "MCC_WriteVolatileFieldNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVol,\ + "MCC_WriteVolatileField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoInc,\ + "MCC_WriteRefFieldNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoDec,\ + "MCC_WriteRefFieldNoDec", INTRNISRC | 
INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoRC,\ + "MCC_WriteRefFieldNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWrite,\ + "MCC_WriteRefField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolWeak,\ + "MCC_WriteVolatileWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteWeak,\ + "MCC_WriteWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) + +DEF_MIR_INTRINSIC(MPL_CLEANUP_LOCALREFVARS,\ + "__mpl_cleanup_localrefvars", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MPL_CLEANUP_LOCALREFVARS_SKIP,\ + "__mpl_cleanup_localrefvars_skip", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MPL_MEMSET_LOCALVAR,\ + "", kIntrnUndef, kArgTyPtr, kArgTyU32, kArgTyU8, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_SET_CLASS,\ + "", kIntrnUndef, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLEANUP_NORETESCOBJS,\ + "__mpl_cleanup_noretescobjs", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef,\ + kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) + +// start of GC Intrinsics +DEF_MIR_INTRINSIC(MCCGCCheck,\ + "MCC_CheckObjAllocated", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +// start of Profile Intrinsics +DEF_MIR_INTRINSIC(MCCSaveProf,\ + "MCC_SaveProfile", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +#include "intrinsic_java.def" +#include "simplifyintrinsics.def" +#include "intrinsic_c.def" +#include "intrinsic_js.def" +#include "intrinsic_js_eng.def" +#include "dex2mpl/dexintrinsic.def" +#include "intrinsic_dai.def" +#include "intrinsic_vector.def" +DEF_MIR_INTRINSIC(LAST,\ + nullptr, kIntrnUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) diff --git a/ecmascript/mapleall/maple_ir/include/intrinsics.h b/ecmascript/mapleall/maple_ir/include/intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..d646a194bafa03d708114f5da353dd9f6e886a42 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/intrinsics.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLE_IR_INCLUDE_INTRINSICS_H
+#define MAPLE_IR_INCLUDE_INTRINSICS_H
+#include "prim_types.h"
+#include "intrinsic_op.h"
+
+namespace maple {
+enum IntrinProperty {
+  kIntrnUndef,
+  kIntrnIsJs,
+  kIntrnIsJsUnary,
+  kIntrnIsJsBinary,
+  kIntrnIsJava,
+  kIntrnIsJavaUnary,
+  kIntrnIsJavaBinary,
+  kIntrnIsReturnStruct,
+  kIntrnNoSideEffect,
+  kIntrnIsLoadMem,
+  kIntrnIsPure,
+  kIntrnNeverReturn,
+  kIntrnIsAtomic,
+  kIntrnIsRC,
+  kIntrnIsSpecial,
+  kIntrnIsVector
+};
+
+enum IntrinArgType {
+  kArgTyUndef,
+  kArgTyVoid,
+  kArgTyI8,
+  kArgTyI16,
+  kArgTyI32,
+  kArgTyI64,
+  kArgTyU8,
+  kArgTyU16,
+  kArgTyU32,
+  kArgTyU64,
+  kArgTyU1,
+  kArgTyPtr,
+  kArgTyRef,
+  kArgTyA32,
+  kArgTyA64,
+  kArgTyF32,
+  kArgTyF64,
+  kArgTyF128,
+  kArgTyC64,
+  kArgTyC128,
+  kArgTyAgg,
+  kArgTyV2I64,
+  kArgTyV4I32,
+  kArgTyV8I16,
+  kArgTyV16I8,
+  kArgTyV2U64,
+  kArgTyV4U32,
+  kArgTyV8U16,
+  kArgTyV16U8,
+  kArgTyV2F64,
+  kArgTyV4F32,
+  kArgTyV1I64,
+  kArgTyV2I32,
+  kArgTyV4I16,
+  kArgTyV8I8,
+  kArgTyV1U64,
+  kArgTyV2U32,
+  kArgTyV4U16,
+  kArgTyV8U8,
+  kArgTyV1F64,
+  kArgTyV2F32,
+#ifdef DYNAMICLANG
+  kArgTyDynany,
+  kArgTyDynu32,
+  kArgTyDyni32,
+  kArgTyDynundef,
+  kArgTyDynnull,
+  kArgTyDynhole,
+  kArgTyDynbool,
+  kArgTyDynf64,
+  kArgTyDynf32,
+  kArgTySimplestr,
+  kArgTyDynstr,
+  kArgTySimpleobj,
+  kArgTyDynobj
+#endif
+};
+
+constexpr uint32 INTRNISJS = 1U << kIntrnIsJs;
+constexpr uint32 INTRNISJSUNARY = 1U << kIntrnIsJsUnary;
+constexpr uint32 INTRNISJSBINARY = 1U << kIntrnIsJsBinary;
+constexpr uint32 INTRNISJAVA = 1U << kIntrnIsJava;
+constexpr uint32 INTRNNOSIDEEFFECT = 1U << kIntrnNoSideEffect;
+constexpr uint32 INTRNRETURNSTRUCT = 1U << kIntrnIsReturnStruct;
+constexpr uint32 INTRNLOADMEM = 1U << kIntrnIsLoadMem;
+constexpr uint32 INTRNISPURE = 1U << kIntrnIsPure;
+constexpr uint32 INTRNNEVERRETURN = 1U << kIntrnNeverReturn;
+constexpr uint32 INTRNATOMIC = 1U << kIntrnIsAtomic;
+constexpr uint32 INTRNISRC = 1U << kIntrnIsRC;
+constexpr uint32 INTRNISSPECIAL = 1U << kIntrnIsSpecial;
+constexpr uint32 INTRNISVECTOR = 1U << kIntrnIsVector;
+class MIRType;  // circular dependency exists, no other choice
+class MIRModule;  // circular dependency exists, no other choice
+struct IntrinDesc {
+  static constexpr int kMaxArgsNum = 7;
+  const char *name;
+  uint32 properties;
+  IntrinArgType argTypes[1 + kMaxArgsNum];  // argTypes[0] is the return type
+  bool IsJS() const {
+    return static_cast<bool>(properties & INTRNISJS);
+  }
+
+  bool IsJava() const {
+    return static_cast<bool>(properties & INTRNISJAVA);
+  }
+
+  bool IsJsUnary() const {
+    return static_cast<bool>(properties & INTRNISJSUNARY);
+  }
+
+  bool IsJsBinary() const {
+    return static_cast<bool>(properties & INTRNISJSBINARY);
+  }
+
+  bool IsJsOp() const {
+    return static_cast<bool>(properties & INTRNISJSUNARY) || static_cast<bool>(properties & INTRNISJSBINARY);
+  }
+
+  bool IsLoadMem() const {
+    return static_cast<bool>(properties & INTRNLOADMEM);
+  }
+
+  bool IsJsReturnStruct() const {
+    return static_cast<bool>(properties & INTRNRETURNSTRUCT);
+  }
+
+  bool IsPure() const {
+    return static_cast<bool>(properties & INTRNISPURE);
+  }
+
+  bool IsNeverReturn() const {
+    return static_cast<bool>(properties & INTRNNEVERRETURN);
+  }
+
+  bool IsAtomic() const {
+    return static_cast<bool>(properties & INTRNATOMIC);
+  }
+
+  bool IsRC() const {
+    return static_cast<bool>(properties & INTRNISRC);
+  }
+
+  bool IsSpecial() const {
+    return static_cast<bool>(properties & INTRNISSPECIAL);
+  }
+
+  bool HasNoSideEffect() const {
+    return properties & INTRNNOSIDEEFFECT;
+  }
+
+  bool IsVectorOp() const {
+    return static_cast<bool>(properties &
INTRNISVECTOR); + } + + MIRType *GetReturnType() const; + MIRType *GetArgType(uint32 index) const; + MIRType *GetTypeFromArgTy(IntrinArgType argType) const; + static MIRType *jsValueType; + static MIRModule *mirModule; + static void InitMIRModule(MIRModule *mirModule); + static MIRType *GetOrCreateJSValueType(); + static IntrinDesc intrinTable[INTRN_LAST + 1]; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_INTRINSICS_H diff --git a/ecmascript/mapleall/maple_ir/include/ir_safe_cast_traits.def b/ecmascript/mapleall/maple_ir/include/ir_safe_cast_traits.def new file mode 100644 index 0000000000000000000000000000000000000000..14ed1a367b4ea5f8b738103b6412af97f7ae2988 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/ir_safe_cast_traits.def @@ -0,0 +1,247 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include +#include "opcode_info.h" + +namespace maple { +#ifdef LOAD_SAFE_CAST_FOR_MIR_CONST +#undef LOAD_SAFE_CAST_FOR_MIR_CONST +REGISTER_SAFE_CAST(MIRIntConst, from.GetKind() == kConstInt); +REGISTER_SAFE_CAST(MIRAddrofConst, from.GetKind() == kConstAddrof); +REGISTER_SAFE_CAST(MIRAddroffuncConst, from.GetKind() == kConstAddrofFunc); +REGISTER_SAFE_CAST(MIRLblConst, from.GetKind() == kConstLblConst); +REGISTER_SAFE_CAST(MIRStrConst, from.GetKind() == kConstStrConst); +REGISTER_SAFE_CAST(MIRStr16Const, from.GetKind() == kConstStr16Const); +REGISTER_SAFE_CAST(MIRFloatConst, from.GetKind() == kConstFloatConst); +REGISTER_SAFE_CAST(MIRDoubleConst, from.GetKind() == kConstDoubleConst); +REGISTER_SAFE_CAST(MIRFloat128Const, from.GetKind() == kConstFloat128Const); +REGISTER_SAFE_CAST(MIRAggConst, from.GetKind() == kConstAggConst); +REGISTER_SAFE_CAST(MIRStConst, from.GetKind() == kConstStConst); +#endif + +#ifdef LOAD_SAFE_CAST_FOR_MIR_TYPE +#undef LOAD_SAFE_CAST_FOR_MIR_TYPE +REGISTER_SAFE_CAST(MIRPtrType, from.GetKind() == kTypePointer); +REGISTER_SAFE_CAST(MIRArrayType, from.GetKind() == kTypeArray); +REGISTER_SAFE_CAST(MIRFarrayType, from.GetKind() == kTypeFArray || + instance_of(from)); +REGISTER_SAFE_CAST(MIRStructType, from.GetKind() == kTypeStruct || + from.GetKind() == kTypeStructIncomplete || + from.GetKind() == kTypeUnion || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(MIRJarrayType, from.GetKind() == kTypeJArray); +REGISTER_SAFE_CAST(MIRClassType, from.GetKind() == kTypeClass || + from.GetKind() == kTypeClassIncomplete); +REGISTER_SAFE_CAST(MIRInterfaceType, from.GetKind() == kTypeInterface || + from.GetKind() == kTypeInterfaceIncomplete); +REGISTER_SAFE_CAST(MIRBitFieldType, from.GetKind() == kTypeBitField); +REGISTER_SAFE_CAST(MIRFuncType, from.GetKind() == kTypeFunction); +REGISTER_SAFE_CAST(MIRTypeByName, from.GetKind() == kTypeByName); +REGISTER_SAFE_CAST(MIRTypeParam, from.GetKind() == kTypeParam); +REGISTER_SAFE_CAST(MIRInstantVectorType, from.GetKind() == kTypeInstantVector); +REGISTER_SAFE_CAST(MIRGenericInstantType, from.GetKind() == kTypeGenericInstant); +#endif + +#ifdef LOAD_SAFE_CAST_FOR_MIR_NODE +#undef 
LOAD_SAFE_CAST_FOR_MIR_NODE
+REGISTER_SAFE_CAST(UnaryNode, from.GetOpCode() == OP_abs ||
+                              from.GetOpCode() == OP_bnot ||
+                              from.GetOpCode() == OP_lnot ||
+                              from.GetOpCode() == OP_neg ||
+                              from.GetOpCode() == OP_recip ||
+                              from.GetOpCode() == OP_sqrt ||
+                              from.GetOpCode() == OP_alloca ||
+                              from.GetOpCode() == OP_malloc ||
+                              instance_of(from) ||
+                              instance_of(from) ||
+                              instance_of(from) ||
+                              instance_of(from) ||
+                              instance_of(from));
+REGISTER_SAFE_CAST(TypeCvtNode, from.GetOpCode() == OP_ceil ||
+                                from.GetOpCode() == OP_cvt ||
+                                from.GetOpCode() == OP_floor ||
+                                from.GetOpCode() == OP_round ||
+                                from.GetOpCode() == OP_trunc ||
+                                instance_of(from));
+REGISTER_SAFE_CAST(RetypeNode, from.GetOpCode() == OP_retype);
+REGISTER_SAFE_CAST(ExtractbitsNode, from.GetOpCode() == OP_extractbits ||
+                                    from.GetOpCode() == OP_sext ||
+                                    from.GetOpCode() == OP_zext);
+REGISTER_SAFE_CAST(GCMallocNode, from.GetOpCode() == OP_gcmalloc ||
+                                 from.GetOpCode() == OP_gcpermalloc);
+REGISTER_SAFE_CAST(JarrayMallocNode, from.GetOpCode() == OP_gcmallocjarray ||
+                                     from.GetOpCode() == OP_gcpermallocjarray);
+REGISTER_SAFE_CAST(IreadNode, from.GetOpCode() == OP_iread ||
+                              from.GetOpCode() == OP_iaddrof);
+REGISTER_SAFE_CAST(IreadoffNode, from.GetOpCode() == OP_ireadoff);
+REGISTER_SAFE_CAST(IreadFPoffNode, from.GetOpCode() == OP_ireadfpoff);
+REGISTER_SAFE_CAST(BinaryNode, from.GetOpCode() == OP_add ||
+                               from.GetOpCode() == OP_sub ||
+                               from.GetOpCode() == OP_mul ||
+                               from.GetOpCode() == OP_div ||
+                               from.GetOpCode() == OP_rem ||
+                               from.GetOpCode() == OP_ashr ||
+                               from.GetOpCode() == OP_lshr ||
+                               from.GetOpCode() == OP_shl ||
+                               from.GetOpCode() == OP_max ||
+                               from.GetOpCode() == OP_min ||
+                               from.GetOpCode() == OP_band ||
+                               from.GetOpCode() == OP_bior ||
+                               from.GetOpCode() == OP_bxor ||
+                               from.GetOpCode() == OP_CG_array_elem_add ||
+                               from.GetOpCode() == OP_land ||
+                               from.GetOpCode() == OP_lior ||
+                               from.GetOpCode() == OP_cand ||
+                               from.GetOpCode() == OP_cior ||
+                               instance_of(from) ||
+                               instance_of(from) ||
+                               instance_of(from));
+REGISTER_SAFE_CAST(CompareNode, from.GetOpCode() == OP_eq ||
+                                from.GetOpCode() == OP_ge ||
+                                from.GetOpCode() == OP_gt ||
+                                from.GetOpCode() == OP_le ||
+                                from.GetOpCode() == OP_lt ||
+                                from.GetOpCode() == OP_ne ||
+                                from.GetOpCode() == OP_cmp ||
+                                from.GetOpCode() == OP_cmpl ||
+                                from.GetOpCode() == OP_cmpg);
+REGISTER_SAFE_CAST(DepositbitsNode, from.GetOpCode() == OP_depositbits);
+REGISTER_SAFE_CAST(ResolveFuncNode, from.GetOpCode() == OP_resolveinterfacefunc ||
+                                    from.GetOpCode() == OP_resolvevirtualfunc);
+REGISTER_SAFE_CAST(TernaryNode, from.GetOpCode() == OP_select);
+REGISTER_SAFE_CAST(NaryNode, instance_of(from) ||
+                             instance_of(from));
+REGISTER_SAFE_CAST(IntrinsicopNode, from.GetOpCode() == OP_intrinsicop ||
+                                    from.GetOpCode() == OP_intrinsicopwithtype);
+REGISTER_SAFE_CAST(ConstvalNode, from.GetOpCode() == OP_constval);
+REGISTER_SAFE_CAST(ConststrNode, from.GetOpCode() == OP_conststr);
+REGISTER_SAFE_CAST(Conststr16Node, from.GetOpCode() == OP_conststr16);
+REGISTER_SAFE_CAST(SizeoftypeNode, from.GetOpCode() == OP_sizeoftype);
+REGISTER_SAFE_CAST(FieldsDistNode, from.GetOpCode() == OP_fieldsdist);
+REGISTER_SAFE_CAST(ArrayNode, from.GetOpCode() == OP_array);
+REGISTER_SAFE_CAST(AddrofNode, from.GetOpCode() == OP_dread ||
+                               from.GetOpCode() == OP_addrof);
+REGISTER_SAFE_CAST(RegreadNode, from.GetOpCode() == OP_regread);
+REGISTER_SAFE_CAST(AddroffuncNode, from.GetOpCode() == OP_addroffunc);
+REGISTER_SAFE_CAST(AddroflabelNode, from.GetOpCode() == OP_addroflabel);
+REGISTER_SAFE_CAST(StmtNode,
from.GetOpCode() == OP_finally || + from.GetOpCode() == OP_cleanuptry || + from.GetOpCode() == OP_endtry || + from.GetOpCode() == OP_retsub || + from.GetOpCode() == OP_membaracquire || + from.GetOpCode() == OP_membarrelease || + from.GetOpCode() == OP_membarstoreload || + from.GetOpCode() == OP_membarstorestore || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(IassignNode, from.GetOpCode() == OP_iassign); +REGISTER_SAFE_CAST(GotoNode, from.GetOpCode() == OP_goto || + from.GetOpCode() == OP_gosub); +REGISTER_SAFE_CAST(JsTryNode, from.GetOpCode() == OP_jstry); +REGISTER_SAFE_CAST(TryNode, from.GetOpCode() == OP_try); +REGISTER_SAFE_CAST(CatchNode, from.GetOpCode() == OP_catch); +REGISTER_SAFE_CAST(SwitchNode, from.GetOpCode() == OP_switch); +REGISTER_SAFE_CAST(MultiwayNode, from.GetOpCode() == OP_multiway); +REGISTER_SAFE_CAST(UnaryStmtNode, from.GetOpCode() == OP_eval || + from.GetOpCode() == OP_throw || + from.GetOpCode() == OP_free || + from.GetOpCode() == OP_decref || + from.GetOpCode() == OP_incref || + from.GetOpCode() == OP_decrefreset || + (kOpcodeInfo.IsAssertNonnull(from.GetOpCode()) && + !kOpcodeInfo.IsCallAssertNonnull(from.GetOpCode())) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(CallAssertNonnullStmtNode, from.GetOpCode() == OP_callassertnonnull); +REGISTER_SAFE_CAST(DassignNode, from.GetOpCode() == OP_dassign || + from.GetOpCode() == OP_maydassign); +REGISTER_SAFE_CAST(RegassignNode, from.GetOpCode() == OP_regassign); +REGISTER_SAFE_CAST(CondGotoNode, from.GetOpCode() == OP_brtrue || + from.GetOpCode() == OP_brfalse); +REGISTER_SAFE_CAST(RangeGotoNode, from.GetOpCode() == OP_rangegoto); +REGISTER_SAFE_CAST(BlockNode, from.GetOpCode() == OP_block); +REGISTER_SAFE_CAST(IfStmtNode, from.GetOpCode() == OP_if); +REGISTER_SAFE_CAST(WhileStmtNode, from.GetOpCode() == OP_while || + from.GetOpCode() == OP_dowhile); +REGISTER_SAFE_CAST(DoloopNode, from.GetOpCode() == OP_doloop); +REGISTER_SAFE_CAST(ForeachelemNode, from.GetOpCode() == OP_foreachelem); +REGISTER_SAFE_CAST(BinaryStmtNode, from.GetOpCode() == OP_assertge || + from.GetOpCode() == OP_assertlt || + instance_of(from)); +REGISTER_SAFE_CAST(IassignoffNode, from.GetOpCode() == OP_iassignoff); +REGISTER_SAFE_CAST(IassignFPoffNode, from.GetOpCode() == OP_iassignfpoff); +REGISTER_SAFE_CAST(NaryStmtNode, from.GetOpCode() == OP_return || + from.GetOpCode() == OP_syncenter || + from.GetOpCode() == OP_syncexit || + instance_of(from) || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(CallNode, from.GetOpCode() == OP_call || + from.GetOpCode() == OP_virtualcall || + from.GetOpCode() == OP_superclasscall || + from.GetOpCode() == OP_interfacecall || + from.GetOpCode() == OP_customcall || + from.GetOpCode() == OP_polymorphiccall || + from.GetOpCode() == OP_interfaceicall || + from.GetOpCode() == OP_virtualicall || + from.GetOpCode() == OP_callassigned || + from.GetOpCode() == OP_virtualcallassigned || + from.GetOpCode() == OP_superclasscallassigned || + from.GetOpCode() == OP_interfacecallassigned || + from.GetOpCode() == OP_customcallassigned || + from.GetOpCode() == OP_polymorphiccallassigned || + 
from.GetOpCode() == OP_interfaceicallassigned || + from.GetOpCode() == OP_virtualicallassigned || + instance_of(from)); +REGISTER_SAFE_CAST(IcallNode, from.GetOpCode() == OP_icall || + from.GetOpCode() == OP_icallassigned || + from.GetOpCode() == OP_icallproto || + from.GetOpCode() == OP_icallprotoassigned); +REGISTER_SAFE_CAST(IntrinsiccallNode, from.GetOpCode() == OP_intrinsiccall || + from.GetOpCode() == OP_intrinsiccallwithtype || + from.GetOpCode() == OP_xintrinsiccall || + from.GetOpCode() == OP_intrinsiccallassigned || + from.GetOpCode() == OP_intrinsiccallwithtypeassigned || + from.GetOpCode() == OP_xintrinsiccallassigned); +REGISTER_SAFE_CAST(CallinstantNode, from.GetOpCode() == OP_callinstant || + from.GetOpCode() == OP_virtualcallinstant || + from.GetOpCode() == OP_superclasscallinstant || + from.GetOpCode() == OP_interfacecallinstant || + from.GetOpCode() == OP_callinstantassigned || + from.GetOpCode() == OP_virtualcallinstantassigned || + from.GetOpCode() == OP_superclasscallinstantassigned || + from.GetOpCode() == OP_interfacecallinstantassigned); +REGISTER_SAFE_CAST(LabelNode, from.GetOpCode() == OP_label); +REGISTER_SAFE_CAST(CommentNode, from.GetOpCode() == OP_comment); +#endif +} diff --git a/ecmascript/mapleall/maple_ir/include/java_eh_lower.h b/ecmascript/mapleall/maple_ir/include/java_eh_lower.h new file mode 100644 index 0000000000000000000000000000000000000000..5462ea04b899ef303bcfcf1495712a9df7c9fe8a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/java_eh_lower.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H +#define MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H +#include "phase_impl.h" +#include "class_hierarchy.h" +#include "maple_phase_manager.h" + +namespace maple { +class JavaEHLowerer : public FuncOptimizeImpl { + public: + JavaEHLowerer(MIRModule &mod, KlassHierarchy *kh, bool dump) : FuncOptimizeImpl(mod, kh, dump) {} + ~JavaEHLowerer() = default; + + FuncOptimizeImpl *Clone() override { + return new JavaEHLowerer(*this); + } + + void ProcessFunc(MIRFunction *func) override; + + private: + BlockNode *DoLowerBlock(BlockNode&); + BaseNode *DoLowerExpr(BaseNode&, BlockNode&); + BaseNode *DoLowerDiv(BinaryNode&, BlockNode&); + void DoLowerBoundaryCheck(IntrinsiccallNode&, BlockNode&); + BaseNode *DoLowerRem(BinaryNode &expr, BlockNode &blkNode) { + return DoLowerDiv(expr, blkNode); + } + + uint32 divSTIndex = 0; // The index of divide operand and result. + bool useRegTmp = Options::usePreg; // Use register to save temp variable or not. 
+}; + +MAPLE_MODULE_PHASE_DECLARE(M2MJavaEHLowerer) +} // namespace maple +#endif // MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H diff --git a/ecmascript/mapleall/maple_ir/include/keywords.def b/ecmascript/mapleall/maple_ir/include/keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..51a603eafae235a6acdc13ca654ab98383dda862 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/keywords.def @@ -0,0 +1,104 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + // opcode keywords +#define OPCODE(X, Y, Z, S) KEYWORD(X) +#include "opcodes.def" +#undef OPCODE + // primitive types +#define LOAD_ALGO_PRIMARY_TYPE +#define PRIMTYPE(P) KEYWORD(P) +#include "prim_types.def" +#undef PRIMTYPE + // intrinsic names +#undef DEF_MIR_INTRINSIC +#define DEF_MIR_INTRINSIC(X, NAME, INTRN_CLASS, RETURN_TYPE, ...) KEYWORD(X) +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC + KEYWORD(else) + // declaration keywords + KEYWORD(var) + KEYWORD(tempvar) + KEYWORD(reg) + KEYWORD(type) + KEYWORD(func) + KEYWORD(struct) + KEYWORD(structincomplete) + KEYWORD(union) + KEYWORD(class) + KEYWORD(classincomplete) + KEYWORD(interfaceincomplete) + KEYWORD(javaclass) + KEYWORD(javainterface) + // type attribute keywords +#define FUNC_ATTR +#define TYPE_ATTR +#define FIELD_ATTR +#define ATTR(X) KEYWORD(X) +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +#undef TYPE_ATTR +#undef FIELD_ATTR + KEYWORD(align) + // per-function declaration keywords + KEYWORD(framesize) + KEYWORD(upformalsize) + KEYWORD(moduleid) + KEYWORD(funcsize) + KEYWORD(funcid) + KEYWORD(formalwordstypetagged) + KEYWORD(localwordstypetagged) + KEYWORD(formalwordsrefcounted) + KEYWORD(localwordsrefcounted) + // per-module declaration keywords + KEYWORD(flavor) + KEYWORD(srclang) + KEYWORD(globalmemsize) + KEYWORD(globalmemmap) + KEYWORD(globalwordstypetagged) + KEYWORD(globalwordsrefcounted) + KEYWORD(id) + KEYWORD(numfuncs) + KEYWORD(entryfunc) + // file related declaration keywords + KEYWORD(fileinfo) + KEYWORD(filedata) + KEYWORD(srcfileinfo) + KEYWORD(funcinfo) + // special float constants + KEYWORD(nanf) + KEYWORD(nan) + KEYWORD(inff) + KEYWORD(inf) + // pragma + KEYWORD(pragma) + KEYWORD(param) + KEYWORD(func_ex) + KEYWORD(func_var) + // staticvalue + KEYWORD(staticvalue) + // import + KEYWORD(import) + KEYWORD(importpath) + // source position information + KEYWORD(LOC) + // scope and source var to mpl var mapping + KEYWORD(SCOPE) + KEYWORD(ALIAS) + // storage class + KEYWORD(pstatic) + KEYWORD(fstatic) + // file-scope asm + KEYWORD(asmdecl) diff --git a/ecmascript/mapleall/maple_ir/include/lexer.h b/ecmascript/mapleall/maple_ir/include/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..57a6ce6d76aac2bdc8d8aec4eade391e871b6cce --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/lexer.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_LEXER_H +#define MAPLE_IR_INCLUDE_LEXER_H +#include "cstdio" +#include +#include "types_def.h" +#include "tokens.h" +#include "mempool_allocator.h" +#include "mir_module.h" + +namespace maple { +class MIRParser; // circular dependency exists, no other choice +class MIRLexer { + friend MIRParser; + + public: + explicit MIRLexer(MIRModule &mod); + ~MIRLexer() { + airFile = nullptr; + if (airFileInternal.is_open()) { + airFileInternal.close(); + } + } + + void PrepareForFile(const std::string &filename); + void PrepareForString(const std::string &src); + TokenKind NextToken(); + TokenKind LexToken(); + TokenKind GetTokenKind() const { + return kind; + } + + uint32 GetLineNum() const { + return lineNum; + } + + uint32 GetCurIdx() const { + return curIdx; + } + + // get the identifier name after the % or $ prefix + const std::string &GetName() const { + return name; + } + + uint64 GetTheIntVal() const { + return theIntVal; + } + + float GetTheFloatVal() const { + return theFloatVal; + } + + double GetTheDoubleVal() const { + return theDoubleVal; + } + + std::string GetTokenString() const; // for error reporting purpose + + private: + MIRModule &module; + // for storing the different types of constant values + uint64 theIntVal = 0; // also indicates preg number under TK_preg + float theFloatVal = 0.0; + double theDoubleVal = 0.0; + MapleVector seenComments; + std::ifstream *airFile = nullptr; + std::ifstream airFileInternal; + std::string line; + size_t lineBufSize = 0; // the allocated size of line(buffer). + uint32 currentLineSize = 0; + uint32 curIdx = 0; + uint32 lineNum = 0; + TokenKind kind = TK_invalid; + std::string name = ""; // store the name token without the % or $ prefix + MapleUnorderedMap keywordMap; + std::queue mirQueue; + bool needFile = true; + void RemoveReturnInline(std::string &line) { + if (line.empty()) { + return; + } + if (line.back() == '\n') { + line.pop_back(); + } + if (line.back() == '\r') { + line.pop_back(); + } + } + + int ReadALine(); // read a line from MIR (text) file. + int ReadALineByMirQueue(); // read a line from MIR Queue. + void GenName(); + TokenKind GetConstVal(); + TokenKind GetSpecialFloatConst(); + TokenKind GetHexConst(uint32 valStart, bool negative); + TokenKind GetIntConst(uint32 valStart, bool negative); + TokenKind GetFloatConst(uint32 valStart, uint32 startIdx, bool negative); + TokenKind GetSpecialTokenUsingOneCharacter(char c); + TokenKind GetTokenWithPrefixDollar(); + TokenKind GetTokenWithPrefixPercent(); + TokenKind GetTokenWithPrefixAmpersand(); + TokenKind GetTokenWithPrefixAtOrCircumflex(char prefix); + TokenKind GetTokenWithPrefixExclamation(); + TokenKind GetTokenWithPrefixQuotation(); + TokenKind GetTokenWithPrefixDoubleQuotation(); + TokenKind GetTokenSpecial(); + + char GetCharAt(uint32 idx) const { + return line[idx]; + } + + char GetCharAtWithUpperCheck(uint32 idx) const { + return idx < currentLineSize ? line[idx] : 0; + } + + char GetCharAtWithLowerCheck(uint32 idx) const { + return idx >= 0 ? 
line[idx] : 0; + } + + char GetCurrentCharWithUpperCheck() { + return curIdx < currentLineSize ? line[curIdx] : 0; + } + + char GetNextCurrentCharWithUpperCheck() { + ++curIdx; + return curIdx < currentLineSize ? line[curIdx] : 0; + } + + void SetFile(std::ifstream &file) { + airFile = &file; + } + + std::ifstream *GetFile() const { + return airFile; + } + + void SetMirQueue(const std::string &fileText) { + StringUtils::Split(fileText, mirQueue, '\n'); + needFile = false; + } +}; + +inline bool IsPrimitiveType(TokenKind tk) { + return (tk >= TK_void) && (tk < TK_unknown); +} + +inline bool IsVarName(TokenKind tk) { + return (tk == TK_lname) || (tk == TK_gname); +} + +inline bool IsExprBinary(TokenKind tk) { + return (tk >= TK_add) && (tk <= TK_sub); +} + +inline bool IsConstValue(TokenKind tk) { + return (tk >= TK_intconst) && (tk <= TK_doubleconst); +} + +inline bool IsConstAddrExpr(TokenKind tk) { + return (tk == TK_addrof) || (tk == TK_addroffunc) || (tk == TK_addroflabel) || + (tk == TK_conststr) || (tk == TK_conststr16); +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_LEXER_H diff --git a/ecmascript/mapleall/maple_ir/include/memory_order_attrs.def b/ecmascript/mapleall/maple_ir/include/memory_order_attrs.def new file mode 100644 index 0000000000000000000000000000000000000000..e7a241ad03b3c953400f2d9bebc97c690b9cba50 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/memory_order_attrs.def @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + ATTR(memory_order_relaxed) + ATTR(memory_order_consume) + ATTR(memory_order_acquire) + ATTR(memory_order_release) + ATTR(memory_order_acq_rel) + ATTR(memory_order_seq_cst) diff --git a/ecmascript/mapleall/maple_ir/include/metadata_layout.h b/ecmascript/mapleall/maple_ir/include/metadata_layout.h new file mode 100644 index 0000000000000000000000000000000000000000..86b975177a1a4d1174f28ac721e4783d70d274a7 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/metadata_layout.h @@ -0,0 +1,352 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef METADATA_LAYOUT_H
+#define METADATA_LAYOUT_H
+#include <cstdint>
+
+// metadata layout is shared between the maple compiler and the runtime, thus not in namespace maplert
+// some of the reference fields of metadata are stored as relative offsets
+// (for example, the declaring class of Fields/Methods), which can be negative
+#ifdef USE_32BIT_REF
+using MetaRef = uint32_t;   // consistent with reffield_t in address.h
+#else
+using MetaRef = uintptr_t;  // consistent with reffield_t in address.h
+#endif // USE_32BIT_REF
+
+// DataRefOffset aims to represent a reference to data in a maple file, which is already an offset.
+// DataRefOffset is meant to have pointer size.
+// All Xx32 data types defined in this file aim to use 32 bits to save a 64-bit address, and thus are
+// specific to 64-bit platforms.
+struct DataRefOffset32 {
+  int32_t refOffset;
+  template <class T>
+  inline void SetDataRef(T ref);
+  template <class T>
+  inline T GetDataRef() const;
+  inline int32_t GetRawValue() const;
+  inline void SetRawValue(int32_t value);
+};
+
+struct DataRefOffsetPtr {
+  intptr_t refOffset;
+  template <class T>
+  inline void SetDataRef(T ref);
+  template <class T>
+  inline T GetDataRef() const;
+  inline intptr_t GetRawValue() const;
+  inline void SetRawValue(intptr_t value);
+};
+
+struct DataRefOffset {
+#ifdef USE_32BIT_REF
+  DataRefOffset32 refOffset;
+#else
+  DataRefOffsetPtr refOffset;
+#endif
+  template <class T>
+  inline void SetDataRef(T ref);
+  template <class T>
+  inline T GetDataRef() const;
+  inline intptr_t GetRawValue() const;
+  inline void SetRawValue(intptr_t value);
+};
+
+struct MethodFieldRef {
+  // MethodFieldRef aims to represent a reference to fields/methods in a maple file, which is already an offset.
+  // also, the offset LSB may be set to 1, to indicate that it refers to compact fields/methods.
+  enum MethodFieldRefFormat {
+    kMethodFieldRefIsCompact = 1,
+  };
+  DataRefOffsetPtr refOffset;
+  template <class T>
+  inline void SetDataRef(T ref);
+  template <class T>
+  inline T GetDataRef() const;
+  inline bool IsCompact() const;
+  template <class T>
+  inline T GetCompactData() const;
+  inline intptr_t GetRawValue() const;
+  inline void SetRawValue(intptr_t value);
+};
+
+// DataRef aims at references to data in a maple file (generated by the maple compiler) and is aligned to at least 4 bytes.
+// Perhaps MDataRef would be a better fit; still, DataRef is chosen to keep it common.
+// DataRef allows 4 formats of value:
+//   0. "label_name" for a direct reference
+//   1. "label_name - . + 1" for padding, unused
+//   2. "label_name - . + 2" for a reference in offset format
+//   3. "indirect.label_name - . + 3" for an indirect reference
+//      (this format aims to support lld, which does not support the expression "global_symbol - .")
+// DataRef is self-decoded by also encoding the format, and is defined for binary compatibility.
+// If no compatibility problem is involved, DataRefOffsetPtr is preferred.
+enum DataRefFormat {
+  kDataRefIsDirect = 0,   // must be 0
+  kDataRefPadding = 1,    // unused
+  kDataRefIsOffset = 2,
+  kDataRefIsIndirect = 3, // read-only
+  kDataRefBitMask = 3,
+};
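The four formats are distinguished by the low two bits (`kDataRefBitMask`) of the stored word: a decoder masks off the tag, subtracts it back out, and applies the result directly, self-relatively, or through one extra indirection. Below is a minimal sketch of that rule, assuming `T` is a pointer type and that "." is the address of the field itself; `DecodeDataRef` is a hypothetical helper for illustration only, since the real logic lives in the inline `GetDataRef`/`SetDataRef` members that this header merely declares:

```
// Hypothetical decoder for the DataRef format table above (sketch only).
template <class T>
T DecodeDataRef(const DataRef &ref) {
  uintptr_t v = ref.refVal;
  uintptr_t dot = reinterpret_cast<uintptr_t>(&ref);  // "." in the comments above
  switch (v & kDataRefBitMask) {
    case kDataRefIsDirect:    // plain address of label_name
      return reinterpret_cast<T>(v);
    case kDataRefIsOffset:    // "label_name - . + 2": undo the tag, add back "."
      return reinterpret_cast<T>(dot + static_cast<intptr_t>(v - kDataRefIsOffset));
    case kDataRefIsIndirect:  // "indirect.label_name - . + 3": one extra load
      return *reinterpret_cast<T *>(dot + static_cast<intptr_t>(v - kDataRefIsIndirect));
    default:                  // kDataRefPadding is unused
      return nullptr;
  }
}
```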
+struct DataRef32 {
+  // be careful when *refVal* is treated as an offset, which is actually a signed integer.
+  uint32_t refVal;
+  template <class T>
+  inline T GetDataRef() const;
+  template <class T>
+  inline void SetDataRef(T ref, DataRefFormat format = kDataRefIsDirect);
+  template <class T>
+  inline T GetRawValue() const;
+};
+
+struct DataRef {
+  uintptr_t refVal;
+  template <class T>
+  inline T GetDataRef() const;
+  template <class T>
+  inline void SetDataRef(const T ref, const DataRefFormat format = kDataRefIsDirect);
+  template <class T>
+  inline T GetRawValue() const;
+};
+
+// GctibRef aims to represent a reference to a gctib in a maple file, which is an offset by default.
+// GctibRef is meant to have pointer size and to be aligned to at least 4 bytes.
+// GctibRef allows 2 formats of value:
+//   0. "label_name - . + 0" for a reference in offset format
+//   1. "indirect.label_name - . + 1" for an indirect reference
+//      (this format aims to support lld, which does not support the expression "global_symbol - .")
+// GctibRef is self-decoded by also encoding the format, and is defined for binary compatibility.
+// If no compatibility problem is involved, DataRef is preferred.
+enum GctibRefFormat {
+  kGctibRefIsOffset = 0, // default
+  kGctibRefIsIndirect = 1,
+  kGctibRefBitMask = 3
+};
+
+struct GctibRef32 {
+  // be careful when *refVal* is treated as an offset, which is actually a signed integer.
+  uint32_t refVal;
+  template <class T>
+  inline T GetGctibRef() const;
+  template <class T>
+  inline void SetGctibRef(T ref, GctibRefFormat format = kGctibRefIsOffset);
+};
+
+struct GctibRef {
+  uintptr_t refVal;
+  template <class T>
+  inline T GetGctibRef() const;
+  template <class T>
+  inline void SetGctibRef(const T ref, const GctibRefFormat format = kGctibRefIsOffset);
+};
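The size contracts stated in these comments are easy to check mechanically: the plain variants are pointer-sized, while the Xx32 variants compress a reference to 4 bytes on 64-bit targets. The following assertions are a reader-side sanity check, not part of the header:

```
static_assert(sizeof(DataRef) == sizeof(void *), "DataRef is meant to have pointer size");
static_assert(sizeof(GctibRef) == sizeof(void *), "GctibRef is meant to have pointer size");
static_assert(sizeof(DataRef32) == 4, "the *32 variants use 32 bits to hold a 64-bit address");
static_assert(sizeof(GctibRef32) == 4, "ditto for GctibRef32");
```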
+// MByteRef is meant to represent a reference to data defined in a maple file. It is a direct reference or an offset.
+// MByteRef is self-encoded/decoded and aligned to 1 byte.
+// Unlike DataRef, the format of an MByteRef is determined by its value.
+struct MByteRef {
+  uintptr_t refVal; // the initializer prefers this field to be a pointer
+
+#if defined(__arm__) || defined(USE_ARM32_MACRO)
+  // assume the address range 0 ~ 256MB is unused in the arm runtime;
+  // kEncodedOffsetMin ~ kEncodedOffsetMax is the value range of an encoded offset
+  static constexpr intptr_t kOffsetBound = 128 * 1024 * 1024;
+  static constexpr intptr_t kOffsetMin = -kOffsetBound;
+  static constexpr intptr_t kOffsetMax = kOffsetBound;
+
+  static constexpr intptr_t kPositiveOffsetBias = 128 * 1024 * 1024;
+  static constexpr intptr_t kEncodedOffsetMin = kPositiveOffsetBias + kOffsetMin;
+  static constexpr intptr_t kEncodedOffsetMax = kPositiveOffsetBias + kOffsetMax;
+#else
+  enum {
+    kBiasBitPosition = sizeof(refVal) * 8 - 4, // the most significant 4 bits
+  };
+
+  static constexpr uintptr_t kOffsetBound = 256 * 1024 * 1024; // according to kDsoLoadedAddessEnd = 0xF0000000
+  static constexpr uintptr_t kPositiveOffsetMin = 0;
+  static constexpr uintptr_t kPositiveOffsetMax = kOffsetBound;
+
+  static constexpr uintptr_t kPositiveOffsetBias = static_cast<uintptr_t>(6) << kBiasBitPosition;
+  static constexpr uintptr_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias;
+  static constexpr uintptr_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias;
+#endif
+
+  template <class T>
+  inline T GetRef() const;
+  template <class T>
+  inline void SetRef(const T ref);
+  inline bool IsOffset() const;
+};
+
+struct MByteRef32 {
+  uint32_t refVal;
+  static constexpr uint32_t kOffsetBound = 256 * 1024 * 1024; // according to kDsoLoadedAddessEnd = 0xF0000000
+  static constexpr uint32_t kPositiveOffsetMin = 0;
+  static constexpr uint32_t kPositiveOffsetMax = kOffsetBound;
+
+  static constexpr uint32_t kPositiveOffsetBias = 0x60000000; // the most significant 4 bits: 0110
+  static constexpr uint32_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias;
+  static constexpr uint32_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias;
+
+  static constexpr uint32_t kDirectRefMin = 0xC0000000; // according to kDsoLoadedAddessStart = 0xC0000000
+  static constexpr uint32_t kDirectRefMax = 0xF0000000; // according to kDsoLoadedAddessEnd = 0xF0000000
+
+  static constexpr int32_t kNegativeOffsetMin = -(256 * 1024 * 1024); // -kOffsetBound
+  static constexpr int32_t kNegativeOffsetMax = 0;
+
+  template <class T>
+  inline T GetRef() const;
+  template <class T>
+  inline void SetRef(T ref);
+  inline bool IsOffset() const;
+  inline bool IsPositiveOffset() const;
+  inline bool IsNegativeOffset() const;
+};
+
+// MethodMeta is defined in methodmeta.h
+// FieldMeta is defined in fieldmeta.h
+// MethodDesc contains MethodMetadata and a stack map
+struct MethodDesc {
+  // relative offset of the method metadata, relative to the current PC.
+  // the method metadata is in compact format if this offset is odd.
+  uint32_t metadataOffset;
+
+  int16_t localRefOffset;
+  uint16_t localRefNumber;
+
+  // the stack map for a method might be placed here
+};
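On the 64-bit branch above, an encoded positive offset carries the 0b0110 bias in its top four bits, so classification reduces to a range check and decoding to a bias subtraction. Here is a sketch under those assumptions; the helper names are hypothetical, the real `IsOffset`/`GetRef` inlines are only declared in this header, and the offset is taken to be relative to the field's own address, like the other self-relative references above:

```
// Sketch only: classify and decode one 64-bit MByteRef value.
inline bool LooksLikeEncodedOffset(uintptr_t v) {
  return v >= MByteRef::kEncodedPosOffsetMin && v < MByteRef::kEncodedPosOffsetMax;
}

inline void *DecodeMByteRef(const MByteRef &r) {
  if (LooksLikeEncodedOffset(r.refVal)) {
    uintptr_t offset = r.refVal - MByteRef::kPositiveOffsetBias; // strip the 0b0110 bias
    return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(&r) + offset);
  }
  return reinterpret_cast<void *>(r.refVal); // otherwise: a direct reference
}
```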
+// Note: class init in maplebe and cg is highly dependent on this type.
+// update aarch64rtsupport.h if you modify this definition.
+struct ClassMetadataRO {
+  MByteRef className;
+  MethodFieldRef fields;  // points to the info of fields
+  MethodFieldRef methods; // points to the info of methods
+  union {  // for an array, the classinfo of its element; otherwise, the classinfo of the parent
+    DataRef superclass;
+    DataRef componentClass;
+  };
+
+  uint16_t numOfFields;
+  uint16_t numOfMethods;
+
+#ifndef USE_32BIT_REF
+  uint16_t flag;
+  uint16_t numOfSuperclasses;
+  uint32_t padding;
+#endif // !USE_32BIT_REF
+
+  uint32_t mod;
+  DataRefOffset32 annotation;
+  DataRefOffset32 clinitAddr;
+};
+
+static constexpr size_t kPageSize = 4096;
+static constexpr size_t kCacheLine = 64;
+
+// according to kSpaceAnchor and kFireBreak defined in bp_allocator.cpp,
+// the address of this readable page is set as kProtectedMemoryStart for java classes
+static constexpr uintptr_t kClInitStateAddrBase = 0xc0000000 - (1u << 20) * 2;
+
+// On Kirin 980, two mmap'ed memory addresses that lie an odd number of pages apart may suffer unreasonable
+// L1&L2 cache conflicts. kClassInitializedState is used as the init state for a class that has no method;
+// it will be loaded in many places for a Decouple-build app. If we set the value to kClInitStateAddrBase
+// (0xbfe00000), it may conflict with the yieldpoint test address globalPollingPage, which is defined in
+// yieldpoint.cpp. Hence we add a 1-cache-line (64 byte) offset here to avoid such a conflict.
+static constexpr uintptr_t kClassInitializedState = kClInitStateAddrBase + kCacheLine;
+
+extern "C" uint8_t classInitProtectRegion[];
+
+// Note there is no state to indicate that a class is already initialized.
+// Any state beyond those listed below is treated as initialized.
+enum ClassInitState {
+  kClassInitStateMin = 0,
+  kClassUninitialized = 1,
+  kClassInitializing = 2,
+  kClassInitFailed = 3,
+  kClassInitialized = 4,
+  kClassInitStateMax = 4,
+};
+
+enum SEGVAddr {
+  kSEGVAddrRangeStart = kPageSize + 0,
+
+  // Note any readable address is treated as Initialized.
+  kSEGVAddrForClassInitStateMin = kSEGVAddrRangeStart + kClassInitStateMin,
+  kSEGVAddrForClassUninitialized = kSEGVAddrForClassInitStateMin + kClassUninitialized,
+  kSEGVAddrForClassInitializing = kSEGVAddrForClassInitStateMin + kClassInitializing,
+  kSEGVAddrForClassInitFailed = kSEGVAddrForClassInitStateMin + kClassInitFailed,
+  kSEGVAddrFoClassInitStateMax = kSEGVAddrForClassInitStateMin + kClassInitStateMax,
+
+  kSEGVAddrRangeEnd,
+};
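The convention above encodes every non-initialized state as a faulting address just past `kSEGVAddrRangeStart`, which is why no explicit "initialized" state exists: any value beyond the listed range (for example, the ordinary readable address `kClassInitializedState`) counts as initialized. A reader-side sketch of that check; the helper name is hypothetical:

```
// Sketch: interpret a raw init-state word per the SEGVAddr convention above.
inline bool LooksInitialized(uintptr_t state) {
  // "Any state beyond listed below is treated as initialized."
  return state > kSEGVAddrFoClassInitStateMax;
}
```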
+struct ClassMetadata {
+  // object common fields
+  MetaRef shadow;  // points to the classinfo of java/lang/Class
+  int32_t monitor;
+
+  // other fields
+  uint16_t clIndex; // 8-bit ClassLoader index, used for querying the address of the related ClassLoader instance.
+  union {
+    uint16_t objSize;
+    uint16_t componentSize;
+  } sizeInfo;
+
+#ifdef USE_32BIT_REF // for alignment purposes
+  uint16_t flag;
+  uint16_t numOfSuperclasses;
+#endif // USE_32BIT_REF
+
+  DataRef iTable; // iTable of the current class, used for interface calls; the content will be inserted into classinfo
+  DataRef vTable; // vTable of the current class, used for virtual calls; the content will be inserted into classinfo
+  GctibRef gctib; // for rc
+
+#ifdef USE_32BIT_REF
+  DataRef32 classInfoRo;
+  DataRef32 cacheFalseClass;
+#else
+  DataRef classInfoRo;
+#endif
+
+  union {
+    uintptr_t initState; // a readable address for initState means initialized
+    DataRef cacheTrueClass;
+  };
+
+ public:
+  static inline intptr_t OffsetOfInitState() {
+    ClassMetadata *base = nullptr;
+    return reinterpret_cast<intptr_t>(&(base->initState));
+  }
+
+  uintptr_t GetInitStateRawValue() const {
+    return __atomic_load_n(&initState, __ATOMIC_ACQUIRE);
+  }
+
+  template <class T>
+  void SetInitStateRawValue(T val) {
+    __atomic_store_n(&initState, reinterpret_cast<uintptr_t>(val), __ATOMIC_RELEASE);
+  }
+};
+
+// function to set a Class/Field/Method metadata's shadow field, avoiding type conversion at call sites
+// Note 1: here we don't do a null check or a type-compatibility check
+// Note 2: C should be of jclass/ClassMetadata* type
+template <typename M, typename C>
+static inline void MRTSetMetadataShadow(M *meta, C cls) {
+  meta->shadow = static_cast<MetaRef>(reinterpret_cast<uintptr_t>(cls));
+}
+
+#endif // METADATA_LAYOUT_H
diff --git a/ecmascript/mapleall/maple_ir/include/mir_builder.h b/ecmascript/mapleall/maple_ir/include/mir_builder.h
new file mode 100755
index 0000000000000000000000000000000000000000..27710a4e4d93c387875b8fff4298c977676f2d58
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_builder.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_BUILDER_H +#define MAPLE_IR_INCLUDE_MIR_BUILDER_H +#include +#include +#include +#include +#ifdef _WIN32 +#include +#endif +#include "opcodes.h" +#include "prim_types.h" +#include "mir_type.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_nodes.h" +#include "mir_module.h" +#include "mir_preg.h" +#include "mir_function.h" +#include "printing.h" +#include "intrinsic_op.h" +#include "opcode_info.h" +#include "global_tables.h" + +namespace maple { +using ArgPair = std::pair; +using ArgVector = MapleVector; +class MIRBuilder { + public: + enum MatchStyle { + kUpdateFieldID = 0, // do not match but traverse to update fieldID + kMatchTopField = 1, // match top level field only + kMatchAnyField = 2, // match any field + kParentFirst = 4, // traverse parent first + kFoundInChild = 8, // found in child + }; + + explicit MIRBuilder(MIRModule *module) + : mirModule(module), + incompleteTypeRefedSet(mirModule->GetMPAllocator().Adapter()) {} + + virtual ~MIRBuilder() = default; + + virtual void SetCurrentFunction(MIRFunction &fun) { + mirModule->SetCurFunction(&fun); + } + + virtual MIRFunction *GetCurrentFunction() const { + return mirModule->CurFunction(); + } + MIRFunction *GetCurrentFunctionNotNull() const { + MIRFunction *func = GetCurrentFunction(); + CHECK_FATAL(func != nullptr, "nullptr check"); + return func; + } + + MIRModule &GetMirModule() { + return *mirModule; + } + + const MapleSet &GetIncompleteTypeRefedSet() const { + return incompleteTypeRefedSet; + } + + std::vector> &GetExtraFieldsTuples() { + return extraFieldsTuples; + } + + unsigned int GetLineNum() const { + return lineNum; + } + void SetLineNum(unsigned int num) { + lineNum = num; + } + + GStrIdx GetOrCreateStringIndex(const std::string &str) const { + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + } + + GStrIdx GetOrCreateStringIndex(GStrIdx strIdx, const std::string &str) const { + std::string firstString(GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx)); + firstString.append(str); + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(firstString); + } + + GStrIdx GetStringIndex(const std::string &str) const { + return GlobalTables::GetStrTable().GetStrIdxFromName(str); + } + + MIRFunction *GetOrCreateFunction(const std::string&, TyIdx); + MIRFunction *GetFunctionFromSymbol(const MIRSymbol &funcst); + MIRFunction *GetFunctionFromStidx(StIdx stIdx); + MIRFunction *GetFunctionFromName(const std::string&); + // For compiler-generated metadata struct + void AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, int64 constValue); + void AddAddrofFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, const MIRSymbol &fieldSt); + void AddAddroffuncFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &funcSt); + + bool TraverseToNamedField(MIRStructType &structType, GStrIdx nameIdx, uint32 &fieldID); + bool TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, + uint32 &fieldID, unsigned int matchStyle); + void TraverseToNamedFieldWithType(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, uint32 &fieldID, + uint32 &idx); + + FieldID GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx, unsigned int matchStyle); + FieldID GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx); + FieldID GetStructFieldIDFromNameAndTypeParentFirst(MIRType 
&type, const std::string &name, TyIdx idx); + FieldID GetStructFieldIDFromNameAndTypeParentFirstFoundInChild(MIRType &type, const std::string &name, TyIdx idx); + + FieldID GetStructFieldIDFromFieldName(MIRType &type, const std::string &name); + FieldID GetStructFieldIDFromFieldNameParentFirst(MIRType *type, const std::string &name); + + void SetStructFieldIDFromFieldName(MIRStructType &structType, const std::string &name, GStrIdx newStrIdx, + const MIRType &newFieldType); + // for creating Function. + MIRSymbol *GetFunctionArgument(MIRFunction &fun, uint32 index) const { + CHECK(index < fun.GetFormalCount(), "index out of range in GetFunctionArgument"); + return fun.GetFormal(index); + } + + MIRFunction *CreateFunction(const std::string &name, const MIRType &returnType, const ArgVector &arguments, + bool isVarg = false, bool createBody = true) const; + MIRFunction *CreateFunction(StIdx stIdx, bool addToTable = true) const; + virtual void UpdateFunction(MIRFunction&, const MIRType*, const ArgVector&) {} + + MIRSymbol *GetSymbolFromEnclosingScope(StIdx stIdx) const; + virtual MIRSymbol *GetOrCreateLocalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetLocalDecl(const std::string &str); + MIRSymbol *CreateLocalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetOrCreateGlobalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetGlobalDecl(const std::string &str); + MIRSymbol *GetDecl(const std::string &str); + MIRSymbol *CreateGlobalDecl(const std::string &str, + const MIRType &type, + MIRStorageClass sc = kScGlobal); + MIRSymbol *GetOrCreateDeclInFunc(const std::string &str, const MIRType &type, MIRFunction &func); + // for creating Expression + ConstvalNode *CreateConstval(MIRConst *constVal); + ConstvalNode *CreateIntConst(uint64, PrimType); + ConstvalNode *CreateFloatConst(float val); + ConstvalNode *CreateDoubleConst(double val); + ConstvalNode *CreateFloat128Const(const uint64 *val); + ConstvalNode *GetConstInt(MemPool &memPool, int val); + ConstvalNode *GetConstInt(int val) { + return CreateIntConst(val, PTY_i32); + } + + ConstvalNode *GetConstUInt1(bool val) { + return CreateIntConst(val, PTY_u1); + } + + ConstvalNode *GetConstUInt8(uint8 val) { + return CreateIntConst(val, PTY_u8); + } + + ConstvalNode *GetConstUInt16(uint16 val) { + return CreateIntConst(val, PTY_u16); + } + + ConstvalNode *GetConstUInt32(uint32 val) { + return CreateIntConst(val, PTY_u32); + } + + ConstvalNode *GetConstUInt64(uint64 val) { + return CreateIntConst(val, PTY_u64); + } + + ConstvalNode *CreateAddrofConst(BaseNode&); + ConstvalNode *CreateAddroffuncConst(const BaseNode&); + ConstvalNode *CreateStrConst(const BaseNode&); + ConstvalNode *CreateStr16Const(const BaseNode&); + SizeoftypeNode *CreateExprSizeoftype(const MIRType &type); + FieldsDistNode *CreateExprFieldsDist(const MIRType &type, FieldID field1, FieldID field2); + AddrofNode *CreateExprAddrof(FieldID fieldID, const MIRSymbol &symbol, MemPool *memPool = nullptr); + AddrofNode *CreateExprAddrof(FieldID fieldID, StIdx symbolStIdx, MemPool *memPool = nullptr); + AddroffuncNode *CreateExprAddroffunc(PUIdx, MemPool *memPool = nullptr); + AddrofNode *CreateExprDread(const MIRType &type, FieldID fieldID, const MIRSymbol &symbol); + AddrofNode *CreateExprDread(PrimType ptyp, FieldID fieldID, const MIRSymbol &symbol); + virtual AddrofNode *CreateExprDread(MIRType &type, MIRSymbol &symbol); + virtual AddrofNode *CreateExprDread(MIRSymbol &symbol); + AddrofNode *CreateExprDread(PregIdx pregID, PrimType pty); + 
AddrofNode *CreateExprDread(MIRSymbol &symbol, uint16 fieldID); + DreadoffNode *CreateExprDreadoff(Opcode op, PrimType pty, const MIRSymbol &symbol, int32 offset); + RegreadNode *CreateExprRegread(PrimType pty, PregIdx regIdx); + IreadNode *CreateExprIread(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, BaseNode *addr); + IreadoffNode *CreateExprIreadoff(PrimType pty, int32 offset, BaseNode *opnd0); + IreadFPoffNode *CreateExprIreadFPoff(PrimType pty, int32 offset); + IaddrofNode *CreateExprIaddrof(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, BaseNode *addr); + IaddrofNode *CreateExprIaddrof(PrimType returnTypePty, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr); + BinaryNode *CreateExprBinary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1); + BinaryNode *CreateExprBinary(Opcode opcode, PrimType pty, BaseNode *opnd0, BaseNode *opnd1) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pty)); + return CreateExprBinary(opcode, *ty, opnd0, opnd1); + } + TernaryNode *CreateExprTernary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1, BaseNode *opnd2); + CompareNode *CreateExprCompare(Opcode opcode, const MIRType &type, const MIRType &opndType, BaseNode *opnd0, + BaseNode *opnd1); + UnaryNode *CreateExprUnary(Opcode opcode, const MIRType &type, BaseNode *opnd); + GCMallocNode *CreateExprGCMalloc(Opcode opcode, const MIRType &ptype, const MIRType &type); + JarrayMallocNode *CreateExprJarrayMalloc(Opcode opcode, const MIRType &ptype, const MIRType &type, BaseNode *opnd); + TypeCvtNode *CreateExprTypeCvt(Opcode o, PrimType toPrimType, PrimType fromPrimType, BaseNode &opnd); + TypeCvtNode *CreateExprTypeCvt(Opcode o, const MIRType &type, const MIRType &fromtype, BaseNode *opnd); + ExtractbitsNode *CreateExprExtractbits(Opcode o, const MIRType &type, uint32 bOffset, uint32 bSize, BaseNode *opnd); + ExtractbitsNode *CreateExprExtractbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, BaseNode *opnd); + DepositbitsNode *CreateExprDepositbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, + BaseNode *leftOpnd, BaseNode* rightOpnd); + RetypeNode *CreateExprRetype(const MIRType &type, const MIRType &fromType, BaseNode *opnd); + RetypeNode *CreateExprRetype(const MIRType &type, PrimType fromType, BaseNode *opnd); + ArrayNode *CreateExprArray(const MIRType &arrayType); + ArrayNode *CreateExprArray(const MIRType &arrayType, BaseNode *op); + ArrayNode *CreateExprArray(const MIRType &arrayType, BaseNode *op1, BaseNode *op2); + ArrayNode *CreateExprArray(const MIRType &arrayType, std::vector ops); + IntrinsicopNode *CreateExprIntrinsicop(MIRIntrinsicID id, Opcode op, PrimType primType, TyIdx tyIdx, + const MapleVector &ops); + IntrinsicopNode *CreateExprIntrinsicop(MIRIntrinsicID idx, Opcode opcode, const MIRType &type, + const MapleVector &ops); + // for creating Statement. 
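To make the shape of this interface concrete before the statement-creation declarations that follow: expression nodes are built bottom-up, then wrapped into a statement and appended to the current function. A minimal sketch of `a = a + 1`, using only methods declared in this header; `builder` (a ready MIRBuilder) and `aSym` (a local i32 symbol) are assumptions for illustration:

```
// Sketch: a = a + 1 with the MIRBuilder API (builder/aSym are assumptions).
BaseNode *lhs = builder.CreateExprDread(*aSym);          // read local 'a'
ConstvalNode *one = builder.CreateIntConst(1, PTY_i32);  // integer constant 1
BinaryNode *sum = builder.CreateExprBinary(OP_add, PTY_i32, lhs, one);
DassignNode *store = builder.CreateStmtDassign(*aSym, /*fieldID=*/0, sum);
builder.AddStmtInCurrentFunctionBody(*store);            // append to the current function
```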
+ NaryStmtNode *CreateStmtReturn(BaseNode *rVal); + NaryStmtNode *CreateStmtNary(Opcode op, BaseNode *rVal); + NaryStmtNode *CreateStmtNary(Opcode op, const MapleVector &rVals); + AssertNonnullStmtNode *CreateStmtAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx funcNameIdx); + CallAssertNonnullStmtNode *CreateStmtCallAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx callFuncNameIdx, + size_t index, GStrIdx stmtFuncNameIdx); + CallAssertBoundaryStmtNode *CreateStmtCallAssertBoundary(Opcode op, const MapleVector &rVals, + GStrIdx funcNameIdx, size_t index, GStrIdx stmtFuncNameIdx); + AssertBoundaryStmtNode *CreateStmtAssertBoundary(Opcode op, const MapleVector &rVals, GStrIdx funcNameIdx); + UnaryStmtNode *CreateStmtUnary(Opcode op, BaseNode *rVal); + UnaryStmtNode *CreateStmtThrow(BaseNode *rVal); + DassignNode *CreateStmtDassign(const MIRSymbol &var, FieldID fieldID, BaseNode *src); + DassignNode *CreateStmtDassign(StIdx sIdx, FieldID fieldID, BaseNode *src); + RegassignNode *CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src); + IassignNode *CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src); + IassignoffNode *CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *opnd0, BaseNode *src); + IassignFPoffNode *CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src); + CallNode *CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opcode = OP_call); + CallNode *CreateStmtCall(const std::string &name, const MapleVector &args); + CallNode *CreateStmtVirtualCall(PUIdx puIdx, const MapleVector &args) { + return CreateStmtCall(puIdx, args, OP_virtualcall); + } + + CallNode *CreateStmtSuperclassCall(PUIdx puIdx, const MapleVector &args) { + return CreateStmtCall(puIdx, args, OP_superclasscall); + } + + CallNode *CreateStmtInterfaceCall(PUIdx puIdx, const MapleVector &args) { + return CreateStmtCall(puIdx, args, OP_interfacecall); + } + + IcallNode *CreateStmtIcall(const MapleVector &args); + IcallNode *CreateStmtIcallAssigned(const MapleVector &args, const MIRSymbol &ret); + IcallNode *CreateStmtIcallproto(const MapleVector &args); + IcallNode *CreateStmtIcallprotoAssigned(const MapleVector &args, const MIRSymbol &ret); + // For Call, VirtualCall, SuperclassCall, InterfaceCall + IntrinsiccallNode *CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments, + TyIdx tyIdx = TyIdx()); + IntrinsiccallNode *CreateStmtXintrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments); + CallNode *CreateStmtCallAssigned(PUIdx puidx, const MIRSymbol *ret, Opcode op = OP_callassigned); + CallNode *CreateStmtCallAssigned(PUIdx puidx, const MapleVector &args, const MIRSymbol *ret, + Opcode op = OP_callassigned, TyIdx tyIdx = TyIdx()); + CallNode *CreateStmtCallRegassigned(PUIdx, PregIdx, Opcode); + CallNode *CreateStmtCallRegassigned(PUIdx, PregIdx, Opcode, BaseNode *opnd); + CallNode *CreateStmtCallRegassigned(PUIdx, const MapleVector&, PregIdx, Opcode); + IntrinsiccallNode *CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &arguments, + PregIdx retPregIdx); + IntrinsiccallNode *CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &arguments, + const MIRSymbol *ret, TyIdx tyIdx = TyIdx()); + IntrinsiccallNode *CreateStmtXintrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret); + IfStmtNode *CreateStmtIf(BaseNode *cond); + IfStmtNode *CreateStmtIfThenElse(BaseNode *cond); + DoloopNode *CreateStmtDoloop(StIdx, bool, BaseNode*, BaseNode*, BaseNode*); 
+ SwitchNode *CreateStmtSwitch(BaseNode *opnd, LabelIdx defaultLabel, const CaseVector &switchTable); + GotoNode *CreateStmtGoto(Opcode o, LabelIdx labIdx); + JsTryNode *CreateStmtJsTry(Opcode o, LabelIdx cLabIdx, LabelIdx fLabIdx); + TryNode *CreateStmtTry(const MapleVector &cLabIdxs); + CatchNode *CreateStmtCatch(const MapleVector &tyIdxVec); + LabelIdx GetOrCreateMIRLabel(const std::string &name); + LabelIdx CreateLabIdx(MIRFunction &mirFunc); + LabelNode *CreateStmtLabel(LabelIdx labIdx); + StmtNode *CreateStmtComment(const std::string &comment); + CondGotoNode *CreateStmtCondGoto(BaseNode *cond, Opcode op, LabelIdx labIdx); + void AddStmtInCurrentFunctionBody(StmtNode &stmt); + MIRSymbol *GetSymbol(TyIdx, const std::string&, MIRSymKind, MIRStorageClass, uint8, bool) const; + MIRSymbol *GetSymbol(TyIdx, GStrIdx, MIRSymKind, MIRStorageClass, uint8, bool) const; + MIRSymbol *GetOrCreateSymbol(TyIdx, const std::string&, MIRSymKind, MIRStorageClass, MIRFunction*, uint8, bool) const; + MIRSymbol *GetOrCreateSymbol(TyIdx, GStrIdx, MIRSymKind, MIRStorageClass, MIRFunction*, uint8, bool) const; + MIRSymbol *CreatePregFormalSymbol(TyIdx, PregIdx, MIRFunction&) const; + // for creating symbol + MIRSymbol *CreateSymbol(TyIdx, const std::string&, MIRSymKind, MIRStorageClass, MIRFunction*, uint8) const; + MIRSymbol *CreateSymbol(TyIdx, GStrIdx, MIRSymKind, MIRStorageClass, MIRFunction*, uint8) const; + MIRSymbol *CreateConstStringSymbol(const std::string &symbolName, const std::string &content); + // for creating nodes + AddrofNode *CreateAddrof(const MIRSymbol &st, PrimType pty = PTY_ptr); + AddrofNode *CreateDread(const MIRSymbol &st, PrimType pty); + virtual MemPool *GetCurrentFuncCodeMp(); + virtual MapleAllocator *GetCurrentFuncCodeMpAllocator(); + virtual MemPool *GetCurrentFuncDataMp(); + + virtual void GlobalLock() {} + virtual void GlobalUnlock() {} + + private: + MIRSymbol *GetOrCreateGlobalDecl(const std::string &str, TyIdx tyIdx, bool &created) const; + MIRSymbol *GetOrCreateLocalDecl(const std::string &str, TyIdx tyIdx, MIRSymbolTable &symbolTable, + bool &created) const; + + MIRModule *mirModule; + MapleSet incompleteTypeRefedSet; + // + std::vector> extraFieldsTuples; + unsigned int lineNum = 0; +}; + +class MIRBuilderExt : public MIRBuilder { + public: + explicit MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex = nullptr); + virtual ~MIRBuilderExt() = default; + + void SetCurrentFunction(MIRFunction &func) override { + curFunction = &func; + } + + MIRFunction *GetCurrentFunction() const override { + return curFunction; + } + + MemPool *GetCurrentFuncCodeMp() override; + MapleAllocator *GetCurrentFuncCodeMpAllocator() override; + void GlobalLock() override; + void GlobalUnlock() override; + + private: + MIRFunction *curFunction = nullptr; + pthread_mutex_t *mutex = nullptr; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_BUILDER_H diff --git a/ecmascript/mapleall/maple_ir/include/mir_config.h b/ecmascript/mapleall/maple_ir/include/mir_config.h new file mode 100644 index 0000000000000000000000000000000000000000..7e753473baf5325cecb344cf0f87bc2db2c0e35e --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/mir_config.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// configuration definition for code in maple_ir namespace +#ifndef MAPLE_IR_INCLUDE_MIR_CONFIG_H +#define MAPLE_IR_INCLUDE_MIR_CONFIG_H + +// MIR_FEATURE_FULL = 1 : for host/server size building, by default. +// MIR_FEATURE_FULL = 0 : for resource-constrained devices. optimized for memory size +#if !defined(MIR_FEATURE_FULL) +#define MIR_FEATURE_FULL 1 // default to full feature building, for debugging +#endif // MIR_FEATURE_FULL define + +// MIR_DEBUG = 0 : for release building. +// MIR_DEBUG = 1 : for debug building. +#ifndef MIR_DEBUG +#define MIR_DEBUG 0 // currently default to none. turn it on explicitly +#endif // MIR_DEBUG + +// MIR_DEBUG_LEVEL = 0: no debuging information at all. +// 1: with error information. +// 2: with severe warning information +// 3: with normal warning information +// 4: with normal information +// 5: with everything +// +#ifndef MIR_DEBUG_LEVEL +#define MIR_DEBUG_LEVEL 0 +#endif // MIR_DEBUG_LEVEL +// assertion +#if !MIR_FEATURE_FULL +#define MIR_ASSERT(...) \ + do { \ + } while (0) +#define MIR_PRINTF(...) \ + do { \ + } while (0) +#define MIR_INFO(...) \ + do { \ + } while (0) +#define MIR_ERROR(...) \ + do { \ + } while (0) +#define MIR_WARNING(...) \ + do { \ + } while (0) +#define MIR_CAST_TO(var, totype) ((totype)(var)) +#include +#if DEBUG +#include +#define MIR_FATAL(...) \ + do { \ + printf("FATAL ERROR: (%s:%d) ", __FILE__, __LINE__); \ + printf(__VA_ARGS__); \ + exit(1); \ + } while (0) +#else +#define MIR_FATAL(...) \ + do { \ + exit(1); \ + } while (0) +#endif // DEBUG +#else // MIR_FEATURE_FULL +#include +#include +#include + +namespace maple { +#define MIR_ASSERT(...) assert(__VA_ARGS__) +#define MIR_FATAL(...) \ + do { \ + fprintf(stderr, "FATAL ERROR: (%s:%d) ", __FILE__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + exit(EXIT_FAILURE); \ + } while (0) +#define MIR_ERROR(...) \ + do { \ + fprintf(stderr, "ERROR: (%s:%d) ", __FILE__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + } while (0) +#define MIR_WARNING(...) \ + do { \ + fprintf(stderr, "WARNING: (%s:%d) ", __FILE__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + } while (0) +#define MIR_PRINTF(...) printf(__VA_ARGS__) +#define MIR_INFO(...) printf(__VA_ARGS__) +#define MIR_CAST_TO(var, totype) static_cast(var) +#endif // !MIR_FEATURE_FULL +#if MIR_DEBUG +#else +#endif // MIR_DEBUG + +// MIR specific configurations. +// Note: fix size definition cannot handle arbitary long MIR lines, such +// as those array initialization lines. 
+constexpr int kMirMaxLineSize = 3072; // a max of 3K characters per line initially +// LIBRARY API availability +#if MIR_FEATURE_FULL +#define HAVE_STRTOD 1 // strtod +#define HAVE_MALLOC 1 // malloc/free +#else // compact VM +#define HAVE_STRTOD 1 // strtod in current libc +#define HAVE_MALLOC 0 // no malloc/free in current libc +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_CONFIG_H diff --git a/ecmascript/mapleall/maple_ir/include/mir_const.h b/ecmascript/mapleall/maple_ir/include/mir_const.h new file mode 100644 index 0000000000000000000000000000000000000000..9a8ff7057ffb43088691d3308562df3a9b390213 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/mir_const.h @@ -0,0 +1,638 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_CONST_H +#define MAPLE_IR_INCLUDE_MIR_CONST_H +#include +#include "mir_type.h" +#include "mpl_int_val.h" + +namespace maple { +class MIRConst; // circular dependency exists, no other choice +using MIRConstPtr = MIRConst*; +#if MIR_FEATURE_FULL +class MIRSymbol; // circular dependency exists, no other choice +enum MIRConstKind { + kConstInvalid, + kConstInt, + kConstAddrof, + kConstAddrofFunc, + kConstLblConst, + kConstStrConst, + kConstStr16Const, + kConstFloatConst, + kConstDoubleConst, + kConstFloat128Const, + kConstAggConst, + kConstStConst +}; + +class MIRConst { + public: + explicit MIRConst(MIRType &type, MIRConstKind constKind = kConstInvalid) + : type(&type), kind(constKind) {} + + virtual ~MIRConst() = default; + + virtual void Dump(const MIRSymbolTable *localSymTab = nullptr) const { + (void)localSymTab; + } + + uint32 GetFieldId() const { + return fieldID; + } + + void SetFieldId(uint32 fieldIdx) { + DoSetFieldId(fieldIdx); + } + + virtual bool IsZero() const { + return false; + } + + virtual bool IsOne() const { + return false; + } + + virtual bool IsMagicNum() const { + return false; + } + + // NO OP + virtual void Neg() {} + + virtual bool operator==(const MIRConst &rhs) const { + return &rhs == this; + } + + virtual MIRConst *Clone(MemPool &memPool) const = 0; + + MIRConstKind GetKind() const { + return kind; + } + + MIRType &GetType() { + return *type; + } + + const MIRType &GetType() const { + return *type; + } + + void SetType(MIRType &t) { + type = &t; + } + + protected: + uint32 fieldID = 0; + + private: + MIRType *type; + MIRConstKind kind; + virtual void DoSetFieldId(uint32 fieldIdx) { + ASSERT(kind != kConstInt, "must be"); + fieldID = fieldIdx; + } +}; + +class MIRIntConst : public MIRConst { + public: + MIRIntConst(uint64 val, MIRType &type) + : MIRConst(type, kConstInt), value(val, type.GetPrimType()) {} + + MIRIntConst(const IntVal &val, MIRType &type) : MIRConst(type, kConstInt), value(val) { + [[maybe_unused]] PrimType pType = type.GetPrimType(); + DEBUG_ASSERT(IsPrimitiveInteger(pType) && GetPrimTypeActualBitSize(pType) <= value.GetBitWidth(), + "Constant is tried to be constructed with non-integral type or bit-width is not appropriate 
for it"); + } + + /// @return number of used bits in the value + uint8 GetActualBitWidth() const; + + void Trunc(uint8 width) { + value.TruncInPlace(width); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool IsNegative() const { + return value.IsSigned() && value.GetSignBit(); + } + + bool IsPositive() const { + return !IsNegative() && value != 0; + } + + bool IsZero() const override { + return value == 0; + } + + bool IsOne() const override { + return value == 1; + } + + void Neg() override { + value = -value; + } + + const IntVal &GetValue() const { + return value; + } + + int64 GetExtValue(uint8 size = 0) const { + return value.GetExtValue(size); + } + + int64 GetSXTValue(uint8 size = 0) const { + return value.GetSXTValue(size); + } + + uint64 GetZXTValue(uint8 size = 0) const { + return value.GetZXTValue(size); + } + + void SetValue(int64 val) const { + (void)val; + CHECK_FATAL(false, "Can't Use This Interface in This Object"); + } + + bool operator==(const MIRConst &rhs) const override; + + MIRIntConst *Clone(MemPool &memPool) const override { + CHECK_FATAL(false, "Can't Use This Interface in This Object"); + } + + private: + IntVal value; + + void DoSetFieldId(uint32 fieldIdx) override { + DEBUG_ASSERT(false, "Can't Use This Interface in This Object"); + (void)fieldIdx; + } +}; + +class MIRAddrofConst : public MIRConst { + public: + MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty) + : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(0) {} + + MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty, int32 ofst) + : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(ofst) {} + + ~MIRAddrofConst() = default; + + StIdx GetSymbolIndex() const { + return stIdx; + } + + void SetSymbolIndex(StIdx idx) { + stIdx = idx; + } + + FieldID GetFieldID() const { + return fldID; + } + + int32 GetOffset() const { + return offset; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool operator==(const MIRConst &rhs) const override; + + MIRAddrofConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + StIdx stIdx; + FieldID fldID; + int32 offset; +}; + +class MIRAddroffuncConst : public MIRConst { + public: + MIRAddroffuncConst(PUIdx idx, MIRType &ty) + : MIRConst(ty, kConstAddrofFunc), puIdx(idx) {} + + ~MIRAddroffuncConst() = default; + + PUIdx GetValue() const { + return puIdx; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool operator==(const MIRConst &rhs) const override; + + MIRAddroffuncConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + PUIdx puIdx; +}; + +class MIRLblConst : public MIRConst { + public: + MIRLblConst(LabelIdx val, PUIdx pidx, MIRType &type) + : MIRConst(type, kConstLblConst), value(val), puIdx(pidx) {} + + ~MIRLblConst() = default; + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRLblConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + LabelIdx GetValue() const { + return value; + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + private: + LabelIdx value; + PUIdx puIdx; +}; + +class MIRStrConst : public MIRConst { + public: + MIRStrConst(UStrIdx val, MIRType &type) : MIRConst(type, kConstStrConst), value(val) {} + + MIRStrConst(const std::string &str, MIRType &type); + + ~MIRStrConst() = default; + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) 
const override; + + MIRStrConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + UStrIdx GetValue() const { + return value; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + private: + UStrIdx value; + static const PrimType kPrimType = PTY_ptr; +}; + +class MIRStr16Const : public MIRConst { + public: + MIRStr16Const(const U16StrIdx &val, MIRType &type) : MIRConst(type, kConstStr16Const), value(val) {} + + MIRStr16Const(const std::u16string &str, MIRType &type); + ~MIRStr16Const() = default; + + static PrimType GetPrimType() { + return kPrimType; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRStr16Const *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + U16StrIdx GetValue() const { + return value; + } + + private: + static const PrimType kPrimType = PTY_ptr; + U16StrIdx value; +}; + +class MIRFloatConst : public MIRConst { + public: + using value_type = float; + MIRFloatConst(float val, MIRType &type) : MIRConst(type, kConstFloatConst) { + value.floatValue = val; + } + + ~MIRFloatConst() = default; + + void SetFloatValue(float fvalue) { + value.floatValue = fvalue; + } + + value_type GetFloatValue() const { + return value.floatValue; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + int32 GetIntValue() const { + return value.intValue; + } + + value_type GetValue() const { + return GetFloatValue(); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool IsZero() const override { + return fabs(value.floatValue) <= 1e-6; + } + + bool IsGeZero() const { + return value.floatValue >= 0; + } + + bool IsNeg() const { + return ((static_cast(value.intValue) & 0x80000000) == 0x80000000); + } + + bool IsOne() const override { + return fabs(value.floatValue - 1) <= 1e-6; + }; + bool IsAllBitsOne() const { + return fabs(value.floatValue + 1) <= 1e-6; + }; + void Neg() override { + value.floatValue = -value.floatValue; + } + + bool operator==(const MIRConst &rhs) const override; + + MIRFloatConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + static const PrimType kPrimType = PTY_f32; + union { + value_type floatValue; + int32 intValue; + } value; +}; + +class MIRDoubleConst : public MIRConst { + public: + using value_type = double; + MIRDoubleConst(double val, MIRType &type) : MIRConst(type, kConstDoubleConst) { + value.dValue = val; + } + + ~MIRDoubleConst() = default; + + uint32 GetIntLow32() const { + auto unsignVal = static_cast(value.intValue); + return static_cast(unsignVal & 0xffffffff); + } + + uint32 GetIntHigh32() const { + auto unsignVal = static_cast(value.intValue); + return static_cast((unsignVal & 0xffffffff00000000) >> 32); + } + + int64 GetIntValue() const { + return value.intValue; + } + + value_type GetValue() const { + return value.dValue; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool IsZero() const override { + return fabs(value.dValue) <= 1e-15; + } + + bool IsGeZero() const { + return value.dValue >= 0; + } + + bool IsNeg() const { + return ((static_cast(value.intValue) & 0x8000000000000000LL) == 0x8000000000000000LL); + } + + bool IsOne() const override { + return fabs(value.dValue - 1) <= 1e-15; + }; + bool IsAllBitsOne() const { + return fabs(value.dValue + 1) <= 1e-15; + }; + void Neg() override { + value.dValue = -value.dValue; + } + + bool 
operator==(const MIRConst &rhs) const override; + + MIRDoubleConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + static const PrimType kPrimType = PTY_f64; + union { + value_type dValue; + int64 intValue; + } value; +}; + +class MIRFloat128Const : public MIRConst { + public: + MIRFloat128Const(const uint64 &val, MIRType &type) : MIRConst(type, kConstFloat128Const) { + value = &val; + } + + ~MIRFloat128Const() = default; + + const uint64 *GetIntValue() const { + return value; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + bool IsZero() const override { + MIR_ASSERT(value && "value must not be nullptr!"); + return value[0] == 0 && value[1] == 0; + } + + bool IsOne() const override { + MIR_ASSERT(value && "value must not be nullptr!"); + return value[0] == 0 && value[1] == 0x3FFF000000000000; + }; + bool IsAllBitsOne() const { + MIR_ASSERT(value && "value must not be nullptr!"); + return (value[0] == 0xffffffffffffffff && value[1] == 0xffffffffffffffff); + }; + bool operator==(const MIRConst &rhs) const override; + + MIRFloat128Const *Clone(MemPool &memPool) const override { + auto *res = memPool.New(*this); + return res; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + private: + static const PrimType kPrimType = PTY_f128; + // value[0]: Low 64 bits; value[1]: High 64 bits. + const uint64 *value; +}; + +class MIRAggConst : public MIRConst { + public: + MIRAggConst(MIRModule &mod, MIRType &type) + : MIRConst(type, kConstAggConst), + constVec(mod.GetMPAllocator().Adapter()), + fieldIdVec(mod.GetMPAllocator().Adapter()) {} + + ~MIRAggConst() = default; + + MIRConst *GetAggConstElement(unsigned int fieldId) { + for (size_t i = 0; i < fieldIdVec.size(); ++i) { + if (fieldId == fieldIdVec[i]) { + return constVec[i]; + } + } + return nullptr; + } + + void SetFieldIdOfElement(uint32 index, uint32 fieldId) { + DEBUG_ASSERT(index < fieldIdVec.size(), "index out of range"); + fieldIdVec[index] = fieldId; + } + + const MapleVector &GetConstVec() const { + return constVec; + } + + MapleVector &GetConstVec() { + return constVec; + } + + const MIRConstPtr &GetConstVecItem(size_t index) const { + CHECK_FATAL(index < constVec.size(), "index out of range"); + return constVec[index]; + } + + MIRConstPtr &GetConstVecItem(size_t index) { + CHECK_FATAL(index < constVec.size(), "index out of range"); + return constVec[index]; + } + + void SetConstVecItem(size_t index, MIRConst& st) { + CHECK_FATAL(index < constVec.size(), "index out of range"); + constVec[index] = &st; + } + + uint32 GetFieldIdItem(size_t index) const { + DEBUG_ASSERT(index < fieldIdVec.size(), "index out of range"); + return fieldIdVec[index]; + } + + void SetItem(uint32 index, MIRConst *mirConst, uint32 fieldId) { + CHECK_FATAL(index < constVec.size(), "index out of range"); + constVec[index] = mirConst; + fieldIdVec[index] = fieldId; + } + + void AddItem(MIRConst *mirConst, uint32 fieldId) { + constVec.push_back(mirConst); + fieldIdVec.push_back(fieldId); + } + + void PushBack(MIRConst *elem) { + AddItem(elem, 0); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRAggConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + MapleVector constVec; + MapleVector fieldIdVec; +}; + +// the const has one or more symbols +class MIRStConst : public MIRConst { + public: + MIRStConst(MIRModule &mod, MIRType &type) + : MIRConst(type, kConstStConst), + 
+
+// the const has one or more symbols
+class MIRStConst : public MIRConst {
+ public:
+  MIRStConst(MIRModule &mod, MIRType &type)
+      : MIRConst(type, kConstStConst),
+        stVec(mod.GetMPAllocator().Adapter()),
+        stOffsetVec(mod.GetMPAllocator().Adapter()) {}
+
+  const MapleVector<MIRSymbol*> &GetStVec() const {
+    return stVec;
+  }
+  void PushbackSymbolToSt(MIRSymbol *sym) {
+    stVec.push_back(sym);
+  }
+
+  MIRSymbol *GetStVecItem(size_t index) {
+    CHECK_FATAL(index < stVec.size(), "array index out of range");
+    return stVec[index];
+  }
+
+  const MapleVector<uint32> &GetStOffsetVec() const {
+    return stOffsetVec;
+  }
+  void PushbackOffsetToSt(uint32 offset) {
+    stOffsetVec.push_back(offset);
+  }
+
+  uint32 GetStOffsetVecItem(size_t index) const {
+    CHECK_FATAL(index < stOffsetVec.size(), "array index out of range");
+    return stOffsetVec[index];
+  }
+
+  MIRStConst *Clone(MemPool &memPool) const override {
+    auto *res = memPool.New<MIRStConst>(*this);
+    return res;
+  }
+
+  ~MIRStConst() = default;
+
+ private:
+  MapleVector<MIRSymbol*> stVec;    // symbols in the st const
+  MapleVector<uint32> stOffsetVec;  // offsets of the symbols
+};
+#endif // MIR_FEATURE_FULL
+
+bool IsDivSafe(const MIRIntConst &dividend, const MIRIntConst &divisor, PrimType pType);
+
+}  // namespace maple
+
+#define LOAD_SAFE_CAST_FOR_MIR_CONST
+#include "ir_safe_cast_traits.def"
+
+#endif  // MAPLE_IR_INCLUDE_MIR_CONST_H
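`MIRStConst` follows the same parallel-vector pattern for symbols and their offsets; a matching hedged sketch (again with hypothetical `mod`, `ty`, and `sym`):

```
// Hypothetical setup: mod (MIRModule&), ty (MIRType&), sym (MIRSymbol*).
MIRStConst *stConst = mod.GetMemPool()->New<MIRStConst>(mod, ty);
stConst->PushbackSymbolToSt(sym);  // stVec[0]
stConst->PushbackOffsetToSt(8);    // stOffsetVec[0], kept in step with stVec
MIRSymbol *s = stConst->GetStVecItem(0);
uint32 offset = stConst->GetStOffsetVecItem(0);
```

diff --git a/ecmascript/mapleall/maple_ir/include/mir_function.h b/ecmascript/mapleall/maple_ir/include/mir_function.h
new file mode 100644
index 0000000000000000000000000000000000000000..1627afe8e54bca0ee4b5136e226a6346dd000f6e
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_function.h
@@ -0,0 +1,1372 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.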
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_FUNCTION_H +#define MAPLE_IR_INCLUDE_MIR_FUNCTION_H +#include +#include "mir_module.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_preg.h" +#include "intrinsics.h" +#include "file_layout.h" +#include "mir_nodes.h" +#include "mir_type.h" +#include "mir_scope.h" +#include "profile.h" +#include "func_desc.h" + +#define DEBUGME true + +namespace maple { +enum PointerAttr: uint32_t { + kPointerUndeiced = 0x1, + kPointerNull = 0x2, + kPointerNoNull = 0x3 +}; + +enum FuncAttrProp : uint32_t { + kNoThrowException = 0x1, + kNoRetNewlyAllocObj = 0x2, + kNoDefEffect = 0x4, + kNoDefArgEffect = 0x8, + kPureFunc = 0x10, + kIpaSeen = 0x20, + kUseEffect = 0x40, + kDefEffect = 0x80 +}; + +// describe a formal definition in a function declaration +class FormalDef { + public: + GStrIdx formalStrIdx = GStrIdx(0); // used when processing the prototype + MIRSymbol *formalSym = nullptr; // used in the function definition + TyIdx formalTyIdx = TyIdx(); + TypeAttrs formalAttrs = TypeAttrs(); // the formal's type attributes + + FormalDef() {}; + virtual ~FormalDef() {} + FormalDef(MIRSymbol *s, const TyIdx &tidx, const TypeAttrs &at) : formalSym(s), formalTyIdx(tidx), formalAttrs(at) {} + FormalDef(const GStrIdx &sidx, MIRSymbol *s, const TyIdx &tidx, const TypeAttrs &at) + : formalStrIdx(sidx), formalSym(s), formalTyIdx(tidx), formalAttrs(at) {} +}; + +class MeFunction; // circular dependency exists, no other choice +class EAConnectionGraph; // circular dependency exists, no other choice +class MIRFunction { + public: + MIRFunction(MIRModule *mod, StIdx idx) + : module(mod), + symbolTableIdx(idx) { + scope = module->GetMemPool()->New(mod); + } + + ~MIRFunction() = default; + + void Dump(bool withoutBody = false); + void DumpUpFormal(int32 indent) const; + void DumpFrame(int32 indent) const; + void DumpFuncBody(int32 indent); + void DumpScope(); + const MIRSymbol *GetFuncSymbol() const; + MIRSymbol *GetFuncSymbol(); + + void SetBaseClassFuncNames(GStrIdx strIdx); + void SetMemPool(MemPool *memPool) { + SetCodeMemPool(memPool); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + + /// update signature_strIdx, basefunc_strIdx, baseclass_strIdx, basefunc_withtype_strIdx + /// without considering baseclass_strIdx, basefunc_strIdx's original non-zero values + /// \param strIdx full_name strIdx of the new function name + void OverrideBaseClassFuncNames(GStrIdx strIdx); + const std::string &GetName() const; + + GStrIdx GetNameStrIdx() const; + + const std::string &GetBaseClassName() const; + + const std::string &GetBaseFuncName() const; + + const std::string &GetBaseFuncNameWithType() const; + + const std::string &GetBaseFuncSig() const; + + const std::string &GetSignature() const; + + GStrIdx GetBaseClassNameStrIdx() const { + return baseClassStrIdx; + } + + GStrIdx GetBaseFuncNameStrIdx() const { + return baseFuncStrIdx; + } + + GStrIdx GetBaseFuncNameWithTypeStrIdx() const { + return baseFuncWithTypeStrIdx; + } + + GStrIdx GetBaseFuncSigStrIdx() const { + return baseFuncSigStrIdx; + } + + void SetBaseClassNameStrIdx(GStrIdx id) { + baseClassStrIdx = id; + } + + void SetBaseFuncNameStrIdx(GStrIdx id) { + baseFuncStrIdx = id; + } + + void SetBaseFuncNameWithTypeStrIdx(GStrIdx id) { + baseFuncWithTypeStrIdx = id; + } + + const MIRType *GetReturnType() const; + MIRType *GetReturnType(); + bool IsReturnVoid() const { + return GetReturnType()->GetPrimType() == PTY_void; + } + TyIdx GetReturnTyIdx() const { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + 
return funcType->GetRetTyIdx(); + } + void SetReturnTyIdx(TyIdx tyidx) { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + funcType->SetRetTyIdx(tyidx); + } + + const MIRType *GetClassType() const; + TyIdx GetClassTyIdx() const { + return classTyIdx; + } + void SetClassTyIdx(TyIdx tyIdx) { + classTyIdx = tyIdx; + } + void SetClassTyIdx(uint32 idx) { + classTyIdx.reset(idx); + } + + void AddArgument(MIRSymbol *st) { + DEBUG_ASSERT(st != nullptr, "null ptr check"); + FormalDef formalDef(st->GetNameStrIdx(), st, st->GetTyIdx(), st->GetAttrs()); + formalDefVec.push_back(formalDef); + } + + void AddFormalDef(const FormalDef &formalDef) { + formalDefVec.push_back(formalDef); + } + + size_t GetParamSize() const { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetParamTypeList().size(); + } + + auto &GetParamTypes() const { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetParamTypeList(); + } + + TyIdx GetNthParamTyIdx(size_t i) const { + DEBUG_ASSERT(i < funcType->GetParamTypeList().size(), "array index out of range"); + return funcType->GetParamTypeList()[i]; + } + + const MIRType *GetNthParamType(size_t i) const; + MIRType *GetNthParamType(size_t i); + + const TypeAttrs &GetNthParamAttr(size_t i) const { + DEBUG_ASSERT(i < formalDefVec.size(), "array index out of range"); + DEBUG_ASSERT(formalDefVec[i].formalSym != nullptr, "null ptr check"); + return formalDefVec[i].formalSym->GetAttrs(); + } + + void UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs = false); + void UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, + bool clearOldArgs = false); + LabelIdx GetOrCreateLableIdxFromName(const std::string &name); + GStrIdx GetLabelStringIndex(LabelIdx labelIdx) const { + CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); + DEBUG_ASSERT(labelIdx < labelTab->Size(), "index out of range in GetLabelStringIndex"); + return labelTab->GetSymbolFromStIdx(labelIdx); + } + const std::string &GetLabelName(LabelIdx labelIdx) const { + GStrIdx strIdx = GetLabelStringIndex(labelIdx); + return GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + } + + const MIRSymbol *GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst = false) const; + MIRSymbol *GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst = false); + + void SetAttrsFromSe(uint8 specialEffect); + + const FuncAttrs &GetAttrs() const { + return funcAttrs; + } + + void SetAttrs(FuncAttrs attr) { + funcAttrs = attr; + } + + bool GetAttr(FuncAttrKind attrKind) const { + return funcAttrs.GetAttr(attrKind); + } + + void SetAttr(FuncAttrKind attrKind) { + funcAttrs.SetAttr(attrKind); + } + + void UnSetAttr(FuncAttrKind attrKind) { + funcAttrs.SetAttr(attrKind, true); + } + + bool IsVarargs() const { + return funcAttrs.GetAttr(FUNCATTR_varargs); + } + + bool IsWeak() const { + return funcAttrs.GetAttr(FUNCATTR_weak); + } + + bool IsStatic() const { + return funcAttrs.GetAttr(FUNCATTR_static); + } + + bool IsInline() const { + return funcAttrs.GetAttr(FUNCATTR_inline); + } + + bool IsExtern() const { + return funcAttrs.GetAttr(FUNCATTR_extern); + } + + bool IsNative() const { + return funcAttrs.GetAttr(FUNCATTR_native); + } + + bool IsFinal() const { + return funcAttrs.GetAttr(FUNCATTR_final); + } + + bool IsAbstract() const { + return funcAttrs.GetAttr(FUNCATTR_abstract); + } + + bool IsPublic() const { + return funcAttrs.GetAttr(FUNCATTR_public); + } + + bool IsPrivate() const { + return 
funcAttrs.GetAttr(FUNCATTR_private); + } + + bool IsProtected() const { + return funcAttrs.GetAttr(FUNCATTR_protected); + } + + bool IsConstructor() const { + return funcAttrs.GetAttr(FUNCATTR_constructor); + } + + bool IsLocal() const { + return funcAttrs.GetAttr(FUNCATTR_local); + } + + bool IsNoDefArgEffect() const { + return funcAttrs.GetAttr(FUNCATTR_nodefargeffect); + } + + bool IsNoDefEffect() const { + return funcAttrs.GetAttr(FUNCATTR_nodefeffect); + } + + bool IsNoRetGlobal() const { + return funcAttrs.GetAttr(FUNCATTR_noretglobal); + } + + bool IsNoThrowException() const { + return funcAttrs.GetAttr(FUNCATTR_nothrow_exception); + } + + bool IsNoRetArg() const { + return funcAttrs.GetAttr(FUNCATTR_noretarg); + } + + bool IsNoPrivateDefEffect() const { + return funcAttrs.GetAttr(FUNCATTR_noprivate_defeffect); + } + + bool IsIpaSeen() const { + return funcAttrs.GetAttr(FUNCATTR_ipaseen); + } + + bool IsPure() const { + return funcAttrs.GetAttr(FUNCATTR_pure); + } + + bool IsFirstArgReturn() const { + return funcAttrs.GetAttr(FUNCATTR_firstarg_return); + } + + bool IsUnSafe() const { + return !funcAttrs.GetAttr(FUNCATTR_safed) || funcAttrs.GetAttr(FUNCATTR_unsafed); + } + + bool IsSafe() const { + return funcAttrs.GetAttr(FUNCATTR_safed); + } + + void SetVarArgs() { + funcAttrs.SetAttr(FUNCATTR_varargs); + } + + void SetNoDefArgEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect); + } + + void SetNoDefEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefeffect); + } + + void SetNoRetGlobal() { + funcAttrs.SetAttr(FUNCATTR_noretglobal); + } + + void SetNoThrowException() { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception); + } + + void SetNoRetArg() { + funcAttrs.SetAttr(FUNCATTR_noretarg); + } + + void SetNoPrivateDefEffect() { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect); + } + + void SetIpaSeen() { + funcAttrs.SetAttr(FUNCATTR_ipaseen); + } + + void SetPure() { + funcAttrs.SetAttr(FUNCATTR_pure); + } + + void SetFirstArgReturn() { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + + void UnsetNoDefArgEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect, true); + } + + void UnsetNoDefEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefeffect, true); + } + + void UnsetNoRetGlobal() { + funcAttrs.SetAttr(FUNCATTR_noretglobal, true); + } + + void UnsetNoThrowException() { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception, true); + } + + void UnsetPure() { + funcAttrs.SetAttr(FUNCATTR_pure, true); + } + + void UnsetNoRetArg() { + funcAttrs.SetAttr(FUNCATTR_noretarg, true); + } + + void UnsetNoPrivateDefEffect() { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect, true); + } + + bool HasCall() const; + void SetHasCall(); + + bool IsReturnStruct() const; + void SetReturnStruct(); + void SetReturnStruct(const MIRType &retType); + + bool IsUserFunc() const; + void SetUserFunc(); + + bool IsInfoPrinted() const; + void SetInfoPrinted(); + void ResetInfoPrinted(); + + void SetNoReturn(); + bool NeverReturns() const; + + void SetHasSetjmp(); + bool HasSetjmp() const; + + void SetHasAsm(); + bool HasAsm() const; + + void SetStructReturnedInRegs(); + bool StructReturnedInRegs() const; + + void SetReturnStruct(const MIRType *retType); + + bool IsEmpty() const; + bool IsClinit() const; + uint32 GetInfo(GStrIdx strIdx) const; + uint32 GetInfo(const std::string &str) const; + bool IsAFormal(const MIRSymbol *st) const { + for (const auto &formalDef : formalDefVec) { + if (st == formalDef.formalSym) { + return true; + } + } + return false; + } + + uint32 GetFormalIndex(const MIRSymbol *symbol) const { + for 
(size_t i = 0; i < formalDefVec.size(); ++i) { + if (formalDefVec[i].formalSym == symbol) { + return i; + } + } + return 0xffffffff; + } + + FormalDef &GetFormalDefFromMIRSymbol(const MIRSymbol *symbol) { + for (auto &formalDef : formalDefVec) { + if (formalDef.formalSym == symbol) { + return formalDef; + } + } + CHECK_FATAL(false, "Impossible."); + } + + bool IsAFormalName(const GStrIdx idx) const { + for (const auto &formalDef : formalDefVec) { + if (idx == formalDef.formalStrIdx) { + return true; + } + } + return false; + } + + const FormalDef GetFormalFromName(const GStrIdx idx) const { + for (size_t i = 0; i < formalDefVec.size(); ++i) { + if (formalDefVec[i].formalStrIdx == idx) { + return formalDefVec[i]; + } + } + return FormalDef(); + } + + // tell whether this function is a Java method + bool IsJava() const { + return classTyIdx != 0u; + } + + const MIRType *GetNodeType(const BaseNode &node) const; + +#ifdef DEBUGME + void SetUpGDBEnv(); + void ResetGDBEnv(); +#endif + void ReleaseMemory() { + if (codeMemPoolTmp != nullptr) { + delete codeMemPoolTmp; + codeMemPoolTmp = nullptr; + } + } + + void ReleaseCodeMemory() { + if (codeMemPool != nullptr) { + codeMemPoolAllocator.SetMemPool(nullptr); + delete codeMemPool; + SetMemPool(nullptr); + } + } + + MemPool *GetCodeMempool() { + if (useTmpMemPool) { + if (codeMemPoolTmp == nullptr) { + codeMemPoolTmp = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolTmpAllocator.SetMemPool(codeMemPoolTmp); + } + return codeMemPoolTmp; + } + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPool; + } + + MapleAllocator &GetCodeMemPoolAllocator() { + GetCodeMempool(); + if (useTmpMemPool) { + return codeMemPoolTmpAllocator; + } + return codeMemPoolAllocator; + } + + MapleAllocator &GetCodeMempoolAllocator() { + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPoolAllocator; + } + + TyIdx GetFuncRetStructTyIdx() { + TyIdx tyIdx = GetFormalDefAt(0).formalTyIdx; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(ty->GetKind() == kTypePointer, "Fake param not a pointer"); + MIRPtrType *pType = static_cast(ty); + tyIdx = pType->GetPointedTyIdx(); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->IsStructType(), + "Must be struct return type"); + return tyIdx; + } + + void EnterFormals(); + void NewBody(); + + MIRModule *GetModule() { + return module; + } + + PUIdx GetPuidx() const { + return puIdx; + } + void SetPuidx(PUIdx idx) { + puIdx = idx; + } + + PUIdx GetPuidxOrigin() const { + return puIdxOrigin; + } + void SetPuidxOrigin(PUIdx idx) { + puIdxOrigin = idx; + } + + StIdx GetStIdx() const { + return symbolTableIdx; + } + void SetStIdx(StIdx stIdx) { + symbolTableIdx = stIdx; + } + + int32 GetSCCId() const { + return sccID; + } + void SetSCCId(int32 id) { + sccID = id; + } + + MIRFuncType *GetMIRFuncType() { + return funcType; + } + void SetMIRFuncType(MIRFuncType *type) { + funcType = type; + } + + TyIdx GetInferredReturnTyIdx() const { + return inferredReturnTyIdx; + } + + void SetInferredReturnTyIdx(TyIdx tyIdx) { + inferredReturnTyIdx = tyIdx; + } + + MIRTypeNameTable *GetTypeNameTab() const { + return typeNameTab; + } + + void AllocTypeNameTab() { + if (typeNameTab == nullptr) { + typeNameTab = 
module->GetMemPool()->New(module->GetMPAllocator()); + } + } + bool HaveTypeNameTab() const { + return typeNameTab != nullptr; + } + const MapleMap &GetGStrIdxToTyIdxMap() const { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + return typeNameTab->GetGStrIdxToTyIdxMap(); + } + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + return typeNameTab->GetTyIdxFromGStrIdx(idx); + } + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + typeNameTab->SetGStrIdxToTyIdx(gStrIdx, tyIdx); + } + + const std::string &GetLabelTabItem(LabelIdx labelIdx) const { + CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); + return labelTab->GetName(labelIdx); + } + + void AllocLabelTab() { + if (labelTab == nullptr) { + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + + MIRPregTable *GetPregTab() const { + return pregTab; + } + + void SetPregTab(MIRPregTable *tab) { + pregTab = tab; + } + void AllocPregTab() { + if (pregTab == nullptr) { + pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); + } + } + MIRPreg *GetPregItem(PregIdx idx) { + return const_cast(const_cast(this)->GetPregItem(idx)); + } + const MIRPreg *GetPregItem(PregIdx idx) const { + return pregTab->PregFromPregIdx(idx); + } + + BlockNode *GetBody() { + return body; + } + const BlockNode *GetBody() const { + return body; + } + void SetBody(BlockNode *node) { + body = node; + } + + SrcPosition &GetSrcPosition() { + DEBUG_ASSERT(GetFuncSymbol() != nullptr, "null ptr check"); + return GetFuncSymbol()->GetSrcPosition(); + } + + void SetSrcPosition(const SrcPosition &position) { + DEBUG_ASSERT(GetFuncSymbol() != nullptr, "null ptr check"); + GetFuncSymbol()->SetSrcPosition(position); + } + + const FuncAttrs &GetFuncAttrs() const { + return funcAttrs; + } + FuncAttrs &GetFuncAttrs() { + return funcAttrs; + } + + void SetFuncAttrs(const FuncAttrs &attrs) { + funcAttrs = attrs; + } + void SetFuncAttrs(uint64 attrFlag) { + funcAttrs.SetAttrFlag(attrFlag); + } + + uint32 GetFlag() const { + return flag; + } + void SetFlag(uint32 newFlag) { + flag = newFlag; + } + + uint16 GetHashCode() const { + return hashCode; + } + void SetHashCode(uint16 newHashCode) { + hashCode = newHashCode; + } + + void SetFileIndex(uint32 newFileIndex) { + fileIndex = newFileIndex; + } + + MIRInfoVector &GetInfoVector() { + return info; + } + + const MIRInfoPair &GetInfoPair(size_t i) const { + return info.at(i); + } + + void PushbackMIRInfo(const MIRInfoPair &pair) { + info.push_back(pair); + } + + void SetMIRInfoNum(size_t idx, uint32 num) { + info[idx].second = num; + } + + MapleVector &InfoIsString() { + return infoIsString; + } + + void PushbackIsString(bool isString) { + infoIsString.push_back(isString); + } + + MIRScope *GetScope() { + return scope; + } + + bool NeedEmitAliasInfo() const { + return scope->NeedEmitAliasInfo(); + } + + MapleMap &GetAliasVarMap() { + return scope->GetAliasVarMap(); + } + + void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) { + scope->SetAliasVarMap(idx, vars); + } + + void AddAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) { + scope->AddAliasVarMap(idx, vars); + } + + bool HasVlaOrAlloca() const { + return hasVlaOrAlloca; + } + void SetVlaOrAlloca(bool has) { + hasVlaOrAlloca = has; + } + + // Default freq is the lastStmtFreq + bool HasFreqMap() const { + return freqLastMap != nullptr; + } + + bool HasFirstFreqMap() const { + return freqFirstMap 
!= nullptr; + } + + const MapleMap &GetFirstFreqMap() const { + return *freqFirstMap; + } + + void SetFirstFreqMap(uint32 stmtID, uint32 freq) { + if (freqFirstMap == nullptr) { + freqFirstMap = module->GetMemPool()->New>(module->GetMPAllocator().Adapter()); + } + (*freqFirstMap)[stmtID] = freq; + } + + const MapleMap &GetLastFreqMap() const { + return *freqLastMap; + } + + int32 GetFreqFromLastStmt(uint32 stmtId) { + if (freqLastMap == nullptr) { + return -1; + } + if ((*freqLastMap).find(stmtId) == (*freqLastMap).end()) { + return -1; + } + return static_cast((*freqLastMap)[stmtId]); + } + + int32 GetFreqFromFirstStmt(uint32 stmtId) { + if (freqFirstMap == nullptr) { + return -1; + } + if ((*freqFirstMap).find(stmtId) == (*freqFirstMap).end()) { + return -1; + } + return static_cast((*freqFirstMap)[stmtId]); + } + + void SetLastFreqMap(uint32 stmtID, uint32 freq) { + if (freqLastMap == nullptr) { + freqLastMap = module->GetMemPool()->New>(module->GetMPAllocator().Adapter()); + } + (*freqLastMap)[stmtID] = freq; + } + + bool WithLocInfo() const { + return withLocInfo; + } + void SetWithLocInfo(bool withInfo) { + withLocInfo = withInfo; + } + + bool IsDirty() const { + return isDirty; + } + void SetDirty(bool dirty) { + isDirty = dirty; + } + + bool IsFromMpltInline() const { + return fromMpltInline; + } + void SetFromMpltInline(bool isInline) { + fromMpltInline = isInline; + } + + uint8 GetLayoutType() const { + return layoutType; + } + void SetLayoutType(uint8 type) { + layoutType = type; + } + + uint32 GetCallTimes() const { + return callTimes; + } + void SetCallTimes(uint32 times) { + callTimes = times; + } + + uint32 GetFrameSize() const { + return frameSize; + } + void SetFrameSize(uint32 size) { + frameSize = size; + } + + uint32 GetUpFormalSize() const { + return upFormalSize; + } + void SetUpFormalSize(uint32 size) { + upFormalSize = size; + } + + uint32 GetOutParmSize() const { + return outParmSize; + } + void SetOutParmSize(uint32 size) { + outParmSize = size; + } + + uint16 GetModuleId() const { + return moduleID; + } + void SetModuleID(uint16 id) { + moduleID = id; + } + + uint32 GetFuncSize() const { + return funcSize; + } + void SetFuncSize(uint32 size) { + funcSize = size; + } + + uint32 GetTempCount() const { + return tempCount; + } + void IncTempCount() { + ++tempCount; + } + + uint8 *GetFormalWordsTypeTagged() const { + return formalWordsTypeTagged; + } + void SetFormalWordsTypeTagged(uint8 *tagged) { + formalWordsTypeTagged = tagged; + } + uint8 **GetFwtAddress() { + return &formalWordsTypeTagged; + } + + uint8 *GetLocalWordsTypeTagged() const { + return localWordsTypeTagged; + } + void SetLocalWordsTypeTagged(uint8 *tagged) { + localWordsTypeTagged = tagged; + } + uint8 **GetLwtAddress() { + return &localWordsTypeTagged; + } + + uint8 *GetFormalWordsRefCounted() const { + return formalWordsRefCounted; + } + void SetFormalWordsRefCounted(uint8 *counted) { + formalWordsRefCounted = counted; + } + uint8 **GetFwrAddress() { + return &formalWordsRefCounted; + } + + uint8 *GetLocalWordsRefCounted() const { + return localWordsRefCounted; + } + void SetLocalWordsRefCounted(uint8 *counted) { + localWordsRefCounted = counted; + } + + MeFunction *GetMeFunc() { + return meFunc; + } + + void SetMeFunc(MeFunction *func) { + meFunc = func; + } + + EAConnectionGraph *GetEACG() { + return eacg; + } + void SetEACG(EAConnectionGraph *eacgVal) { + eacg = eacgVal; + } + + void SetFormalDefVec(const MapleVector &currFormals) { + formalDefVec = currFormals; + } + + MapleVector 
&GetFormalDefVec() { + return formalDefVec; + } + + const FormalDef &GetFormalDefAt(size_t i) const { + return formalDefVec[i]; + } + + FormalDef &GetFormalDefAt(size_t i) { + return formalDefVec[i]; + } + + const MIRSymbol *GetFormal(size_t i) const { + return formalDefVec[i].formalSym; + } + + MIRSymbol *GetFormal(size_t i) { + return formalDefVec[i].formalSym; + } + + const std::string &GetFormalName(size_t i) const { + auto *formal = formalDefVec[i].formalSym; + if (formal != nullptr) { + return formal->GetName(); + } + return GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx); + } + + size_t GetFormalCount() const { + return formalDefVec.size(); + } + + void ClearFormals() { + formalDefVec.clear(); + } + + void ClearArguments() { + formalDefVec.clear(); + funcType->GetParamTypeList().clear(); + funcType->GetParamAttrsList().clear(); + } + + size_t GetSymbolTabSize() const { + DEBUG_ASSERT(symTab != nullptr, "symTab is nullptr"); + return symTab->GetSymbolTableSize(); + } + MIRSymbol *GetSymbolTabItem(uint32 idx, bool checkFirst = false) const { + return symTab->GetSymbolFromStIdx(idx, checkFirst); + } + const MIRSymbolTable *GetSymTab() const { + return symTab; + } + MIRSymbolTable *GetSymTab() { + return symTab; + } + void AllocSymTab() { + if (symTab == nullptr) { + symTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + MIRLabelTable *GetLabelTab() const { + CHECK_FATAL(labelTab != nullptr, "must be"); + return labelTab; + } + MIRLabelTable *GetLabelTab() { + if (labelTab == nullptr) { + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + return labelTab; + } + void SetLabelTab(MIRLabelTable *currLabelTab) { + labelTab = currLabelTab; + } + + const MapleSet &GetRetRefSym() const { + return retRefSym; + } + void InsertMIRSymbol(MIRSymbol *sym) { + (void)retRefSym.insert(sym); + } + + MemPool *GetDataMemPool() const { + return module->GetMemPool(); + } + + MemPool *GetCodeMemPool() { + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPool; + } + + void SetCodeMemPool(MemPool *currCodeMemPool) { + codeMemPool = currCodeMemPool; + } + + MapleAllocator &GetCodeMPAllocator() { + GetCodeMemPool(); + return codeMemPoolAllocator; + } + + void AddFuncGenericDeclare(GenericDeclare *g) { + genericDeclare.push_back(g); + } + + void AddFuncGenericArg(AnnotationType *a) { + genericArg.push_back(a); + } + + void AddFuncGenericRet(AnnotationType *r) { + genericRet = r; + } + + void AddFuncLocalGenericVar(const GStrIdx &str, AnnotationType *at) { + genericLocalVar[str] = at; + } + + MapleVector &GetFuncGenericDeclare() { + return genericDeclare; + } + + MapleVector &GetFuncGenericArg() { + return genericArg; + } + + void SetRetrunAttrKind(const PointerAttr kind) { + returnKind = kind; + } + + PointerAttr GetRetrunAttrKind() const { + return returnKind; + } + + AnnotationType *GetFuncGenericRet() { + return genericRet; + } + + AnnotationType *GetFuncLocalGenericVar(const GStrIdx &str) { + if (genericLocalVar.find(str) == genericLocalVar.end()) { + return nullptr; + } + return genericLocalVar[str]; + } + + StmtNode *FindStmtWithId(StmtNode *stmt, uint32 stmtId) { + while (stmt != nullptr) { + StmtNode *next = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_dowhile: + case OP_while: { + WhileStmtNode *wnode = static_cast(stmt); + if (wnode->GetBody() != nullptr && wnode->GetBody()->GetFirst() != nullptr) { + 
StmtNode *res = FindStmtWithId(wnode->GetBody()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + break; + } + case OP_if: { + if (stmt->GetMeStmtID() == stmtId) { + return stmt; + } + IfStmtNode *inode = static_cast(stmt); + if (inode->GetThenPart() != nullptr && inode->GetThenPart()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(inode->GetThenPart()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + if (inode->GetElsePart() != nullptr && inode->GetElsePart()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(inode->GetElsePart()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + break; + } + case OP_callassigned: + case OP_call: + case OP_brtrue: + case OP_brfalse: { + if (stmt->GetMeStmtID() == stmtId) { + return stmt; + } + break; + } + default: { + break; + } + } + stmt = next; + } + return nullptr; + } + + StmtNode *GetStmtNodeFromMeId(uint32 stmtId) { + if (GetBody() == nullptr) { + return nullptr; + } + StmtNode *stmt = GetBody()->GetFirst(); + return FindStmtWithId(stmt, stmtId); + } + + MemPool *GetCodeMemPoolTmp() { + if (codeMemPoolTmp == nullptr) { + codeMemPoolTmp = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolTmpAllocator.SetMemPool(codeMemPoolTmp); + } + return codeMemPoolTmp; + } + + bool CheckParamNullType(MIRSymbol *sym) { + return paramNonullTypeMap.find(sym) != paramNonullTypeMap.end(); + } + + PointerAttr GetParamNonull(MIRSymbol *sym) { + return paramNonullTypeMap[sym]; + } + + void SetParamNonull(MIRSymbol *sym, PointerAttr type) { + paramNonullTypeMap[sym] = type; + } + + void CopyReferedRegs(std::set regs) { + for (auto reg : regs) { + referedPregs.insert(reg); + } + } + + MapleSet GetReferedRegs() const { + return referedPregs; + } + + bool IsReferedRegsValid() const { + return referedRegsValid; + } + + void SetReferedRegsValid(bool val) { + referedRegsValid = val; + } + + FuncDesc &GetFuncDesc() { + return funcDesc; + } + + void SetFuncDesc(const FuncDesc &value) { + funcDesc = value; + } + + void SetProfCtrTbl(MIRSymbol *pct) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + profCtrTbl = pct; + } + + MIRSymbol *GetProfCtrTbl() { + return profCtrTbl; + } + + void SetNumCtrs(uint32 num) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + nCtrs = num; + } + + uint32 GetNumCtrs() const { + return nCtrs; + } + + void SetFileLineNoChksum(uint64 chksum) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + fileLinenoChksum = chksum; + } + + uint64 GetFileLineNoChksum() const { + return fileLinenoChksum; + } + + void SetCFGChksum(uint64 chksum) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + cfgChksum = chksum; + } + + uint64 GetCFGChksum() const { + return cfgChksum; + } + + void InitFuncDescToBest() { + funcDesc.InitToBest(); + } + + const FuncDesc &GetFuncDesc() const { + return funcDesc; + } + + void AddProfileDesc(uint64 hash, uint32 start, uint32 end) { + profileDesc = module->GetMemPool()->New(hash, start, end); + } + + const IRProfileDesc *GetProfInf() { + if (profileDesc == nullptr) { + // return profileDesc with default value + profileDesc = module->GetMemPool()->New(); + } + return profileDesc; + } + + bool IsVisited() const { + return isVisited; + } + void SetIsVisited() { + isVisited = true; + } + + void SetFuncProfData(GcovFuncInfo *data) { + funcProfData = data; + } + GcovFuncInfo* GetFuncProfData() { + return funcProfData; + } + GcovFuncInfo* GetFuncProfData() const { + 
return funcProfData; + } + void SetStmtFreq(uint32_t stmtID, uint64_t freq) { + DEBUG_ASSERT((funcProfData != nullptr && freq > 0), "nullptr check"); + funcProfData->SetStmtFreq(stmtID, static_cast(freq)); + } + + uint8 GetFrameReseverdSlot() { + return funcAttrs.GetFrameResverdSlot(); + } + private: + MIRModule *module; // the module that owns this function + PUIdx puIdx = 0; // the PU index of this function + PUIdx puIdxOrigin = 0; // the original puIdx when initial generation + StIdx symbolTableIdx; // the symbol table index of this function + int32 sccID = -1; // the scc id of this function, for mplipa + MIRFuncType *funcType = nullptr; + TyIdx inferredReturnTyIdx{0}; // the actual return type of of this function (may be a + // subclass of the above). 0 means can not be inferred. + TyIdx classTyIdx{0}; // class/interface type this function belongs to + MapleVector formalDefVec{module->GetMPAllocator().Adapter()}; // the formals in function definition + MapleSet retRefSym{module->GetMPAllocator().Adapter()}; + + MapleVector genericDeclare{module->GetMPAllocator().Adapter()}; + MapleVector genericArg{module->GetMPAllocator().Adapter()}; + MapleMap genericLocalVar{module->GetMPAllocator().Adapter()}; + AnnotationType *genericRet = nullptr; + + MIRSymbolTable *symTab = nullptr; + MIRTypeNameTable *typeNameTab = nullptr; + MIRLabelTable *labelTab = nullptr; + MIRPregTable *pregTab = nullptr; + MemPool *codeMemPool = nullptr; + MapleAllocator codeMemPoolAllocator{nullptr}; + uint32 callTimes = 0; + BlockNode *body = nullptr; + FuncAttrs funcAttrs{}; + uint32 flag = 0; + uint16 hashCode = 0; // for methodmetadata order + uint32 fileIndex = 0; // this function belongs to which file, used by VM for plugin manager + MIRInfoVector info{module->GetMPAllocator().Adapter()}; + MapleVector infoIsString{module->GetMPAllocator().Adapter()}; // tells if an entry has string value + MIRScope *scope = nullptr; + MapleMap *freqFirstMap = nullptr; // save bb frequency in its first_stmt, key is stmtId + MapleMap *freqLastMap = nullptr; // save bb frequency in its last_stmt, key is stmtId + MapleSet referedPregs{module->GetMPAllocator().Adapter()}; + bool referedRegsValid = false; + bool hasVlaOrAlloca = false; + bool withLocInfo = true; + bool isVisited = false; // only used in inline phase. + bool isDirty = false; + bool fromMpltInline = false; // Whether this function is imported from mplt_inline file or not. 
+  uint8_t layoutType = kLayoutUnused;
+  uint32 frameSize = 0;
+  uint32 upFormalSize = 0;
+  uint32 outParmSize = 0;
+  uint16 moduleID = 0;
+  uint32 funcSize = 0;  // size of code in words
+  uint32 tempCount = 0;
+  uint8 *formalWordsTypeTagged = nullptr;  // bit vector where the Nth bit tells whether
+                                           // the Nth word in the formal parameters area
+                                           // addressed upward from %%FP (that means
+                                           // the word at location (%%FP + N*4)) has
+                                           // typetag; if yes, the typetag is the word
+                                           // at (%%FP + N*4 + 4); the bitvector's size
+                                           // is given by BlockSize2BitVectorSize(upFormalSize)
+  uint8 *localWordsTypeTagged = nullptr;   // bit vector where the Nth bit tells whether
+                                           // the Nth word in the local stack frame
+                                           // addressed downward from %%FP (that means
+                                           // the word at location (%%FP - N*4)) has
+                                           // typetag; if yes, the typetag is the word
+                                           // at (%%FP - N*4 + 4); the bitvector's size
+                                           // is given by BlockSize2BitVectorSize(frameSize)
+  uint8 *formalWordsRefCounted = nullptr;  // bit vector where the Nth bit tells whether
+                                           // the Nth word in the formal parameters area
+                                           // addressed upward from %%FP (that means
+                                           // the word at location (%%FP + N*4)) points to
+                                           // a dynamic memory block that needs reference
+                                           // count; the bitvector's size is given by
+                                           // BlockSize2BitVectorSize(upFormalSize)
+  uint8 *localWordsRefCounted = nullptr;   // bit vector where the Nth bit tells whether
+                                           // the Nth word in the local stack frame
+                                           // addressed downward from %%FP (that means
+                                           // the word at location (%%FP - N*4)) points to
+                                           // a dynamic memory block that needs reference
+                                           // count; the bitvector's size is given by
+                                           // BlockSize2BitVectorSize(frameSize)
+  // uint16 numlabels;     // removed. label table size
+  // StmtNode **lbl2stmt;  // lbl2stmt table, removed;
+  // to hold unmangled class and function names
+  MeFunction *meFunc = nullptr;
+  EAConnectionGraph *eacg = nullptr;
+  IRProfileDesc *profileDesc = nullptr;
+  GStrIdx baseClassStrIdx{0};  // the string table index of base class name
+  GStrIdx baseFuncStrIdx{0};   // the string table index of base function name
+  // the string table index of base function name mangled with type info
+  GStrIdx baseFuncWithTypeStrIdx{0};
+  // funcname + types of args, no type of retv
+  GStrIdx baseFuncSigStrIdx{0};
+  GStrIdx signatureStrIdx{0};
+  MemPool *codeMemPoolTmp{nullptr};
+  MapleAllocator codeMemPoolTmpAllocator{nullptr};
+  bool useTmpMemPool = false;
+  PointerAttr returnKind = PointerAttr::kPointerUndeiced;
+  MapleMap<MIRSymbol*, PointerAttr> paramNonullTypeMap{module->GetMPAllocator().Adapter()};
+  FuncDesc funcDesc{};
+  MIRSymbol *profCtrTbl = nullptr;
+  uint32 nCtrs = 0;  // number of counters
+  uint64 fileLinenoChksum = 0;
+  uint64 cfgChksum = 0;
+  GcovFuncInfo *funcProfData = nullptr;
+  void DumpFlavorLoweredThanMmpl() const;
+  MIRFuncType *ReconstructFormals(const std::vector<MIRSymbol*> &symbols, bool clearOldArgs);
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_FUNCTION_H
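Before the lowering interface, a short hedged sketch of how the formals API of `MIRFunction` fits together. `func` and `paramSym` are hypothetical; only the member calls come from the header above.

```
// AddArgument wraps the symbol in a FormalDef and appends it to formalDefVec.
func->AddArgument(paramSym);

for (size_t i = 0; i < func->GetFormalCount(); ++i) {
  const MIRSymbol *formal = func->GetFormal(i);      // may be null for a prototype
  const std::string &name = func->GetFormalName(i);  // falls back to formalStrIdx
  // GetFormalIndex returns 0xffffffff when the symbol is not a formal.
  uint32 idx = func->GetFormalIndex(formal);
}
```

diff --git a/ecmascript/mapleall/maple_ir/include/mir_lower.h b/ecmascript/mapleall/maple_ir/include/mir_lower.h
new file mode 100644
index 0000000000000000000000000000000000000000..76ed4a29fe09778169f77e502b4759a3a5baef43
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_lower.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.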
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_LOWER_H
+#define MAPLE_IR_INCLUDE_MIR_LOWER_H
+#include <iostream>
+#include "mir_builder.h"
+#include "opcodes.h"
+
+namespace maple {
+// The base value for branch probability notes and edge probabilities.
+static constexpr int32 kProbAll = 10000;
+static constexpr int32 kProbLikely = 9000;
+static constexpr int32 kProbUnlikely = kProbAll - kProbLikely;
+constexpr uint32 kNodeFirstOpnd = 0;
+constexpr uint32 kNodeSecondOpnd = 1;
+constexpr uint32 kNodeThirdOpnd = 2;
+enum MirLowerPhase : uint8 {
+  kLowerUnder,
+  kLowerMe,
+  kLowerExpandArray,
+  kLowerBe,
+  kLowerCG,
+  kLowerLNO
+};
+
+constexpr uint32 kShiftLowerMe = 1U << kLowerMe;
+constexpr uint32 kShiftLowerExpandArray = 1U << kLowerExpandArray;
+constexpr uint32 kShiftLowerBe = 1U << kLowerBe;
+constexpr uint32 kShiftLowerCG = 1U << kLowerCG;
+constexpr uint32 kShiftLowerLNO = 1U << kLowerLNO;
+// check if a block node ends with an unconditional jump
+inline bool OpCodeNoFallThrough(Opcode opCode) {
+  return opCode == OP_goto || opCode == OP_return || opCode == OP_switch || opCode == OP_throw ||
+         opCode == OP_gosub || opCode == OP_retsub;
+}
+
+inline bool IfStmtNoFallThrough(const IfStmtNode &ifStmt) {
+  return OpCodeNoFallThrough(ifStmt.GetThenPart()->GetLast()->GetOpCode());
+}
+
+class MIRLower {
+ public:
+  static const std::set<std::string> kSetArrayHotFunc;
+
+  MIRLower(MIRModule &mod, MIRFunction *f) : mirModule(mod), mirFunc(f) {}
+
+  virtual ~MIRLower() = default;
+
+  const MIRFunction *GetMirFunc() const {
+    return mirFunc;
+  }
+
+  void SetMirFunc(MIRFunction *f) {
+    mirFunc = f;
+  }
+
+  void Init() {
+    mirBuilder = mirModule.GetMemPool()->New<MIRBuilder>(&mirModule);
+  }
+
+  virtual BlockNode *LowerIfStmt(IfStmtNode &ifStmt, bool recursive);
+  BlockNode *LowerSwitchStmt(SwitchNode *switchNode);
+  virtual BlockNode *LowerWhileStmt(WhileStmtNode&);
+  BlockNode *LowerDowhileStmt(WhileStmtNode&);
+  BlockNode *LowerDoloopStmt(DoloopNode&);
+  BlockNode *LowerBlock(BlockNode&);
+  BaseNode *LowerEmbeddedCandCior(BaseNode *x, StmtNode *curstmt, BlockNode *block);
+  void LowerCandCior(BlockNode &block);
+  void LowerBuiltinExpect(BlockNode &block);
+  void LowerFunc(MIRFunction &func);
+  BaseNode *LowerFarray(ArrayNode *array);
+  BaseNode *LowerCArray(ArrayNode *array);
+  void ExpandArrayMrt(MIRFunction &func);
+  IfStmtNode *ExpandArrayMrtIfBlock(IfStmtNode &node);
+  WhileStmtNode *ExpandArrayMrtWhileBlock(WhileStmtNode &node);
+  DoloopNode *ExpandArrayMrtDoloopBlock(DoloopNode &node);
+  ForeachelemNode *ExpandArrayMrtForeachelemBlock(ForeachelemNode &node);
+  BlockNode *ExpandArrayMrtBlock(BlockNode &block);
+  void AddArrayMrtMpl(BaseNode &exp, BlockNode &newblk);
+  MIRFuncType *FuncTypeFromFuncPtrExpr(BaseNode *x);
+  void SetLowerME() {
+    lowerPhase |= kShiftLowerMe;
+  }
+
+  void SetLowerLNO() {
+    lowerPhase |= kShiftLowerLNO;
+  }
+
+  void SetLowerExpandArray() {
+    lowerPhase |= kShiftLowerExpandArray;
+  }
+
+  void SetLowerBE() {
+    lowerPhase |= kShiftLowerBe;
+  }
+
+  void SetLowerCG() {
+    lowerPhase |= kShiftLowerCG;
+  }
+
+  uint8 GetOptLevel() const {
+    return optLevel;
+  }
+
+  void SetOptLevel(uint8 optlvl) {
+    optLevel = optlvl;
+  }
+
+  bool IsLowerME() const {
+    return lowerPhase & kShiftLowerMe;
+  }
+
+  bool IsLowerLNO() const {
+    return lowerPhase & kShiftLowerLNO;
+  }
+
+  bool IsLowerExpandArray() const {
+    return lowerPhase & kShiftLowerExpandArray;
+  }
+
+  bool IsLowerBE() const {
+    return lowerPhase & kShiftLowerBe;
+  }
+
+  bool IsLowerCG() const {
+    return lowerPhase & kShiftLowerCG;
+  }
+
+  static bool ShouldOptArrayMrt(const MIRFunction &func);
+
+  virtual bool InLFO() const { return false; }
+
+  GcovFuncInfo *GetFuncProfData() { return mirFunc->GetFuncProfData(); }
+  void CopyStmtFrequency(StmtNode *newStmt, StmtNode *oldStmt) {
+    DEBUG_ASSERT(GetFuncProfData() != nullptr, "nullptr check");
+    if (newStmt == oldStmt) {
+      return;
+    }
+    int64_t freq = GetFuncProfData()->GetStmtFreq(oldStmt->GetStmtID());
+    GetFuncProfData()->SetStmtFreq(newStmt->GetStmtID(), freq);
+  }
+
+ protected:
+  MIRModule &mirModule;
+
+ private:
+  MIRFunction *mirFunc;
+  MIRBuilder *mirBuilder = nullptr;
+  uint32 lowerPhase = 0;
+  uint8 optLevel = 0;
+  LabelIdx CreateCondGotoStmt(Opcode op, BlockNode &blk, const IfStmtNode &ifStmt);
+  void CreateBrFalseStmt(BlockNode &blk, const IfStmtNode &ifStmt);
+  void CreateBrTrueStmt(BlockNode &blk, const IfStmtNode &ifStmt);
+  void CreateBrFalseAndGotoStmt(BlockNode &blk, const IfStmtNode &ifStmt);
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_LOWER_H
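A hedged sketch of how the phase mask in `MIRLower` is typically driven. `mod` and `func` are hypothetical; each `Set*`/`Is*` pair simply sets and tests one `kShiftLower*` bit in `lowerPhase`, as declared above.

```
// Hypothetical setup: mod (MIRModule&), func (MIRFunction*).
MIRLower lower(mod, func);
lower.Init();            // allocates the MIRBuilder from the module's mempool
lower.SetLowerME();      // lowerPhase |= 1U << kLowerMe
lower.SetLowerExpandArray();
if (lower.IsLowerME()) { // bit test against the same mask
  lower.LowerFunc(*func);
}
```

diff --git a/ecmascript/mapleall/maple_ir/include/mir_module.h b/ecmascript/mapleall/maple_ir/include/mir_module.h
new file mode 100644
index 0000000000000000000000000000000000000000..3cd5016ee25776def373ed8d12e18a5968293646
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_module.h
@@ -0,0 +1,799 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_MODULE_H
+#define MAPLE_IR_INCLUDE_MIR_MODULE_H
+#include "types_def.h"
+#include "prim_types.h"
+#include "intrinsics.h"
+#include "opcodes.h"
+#include "mpl_logging.h"
+#include "muid.h"
+#include "profile.h"
+#include "namemangler.h"
+#include "gcov_profile.h"
+#include "string_utils.h"
+#if MIR_FEATURE_FULL
+#include
+#include
+#include
+#include
+#include
+#include
+#include "thread_env.h"
+#include "mempool.h"
+#include "mempool_allocator.h"
+#include "maple_string.h"
+#endif  // MIR_FEATURE_FULL
+
+namespace maple {
+class CallInfo;    // circular dependency exists, no other choice
+class MIRModule;   // circular dependency exists, no other choice
+class MIRBuilder;  // circular dependency exists, no other choice
+using MIRModulePtr = MIRModule*;
+using MIRBuilderPtr = MIRBuilder*;
+
+enum MIRFlavor {
+  kFlavorUnknown,
+  kFeProduced,
+  kMeProduced,
+  kBeLowered,
+  kFlavorMbc,
+  kMmpl,
+  kCmplV1,
+  kCmpl,  // == CMPLv2
+  kFlavorLmbc,
+};
+
+enum MIRSrcLang {
+  kSrcLangUnknown,
+  kSrcLangC,
+  kSrcLangJs,
+  kSrcLangCPlusPlus,
+  kSrcLangJava,
+  kSrcLangChar,
+  // SrcLangSwift : when clang adds support for Swift.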
+};
+
+class CalleePair {
+ public:
+  CalleePair(PUIdx id, int32_t index)
+      : id(id), index(index) {}
+  bool operator<(const CalleePair &func) const {
+    return (id < func.id) || (id == func.id && index < func.index);
+  }
+
+ private:
+  PUIdx id;
+  int32_t index;
+};
+
+class CallerSummary {
+ public:
+  CallerSummary(PUIdx id, uint32 stmtId)
+      : id(id), stmtId(stmtId) {}
+  PUIdx GetPuidx() const { return id; }
+  uint32 GetStmtId() const { return stmtId; }
+
+ private:
+  PUIdx id;
+  uint32 stmtId;
+};
+
+// This data structure is used by ipa-cp; the important expressions are the ones
+// that appear in condition statements.
+class ImpExpr {
+ public:
+  ImpExpr(uint32 stmtId, uint32 paramIndex)
+      : stmtId(stmtId), paramIndex(paramIndex) {}
+  uint32 GetStmtId() const { return stmtId; }
+  uint32 GetParamIndex() const { return paramIndex; }
+
+ private:
+  uint32 stmtId;
+  uint32 paramIndex;
+};
+
+// blkSize gives the size of the memory block in bytes; there are (blkSize + 3) / 4
+// words; 1 bit for each word, so the bit vector's length in bytes is
+// ((blkSize + 3) / 4 + 7) / 8, rounded up to a word boundary.
+// e.g. blkSize = 40 bytes -> 10 words -> (10 + 7) / 8 = 2 bytes -> rounded up to 4.
+static inline uint32 BlockSize2BitVectorSize(uint32 blkSize) {
+  uint32 bitVectorLen = ((blkSize + 3) / 4 + 7) / 8;
+  return ((bitVectorLen + 3) >> 2) << 2;  // round up to word boundary
+}
+
+#if MIR_FEATURE_FULL
+class MIRType;            // circular dependency exists, no other choice
+class MIRFunction;        // circular dependency exists, no other choice
+class MIRSymbol;          // circular dependency exists, no other choice
+class MIRSymbolTable;     // circular dependency exists, no other choice
+class MIRFloatConst;      // circular dependency exists, no other choice
+class MIRDoubleConst;     // circular dependency exists, no other choice
+class MIRBuilder;         // circular dependency exists, no other choice
+class DebugInfo;          // circular dependency exists, no other choice
+class BinaryMplt;         // circular dependency exists, no other choice
+class EAConnectionGraph;  // circular dependency exists, no other choice
+using MIRInfoPair = std::pair<GStrIdx, uint32>;
+using MIRInfoVector = MapleVector<MIRInfoPair>;
+using MIRDataPair = std::pair<GStrIdx, std::vector<uint8>>;
+using MIRDataVector = MapleVector<MIRDataPair>;
+constexpr int kMaxEncodedValueLen = 10;
+struct EncodedValue {
+  uint8 encodedValue[kMaxEncodedValueLen] = { 0 };
+};
+
+class MIRTypeNameTable {
+ public:
+  explicit MIRTypeNameTable(MapleAllocator &allocator)
+      : gStrIdxToTyIdxMap(std::less<GStrIdx>(), allocator.Adapter()) {}
+
+  ~MIRTypeNameTable() = default;
+
+  const MapleMap<GStrIdx, TyIdx> &GetGStrIdxToTyIdxMap() const {
+    return gStrIdxToTyIdxMap;
+  }
+
+  TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const {
+    auto it = gStrIdxToTyIdxMap.find(idx);
+    if (it == gStrIdxToTyIdxMap.end()) {
+      return TyIdx(0);
+    }
+    return it->second;
+  }
+
+  void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) {
+    gStrIdxToTyIdxMap[gStrIdx] = tyIdx;
+  }
+
+  size_t Size() const {
+    return gStrIdxToTyIdxMap.size();
+  }
+
+ private:
+  MapleMap<GStrIdx, TyIdx> gStrIdxToTyIdxMap;
+};
+
+class MIRModule {
+ public:
+  bool firstInline = true;
+  using CallSite = std::pair<CallInfo*, PUIdx>;
+
+  explicit MIRModule(const std::string &fn = "");
+  MIRModule(MIRModule &p) = delete;
+  MIRModule &operator=(const MIRModule &module) = delete;
+  ~MIRModule();
+
+  MemPool *GetMemPool() const {
+    return memPool;
+  }
+  MemPool *GetPragmaMemPool() {
+    return pragmaMemPool;
+  }
+  MapleAllocator &GetPragmaMPAllocator() {
+    return pragmaMemPoolAllocator;
+  }
+  const MapleAllocator &GetMPAllocator() const {
+    return memPoolAllocator;
+  }
+
+  void ReleasePragmaMemPool() {
+    if (pragmaMemPool) {
memPoolCtrler.DeleteMemPool(pragmaMemPool); + } + pragmaMemPool = nullptr; + } + + MapleAllocator &GetMPAllocator() { + return memPoolAllocator; + } + + const auto &GetFunctionList() const { + return functionList; + } + auto &GetFunctionList() { + return functionList; + } + + const MapleVector &GetImportedMplt() const { + return importedMplt; + } + void PushbackImportedMplt(const std::string &importFileName) { + importedMplt.push_back(importFileName); + } + + MIRTypeNameTable *GetTypeNameTab() { + return typeNameTab; + } + + const MapleVector &GetTypeDefOrder() const { + return typeDefOrder; + } + void PushbackTypeDefOrder(GStrIdx gstrIdx) { + typeDefOrder.push_back(gstrIdx); + } + + void AddClass(TyIdx tyIdx); + void RemoveClass(TyIdx tyIdx); + + void SetCurFunction(MIRFunction *f) { + if (ThreadEnv::IsMeParallel()) { + std::lock_guard guard(curFunctionMutex); + auto tid = std::this_thread::get_id(); + curFunctionMap[tid] = f; + return; // DO NOT delete the return statement + } + curFunction = f; + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + const MapleSet &GetSymbolSet() const { + return symbolSet; + } + + const MapleVector &GetSymbolDefOrder() const { + return symbolDefOrder; + } + + Profile &GetProfile() { + return profile; + } + + GcovProfileData* GetGcovProfile() { + return gcovProfile; + } + void SetGcovProfile(GcovProfileData* info) { + gcovProfile = info; + } + + void SetSomeSymbolNeedForDecl(bool s) { + someSymbolNeedForwDecl = s; + } + + MIRFunction *CurFunction() const { + if (ThreadEnv::IsMeParallel()) { + std::lock_guard guard(curFunctionMutex); + auto tid = std::this_thread::get_id(); + auto pair = curFunctionMap.find(tid); + return pair->second; + } + return curFunction; + } + + MemPool *CurFuncCodeMemPool() const; + MapleAllocator *CurFuncCodeMemPoolAllocator() const; + MapleAllocator &GetCurFuncCodeMPAllocator() const; + void AddExternStructType(TyIdx tyIdx); + void AddExternStructType(const MIRType *t); + void AddSymbol(StIdx stIdx); + void AddSymbol(const MIRSymbol *s); + void AddFunction(MIRFunction *pf) { + functionList.push_back(pf); + } + + void DumpGlobals(bool emitStructureType = true) const; + void Dump(bool emitStructureType = true, const std::unordered_set *dumpFuncSet = nullptr) const; + void DumpToFile(const std::string &fileNameStr, bool emitStructureType = true) const; + void DumpInlineCandidateToFile(const std::string &fileNameStr); + void DumpDefType(); + const std::string &GetFileNameFromFileNum(uint32 fileNum) const; + + void DumpToHeaderFile(bool binaryMplt, const std::string &outputName = ""); + void DumpToCxxHeaderFile(std::set &leafClasses, const std::string &pathToOutf) const; + void DumpClassToFile(const std::string &path) const; + void DumpFunctionList(const std::unordered_set *dumpFuncSet) const; + void DumpGlobalArraySymbol() const; + void Emit(const std::string &outFileName) const; + uint32 GetAndIncFloatNum() { + return floatNum++; + } + + void SetEntryFunction(MIRFunction *f) { + entryFunc = f; + } + + MIRFunction *GetEntryFunction() const { + return entryFunc; + } + + MIRFunction *FindEntryFunction(); + uint32 GetFileinfo(GStrIdx strIdx) const; + void OutputAsciiMpl(const char *phaseName, const char *suffix, + const std::unordered_set *dumpFuncSet = nullptr, + bool emitStructureType = true, bool binaryform = false); + void OutputFunctionListAsciiMpl(const std::string &phaseName); + const std::string &GetFileName() const { + return fileName; + } + + const std::string &GetFileText() const { + return fileText; + } + + bool 
IsNeedFile() const { + return needFile; + } + + std::string GetFileNameAsPostfix() const; + void SetFileName(const std::string &name) { + fileName = name; + } + + std::string GetProfileDataFileName() const { + std::string profileDataFileName = fileName.substr(0, fileName.find_last_of(".")); + std::replace(profileDataFileName.begin(), profileDataFileName.end(), '.', '_'); + std::replace(profileDataFileName.begin(), profileDataFileName.end(), '-', '_'); + std::replace(profileDataFileName.begin(), profileDataFileName.end(), '/', '_'); + profileDataFileName = profileDataFileName + namemangler::kProfFileNameExt; + return profileDataFileName; + } + + bool IsJavaModule() const { + return srcLang == kSrcLangJava; + } + + bool IsCModule() const { + return srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus; + } + + bool IsCPlusPlusModule() const { + return srcLang == kSrcLangCPlusPlus; + } + + bool IsCharModule() const { + return srcLang == kSrcLangChar; + } + + void addSuperCall(const std::string &func) { + (void)superCallSet.insert(func); + } + + bool findSuperCall(const std::string &func) const { + return superCallSet.find(func) != superCallSet.end(); + } + + void ReleaseCurFuncMemPoolTmp(); + void SetUseFuncCodeMemPoolTmp() { + useFuncCodeMemPoolTmp = true; + } + + void ResetUseFuncCodeMemPoolTmp() { + useFuncCodeMemPoolTmp = false; + } + + void SetFuncInfoPrinted() const; + size_t GetOptFuncsSize() const { + return optimizedFuncs.size(); + } + + void AddOptFuncs(MIRFunction *func) { + optimizedFuncs.emplace(func); + } + + const MapleSet &GetOptFuncs() const { + return optimizedFuncs; + } + + bool IsOptFunc(MIRFunction *func) const { + if (std::find(optimizedFuncs.begin(), optimizedFuncs.end(), func) != optimizedFuncs.end()) { + return true; + } + return false; + } + + void AddOptFuncsType(MIRType *type) { + optimizedFuncsType.emplace(type); + } + + const MapleMap*> &GetPuIdxFieldInitializedMap() const { + std::shared_lock lock(fieldMapMutex); + return puIdxFieldInitializedMap; + } + void SetPuIdxFieldSet(PUIdx puIdx, MapleSet *fieldIDSet) { + std::unique_lock lock(fieldMapMutex); + puIdxFieldInitializedMap[puIdx] = fieldIDSet; + } + + std::map>> &GetCalleeParamAboutInt() { + return calleeParamAboutInt; + } + + std::map>> &GetCalleeParamAboutFloat() { + return calleeParamAboutFloat; + } + + std::map>> &GetCalleeParamAboutDouble() { + return calleeParamAboutDouble; + } + + std::map> &GetFuncImportantExpr() { + return funcImportantExpr; + } + + const auto &GetRealCaller() const { + return realCaller; + } + + auto &GetRealCaller() { + return realCaller; + } + + const MapleSet &GetInlineGlobals() const { + return inliningGlobals; + } + void InsertInlineGlobal(uint32_t global) { + (void)inliningGlobals.insert(global); + } + + const MapleSet *GetPUIdxFieldInitializedMapItem(PUIdx key) const { + std::shared_lock lock(fieldMapMutex); + auto it = puIdxFieldInitializedMap.find(key); + if (it != puIdxFieldInitializedMap.end()) { + return it->second; + } + return nullptr; + } + + std::ostream &GetOut() const { + return out; + } + + const MIRBuilderPtr &GetMIRBuilder() const { + return mirBuilder; + } + + const std::string &GetEntryFuncName() const { + return entryFuncName; + } + void SetEntryFuncName(const std::string &entryFunctionName) { + entryFuncName = entryFunctionName; + } + + TyIdx GetThrowableTyIdx() const { + return throwableTyIdx; + } + void SetThrowableTyIdx(TyIdx throwableTypeIndex) { + throwableTyIdx = throwableTypeIndex; + } + + bool GetWithProfileInfo() const { + return withProfileInfo; + 
} + void SetWithProfileInfo(bool withProfInfo) { + withProfileInfo = withProfInfo; + } + + BinaryMplt *GetBinMplt() { + return binMplt; + } + void SetBinMplt(BinaryMplt *binaryMplt) { + binMplt = binaryMplt; + } + + bool IsInIPA() const { + return inIPA; + } + bool IsWithMe() const { + return withMe; + } + void SetWithMe(bool isWithMe) { + withMe = isWithMe; + } + void SetInIPA(bool isInIPA) { + inIPA = isInIPA; + } + + void SetFileText(const std::string &inText) { + fileText = inText; + needFile = false; + } + + MIRInfoVector &GetFileInfo() { + return fileInfo; + } + void PushFileInfoPair(MIRInfoPair pair) { + fileInfo.push_back(pair); + } + void SetFileInfo(const MIRInfoVector &fileInf) { + fileInfo = fileInf; + } + + MapleVector &GetFileInfoIsString() { + return fileInfoIsString; + } + void SetFileInfoIsString(const MapleVector &fileInfoIsStr) { + fileInfoIsString = fileInfoIsStr; + } + void PushFileInfoIsString(bool isString) { + fileInfoIsString.push_back(isString); + } + + const MIRDataVector &GetFileData() const { + return fileData; + } + void PushbackFileData(const MIRDataPair &pair) { + fileData.push_back(pair); + } + + const MIRInfoVector &GetSrcFileInfo() const { + return srcFileInfo; + } + void PushbackFileInfo(const MIRInfoPair &pair) { + srcFileInfo.push_back(pair); + } + + const MIRFlavor &GetFlavor() const { + return flavor; + } + void SetFlavor(MIRFlavor flv) { + flavor = flv; + } + + void SetSrcLang(MIRSrcLang sourceLanguage) { + srcLang = sourceLanguage; + } + + uint16 GetID() const { + return id; + } + + void SetID(uint16 num) { + id = num; + } + + uint32 GetGlobalMemSize() const { + return globalMemSize; + } + void SetGlobalMemSize(uint32 globalMemberSize) { + globalMemSize = globalMemberSize; + } + + uint8 *GetGlobalBlockMap() { + return globalBlkMap; + } + void SetGlobalBlockMap(uint8 *globalBlockMap) { + globalBlkMap = globalBlockMap; + } + + uint8 *GetGlobalWordsTypeTagged() { + return globalWordsTypeTagged; + } + void SetGlobalWordsTypeTagged(uint8 *globalWordsTyTagged) { + globalWordsTypeTagged = globalWordsTyTagged; + } + + uint8 *GetGlobalWordsRefCounted() { + return globalWordsRefCounted; + } + void SetGlobalWordsRefCounted(uint8 *counted) { + globalWordsRefCounted = counted; + } + + uint32 GetNumFuncs() const { + return numFuncs; + } + + void SetNumFuncs(uint32 numFunc) { + numFuncs = numFunc; + } + + MapleVector &GetImportFiles() { + return importFiles; + } + + void PushbackImportPath(GStrIdx path) { + importPaths.push_back(path); + } + + MapleVector &GetAsmDecls() { + return asmDecls; + } + + const MapleSet &GetClassList() const { + return classList; + } + + const std::map> &GetMethod2TargetMap() const { + return method2TargetMap; + } + + std::vector &GetMemFromMethod2TargetMap(PUIdx methodPuIdx) { + return method2TargetMap[methodPuIdx]; + } + + void SetMethod2TargetMap(const std::map> &map) { + method2TargetMap = map; + } + + void AddMemToMethod2TargetMap(PUIdx idx, const std::vector &callSite) { + method2TargetMap[idx] = callSite; + } + + bool HasTargetHash(PUIdx idx, uint32 key) const { + auto it = method2TargetHash.find(idx); + if (it == method2TargetHash.end()) { + return false; + } + return it->second.find(key) != it->second.end(); + } + void InsertTargetHash(PUIdx idx, uint32 key) { + (void)method2TargetHash[idx].insert(key); + } + void AddValueToMethod2TargetHash(PUIdx idx, const std::unordered_set &value) { + method2TargetHash[idx] = value; + } + + const std::map &GetEASummary() const { + return eaSummary; + } + void SetEAConnectionGraph(GStrIdx 
funcNameIdx, EAConnectionGraph *eaCg) { + eaSummary[funcNameIdx] = eaCg; + } + + DebugInfo *GetDbgInfo() const { + return dbgInfo; + } + + void SetWithDbgInfo(bool v) { + withDbgInfo = v; + } + + bool IsWithDbgInfo() const { + return withDbgInfo; + } + + bool HasPartO2List() const { + return hasPartO2List; + } + + void SetHasPartO2List(bool value) { + hasPartO2List = value; + } + + void InitPartO2List(const std::string &list); + bool IsInPartO2List(const GStrIdx &idx) const { + return partO2FuncList.count(idx) > 0; + } + + void SetBaseName(const std::string &curbaseName) { + baseName = curbaseName; + } + const std::string &GetBaseName() const { + return baseName; + } + void SetOutputFileName(const std::string &curOFileName) { + outputFileName = curOFileName; + } + const std::string &GetOutputFileName() const { + return outputFileName; + } + void SetInputFileName(const std::string &curInFileName) { + inputFileName = curInFileName; + } + const std::string &GetInputFileName() const { + return inputFileName; + } + + uint32 GetUniqueID() const { + return UINT_MAX; + } + + bool HasNotWarned(uint32 postion, uint32 stmtOriginalID); + + private: + void DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set &dumpedClasses) const; + + MemPool *memPool; + MemPool *pragmaMemPool; + MapleAllocator memPoolAllocator; + MapleAllocator pragmaMemPoolAllocator; + MapleList functionList; // function table in the order of the appearance of function bodies; it + // excludes prototype-only functions + MapleVector importedMplt; + MIRTypeNameTable *typeNameTab; + MapleVector typeDefOrder; + + MapleSet externStructTypeSet; + MapleSet symbolSet; + MapleVector symbolDefOrder; + Profile profile; + GcovProfileData* gcovProfile; + bool someSymbolNeedForwDecl = false; // some symbols' addressses used in initialization + + std::ostream &out; + MIRBuilder *mirBuilder; + std::string entryFuncName = ""; // name of the entry function + std::string fileName; + std::string fileText; + bool needFile = true; + TyIdx throwableTyIdx{0}; // a special type that is the base of java exception type. 
only used for java + bool withProfileInfo = false; + + DebugInfo *dbgInfo = nullptr; + bool withDbgInfo = false; + + // for cg in mplt + BinaryMplt *binMplt = nullptr; + bool inIPA = false; + bool withMe = true; + MIRInfoVector fileInfo; // store info provided under fileInfo keyword + MapleVector fileInfoIsString; // tells if an entry has string value + MIRDataVector fileData; + MIRInfoVector srcFileInfo; // store info provided under srcFileInfo keyword + MIRFlavor flavor = kFlavorUnknown; + MIRSrcLang srcLang = kSrcLangUnknown; // the source language + uint16 id = 0xffff; + uint32 globalMemSize = 0; // size of storage space for all global variables + uint8 *globalBlkMap = nullptr; // the memory map of the block containing all the + // globals, for specifying static initializations + uint8 *globalWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in globalBlkMap has typetag; + // if yes, the typetag is the N+1th word; the + // bitvector's size is given by + // BlockSize2BitvectorSize(globalMemSize) + uint8 *globalWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word points to a reference-counted + // dynamic memory block; the bitvector's size + // is given by BlockSize2BitvectorSize(globalMemSize) + uint32 numFuncs = 0; // because puIdx 0 is reserved, numFuncs is also the highest puIdx + MapleVector importFiles; + MapleVector importPaths; + MapleVector asmDecls; + MapleSet classList; + + std::map> method2TargetMap; + std::map> method2TargetHash; + std::map eaSummary; + + bool useFuncCodeMemPoolTmp = false; + MIRFunction *entryFunc = nullptr; + uint32 floatNum = 0; + // curFunction for single thread, curFunctionMap for multiple threads + std::map curFunctionMap; + mutable std::mutex curFunctionMutex; + MIRFunction *curFunction; + MapleSet optimizedFuncs; + MapleSet optimizedFuncsType; + // Add the field for decouple optimization + std::unordered_set superCallSet; + // record all the fields that are initialized in the constructor. module scope, + // if puIdx doesn't appear in this map, it writes to all field id + // if puIdx appears in the map, but it's corresponding MapleSet is nullptr, it writes nothing fieldID + // if puIdx appears in the map, and the value of first corresponding MapleSet is 0, the puIdx appears in this module + // and writes to all field id otherwise, it writes the field ids in MapleSet + MapleMap*> puIdxFieldInitializedMap; + mutable std::shared_timed_mutex fieldMapMutex; + std::map, GStrIdx> realCaller; + MapleSet inliningGlobals; // global symbols accessed, used for inlining + bool hasPartO2List = false; + MapleSet partO2FuncList; + std::string inputFileName = ""; + std::string baseName = ""; + std::string outputFileName = ""; + MapleMap> safetyWarningMap; // indexed map for large module. + std::map>> calleeParamAboutInt; + std::map>> calleeParamAboutDouble; + std::map>> calleeParamAboutFloat; + std::map> funcImportantExpr; +}; +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_MODULE_H diff --git a/ecmascript/mapleall/maple_ir/include/mir_nodes.h b/ecmascript/mapleall/maple_ir/include/mir_nodes.h new file mode 100755 index 0000000000000000000000000000000000000000..5280a41967a9d7dd561200b46d530c6c3e562069 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/mir_nodes.h @@ -0,0 +1,3749 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_NODES_H +#define MAPLE_IR_INCLUDE_MIR_NODES_H +#include +#include +#include +#include "opcodes.h" +#include "opcode_info.h" +#include "mir_type.h" +#include "cmpl.h" +#include "mir_module.h" +#include "mir_const.h" +#include "maple_string.h" +#include "src_position.h" +#include "ptr_list_ref.h" + +namespace maple { +constexpr size_t kFirstOpnd = 0; +constexpr size_t kSecondOpnd = 1; +constexpr size_t kThirdOpnd = 2; + +extern MIRModule *theMIRModule; +extern void EmitStr(const MapleString &mplStr); + +class MIRPregTable; // circular dependency exists, no other choice +class TypeTable; // circular dependency exists, no other choice +class VerifyResult; // circular dependency exists, no other choice + +struct RegFieldPair { + public: + RegFieldPair() = default; + + RegFieldPair(FieldID fidx, PregIdx pidx) : fieldID(fidx), pregIdx(pidx) {} + + bool IsReg() const { + return pregIdx > 0; + } + + FieldID GetFieldID() const { + return fieldID; + } + + PregIdx GetPregIdx() const { + return pregIdx; + } + + void SetFieldID(FieldID fld) { + fieldID = fld; + } + + void SetPregIdx(PregIdx idx) { + pregIdx = idx; + } + + private: + FieldID fieldID = 0; + PregIdx pregIdx = 0; +}; + +using CallReturnPair = std::pair; +using CallReturnVector = MapleVector; +// Made public so that other modules (such as maplebe) can print intrinsic names +// in debug information or comments in assembly files. 
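As a minimal sketch of the kind of external use the comment above motivates for the `GetIntrinsicName` declaration that follows (the emitter context and the `EmitIntrinsicComment` helper are illustrative assumptions, not part of this patch):

```cpp
#include <ostream>
#include "mir_nodes.h"  // declares GetIntrinsicName and IntrinsicopNode

namespace maple {
// Hypothetical back-end helper: annotate emitted assembly with the readable
// intrinsic name instead of the raw MIRIntrinsicID value.
void EmitIntrinsicComment(const IntrinsicopNode &node, std::ostream &asmOut) {
  asmOut << "\t// intrinsic: " << GetIntrinsicName(node.GetIntrinsic()) << '\n';
}
}  // namespace maple
```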
+const char *GetIntrinsicName(MIRIntrinsicID intrn); +class BaseNode : public BaseNodeT { + public: + explicit BaseNode(Opcode o) { + op = o; + ptyp = kPtyInvalid; + typeFlag = 0; + numOpnds = 0; + } + + BaseNode(Opcode o, uint8 numOpr) { + op = o; + ptyp = kPtyInvalid; + typeFlag = 0; + numOpnds = numOpr; + } + + BaseNode(const Opcode o, const PrimType typ, uint8 numOpr) { + op = o; + ptyp = typ; + typeFlag = 0; + numOpnds = numOpr; + } + + virtual ~BaseNode() = default; + + virtual BaseNode *CloneTree(MapleAllocator &allocator) const { + return allocator.GetMemPool()->New(*this); + } + + virtual void DumpBase(int32 indent) const; + + virtual void Dump(int32 indent) const { + DumpBase(indent); + } + + void Dump() const { + Dump(0); + LogInfo::MapleLogger() << '\n'; + } + + virtual uint8 SizeOfInstr() const { + return kOpcodeInfo.GetTableItemAt(GetOpCode()).instrucSize; + } + + const char *GetOpName() const; + bool MayThrowException(); + virtual size_t NumOpnds() const { + return numOpnds; + } + + virtual BaseNode *Opnd(size_t) const { + DEBUG_ASSERT(0, "override needed"); + return nullptr; + } + + virtual void SetOpnd(BaseNode*, size_t) { + DEBUG_ASSERT(0, "This should not happen"); + } + + virtual bool IsLeaf() const { + return true; + } + + virtual CallReturnVector *GetCallReturnVector() { + return nullptr; + } + + virtual MIRType *GetCallReturnType() { + return nullptr; + } + + virtual bool IsUnaryNode() const { + return false; + } + + virtual bool IsBinaryNode() const { + return false; + } + + virtual bool IsTernaryNode() const { + return false; + } + + virtual bool IsNaryNode() const { + return false; + } + + bool IsCondBr() const { + return kOpcodeInfo.IsCondBr(GetOpCode()); + } + + bool IsConstval() const { + return op == OP_constval; + } + + virtual bool Verify() const { + return true; + } + + virtual bool Verify(VerifyResult &) const { + return Verify(); + } + + virtual bool IsSSANode() const { + return false; + } + + virtual bool IsSameContent(const BaseNode *node) const { + return false; + } +}; + +class UnaryNode : public BaseNode { + public: + explicit UnaryNode(Opcode o) : BaseNode(o, 1) {} + + UnaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, 1) {} + + UnaryNode(Opcode o, PrimType typ, BaseNode *expr) : BaseNode(o, typ, 1), uOpnd(expr) {} + + virtual ~UnaryNode() override = default; + + void DumpOpnd(const MIRModule &mod, int32 indent) const; + void DumpOpnd(int32 indent) const; + void Dump(int32 indent) const override; + bool Verify() const override; + + bool Verify(VerifyResult &) const override { + return Verify(); + } + + UnaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(uOpnd->CloneTree(allocator), 0); + return node; + } + + BaseNode *Opnd(size_t) const override { + return uOpnd; + } + + size_t NumOpnds() const override { + return 1; + } + + void SetOpnd(BaseNode *node, size_t) override { + uOpnd = node; + } + + bool IsLeaf() const override { + return false; + } + + bool IsUnaryNode() const override { + return true; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + BaseNode *uOpnd = nullptr; +}; + +class TypeCvtNode : public UnaryNode { + public: + explicit TypeCvtNode(Opcode o) : UnaryNode(o) {} + + TypeCvtNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + TypeCvtNode(Opcode o, PrimType typ, PrimType fromtyp, BaseNode *expr) + : UnaryNode(o, typ, expr), fromPrimType(fromtyp) {} + + virtual ~TypeCvtNode() = default; + + void Dump(int32 indent) const 
override; + bool Verify() const override; + + bool Verify(VerifyResult &) const override { + return Verify(); + } + + TypeCvtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + PrimType FromType() const { + return fromPrimType; + } + + void SetFromType(PrimType from) { + fromPrimType = from; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + PrimType fromPrimType = kPtyInvalid; +}; + +// used for retype +class RetypeNode : public TypeCvtNode { + public: + RetypeNode() : TypeCvtNode(OP_retype) {} + + explicit RetypeNode(PrimType typ) : TypeCvtNode(OP_retype, typ) {} + + RetypeNode(PrimType typ, PrimType fromtyp, TyIdx idx, BaseNode *expr) + : TypeCvtNode(OP_retype, typ, fromtyp, expr), tyIdx(idx) {} + + virtual ~RetypeNode() = default; + void Dump(int32 indent) const override; + bool Verify(VerifyResult &verifyResult) const override; + + RetypeNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + const TyIdx &GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(const TyIdx tyIdxVal) { + tyIdx = tyIdxVal; + } + + private: + bool VerifyPrimTypesAndOpnd() const; + bool CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; + bool VerifyCompleteMIRType(const MIRType &from, const MIRType &to, bool isJavaRefType, + VerifyResult &verifyResult) const; + bool VerifyJarrayDimention(const MIRJarrayType &from, const MIRJarrayType &to, VerifyResult &verifyResult) const; + bool IsJavaAssignable(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; + + bool BothPointerOrJarray(const MIRType &from, const MIRType &to) const { + if (from.GetKind() != to.GetKind()) { + return false; + } + return from.IsMIRPtrType() || from.IsMIRJarrayType(); + } + + bool IsInterfaceOrClass(const MIRType &mirType) const { + return mirType.IsMIRClassType() || mirType.IsMIRInterfaceType(); + } + + bool IsJavaRefType(const MIRType &mirType) const { + return mirType.IsMIRJarrayType() || mirType.IsMIRClassType() || mirType.IsMIRInterfaceType(); + } + + TyIdx tyIdx = TyIdx(0); +}; + +// used for extractbits, sext, zext +class ExtractbitsNode : public UnaryNode { + public: + explicit ExtractbitsNode(Opcode o) : UnaryNode(o) {} + + ExtractbitsNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size) + : UnaryNode(o, typ), bitsOffset(offset), bitsSize(size) {} + + ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *expr) + : UnaryNode(o, typ, expr), bitsOffset(offset), bitsSize(size) {} + + virtual ~ExtractbitsNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + ExtractbitsNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + uint8 GetBitsOffset() const { + return bitsOffset; + } + + void SetBitsOffset(uint8 offset) { + bitsOffset = offset; + } + + uint8 GetBitsSize() const { + return bitsSize; + } + + void SetBitsSize(uint8 size) { + bitsSize = size; + } + + private: + uint8 bitsOffset = 0; + uint8 bitsSize = 0; +}; + +class GCMallocNode : public BaseNode { + public: + explicit GCMallocNode(Opcode o) : BaseNode(o) {} + + 
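`ExtractbitsNode` above only carries a bit offset and a bit size; the operation itself is ordinary field extraction. A self-contained model of what an unsigned `extractbits` with those two attributes computes (plain C++ with no MapleIR dependencies; the node stores the parameters, and code with this semantics is what a back end would generate):

```cpp
#include <cassert>
#include <cstdint>

// Model of unsigned OP_extractbits: take bitsSize bits starting at bitsOffset.
uint32_t ExtractBits(uint32_t value, uint8_t bitsOffset, uint8_t bitsSize) {
  uint32_t mask = (bitsSize >= 32) ? ~0u : ((1u << bitsSize) - 1u);
  return (value >> bitsOffset) & mask;
}

int main() {
  assert(ExtractBits(0xABCDu, 8, 4) == 0xBu);  // bits [8, 12) of 0xABCD
  return 0;
}
```

`sext` and `zext` reuse the same node class; for them only `bitsSize` is meaningful, the difference being how the extracted field is extended.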
GCMallocNode(Opcode o, PrimType typ, TyIdx tIdx) : BaseNode(o, typ, 0), tyIdx(tIdx) {} + + virtual ~GCMallocNode() = default; + + void Dump(int32 indent) const override; + + GCMallocNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + return node; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + void SetOrigPType(PrimType type) { + origPrimType = type; + } + + private: + TyIdx tyIdx = TyIdx(0); + PrimType origPrimType = kPtyInvalid; +}; + +class JarrayMallocNode : public UnaryNode { + public: + explicit JarrayMallocNode(Opcode o) : UnaryNode(o) {} + + JarrayMallocNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx) : UnaryNode(o, typ), tyIdx(typeIdx) {} + + JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx, BaseNode *opnd) : UnaryNode(o, typ, opnd), tyIdx(typeIdx) {} + + virtual ~JarrayMallocNode() = default; + + void Dump(int32 indent) const override; + + JarrayMallocNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + private: + TyIdx tyIdx = TyIdx(0); +}; + +// iaddrof also use this node +class IreadNode : public UnaryNode { + public: + explicit IreadNode(Opcode o) : UnaryNode(o) {} + + IreadNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid) : UnaryNode(o, typ), tyIdx(typeIdx), fieldID(fid) {} + + IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid, BaseNode *expr) + : UnaryNode(o, typ, expr), tyIdx(typeIdx), fieldID(fid) {} + + virtual ~IreadNode() = default; + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + const TyIdx &GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(const TyIdx tyIdxVal) { + tyIdx = tyIdxVal; + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID fieldIDVal) { + fieldID = fieldIDVal; + } + + bool IsSameContent(const BaseNode *node) const override; + + // the base of an address expr is either a leaf or an iread + BaseNode &GetAddrExprBase() const { + BaseNode *base = Opnd(0); + while (base->NumOpnds() != 0 && base->GetOpCode() != OP_iread) { + base = base->Opnd(0); + } + return *base; + } + + bool IsVolatile() const; + + MIRType *GetType() const; + + protected: + TyIdx tyIdx = TyIdx(0); + FieldID fieldID = 0; +}; + +// IaddrofNode has the same member fields and member methods as IreadNode +using IaddrofNode = IreadNode; + +class IreadoffNode : public UnaryNode { + public: + IreadoffNode() : UnaryNode(OP_ireadoff) {} + + IreadoffNode(PrimType ptyp, int32 ofst) : UnaryNode(OP_ireadoff, ptyp), offset(ofst) {} + + IreadoffNode(PrimType ptyp, BaseNode *opnd, int32 ofst) : UnaryNode(OP_ireadoff, ptyp, opnd), offset(ofst) {} + + virtual ~IreadoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + int32 
GetOffset() const { + return offset; + } + + void SetOffset(int32 offsetValue) { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + int32 offset = 0; +}; + +class IreadFPoffNode : public BaseNode { + public: + IreadFPoffNode() : BaseNode(OP_ireadfpoff) {} + + IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp, 0), offset(ofst) {} + + virtual ~IreadFPoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadFPoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + return node; + } + + int32 GetOffset() const { + return offset; + } + + void SetOffset(int32 offsetValue) { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + int32 offset = 0; +}; + +class IreadPCoffNode : public IreadFPoffNode { + public: + + IreadPCoffNode(Opcode o, PrimType typ, uint8 numopns) { + op = o; + ptyp = typ; + numOpnds = numopns; + } + virtual ~IreadPCoffNode() {} +}; + +typedef IreadPCoffNode AddroffPCNode; + +class BinaryOpnds { + public: + virtual ~BinaryOpnds() = default; + + virtual void Dump(int32 indent) const; + + BaseNode *GetBOpnd(size_t i) const { + CHECK_FATAL(i < kOperandNumBinary, "Invalid operand idx in BinaryOpnds"); + return bOpnd[i]; + } + + void SetBOpnd(BaseNode *node, size_t i) { + CHECK_FATAL(i < kOperandNumBinary, "Invalid operand idx in BinaryOpnds"); + bOpnd[i] = node; + } + + virtual bool IsSameContent(const BaseNode *node) const; + + private: + BaseNode *bOpnd[kOperandNumBinary]; +}; + +class BinaryNode : public BaseNode, public BinaryOpnds { + public: + explicit BinaryNode(Opcode o) : BaseNode(o, kOperandNumBinary) {} + + BinaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, kOperandNumBinary) {} + + BinaryNode(Opcode o, PrimType typ, BaseNode *l, BaseNode *r) : BaseNode(o, typ, kOperandNumBinary) { + SetBOpnd(l, 0); + SetBOpnd(r, 1); + } + + virtual ~BinaryNode() = default; + + using BaseNode::Dump; + void Dump(int32 indent) const override; + bool Verify() const override; + + BinaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + bool IsCommutative() const { + switch (GetOpCode()) { + case OP_add: + case OP_mul: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_land: + case OP_lior: + return true; + default: + return false; + } + } + + BaseNode *Opnd(size_t i) const override { + DEBUG_ASSERT(i < kOperandNumBinary, "invalid operand idx in BinaryNode"); + DEBUG_ASSERT(i >= 0, "invalid operand idx in BinaryNode"); + return GetBOpnd(i); + } + + size_t NumOpnds() const override { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i = 0) override { + SetBOpnd(node, i); + } + + bool IsLeaf() const override { + return false; + } + + bool IsBinaryNode() const override { + return true; + } + bool IsSameContent(const BaseNode *node) const override; +}; + +class CompareNode : public BinaryNode { + public: + explicit CompareNode(Opcode o) : BinaryNode(o) {} + + CompareNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + CompareNode(Opcode o, PrimType typ, PrimType otype, BaseNode *l, BaseNode *r) + : BinaryNode(o, typ, l, r), opndType(otype) {} + + virtual ~CompareNode() = default; + + using BinaryNode::Dump; + void Dump(int32 indent) const 
override; + bool Verify() const override; + + CompareNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + PrimType GetOpndType() const { + return opndType; + } + + void SetOpndType(PrimType type) { + opndType = type; + } + + private: + PrimType opndType = kPtyInvalid; // type of operands. +}; + +class DepositbitsNode : public BinaryNode { + public: + DepositbitsNode() : BinaryNode(OP_depositbits) {} + + DepositbitsNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + DepositbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *l, BaseNode *r) + : BinaryNode(o, typ, l, r), bitsOffset(offset), bitsSize(size) {} + + virtual ~DepositbitsNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + DepositbitsNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + uint8 GetBitsOffset() const { + return bitsOffset; + } + + void SetBitsOffset(uint8 offset) { + bitsOffset = offset; + } + + uint8 GetBitsSize() const { + return bitsSize; + } + + void SetBitsSize(uint8 size) { + bitsSize = size; + } + + private: + uint8 bitsOffset = 0; + uint8 bitsSize = 0; +}; + +// used for resolveinterfacefunc, resolvevirtualfunc +// bOpnd[0] stores base vtab/itab address +// bOpnd[1] stores offset +class ResolveFuncNode : public BinaryNode { + public: + explicit ResolveFuncNode(Opcode o) : BinaryNode(o) {} + + ResolveFuncNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + ResolveFuncNode(Opcode o, PrimType typ, PUIdx idx) : BinaryNode(o, typ), puIdx(idx) {} + + ResolveFuncNode(Opcode o, PrimType typ, PUIdx pIdx, BaseNode *opnd0, BaseNode *opnd1) + : BinaryNode(o, typ, opnd0, opnd1), puIdx(pIdx) {} + + virtual ~ResolveFuncNode() = default; + + void Dump(int32 indent) const override; + + ResolveFuncNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + BaseNode *GetTabBaseAddr() const { + return GetBOpnd(0); + } + + BaseNode *GetOffset() const { + return GetBOpnd(1); + } + + PUIdx GetPuIdx() const { + return puIdx; + } + + void SetPUIdx(PUIdx idx) { + puIdx = idx; + } + + private: + PUIdx puIdx = 0; +}; + +class TernaryNode : public BaseNode { + public: + explicit TernaryNode(Opcode o) : BaseNode(o, kOperandNumTernary) {} + + TernaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, kOperandNumTernary) {} + + TernaryNode(Opcode o, PrimType typ, BaseNode *e0, BaseNode *e1, BaseNode *e2) : BaseNode(o, typ, kOperandNumTernary) { + topnd[0] = e0; + topnd[1] = e1; + topnd[2] = e2; + } + + virtual ~TernaryNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + TernaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->topnd[0] = topnd[0]->CloneTree(allocator); + node->topnd[1] = topnd[1]->CloneTree(allocator); + node->topnd[2] = topnd[2]->CloneTree(allocator); + return node; + } + + BaseNode *Opnd(size_t i) const override { + CHECK_FATAL(i < kOperandNumTernary, "array index out of range"); + 
return topnd[i]; + } + + size_t NumOpnds() const override { + return kOperandNumTernary; + } + + void SetOpnd(BaseNode *node, size_t i = 0) override { + CHECK_FATAL(i < kOperandNumTernary, "array index out of range"); + topnd[i] = node; + } + + bool IsLeaf() const override { + return false; + } + + bool IsTernaryNode() const override { + return true; + } + + private: + BaseNode *topnd[kOperandNumTernary] = { nullptr, nullptr, nullptr }; +}; + +class NaryOpnds { + public: + explicit NaryOpnds(MapleAllocator &mpallocter) : nOpnd(mpallocter.Adapter()) {} + + virtual ~NaryOpnds() = default; + + virtual void Dump(int32 indent) const; + bool VerifyOpnds() const; + + const MapleVector &GetNopnd() const { + return nOpnd; + } + + MapleVector &GetNopnd() { + return nOpnd; + } + + size_t GetNopndSize() const { + return nOpnd.size(); + } + + BaseNode *GetNopndAt(size_t i) const { + CHECK_FATAL(i < nOpnd.size(), "array index out of range"); + return nOpnd[i]; + } + + void SetNOpndAt(size_t i, BaseNode *opnd) { + CHECK_FATAL(i < nOpnd.size(), "array index out of range"); + nOpnd[i] = opnd; + } + + void SetNOpnd(const MapleVector &val) { + nOpnd = val; + } + + private: + MapleVector nOpnd; +}; + +class DeoptBundleInfo { + public: + explicit DeoptBundleInfo(MapleAllocator &mpallocter) : deoptBundleInfo(mpallocter.Adapter()) {} + + virtual ~DeoptBundleInfo() = default; + + virtual void Dump(int32 indent) const; + // bool VerifyOpnds() const; + + const MapleMap &GetDeoptBundleInfo() const { + return deoptBundleInfo; + } + + MapleMap &GetDeoptBundleInfo() { + return deoptBundleInfo; + } + + void SetDeoptBundleInfo(const std::map &vregMap) { + deoptBundleInfo.clear(); + for (const auto &elem : vregMap) { + deoptBundleInfo[elem.first] = elem.second; + } + } + + void SetDeoptBundleInfo(const MapleMap &vregMap) { + deoptBundleInfo = vregMap; + } + + void AddDeoptBundleInfo(int32 deoptVreg, PregIdx pregIdx) { + deoptBundleInfo.insert(std::pair(deoptVreg, pregIdx)); + } + + private: + MapleMap deoptBundleInfo; +}; + +class NaryNode : public BaseNode, public NaryOpnds { + public: + NaryNode(MapleAllocator &allocator, Opcode o) : BaseNode(o), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, Opcode o) : NaryNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + NaryNode(MapleAllocator &allocator, Opcode o, PrimType typ) : BaseNode(o, typ, 0), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, Opcode o, PrimType typ) : NaryNode(mod.GetCurFuncCodeMPAllocator(), o, typ) {} + + NaryNode(MapleAllocator &allocator, const NaryNode &node) + : BaseNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, const NaryNode &node) : NaryNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + NaryNode(NaryNode &node) = delete; + NaryNode &operator=(const NaryNode &node) = delete; + virtual ~NaryNode() = default; + + void Dump(int32 indent) const override; + + NaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + return node; + } + + BaseNode *Opnd(size_t i) const override { + return GetNopndAt(i); + } + + size_t NumOpnds() const override { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "NaryNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void SetOpnd(BaseNode *node, size_t i = 0) override { + DEBUG_ASSERT(i < GetNopnd().size(), "array index out of range"); + 
SetNOpndAt(i, node); + } + + bool IsLeaf() const override { + return false; + } + + bool Verify() const override { + return true; + } + + bool IsNaryNode() const override { + return true; + } +}; + +class IntrinsicopNode : public NaryNode { + public: + IntrinsicopNode(MapleAllocator &allocator, Opcode o, TyIdx typeIdx = TyIdx()) + : NaryNode(allocator, o), intrinsic(INTRN_UNDEFINED), tyIdx(typeIdx) {} + + IntrinsicopNode(const MIRModule &mod, Opcode o, TyIdx typeIdx = TyIdx()) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), o, typeIdx) {} + + IntrinsicopNode(MapleAllocator &allocator, Opcode o, PrimType typ, TyIdx typeIdx = TyIdx()) + : NaryNode(allocator, o, typ), intrinsic(INTRN_UNDEFINED), tyIdx(typeIdx) {} + + IntrinsicopNode(const MIRModule &mod, Opcode o, PrimType typ, TyIdx typeIdx = TyIdx()) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), o, typ, typeIdx) {} + + IntrinsicopNode(MapleAllocator &allocator, const IntrinsicopNode &node) + : NaryNode(allocator, node), intrinsic(node.GetIntrinsic()), tyIdx(node.GetTyIdx()) {} + + IntrinsicopNode(const MIRModule &mod, const IntrinsicopNode &node) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + IntrinsicopNode(IntrinsicopNode &node) = delete; + IntrinsicopNode &operator=(const IntrinsicopNode &node) = delete; + virtual ~IntrinsicopNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool Verify(VerifyResult &verifyResult) const override; + + IntrinsicopNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + MIRIntrinsicID GetIntrinsic() const { + return intrinsic; + } + + void SetIntrinsic(MIRIntrinsicID intrinsicID) { + intrinsic = intrinsicID; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + // IntrinDesc query + const IntrinDesc &GetIntrinDesc() const { + return IntrinDesc::intrinTable[intrinsic]; + } + + bool VerifyJArrayLength(VerifyResult &verifyResult) const; + + private: + MIRIntrinsicID intrinsic; + TyIdx tyIdx; +}; + +class ConstvalNode : public BaseNode { + public: + ConstvalNode() : BaseNode(OP_constval) {} + + explicit ConstvalNode(PrimType typ) : BaseNode(OP_constval, typ, 0) {} + + explicit ConstvalNode(MIRConst *constv) : BaseNode(OP_constval), constVal(constv) {} + + ConstvalNode(PrimType typ, MIRConst *constv) : BaseNode(OP_constval, typ, 0), constVal(constv) {} + virtual ~ConstvalNode() = default; + void Dump(int32 indent) const override; + + ConstvalNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + const MIRConst *GetConstVal() const { + return constVal; + } + + MIRConst *GetConstVal() { + return constVal; + } + + void SetConstVal(MIRConst *val) { + constVal = val; + } + + bool IsSameContent(const BaseNode *node) const override; + private: + MIRConst *constVal = nullptr; +}; + +class ConststrNode : public BaseNode { + public: + ConststrNode() : BaseNode(OP_conststr) {} + + explicit ConststrNode(UStrIdx i) : BaseNode(OP_conststr), strIdx(i) {} + + ConststrNode(PrimType typ, UStrIdx i) : BaseNode(OP_conststr, typ, 0), strIdx(i) {} + + virtual ~ConststrNode() = default; + + void Dump(int32 indent) const override; + bool IsSameContent(const BaseNode *node) const override; + + ConststrNode 
*CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + UStrIdx GetStrIdx() const { + return strIdx; + } + + void SetStrIdx(UStrIdx idx) { + strIdx = idx; + } + + private: + UStrIdx strIdx = UStrIdx(0); +}; + +class Conststr16Node : public BaseNode { + public: + Conststr16Node() : BaseNode(OP_conststr16) {} + + explicit Conststr16Node(U16StrIdx i) : BaseNode(OP_conststr16), strIdx(i) {} + + Conststr16Node(PrimType typ, U16StrIdx i) : BaseNode(OP_conststr16, typ, 0), strIdx(i) {} + + virtual ~Conststr16Node() = default; + + void Dump(int32 indent) const override; + bool IsSameContent(const BaseNode *node) const override; + + Conststr16Node *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + U16StrIdx GetStrIdx() const { + return strIdx; + } + + void SetStrIdx(U16StrIdx idx) { + strIdx = idx; + } + + private: + U16StrIdx strIdx = U16StrIdx(0); +}; + +class SizeoftypeNode : public BaseNode { + public: + SizeoftypeNode() : BaseNode(OP_sizeoftype) {} + + explicit SizeoftypeNode(TyIdx t) : BaseNode(OP_sizeoftype), tyIdx(t) {} + + SizeoftypeNode(PrimType type, TyIdx t) : BaseNode(OP_sizeoftype, type, 0), tyIdx(t) {} + + virtual ~SizeoftypeNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + SizeoftypeNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + private: + TyIdx tyIdx = TyIdx(0); +}; + +class FieldsDistNode : public BaseNode { + public: + FieldsDistNode() : BaseNode(OP_fieldsdist) {} + + FieldsDistNode(TyIdx t, FieldID f1, FieldID f2) : BaseNode(OP_fieldsdist), tyIdx(t), fieldID1(f1), fieldID2(f2) {} + + FieldsDistNode(PrimType typ, TyIdx t, FieldID f1, FieldID f2) + : BaseNode(OP_fieldsdist, typ, 0), tyIdx(t), fieldID1(f1), fieldID2(f2) {} + + virtual ~FieldsDistNode() = default; + + void Dump(int32 indent) const override; + + FieldsDistNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + FieldID GetFiledID1() const { + return fieldID1; + } + + void SetFiledID1(FieldID id) { + fieldID1 = id; + } + + FieldID GetFiledID2() const { + return fieldID2; + } + + void SetFiledID2(FieldID id) { + fieldID2 = id; + } + + private: + TyIdx tyIdx = TyIdx(0); + FieldID fieldID1 = 0; + FieldID fieldID2 = 0; +}; + +class ArrayNode : public NaryNode { + public: + ArrayNode(MapleAllocator &allocator) : NaryNode(allocator, OP_array) {} + + explicit ArrayNode(const MIRModule &mod) : ArrayNode(mod.GetCurFuncCodeMPAllocator()) {} + + ArrayNode(MapleAllocator &allocator, PrimType typ, TyIdx idx) + : NaryNode(allocator, OP_array, typ), tyIdx(idx) {} + + ArrayNode(const MIRModule &mod, PrimType typ, TyIdx idx) : ArrayNode(mod.GetCurFuncCodeMPAllocator(), typ, idx) {} + + ArrayNode(MapleAllocator &allocator, PrimType typ, TyIdx idx, bool bcheck) + : NaryNode(allocator, OP_array, typ), tyIdx(idx), boundsCheck(bcheck) {} + + ArrayNode(const MIRModule &mod, PrimType typ, TyIdx idx, bool bcheck) + : ArrayNode(mod.GetCurFuncCodeMPAllocator(), typ, idx, bcheck) {} + + ArrayNode(MapleAllocator &allocator, const ArrayNode &node) + : NaryNode(allocator, node), tyIdx(node.tyIdx), boundsCheck(node.boundsCheck) {} + + ArrayNode(const MIRModule &mod, const ArrayNode 
&node) : ArrayNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + ArrayNode(ArrayNode &node) = delete; + ArrayNode &operator=(const ArrayNode &node) = delete; + virtual ~ArrayNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool IsSameBase(ArrayNode*); + + size_t NumOpnds() const override { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "ArrayNode has wrong numOpnds field"); + return GetNopndSize(); + } + + ArrayNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->boundsCheck = boundsCheck; + node->SetNumOpnds(GetNopndSize()); + return node; + } + + const MIRType *GetArrayType(const TypeTable &tt) const; + MIRType *GetArrayType(const TypeTable &tt); + + BaseNode *GetIndex(size_t i) { + return Opnd(i + 1); + } + + const BaseNode *GetDim(const MIRModule &mod, TypeTable &tt, int i) const; + BaseNode *GetDim(const MIRModule &mod, TypeTable &tt, int i); + + BaseNode *GetBase() { + return Opnd(0); + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + bool GetBoundsCheck() const { + return boundsCheck; + } + + void SetBoundsCheck(bool check) { + boundsCheck = check; + } + + private: + TyIdx tyIdx; + bool boundsCheck = true; +}; + +class AddrofNode : public BaseNode { + public: + explicit AddrofNode(Opcode o) : BaseNode(o), stIdx() {} + + AddrofNode(Opcode o, PrimType typ) : AddrofNode(o, typ, StIdx(), 0) {} + + AddrofNode(Opcode o, PrimType typ, StIdx sIdx, FieldID fid) : BaseNode(o, typ, 0), stIdx(sIdx), fieldID(fid) {} + + virtual ~AddrofNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool CheckNode(const MIRModule &mod) const; + + AddrofNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + StIdx GetStIdx() const { + return stIdx; + } + + void SetStIdx(StIdx idx) { + stIdx = idx; + } + + void SetStFullIdx(uint32 idx) { + stIdx.SetFullIdx(idx); + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID fieldIDVal) { + fieldID = fieldIDVal; + } + + bool IsVolatile(const MIRModule &mod) const; + + bool IsSameContent(const BaseNode *node) const override; + private: + StIdx stIdx; + FieldID fieldID = 0; +}; + +// DreadNode has the same member fields and member methods as AddrofNode +using DreadNode = AddrofNode; + +class DreadoffNode : public BaseNode { + public: + explicit DreadoffNode(Opcode o) : BaseNode(o), stIdx() {} + + DreadoffNode(Opcode o, PrimType typ) : BaseNode(o, typ, 0), stIdx() {} + + virtual ~DreadoffNode() = default; + + void Dump(int32 indent) const override; + + DreadoffNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + bool IsVolatile(const MIRModule &mod) const; + + bool IsSameContent(const BaseNode *node) const override; + + public: + StIdx stIdx; + int32 offset = 0; +}; + +// AddrofoffNode has the same member fields and member methods as DreadoffNode +using AddrofoffNode = DreadoffNode; + +class RegreadNode : public BaseNode { + public: + RegreadNode() : BaseNode(OP_regread) {} + + explicit RegreadNode(PregIdx pIdx) : BaseNode(OP_regread), regIdx(pIdx) {} + + RegreadNode(PrimType primType, PregIdx pIdx) : RegreadNode(pIdx) { + ptyp = primType; + } + + virtual ~RegreadNode() = default; + + void 
Dump(int32 indent) const override; + bool Verify() const override; + + RegreadNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + PregIdx GetRegIdx() const { + return regIdx; + } + void SetRegIdx(PregIdx reg) { + regIdx = reg; + } + + bool IsSameContent(const BaseNode *node) const override; + private: + PregIdx regIdx = 0; // 32bit, negative if special register +}; + +class AddroffuncNode : public BaseNode { + public: + AddroffuncNode() : BaseNode(OP_addroffunc) {} + + AddroffuncNode(PrimType typ, PUIdx pIdx) : BaseNode(OP_addroffunc, typ, 0), puIdx(pIdx) {} + + virtual ~AddroffuncNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + AddroffuncNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + void SetPUIdx(PUIdx puIdxValue) { + puIdx = puIdxValue; + } + + bool IsSameContent(const BaseNode *node) const override; + private: + PUIdx puIdx = 0; // 32bit now +}; + +class AddroflabelNode : public BaseNode { + public: + AddroflabelNode() : BaseNode(OP_addroflabel) {} + + explicit AddroflabelNode(uint32 ofst) : BaseNode(OP_addroflabel), offset(ofst) {} + + virtual ~AddroflabelNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + AddroflabelNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 offsetValue) { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + private: + LabelIdx offset = 0; +}; + +// for cleanuptry, jscatch, finally, retsub, endtry, membaracquire, membarrelease, +// membarstoreload, membarstorestore +class StmtNode : public BaseNode, public PtrListNodeBase { + public: + static std::atomic stmtIDNext; // for assigning stmtID, initialized to 1; 0 is reserved + static uint32 lastPrintedLineNum; // used during printing ascii output + static uint16 lastPrintedColumnNum; + + explicit StmtNode(Opcode o) : BaseNode(o), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) { + ++stmtIDNext; + } + + StmtNode(Opcode o, uint8 numOpr) + : BaseNode(o, numOpr), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) { + ++stmtIDNext; + } + + StmtNode(Opcode o, PrimType typ, uint8 numOpr) + : BaseNode(o, typ, numOpr), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) { + ++stmtIDNext; + } + + // used for NaryStmtNode when clone + StmtNode(Opcode o, PrimType typ, uint8 numOpr, const SrcPosition &srcPosition, uint32 stmtOriginalID, StmtAttrs attrs) + : BaseNode(o, typ, numOpr), PtrListNodeBase(), srcPosition(srcPosition), stmtID(stmtIDNext), + stmtOriginalID(stmtOriginalID), stmtAttrs(attrs) { + ++stmtIDNext; + } + + virtual ~StmtNode() = default; + + using BaseNode::Dump; + void DumpBase(int32 indent) const override; + void Dump(int32 indent) const override; + void InsertAfterThis(StmtNode &pos); + void InsertBeforeThis(StmtNode &pos); + + virtual StmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *s = allocator.GetMemPool()->New(*this); + s->SetStmtID(stmtIDNext++); + s->SetMeStmtID(meStmtID); + return s; + } + + virtual bool Verify() const override { + return true; + } + + virtual bool Verify(VerifyResult &) const override { + return Verify(); + } + + const SrcPosition &GetSrcPos() const { + return srcPosition; + } 
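The ID fields of `StmtNode` follow a fixed discipline spelled out in the declarations above: `stmtIDNext` starts at 1 (0 is reserved), every newly built statement draws a fresh `stmtID`, and `CloneTree` assigns a fresh `stmtID` while the copied `stmtOriginalID` keeps pointing at the first definition. A runnable toy model of just that scheme (`ToyStmt` is an illustration, not a type from this patch):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

class ToyStmt {
 public:
  ToyStmt() : stmtID(stmtIDNext++), stmtOriginalID(stmtID) {}

  // Mirrors StmtNode::CloneTree: copy, then overwrite only the unique ID.
  ToyStmt Clone() const {
    ToyStmt copy(*this);         // keeps stmtOriginalID
    copy.stmtID = stmtIDNext++;  // fresh unique ID for the clone
    return copy;
  }

  uint32_t stmtID;
  uint32_t stmtOriginalID;

 private:
  static std::atomic<uint32_t> stmtIDNext;
};
std::atomic<uint32_t> ToyStmt::stmtIDNext{1};  // 0 is reserved

int main() {
  ToyStmt s;
  ToyStmt c = s.Clone();
  assert(c.stmtID != s.stmtID);
  assert(c.stmtOriginalID == s.stmtOriginalID);
  return 0;
}
```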
+ + SrcPosition &GetSrcPos() { + return srcPosition; + } + + void SetSrcPos(SrcPosition pos) { + srcPosition = pos; + } + + uint32 GetStmtID() const { + return stmtID; + } + + void SetStmtID(uint32 id) { + stmtID = id; + } + + uint32 GetOriginalID() const { + return stmtOriginalID; + } + + void SetOriginalID(uint32 id) { + stmtOriginalID = id; + } + + uint32 GetMeStmtID() const { + return meStmtID; + } + + void SetMeStmtID(uint32 id) { + meStmtID = id; + } + + StmtNode *GetRealNext() const; + + virtual BaseNode *GetRHS() const { + return nullptr; + } + + bool GetIsLive() const { + return isLive; + } + + void SetIsLive(bool live) const { + isLive = live; + } + + bool IsInSafeRegion() const { + return stmtAttrs.GetAttr(STMTATTR_insaferegion); + } + + void SetInSafeRegion() { + stmtAttrs.SetAttr(STMTATTR_insaferegion); + } + + void CopySafeRegionAttr(const StmtAttrs &stmtAttr) { + this->stmtAttrs.AppendAttr(stmtAttr.GetTargetAttrFlag(STMTATTR_insaferegion)); + } + + const StmtAttrs &GetStmtAttrs() const { + return stmtAttrs; + } + + void SetAttr(StmtAttrKind x) { + stmtAttrs.SetAttr(x); + } + + bool GetAttr(StmtAttrKind x) { + return stmtAttrs.GetAttr(x); + } + + void SetStmtAttrs(StmtAttrs stmtAttrs_) { + stmtAttrs = stmtAttrs_; + } + + protected: + SrcPosition srcPosition; + + private: + uint32 stmtID; // a unique ID assigned to it + uint32 stmtOriginalID; // first define id, no change when clone, need copy when emit from MeStmt + uint32 meStmtID = 0; // Need copy when emit from MeStmt, attention:this just for two stmt(if && call) + mutable bool isLive = false; // only used for dse to save compile time + // mutable to keep const-ness at most situation + StmtAttrs stmtAttrs; +}; + +class IassignNode : public StmtNode { + public: + IassignNode() : IassignNode(TyIdx(0), 0, nullptr, nullptr) {} + + IassignNode(TyIdx tyIdx, FieldID fieldID, BaseNode *addrOpnd, BaseNode *rhsOpnd) + : StmtNode(OP_iassign), tyIdx(tyIdx), fieldID(fieldID), addrExpr(addrOpnd), rhs(rhsOpnd) { + BaseNodeT::SetNumOpnds(kOperandNumBinary); + } + + virtual ~IassignNode() = default; + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID fid) { + fieldID = fid; + } + + BaseNode *Opnd(size_t i) const override { + if (i == 0) { + return addrExpr; + } + return rhs; + } + + size_t NumOpnds() const override { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i) override { + if (i == 0) { + addrExpr = node; + } else { + rhs = node; + } + } + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignNode *CloneTree(MapleAllocator &allocator) const override { + auto *bn = allocator.GetMemPool()->New(*this); + bn->SetStmtID(stmtIDNext++); + bn->SetOpnd(addrExpr->CloneTree(allocator), 0); + bn->SetRHS(rhs->CloneTree(allocator)); + return bn; + } + + // the base of an address expr is either a leaf or an iread + BaseNode &GetAddrExprBase() const { + BaseNode *base = addrExpr; + while (base->NumOpnds() != 0 && base->GetOpCode() != OP_iread) { + base = base->Opnd(0); + } + return *base; + } + + void SetAddrExpr(BaseNode *exp) { + addrExpr = exp; + } + + BaseNode *GetRHS() const override { + return rhs; + } + + void SetRHS(BaseNode *node) { + rhs = node; + } + + bool AssigningVolatile() const; + + private: + TyIdx tyIdx; + FieldID fieldID; + public: + BaseNode *addrExpr; + BaseNode *rhs; +}; + +// goto and gosub +class GotoNode : public StmtNode { + public: + 
explicit GotoNode(Opcode o) : StmtNode(o) {} + + GotoNode(Opcode o, uint32 ofst) : StmtNode(o), offset(ofst) {} + + virtual ~GotoNode() = default; + + void Dump(int32 indent) const override; + + GotoNode *CloneTree(MapleAllocator &allocator) const override { + auto *g = allocator.GetMemPool()->New(*this); + g->SetStmtID(stmtIDNext++); + return g; + } + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 o) { + offset = o; + } + + private: + uint32 offset = 0; +}; + +// jstry +class JsTryNode : public StmtNode { + public: + JsTryNode() : StmtNode(OP_jstry) {} + + JsTryNode(uint16 catchofst, uint16 finallyofset) + : StmtNode(OP_jstry), catchOffset(catchofst), finallyOffset(finallyofset) {} + + virtual ~JsTryNode() = default; + + void Dump(int32 indent) const override; + + JsTryNode *CloneTree(MapleAllocator &allocator) const override { + auto *t = allocator.GetMemPool()->New(*this); + t->SetStmtID(stmtIDNext++); + return t; + } + + uint16 GetCatchOffset() const { + return catchOffset; + } + + void SetCatchOffset(uint32 offset) { + catchOffset = offset; + } + + uint16 GetFinallyOffset() const { + return finallyOffset; + } + + void SetFinallyOffset(uint32 offset) { + finallyOffset = offset; + } + + private: + uint16 catchOffset = 0; + uint16 finallyOffset = 0; +}; + +// try, cpptry +class TryNode : public StmtNode { + public: + explicit TryNode(MapleAllocator &allocator) : StmtNode(OP_try), offsets(allocator.Adapter()) {} + + explicit TryNode(const MapleVector &offsets) : StmtNode(OP_try), offsets(offsets) {} + + explicit TryNode(const MIRModule &mod) : TryNode(mod.GetCurFuncCodeMPAllocator()) {} + + TryNode(TryNode &node) = delete; + TryNode &operator=(const TryNode &node) = delete; + virtual ~TryNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + + MapleVector &GetOffsets() { + return offsets; + } + + LabelIdx GetOffset(size_t i) const { + DEBUG_ASSERT(i < offsets.size(), "array index out of range"); + return offsets.at(i); + } + + void SetOffset(LabelIdx offsetValue, size_t i) { + DEBUG_ASSERT(i < offsets.size(), "array index out of range"); + offsets[i] = offsetValue; + } + + void AddOffset(LabelIdx offsetValue) { + offsets.push_back(offsetValue); + } + + void ResizeOffsets(size_t offsetSize) { + offsets.resize(offsetSize); + } + + void SetOffsets(const MapleVector &offsetsValue) { + offsets = offsetsValue; + } + + size_t GetOffsetsCount() const { + return offsets.size(); + } + + MapleVector::iterator GetOffsetsBegin() { + return offsets.begin(); + } + + MapleVector::iterator GetOffsetsEnd() { + return offsets.end(); + } + + void OffsetsInsert(MapleVector::iterator a, MapleVector::iterator b, + MapleVector::iterator c) { + (void)offsets.insert(a, b, c); + } + + TryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < offsets.size(); ++i) { + node->AddOffset(offsets[i]); + } + return node; + } + + private: + MapleVector offsets; +}; + +// catch +class CatchNode : public StmtNode { + public: + explicit CatchNode(MapleAllocator &allocator) : StmtNode(OP_catch), exceptionTyIdxVec(allocator.Adapter()) {} + + explicit CatchNode(const MapleVector &tyIdxVec) + : StmtNode(OP_catch), exceptionTyIdxVec(tyIdxVec) {} + + explicit CatchNode(const MIRModule &mod) : CatchNode(mod.GetCurFuncCodeMPAllocator()) {} + + CatchNode(CatchNode &node) = delete; + CatchNode &operator=(const CatchNode &node) = delete; + virtual ~CatchNode() 
= default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + + TyIdx GetExceptionTyIdxVecElement(size_t i) const { + CHECK_FATAL(i < exceptionTyIdxVec.size(), "array index out of range"); + return exceptionTyIdxVec[i]; + } + + const MapleVector &GetExceptionTyIdxVec() const { + return exceptionTyIdxVec; + } + + size_t Size() const { + return exceptionTyIdxVec.size(); + } + + void SetExceptionTyIdxVecElement(TyIdx idx, size_t i) { + CHECK_FATAL(i < exceptionTyIdxVec.size(), "array index out of range"); + exceptionTyIdxVec[i] = idx; + } + + void SetExceptionTyIdxVec(MapleVector vec) { + exceptionTyIdxVec = vec; + } + + void PushBack(TyIdx idx) { + exceptionTyIdxVec.push_back(idx); + } + + CatchNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < Size(); ++i) { + node->PushBack(GetExceptionTyIdxVecElement(i)); + } + return node; + } + + private: + // TyIdx exception_tyidx; + MapleVector exceptionTyIdxVec; +}; + +// cppcatch +class CppCatchNode : public StmtNode { + public: + explicit CppCatchNode(const TyIdx &idx) : StmtNode(OP_cppcatch), exceptionTyIdx(idx) {} + explicit CppCatchNode() : CppCatchNode(TyIdx(0)) {} + + explicit CppCatchNode(const CppCatchNode &node) = delete; + CppCatchNode &operator=(const CppCatchNode &node) = delete; + ~CppCatchNode() = default; + + void Dump(int32 indent) const override; + + CppCatchNode *CloneTree(MapleAllocator &allocator) const override { + CppCatchNode *node = allocator.GetMemPool()->New(); + node->SetStmtID(stmtIDNext++); + node->exceptionTyIdx = exceptionTyIdx; + return node; + } + + CppCatchNode *CloneTree(const MIRModule &mod) const { + return CppCatchNode::CloneTree(*mod.CurFuncCodeMemPoolAllocator()); + } + public: + TyIdx exceptionTyIdx; +}; + +using CasePair = std::pair; +using CaseVector = MapleVector; +class SwitchNode : public StmtNode { + public: + explicit SwitchNode(MapleAllocator &allocator) : StmtNode(OP_switch, 1), switchTable(allocator.Adapter()) {} + + explicit SwitchNode(const MIRModule &mod) : SwitchNode(mod.GetCurFuncCodeMPAllocator()) {} + + SwitchNode(MapleAllocator &allocator, LabelIdx label) : SwitchNode(allocator, label, nullptr) {} + + SwitchNode(MapleAllocator &allocator, LabelIdx label, BaseNode *opnd) + : StmtNode(OP_switch, 1), switchOpnd(opnd), defaultLabel(label), switchTable(allocator.Adapter()) {} + + SwitchNode(const MIRModule &mod, LabelIdx label) : SwitchNode(mod.GetCurFuncCodeMPAllocator(), label) {} + + SwitchNode(MapleAllocator &allocator, const SwitchNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds), + defaultLabel(node.GetDefaultLabel()), + switchTable(allocator.Adapter()) {} + + SwitchNode(const MIRModule &mod, const SwitchNode &node) : SwitchNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + SwitchNode(SwitchNode &node) = delete; + SwitchNode &operator=(const SwitchNode &node) = delete; + virtual ~SwitchNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + SwitchNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetSwitchOpnd(switchOpnd->CloneTree(allocator)); + for (size_t i = 0; i < switchTable.size(); ++i) { + node->InsertCasePair(switchTable[i]); + } + return node; + } + + BaseNode *Opnd(size_t) const override { + return switchOpnd; + } + + void SetOpnd(BaseNode *node, size_t) override { + switchOpnd = node; + } + + 
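The `switchTable` managed by the accessors above is an ordered vector of (case value, target label) pairs, and `SortCasePair` accepts any strict-weak-order predicate over `CasePair`. A sketch of that sorting contract using standard-library stand-ins (`CasePair` is rebuilt here as `std::pair<int64_t, LabelIdx>` and `MapleVector` as `std::vector`; both are assumptions mirroring the typedefs in this header):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

using LabelIdx = uint32_t;                      // stand-in for the IR typedef
using CasePair = std::pair<int64_t, LabelIdx>;  // (case value, target label)

// The kind of predicate SwitchNode::SortCasePair expects: order cases by
// value so later passes can emit range checks or binary searches.
bool CaseValLess(const CasePair &lhs, const CasePair &rhs) {
  return lhs.first < rhs.first;
}

int main() {
  std::vector<CasePair> switchTable = {{10, 3}, {-1, 1}, {4, 2}};
  std::sort(switchTable.begin(), switchTable.end(), CaseValLess);
  for (const auto &casePair : switchTable) {
    std::cout << "case " << casePair.first << " -> @label" << casePair.second << '\n';
  }
  return 0;
}
```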
BaseNode *GetSwitchOpnd() const { + return switchOpnd; + } + + void SetSwitchOpnd(BaseNode *node) { + switchOpnd = node; + } + + LabelIdx GetDefaultLabel() const { + return defaultLabel; + } + + void SetDefaultLabel(LabelIdx idx) { + defaultLabel = idx; + } + + const CaseVector &GetSwitchTable() const { + return switchTable; + } + + CaseVector &GetSwitchTable() { + return switchTable; + } + + CasePair GetCasePair(size_t idx) const { + DEBUG_ASSERT(idx < switchTable.size(), "out of range in SwitchNode::GetCasePair"); + return switchTable.at(idx); + } + + void SetSwitchTable(CaseVector vec) { + switchTable = vec; + } + + void InsertCasePair(CasePair pair) { + switchTable.push_back(pair); + } + + void UpdateCaseLabelAt(size_t i, LabelIdx idx) { + switchTable[i] = std::make_pair(switchTable[i].first, idx); + } + + void SortCasePair(bool func(const CasePair&, const CasePair&)) { + std::sort(switchTable.begin(), switchTable.end(), func); + } + + private: + BaseNode *switchOpnd = nullptr; + LabelIdx defaultLabel = 0; + CaseVector switchTable; +}; + +using MCasePair = std::pair; +using MCaseVector = MapleVector; +class MultiwayNode : public StmtNode { + public: + explicit MultiwayNode(MapleAllocator &allocator) : StmtNode(OP_multiway, 1), multiWayTable(allocator.Adapter()) {} + + explicit MultiwayNode(const MIRModule &mod) : MultiwayNode(mod.GetCurFuncCodeMPAllocator()) {} + + MultiwayNode(MapleAllocator &allocator, LabelIdx label) + : StmtNode(OP_multiway, 1), defaultLabel(label), multiWayTable(allocator.Adapter()) {} + + MultiwayNode(const MIRModule &mod, LabelIdx label) : MultiwayNode(mod.GetCurFuncCodeMPAllocator(), label) {} + + MultiwayNode(MapleAllocator &allocator, const MultiwayNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds, node.GetSrcPos(), node.GetOriginalID(), + node.GetStmtAttrs()), + defaultLabel(node.defaultLabel), + multiWayTable(allocator.Adapter()) {} + + MultiwayNode(const MIRModule &mod, const MultiwayNode &node) : MultiwayNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + MultiwayNode(MultiwayNode &node) = delete; + MultiwayNode &operator=(const MultiwayNode &node) = delete; + virtual ~MultiwayNode() = default; + + void Dump(int32 indent) const override; + + MultiwayNode *CloneTree(MapleAllocator &allocator) const override { + auto *nd = allocator.GetMemPool()->New(allocator, *this); + nd->multiWayOpnd = static_cast(multiWayOpnd->CloneTree(allocator)); + for (size_t i = 0; i < multiWayTable.size(); ++i) { + BaseNode *node = multiWayTable[i].first->CloneTree(allocator); + MCasePair pair(static_cast(node), multiWayTable[i].second); + nd->multiWayTable.push_back(pair); + } + return nd; + } + + BaseNode *Opnd(size_t i) const override { + return *(&multiWayOpnd + static_cast(i)); + } + + const BaseNode *GetMultiWayOpnd() const { + return multiWayOpnd; + } + + void SetMultiWayOpnd(BaseNode *multiwayOpndPara) { + multiWayOpnd = multiwayOpndPara; + } + + void SetDefaultlabel(LabelIdx defaultLabelPara) { + defaultLabel = defaultLabelPara; + } + + void AppendElemToMultiWayTable(const MCasePair &mCasrPair) { + multiWayTable.push_back(mCasrPair); + } + + const MCaseVector &GetMultiWayTable() const { + return multiWayTable; + } + + private: + BaseNode *multiWayOpnd = nullptr; + LabelIdx defaultLabel = 0; + MCaseVector multiWayTable; +}; + +// eval, throw, free, decref, incref, decrefreset, assertnonnull, igoto +class UnaryStmtNode : public StmtNode { + public: + explicit UnaryStmtNode(Opcode o) : StmtNode(o, 1) {} + + UnaryStmtNode(Opcode o, PrimType typ) : 
StmtNode(o, typ, 1) {} + + UnaryStmtNode(Opcode o, PrimType typ, BaseNode *opnd) : StmtNode(o, typ, 1), uOpnd(opnd) {} + + virtual ~UnaryStmtNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + void DumpOpnd(const MIRModule &mod, int32 indent) const; + void DumpOpnd(int32 indent) const; + + bool Verify() const override { + return uOpnd->Verify(); + } + + bool Verify(VerifyResult &verifyResult) const override { + if (GetOpCode() == OP_throw && !VerifyThrowable(verifyResult)) { + return false; + } + return uOpnd->Verify(verifyResult); + } + + UnaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(uOpnd->CloneTree(allocator), 0); + return node; + } + + bool IsLeaf() const override { + return false; + } + + BaseNode *GetRHS() const override { + return Opnd(0); + } + + virtual void SetRHS(BaseNode *rhs) { + this->SetOpnd(rhs, 0); + } + + BaseNode *Opnd(size_t i = 0) const override { + (void)i; + return uOpnd; + } + + void SetOpnd(BaseNode *node, size_t) override { + uOpnd = node; + } + + private: + bool VerifyThrowable(VerifyResult &verifyResult) const; + + BaseNode *uOpnd = nullptr; +}; + +// dassign, maydassign +class DassignNode : public UnaryStmtNode { + public: + DassignNode() : UnaryStmtNode(OP_dassign), stIdx() {} + + explicit DassignNode(PrimType typ) : UnaryStmtNode(OP_dassign, typ), stIdx() {} + + DassignNode(PrimType typ, BaseNode *opnd) : UnaryStmtNode(OP_dassign, typ, opnd), stIdx() {} + + DassignNode(PrimType typ, BaseNode *opnd, StIdx idx, FieldID fieldID) + : UnaryStmtNode(OP_dassign, typ, opnd), stIdx(idx), fieldID(fieldID) {} + + DassignNode(BaseNode *opnd, StIdx idx, FieldID fieldID) : DassignNode(kPtyInvalid, opnd, idx, fieldID) {} + + virtual ~DassignNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + DassignNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + size_t NumOpnds() const override { + return 1; + } + + bool IsIdentityDassign() const { + BaseNode *rhs = GetRHS(); + if (rhs->GetOpCode() != OP_dread) { + return false; + } + auto *dread = static_cast(rhs); + return (stIdx == dread->GetStIdx()); + } + + BaseNode *GetRHS() const override { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + StIdx GetStIdx() const { + return stIdx; + } + void SetStIdx(StIdx s) { + stIdx = s; + } + + const FieldID &GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID f) { + fieldID = f; + } + + bool AssigningVolatile(const MIRModule &mod) const; + + private: + StIdx stIdx; + FieldID fieldID = 0; +}; + +class DassignoffNode : public UnaryStmtNode { + public: + DassignoffNode() : UnaryStmtNode(OP_dassignoff), stIdx() {} + + explicit DassignoffNode(PrimType typ) : UnaryStmtNode(OP_dassignoff, typ), stIdx() {} + + DassignoffNode(PrimType typ, BaseNode *opnd) : UnaryStmtNode(OP_dassignoff, typ, opnd), stIdx() {} + + DassignoffNode(const StIdx &lhsStIdx, int32 dOffset, PrimType rhsType, BaseNode *rhsNode) + : DassignoffNode(rhsType, rhsNode) { + stIdx = lhsStIdx; + offset = dOffset; + } + virtual ~DassignoffNode() = default; + + void Dump(int32 indent) const override; + + DassignoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node 
= allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + size_t NumOpnds() const override { + return 1; + } + + BaseNode *GetRHS() const override { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + public: + StIdx stIdx; + int32 offset = 0; +}; + +class RegassignNode : public UnaryStmtNode { + public: + RegassignNode() : UnaryStmtNode(OP_regassign) {} + + RegassignNode(PrimType primType, PregIdx idx, BaseNode *opnd) + : UnaryStmtNode(OP_regassign, primType, opnd), regIdx(idx) {} + + virtual ~RegassignNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + RegassignNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + BaseNode *GetRHS() const override { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + PregIdx GetRegIdx() const { + return regIdx; + } + void SetRegIdx(PregIdx idx) { + regIdx = idx; + } + + private: + PregIdx regIdx = 0; // 32bit, negative if special register +}; + +// brtrue and brfalse +class CondGotoNode : public UnaryStmtNode { + public: + static const int32 probAll; + explicit CondGotoNode(Opcode o) : CondGotoNode(o, 0, nullptr) {} + + CondGotoNode(Opcode o, uint32 offset, BaseNode *opnd) : UnaryStmtNode(o, kPtyInvalid, opnd), offset(offset) { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + virtual ~CondGotoNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 offsetValue) { + offset = offsetValue; + } + + bool IsBranchProbValid() const { + return branchProb > 0 && branchProb < probAll; + } + + int32 GetBranchProb() const { + return branchProb; + } + + void SetBranchProb(int32 prob) { + branchProb = prob; + } + + void ReverseBranchProb() { + if (IsBranchProbValid()) { + branchProb = probAll - branchProb; + } + } + + void InvalidateBranchProb() { + if (IsBranchProbValid()) { + branchProb = -1; + } + } + + CondGotoNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + private: + uint32 offset; + int32 branchProb = -1; // branch probability, a negative number indicates that the probability is invalid +}; + +using SmallCasePair = std::pair; +using SmallCaseVector = MapleVector; +class RangeGotoNode : public UnaryStmtNode { + public: + explicit RangeGotoNode(MapleAllocator &allocator) + : UnaryStmtNode(OP_rangegoto), rangegotoTable(allocator.Adapter()) {} + + explicit RangeGotoNode(const MIRModule &mod) : RangeGotoNode(mod.GetCurFuncCodeMPAllocator()) {} + + RangeGotoNode(MapleAllocator &allocator, const RangeGotoNode &node) + : UnaryStmtNode(node), + tagOffset(node.tagOffset), + rangegotoTable(allocator.Adapter()) {} + + RangeGotoNode(const MIRModule &mod, const RangeGotoNode &node) + : RangeGotoNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + RangeGotoNode(RangeGotoNode &node) = delete; + RangeGotoNode &operator=(const RangeGotoNode &node) = delete; + virtual ~RangeGotoNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + 
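+  // Editor's note: illustrative sketch (not in the original patch) of the biased
+  // rangegoto table declared below. Stored tags are relative to tagOffset, so with
+  // hypothetical names `alloc` and `labs`:
+  //
+  //   RangeGotoNode *rg = alloc.GetMemPool()->New<RangeGotoNode>(alloc);
+  //   rg->SetTagOffset(10);          // selector values start at 10
+  //   rg->AddRangeGoto(0, labs[0]);  // taken when selector == 10 + 0
+  //   rg->AddRangeGoto(1, labs[1]);  // taken when selector == 10 + 1
+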
RangeGotoNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + for (size_t i = 0; i < rangegotoTable.size(); ++i) { + node->rangegotoTable.push_back(rangegotoTable[i]); + } + return node; + } + + const SmallCaseVector &GetRangeGotoTable() const { + return rangegotoTable; + } + + const SmallCasePair &GetRangeGotoTableItem(size_t i) const { + return rangegotoTable.at(i); + } + + void SetRangeGotoTable(SmallCaseVector rt) { + rangegotoTable = rt; + } + + void AddRangeGoto(uint32 tag, LabelIdx idx) { + rangegotoTable.push_back(SmallCasePair(tag, idx)); + } + + int32 GetTagOffset() const { + return tagOffset; + } + + void SetTagOffset(int32 offset) { + tagOffset = offset; + } + + private: + int32 tagOffset = 0; + // add each tag to tagOffset field to get the actual tag values + SmallCaseVector rangegotoTable; +}; + +class BlockNode : public StmtNode { + public: + using StmtNodes = PtrListRef; + + BlockNode() : StmtNode(OP_block) {} + + ~BlockNode() { + stmtNodeList.clear(); + } + + void AddStatement(StmtNode *stmt); + void AppendStatementsFromBlock(BlockNode &blk); + void InsertFirst(StmtNode *stmt); // Insert stmt as the first + void InsertLast(StmtNode *stmt); // Insert stmt as the last + void ReplaceStmtWithBlock(StmtNode &stmtNode, BlockNode &blk); + void ReplaceStmt1WithStmt2(const StmtNode *stmtNode1, StmtNode *stmtNode2); + void RemoveStmt(const StmtNode *stmtNode1); + void InsertBefore(const StmtNode *stmtNode1, StmtNode *stmtNode2); // Insert ss2 before ss1 in current block. + void InsertAfter(const StmtNode *stmtNode1, StmtNode *stmtNode2); // Insert ss2 after ss1 in current block. + // insert all the stmts in inblock to the current block after stmt1 + void InsertBlockAfter(BlockNode &inblock, const StmtNode *stmt1); + void Dump(int32 indent, const MIRSymbolTable *theSymTab, MIRPregTable *thePregTab, + bool withInfo, bool isFuncbody, MIRFlavor flavor) const; + bool Verify() const override; + bool Verify(VerifyResult &verifyResult) const override; + + void Dump(int32 indent) const override { + Dump(indent, nullptr, nullptr, false, false, kFlavorUnknown); + } + + BlockNode *CloneTree(MapleAllocator &allocator) const override { + auto *blk = allocator.GetMemPool()->New(); + blk->SetStmtID(stmtIDNext++); + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt = static_cast(stmt.CloneTree(allocator)); + DEBUG_ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + blk->AddStatement(newStmt); + } + return blk; + } + + BlockNode *CloneTreeWithSrcPosition(const MIRModule &mod) { + MapleAllocator &allocator = mod.GetCurFuncCodeMPAllocator(); + auto *blk = allocator.GetMemPool()->New(); + blk->SetStmtID(stmtIDNext++); + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt = static_cast(stmt.CloneTree(allocator)); + DEBUG_ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetSrcPos(stmt.GetSrcPos()); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + blk->AddStatement(newStmt); + } + return blk; + } + + BlockNode *CloneTreeWithFreqs(MapleAllocator &allocator, + std::unordered_map& toFreqs, + std::unordered_map& fromFreqs, + uint64_t numer, uint64_t denom, uint32_t updateOp); + + bool IsEmpty() const { + return stmtNodeList.empty(); + } + + void ResetBlock() { + stmtNodeList.clear(); + } + + StmtNode *GetFirst() { + return &(stmtNodeList.front()); + } + + const StmtNode *GetFirst() const { + return 
&(stmtNodeList.front()); + } + + void SetFirst(StmtNode *node) { + stmtNodeList.update_front(node); + } + + StmtNode *GetLast() { + return &(stmtNodeList.back()); + } + + const StmtNode *GetLast() const { + return &(stmtNodeList.back()); + } + + void SetLast(StmtNode *node) { + stmtNodeList.update_back(node); + } + + StmtNodes &GetStmtNodes() { + return stmtNodeList; + } + + const StmtNodes &GetStmtNodes() const { + return stmtNodeList; + } + + private: + StmtNodes stmtNodeList; +}; + +class IfStmtNode : public UnaryStmtNode { + public: + IfStmtNode() : UnaryStmtNode(OP_if) { + numOpnds = kOperandNumTernary; + } + + virtual ~IfStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IfStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + node->thenPart = thenPart->CloneTree(allocator); + if (elsePart != nullptr) { + node->elsePart = elsePart->CloneTree(allocator); + } + node->SetMeStmtID(GetMeStmtID()); + return node; + } + + IfStmtNode *CloneTreeWithFreqs(MapleAllocator &allocator, + std::unordered_map& toFreqs, + std::unordered_map& fromFreqs, + uint64_t numer, uint64_t denom, uint32_t updateOp) { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? newFreq : 1; + if (updateOp & kUpdateOrigFreq) { + uint64_t left = ((oldFreq - newFreq) > 0 || oldFreq == 0) ? 
(oldFreq - newFreq) : 1; + fromFreqs[GetStmtID()] = left; + } + } + node->thenPart = thenPart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + if (elsePart != nullptr) { + node->elsePart = elsePart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + } + node->SetMeStmtID(GetMeStmtID()); + return node; + } + + BaseNode *Opnd(size_t i = 0) const override { + if (i == 0) { + return UnaryStmtNode::Opnd(0); + } else if (i == 1) { + return thenPart; + } else if (i == 2) { + DEBUG_ASSERT(elsePart != nullptr, "IfStmtNode has wrong numOpnds field, the elsePart is nullptr"); + DEBUG_ASSERT(numOpnds == kOperandNumTernary, "IfStmtNode has wrong numOpnds field, the elsePart is nullptr"); + return elsePart; + } + DEBUG_ASSERT(false, "IfStmtNode has wrong numOpnds field: %u", NumOpnds()); + return nullptr; + } + + BlockNode *GetThenPart() const { + return thenPart; + } + + void SetThenPart(BlockNode *node) { + thenPart = node; + } + + BlockNode *GetElsePart() const { + return elsePart; + } + + void SetElsePart(BlockNode *node) { + elsePart = node; + } + + size_t NumOpnds() const override { + if (elsePart == nullptr) { + return kOperandNumBinary; + } + return kOperandNumTernary; + } + + private: + BlockNode *thenPart = nullptr; + BlockNode *elsePart = nullptr; +}; + +// for both while and dowhile +class WhileStmtNode : public UnaryStmtNode { + public: + explicit WhileStmtNode(Opcode o) : UnaryStmtNode(o) { + BaseNodeT::SetNumOpnds(kOperandNumBinary); + } + + virtual ~WhileStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + WhileStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + node->body = body->CloneTree(allocator); + return node; + } + + WhileStmtNode *CloneTreeWithFreqs(MapleAllocator &allocator, + std::unordered_map& toFreqs, + std::unordered_map& fromFreqs, + uint64_t numer, uint64_t denom, uint32_t updateOp) { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + int64_t oldFreq = fromFreqs[GetStmtID()]; + int64_t newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? static_cast(newFreq) : 1; + if (updateOp & kUpdateOrigFreq) { + int64_t left = (oldFreq - newFreq) > 0 ? 
(oldFreq - newFreq) : 1; + fromFreqs[GetStmtID()] = left; + } + } + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + node->body = body->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + return node; + } + + void SetBody(BlockNode *node) { + body = node; + } + + BlockNode *GetBody() const { + return body; + } + + BaseNode *Opnd(size_t i = 0) const override { + if (i == 0) { + return UnaryStmtNode::Opnd(); + } else if (i == 1) { + return body; + } + DEBUG_ASSERT(false, "WhileStmtNode has wrong numOpnds field: %u", NumOpnds()); + return nullptr; + } + + private: + BlockNode *body = nullptr; +}; + +class DoloopNode : public StmtNode { + public: + DoloopNode() : DoloopNode(StIdx(), false, nullptr, nullptr, nullptr, nullptr) {} + + DoloopNode(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp, BaseNode *incrExp, BlockNode *doBody) + : StmtNode(OP_doloop, kOperandNumDoloop), + doVarStIdx(doVarStIdx), + isPreg(isPReg), + startExpr(startExp), + condExpr(contExp), + incrExpr(incrExp), + doBody(doBody) {} + + virtual ~DoloopNode() = default; + + void DumpDoVar(const MIRModule &mod) const; + void Dump(int32 indent) const override; + bool Verify() const override; + + DoloopNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetStartExpr(startExpr->CloneTree(allocator)); + node->SetContExpr(GetCondExpr()->CloneTree(allocator)); + node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator)); + node->SetDoBody(GetDoBody()->CloneTree(allocator)); + return node; + } + + DoloopNode *CloneTreeWithFreqs(MapleAllocator &allocator, + std::unordered_map& toFreqs, + std::unordered_map& fromFreqs, + uint64_t numer, uint64_t denom, uint32_t updateOp) { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq = oldFreq; + if (updateOp & kUpdateFreqbyScale) { // used in inline/clone + newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + } else if (updateOp & kUpdateUnrolledFreq) { // used in unrolled part + uint64_t bodyFreq = fromFreqs[GetDoBody()->GetStmtID()]; + newFreq = denom > 0 ? (bodyFreq * numer / denom + (oldFreq - bodyFreq)) : oldFreq; + } else if (updateOp & kUpdateUnrollRemainderFreq) { // used in unrolled remainder + uint64_t bodyFreq = fromFreqs[GetDoBody()->GetStmtID()]; + newFreq = denom > 0 ? 
(((bodyFreq * numer) % denom) + (oldFreq - bodyFreq)) : oldFreq;
+      }
+      toFreqs[node->GetStmtID()] = static_cast<uint64_t>(newFreq);
+      DEBUG_ASSERT(oldFreq >= newFreq, "sanity check");
+      if (updateOp & kUpdateOrigFreq) {
+        uint64_t left = oldFreq - newFreq;
+        fromFreqs[GetStmtID()] = left;
+      }
+    }
+    node->SetStartExpr(startExpr->CloneTree(allocator));
+    node->SetContExpr(GetCondExpr()->CloneTree(allocator));
+    node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator));
+    node->SetDoBody(GetDoBody()->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+    return node;
+  }
+
+  void SetDoVarStIdx(StIdx idx) {
+    doVarStIdx = idx;
+  }
+
+  PregIdx GetDoVarPregIdx() const {
+    return static_cast<PregIdx>(doVarStIdx.FullIdx());
+  }
+
+  const StIdx &GetDoVarStIdx() const {
+    return doVarStIdx;
+  }
+
+  void SetDoVarStFullIdx(uint32 idx) {
+    doVarStIdx.SetFullIdx(idx);
+  }
+
+  void SetIsPreg(bool isPregVal) {
+    isPreg = isPregVal;
+  }
+
+  bool IsPreg() const {
+    return isPreg;
+  }
+
+  void SetStartExpr(BaseNode *node) {
+    startExpr = node;
+  }
+
+  BaseNode *GetStartExpr() const {
+    return startExpr;
+  }
+
+  void SetContExpr(BaseNode *node) {
+    condExpr = node;
+  }
+
+  BaseNode *GetCondExpr() const {
+    return condExpr;
+  }
+
+  void SetIncrExpr(BaseNode *node) {
+    incrExpr = node;
+  }
+
+  BaseNode *GetIncrExpr() const {
+    return incrExpr;
+  }
+
+  void SetDoBody(BlockNode *node) {
+    doBody = node;
+  }
+
+  BlockNode *GetDoBody() const {
+    return doBody;
+  }
+
+  BaseNode *Opnd(size_t i) const override {
+    if (i == 0) {
+      return startExpr;
+    }
+    if (i == 1) {
+      return condExpr;
+    }
+    if (i == 2) {
+      return incrExpr;
+    }
+    return *(&doBody + i - 3);
+  }
+
+  size_t NumOpnds() const override {
+    return kOperandNumDoloop;
+  }
+
+  // Note: rewritten as an else-if chain; as originally written, any index other
+  // than 2 also fell into the final else and wrote through *(&doBody + i - 3).
+  void SetOpnd(BaseNode *node, size_t i) override {
+    if (i == 0) {
+      startExpr = node;
+    } else if (i == 1) {
+      SetContExpr(node);
+    } else if (i == 2) {
+      incrExpr = node;
+    } else {
+      *(&doBody + i - 3) = static_cast<BlockNode*>(node);
+    }
+  }
+
+ private:
+  static constexpr int kOperandNumDoloop = 4;
+  StIdx doVarStIdx;  // must be local; cast to PregIdx for preg
+  bool isPreg;
+  BaseNode *startExpr;
+  BaseNode *condExpr;
+  BaseNode *incrExpr;
+  BlockNode *doBody;
+};
+
+class ForeachelemNode : public StmtNode {
+ public:
+  ForeachelemNode() : StmtNode(OP_foreachelem) {
+    BaseNodeT::SetNumOpnds(kOperandNumUnary);
+  }
+
+  virtual ~ForeachelemNode() = default;
+
+  const StIdx &GetElemStIdx() const {
+    return elemStIdx;
+  }
+
+  void SetElemStIdx(StIdx elemStIdxValue) {
+    elemStIdx = elemStIdxValue;
+  }
+
+  const StIdx &GetArrayStIdx() const {
+    return arrayStIdx;
+  }
+
+  void SetArrayStIdx(StIdx arrayStIdxValue) {
+    arrayStIdx = arrayStIdxValue;
+  }
+
+  BlockNode *GetLoopBody() const {
+    return loopBody;
+  }
+
+  void SetLoopBody(BlockNode *loopBodyValue) {
+    loopBody = loopBodyValue;
+  }
+
+  BaseNode *Opnd(size_t) const override {
+    return loopBody;
+  }
+
+  size_t NumOpnds() const override {
+    return numOpnds;
+  }
+
+  void Dump(int32 indent) const override;
+
+  ForeachelemNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *node = allocator.GetMemPool()->New<ForeachelemNode>(*this);
+    node->SetStmtID(stmtIDNext++);
+    node->SetLoopBody(loopBody->CloneTree(allocator));
+    return node;
+  }
+
+ private:
+  StIdx elemStIdx;   // must be local symbol
+  StIdx arrayStIdx;  // symbol table entry of the array/collection variable
+  BlockNode *loopBody = nullptr;
+};
+
+// used by assertge, assertlt
+class BinaryStmtNode : public StmtNode, public BinaryOpnds {
+ public:
+  explicit BinaryStmtNode(Opcode o) : StmtNode(o, kOperandNumBinary) {}
+
+  virtual ~BinaryStmtNode() = default;
+
+  void Dump(int32 indent) const override;
+  bool Verify() const override;
+  BinaryStmtNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *node = allocator.GetMemPool()->New<BinaryStmtNode>(*this);
+    node->SetStmtID(stmtIDNext++);
+    node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0);
+    node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1);
+    return node;
+  }
+
+  BaseNode *Opnd(size_t i) const override {
+    DEBUG_ASSERT(i < kOperandNumBinary, "Invalid operand idx in BinaryStmtNode");
+    return GetBOpnd(i);
+  }
+
+  size_t NumOpnds() const override {
+    return kOperandNumBinary;
+  }
+
+  void SetOpnd(BaseNode *node, size_t i) override {
+    SetBOpnd(node, i);
+  }
+
+  bool IsLeaf() const override {
+    return false;
+  }
+};
+
+class IassignoffNode : public BinaryStmtNode {
+ public:
+  IassignoffNode() : BinaryStmtNode(OP_iassignoff) {}
+
+  explicit IassignoffNode(int32 ofst) : BinaryStmtNode(OP_iassignoff), offset(ofst) {}
+
+  IassignoffNode(PrimType primType, int32 offset, BaseNode *addrOpnd, BaseNode *srcOpnd) :
+      IassignoffNode(offset) {
+    BaseNodeT::SetPrimType(primType);
+    SetBOpnd(addrOpnd, 0);
+    SetBOpnd(srcOpnd, 1);
+  }
+
+  virtual ~IassignoffNode() = default;
+
+  void Dump(int32 indent) const override;
+  bool Verify() const override;
+
+  IassignoffNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *node = allocator.GetMemPool()->New<IassignoffNode>(*this);
+    node->SetStmtID(stmtIDNext++);
+    node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0);
+    node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1);
+    return node;
+  }
+
+  int32 GetOffset() const {
+    return offset;
+  }
+
+  void SetOffset(int32 newOffset) {
+    offset = newOffset;
+  }
+
+ private:
+  int32 offset = 0;
+};
+
+// for iassignfpoff, iassignspoff, iassignpcoff
+class IassignFPoffNode : public UnaryStmtNode {
+ public:
+  explicit IassignFPoffNode(Opcode o) : UnaryStmtNode(o) {}
+
+  IassignFPoffNode(Opcode o, int32 ofst) : UnaryStmtNode(o), offset(ofst) {}
+
+  IassignFPoffNode(Opcode o, PrimType primType, int32 offset, BaseNode *src) :
+      IassignFPoffNode(o, offset) {
+    BaseNodeT::SetPrimType(primType);
+    UnaryStmtNode::SetOpnd(src, 0);
+  }
+
+  virtual ~IassignFPoffNode() = default;
+
+  void Dump(int32 indent) const override;
+  bool Verify() const override;
+
+  IassignFPoffNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *node = allocator.GetMemPool()->New<IassignFPoffNode>(*this);
+    node->SetStmtID(stmtIDNext++);
+    node->SetOpnd(Opnd(0)->CloneTree(allocator), 0);
+    return node;
+  }
+
+  void SetOffset(int32 ofst) {
+    offset = ofst;
+  }
+
+  int32 GetOffset() const {
+    return offset;
+  }
+
+ private:
+  int32 offset = 0;
+};
+
+typedef IassignFPoffNode IassignPCoffNode;
+
+class BlkassignoffNode : public BinaryStmtNode {
+ public:
+  BlkassignoffNode() : BinaryStmtNode(OP_blkassignoff) {
+    ptyp = PTY_agg;
+    alignLog2 = 0;
+    offset = 0;
+  }
+  BlkassignoffNode(int32 ofst, int32 bsize) :
+      BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) {
+    ptyp = PTY_agg;
+    alignLog2 = 0;
+  }
+  BlkassignoffNode(int32 ofst, int32 bsize, BaseNode *dest, BaseNode *src) :
+      BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) {
+    ptyp = PTY_agg;
+    alignLog2 = 0;
+    SetBOpnd(dest, 0);
+    SetBOpnd(src, 1);
+  }
+  ~BlkassignoffNode() = default;
+
+  void Dump(int32 indent) const override;
+
+  BlkassignoffNode *CloneTree(MapleAllocator &allocator) const override {
+    BlkassignoffNode *node =
allocator.GetMemPool()->New(offset, blockSize); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + uint32 GetAlign() const { + uint32 res = 1; + for (uint32 i = 0; i < alignLog2; i++) { + res *= 2; + } + return res; + } + + void SetAlign(uint32 x) { + if (x == 0) { + alignLog2 = 0; + return; + } + DEBUG_ASSERT((~(x - 1) & x) == x, "SetAlign called with non power of 2"); + uint32 res = 0; + while (x != 1) { + x >>= 1; + ++res; + } + alignLog2 = res; + } + + uint32 alignLog2 : 4; + int32 offset : 28; + int32 blockSize = 0; +}; + +// used by return, syncenter, syncexit +class NaryStmtNode : public StmtNode, public NaryOpnds { + public: + NaryStmtNode(MapleAllocator &allocator, Opcode o) : StmtNode(o), NaryOpnds(allocator) {} + + NaryStmtNode(const MIRModule &mod, Opcode o) : NaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + NaryStmtNode(MapleAllocator &allocator, const NaryStmtNode &node) + // do not use stmt copy constructor + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds, node.GetSrcPos(), node.GetOriginalID(), + node.GetStmtAttrs()), + NaryOpnds(allocator) {} + + NaryStmtNode(const MIRModule &mod, const NaryStmtNode &node) + : NaryStmtNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + explicit NaryStmtNode(const NaryStmtNode &node) = delete; + NaryStmtNode &operator=(const NaryStmtNode &node) = delete; + virtual ~NaryStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + NaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + BaseNode *Opnd(size_t i) const override { + return GetNopndAt(i); + } + + void SetOpnd(BaseNode *node, size_t i) override { + DEBUG_ASSERT(i < GetNopnd().size(), "array index out of range"); + SetNOpndAt(i, node); + } + + size_t NumOpnds() const override { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "NaryStmtNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void SetOpnds(const MapleVector &arguments) { + SetNOpnd(arguments); + SetNumOpnds(arguments.size()); + } + + void PushOpnd(BaseNode *node) { + if (node != nullptr) { + GetNopnd().push_back(node); + } + SetNumOpnds(GetNopndSize()); + } + + void InsertOpnd(BaseNode *node, size_t idx) { + if (node == nullptr || idx > GetNopndSize()) { + return; + } + auto begin = GetNopnd().begin(); + for (size_t i = 0; i < idx; ++i) { + ++begin; + } + (void)GetNopnd().insert(begin, node); + SetNumOpnds(GetNopndSize()); + } +}; + +class SafetyCheckStmtNode { + public: + explicit SafetyCheckStmtNode(GStrIdx funcNameIdx) + : funcNameIdx(funcNameIdx) {} + explicit SafetyCheckStmtNode(const SafetyCheckStmtNode& stmtNode) + : funcNameIdx(stmtNode.GetFuncNameIdx()) {} + + virtual ~SafetyCheckStmtNode() = default; + + std::string GetFuncName() const; + + GStrIdx GetFuncNameIdx() const { + return funcNameIdx; + } + + void Dump() const { + LogInfo::MapleLogger() << " <&" << GetFuncName() << ">"; + } + + private: + GStrIdx funcNameIdx; +}; + +// used by callassertnonnull, callassertle +class SafetyCallCheckStmtNode { + public: + SafetyCallCheckStmtNode(GStrIdx callFuncNameIdx, size_t paramIndex, GStrIdx stmtFuncNameIdx) + : callFuncNameIdx(callFuncNameIdx), paramIndex(paramIndex), 
stmtFuncNameIdx(stmtFuncNameIdx) {} + explicit SafetyCallCheckStmtNode(const SafetyCallCheckStmtNode& stmtNode) + : callFuncNameIdx(stmtNode.GetFuncNameIdx()), paramIndex(stmtNode.GetParamIndex()), + stmtFuncNameIdx(stmtNode.GetStmtFuncNameIdx()) {} + + virtual ~SafetyCallCheckStmtNode() = default; + + std::string GetFuncName() const; + GStrIdx GetFuncNameIdx() const { + return callFuncNameIdx; + } + std::string GetStmtFuncName() const; + size_t GetParamIndex() const { + return paramIndex; + } + + GStrIdx GetStmtFuncNameIdx() const { + return stmtFuncNameIdx; + } + + void Dump() const { + LogInfo::MapleLogger() << " <&" << GetFuncName() << ", " << paramIndex << ", &" << GetStmtFuncName() << ">"; + } + + private: + GStrIdx callFuncNameIdx; + size_t paramIndex; + GStrIdx stmtFuncNameIdx; +}; + +// used by callassertnonnull +class CallAssertNonnullStmtNode : public UnaryStmtNode, public SafetyCallCheckStmtNode { + public: + CallAssertNonnullStmtNode(Opcode o, GStrIdx callFuncNameIdx, size_t paramIndex, GStrIdx stmtFuncNameIdx) + : UnaryStmtNode(o), SafetyCallCheckStmtNode(callFuncNameIdx, paramIndex, stmtFuncNameIdx) {} + virtual ~CallAssertNonnullStmtNode() {} + + void Dump(int32 indent) const override; + + CallAssertNonnullStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + return node; + } +}; + +// used by assertnonnull +class AssertNonnullStmtNode : public UnaryStmtNode, public SafetyCheckStmtNode { + public: + AssertNonnullStmtNode(Opcode o, GStrIdx funcNameIdx) + : UnaryStmtNode(o), SafetyCheckStmtNode(funcNameIdx) {} + virtual ~AssertNonnullStmtNode() {} + + void Dump(int32 indent) const override; + + AssertNonnullStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + return node; + } +}; + +// used by assertle +class AssertBoundaryStmtNode : public NaryStmtNode, public SafetyCheckStmtNode { + public: + AssertBoundaryStmtNode(MapleAllocator &allocator, Opcode o, GStrIdx funcNameIdx) + : NaryStmtNode(allocator, o), SafetyCheckStmtNode(funcNameIdx) {} + virtual ~AssertBoundaryStmtNode() {} + + AssertBoundaryStmtNode(MapleAllocator &allocator, const AssertBoundaryStmtNode& stmtNode) + : NaryStmtNode(allocator, stmtNode), SafetyCheckStmtNode(stmtNode) {} + + AssertBoundaryStmtNode(const MIRModule &mod, Opcode o, GStrIdx funcNameIdx) + : AssertBoundaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o, funcNameIdx) {} + + void Dump(int32 indent) const override; + + AssertBoundaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } +}; + +// used by callassertle +class CallAssertBoundaryStmtNode : public NaryStmtNode, public SafetyCallCheckStmtNode { + public: + CallAssertBoundaryStmtNode(MapleAllocator &allocator, Opcode o, GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) + : NaryStmtNode(allocator, o), SafetyCallCheckStmtNode(funcNameIdx, paramIndex, stmtFuncNameIdx) {} + virtual ~CallAssertBoundaryStmtNode() {} + + CallAssertBoundaryStmtNode(MapleAllocator &allocator, const CallAssertBoundaryStmtNode& stmtNode) + 
: NaryStmtNode(allocator, stmtNode), SafetyCallCheckStmtNode(stmtNode) {} + + CallAssertBoundaryStmtNode(const MIRModule &mod, Opcode o, GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) + : CallAssertBoundaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o, funcNameIdx, paramIndex, stmtFuncNameIdx) {} + + void Dump(int32 indent) const override; + + CallAssertBoundaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } +}; + +// used by call, virtualcall, virtualicall, superclasscall, interfacecall, +// interfaceicall, customcall +// polymorphiccall +// callassigned, virtualcallassigned, virtualicallassigned, +// superclasscallassigned, interfacecallassigned, interfaceicallassigned, +// customcallassigned +// polymorphiccallassigned +class CallNode : public NaryStmtNode, public DeoptBundleInfo { + public: + CallNode(MapleAllocator &allocator, Opcode o) : NaryStmtNode(allocator, o), + DeoptBundleInfo(allocator), + returnValues(allocator.Adapter()) {} + + CallNode(MapleAllocator &allocator, Opcode o, PUIdx idx) : CallNode(allocator, o, idx, TyIdx()) {} + + CallNode(MapleAllocator &allocator, Opcode o, PUIdx idx, TyIdx tdx) + : NaryStmtNode(allocator, o), DeoptBundleInfo(allocator), + puIdx(idx), tyIdx(tdx), returnValues(allocator.Adapter()) {} + + CallNode(const MIRModule &mod, Opcode o) : CallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + CallNode(const MIRModule &mod, Opcode o, PUIdx idx, TyIdx tdx) + : CallNode(mod.GetCurFuncCodeMPAllocator(), o, idx, tdx) {} + + CallNode(MapleAllocator &allocator, const CallNode &node) + : NaryStmtNode(allocator, node), + DeoptBundleInfo(allocator), + puIdx(node.GetPUIdx()), + tyIdx(node.tyIdx), + returnValues(allocator.Adapter()) {} + + CallNode(const MIRModule &mod, const CallNode &node) : CallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + CallNode(CallNode &node) = delete; + CallNode &operator=(const CallNode &node) = delete; + virtual ~CallNode() = default; + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override ; + const MIRSymbol *GetCallReturnSymbol(const MIRModule &mod) const; + + CallNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->GetReturnVec().push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + for (const auto &elem : GetDeoptBundleInfo()) { + node->AddDeoptBundleInfo(elem.first, elem.second); + } + return node; + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + void SetPUIdx(const PUIdx idx) { + puIdx = idx; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + CallReturnVector &GetReturnVec() { + return returnValues; + } + + CallReturnPair GetReturnPair(size_t idx) const { + DEBUG_ASSERT(idx < returnValues.size(), "out of range in CallNode::GetReturnPair"); + return returnValues.at(idx); + } + + void SetReturnPair(CallReturnPair retVal, size_t idx) { + DEBUG_ASSERT(idx < returnValues.size(), "out of range in CallNode::GetReturnPair"); + 
returnValues.at(idx) = retVal;
+  }
+
+  const CallReturnVector &GetReturnVec() const {
+    return returnValues;
+  }
+
+  CallReturnPair GetNthReturnVec(size_t i) const {
+    DEBUG_ASSERT(i < returnValues.size(), "array index out of range");
+    return returnValues[i];
+  }
+
+  void SetReturnVec(const CallReturnVector &vec) {
+    returnValues = vec;
+  }
+
+  size_t NumOpnds() const override {
+    DEBUG_ASSERT(numOpnds == GetNopndSize(), "CallNode has wrong numOpnds field");
+    return GetNopndSize();
+  }
+
+  void Dump(int32 indent) const override {
+    Dump(indent, true);
+  }
+
+  CallReturnVector *GetCallReturnVector() override {
+    return &returnValues;
+  }
+
+  void SetCallReturnVector(const CallReturnVector &value) {
+    returnValues = value;
+  }
+
+ private:
+  PUIdx puIdx = 0;
+  TyIdx tyIdx = TyIdx(0);
+  CallReturnVector returnValues;
+};
+
+// icall, icallassigned, icallproto and icallprotoassigned
+class IcallNode : public NaryStmtNode, public DeoptBundleInfo {
+ public:
+  IcallNode(MapleAllocator &allocator, Opcode o)
+      : NaryStmtNode(allocator, o),
+        DeoptBundleInfo(allocator),
+        retTyIdx(0), returnValues(allocator.Adapter()) {
+    BaseNodeT::SetNumOpnds(kOperandNumUnary);
+  }
+
+  IcallNode(MapleAllocator &allocator, Opcode o, TyIdx idx)
+      : NaryStmtNode(allocator, o),
+        DeoptBundleInfo(allocator),
+        retTyIdx(idx), returnValues(allocator.Adapter()) {
+    BaseNodeT::SetNumOpnds(kOperandNumUnary);
+  }
+
+  IcallNode(const MIRModule &mod, Opcode o) : IcallNode(mod.GetCurFuncCodeMPAllocator(), o) {}
+
+  IcallNode(const MIRModule &mod, Opcode o, TyIdx idx) : IcallNode(mod.GetCurFuncCodeMPAllocator(), o, idx) {}
+
+  IcallNode(MapleAllocator &allocator, const IcallNode &node)
+      : NaryStmtNode(allocator, node),
+        DeoptBundleInfo(allocator),
+        retTyIdx(node.retTyIdx), returnValues(allocator.Adapter()) {}
+
+  IcallNode(const MIRModule &mod, const IcallNode &node) : IcallNode(mod.GetCurFuncCodeMPAllocator(), node) {}
+
+  IcallNode(IcallNode &node) = delete;
+  IcallNode &operator=(const IcallNode &node) = delete;
+  virtual ~IcallNode() = default;
+
+  virtual void Dump(int32 indent, bool newline) const;
+  bool Verify() const override;
+  MIRType *GetCallReturnType() override;
+  const MIRSymbol *GetCallReturnSymbol(const MIRModule &mod) const;
+  IcallNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *node = allocator.GetMemPool()->New<IcallNode>(allocator, *this);
+    for (size_t i = 0; i < GetNopndSize(); ++i) {
+      node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator));
+    }
+    for (size_t i = 0; i < returnValues.size(); ++i) {
+      node->returnValues.push_back(returnValues[i]);
+    }
+    node->SetNumOpnds(GetNopndSize());
+    for (const auto &elem : GetDeoptBundleInfo()) {
+      node->AddDeoptBundleInfo(elem.first, elem.second);
+    }
+    return node;
+  }
+
+  TyIdx GetRetTyIdx() const {
+    return retTyIdx;
+  }
+
+  void SetRetTyIdx(TyIdx idx) {
+    retTyIdx = idx;
+  }
+
+  const CallReturnVector &GetReturnVec() const {
+    return returnValues;
+  }
+
+  CallReturnVector &GetReturnVec() {
+    return returnValues;
+  }
+
+  void SetReturnVec(const CallReturnVector &vec) {
+    returnValues = vec;
+  }
+
+  size_t NumOpnds() const override {
+    DEBUG_ASSERT(numOpnds == GetNopndSize(), "IcallNode has wrong numOpnds field");
+    return GetNopndSize();
+  }
+
+  void Dump(int32 indent) const override {
+    Dump(indent, true);
+  }
+
+  CallReturnVector *GetCallReturnVector() override {
+    return &returnValues;
+  }
+
+ private:
+  TyIdx retTyIdx;  // for icall: return type for callee; for icallproto: the prototype
+                   // the 0th operand is the function pointer
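+  // Editor's note: illustration, not in the original patch. Following the comment
+  // above, the operand vector of an icall holds the callee address first and the
+  // actual arguments after it; with hypothetical names `alloc`, `funcPtrExpr`, `arg0`:
+  //
+  //   IcallNode *ic = alloc.GetMemPool()->New<IcallNode>(alloc, OP_icall);
+  //   ic->PushOpnd(funcPtrExpr);  // operand 0: function pointer
+  //   ic->PushOpnd(arg0);         // operands 1..n: arguments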
CallReturnVector returnValues; +}; + +// used by intrinsiccall and xintrinsiccall +class IntrinsiccallNode : public NaryStmtNode, public DeoptBundleInfo { + public: + IntrinsiccallNode(MapleAllocator &allocator, Opcode o) + : NaryStmtNode(allocator, o), + DeoptBundleInfo(allocator), + intrinsic(INTRN_UNDEFINED), tyIdx(0), returnValues(allocator.Adapter()) {} + + IntrinsiccallNode(MapleAllocator &allocator, Opcode o, MIRIntrinsicID id) + : NaryStmtNode(allocator, o), + DeoptBundleInfo(allocator), + intrinsic(id), tyIdx(0), returnValues(allocator.Adapter()) {} + + IntrinsiccallNode(const MIRModule &mod, Opcode o) : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + IntrinsiccallNode(const MIRModule &mod, Opcode o, MIRIntrinsicID id) + : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), o, id) {} + + IntrinsiccallNode(MapleAllocator &allocator, const IntrinsiccallNode &node) + : NaryStmtNode(allocator, node), + DeoptBundleInfo(allocator), + intrinsic(node.GetIntrinsic()), + tyIdx(node.tyIdx), + returnValues(allocator.Adapter()) {} + + IntrinsiccallNode(const MIRModule &mod, const IntrinsiccallNode &node) + : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + IntrinsiccallNode(IntrinsiccallNode &node) = delete; + IntrinsiccallNode &operator=(const IntrinsiccallNode &node) = delete; + virtual ~IntrinsiccallNode() = default; + + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + + IntrinsiccallNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->GetReturnVec().push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + MIRIntrinsicID GetIntrinsic() const { + return intrinsic; + } + + void SetIntrinsic(MIRIntrinsicID id) { + intrinsic = id; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + CallReturnVector &GetReturnVec() { + return returnValues; + } + + const CallReturnVector &GetReturnVec() const { + return returnValues; + } + + void SetReturnVec(const CallReturnVector &vec) { + returnValues = vec; + } + + size_t NumOpnds() const override { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "IntrinsiccallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override { + return &returnValues; + } + + CallReturnPair &GetCallReturnPair(uint32 i) { + DEBUG_ASSERT(i < returnValues.size(), "array index out of range"); + return returnValues.at(i); + } + + private: + MIRIntrinsicID intrinsic; + TyIdx tyIdx; + CallReturnVector returnValues; +}; + +// used by callinstant, virtualcallinstant, superclasscallinstant and +// interfacecallinstant, callinstantassigned, virtualcallinstantassigned, +// superclasscallinstantassigned and interfacecallinstantassigned +class CallinstantNode : public CallNode { + public: + CallinstantNode(MapleAllocator &allocator, Opcode o, TyIdx tIdx) : CallNode(allocator, o), instVecTyIdx(tIdx) {} + + CallinstantNode(const MIRModule &mod, Opcode o, TyIdx tIdx) + : CallinstantNode(mod.GetCurFuncCodeMPAllocator(), o, tIdx) {} + + CallinstantNode(MapleAllocator &allocator, const CallinstantNode &node) + : CallNode(allocator, node), 
instVecTyIdx(node.instVecTyIdx) {}
+
+  CallinstantNode(const MIRModule &mod, const CallinstantNode &node)
+      : CallinstantNode(mod.GetCurFuncCodeMPAllocator(), node) {}
+
+  CallinstantNode(CallinstantNode &node) = delete;
+  CallinstantNode &operator=(const CallinstantNode &node) = delete;
+  virtual ~CallinstantNode() = default;
+
+  void Dump(int32 indent, bool newline) const override;
+  void Dump(int32 indent) const override {
+    Dump(indent, true);
+  }
+
+  CallinstantNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *node = allocator.GetMemPool()->New<CallinstantNode>(allocator, *this);
+    for (size_t i = 0; i < GetNopndSize(); ++i) {
+      node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator));
+    }
+    for (size_t i = 0; i < GetReturnVec().size(); ++i) {
+      node->GetReturnVec().push_back(GetNthReturnVec(i));
+    }
+    node->SetNumOpnds(GetNopndSize());
+    return node;
+  }
+
+  CallReturnVector *GetCallReturnVector() override {
+    return &GetReturnVec();
+  }
+
+ private:
+  TyIdx instVecTyIdx;
+};
+
+class LabelNode : public StmtNode {
+ public:
+  LabelNode() : StmtNode(OP_label) {}
+
+  explicit LabelNode(LabelIdx idx) : StmtNode(OP_label), labelIdx(idx) {}
+
+  virtual ~LabelNode() = default;
+
+  void Dump(int32 indent) const override;
+
+  LabelNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *l = allocator.GetMemPool()->New<LabelNode>(*this);
+    l->SetStmtID(stmtIDNext++);
+    return l;
+  }
+
+  LabelIdx GetLabelIdx() const {
+    return labelIdx;
+  }
+
+  void SetLabelIdx(LabelIdx idx) {
+    labelIdx = idx;
+  }
+
+ private:
+  LabelIdx labelIdx = 0;
+};
+
+class CommentNode : public StmtNode {
+ public:
+  explicit CommentNode(const MapleAllocator &allocator) : StmtNode(OP_comment), comment(allocator.GetMemPool()) {}
+
+  explicit CommentNode(const MIRModule &mod) : CommentNode(mod.GetCurFuncCodeMPAllocator()) {}
+
+  CommentNode(const MapleAllocator &allocator, const std::string &cmt)
+      : StmtNode(OP_comment), comment(cmt, allocator.GetMemPool()) {}
+
+  CommentNode(const MIRModule &mod, const std::string &cmt) : CommentNode(mod.GetCurFuncCodeMPAllocator(), cmt) {}
+
+  CommentNode(const MapleAllocator &allocator, const CommentNode &node)
+      : StmtNode(node.GetOpCode(), node.GetPrimType()),
+        comment(node.comment, allocator.GetMemPool()) {}
+
+  CommentNode(const MIRModule &mod, const CommentNode &node) : CommentNode(mod.GetCurFuncCodeMPAllocator(), node) {}
+
+  CommentNode(CommentNode &node) = delete;
+  CommentNode &operator=(const CommentNode &node) = delete;
+  virtual ~CommentNode() = default;
+
+  void Dump(int32 indent) const override;
+
+  CommentNode *CloneTree(MapleAllocator &allocator) const override {
+    auto *c = allocator.GetMemPool()->New<CommentNode>(allocator, *this);
+    return c;
+  }
+
+  const MapleString &GetComment() const {
+    return comment;
+  }
+
+  void SetComment(MapleString com) {
+    comment = com;
+  }
+
+  void SetComment(const std::string &str) {
+    comment = str;
+  }
+
+  void SetComment(const char *str) {
+    comment = str;
+  }
+
+  void Append(const std::string &str) {
+    comment.append(str);
+  }
+
+ private:
+  MapleString comment;
+};
+
+enum AsmQualifierKind : unsigned {  // they are already Maple IR keywords
+  kASMvolatile,
+  kASMinline,
+  kASMgoto,
+};
+
+class AsmNode : public NaryStmtNode {
+ public:
+  explicit AsmNode(MapleAllocator *alloc)
+      : NaryStmtNode(*alloc, OP_asm),
+        asmString(alloc->GetMemPool()), inputConstraints(alloc->Adapter()),
+        asmOutputs(alloc->Adapter()), outputConstraints(alloc->Adapter()),
+        clobberList(alloc->Adapter()), gotoLabels(alloc->Adapter()), qualifiers(0) {}
+
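+  // Editor's note: a small sketch (not part of the original patch) of the qualifier
+  // bit-set maintained below; `node` is a hypothetical AsmNode*.
+  //
+  //   node->SetQualifier(kASMvolatile);              // qualifiers |= 1U << kASMvolatile
+  //   if (node->GetQualifier(kASMvolatile)) { ... }  // single-bit test
+  //   // kASMinline and kASMgoto occupy their own bits, so qualifiers combine freely
+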
AsmNode(MapleAllocator &allocator, const AsmNode &node) + : NaryStmtNode(allocator, node), asmString(node.asmString, allocator.GetMemPool()), + inputConstraints(allocator.Adapter()), asmOutputs(allocator.Adapter()), + outputConstraints(allocator.Adapter()), clobberList(allocator.Adapter()), + gotoLabels(allocator.Adapter()), qualifiers(node.qualifiers) {} + + virtual ~AsmNode() = default; + + AsmNode *CloneTree(MapleAllocator &allocator) const override; + + void SetQualifier(AsmQualifierKind x) { + qualifiers |= (1U << static_cast(x)); + } + + bool GetQualifier(AsmQualifierKind x) const { + return (qualifiers & (1U << static_cast(x))) != 0; + } + + CallReturnVector *GetCallReturnVector() override { + return &asmOutputs; + } + + void SetHasWriteInputs() { + hasWriteInputs = true; + } + + bool HasWriteInputs() const { + return hasWriteInputs; + } + + void DumpOutputs(int32 indent, std::string &uStr) const; + void DumpInputOperands(int32 indent, std::string &uStr) const; + void Dump(int32 indent) const override; + + MapleString asmString; + MapleVector inputConstraints; // length is numOpnds + CallReturnVector asmOutputs; + MapleVector outputConstraints; // length is returnValues.size() + MapleVector clobberList; + MapleVector gotoLabels; + uint32 qualifiers; + + private: + bool hasWriteInputs = false; +}; + +void DumpCallReturns(const MIRModule &mod, CallReturnVector nrets, int32 indent); +bool HasIreadExpr(const BaseNode *expr); +size_t MaxDepth(const BaseNode *expr); +} // namespace maple + +#define LOAD_SAFE_CAST_FOR_MIR_NODE +#include "ir_safe_cast_traits.def" + +#endif // MAPLE_IR_INCLUDE_MIR_NODES_H diff --git a/ecmascript/mapleall/maple_ir/include/mir_parser.h b/ecmascript/mapleall/maple_ir/include/mir_parser.h new file mode 100755 index 0000000000000000000000000000000000000000..2529cf2ea36a0b5a3ebf99cf36591458c86b100a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/mir_parser.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_PARSER_H +#define MAPLE_IR_INCLUDE_MIR_PARSER_H +#include "mir_module.h" +#include "lexer.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "mir_scope.h" +#include "parser_opt.h" + +namespace maple { +using BaseNodePtr = BaseNode*; +using StmtNodePtr = StmtNode*; +using BlockNodePtr = BlockNode*; + +class FormalDef; + +class MIRParser { + public: + explicit MIRParser(MIRModule &md) + : lexer(md), + mod(md), + definedLabels(mod.GetMPAllocator().Adapter()) { + safeRegionFlag.push(false); + } + + ~MIRParser() = default; + + MIRFunction *CreateDummyFunction(); + void ResetCurrentFunction() { + mod.SetCurFunction(dummyFunction); + } + + bool ParseLoc(); + bool ParseLocStmt(StmtNodePtr &stmt); + bool ParsePosition(SrcPosition &pos); + bool ParseOneScope(MIRScope &scope); + bool ParseScope(StmtNodePtr &stmt); + bool ParseOneAlias(GStrIdx &strIdx, MIRAliasVars &aliasVar); + bool ParseAlias(StmtNodePtr &stmt); + uint8 *ParseWordsInfo(uint32 size); + bool ParseSwitchCase(int64&, LabelIdx&); + bool ParseExprOneOperand(BaseNodePtr &expr); + bool ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1); + bool ParseExprNaryOperand(MapleVector &opndVec); + bool IsDelimitationTK(TokenKind tk) const; + Opcode GetOpFromToken(TokenKind tk) const; + bool IsStatement(TokenKind tk) const; + PrimType GetPrimitiveType(TokenKind tk) const; + MIRIntrinsicID GetIntrinsicID(TokenKind tk) const; + bool ParseScalarValue(MIRConstPtr &stype, MIRType &type); + bool ParseConstAddrLeafExpr(MIRConstPtr &cexpr); + bool ParseInitValue(MIRConstPtr &theConst, TyIdx tyIdx, bool allowEmpty = false); + bool ParseDeclaredSt(StIdx &stidx); + void CreateFuncMIRSymbol(PUIdx &puidx, GStrIdx strIdx); + bool ParseDeclaredFunc(PUIdx &puidx); + bool ParseTypeAttrs(TypeAttrs &attrs); + bool ParseVarTypeAttrs(MIRSymbol &st); + bool CheckAlignTk(); + bool ParseAlignAttrs(TypeAttrs &tA); + bool ParsePackAttrs(); + bool ParseFieldAttrs(FieldAttrs &attrs); + bool ParseFuncAttrs(FuncAttrs &attrs); + void SetAttrContent(FuncAttrs &attrs, FuncAttrKind x, const MIRLexer &lexer); + bool CheckPrimAndDerivedType(TokenKind tokenKind, TyIdx &tyIdx); + bool ParsePrimType(TyIdx &tyIdx); + bool ParseFarrayType(TyIdx &arrayTyIdx); + bool ParseArrayType(TyIdx &arrayTyIdx); + bool ParseBitFieldType(TyIdx &fieldTyIdx); + bool ParsePragmaElement(MIRPragmaElement &elem); + bool ParsePragmaElementForArray(MIRPragmaElement &elem); + bool ParsePragmaElementForAnnotation(MIRPragmaElement &elem); + bool ParsePragma(MIRStructType &type); + bool ParseFields(MIRStructType &type); + bool ParseStructType(TyIdx &styIdx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseClassType(TyIdx &styidx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseInterfaceType(TyIdx &sTyIdx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseDefinedTypename(TyIdx &definedTyIdx, MIRTypeKind kind = kTypeUnknown); + bool ParseTypeParam(TyIdx &definedTyIdx); + bool ParsePointType(TyIdx &tyIdx); + bool ParseFuncType(TyIdx &tyIdx); + bool ParseGenericInstantVector(MIRInstantVectorType &insVecType); + bool ParseDerivedType(TyIdx &tyIdx, MIRTypeKind kind = kTypeUnknown, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseType(TyIdx &tyIdx); + bool ParseStatement(StmtNodePtr &stmt); + bool ParseSpecialReg(PregIdx &pRegIdx); + bool ParsePseudoReg(PrimType primType, PregIdx &pRegIdx); + bool ParseStmtBlock(BlockNodePtr &blk); + bool ParsePrototype(MIRFunction &func, MIRSymbol &funcSymbol, TyIdx &funcTyIdx); + bool ParseFunction(uint32 fileIdx = 0); + bool 
ParseStorageClass(MIRSymbol &symbol) const;
+  bool ParseDeclareVarInitValue(MIRSymbol &symbol);
+  bool ParseDeclareVar(MIRSymbol&);
+  bool ParseDeclareReg(MIRSymbol &symbol, const MIRFunction &func);
+  bool ParseDeclareFormal(FormalDef &formalDef);
+  bool ParsePrototypeRemaining(MIRFunction &func, std::vector<TyIdx> &vecTyIdx,
+                               std::vector<TypeAttrs> &vecAttrs, bool &varArgs);
+
+  // Stmt Parser
+  bool ParseStmtDassign(StmtNodePtr &stmt);
+  bool ParseStmtDassignoff(StmtNodePtr &stmt);
+  bool ParseStmtRegassign(StmtNodePtr &stmt);
+  bool ParseStmtIassign(StmtNodePtr &stmt);
+  bool ParseStmtIassignoff(StmtNodePtr &stmt);
+  bool ParseStmtIassignFPoff(StmtNodePtr &stmt);
+  bool ParseStmtBlkassignoff(StmtNodePtr &stmt);
+  bool ParseStmtDoloop(StmtNodePtr &stmt);
+  bool ParseStmtForeachelem(StmtNodePtr &stmt);
+  bool ParseStmtDowhile(StmtNodePtr &stmt);
+  bool ParseStmtIf(StmtNodePtr &stmt);
+  bool ParseStmtWhile(StmtNodePtr &stmt);
+  bool ParseStmtLabel(StmtNodePtr &stmt);
+  bool ParseStmtGoto(StmtNodePtr &stmt);
+  bool ParseStmtBr(StmtNodePtr &stmt);
+  bool ParseStmtSwitch(StmtNodePtr &stmt);
+  bool ParseStmtRangegoto(StmtNodePtr &stmt);
+  bool ParseStmtMultiway(StmtNodePtr &stmt);
+  PUIdx EnterUndeclaredFunction(bool isMcount = false);  // for -pg in order to add "void _mcount()"
+  bool ParseStmtCall(StmtNodePtr &stmt);
+  bool ParseStmtCallMcount(StmtNodePtr &stmt);  // for -pg in order to add "void _mcount()" to all the functions
+  bool ParseStmtIcall(StmtNodePtr &stmt, Opcode op);
+  bool ParseStmtIcall(StmtNodePtr &stmt);
+  bool ParseStmtIcallassigned(StmtNodePtr &stmt);
+  bool ParseStmtIcallproto(StmtNodePtr &stmt);
+  bool ParseStmtIcallprotoassigned(StmtNodePtr &stmt);
+  bool ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned);
+  bool ParseStmtIntrinsiccall(StmtNodePtr &stmt);
+  bool ParseStmtIntrinsiccallassigned(StmtNodePtr &stmt);
+  bool ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt, bool isAssigned);
+  bool ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt);
+  bool ParseStmtIntrinsiccallwithtypeassigned(StmtNodePtr &stmt);
+  bool ParseCallReturnPair(CallReturnPair &retpair);
+  bool ParseCallReturns(CallReturnVector &retsvec);
+  bool ParseBinaryStmt(StmtNodePtr &stmt, Opcode op);
+  bool ParseNaryStmtAssert(StmtNodePtr &stmt, Opcode op);
+  bool ParseNaryStmtAssertGE(StmtNodePtr &stmt);
+  bool ParseNaryStmtAssertLT(StmtNodePtr &stmt);
+  bool ParseNaryStmtCalcassertGE(StmtNodePtr &stmt);
+  bool ParseNaryStmtCalcassertLT(StmtNodePtr &stmt);
+  bool ParseNaryStmtCallAssertLE(StmtNodePtr &stmt);
+  bool ParseNaryStmtReturnAssertLE(StmtNodePtr &stmt);
+  bool ParseNaryStmtAssignAssertLE(StmtNodePtr &stmt);
+  bool ParseNaryStmt(StmtNodePtr &stmt, Opcode op);
+  bool ParseNaryStmtReturn(StmtNodePtr &stmt);
+  bool ParseNaryStmtSyncEnter(StmtNodePtr &stmt);
+  bool ParseNaryStmtSyncExit(StmtNodePtr &stmt);
+  bool ParseStmtJsTry(StmtNodePtr &stmt);
+  bool ParseStmtTry(StmtNodePtr &stmt);
+  bool ParseStmtCatch(StmtNodePtr &stmt);
+  bool ParseUnaryStmt(Opcode op, StmtNodePtr &stmt);
+  bool ParseUnaryStmtThrow(StmtNodePtr &stmt);
+  bool ParseUnaryStmtDecRef(StmtNodePtr &stmt);
+  bool ParseUnaryStmtIncRef(StmtNodePtr &stmt);
+  bool ParseUnaryStmtDecRefReset(StmtNodePtr &stmt);
+  bool ParseUnaryStmtIGoto(StmtNodePtr &stmt);
+  bool ParseUnaryStmtEval(StmtNodePtr &stmt);
+  bool ParseUnaryStmtFree(StmtNodePtr &stmt);
+  bool ParseUnaryStmtAssertNonNullCheck(Opcode op, StmtNodePtr &stmt);
+  bool ParseUnaryStmtAssertNonNull(StmtNodePtr &stmt);
+  bool ParseUnaryStmtCallAssertNonNull(StmtNodePtr &stmt);
+  bool ParseUnaryStmtAssignAssertNonNull(StmtNodePtr &stmt);
+  bool ParseUnaryStmtReturnAssertNonNull(StmtNodePtr &stmt);
+  bool ParseStmtMarker(StmtNodePtr &stmt);
+  bool ParseStmtGosub(StmtNodePtr &stmt);
+  bool ParseStmtAsm(StmtNodePtr &stmt);
+  bool ParseStmtSafeRegion(StmtNodePtr &stmt);
+
+  // Expression Parser
+  bool ParseExpression(BaseNodePtr &expr);
+  bool ParseExprDread(BaseNodePtr &expr);
+  bool ParseExprDreadoff(BaseNodePtr &expr);
+  bool ParseExprRegread(BaseNodePtr &expr);
+  bool ParseExprBinary(BaseNodePtr &expr);
+  bool ParseExprCompare(BaseNodePtr &expr);
+  bool ParseExprDepositbits(BaseNodePtr &expr);
+  bool ParseExprConstval(BaseNodePtr &expr);
+  bool ParseExprConststr(BaseNodePtr &expr);
+  bool ParseExprConststr16(BaseNodePtr &expr);
+  bool ParseExprSizeoftype(BaseNodePtr &expr);
+  bool ParseExprFieldsDist(BaseNodePtr &expr);
+  bool ParseExprIreadIaddrof(IreadNode &expr);
+  bool ParseExprIread(BaseNodePtr &expr);
+  bool ParseExprIreadoff(BaseNodePtr &expr);
+  bool ParseExprIreadFPoff(BaseNodePtr &expr);
+  bool ParseExprIaddrof(BaseNodePtr &expr);
+  bool ParseExprAddrof(BaseNodePtr &expr);
+  bool ParseExprAddrofoff(BaseNodePtr &expr);
+  bool ParseExprAddroffunc(BaseNodePtr &expr);
+  bool ParseExprAddroflabel(BaseNodePtr &expr);
+  bool ParseExprUnary(BaseNodePtr &expr);
+  bool ParseExprJarray(BaseNodePtr &expr);
+  bool ParseExprSTACKJarray(BaseNodePtr &expr);
+  bool ParseExprGCMalloc(BaseNodePtr &expr);
+  bool ParseExprExtractbits(BaseNodePtr &expr);
+  bool ParseExprTyconvert(BaseNodePtr &expr);
+  bool ParseExprRetype(BaseNodePtr &expr);
+  bool ParseExprTernary(BaseNodePtr &expr);
+  bool ParseExprArray(BaseNodePtr &expr);
+  bool ParseExprIntrinsicop(BaseNodePtr &expr);
+  bool ParseNaryExpr(NaryStmtNode &stmtNode);
+
+  // funcName and paramIndex are out parameters
+  bool ParseCallAssertInfo(std::string &funcName, int *paramIndex, std::string &stmtFuncName);
+  bool ParseAssertInfo(std::string &funcName);
+  bool ParseTypedef();
+  bool ParseJavaClassInterface(MIRSymbol &symbol, bool isClass);
+  bool ParseIntrinsicId(IntrinsicopNode &intrnOpNode);
+  void Error(const std::string &str);
+  void Warning(const std::string &str);
+  void FixForwardReferencedTypeForOneAgg(MIRType *type);
+  void FixupForwardReferencedTypeByMap();
+
+  const std::string &GetError();
+  const std::string &GetWarning() const;
+  bool ParseFuncInfo(void);
+  void PrepareParsingMIR();
+  void PrepareParsingMplt();
+  bool ParseSrcLang(MIRSrcLang &srcLang);
+  bool ParseMIR(uint32 fileIdx = 0, uint32 option = 0, bool isIPA = false, bool isComb = false);
+  bool ParseMIR(std::ifstream &mplFile);  // the main entry point
+  bool ParseInlineFuncBody(std::ifstream &mplFile);
+  bool ParseMPLT(std::ifstream &mpltFile, const std::string &importFileName);
+  bool ParseMPLTStandalone(std::ifstream &mpltFile, const std::string &importFileName);
+  bool ParseTypeFromString(const std::string &src, TyIdx &tyIdx);
+  void EmitError(const std::string &fileName);
+  void EmitWarning(const std::string &fileName);
+  uint32 GetOptions() const {
+    return options;
+  }
+
+ private:
+  // func ptr map for ParseMIR()
+  using FuncPtrParseMIRForElem = bool (MIRParser::*)();
+  static std::map<TokenKind, FuncPtrParseMIRForElem> funcPtrMapForParseMIR;
+  static std::map<TokenKind, FuncPtrParseMIRForElem> InitFuncPtrMapForParseMIR();
+
+  bool TypeCompatible(TyIdx typeIdx1, TyIdx typeIdx2);
+  bool IsTypeIncomplete(MIRType *type);
+
+  // func for ParseMIR
+  bool ParseMIRForFunc();
+  bool ParseMIRForVar();
+  bool ParseMIRForClass();
+  bool ParseMIRForInterface();
+  bool ParseMIRForFlavor();
+  bool ParseMIRForSrcLang();
+  bool ParseMIRForGlobalMemSize();
+  bool ParseMIRForGlobalMemMap();
+  bool ParseMIRForGlobalWordsTypeTagged();
+  bool ParseMIRForGlobalWordsRefCounted();
+  bool ParseMIRForID();
+  bool ParseMIRForNumFuncs();
+  bool ParseMIRForEntryFunc();
+  bool ParseMIRForFileInfo();
+  bool ParseMIRForFileData();
+  bool ParseMIRForSrcFileInfo();
+  bool ParseMIRForImport();
+  bool ParseMIRForImportPath();
+  bool ParseMIRForAsmdecl();
+
+  // func for ParseExpr
+  using FuncPtrParseExpr = bool (MIRParser::*)(BaseNodePtr &ptr);
+  static std::map<TokenKind, FuncPtrParseExpr> funcPtrMapForParseExpr;
+  static std::map<TokenKind, FuncPtrParseExpr> InitFuncPtrMapForParseExpr();
+
+  // func and param for ParseStmt
+  using FuncPtrParseStmt = bool (MIRParser::*)(StmtNodePtr &stmt);
+  static std::map<TokenKind, FuncPtrParseStmt> funcPtrMapForParseStmt;
+  static std::map<TokenKind, FuncPtrParseStmt> InitFuncPtrMapForParseStmt();
+
+  // func and param for ParseStmtBlock
+  using FuncPtrParseStmtBlock = bool (MIRParser::*)();
+  static std::map<TokenKind, FuncPtrParseStmtBlock> funcPtrMapForParseStmtBlock;
+  static std::map<TokenKind, FuncPtrParseStmtBlock> InitFuncPtrMapForParseStmtBlock();
+  void ParseStmtBlockForSeenComment(BlockNodePtr blk, uint32 mplNum);
+  bool ParseStmtBlockForVar(TokenKind stmtTK);
+  bool ParseStmtBlockForVar();
+  bool ParseStmtBlockForTempVar();
+  bool ParseStmtBlockForReg();
+  bool ParseStmtBlockForType();
+  bool ParseStmtBlockForFrameSize();
+  bool ParseStmtBlockForUpformalSize();
+  bool ParseStmtBlockForModuleID();
+  bool ParseStmtBlockForFuncSize();
+  bool ParseStmtBlockForFuncID();
+  bool ParseStmtBlockForFormalWordsTypeTagged();
+  bool ParseStmtBlockForLocalWordsTypeTagged();
+  bool ParseStmtBlockForFormalWordsRefCounted();
+  bool ParseStmtBlockForLocalWordsRefCounted();
+  bool ParseStmtBlockForFuncInfo();
+
+  // common func
+  void SetSrcPos(SrcPosition &srcPosition, uint32 mplNum);
+
+  // param for ParseStmt
+  Opcode paramOpForStmt = OP_undef;
+  TokenKind paramTokenKindForStmt = TK_invalid;
+  // func and param for ParseStmtBlock
+  MIRFunction *paramCurrFuncForParseStmtBlock = nullptr;
+  MIRLexer lexer;
+  MIRModule &mod;
+  std::string message;
+  std::string warningMessage;
+  uint32 options = kKeepFirst;
+  MapleVector<bool> definedLabels;  // true if label at labidx is defined
+  MIRFunction *dummyFunction = nullptr;
+  MIRFunction *curFunc = nullptr;
+  uint16 lastFileNum = 0;    // to remember first number after LOC
+  uint32 lastLineNum = 0;    // to remember second number after LOC
+  uint16 lastColumnNum = 0;  // to remember third number after LOC
+  uint32 firstLineNum = 0;   // to track function starting line
+  std::map<TyIdx, TyIdx> typeDefIdxMap;  // map previously declared tyIdx
+  bool firstImport = true;           // Mark the first imported mplt file
+  bool paramParseLocalType = false;  // param for ParseTypedef
+  uint32 paramFileIdx = 0;           // param for ParseMIR()
+  bool paramIsIPA = false;
+  bool paramIsComb = false;
+  TokenKind paramTokenKind = TK_invalid;
+  std::vector<std::string> paramImportFileList;
+  std::stack<bool> safeRegionFlag;
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_PARSER_H
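
The private `funcPtrMapForParse*` tables above are the parser's dispatch mechanism: each top-level token kind maps to a pointer-to-member parse routine, built once by the static `Init*` functions. A minimal sketch of that pattern, with hypothetical `Parser`/token names rather than the real maple tables:

```cpp
#include <map>

// Sketch of the TokenKind -> pointer-to-member dispatch MIRParser uses.
class Parser {
 public:
  bool ParseOne(int tokenKind) {
    auto it = handlers.find(tokenKind);
    // Invoke the member function through the pointer-to-member.
    return it != handlers.end() && (this->*(it->second))();
  }

 private:
  using Handler = bool (Parser::*)();
  bool ParseFunc() { return true; }  // stand-in for ParseMIRForFunc()
  bool ParseVar() { return true; }   // stand-in for ParseMIRForVar()
  static const std::map<int, Handler> handlers;  // built once, like InitFuncPtrMapForParseMIR()
};

const std::map<int, Parser::Handler> Parser::handlers = {
    {0, &Parser::ParseFunc},
    {1, &Parser::ParseVar},
};
```

Keeping the tables `static` means the dispatch maps are constructed once and shared by every parser instance, which is why the class stores per-call state (`paramOpForStmt`, `paramTokenKind`, ...) in plain members instead of passing it through the handlers.
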
diff --git a/ecmascript/mapleall/maple_ir/include/mir_pragma.h b/ecmascript/mapleall/maple_ir/include/mir_pragma.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bd3814b68acc5149a45ce7eb7b6050410134064
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_pragma.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_PRAGMA_H
+#define MAPLE_IR_INCLUDE_MIR_PRAGMA_H
+#include "types_def.h"
+#include "prim_types.h"
+#include "mir_module.h"
+#include "mpl_logging.h"
+#include "mempool_allocator.h"
+
+namespace maple {
+class MIRModule;         // circular dependency exists, no other choice
+class MIRType;           // circular dependency exists, no other choice
+class MIRFunction;       // circular dependency exists, no other choice
+class MIRSymbol;         // circular dependency exists, no other choice
+class MIRSymbolTable;    // circular dependency exists, no other choice
+class MIRTypeNameTable;  // circular dependency exists, no other choice
+enum PragmaKind {
+  kPragmaUnknown,
+  kPragmaClass,
+  kPragmaFunc,
+  kPragmaField,
+  kPragmaParam,
+  kPragmaPkg,
+  kPragmaVar,
+  kPragmaGlbvar,
+  kPragmaFuncExecptioni,
+  kPragmaFuncVar
+};
+
+enum PragmaVisibility {
+  kVisBuild,
+  kVisRuntime,
+  kVisSystem,
+  kVisMaple
+};
+
+enum PragmaValueType {
+  kValueByte = 0x00,          // (none; must be 0) ubyte[1]
+  kValueShort = 0x02,         // size - 1 (0…1) ubyte[size]
+  kValueChar = 0x03,          // size - 1 (0…1) ubyte[size]
+  kValueInt = 0x04,           // size - 1 (0…3) ubyte[size]
+  kValueLong = 0x06,          // size - 1 (0…7) ubyte[size]
+  kValueFloat = 0x10,         // size - 1 (0…3) ubyte[size]
+  kValueDouble = 0x11,        // size - 1 (0…7) ubyte[size]
+  kValueMethodType = 0x15,    // size - 1 (0…3) ubyte[size]
+  kValueMethodHandle = 0x16,  // size - 1 (0…3) ubyte[size]
+  kValueString = 0x17,        // size - 1 (0…3) ubyte[size]
+  kValueType = 0x18,          // size - 1 (0…3) ubyte[size]
+  kValueField = 0x19,         // size - 1 (0…3) ubyte[size]
+  kValueMethod = 0x1a,        // size - 1 (0…3) ubyte[size]
+  kValueEnum = 0x1b,          // size - 1 (0…3) ubyte[size]
+  kValueArray = 0x1c,         // (none; must be 0) encoded_array
+  kValueAnnotation = 0x1d,    // (none; must be 0) encoded_annotation
+  kValueNull = 0x1e,          // (none; must be 0) (none)
+  kValueBoolean = 0x1f        // boolean (0…1) (none)
+};
+
+class MIRPragmaElement {
+ public:
+  explicit MIRPragmaElement(MIRModule &m) : MIRPragmaElement(m.GetPragmaMPAllocator()) {
+    val.d = 0;
+  }
+
+  explicit MIRPragmaElement(MapleAllocator &subElemAllocator)
+      : subElemVec(subElemAllocator.Adapter()) {
+    subElemVec.clear();
+    val.d = 0;
+  }
+
+  ~MIRPragmaElement() = default;
+  void Dump(int indent) const;
+  void SubElemVecPushBack(MIRPragmaElement *elem) {
+    subElemVec.push_back(elem);
+  }
+
+  const MapleVector<MIRPragmaElement*> &GetSubElemVec() const {
+    return subElemVec;
+  }
+
+  const MIRPragmaElement *GetSubElement(uint64 i) const {
+    return subElemVec[i];
+  }
+
+  MapleVector<MIRPragmaElement*> &GetSubElemVec() {
+    return subElemVec;
+  }
+
+  const GStrIdx GetNameStrIdx() const {
+    return nameStrIdx;
+  }
+
+  const GStrIdx GetTypeStrIdx() const {
+    return typeStrIdx;
+  }
+
+  PragmaValueType GetType() const {
+    return valueType;
+  }
+
+  int32 GetI32Val() const {
+    return val.i;
+  }
+
+  int64 GetI64Val() const {
+    return val.j;
+  }
+
+  uint64 GetU64Val() const {
+    return val.u;
+  }
+
+  float GetFloatVal() const {
+    return val.f;
+  }
+
+  double GetDoubleVal() const {
+    return val.d;
+  }
+
+  void SetTypeStrIdx(GStrIdx strIdx) {
+    typeStrIdx = strIdx;
+  }
+
+  void SetNameStrIdx(GStrIdx strIdx) {
+    nameStrIdx = strIdx;
+  }
+
+  void SetType(PragmaValueType type) {
+    valueType = type;
+  }
+
+  void SetI32Val(int32 val) {
+    this->val.i = val;
+  }
+
+  void SetI64Val(int64 val) {
+    this->val.j = val;
+  }
+
+  void SetU64Val(uint64 val) {
+    this->val.u = val;
+  }
+
+  void SetFloatVal(float val) {
+    this->val.f = val;
+  }
+
+  void SetDoubleVal(double val) {
+    this->val.d = val;
+  }
+
+ private:
+  GStrIdx nameStrIdx{ 0 };
+  GStrIdx typeStrIdx{ 0 };
+  PragmaValueType valueType = kValueNull;
+  union {
+    int32 i;
+    int64 j;
+    uint64 u;
+    float f;
+    double d;
+  } val;
+  MapleVector<MIRPragmaElement*> subElemVec;
+};
+
+class MIRPragma {
+ public:
+  explicit MIRPragma(MIRModule &m) : MIRPragma(m, m.GetPragmaMPAllocator()) {}
+
+  MIRPragma(MIRModule &m, MapleAllocator &elemAllocator)
+      : mod(&m),
+        elementVec(elemAllocator.Adapter()) {}
+
+  ~MIRPragma() = default;
+  MIRPragmaElement *GetPragmaElemFromSignature(const std::string &signature);
+  void Dump(int indent) const;
+  void PushElementVector(MIRPragmaElement *elem) {
+    elementVec.push_back(elem);
+  }
+
+  void ClearElementVector() {
+    elementVec.clear();
+  }
+
+  PragmaKind GetKind() const {
+    return pragmaKind;
+  }
+
+  uint8 GetVisibility() const {
+    return visibility;
+  }
+
+  const GStrIdx GetStrIdx() const {
+    return strIdx;
+  }
+
+  const TyIdx GetTyIdx() const {
+    return tyIdx;
+  }
+
+  const TyIdx GetTyIdxEx() const {
+    return tyIdxEx;
+  }
+
+  int32 GetParamNum() const {
+    return paramNum;
+  }
+
+  const MapleVector<MIRPragmaElement*> &GetElementVector() const {
+    return elementVec;
+  }
+
+  const MIRPragmaElement *GetNthElement(uint32 i) const {
+    return elementVec[i];
+  }
+
+  void ElementVecPushBack(MIRPragmaElement *elem) {
+    elementVec.push_back(elem);
+  }
+
+  void SetKind(PragmaKind kind) {
+    pragmaKind = kind;
+  }
+
+  void SetVisibility(uint8 visValue) {
+    visibility = visValue;
+  }
+
+  void SetStrIdx(GStrIdx idx) {
+    strIdx = idx;
+  }
+
+  void SetTyIdx(TyIdx idx) {
+    tyIdx = idx;
+  }
+
+  void SetTyIdxEx(TyIdx idx) {
+    tyIdxEx = idx;
+  }
+
+  void SetParamNum(int32 num) {
+    paramNum = num;
+  }
+
+ private:
+  MIRModule *mod;
+  PragmaKind pragmaKind = kPragmaUnknown;
+  uint8 visibility = 0;
+  GStrIdx strIdx{ 0 };
+  TyIdx tyIdx{ 0 };
+  TyIdx tyIdxEx{ 0 };
+  int32 paramNum = -1;  // paramNum th param in function, -1 not for param annotation
+  MapleVector<MIRPragmaElement*> elementVec;
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_PRAGMA_H
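
`MIRPragmaElement` stores one decoded annotation value in the tagged union `val`, with `subElemVec` carrying children only for the `kValueArray`/`kValueAnnotation` kinds, so a pragma decodes into a small tree. The shape, reduced to a hedged standalone sketch (not maple code):

```cpp
#include <cstdint>
#include <vector>

// One annotation value: a kind tag, a scalar payload, and children for
// array/annotation kinds -- the same layout idea as MIRPragmaElement.
struct Element {
  int kind = 0x1e;  // plays the role of PragmaValueType (0x1e == kValueNull)
  union { int32_t i; int64_t j; uint64_t u; float f; double d; } val{};
  std::vector<Element*> children;  // plays the role of subElemVec
};
```

The typed getters and setters on the real class keep `valueType` and the active union member in sync; reading a union member other than the one last written would not be portable C++, which is why callers are expected to check `GetType()` first.
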
diff --git a/ecmascript/mapleall/maple_ir/include/mir_preg.h b/ecmascript/mapleall/maple_ir/include/mir_preg.h
new file mode 100644
index 0000000000000000000000000000000000000000..14eeeaa830458f9249bd0277b8d1791dd709df56
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_preg.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_PREG_H
+#define MAPLE_IR_INCLUDE_MIR_PREG_H
+#if MIR_FEATURE_FULL
+#include <climits>
+#include "mir_module.h"
+#include "global_tables.h"
+#endif  // MIR_FEATURE_FULL
+
+namespace maple {
+extern void PrintIndentation(int32 indent);
+
+// these special registers are encoded by negating the enumeration
+enum SpecialReg : signed int {
+  kSregSp = 1,
+  kSregFp = 2,
+  kSregGp = 3,
+  kSregThrownval = 4,
+  kSregMethodhdl = 5,
+  kSregRetval0 = 6,
+  kSregRetval1 = 7,
+  kSregLast = 8,
+};
+#if MIR_FEATURE_FULL
+class MIRPreg {
+ public:
+  explicit MIRPreg(uint32 n = 0) : MIRPreg(n, kPtyInvalid, nullptr) {}
+
+  MIRPreg(uint32 n, PrimType ptyp) : primType(ptyp), pregNo(n) {}
+
+  MIRPreg(uint32 n, PrimType ptyp, MIRType *mType) : primType(ptyp), pregNo(n), mirType(mType) {}
+
+  ~MIRPreg() = default;
+  void SetNeedRC(bool needRC = true) {
+    this->needRC = needRC;
+  }
+
+  bool NeedRC() const {
+    return needRC;
+  }
+
+  bool IsRef() const {
+    return mirType != nullptr && primType == PTY_ref;
+  }
+
+  PrimType GetPrimType() const {
+    return primType;
+  }
+
+  void SetPrimType(PrimType pty) {
+    primType = pty;
+  }
+
+  Opcode GetOp() const {
+    return op;
+  }
+
+  void SetOp(Opcode o) {
+    this->op = o;
+  }
+
+  int32 GetPregNo() const {
+    return pregNo;
+  }
+
+  void SetPregNo(int32 pregNo) {
+    this->pregNo = pregNo;
+  }
+
+  MIRType *GetMIRType() const {
+    return mirType;
+  }
+
+  void SetMIRType(MIRType *mirType) {
+    this->mirType = mirType;
+  }
+
+ private:
+  PrimType primType = kPtyInvalid;
+  bool needRC = false;
+  Opcode op = OP_undef;  // OP_constval, OP_addrof or OP_dread if rematerializable
+  int32 pregNo;  // the number in maple IR after the %
+  MIRType *mirType = nullptr;
+ public:
+  union RematInfo {
+    const MIRConst *mirConst;  // used only when op is OP_constval
+    const MIRSymbol *sym;      // used only when op is OP_addrof or OP_dread
+  } rematInfo;
+  FieldID fieldID = 0;     // used only when op is OP_addrof or OP_dread
+  bool addrUpper = false;  // used only when op is OP_addrof to indicate upper bits of address
+};
+
+class MIRPregTable {
+ public:
+  explicit MIRPregTable(MapleAllocator *allocator)
+      : pregNoToPregIdxMap(allocator->Adapter()),
+        pregTable(allocator->Adapter()),
+        mAllocator(allocator) {
+    pregTable.push_back(nullptr);
+    specPregTable[0].SetPregNo(0);
+    specPregTable[kSregSp].SetPregNo(-kSregSp);
+    specPregTable[kSregFp].SetPregNo(-kSregFp);
+    specPregTable[kSregGp].SetPregNo(-kSregGp);
+    specPregTable[kSregThrownval].SetPregNo(-kSregThrownval);
+    specPregTable[kSregMethodhdl].SetPregNo(-kSregMethodhdl);
+    specPregTable[kSregRetval0].SetPregNo(-kSregRetval0);
+    specPregTable[kSregRetval1].SetPregNo(-kSregRetval1);
+    for (uint32 i = 0; i < kSregLast; ++i) {
+      specPregTable[i].SetPrimType(PTY_unknown);
+    }
+  }
+
+  ~MIRPregTable();
+
+  PregIdx CreatePreg(PrimType primType, MIRType *mtype = nullptr) {
+    DEBUG_ASSERT(!mtype || mtype->GetPrimType() == PTY_ref || mtype->GetPrimType() == PTY_ptr, "ref or ptr type");
+    uint32 index = ++maxPregNo;
+    auto *preg = mAllocator->GetMemPool()->New<MIRPreg>(index, primType, mtype);
+    return AddPreg(*preg);
+  }
+
+  PregIdx ClonePreg(const MIRPreg &rfpreg) {
+    PregIdx idx = CreatePreg(rfpreg.GetPrimType(), rfpreg.GetMIRType());
+    MIRPreg *preg = pregTable[static_cast<size_t>(idx)];
+    preg->SetNeedRC(rfpreg.NeedRC());
+    return idx;
+  }
+
+  MIRPreg *PregFromPregIdx(PregIdx pregidx) {
+    if (pregidx < 0) {  // special register
+      return &specPregTable[-pregidx];
+    } else {
+      return pregTable.at(static_cast<size_t>(pregidx));
+    }
+  }
+
+  PregIdx GetPregIdxFromPregno(uint32 pregNo) {
+    auto it = pregNoToPregIdxMap.find(pregNo);
+    return (it == pregNoToPregIdxMap.end()) ? PregIdx(0) : it->second;
+  }
+
+  void DumpPregsWithTypes(int32 indent) {
+    MapleVector<MIRPreg*> &pregtable = pregTable;
+    for (uint32 i = 1; i < pregtable.size(); i++) {
+      MIRPreg *mirpreg = pregtable[i];
+      if (mirpreg->GetMIRType() == nullptr) {
+        continue;
+      }
+      PrintIndentation(indent);
+      LogInfo::MapleLogger() << "reg ";
+      LogInfo::MapleLogger() << "%" << mirpreg->GetPregNo();
+      LogInfo::MapleLogger() << " ";
+      mirpreg->GetMIRType()->Dump(0);
+      LogInfo::MapleLogger() << " " << (mirpreg->NeedRC() ? 1 : 0);
+      LogInfo::MapleLogger() << "\n";
+    }
+  }
+
+  size_t Size() const {
+    return pregTable.size();
+  }
+
+  PregIdx AddPreg(MIRPreg &preg) {
+    PregIdx idx = static_cast<PregIdx>(pregTable.size());
+    pregTable.push_back(&preg);
+    DEBUG_ASSERT(pregNoToPregIdxMap.find(preg.GetPregNo()) == pregNoToPregIdxMap.end(), "The same pregno is already taken");
+    pregNoToPregIdxMap[preg.GetPregNo()] = idx;
+    return idx;
+  }
+
+  PregIdx EnterPregNo(uint32 pregNo, PrimType ptyp, MIRType *ty = nullptr) {
+    PregIdx idx = GetPregIdxFromPregno(pregNo);
+    if (idx == 0) {
+      if (pregNo > maxPregNo) {
+        maxPregNo = pregNo;
+      }
+      MIRPreg *preg = mAllocator->GetMemPool()->New<MIRPreg>(pregNo, ptyp, ty);
+      return AddPreg(*preg);
+    }
+    return idx;
+  }
+
+  MapleVector<MIRPreg*> &GetPregTable() {
+    return pregTable;
+  }
+
+  const MapleVector<MIRPreg*> &GetPregTable() const {
+    return pregTable;
+  }
+
+  const MIRPreg *GetPregTableItem(const uint32 index) const {
+    CHECK_FATAL(index < pregTable.size(), "array index out of range");
+    return pregTable[index];
+  }
+
+  void SetPregNoToPregIdxMapItem(uint32 key, PregIdx value) {
+    pregNoToPregIdxMap[key] = value;
+  }
+
+  uint32 GetMaxPregNo() const {
+    return maxPregNo;
+  }
+
+  void SetMaxPregNo(uint32 index) {
+    maxPregNo = index;
+  }
+
+  size_t SpecPregSize() {
+    return kSregLast;
+  }
+
+ private:
+  uint32 maxPregNo = 0;  // the max pregNo that has been allocated
+  MapleMap<uint32, PregIdx> pregNoToPregIdxMap;  // for quick lookup based on pregno
+  MapleVector<MIRPreg*> pregTable;
+  MIRPreg specPregTable[kSregLast];  // for the MIRPreg nodes corresponding to special registers
+  MapleAllocator *mAllocator;
+};
+
+#endif  // MIR_FEATURE_FULL
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_PREG_H
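
Note the sign convention the constructor wires in: ordinary pregs get positive indices into `pregTable` (slot 0 is reserved null), while special registers live in `specPregTable` and are addressed by negating the `SpecialReg` enum, which is exactly the branch `PregFromPregIdx` takes. Illustrative use of the header's own API, assuming a live `MapleAllocator alloc` is available:

```cpp
maple::MIRPregTable pregTab(&alloc);
// A negative index selects specPregTable; -kSregSp is the stack-pointer slot.
maple::MIRPreg *sp = pregTab.PregFromPregIdx(-maple::kSregSp);
// The constructor stored the negated enum as the preg number:
DEBUG_ASSERT(sp->GetPregNo() == -maple::kSregSp, "special preg numbers are negated enum values");
```
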
diff --git a/ecmascript/mapleall/maple_ir/include/mir_scope.h b/ecmascript/mapleall/maple_ir/include/mir_scope.h
new file mode 100644
index 0000000000000000000000000000000000000000..aae2f6fe8e93eaea998fd1a8053d0c0f73ae517b
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_scope.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_SCOPE_H
+#define MAPLE_IR_INCLUDE_MIR_SCOPE_H
+#include "mir_module.h"
+#include "mir_type.h"
+#include "src_position.h"
+
+namespace maple {
+// mapping src variable to mpl variables to display debug info
+struct MIRAliasVars {
+  GStrIdx mplStrIdx;  // maple variable name
+  TyIdx tyIdx;
+  bool isLocal;
+  GStrIdx sigStrIdx;
+};
+
+class MIRScope {
+ public:
+  explicit MIRScope(MIRModule *mod) : module(mod) {}
+  MIRScope(MIRModule *mod, unsigned l) : module(mod), level(l) {}
+  ~MIRScope() = default;
+
+  bool NeedEmitAliasInfo() const {
+    return aliasVarMap.size() != 0 || subScopes.size() != 0;
+  }
+
+  bool IsSubScope(const MIRScope *scp) const;
+  bool HasJoinScope(const MIRScope *scp1, const MIRScope *scp2) const;
+  bool HasSameRange(const MIRScope *s1, const MIRScope *s2) const;
+
+  unsigned GetLevel() const {
+    return level;
+  }
+
+  const SrcPosition &GetRangeLow() const {
+    return range.first;
+  }
+
+  const SrcPosition &GetRangeHigh() const {
+    return range.second;
+  }
+
+  void SetRange(SrcPosition low, SrcPosition high) {
+    DEBUG_ASSERT(low.IsBfOrEq(high), "wrong order of low and high");
+    range.first = low;
+    range.second = high;
+  }
+
+  void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) {
+    DEBUG_ASSERT(aliasVarMap.find(idx) == aliasVarMap.end(), "alias already exist");
+    aliasVarMap[idx] = vars;
+  }
+
+  void AddAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) {
+    /* allow same idx, save last aliasVars */
+    aliasVarMap[idx] = vars;
+  }
+
+  MapleMap<GStrIdx, MIRAliasVars> &GetAliasVarMap() {
+    return aliasVarMap;
+  }
+
+  MapleVector<MIRScope*> &GetSubScopes() {
+    return subScopes;
+  }
+
+  void IncLevel();
+  bool AddScope(MIRScope *scope);
+  void Dump(int32 indent) const;
+  void Dump() const;
+
+ private:
+  MIRModule *module;
+  unsigned level = 0;
+  std::pair<SrcPosition, SrcPosition> range;
+  // source to maple variable alias
+  MapleMap<GStrIdx, MIRAliasVars> aliasVarMap { module->GetMPAllocator().Adapter() };
+  MapleVector<MIRScope*> subScopes { module->GetMPAllocator().Adapter() };
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_SCOPE_H
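
Scope nesting here is purely positional: a scope owns an ordered `(low, high)` pair of `SrcPosition`s (which `SetRange` asserts), and `IsSubScope`/`HasJoinScope` are range-containment and range-overlap tests over those pairs. The invariant in miniature, with toy types rather than maple's `SrcPosition`:

```cpp
// Toy model of the positional nesting MIRScope relies on.
struct Pos { unsigned line = 0, col = 0; };
inline bool BeforeOrEq(Pos a, Pos b) {  // what SrcPosition::IsBfOrEq models
  return a.line < b.line || (a.line == b.line && a.col <= b.col);
}
struct Range { Pos low, high; };  // what MIRScope::range models
inline bool IsNested(Range inner, Range outer) {  // IsSubScope, in spirit
  return BeforeOrEq(outer.low, inner.low) && BeforeOrEq(inner.high, outer.high);
}
```
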
diff --git a/ecmascript/mapleall/maple_ir/include/mir_symbol.h b/ecmascript/mapleall/maple_ir/include/mir_symbol.h
new file mode 100644
index 0000000000000000000000000000000000000000..262a7616b9d85d2817dd09a2544b5372d03b1c33
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_symbol.h
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_SYMBOL_H
+#define MAPLE_IR_INCLUDE_MIR_SYMBOL_H
+#include <sstream>
+#include "mir_const.h"
+#include "mir_preg.h"
+#include "src_position.h"
+
+constexpr int kScopeLocal = 2;   // the default scope level for function variables
+constexpr int kScopeGlobal = 1;  // the scope level for global variables
+
+namespace maple {
+enum MIRSymKind {
+  kStInvalid,
+  kStVar,
+  kStFunc,
+  kStConst,
+  kStJavaClass,
+  kStJavaInterface,
+  kStPreg
+};
+
+enum MIRStorageClass : uint8 {
+  kScInvalid,
+  kScAuto,
+  kScAliased,
+  kScFormal,
+  kScExtern,
+  kScGlobal,
+  kScPstatic,  // PU-static
+  kScFstatic,  // file-static
+  kScText,
+  kScTypeInfo,      // used for eh type st
+  kScTypeInfoName,  // used for eh type st name
+  kScTypeCxxAbi,    // used for eh inherited from c++ __cxxabiv1
+  kScEHRegionSupp,  // used for tables that control C++ exception handling
+  kScUnused
+};
+
+// to represent a single symbol
+class MIRSymbol {
+ public:
+  union SymbolType {  // a symbol can either be a const or a function or a preg, which is currently used for formals
+    MIRConst *konst;
+    MIRFunction *mirFunc;
+    MIRPreg *preg;  // the MIRSymKind must be kStPreg
+  };
+
+  MIRSymbol() = default;
+  MIRSymbol(uint32 idx, uint8 scp) : stIdx(scp, idx) {}
+  ~MIRSymbol() = default;
+
+  void SetIsTmp(bool temp) {
+    isTmp = temp;
+  }
+
+  bool GetIsTmp() const {
+    return isTmp;
+  }
+
+  void SetNeedForwDecl() {
+    needForwDecl = true;
+  }
+
+  bool IsNeedForwDecl() const {
+    return needForwDecl;
+  }
+
+  void SetInstrumented() {
+    instrumented = true;
+  }
+
+  bool IsInstrumented() const {
+    return instrumented;
+  }
+
+  void SetIsImported(bool imported) {
+    isImported = imported;
+  }
+
+  bool GetIsImported() const {
+    return isImported;
+  }
+
+  void SetWPOFakeParm() {
+    wpoFakeParam = true;
+  }
+
+  bool IsWpoFakeParm() const {
+    return wpoFakeParam;
+  }
+
+  bool IsWpoFakeRet() const {
+    return wpoFakeRet;
+  }
+
+  void SetWPOFakeRet() {
+    wpoFakeRet = true;
+  }
+
+  void SetIsTmpUnused(bool unused) {
+    isTmpUnused = unused;
+  }
+
+  void SetIsImportedDecl(bool imported) {
+    isImportedDecl = imported;
+  }
+
+  bool GetIsImportedDecl() const {
+    return isImportedDecl;
+  }
+
+  bool IsTmpUnused() const {
+    return isTmpUnused;
+  }
+
+  void SetAppearsInCode(bool appears) {
+    appearsInCode = appears;
+  }
+
+  bool GetAppearsInCode() const {
+    return appearsInCode;
+  }
+
+  void SetTyIdx(TyIdx tyIdx) {
+    this->tyIdx = tyIdx;
+  }
+
+  TyIdx GetTyIdx() const {
+    return tyIdx;
+  }
+
+  void SetInferredTyIdx(TyIdx inferredTyIdx) {
+    this->inferredTyIdx = inferredTyIdx;
+  }
+
+  TyIdx GetInferredTyIdx() const {
+    return inferredTyIdx;
+  }
+
+  void SetStIdx(StIdx stIdx) {
+    this->stIdx = stIdx;
+  }
+
+  StIdx GetStIdx() const {
+    return stIdx;
+  }
+
+  void SetSKind(MIRSymKind m) {
+    sKind = m;
+  }
+
+  MIRSymKind GetSKind() const {
+    return sKind;
+  }
+
+  uint32 GetScopeIdx() const {
+    return stIdx.Scope();
+  }
+
+  uint32 GetStIndex() const {
+    return stIdx.Idx();
+  }
+
+  bool IsLocal() const {
+    return stIdx.Islocal();
+  }
+
+  bool IsGlobal() const {
+    return stIdx.IsGlobal();
+  }
+
+  const TypeAttrs &GetAttrs() const {
+    return typeAttrs;
+  }
+
+  TypeAttrs &GetAttrs() {
+    return typeAttrs;
+  }
+
+  void SetAttrs(TypeAttrs attr) {
+    typeAttrs = attr;
+  }
+
+  // AddAttrs adds more attributes instead of overriding the current ones
+  void AddAttrs(TypeAttrs attr) {
+    typeAttrs.SetAttrFlag(typeAttrs.GetAttrFlag() | attr.GetAttrFlag());
+    typeAttrs.AddAttrBoundary(attr.GetAttrBoundary());
+  }
+
+  bool GetAttr(AttrKind attrKind) const {
+    return typeAttrs.GetAttr(attrKind);
+  }
+
+  void SetAttr(AttrKind attrKind) {
+    typeAttrs.SetAttr(attrKind);
+  }
+
+  void ResetAttr(AttrKind attrKind) {
+    typeAttrs.ResetAttr(attrKind);
+  }
+
+  bool IsVolatile() const {
+    return typeAttrs.GetAttr(ATTR_volatile);
+  }
+
+  bool IsTypeVolatile(int fieldID) const;
+
+  bool NeedPIC() const;
+
+  bool IsThreadLocal() const {
+    return typeAttrs.GetAttr(ATTR_tls_static) || typeAttrs.GetAttr(ATTR_tls_dynamic);
+  }
+
+  bool IsStatic() const {
+    return typeAttrs.GetAttr(ATTR_static);
+  }
+
+  bool IsPUStatic() const {
+    return GetStorageClass() == kScPstatic;
+  }
+
+  bool IsFinal() const {
+    return ((typeAttrs.GetAttr(ATTR_final) || typeAttrs.GetAttr(ATTR_readonly)) &&
+            staticFinalBlackList.find(GetName()) == staticFinalBlackList.end()) ||
+           IsLiteral() || IsLiteralPtr();
+  }
+
+  bool IsWeak() const {
+    return typeAttrs.GetAttr(ATTR_weak);
+  }
+
+  bool IsPrivate() const {
+    return typeAttrs.GetAttr(ATTR_private);
+  }
+
+  bool IsRefType() const {
+    return typeAttrs.GetAttr(ATTR_localrefvar);
+  }
+
+  void SetNameStrIdx(GStrIdx strIdx) {
+    nameStrIdx = strIdx;
+  }
+
+  void SetNameStrIdx(const std::string &name);
+
+  GStrIdx GetNameStrIdx() const {
+    return nameStrIdx;
+  }
+
+  MIRStorageClass GetStorageClass() const {
+    return storageClass;
+  }
+
+  void SetStorageClass(MIRStorageClass cl) {
+    storageClass = cl;
+  }
+
+  bool IsReadOnly() const {
+    return kScFstatic == storageClass && kStConst == sKind;
+  }
+
+  bool IsConst() const {
+    return sKind == kStConst || (sKind == kStVar && value.konst != nullptr);
+  }
+
+  MIRType *GetType() const;
+
+  const std::string &GetName() const {
+    return GlobalTables::GetStrTable().GetStringFromStrIdx(nameStrIdx);
+  }
+
+  MIRConst *GetKonst() const {
+    DEBUG_ASSERT((sKind == kStConst || sKind == kStVar), "must be const symbol");
+    return value.konst;
+  }
+
+  void SetKonst(MIRConst *mirconst) {
+    DEBUG_ASSERT((sKind == kStConst || sKind == kStVar), "must be const symbol");
+    value.konst = mirconst;
+  }
+
+  void SetIsDeleted() {
+    isDeleted = true;
+  }
+
+  void ResetIsDeleted() {
+    isDeleted = false;
+  }
+
+  bool IsDeleted() const {
+    return isDeleted;
+  }
+
+  bool IsVar() const {
+    return sKind == kStVar;
+  }
+
+  bool IsPreg() const {
+    return sKind == kStPreg;
+  }
+
+  bool IsJavaClassInterface() const {
+    return sKind == kStJavaClass || sKind == kStJavaInterface;
+  }
+
+  SymbolType GetValue() const {
+    return value;
+  }
+
+  void SetValue(SymbolType value) {
+    this->value = value;
+  }
+
+  SrcPosition &GetSrcPosition() {
+    return srcPosition;
+  }
+
+  const SrcPosition &GetSrcPosition() const {
+    return srcPosition;
+  }
+
+  void SetSrcPosition(const SrcPosition &position) {
+    srcPosition = position;
+  }
+
+  MIRPreg *GetPreg() {
+    DEBUG_ASSERT(IsPreg(), "must be Preg");
+    return value.preg;
+  }
+
+  const MIRPreg *GetPreg() const {
+    CHECK_FATAL(IsPreg(), "must be Preg");
+    return value.preg;
+  }
+
+  void SetPreg(MIRPreg *preg) {
+    CHECK_FATAL(IsPreg(), "must be Preg");
+    value.preg = preg;
+  }
+
+  bool CanBeIgnored() const {
+    return isDeleted;
+  }
+
+  void SetLocalRefVar() {
+    SetAttr(ATTR_localrefvar);
+  }
+
+  void ResetLocalRefVar() {
+    ResetAttr(ATTR_localrefvar);
+  }
+
+  MIRFunction *GetFunction() const {
+    DEBUG_ASSERT(sKind == kStFunc, "must be function symbol");
+    return value.mirFunc;
+  }
+
+  void SetFunction(MIRFunction *func) {
+    DEBUG_ASSERT(sKind == kStFunc, "must be function symbol");
+    value.mirFunc = func;
+  }
+
+  bool IsEhIndex() const {
+    return GetName() == "__eh_index__";
+  }
+
+  bool HasAddrOfValues() const;
+  bool IsLiteral() const;
+  bool IsLiteralPtr() const;
+  bool PointsToConstString() const;
+  bool IsConstString() const;
+  bool IsClassInitBridge() const;
+  bool IsReflectionStrTab() const;
+  bool IsReflectionHashTabBucket() const;
+  bool IsReflectionInfo() const;
+  bool IsReflectionFieldsInfo() const;
+  bool IsReflectionFieldsInfoCompact() const;
+  bool IsReflectionSuperclassInfo() const;
+  bool IsReflectionFieldOffsetData() const;
+  bool IsReflectionMethodAddrData() const;
+  bool IsReflectionMethodSignature() const;
+  bool IsReflectionClassInfo() const;
+  bool IsReflectionArrayClassInfo() const;
+  bool IsReflectionClassInfoPtr() const;
+  bool IsReflectionClassInfoRO() const;
+  bool IsITabConflictInfo() const;
+  bool IsVTabInfo() const;
+  bool IsITabInfo() const;
+  bool IsReflectionPrimitiveClassInfo() const;
+  bool IsReflectionMethodsInfo() const;
+  bool IsReflectionMethodsInfoCompact() const;
+  bool IsRegJNITab() const;
+  bool IsRegJNIFuncTab() const;
+  bool IsMuidTab() const;
+  bool IsMuidRoTab() const;
+  bool IsCodeLayoutInfo() const;
+  std::string GetMuidTabName() const;
+  bool IsMuidFuncDefTab() const;
+  bool IsMuidFuncDefOrigTab() const;
+  bool IsMuidFuncInfTab() const;
+  bool IsMuidFuncUndefTab() const;
+  bool IsMuidDataDefTab() const;
+  bool IsMuidDataDefOrigTab() const;
+  bool IsMuidDataUndefTab() const;
+  bool IsMuidFuncDefMuidTab() const;
+  bool IsMuidFuncUndefMuidTab() const;
+  bool IsMuidDataDefMuidTab() const;
+  bool IsMuidDataUndefMuidTab() const;
+  bool IsMuidFuncMuidIdxMuidTab() const;
+  bool IsMuidRangeTab() const;
+  bool IsArrayClassCache() const;
+  bool IsArrayClassCacheName() const;
+  bool IsForcedGlobalFunc() const;
+  bool IsForcedGlobalClassinfo() const;
+  bool IsGctibSym() const;
+  bool IsPrimordialObject() const;
+  bool IgnoreRC() const;
+  void Dump(bool isLocal, int32 indent, bool suppressInit = false, const MIRSymbolTable *localsymtab = nullptr) const;
+  void DumpAsLiteralVar() const;
+  bool operator==(const MIRSymbol &msym) const {
+    return nameStrIdx == msym.nameStrIdx;
+  }
+
+  bool operator!=(const MIRSymbol &msym) const {
+    return nameStrIdx != msym.nameStrIdx;
+  }
+
+  bool operator<(const MIRSymbol &msym) const {
+    return nameStrIdx < msym.nameStrIdx;
+  }
+
+  static uint32 &LastPrintedLineNumRef() {
+    return lastPrintedLineNum;
+  }
+
+  static uint16 &LastPrintedColumnNumRef() {
+    return lastPrintedColumnNum;
+  }
+
+  bool HasPotentialAssignment() const {
+    return hasPotentialAssignment;
+  }
+
+  void SetHasPotentialAssignment() {
+    hasPotentialAssignment = true;
+  }
+
+  void SetAsmAttr(const UStrIdx &idx) {
+    asmAttr = idx;
+  }
+
+  const UStrIdx &GetAsmAttr() const {
+    return asmAttr;
+  }
+
+  void SetWeakrefAttr(const std::pair<bool, UStrIdx> &idx) {
+    weakrefAttr = idx;
+  }
+
+  const std::pair<bool, UStrIdx> &GetWeakrefAttr() const {
+    return weakrefAttr;
+  }
+
+  bool IsFormal() const {
+    return storageClass == kScFormal;
+  }
+
+  bool LMBCAllocateOffSpecialReg() const {
+    if (isDeleted) {
+      return false;
+    }
+    switch (storageClass) {
+      case kScAuto:
+        return true;
+      case kScPstatic:
+      case kScFstatic:
+        return value.konst == nullptr && !hasPotentialAssignment;
+      default:
+        return false;
+    }
+  }
+
+  // Please keep order of the fields, avoid paddings.
+ private:
+  TyIdx tyIdx{ 0 };
+  TyIdx inferredTyIdx{ kInitTyIdx };
+  MIRStorageClass storageClass{ kScInvalid };
+  MIRSymKind sKind{ kStInvalid };
+  bool isTmp = false;
+  bool needForwDecl = false;  // addrof of this symbol used in initialization, NOT serialized
+  bool wpoFakeParam = false;  // fake symbol introduced in wpo phase for a parameter, NOT serialized
+  bool wpoFakeRet = false;    // fake symbol introduced in wpo phase for return value, NOT serialized
+  bool isDeleted = false;     // tell if it is deleted, NOT serialized
+  bool instrumented = false;  // a local ref pointer instrumented by RC opt, NOT serialized
+  bool isImported = false;
+  bool isImportedDecl = false;
+  bool isTmpUnused = false;    // when parse the mplt_inline file, mark all the new symbol as tmpunused
+  bool appearsInCode = false;  // only used for kStFunc
+  bool hasPotentialAssignment = false;  // for global static vars, init as false and will be set true
+                                        // if assigned by stmt or the address of itself is taken
+  StIdx stIdx { 0, 0 };
+  TypeAttrs typeAttrs;
+  GStrIdx nameStrIdx{ 0 };
+  std::pair<bool, UStrIdx> weakrefAttr { false, 0 };
+ public:
+  UStrIdx asmAttr { 0 };      // if not 0, the string for the name in C's asm attribute
+  UStrIdx sectionAttr { 0 };  // if not 0, the string for the name in C's section attribute
+ private:
+  SymbolType value = { nullptr };
+  SrcPosition srcPosition;  // where the symbol is defined
+  // following cannot be assumed final even though they are declared final
+  static const std::set<std::string> staticFinalBlackList;
+  static GStrIdx reflectClassNameIdx;
+  static GStrIdx reflectMethodNameIdx;
+  static GStrIdx reflectFieldNameIdx;
+  static uint32 lastPrintedLineNum;  // used during printing ascii output
+  static uint16 lastPrintedColumnNum;
+};
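
A quick illustration of how the kind and storage-class predicates above compose; this is not part of the patch, just the header's own API exercised on a file-static constant:

```cpp
#include <cassert>

void Illustrate() {
  maple::MIRSymbol sym;
  sym.SetStorageClass(maple::kScFstatic);
  sym.SetSKind(maple::kStConst);
  assert(sym.IsReadOnly());   // kScFstatic && kStConst
  assert(sym.IsConst());      // kStConst, or a kStVar whose value.konst != nullptr
  assert(!sym.IsPUStatic());  // that would require kScPstatic
}
```
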
+
+class MIRSymbolTable {
+ public:
+  explicit MIRSymbolTable(const MapleAllocator &allocator)
+      : mAllocator(allocator),
+        strIdxToStIdxMap(mAllocator.Adapter()),
+        symbolTable({ nullptr }, mAllocator.Adapter()) {}
+
+  ~MIRSymbolTable() = default;
+
+  bool IsValidIdx(uint32 idx) const {
+    return idx < symbolTable.size();
+  }
+
+  MIRSymbol *GetSymbolFromStIdx(uint32 idx, bool checkFirst = false) const {
+    if (checkFirst && idx >= symbolTable.size()) {
+      return nullptr;
+    }
+    CHECK_FATAL(IsValidIdx(idx), "symbol table index out of range");
+    return symbolTable[idx];
+  }
+
+  MIRSymbol *CreateSymbol(uint8 scopeID) {
+    auto *st = mAllocator.GetMemPool()->New<MIRSymbol>(symbolTable.size(), scopeID);
+    symbolTable.push_back(st);
+    return st;
+  }
+
+  void PushNullSymbol() {
+    symbolTable.push_back(nullptr);
+  }
+
+  // add sym from other symbol table, happens in inline
+  bool AddStOutside(MIRSymbol *sym) {
+    if (sym == nullptr) {
+      return false;
+    }
+    sym->SetStIdx(StIdx(sym->GetScopeIdx(), symbolTable.size()));
+    symbolTable.push_back(sym);
+    return AddToStringSymbolMap(*sym);
+  }
+
+  bool AddToStringSymbolMap(const MIRSymbol &st) {
+    GStrIdx strIdx = st.GetNameStrIdx();
+    if (strIdxToStIdxMap[strIdx].FullIdx() != 0) {
+      return false;
+    }
+    strIdxToStIdxMap[strIdx] = st.GetStIdx();
+    return true;
+  }
+
+  StIdx GetStIdxFromStrIdx(GStrIdx idx) const {
+    auto it = strIdxToStIdxMap.find(idx);
+    return (it == strIdxToStIdxMap.end()) ? StIdx() : it->second;
+  }
+
+  MIRSymbol *GetSymbolFromStrIdx(GStrIdx idx, bool checkFirst = false) {
+    return GetSymbolFromStIdx(GetStIdxFromStrIdx(idx).Idx(), checkFirst);
+  }
+
+  void Dump(bool isLocal, int32 indent = 0, bool printDeleted = false,
+            MIRFlavor flavor = kFlavorUnknown) const;
+
+  size_t GetSymbolTableSize() const {
+    return symbolTable.size();
+  }
+
+  MapleVector<MIRSymbol*> &GetTable() {
+    return symbolTable;
+  }
+
+  const MapleVector<MIRSymbol*> &GetTable() const {
+    return symbolTable;
+  }
+
+  const MIRSymbol *GetSymbolAt(uint32 idx) const {
+    ASSERT(idx < symbolTable.size(), "symbol id out of table range");
+    return symbolTable[idx];
+  }
+
+  MIRSymbol *GetSymbolAt(uint32 idx) {
+    return const_cast<MIRSymbol*>(const_cast<const MIRSymbolTable*>(this)->GetSymbolAt(idx));
+  }
+
+  void Clear() {
+    symbolTable.clear();
+    strIdxToStIdxMap.clear();
+  }
+
+  MIRSymbol *CloneLocalSymbol(const MIRSymbol &oldSym) const {
+    auto *memPool = mAllocator.GetMemPool();
+    auto *newSym = memPool->New<MIRSymbol>(oldSym);
+    if (oldSym.GetSKind() == kStConst) {
+      newSym->SetKonst(oldSym.GetKonst()->Clone(*memPool));
+    } else if (oldSym.GetSKind() == kStPreg) {
+      newSym->SetPreg(memPool->New<MIRPreg>(*oldSym.GetPreg()));
+    } else if (oldSym.GetSKind() == kStFunc) {
+      CHECK_FATAL(false, "%s has unexpected local func symbol", oldSym.GetName().c_str());
+    }
+    return newSym;
+  }
+
+ private:
+  MapleAllocator mAllocator;
+  // hash table mapping string index to st index
+  MapleMap<GStrIdx, StIdx> strIdxToStIdxMap;
+  // map symbol idx to symbol node
+  MapleVector<MIRSymbol*> symbolTable;
+};
+
+class MIRLabelTable {
+ public:
+  explicit MIRLabelTable(MapleAllocator &allocator)
+      : addrTakenLabels(allocator.Adapter()),
+        caseLabelSet(allocator.Adapter()),
+        mAllocator(allocator),
+        strIdxToLabIdxMap(std::less<GStrIdx>(), mAllocator.Adapter()),
+        labelTable(mAllocator.Adapter()) {
+    labelTable.push_back(GStrIdx(kDummyLabel));  // push dummy label index 0
+  }
+
+  ~MIRLabelTable() = default;
+
+  LabelIdx CreateLabel() {
+    LabelIdx labelIdx = labelTable.size();
+    GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(std::to_string(labelIdx));
+    labelTable.push_back(strIdx);
+    return labelIdx;
+  }
+
+  LabelIdx CreateLabelWithPrefix(char c);
+
+  LabelIdx AddLabel(GStrIdx nameIdx) {
+    LabelIdx labelIdx = labelTable.size();
+    labelTable.push_back(nameIdx);
+    strIdxToLabIdxMap[nameIdx] = labelIdx;
+    return labelIdx;
+  }
+
+  LabelIdx GetLabelIdxFromStrIdx(GStrIdx idx) const {
+    auto it = strIdxToLabIdxMap.find(idx);
+    if (it == strIdxToLabIdxMap.end()) {
+      return LabelIdx();
+    }
+    return it->second;
+  }
+
+  void AddToStringLabelMap(LabelIdx labelIdx);
+  size_t GetLabelTableSize() const {
+    return labelTable.size();
+  }
+
+  const std::string &GetName(LabelIdx labelIdx) const;
+
+  size_t Size() const {
+    return labelTable.size();
+  }
+
+  static uint32 GetDummyLabel() {
+    return kDummyLabel;
+  }
+
+  GStrIdx GetSymbolFromStIdx(LabelIdx idx) const {
+    CHECK_FATAL(idx < labelTable.size(), "label table index out of range");
+    return labelTable[idx];
+  }
+
+  void SetSymbolFromStIdx(LabelIdx idx, GStrIdx strIdx) {
+    CHECK_FATAL(idx < labelTable.size(), "label table index out of range");
+    labelTable[idx] = strIdx;
+  }
+
+  MapleVector<GStrIdx> GetLabelTable() {
+    return labelTable;
+  }
+
+  const MapleUnorderedSet<LabelIdx> &GetAddrTakenLabels() const {
+    return addrTakenLabels;
+  }
+
+  MapleUnorderedSet<LabelIdx> &GetAddrTakenLabels() {
+    return addrTakenLabels;
+  }
+
+  const MapleMap<GStrIdx, LabelIdx> &GetStrIdxToLabelIdxMap() const {
+    return strIdxToLabIdxMap;
+  }
+  void EraseStrIdxToLabelIdxElem(GStrIdx idx) {
+    strIdxToLabIdxMap.erase(idx);
+  }
+
+  MapleUnorderedSet<LabelIdx> addrTakenLabels;  // those appeared in addroflabel or MIRLblConst
+  MapleUnorderedSet<LabelIdx> caseLabelSet;     // labels marking starts of switch cases
+
+ private:
+  static constexpr uint32 kDummyLabel = 0;
+  MapleAllocator mAllocator;
+  MapleMap<GStrIdx, LabelIdx> strIdxToLabIdxMap;
+  MapleVector<GStrIdx> labelTable;  // map label idx to label name
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_SYMBOL_H
diff --git a/ecmascript/mapleall/maple_ir/include/mir_symbol_builder.h b/ecmascript/mapleall/maple_ir/include/mir_symbol_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..866d2e2e3090f581b8d0bb3a2e714d3613b083ef
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_symbol_builder.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H
+#define MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H
+#include <string>
+#include <map>
+#include <utility>
+#include <vector>
+#include "opcodes.h"
+#include "prim_types.h"
+#include "mir_type.h"
+#include "mir_const.h"
+#include "mir_symbol.h"
+#include "mir_nodes.h"
+#include "mir_module.h"
+#include "mir_preg.h"
+#include "mir_function.h"
+#include "printing.h"
+#include "intrinsic_op.h"
+#include "opcode_info.h"
+#include "global_tables.h"
+
+namespace maple {
+class MIRSymbolBuilder {
+ public:
+  static MIRSymbolBuilder &Instance() {
+    static MIRSymbolBuilder builder;
+    return builder;
+  }
+
+  MIRSymbol *GetLocalDecl(const MIRSymbolTable &symbolTable, const GStrIdx &strIdx) const;
+  MIRSymbol *CreateLocalDecl(MIRSymbolTable &symbolTable, GStrIdx strIdx, const MIRType &type) const;
+  MIRSymbol *GetGlobalDecl(GStrIdx strIdx) const;
+  MIRSymbol *CreateGlobalDecl(GStrIdx strIdx, const MIRType &type, MIRStorageClass sc) const;
+  MIRSymbol *GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass,
+                       bool sameType = false) const;
+  MIRSymbol *CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass,
+                          MIRFunction *func, uint8 scpID) const;
+  MIRSymbol *CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const;
+  size_t GetSymbolTableSize(const MIRFunction *func = nullptr) const;
+  const MIRSymbol *GetSymbolFromStIdx(uint32 idx, const MIRFunction *func = nullptr) const;
+
+ private:
+  MIRSymbolBuilder() = default;
+  ~MIRSymbolBuilder() = default;
+  MIRSymbolBuilder(const MIRSymbolBuilder&) = delete;
+  MIRSymbolBuilder(const MIRSymbolBuilder&&) = delete;
+  MIRSymbolBuilder &operator=(const MIRSymbolBuilder&) = delete;
+  MIRSymbolBuilder &operator=(const MIRSymbolBuilder&&) = delete;
+};
+}  // maple
+#endif  // MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H
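
`MIRSymbolBuilder` funnels symbol creation and lookup through one singleton; underneath, name-based lookup is the two-step mapping `GStrIdx -> StIdx -> MIRSymbol*` that `MIRSymbolTable` maintains. A hedged sketch of that lookup (hypothetical helper, using only the APIs declared above):

```cpp
// Hypothetical helper: resolve a name to a symbol in one table, or nullptr.
maple::MIRSymbol *LookupByName(maple::MIRSymbolTable &tab, maple::GStrIdx nameIdx) {
  maple::StIdx stIdx = tab.GetStIdxFromStrIdx(nameIdx);  // step 1: name -> (scope, index)
  // step 2: index -> symbol; slot 0 is the reserved null entry, so an
  // unknown name (StIdx() with Idx() == 0) resolves to nullptr here.
  return tab.GetSymbolFromStIdx(stIdx.Idx(), true);
}
```
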
diff --git a/ecmascript/mapleall/maple_ir/include/mir_type.h b/ecmascript/mapleall/maple_ir/include/mir_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..44ba467aafe9b4a12bdd7be7367f1909643f394a
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/mir_type.h
@@ -0,0 +1,2161 @@
+/*
+ * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_MIR_TYPE_H
+#define MAPLE_IR_INCLUDE_MIR_TYPE_H
+#include <algorithm>
+#include <array>
+#include "prim_types.h"
+#include "mir_pragma.h"
+#include "mpl_logging.h"
+#if MIR_FEATURE_FULL
+#include "mempool.h"
+#include "mempool_allocator.h"
+#endif  // MIR_FEATURE_FULL
+
+namespace maple {
+constexpr uint32 kTypeHashLength = 12289;   // hash length for mirtype, ref: planetmath.org/goodhashtableprimes
+const std::string kRenameKeyWord = "_MNO";  // A static symbol name will be renamed as oriname_MNOxxx.
+
+class FieldAttrs;  // circular dependency exists, no other choice
+using TyIdxFieldAttrPair = std::pair<TyIdx, FieldAttrs>;
+using FieldPair = std::pair<GStrIdx, TyIdxFieldAttrPair>;
+using FieldVector = std::vector<FieldPair>;
+using MIRTypePtr = MIRType*;
+
+constexpr size_t kMaxArrayDim = 20;
+const std::string kJstrTypeName = "constStr";
+constexpr uint32 kInvalidFieldNum = UINT32_MAX;
+constexpr size_t kInvalidSize = UINT64_MAX;
+#if MIR_FEATURE_FULL
+extern bool VerifyPrimType(PrimType primType1, PrimType primType2);  // verify if primType1 and primType2 match
+extern PrimType GetExactPtrPrimType();               // return either PTY_a64 or PTY_a32
+extern uint32 GetPrimTypeSize(PrimType primType);    // answer in bytes; 0 if unknown
+extern uint32 GetPrimTypeP2Size(PrimType primType);  // answer in bytes in power-of-two.
+extern PrimType GetSignedPrimType(PrimType pty);     // return signed version
+extern PrimType GetUnsignedPrimType(PrimType pty);   // return unsigned version
+extern uint32 GetVecEleSize(PrimType primType);      // element size of each lane in vector
+extern uint32 GetVecLanes(PrimType primType);        // lane size if vector
+extern const char *GetPrimTypeName(PrimType primType);
+extern const char *GetPrimTypeJavaName(PrimType primType);
+extern int64 MinValOfSignedInteger(PrimType primType);
+extern PrimType GetVecElemPrimType(PrimType primType);
+constexpr uint32 k0BitSize = 0;
+constexpr uint32 k1BitSize = 1;
+constexpr uint32 k2BitSize = 2;
+constexpr uint32 k3BitSize = 3;
+constexpr uint32 k4BitSize = 4;
+constexpr uint32 k5BitSize = 5;
+constexpr uint32 k8BitSize = 8;
+constexpr uint32 k9BitSize = 9;
+constexpr uint32 k10BitSize = 10;
+constexpr uint32 k16BitSize = 16;
+constexpr uint32 k32BitSize = 32;
+constexpr uint32 k64BitSize = 64;
+
+inline uint32 GetPrimTypeBitSize(PrimType primType) {
+  // 1 byte = 8 bits = 2^3 bits
+  return GetPrimTypeSize(primType) << 3;
+}
+
+inline uint32 GetPrimTypeActualBitSize(PrimType primType) {
+  // GetPrimTypeSize(PTY_u1) will return 1, so we take it as a special case
+  if (primType == PTY_u1) {
+    return 1;
+  }
+  // 1 byte = 8 bits = 2^3 bits
+  return GetPrimTypeSize(primType) << 3;
+}
+
+#endif  // MIR_FEATURE_FULL
+// return the same type with size increased to register size
+PrimType GetRegPrimType(PrimType primType);
+PrimType GetDynType(PrimType primType);
+PrimType GetReg64PrimType(PrimType primType);
+PrimType GetNonDynType(PrimType primType);
+PrimType GetIntegerPrimTypeBySizeAndSign(size_t sizeBit, bool isSign);
+
+inline bool IsAddress(PrimitiveType primitiveType) {
+  return primitiveType.IsAddress();
+}
+
+inline bool IsPossible64BitAddress(PrimType tp) {
+  return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_u64 || tp == PTY_a64);
+}
+
+inline bool IsPossible32BitAddress(PrimType tp) {
+  return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_u32 || tp == PTY_a32);
+}
+
+inline bool MustBeAddress(PrimType tp) {
+  return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_a64 || tp == PTY_a32);
+}
+
+inline bool IsPrimitivePureScalar(PrimitiveType primitiveType) {
+  return primitiveType.IsInteger() && !primitiveType.IsAddress() &&
+         !primitiveType.IsDynamic() && !primitiveType.IsVector();
+}
+
+inline bool IsPrimitiveUnsigned(PrimitiveType primitiveType) {
+  return primitiveType.IsUnsigned();
+}
+
+inline bool IsUnsignedInteger(PrimitiveType primitiveType) {
+  return IsPrimitiveUnsigned(primitiveType) && primitiveType.IsInteger() && !primitiveType.IsDynamic();
+}
+
+inline bool IsSignedInteger(PrimitiveType primitiveType) {
+  return !IsPrimitiveUnsigned(primitiveType) && primitiveType.IsInteger() && !primitiveType.IsDynamic();
+}
+
+inline bool IsPrimitiveInteger(PrimitiveType primitiveType) {
+  return primitiveType.IsInteger() && !primitiveType.IsDynamic() && !primitiveType.IsVector();
+}
+
+inline bool IsPrimitiveDynType(PrimitiveType primitiveType) {
+  return primitiveType.IsDynamic();
+}
+
+inline bool IsPrimitiveDynInteger(PrimitiveType primitiveType) {
+  return primitiveType.IsDynamic() && primitiveType.IsInteger();
+}
+
+inline bool IsPrimitiveDynFloat(PrimitiveType primitiveType) {
+  return primitiveType.IsDynamic() && primitiveType.IsFloat();
+}
+
+inline bool IsPrimitiveFloat(PrimitiveType primitiveType) {
+  return primitiveType.IsFloat() && !primitiveType.IsDynamic() && !primitiveType.IsVector();
+}
+
+inline bool IsPrimitiveScalar(PrimitiveType primitiveType) {
+  return primitiveType.IsInteger() || primitiveType.IsFloat() ||
+         (primitiveType.IsDynamic() && !primitiveType.IsDynamicNone()) ||
+         primitiveType.IsSimple();
+}
+
+inline bool IsPrimitiveValid(PrimitiveType primitiveType) {
+  return IsPrimitiveScalar(primitiveType) && !primitiveType.IsDynamicAny();
+}
+
+inline bool IsPrimitivePoint(PrimitiveType primitiveType) {
+  return primitiveType.IsPointer();
+}
+
+inline bool IsPrimitiveVector(PrimitiveType primitiveType) {
+  return primitiveType.IsVector();
+}
+
+inline bool IsPrimitiveVectorFloat(PrimitiveType primitiveType) {
+  return primitiveType.IsVector() && primitiveType.IsFloat();
+}
+
+inline bool IsPrimitiveVectorInteger(PrimitiveType primitiveType) {
+  return primitiveType.IsVector() && primitiveType.IsInteger();
+}
+
+inline bool IsPrimitiveUnSignedVector(const PrimitiveType &primitiveType) {
+  return IsPrimitiveUnsigned(primitiveType) && primitiveType.IsVector();
+}
+
+bool IsNoCvtNeeded(PrimType toType, PrimType fromType);
+bool NeedCvtOrRetype(PrimType origin, PrimType compared);
+
+uint8 GetPointerSize();
+uint8 GetP2Size();
+PrimType GetLoweredPtrType();
+
+inline bool IsRefOrPtrAssign(PrimType toType, PrimType fromType) {
+  return (toType == PTY_ref && fromType == PTY_ptr) || (toType == PTY_ptr && fromType == PTY_ref);
+}
+
+enum MIRTypeKind : std::uint8_t {
+  kTypeInvalid,
+  kTypeUnknown,
+  kTypeScalar,
+  kTypeBitField,
+  kTypeArray,
+  kTypeFArray,
+  kTypeJArray,
+  kTypeStruct,
+  kTypeUnion,
+  kTypeClass,
+  kTypeInterface,
+  kTypeStructIncomplete,
+  kTypeClassIncomplete,
+  kTypeConstString,
+  kTypeInterfaceIncomplete,
+  kTypePointer,
+  kTypeFunction,
+  kTypeVoid,
+  kTypeByName,          // type definition not yet seen
+  kTypeParam,           // to support java generics
+  kTypeInstantVector,   // represent a vector of instantiation pairs
+  kTypeGenericInstant,  // type to be formed by instantiation of a generic type
+};
+
+enum AttrKind : unsigned {
+#define TYPE_ATTR
+#define ATTR(STR) ATTR_##STR,
+#include "all_attributes.def"
+#undef ATTR
+#undef TYPE_ATTR
+};
+
+class AttrBoundary {
+ public:
+  AttrBoundary() = default;
+  ~AttrBoundary() = default;
+
+  bool operator==(const AttrBoundary &tA) const {
+    return lenExprHash == tA.lenExprHash && lenParamIdx == tA.lenParamIdx && isBytedLen == tA.isBytedLen;
+  }
+
+  bool operator!=(const AttrBoundary &tA) const {
+    return !(*this == tA);
+  }
+
+  bool operator<(const AttrBoundary &tA) const {
+    return lenExprHash < tA.lenExprHash && lenParamIdx < tA.lenParamIdx &&
+           static_cast<int>(isBytedLen) < static_cast<int>(tA.isBytedLen);
+  }
+
+  void SetLenExprHash(uint32 val) {
+    lenExprHash = val;
+  }
+
+  uint32 GetLenExprHash() const {
+    return lenExprHash;
+  }
+
+  void SetLenParamIdx(int8 idx) {
+    lenParamIdx = idx;
+  }
+
+  int8 GetLenParamIdx() const {
+    return lenParamIdx;
+  }
+
+  void SetIsBytedLen(bool flag) {
+    isBytedLen = flag;
+  }
+
+  bool IsBytedLen() const {
+    return isBytedLen;
+  }
+
+  void Clear() {
+    lenExprHash = 0;
+    lenParamIdx = -1;
+    isBytedLen = false;
+  }
+
+ private:
+  bool isBytedLen = false;
+  int8 lenParamIdx = -1;
+  uint32 lenExprHash = 0;
+};
+
+class TypeAttrs {
+ public:
+  TypeAttrs() = default;
+  TypeAttrs(const TypeAttrs &ta) = default;
+  TypeAttrs &operator=(const TypeAttrs &t) = default;
+  ~TypeAttrs() = default;
+
+  void SetAlignValue(uint8 align) {
+    attrAlign = align;
+  }
+
+  uint8 GetAlignValue() const {
+    return attrAlign;
+  }
+
+  void SetAttrFlag(uint64 flag) {
+    attrFlag = flag;
+  }
+
+  uint64 GetAttrFlag() const {
+    return attrFlag;
+  }
+
+  void SetAttr(AttrKind x) {
+    attrFlag |= (1ULL << static_cast<unsigned>(x));
+  }
+
+  void ResetAttr(AttrKind x) {
+    attrFlag &= ~(1ULL << static_cast<unsigned>(x));
+  }
+
+  bool GetAttr(AttrKind x) const {
+    return (attrFlag & (1ULL << static_cast<unsigned>(x))) != 0;
+  }
+
+  void SetAlign(uint32 x) {
+    DEBUG_ASSERT((~(x - 1) & x) == x, "SetAlign called with non-power-of-2");
+    attrAlign = 0;
+    while (x != 1) {
+      x >>= 1;
+      ++attrAlign;
+    }
+  }
+
+  uint32 GetAlign() const {
+    if (attrAlign == 0) {
+      return 1;
+    }
+    uint32 res = 1;
+    uint32 exp = attrAlign;
+    do {
+      --exp;
+      res *= 2;
+    } while (exp != 0);
+    return res;
+  }
+
+  bool operator==(const TypeAttrs &tA) const {
+    return attrFlag == tA.attrFlag && attrAlign == tA.attrAlign && attrBoundary == tA.attrBoundary;
+  }
+
+  bool operator!=(const TypeAttrs &tA) const {
+    return !(*this == tA);
+  }
+
+  void DumpAttributes() const;
+
+  const AttrBoundary &GetAttrBoundary() const {
+    return attrBoundary;
+  }
+
+  AttrBoundary &GetAttrBoundary() {
+    return attrBoundary;
+  }
+
+  void AddAttrBoundary(const AttrBoundary &attr) {
+    if (attr.GetLenExprHash() != 0) {
+      attrBoundary.SetLenExprHash(attr.GetLenExprHash());
+    }
+    if (attr.GetLenParamIdx() != -1) {
+      attrBoundary.SetLenParamIdx(attr.GetLenParamIdx());
+    }
+    if (attr.IsBytedLen()) {
+      attrBoundary.SetIsBytedLen(attr.IsBytedLen());
+    }
+  }
+
+  void SetPack(uint32 pack) {
+    attrPack = pack;
+  }
+
+  uint32 GetPack() const {
+    return attrPack;
+  }
+
+  bool IsPacked() const {
+    return GetAttr(ATTR_pack);
+  }
+
+ private:
+  uint64 attrFlag = 0;
+  uint8 attrAlign = 0;        // alignment in bytes is 2 to the power of attrAlign
+  uint32 attrPack = -1;       // -1 means inactive
+  AttrBoundary attrBoundary;  // boundary attr for EnhanceC
+};
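
`attrAlign` stores the log2 of the byte alignment: `SetAlign` insists on a power of two (that is what the `(~(x - 1) & x) == x` assertion checks) and counts right-shifts, while `GetAlign` rebuilds 2^attrAlign. A worked example of the round trip:

```cpp
#include <cassert>

void AlignRoundTrip() {
  maple::TypeAttrs attrs;
  attrs.SetAlign(16);                  // 16 == 2^4, so the stored exponent is 4
  assert(attrs.GetAlignValue() == 4);  // raw log2 value
  assert(attrs.GetAlign() == 16);      // 2^4 reconstructed
  // attrs.SetAlign(24) would trip the DEBUG_ASSERT: 24 is not a power of two.
}
```
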
+#include "all_attributes.def" +#undef ATTR +#undef FIELD_ATTR +}; + +class FieldAttrs { + public: + FieldAttrs() = default; + FieldAttrs(const FieldAttrs &ta) = default; + FieldAttrs &operator=(const FieldAttrs &p) = default; + ~FieldAttrs() = default; + + void SetAlignValue(uint8 align) { + attrAlign = align; + } + + uint8 GetAlignValue() const { + return attrAlign; + } + + void SetAttrFlag(uint32 flag) { + attrFlag = flag; + } + + uint32 GetAttrFlag() const { + return attrFlag; + } + + void SetAttr(FieldAttrKind x) { + attrFlag |= (1u << static_cast(x)); + } + + bool GetAttr(FieldAttrKind x) const { + return (attrFlag & (1u << static_cast(x))) != 0; + } + + void SetAlign(uint32 x) { + DEBUG_ASSERT((~(x - 1) & x) == x, "SetAlign called with non-power-of-2"); + attrAlign = 0; + while (x != 1) { + x >>= 1; + ++attrAlign; + } + } + + uint32 GetAlign() const { + return 1U << attrAlign; + } + + bool operator==(const FieldAttrs &tA) const { + return attrFlag == tA.attrFlag && attrAlign == tA.attrAlign && attrBoundary == tA.attrBoundary; + } + + bool operator!=(const FieldAttrs &tA) const { + return !(*this == tA); + } + + bool operator<(const FieldAttrs &tA) const { + return attrFlag < tA.attrFlag && attrAlign < tA.attrAlign && attrBoundary < tA.attrBoundary; + } + + void Clear() { + attrFlag = 0; + attrAlign = 0; + attrBoundary.Clear(); + } + + void DumpAttributes() const; + TypeAttrs ConvertToTypeAttrs(); + + const AttrBoundary &GetAttrBoundary() const { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() { + return attrBoundary; + } + + bool IsPacked() const { + return GetAttr(FLDATTR_pack); + } + + private: + uint8 attrAlign = 0; // alignment in bytes is 2 to the power of attrAlign + uint32 attrFlag = 0; + AttrBoundary attrBoundary; +}; + +enum StmtAttrKind : unsigned { +#define STMT_ATTR +#define ATTR(STR) STMTATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef STMT_ATTR +}; + +class StmtAttrs { + public: + StmtAttrs() = default; + StmtAttrs(const StmtAttrs &ta) = default; + StmtAttrs &operator=(const StmtAttrs &p) = default; + ~StmtAttrs() = default; + + void SetAttr(StmtAttrKind x) { + attrFlag |= (1u << static_cast(x)); + } + + bool GetAttr(StmtAttrKind x) const { + return (attrFlag & (1u << static_cast(x))) != 0; + } + + uint32 GetTargetAttrFlag(StmtAttrKind x) const { + return attrFlag & (1u << static_cast(x)); + } + + uint32 GetAttrFlag() const { + return attrFlag; + } + + void AppendAttr(uint32 flag) { + attrFlag |= flag; + } + + void Clear() { + attrFlag = 0; + } + + void DumpAttributes() const; + + private: + uint32 attrFlag = 0; +}; + +enum FuncAttrKind : unsigned { +#define FUNC_ATTR +#define ATTR(STR) FUNCATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +}; + +class FuncAttrs { + public: + FuncAttrs() = default; + FuncAttrs(const FuncAttrs &ta) = default; + FuncAttrs &operator=(const FuncAttrs &p) = default; + ~FuncAttrs() = default; + + void SetAttr(FuncAttrKind x, bool unSet = false) { + if (!unSet) { + attrFlag |= (1ULL << x); + } else { + attrFlag &= ~(1ULL << x); + } + } + + void SetAliasFuncName(const std::string &name) { + aliasFuncName = name; + } + + const std::string &GetAliasFuncName() const { + return aliasFuncName; + } + + void SetPrefixSectionName(const std::string &name) { + prefixSectionName = name; + } + + const std::string &GetPrefixSectionName() const { + return prefixSectionName; + } + + void SetAttrFlag(uint64 flag) { + attrFlag = flag; + } + + uint64 GetAttrFlag() const { + return attrFlag; + } + + bool 
GetAttr(FuncAttrKind x) const { + return (attrFlag & (1ULL << x)) != 0; + } + + bool operator==(const FuncAttrs &tA) const { + return attrFlag == tA.attrFlag; + } + + bool operator!=(const FuncAttrs &tA) const { + return !(*this == tA); + } + + void DumpAttributes() const; + + const AttrBoundary &GetAttrBoundary() const { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() { + return attrBoundary; + } + + void SetConstructorPriority(int priority) { + constructorPriority = priority; + } + + int GetConstructorPriority() const { + return constructorPriority; + } + + void SetDestructorPriority(int priority) { + destructorPriority = priority; + } + + int GetDestructorPriority() const { + return destructorPriority; + } + + int GetFrameResverdSlot() const { + return frameResverdSlot; + } + + void SetFrameResverdSlot(int slot) { + SetAttr(FUNCATTR_frame_pointer); + frameResverdSlot = slot; + } + + void SetFramePointer(std::string framePointer_) { + SetAttr(FUNCATTR_frame_reserved_slots); + framePointer = framePointer_; + } + private: + uint64 attrFlag = 0; + std::string aliasFuncName; + std::string prefixSectionName; + std::string framePointer; + AttrBoundary attrBoundary; // ret boundary for EnhanceC + int constructorPriority = -1; // 0~65535, -1 means inactive + int destructorPriority = -1; // 0~65535, -1 means inactive + int frameResverdSlot = 0; +}; + +#if MIR_FEATURE_FULL +constexpr size_t kShiftNumOfTypeKind = 8; +constexpr size_t kShiftNumOfNameStrIdx = 6; +constexpr int32 kOffsetUnknown = INT_MAX; +constexpr int32 kOffsetMax = (INT_MAX - 1); +constexpr int32 kOffsetMin = INT_MIN; +struct OffsetType { + explicit OffsetType(int64 offset) { + Set(offset); + } + + OffsetType(const OffsetType &other) : val(other.val) {} + + ~OffsetType() = default; + + void Set(int64 offsetVal) { + val = (offsetVal >= kOffsetMin && offsetVal <= kOffsetMax) ? 
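
`OffsetType` is a saturating 32-bit offset: `Set` collapses anything outside `[kOffsetMin, kOffsetMax]` to `kOffsetUnknown`, and every operator checks both operands first, so once an offset is unknown it stays unknown. For example:

```cpp
#include <cassert>

void OffsetSaturation() {
  maple::OffsetType off(8);  // known: val == 8
  off += 24;                 // still known: val == 32
  // An operand outside the representable window is itself "unknown"...
  maple::OffsetType bad = off + (static_cast<maple::int64>(maple::kOffsetMax) + 10);
  assert(bad.IsInvalid());        // ...so the result collapses to kOffsetUnknown,
  assert((bad + 4).IsInvalid());  // and unknown is sticky across further arithmetic.
}
```
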
+
+class MIRStructType;  // circular dependency exists, no other choice
+class MIRFuncType;
+
+class MIRType {
+ public:
+  MIRType(MIRTypeKind kind, PrimType pType) : typeKind(kind), primType(pType) {}
+
+  MIRType(MIRTypeKind kind, PrimType pType, GStrIdx strIdx)
+      : typeKind(kind), primType(pType), nameStrIdx(strIdx) {}
+
+  virtual ~MIRType() = default;
+
+  virtual void Dump(int indent, bool dontUseName = false) const;
+  virtual void DumpAsCxx(int indent) const;
+  virtual bool EqualTo(const MIRType &mirType) const;
+  virtual bool IsStructType() const {
+    return false;
+  }
+
+  virtual MIRType *CopyMIRTypeNode() const {
+    return new MIRType(*this);
+  }
+
+  PrimType GetPrimType() const {
+    return primType;
+  }
+  void SetPrimType(const PrimType pt) {
+    primType = pt;
+  }
+
+  TyIdx GetTypeIndex() const {
+    return tyIdx;
+  }
+  void SetTypeIndex(TyIdx idx) {
+    tyIdx = idx;
+  }
+
+  MIRTypeKind GetKind() const {
+    return typeKind;
+  }
+  void SetMIRTypeKind(MIRTypeKind kind) {
+    typeKind = kind;
+  }
+
+  bool IsNameIsLocal() const {
+    return nameIsLocal;
+  }
+  void SetNameIsLocal(bool flag) {
+    nameIsLocal = flag;
+  }
+
+  GStrIdx GetNameStrIdx() const {
+    return nameStrIdx;
+  }
+  void SetNameStrIdx(GStrIdx strIdx) {
+    nameStrIdx = strIdx;
+  }
+  void SetNameStrIdxItem(uint32 idx) {
+    nameStrIdx.reset(idx);
+  }
+
+  virtual size_t GetSize() const {
+    return GetPrimTypeSize(primType);
+  }
+
+  virtual uint32 GetAlign() const {
+    return GetPrimTypeSize(primType);
+  }
+
+  virtual bool HasVolatileField() const {
+    return false;
+  }
+
+  virtual bool HasTypeParam() const {
+    return false;
+  }
+
+  virtual bool IsIncomplete() const {
+    return typeKind == kTypeStructIncomplete || typeKind == kTypeClassIncomplete ||
+           typeKind == kTypeInterfaceIncomplete;
+  }
+
+  bool IsVolatile(int fieldID) const;
+
+  bool IsMIRPtrType() const {
+    return typeKind == kTypePointer;
+  }
+
+  bool IsMIRStructType() const {
+    return (typeKind == kTypeStruct) || (typeKind == kTypeStructIncomplete);
+  }
+
+  bool IsMIRUnionType() const {
+    return typeKind == kTypeUnion;
+  }
+
+  bool IsMIRClassType() const {
+    return (typeKind == kTypeClass) || (typeKind == kTypeClassIncomplete);
+  }
+
+  bool IsMIRInterfaceType() const {
+    return (typeKind == kTypeInterface) || (typeKind == kTypeInterfaceIncomplete);
+  }
+
+  bool IsInstanceOfMIRStructType() const {
+    return IsMIRStructType() || IsMIRClassType() || IsMIRInterfaceType();
+  }
+
+  bool IsMIRJarrayType() const {
+    return typeKind == kTypeJArray;
+  }
+
+  bool IsMIRArrayType() const {
+    return typeKind == kTypeArray;
+  }
+
+  bool IsMIRFuncType() const {
+    return typeKind == kTypeFunction;
+  }
+
+  bool IsScalarType() const {
+    return typeKind == kTypeScalar;
+  }
+
+  bool IsMIRTypeByName() const {
+    return typeKind == kTypeByName;
+  }
+
+  bool IsMIRBitFieldType() const {
+    return typeKind == kTypeBitField;
+  }
+
+  virtual bool IsUnsafeType() const {
+    return false;
+  }
+  virtual bool IsVoidPointer() const {
+    return false;
+  }
+
+  bool ValidateClassOrInterface(const std::string &className, bool noWarning) const;
+  bool IsOfSameType(MIRType &type);
+  const std::string &GetName() const;
+  virtual std::string GetMplTypeName() const;
+  virtual std::string GetCompactMplTypeName() const;
+  virtual bool PointsToConstString() const;
+  virtual size_t GetHashIndex() const {
+    constexpr uint8 idxShift = 2;
+    return ((static_cast<size_t>(primType) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength;
+  }
+
+  virtual bool HasFields() const { return false; }
+  // total number of field IDs the type is consisted of, excluding its own field ID
+  virtual uint32 NumberOfFieldIDs() const { return 0; }
+  // return any struct type directly embedded in this type
+  virtual MIRStructType *EmbeddedStructType() { return nullptr; }
+
+  virtual int64 GetBitOffsetFromBaseAddr(FieldID fieldID) {
+    (void)fieldID;
+    return 0;
+  }
+
+ protected:
+  MIRTypeKind typeKind;
+  PrimType primType;
+  bool nameIsLocal = false;  // needed when printing the type name
+  TyIdx tyIdx{ 0 };
+  GStrIdx nameStrIdx{ 0 };  // name in global string table
+};
+
+class MIRPtrType : public MIRType {
+ public:
+  explicit MIRPtrType(TyIdx pTyIdx) : MIRType(kTypePointer, PTY_ptr), pointedTyIdx(pTyIdx) {}
+
+  MIRPtrType(TyIdx pTyIdx, PrimType pty) : MIRType(kTypePointer, pty), pointedTyIdx(pTyIdx) {}
+
+  MIRPtrType(PrimType primType, GStrIdx strIdx) : MIRType(kTypePointer, primType, strIdx), pointedTyIdx(0) {}
+
+  ~MIRPtrType() override = default;
+
+  MIRType *CopyMIRTypeNode() const override {
+    return new MIRPtrType(*this);
+  }
+
+  MIRType *GetPointedType() const;
+
+  TyIdx GetPointedTyIdx() const {
+    return pointedTyIdx;
+  }
+  void SetPointedTyIdx(TyIdx idx) {
+    pointedTyIdx = idx;
+  }
+
+  TypeAttrs &GetTypeAttrs() {
+    return typeAttrs;
+  }
+
+  const TypeAttrs &GetTypeAttrs() const {
+    return typeAttrs;
+  }
+
+  void SetTypeAttrs(const TypeAttrs &attrs) {
+    typeAttrs = attrs;
+  }
+
+  bool EqualTo(const MIRType &type) const override;
+
+  bool HasTypeParam() const override;
+  bool IsPointedTypeVolatile(int fieldID) const;
+  bool IsUnsafeType() const override;
+  bool IsVoidPointer() const override;
+
+  void Dump(int indent, bool dontUseName = false) const override;
+  size_t GetSize() const override;
+  uint32 GetAlign() const override;
+  TyIdxFieldAttrPair GetPointedTyIdxFldAttrPairWithFieldID(FieldID fldId) const;
+  TyIdx GetPointedTyIdxWithFieldID(FieldID fieldID) const;
+  size_t GetHashIndex() const override {
+    constexpr uint8 idxShift = 4;
+    constexpr uint8 attrShift = 3;
+    size_t hIdx = (static_cast<size_t>(pointedTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind);
+    hIdx += (typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue();
+    return hIdx % kTypeHashLength;
+  }
+
+  bool IsFunctionPtr() const {
+    MIRType *pointedType = GetPointedType();
+    if (pointedType->GetKind() == kTypeFunction) {
+      return true;
+    }
+    if (pointedType->GetKind() == kTypePointer) {
+      MIRPtrType *pointedPtrType = static_cast<MIRPtrType*>(pointedType);
+      return pointedPtrType->GetPointedType()->GetKind() == kTypeFunction;
+    }
+    return false;
+  }
+
+  MIRFuncType *GetPointedFuncType() const;
+
+  bool PointsToConstString() const override;
+
+  std::string GetMplTypeName() const override;
+
+  std::string GetCompactMplTypeName() const override;
+
+ private:
+  TyIdx pointedTyIdx;
+  TypeAttrs typeAttrs;
+};
+
+class MIRArrayType : public MIRType {
+ public:
+  MIRArrayType() : MIRType(kTypeArray, PTY_agg) {}
+  explicit MIRArrayType(GStrIdx strIdx) : MIRType(kTypeArray, PTY_agg, strIdx) {}
+
+  MIRArrayType(TyIdx eTyIdx, const std::vector<uint32> &sizeArray)
+      : MIRType(kTypeArray, PTY_agg),
+        eTyIdx(eTyIdx),
+        dim(sizeArray.size()) {
+    for (size_t i = 0; i < kMaxArrayDim; ++i) {
+      this->sizeArray[i] = (i < dim) ? sizeArray[i] : 0;
+    }
+  }
+
+  MIRArrayType(const MIRArrayType &pat) = default;
+  MIRArrayType &operator=(const MIRArrayType &p) = default;
+  ~MIRArrayType() override = default;
+
+  TyIdx GetElemTyIdx() const {
+    return eTyIdx;
+  }
+  void SetElemTyIdx(TyIdx idx) {
+    eTyIdx = idx;
+  }
+
+  uint32 GetSizeArrayItem(uint32 n) const {
+    CHECK_FATAL((n >= 0 && n < kMaxArrayDim), "out of bound of array!");
+    return sizeArray[n];
+  }
+  void SetSizeArrayItem(uint32 idx, uint32 value) {
+    CHECK_FATAL((idx >= 0 && idx < kMaxArrayDim), "out of bound of array!");
+    sizeArray[idx] = value;
+  }
+
+  bool IsIncompleteArray() const {
+    return typeAttrs.GetAttr(ATTR_incomplete_array);
+  }
+
+  bool EqualTo(const MIRType &type) const override;
+
+  uint16 GetDim() const {
+    return dim;
+  }
+  void SetDim(uint16 dim) {
+    this->dim = dim;
+  }
+
+  const TypeAttrs &GetTypeAttrs() const {
+    return typeAttrs;
+  }
+
+  TypeAttrs &GetTypeAttrs() {
+    return typeAttrs;
+  }
+
+  void SetTypeAttrs(const TypeAttrs &attrs) {
+    typeAttrs = attrs;
+  }
+
+  MIRType *GetElemType() const;
+
+  MIRType *CopyMIRTypeNode() const override {
+    return new MIRArrayType(*this);
+  }
+
+  bool HasTypeParam() const override {
+    return GetElemType()->HasTypeParam();
+  }
+
+  void Dump(int indent, bool dontUseName) const override;
+
+  size_t GetSize() const override;
+  uint32 GetAlign() const override;
+
+  size_t GetHashIndex() const override {
+    constexpr uint8 idxShift = 2;
+    size_t hIdx = (static_cast<size_t>(eTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind);
+    for (size_t i = 0; i < dim; ++i) {
+      CHECK_FATAL(i < kMaxArrayDim, "array index out of range");
+      hIdx += (sizeArray[i] << i);
+    }
+    constexpr uint8 attrShift = 3;
+    hIdx += (typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue();
+    return hIdx % kTypeHashLength;
+  }
+
+  int64 GetBitOffsetFromBaseAddr(FieldID fieldID) override {
+    (void)fieldID;
+    return kOffsetUnknown;
+  }
+  int64 GetBitOffsetFromArrayAddress(std::vector<int64> &indexArray);
+
+  std::string GetMplTypeName() const override;
+  std::string GetCompactMplTypeName() const override;
+  bool HasFields() const override;
+  uint32 NumberOfFieldIDs() const override;
+  MIRStructType *EmbeddedStructType() override;
+  size_t ElemNumber();
+
+ private:
+  TyIdx eTyIdx{ 0 };
+  uint16 dim = 0;
+  std::array<uint32, kMaxArrayDim> sizeArray{ {0} };
+  TypeAttrs typeAttrs;
+  mutable uint32 fieldsNum = kInvalidFieldNum;
+  mutable size_t size = kInvalidSize;
+};
+
+// flexible array type, must be last field of a top-level struct
+class MIRFarrayType : public MIRType {
+ public:
+  MIRFarrayType() : MIRType(kTypeFArray, PTY_agg), elemTyIdx(TyIdx(0)) {};
+
+  explicit MIRFarrayType(TyIdx elemTyIdx) : MIRType(kTypeFArray, PTY_agg), elemTyIdx(elemTyIdx) {}
+
+  explicit MIRFarrayType(GStrIdx strIdx) : 
MIRType(kTypeFArray, PTY_agg, strIdx), elemTyIdx(TyIdx(0)) {} + + ~MIRFarrayType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRFarrayType(*this); + }; + + MIRType *GetElemType() const; + + bool HasTypeParam() const override { + return GetElemType()->HasTypeParam(); + } + + TyIdx GetElemTyIdx() const { + return elemTyIdx; + } + void SetElemtTyIdx(TyIdx idx) { + elemTyIdx = idx; + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 5; + return ((static_cast(elemTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + std::string GetMplTypeName() const override; + std::string GetCompactMplTypeName() const override; + + bool HasFields() const override; + uint32 NumberOfFieldIDs() const override; + MIRStructType *EmbeddedStructType() override; + + int64 GetBitOffsetFromBaseAddr(FieldID fieldID) override { + (void)fieldID; + return kOffsetUnknown; + } + + int64 GetBitOffsetFromArrayAddress(int64 arrayIndex); + + private: + TyIdx elemTyIdx; + mutable uint32 fieldsNum = kInvalidFieldNum; +}; + +using TyidxFuncAttrPair = std::pair; +using MethodPair = std::pair; +using MethodVector = std::vector; +using MethodPtrVector = std::vector; +using MIREncodedArray = std::vector; +class GenericDeclare; +class AnnotationType; +class GenericType; +// used by kTypeStruct, kTypeStructIncomplete, kTypeUnion +class MIRStructType : public MIRType { + public: + explicit MIRStructType(MIRTypeKind typeKind) : MIRType(typeKind, PTY_agg) {} + + MIRStructType(MIRTypeKind typeKind, GStrIdx strIdx) : MIRType(typeKind, PTY_agg, strIdx) {} + + ~MIRStructType() override = default; + + bool IsStructType() const override { + return true; + } + + FieldVector &GetFields() { + return fields; + } + const FieldVector &GetFields() const { + return fields; + } + void SetFields(const FieldVector &fields) { + this->fields = fields; + } + + const FieldPair &GetFieldsElemt(size_t n) const { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n); + } + + FieldPair &GetFieldsElemt(size_t n) { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n); + } + + size_t GetFieldsSize() const { + return fields.size(); + } + + const std::vector &GetFieldInferredTyIdx() const { + return fieldInferredTyIdx; + } + + FieldVector &GetStaticFields() { + return staticFields; + } + const FieldVector &GetStaticFields() const { + return staticFields; + } + + const FieldPair &GetStaticFieldsPair(size_t i) const { + return staticFields.at(i); + } + + GStrIdx GetStaticFieldsGStrIdx(size_t i) const { + return staticFields.at(i).first; + } + + FieldVector &GetParentFields() { + return parentFields; + } + void SetParentFields(const FieldVector &parentFields) { + this->parentFields = parentFields; + } + const FieldVector &GetParentFields() const { + return parentFields; + } + const FieldPair &GetParentFieldsElemt(size_t n) const { + DEBUG_ASSERT(n < parentFields.size(), "array index out of range"); + return parentFields.at(n); + } + size_t GetParentFieldsSize() const { + return parentFields.size(); + } + + MethodVector &GetMethods() { + return methods; + } + const MethodVector &GetMethods() const { + return methods; + } + + const MethodPair &GetMethodsElement(size_t n) const { + DEBUG_ASSERT(n < methods.size(), "array index out of range"); + return methods.at(n); + } + + MethodPtrVector 
&GetVTableMethods() { + return vTableMethods; + } + + const MethodPair *GetVTableMethodsElemt(size_t n) const { + DEBUG_ASSERT(n < vTableMethods.size(), "array index out of range"); + return vTableMethods.at(n); + } + + size_t GetVTableMethodsSize() const { + return vTableMethods.size(); + } + + const MethodPtrVector &GetItableMethods() const { + return iTableMethods; + } + + bool IsImported() const { + return isImported; + } + + void SetIsImported(bool flag) { + isImported = flag; + } + + bool IsUsed() const { + return isUsed; + } + + void SetIsUsed(bool flag) { + isUsed = flag; + } + + bool IsCPlusPlus() const { + return isCPlusPlus; + } + + void SetIsCPlusPlus(bool flag) { + isCPlusPlus = flag; + } + + GStrIdx GetFieldGStrIdx(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.first; + } + + const TyIdxFieldAttrPair GetFieldTyIdxAttrPair(FieldID id) const { + return TraverseToField(id).second; + } + + TyIdxFieldAttrPair GetTyidxFieldAttrPair(size_t n) const { + return fields.at(n).second; + } + + TyIdx GetFieldTyIdx(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.first; + } + + FieldAttrs GetFieldAttrs(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second; + } + + FieldAttrs GetFieldAttrs(GStrIdx fieldStrIdx) const { + const FieldPair &fieldPair = TraverseToField(fieldStrIdx); + return fieldPair.second.second; + } + + bool IsFieldVolatile(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_volatile); + } + + bool IsFieldFinal(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_final); + } + + bool IsFieldRCUnownedRef(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_rcunowned); + } + + bool IsFieldRCWeak(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_rcweak); + } + + bool IsFieldRestrict(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_restrict); + } + + bool IsOwnField(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return std::find(fields.begin(), fields.end(), fieldPair) != fields.end(); + } + + TypeAttrs &GetTypeAttrs() { + return typeAttrs; + } + + const TypeAttrs &GetTypeAttrs() const { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) { + typeAttrs = attrs; + } + + bool HasVolatileField() const override; + bool HasTypeParam() const override; + bool EqualTo(const MIRType &type) const override; + MIRType *CopyMIRTypeNode() const override { + return new MIRStructType(*this); + } + + TyIdx GetElemTyIdx(size_t n) const { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n).second.first; + } + + void SetElemtTyIdxSimple(size_t n, TyIdx tyIdx) { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).second.first = tyIdx; + } + + TyIdx GetStaticElemtTyIdx(size_t n) const { + DEBUG_ASSERT(n < staticFields.size(), "array index out of range"); + return staticFields.at(n).second.first; + } + + void SetStaticElemtTyIdx(size_t n, TyIdx tyIdx) { + staticFields.at(n).second.first = tyIdx; + } + + void SetMethodTyIdx(size_t n, TyIdx tyIdx) { + DEBUG_ASSERT(n < methods.size(), "array index out of range"); + 
methods.at(n).second.first = tyIdx; + } + + MIRType *GetElemType(uint32 n) const; + + MIRType *GetFieldType(FieldID fieldID); + + void SetElemtTyIdx(size_t n, TyIdx tyIdx) { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).second = TyIdxFieldAttrPair(tyIdx, FieldAttrs()); + } + + GStrIdx GetElemStrIdx(size_t n) const { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n).first; + } + + void SetElemStrIdx(size_t n, GStrIdx idx) { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).first = idx; + } + + void SetElemInferredTyIdx(size_t n, TyIdx tyIdx) { + if (n >= fieldInferredTyIdx.size()) { + (void)fieldInferredTyIdx.insert(fieldInferredTyIdx.end(), n + 1 - fieldInferredTyIdx.size(), kInitTyIdx); + } + DEBUG_ASSERT(n < fieldInferredTyIdx.size(), "array index out of range"); + fieldInferredTyIdx.at(n) = tyIdx; + } + + TyIdx GetElemInferredTyIdx(size_t n) { + if (n >= fieldInferredTyIdx.size()) { + (void)fieldInferredTyIdx.insert(fieldInferredTyIdx.end(), n + 1 - fieldInferredTyIdx.size(), kInitTyIdx); + } + DEBUG_ASSERT(n < fieldInferredTyIdx.size(), "array index out of range"); + return fieldInferredTyIdx.at(n); + } + + void DumpFieldsAndMethods(int indent, bool hasMethod) const; + void Dump(int indent, bool dontUseName = false) const override; + + virtual void SetComplete() { + typeKind = (typeKind == kTypeUnion) ? typeKind : kTypeStruct; + } + + // only meaningful for MIRClassType and MIRInterface types + bool IsLocal() const; + + size_t GetSize() const override; + uint32 GetAlign() const override; + + size_t GetHashIndex() const override { + constexpr uint8 attrShift = 3; + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind) + + ((typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue())) % kTypeHashLength; + } + + virtual void ClearContents() { + fields.clear(); + staticFields.clear(); + parentFields.clear(); + methods.clear(); + vTableMethods.clear(); + iTableMethods.clear(); + isImported = false; + isUsed = false; + hasVolatileField = false; + hasVolatileFieldSet = false; + } + + virtual const std::vector &GetInfo() const { + CHECK_FATAL(false, "can not use GetInfo"); + } + + virtual const MIRInfoPair &GetInfoElemt(size_t) const { + CHECK_FATAL(false, "can not use GetInfoElemt"); + } + + virtual const std::vector &GetInfoIsString() const { + CHECK_FATAL(false, "can not use GetInfoIsString"); + } + + virtual bool GetInfoIsStringElemt(size_t) const { + CHECK_FATAL(false, "can not use GetInfoIsStringElemt"); + } + + virtual const std::vector &GetPragmaVec() const { + CHECK_FATAL(false, "can not use GetPragmaVec"); + } + + virtual std::vector &GetPragmaVec() { + CHECK_FATAL(false, "can not use GetPragmaVec"); + } + + std::vector& GetGenericDeclare() { + return genericDeclare; + } + + void AddClassGenericDeclare(GenericDeclare *gd) { + genericDeclare.push_back(gd); + } + + void AddFieldGenericDeclare(const GStrIdx &g, AnnotationType *a) { + if (fieldGenericDeclare.find(g) != fieldGenericDeclare.end()) { + CHECK_FATAL(fieldGenericDeclare[g] == a, "MUST BE"); + } + fieldGenericDeclare[g] = a; + } + + AnnotationType *GetFieldGenericDeclare(const GStrIdx &g) { + if (fieldGenericDeclare.find(g) == fieldGenericDeclare.end()) { + return nullptr; + } + return fieldGenericDeclare[g]; + } + + void AddInheritaceGeneric(GenericType *a) { + inheritanceGeneric.push_back(a); + } + + std::vector &GetInheritanceGeneric() { + return inheritanceGeneric; + } + + virtual 
const MIREncodedArray &GetStaticValue() const {
+    CHECK_FATAL(false, "can not use GetStaticValue");
+  }
+
+  virtual void PushbackMIRInfo(const MIRInfoPair&) {
+    CHECK_FATAL(false, "can not use PushbackMIRInfo");
+  }
+
+  virtual void PushbackPragma(MIRPragma*) {
+    CHECK_FATAL(false, "can not use PushbackPragma");
+  }
+
+  virtual void PushbackStaticValue(EncodedValue&) {
+    CHECK_FATAL(false, "can not use PushbackStaticValue");
+  }
+
+  virtual void PushbackIsString(bool) {
+    CHECK_FATAL(false, "can not use PushbackIsString");
+  }
+
+  bool HasFields() const override { return true; }
+  uint32 NumberOfFieldIDs() const override;
+  MIRStructType *EmbeddedStructType() override { return this; }
+
+  virtual FieldPair TraverseToFieldRef(FieldID &fieldID) const;
+  std::string GetMplTypeName() const override;
+  std::string GetCompactMplTypeName() const override;
+  FieldPair TraverseToField(FieldID fieldID) const;
+
+  int64 GetBitOffsetFromBaseAddr(FieldID fieldID) override;
+
+  bool HasPadding() const;
+
+ protected:
+  FieldVector fields{};
+  std::vector<TyIdx> fieldInferredTyIdx{};
+  FieldVector staticFields{};
+  FieldVector parentFields{};       // fields belong to the ancestors not fully defined
+  MethodVector methods{};           // for the list of member function prototypes
+  MethodPtrVector vTableMethods{};  // the list of implementations of all virtual functions for this type
+  MethodPtrVector iTableMethods{};  // the list of all interface functions for this type; for classes they are
+                                    // implementation functions, for interfaces they are abstract functions.
+                                    // Weak indicates the actual definition is in another module.
+  bool isImported = false;
+  bool isUsed = false;
+  bool isCPlusPlus = false;         // empty struct in C++ has size 1 byte
+  mutable bool hasVolatileField = false;     // for caching computed value
+  mutable bool hasVolatileFieldSet = false;  // if true, just read hasVolatileField;
+                                             // otherwise compute to initialize hasVolatileField
+  std::vector<GenericDeclare*> genericDeclare;
+  std::map<GStrIdx, AnnotationType*> fieldGenericDeclare;
+  std::vector<GenericType*> inheritanceGeneric;
+  TypeAttrs typeAttrs;
+  mutable uint32 fieldsNum = kInvalidFieldNum;
+  mutable size_t size = kInvalidSize;
+
+ private:
+  FieldPair TraverseToField(GStrIdx fieldStrIdx) const;
+  bool HasVolatileFieldInFields(const FieldVector &fieldsOfStruct) const;
+  bool HasTypeParamInFields(const FieldVector &fieldsOfStruct) const;
+  int64 GetBitOffsetFromUnionBaseAddr(FieldID fieldID);
+  int64 GetBitOffsetFromStructBaseAddr(FieldID fieldID);
+};
+
+// java array type, must not be nested inside another aggregate
+class MIRJarrayType : public MIRFarrayType {
+ public:
+  MIRJarrayType() {
+    typeKind = kTypeJArray;
+  };
+
+  explicit MIRJarrayType(TyIdx elemTyIdx) : MIRFarrayType(elemTyIdx) {
+    typeKind = kTypeJArray;
+  }
+
+  explicit MIRJarrayType(GStrIdx strIdx) : MIRFarrayType(strIdx) {
+    typeKind = kTypeJArray;
+  }
+
+  ~MIRJarrayType() override = default;
+
+  MIRType *CopyMIRTypeNode() const override {
+    return new MIRJarrayType(*this);
+  }
+
+  MIRStructType *GetParentType();
+  const std::string &GetJavaName();
+
+  bool IsPrimitiveArray() {
+    if (javaNameStrIdx == 0u) {
+      DetermineName();
+    }
+    return fromPrimitive;
+  }
+
+  int GetDim() {
+    if (javaNameStrIdx == 0u) {
+      DetermineName();
+    }
+    return dim;
+  }
+
+  size_t GetHashIndex() const override {
+    constexpr uint8 idxShift = 5;
+    return ((static_cast<size_t>(GetElemTyIdx()) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength;
+  }
+
+ private:
+  void DetermineName();    // determine the internal name of this type
+  TyIdx parentTyIdx{ 0 };  // since Jarray is also an object, this is java.lang.Object
+  GStrIdx javaNameStrIdx{ 0 };  // for the internal java name of Jarray. nameStrIdx is used for other purposes
+  bool fromPrimitive = false;   // the lowest dimension is primitive type
+  int dim = 0;                  // the dimension, if decidable at compile time; otherwise 0
+};
+
+// used by kTypeClass, kTypeClassIncomplete
+class MIRClassType : public MIRStructType {
+ public:
+  explicit MIRClassType(MIRTypeKind tKind) : MIRStructType(tKind) {}
+  MIRClassType(MIRTypeKind tKind, GStrIdx strIdx) : MIRStructType(tKind, strIdx) {}
+  ~MIRClassType() override = default;
+
+  bool EqualTo(const MIRType &type) const override;
+
+  MIRType *CopyMIRTypeNode() const override {
+    return new MIRClassType(*this);
+  }
+
+  const std::vector<MIRInfoPair> &GetInfo() const override {
+    return info;
+  }
+  void PushbackMIRInfo(const MIRInfoPair &pair) override {
+    info.push_back(pair);
+  }
+  uint32 GetInfo(const std::string &infoStr) const;
+  uint32 GetInfo(GStrIdx strIdx) const;
+  size_t GetInfoSize() const {
+    return info.size();
+  }
+
+  const MIRInfoPair &GetInfoElemt(size_t n) const override {
+    DEBUG_ASSERT(n < info.size(), "array index out of range");
+    return info.at(n);
+  }
+
+  const std::vector<bool> &GetInfoIsString() const override {
+    return infoIsString;
+  }
+
+  void PushbackIsString(bool isString) override {
+    infoIsString.push_back(isString);
+  }
+
+  size_t GetInfoIsStringSize() const {
+    return infoIsString.size();
+  }
+
+  bool GetInfoIsStringElemt(size_t n) const override {
+    DEBUG_ASSERT(n < infoIsString.size(), "array index out of range");
+    return infoIsString.at(n);
+  }
+
+  std::vector<MIRPragma*> &GetPragmaVec() override {
+    return pragmaVec;
+  }
+  const std::vector<MIRPragma*> &GetPragmaVec() const override {
+    return pragmaVec;
+  }
+  void PushbackPragma(MIRPragma *pragma) override {
+    pragmaVec.push_back(pragma);
+  }
+
+  const MIREncodedArray &GetStaticValue() const override {
+    return staticValue;
+  }
+  void PushbackStaticValue(EncodedValue &encodedValue) override {
+    staticValue.push_back(encodedValue);
+  }
+
+  TyIdx GetParentTyIdx() const {
+    return parentTyIdx;
+  }
+  void SetParentTyIdx(TyIdx idx) {
+    parentTyIdx = idx;
+  }
+
+  std::vector<TyIdx> &GetInterfaceImplemented() {
+    return interfacesImplemented;
+  }
+  const std::vector<TyIdx> &GetInterfaceImplemented() const {
+    return interfacesImplemented;
+  }
+  TyIdx GetNthInterfaceImplemented(size_t i) const {
+    DEBUG_ASSERT(i < interfacesImplemented.size(), "array index out of range");
+    return interfacesImplemented.at(i);
+  }
+
+  void SetNthInterfaceImplemented(size_t i, TyIdx tyIdx) {
+    DEBUG_ASSERT(i < interfacesImplemented.size(), "array index out of range");
+    interfacesImplemented.at(i) = tyIdx;
+  }
+  void PushbackInterfaceImplemented(TyIdx idx) {
+    interfacesImplemented.push_back(idx);
+  }
+
+  void Dump(int indent, bool dontUseName = false) const override;
+  void DumpAsCxx(int indent) const override;
+  void SetComplete() override {
+    typeKind = kTypeClass;
+  }
+
+  bool IsFinal() const;
+  bool IsAbstract() const;
+  bool IsInner() const;
+  bool HasVolatileField() const override;
+  bool HasTypeParam() const override;
+  FieldPair TraverseToFieldRef(FieldID &fieldID) const override;
+  size_t GetSize() const override;
+
+  FieldID GetLastFieldID() const;
+  FieldID GetFirstFieldID() const {
+    return GetLastFieldID() - fields.size() + 1;
+  }
+
+  FieldID GetFirstLocalFieldID() const;
+  // return class id or superclass id according to the input string
+  MIRClassType *GetExceptionRootType();
+  const MIRClassType *GetExceptionRootType() const;
+  bool IsExceptionType()
const; + void AddImplementedInterface(TyIdx interfaceTyIdx) { + if (std::find(interfacesImplemented.begin(), interfacesImplemented.end(), interfaceTyIdx) == + interfacesImplemented.end()) { + interfacesImplemented.push_back(interfaceTyIdx); + } + } + + void ClearContents() override { + MIRStructType::ClearContents(); + parentTyIdx = TyIdx(0); + interfacesImplemented.clear(); // for the list of interfaces the class implements + info.clear(); + infoIsString.clear(); + pragmaVec.clear(); + staticValue.clear(); + } + + size_t GetHashIndex() const override { + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } + + uint32 NumberOfFieldIDs() const override; + + private: + TyIdx parentTyIdx{ 0 }; + std::vector interfacesImplemented{}; // for the list of interfaces the class implements + std::vector info{}; + std::vector infoIsString{}; + std::vector pragmaVec{}; + MIREncodedArray staticValue{}; // DELETE THIS +}; + +// used by kTypeInterface, kTypeInterfaceIncomplete +class MIRInterfaceType : public MIRStructType { + public: + explicit MIRInterfaceType(MIRTypeKind tKind) : MIRStructType(tKind) {} + MIRInterfaceType(MIRTypeKind tKind, GStrIdx strIdx) : MIRStructType(tKind, strIdx) {} + ~MIRInterfaceType() override = default; + + bool EqualTo(const MIRType &type) const override; + + MIRType *CopyMIRTypeNode() const override { + return new MIRInterfaceType(*this); + } + + const std::vector &GetInfo() const override { + return info; + } + void PushbackMIRInfo(const MIRInfoPair &pair) override { + info.push_back(pair); + } + uint32 GetInfo(const std::string &infoStr) const; + uint32 GetInfo(GStrIdx strIdx) const; + size_t GetInfoSize() const { + return info.size(); + } + + const MIRInfoPair &GetInfoElemt(size_t n) const override { + DEBUG_ASSERT(n < info.size(), "array index out of range"); + return info.at(n); + } + + const std::vector &GetInfoIsString() const override { + return infoIsString; + } + void PushbackIsString(bool isString) override { + infoIsString.push_back(isString); + } + size_t GetInfoIsStringSize() const { + return infoIsString.size(); + } + bool GetInfoIsStringElemt(size_t n) const override { + DEBUG_ASSERT(n < infoIsString.size(), "array index out of range"); + return infoIsString.at(n); + } + + std::vector &GetPragmaVec() override { + return pragmaVec; + } + const std::vector &GetPragmaVec() const override { + return pragmaVec; + } + void PushbackPragma(MIRPragma *pragma) override { + pragmaVec.push_back(pragma); + } + + const MIREncodedArray &GetStaticValue() const override { + return staticValue; + } + void PushbackStaticValue(EncodedValue &encodedValue) override { + staticValue.push_back(encodedValue); + } + + std::vector &GetParentsTyIdx() { + return parentsTyIdx; + } + void SetParentsTyIdx(const std::vector &parents) { + parentsTyIdx = parents; + } + const std::vector &GetParentsTyIdx() const { + return parentsTyIdx; + } + + TyIdx GetParentsElementTyIdx(size_t i) const { + DEBUG_ASSERT(i < parentsTyIdx.size(), "array index out of range"); + return parentsTyIdx[i]; + } + + void SetParentsElementTyIdx(size_t i, TyIdx tyIdx) { + DEBUG_ASSERT(i < parentsTyIdx.size(), "array index out of range"); + parentsTyIdx[i] = tyIdx; + } + + void Dump(int indent, bool dontUseName = false) const override; + bool HasVolatileField() const override; + bool HasTypeParam() const override; + FieldPair TraverseToFieldRef(FieldID &fieldID) const override; + void SetComplete() override { + typeKind = kTypeInterface; + } + + size_t 
GetSize() const override; + + void ClearContents() override { + MIRStructType::ClearContents(); + parentsTyIdx.clear(); + info.clear(); + infoIsString.clear(); + pragmaVec.clear(); + staticValue.clear(); + } + + size_t GetHashIndex() const override { + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } + + bool HasFields() const override { return false; } + uint32 NumberOfFieldIDs() const override { return 0; } + MIRStructType *EmbeddedStructType() override { return nullptr; } + + private: + std::vector parentsTyIdx{}; // multiple inheritence + std::vector info{}; + std::vector infoIsString{}; + std::vector pragmaVec{}; + MIREncodedArray staticValue{}; // DELETE THIS +}; + + +class MIRBitFieldType : public MIRType { + public: + MIRBitFieldType(uint8 field, PrimType pt) : MIRType(kTypeBitField, pt), fieldSize(field) {} + MIRBitFieldType(uint8 field, PrimType pt, GStrIdx strIdx) : MIRType(kTypeBitField, pt, strIdx), fieldSize(field) {} + ~MIRBitFieldType() override = default; + + uint8 GetFieldSize() const { + return fieldSize; + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + MIRType *CopyMIRTypeNode() const override { + return new MIRBitFieldType(*this); + } + + size_t GetSize() const override { + if (fieldSize == 0) { + return 0; + } else if (fieldSize <= 8) { + return 1; + } else { + return (fieldSize + 7) / 8; + } + } // size not be in bytes + + uint32 GetAlign() const override { + return 0; + } // align not be in bytes + + size_t GetHashIndex() const override { + return ((static_cast(primType) << fieldSize) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + private: + uint8 fieldSize; +}; + +class MIRFuncType : public MIRType { + public: + MIRFuncType() : MIRType(kTypeFunction, PTY_ptr) {} + + explicit MIRFuncType(const GStrIdx &strIdx) + : MIRType(kTypeFunction, PTY_ptr, strIdx) {} + + MIRFuncType(const TyIdx &retTyIdx, const std::vector &vecTy, const std::vector &vecAt, + const TypeAttrs &retAttrsIn) + : MIRType(kTypeFunction, PTY_ptr), + retTyIdx(retTyIdx), + paramTypeList(vecTy), + paramAttrsList(vecAt), + retAttrs(retAttrsIn) {} + + ~MIRFuncType() override = default; + + bool EqualTo(const MIRType &type) const override; + bool CompatibleWith(const MIRType &type) const; + MIRType *CopyMIRTypeNode() const override { + return new MIRFuncType(*this); + } + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + TyIdx GetRetTyIdx() const { + return retTyIdx; + } + + void SetRetTyIdx(TyIdx idx) { + retTyIdx = idx; + } + + const std::vector &GetParamTypeList() const { + return paramTypeList; + } + + std::vector &GetParamTypeList() { + return paramTypeList; + } + + TyIdx GetNthParamType(size_t i) const { + DEBUG_ASSERT(i < paramTypeList.size(), "array index out of range"); + return paramTypeList[i]; + } + + void SetParamTypeList(const std::vector &list) { + paramTypeList.clear(); + (void)paramTypeList.insert(paramTypeList.begin(), list.begin(), list.end()); + } + + const std::vector &GetParamAttrsList() const { + return paramAttrsList; + } + + std::vector &GetParamAttrsList() { + return paramAttrsList; + } + + const TypeAttrs &GetNthParamAttrs(size_t i) const { + DEBUG_ASSERT(i < paramAttrsList.size(), "array index out of range"); + return paramAttrsList[i]; + } + + TypeAttrs &GetNthParamAttrs(size_t i) { + DEBUG_ASSERT(i < paramAttrsList.size(), 
"array index out of range"); + return paramAttrsList[i]; + } + + void SetParamAttrsList(const std::vector &list) { + paramAttrsList.clear(); + (void)paramAttrsList.insert(paramAttrsList.begin(), list.begin(), list.end()); + } + + void SetNthParamAttrs(size_t i, const TypeAttrs &attrs) { + DEBUG_ASSERT(i < paramAttrsList.size(), "array index out of range"); + paramAttrsList[i] = attrs; + } + + bool IsVarargs() const { + return funcAttrs.GetAttr(FUNCATTR_varargs); + } + + void SetVarArgs() { + funcAttrs.SetAttr(FUNCATTR_varargs); + } + + bool FirstArgReturn() const { + return funcAttrs.GetAttr(FUNCATTR_firstarg_return); + } + + void SetFirstArgReturn() { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + + const TypeAttrs &GetRetAttrs() const { + return retAttrs; + } + + TypeAttrs &GetRetAttrs() { + return retAttrs; + } + + void SetRetAttrs(const TypeAttrs &attrs) { + retAttrs = attrs; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 6; + size_t hIdx = (static_cast(retTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + size_t size = paramTypeList.size(); + hIdx += (size ? (static_cast(paramTypeList[0]) + size) : 0) << 4; // shift bit is 4 + return hIdx % kTypeHashLength; + } + + public: + FuncAttrs funcAttrs; + private: + TyIdx retTyIdx{ 0 }; + std::vector paramTypeList; + std::vector paramAttrsList; + TypeAttrs retAttrs; +}; + +class MIRTypeByName : public MIRType { + // use nameStrIdx to store the name for both local and global + public: + explicit MIRTypeByName(GStrIdx gStrIdx) : MIRType(kTypeByName, PTY_void) { + nameStrIdx = gStrIdx; + } + + ~MIRTypeByName() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRTypeByName(*this); + } + + bool EqualTo(const MIRType &type) const override; + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 2; + return ((static_cast(nameStrIdx) << idxShift) + nameIsLocal + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } +}; + +class MIRTypeParam : public MIRType { + // use nameStrIdx to store the name + public: + explicit MIRTypeParam(GStrIdx gStrIdx) : MIRType(kTypeParam, PTY_gen) { + nameStrIdx = gStrIdx; + } + + ~MIRTypeParam() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRTypeParam(*this); + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + bool HasTypeParam() const override { + return true; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 3; + return ((static_cast(nameStrIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } +}; + +using TypePair = std::pair; +using GenericInstantVector = std::vector; +class MIRInstantVectorType : public MIRType { + public: + MIRInstantVectorType() : MIRType(kTypeInstantVector, PTY_agg) {} + + explicit MIRInstantVectorType(MIRTypeKind kind) : MIRType(kind, PTY_agg) {} + + MIRInstantVectorType(MIRTypeKind kind, GStrIdx strIdx) : MIRType(kind, PTY_agg, strIdx) {} + + ~MIRInstantVectorType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRInstantVectorType(*this); + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + 
return 0; + } // size unknown + + const GenericInstantVector &GetInstantVec() const { + return instantVec; + } + + GenericInstantVector &GetInstantVec() { + return instantVec; + } + + void AddInstant(TypePair typePair) { + instantVec.push_back(typePair); + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 3; + uint32 hIdx = typeKind << kShiftNumOfTypeKind; + for (const TypePair &typePair : instantVec) { + hIdx += static_cast(typePair.first + typePair.second) << idxShift; + } + return hIdx % kTypeHashLength; + } + + protected: + GenericInstantVector instantVec{}; // in each pair, first is generic type, second is real type +}; + +class MIRGenericInstantType : public MIRInstantVectorType { + public: + explicit MIRGenericInstantType(TyIdx genTyIdx) + : MIRInstantVectorType(kTypeGenericInstant), genericTyIdx(genTyIdx) {} + + explicit MIRGenericInstantType(GStrIdx strIdx) + : MIRInstantVectorType(kTypeGenericInstant, strIdx), genericTyIdx(0) {} + + ~MIRGenericInstantType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRGenericInstantType(*this); + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + + size_t GetSize() const override { + return 0; + } // size unknown + + TyIdx GetGenericTyIdx() const { + return genericTyIdx; + } + void SetGenericTyIdx(TyIdx idx) { + genericTyIdx = idx; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 2; + uint32 hIdx = (static_cast(genericTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + for (const TypePair &typePair : instantVec) { + hIdx += static_cast(typePair.first + typePair.second) << 3; // shift bit is 3 + } + return hIdx % kTypeHashLength; + } + + private: + TyIdx genericTyIdx; // the generic type to be instantiated +}; + +MIRType *GetElemType(const MIRType &arrayType); +#endif // MIR_FEATURE_FULL +} // namespace maple + +#define LOAD_SAFE_CAST_FOR_MIR_TYPE +#include "ir_safe_cast_traits.def" + +#endif // MAPLE_IR_INCLUDE_MIR_TYPE_H diff --git a/ecmascript/mapleall/maple_ir/include/mpl2mpl_options.h b/ecmascript/mapleall/maple_ir/include/mpl2mpl_options.h new file mode 100644 index 0000000000000000000000000000000000000000..4ad15946852161398b0adf67e0ae27053dc61de1 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/mpl2mpl_options.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H +#define MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H + +#include "cl_option.h" +#include "cl_parser.h" + +#include +#include +#include + +namespace opts::mpl2mpl { + +extern maplecl::Option dumpPhase; +extern maplecl::Option skipPhase; +extern maplecl::Option skipFrom; +extern maplecl::Option skipAfter; +extern maplecl::Option dumpFunc; +extern maplecl::Option quiet; +extern maplecl::Option maplelinker; +extern maplecl::Option regnativefunc; +extern maplecl::Option inlineWithProfile; +extern maplecl::Option inlineOpt; +extern maplecl::Option ipaClone; +extern maplecl::Option noInlineFunc; +extern maplecl::Option importFileList; +extern maplecl::Option crossModuleInline; +extern maplecl::Option inlineSmallFunctionThreshold; +extern maplecl::Option inlineHotFunctionThreshold; +extern maplecl::Option inlineRecursiveFunctionThreshold; +extern maplecl::Option inlineDepth; +extern maplecl::Option inlineModuleGrow; +extern maplecl::Option inlineColdFuncThresh; +extern maplecl::Option profileHotCount; +extern maplecl::Option profileColdCount; +extern maplecl::Option profileHotRate; +extern maplecl::Option profileColdRate; +extern maplecl::Option nativewrapper; +extern maplecl::Option regnativeDynamicOnly; +extern maplecl::Option staticBindingList; +extern maplecl::Option dumpBefore; +extern maplecl::Option dumpAfter; +extern maplecl::Option dumpMuid; +extern maplecl::Option emitVtableImpl; + +#if MIR_JAVA +extern maplecl::Option skipvirtual; +#endif + +extern maplecl::Option userc; +extern maplecl::Option strictNaiveRc; +extern maplecl::Option rcOpt1; +extern maplecl::Option nativeopt; +extern maplecl::Option o0; +extern maplecl::Option o2; +extern maplecl::Option os; +extern maplecl::Option criticalNative; +extern maplecl::Option fastNative; +extern maplecl::Option nodot; +extern maplecl::Option genIrProfile; +extern maplecl::Option profileTest; +extern maplecl::Option barrier; +extern maplecl::Option nativeFuncPropertyFile; +extern maplecl::Option maplelinkerNolocal; +extern maplecl::Option buildApp; +extern maplecl::Option partialAot; +extern maplecl::Option decoupleInit; +extern maplecl::Option sourceMuid; +extern maplecl::Option deferredVisit; +extern maplecl::Option deferredVisit2; +extern maplecl::Option decoupleSuper; +extern maplecl::Option genDecoupleVtab; +extern maplecl::Option profileFunc; +extern maplecl::Option dumpDevirtual; +extern maplecl::Option readDevirtual; +extern maplecl::Option usewhiteclass; +extern maplecl::Option appPackageName; +extern maplecl::Option checkClInvocation; +extern maplecl::Option dumpClInvocation; +extern maplecl::Option warning; +extern maplecl::Option lazyBinding; +extern maplecl::Option hotFix; +extern maplecl::Option compactMeta; +extern maplecl::Option genPGOReport; +extern maplecl::Option inlineCache; +extern maplecl::Option noComment; +extern maplecl::Option rmnousefunc; +extern maplecl::Option sideeffect; +extern maplecl::Option dumpIPA; +extern maplecl::Option wpaa; +extern maplecl::Option numOfCloneVersions; +extern maplecl::Option numOfImpExprLowBound; +extern maplecl::Option numOfImpExprHighBound; +extern maplecl::Option numOfCallSiteLowBound; +extern maplecl::Option numOfCallSiteUpBound; +extern maplecl::Option numOfConstpropValue; + +} + +#endif /* MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H */ diff --git a/ecmascript/mapleall/maple_ir/include/opcode_info.h b/ecmascript/mapleall/maple_ir/include/opcode_info.h new file mode 100644 index 
0000000000000000000000000000000000000000..6796b70a2520b5a2e58b72a5644abd8f858c481c
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/opcode_info.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_OPCODE_INFO_H
+#define MAPLE_IR_INCLUDE_OPCODE_INFO_H
+#include "types_def.h"
+#include "opcodes.h"
+#include "mpl_logging.h"
+
+namespace maple {
+enum OpcodeProp {
+  kOpcodePropNone,
+  kOpcodePropIsStmt,                 // The instruction is a stmt, so has 2 stmt pointers
+  kOpcodePropIsVarSize,              // The instruction size is not fixed
+  kOpcodePropNotMMPL,                // The instruction is not allowed in Machine Maple IR
+  kOpcodePropIsCompare,              // The instruction is one of the 6 comparison ops
+  kOpcodePropIsTypeCvt,              // The instruction is a type conversion op
+  kOpcodePropHasSSAUse,              // The instruction may incur a use in SSA form
+  kOpcodePropHasSSADef,              // The instruction may incur a def in SSA form
+  kOpcodePropIsCall,                 // The instruction is among the call instructions
+  kOpcodePropIsCallAssigned,         // The instruction is among the call instructions with implicit assignments of the
+                                     // returned values
+  kOpcodePropNotPure,                // The operation does not return the same result given identical operands
+  kOpcodePropMayThrowException,
+  kOpcodePropIsAssertNonnull,        // The operation checks that its operand is non-null
+  kOpcodePropIsAssertUpperBoundary,  // The operation checks the upper boundary
+  kOpcodePropIsAssertLowerBoundary,  // The operation checks the lower boundary
+};
+
+constexpr unsigned long OPCODEISSTMT = 1ULL << kOpcodePropIsStmt;
+constexpr unsigned long OPCODEISVARSIZE = 1ULL << kOpcodePropIsVarSize;
+constexpr unsigned long OPCODENOTMMPL = 1ULL << kOpcodePropNotMMPL;
+constexpr unsigned long OPCODEISCOMPARE = 1ULL << kOpcodePropIsCompare;
+constexpr unsigned long OPCODEISTYPECVT = 1ULL << kOpcodePropIsTypeCvt;
+constexpr unsigned long OPCODEHASSSAUSE = 1ULL << kOpcodePropHasSSAUse;
+constexpr unsigned long OPCODEHASSSADEF = 1ULL << kOpcodePropHasSSADef;
+constexpr unsigned long OPCODEISCALL = 1ULL << kOpcodePropIsCall;
+constexpr unsigned long OPCODEISCALLASSIGNED = 1ULL << kOpcodePropIsCallAssigned;
+constexpr unsigned long OPCODENOTPURE = 1ULL << kOpcodePropNotPure;
+constexpr unsigned long OPCODEMAYTHROWEXCEPTION = 1ULL << kOpcodePropMayThrowException;
+constexpr unsigned long OPCODEASSERTNONNULL = 1ULL << kOpcodePropIsAssertNonnull;
+constexpr unsigned long OPCODEASSERTUPPERBOUNDARY = 1ULL << kOpcodePropIsAssertUpperBoundary;
+constexpr unsigned long OPCODEASSERTLOWERBOUNDARY = 1ULL << kOpcodePropIsAssertLowerBoundary;
+
+struct OpcodeDesc {
+  uint8 instrucSize;  // size of instruction in bytes
+  uint16 flag;        // stores the opcode property flags
+  std::string name;
+};
+
+class OpcodeTable {
+ public:
+  OpcodeTable();
+  ~OpcodeTable() = default;
+
+  OpcodeDesc GetTableItemAt(Opcode o) const {
+    DEBUG_ASSERT(o < OP_last, "invalid opcode");
+    return table[o];
+  }
+
+  bool IsStmt(Opcode o) const {
+    DEBUG_ASSERT(o < OP_last, "invalid opcode");
+    return table[o].flag & OPCODEISSTMT;
+ } + + bool IsVarSize(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISVARSIZE; + } + + bool NotMMPL(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODENOTMMPL; + } + + bool IsCompare(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCOMPARE; + } + + bool IsTypeCvt(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISTYPECVT; + } + + bool HasSSAUse(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEHASSSAUSE; + } + + bool HasSSADef(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEHASSSADEF; + } + + bool IsCall(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCALL; + } + + bool IsCallAssigned(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCALLASSIGNED; + } + + bool IsICall(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_icall || o == OP_icallassigned || + o == OP_icallproto || o == OP_icallprotoassigned || + o == OP_virtualicall || o == OP_virtualicallassigned || + o == OP_interfaceicall || o == OP_interfaceicallassigned; + } + + bool NotPure(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODENOTPURE; + } + + bool MayThrowException(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEMAYTHROWEXCEPTION; + } + + bool HasSideEffect(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return MayThrowException(o); + } + + const std::string &GetName(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].name; + } + + bool IsCondBr(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_brtrue || o == OP_brfalse; + } + + bool AssignActualVar(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_dassign || o == OP_regassign; + } + + bool IsAssertNonnull(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTNONNULL; + } + + bool IsCallAssertNonnull(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_callassertnonnull; + } + + bool IsAssertBoundary(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & (OPCODEASSERTUPPERBOUNDARY | OPCODEASSERTLOWERBOUNDARY); + } + + bool IsAssertUpperBoundary(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTUPPERBOUNDARY; + } + + bool IsAssertLowerBoundary(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTLOWERBOUNDARY; + } + + bool IsCallAssertBoundary(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_callassertle; + } + + bool IsAssertLeBoundary(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return (o == OP_callassertle || o == OP_returnassertle || o == OP_assignassertle); + } + + bool IsCalcAssertBoundary(Opcode o) const { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return (o == OP_calcassertlt || o == OP_calcassertge); + } + + private: + OpcodeDesc table[OP_last]; +}; +extern const OpcodeTable kOpcodeInfo; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_OPCODE_INFO_H diff --git 
a/ecmascript/mapleall/maple_ir/include/opcodes.def b/ecmascript/mapleall/maple_ir/include/opcodes.def new file mode 100755 index 0000000000000000000000000000000000000000..0e842141edcdce169dbbb3ac3ffbb357acad5965 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/opcodes.def @@ -0,0 +1,224 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// Stmt & Notmmpl + // storage access opcodes + OPCODE(dassign, DassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(piassign, PiassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(maydassign, DassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(iassign, IassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 12) + // hierarchical control flow opcodes + OPCODE(block, BlockNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(doloop, DoloopNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(dowhile, WhileStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(if, IfStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(while, WhileStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(switch, SwitchNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(multiway, MultiwayNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(foreachelem, ForeachelemNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + // other opcodes + OPCODE(comment, CommentNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(eval, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(free, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(calcassertge, BinaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTLOWERBOUNDARY), 8) + OPCODE(calcassertlt, BinaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(assertge, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTLOWERBOUNDARY), 8) + OPCODE(assertlt, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(callassertle, CallAssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(returnassertle, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(assignassertle, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(abort, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(assertnonnull, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(assignassertnonnull, AssignAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(callassertnonnull, CallAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(returnassertnonnull, ReturnAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) +// Expr & Notmmpl + // storage access opcodes + OPCODE(dread, AddrofNode, (OPCODENOTMMPL | OPCODEHASSSAUSE), 12) + OPCODE(iread, IreadNode, (OPCODENOTMMPL | OPCODEHASSSAUSE), 12) + // leaf opcodes + OPCODE(addrof, AddrofNode, 
OPCODENOTMMPL, 12) + OPCODE(iaddrof, IreadNode, OPCODENOTMMPL, 12) + OPCODE(sizeoftype, SizeoftypeNode, OPCODENOTMMPL, 8) + OPCODE(fieldsdist, FieldsDistNode, OPCODENOTMMPL, 8) + // N-ary expression opcodes + OPCODE(array, ArrayNode, (OPCODEISVARSIZE | OPCODENOTMMPL | OPCODEMAYTHROWEXCEPTION), 8) +// Stmt + // storage access opcodes + OPCODE(iassignoff, IassignoffNode, OPCODEISSTMT, 8) + OPCODE(iassignfpoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(regassign, RegassignNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + // flat control flow opcodes + OPCODE(goto, GotoNode, OPCODEISSTMT, 8) + OPCODE(brfalse, CondGotoNode, OPCODEISSTMT, 8) + OPCODE(brtrue, CondGotoNode, OPCODEISSTMT, 8) + OPCODE(return, NaryStmtNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE), 0) + OPCODE(rangegoto, RangeGotoNode, OPCODEISSTMT, 8) + // call opcodes + OPCODE(call, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(virtualcall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(superclasscall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(interfacecall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(customcall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(polymorphiccall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 12) + OPCODE(icall, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(interfaceicall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(virtualicall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(intrinsiccall, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(intrinsiccallwithtype, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 12) + OPCODE(xintrinsiccall, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(callassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(virtualcallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(superclasscallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfacecallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(customcallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(polymorphiccallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(icallassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfaceicallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + 
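
These `OPCODE(name, nodeType, flags, size)` rows are data, not code: `opcodes.h` later in this diff defines `OPCODE(STR, YY, ZZ, SS)` as `OP_##STR` and then `#include`s this `.def` file to stamp out the `Opcode` enum, while `opcode_info.h` describes the same opcodes with flags and sizes. Below is a minimal, self-contained sketch of that X-macro pattern; the `MINI_*` names are hypothetical, and a list macro stands in for the separate `.def` include used by the real sources.

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical mini opcode list; in Maple this payload lives in opcodes.def.
#define MINI_OPCODE_LIST(V) \
  V(dassign, true)          \
  V(add, false)             \
  V(icall, true)

// First expansion: generate the enum, mirroring OP_##STR in opcodes.h.
enum MiniOpcode : std::uint8_t {
#define V(STR, IS_STMT) OP_##STR,
  MINI_OPCODE_LIST(V)
#undef V
  OP_last,
};

// Second expansion: generate a parallel descriptor table from the same list.
struct MiniOpcodeDesc {
  const char *name;
  bool isStmt;
};

constexpr MiniOpcodeDesc kMiniTable[] = {
#define V(STR, IS_STMT) {#STR, IS_STMT},
  MINI_OPCODE_LIST(V)
#undef V
};

int main() {
  // Query the table the same way kOpcodeInfo.IsStmt(op) is used in Maple.
  std::cout << kMiniTable[OP_icall].name << " isStmt=" << kMiniTable[OP_icall].isStmt << '\n';
  return 0;
}
```

Because the enum and the descriptor table are expanded from one list, adding a new entry to the `.def` file keeps both in sync automatically.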
OPCODE(virtualicallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  OPCODE(intrinsiccallassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  OPCODE(intrinsiccallwithtypeassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  OPCODE(xintrinsiccallassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  // call with generic instantiation opcodes
+  OPCODE(callinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+  OPCODE(callinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  OPCODE(virtualcallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+  OPCODE(virtualcallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  OPCODE(superclasscallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+  OPCODE(superclasscallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  OPCODE(interfacecallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+  OPCODE(interfacecallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+  // exception handling
+  OPCODE(jstry, JsTryNode, OPCODEISSTMT, 8)
+  OPCODE(try, TryNode, (OPCODEISSTMT | OPCODENOTMMPL), 8)
+  OPCODE(cpptry, TryNode, (OPCODEISSTMT | OPCODENOTMMPL), 8)
+
+  OPCODE(throw, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 0)
+
+  OPCODE(jscatch, StmtNode, OPCODEISSTMT, 4)
+  OPCODE(catch, CatchNode, OPCODEISSTMT, 8)
+  OPCODE(cppcatch, CppCatchNode, OPCODEISSTMT, 8)
+
+  OPCODE(finally, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(cleanuptry, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(endtry, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(safe, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(endsafe, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(unsafe, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(endunsafe, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(gosub, GotoNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 8)
+  OPCODE(retsub, StmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 6)
+  // synchronization
+  OPCODE(syncenter, NaryStmtNode, (OPCODEISSTMT | OPCODEHASSSADEF | OPCODEHASSSAUSE), 0)
+  OPCODE(syncexit, NaryStmtNode, (OPCODEISSTMT | OPCODEHASSSADEF | OPCODEHASSSAUSE), 0)
+  OPCODE(decref, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0)
+  OPCODE(incref, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0)
+  OPCODE(decrefreset, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0)
+  // barriers
+  OPCODE(membaracquire, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(membarrelease, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(membarstoreload, StmtNode, OPCODEISSTMT, 6)
+  OPCODE(membarstorestore, StmtNode, OPCODEISSTMT, 6)
+  // other opcodes
+  OPCODE(label, LabelNode, OPCODEISSTMT, 8)
+// Expr
+  // storage access opcodes
+  OPCODE(ireadoff, IreadoffNode, 0, 8)
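
Each entry's flags column ORs together the `OPCODE*` masks declared in `opcode_info.h`, so a predicate such as `OpcodeTable::IsCall` reduces to a single AND against the stored 16-bit flag word. The `OpcodeTable()` constructor itself is not part of this diff; the sketch below only illustrates, with simplified stand-in names and illustrative bit positions, how such a descriptor table is plausibly filled and queried.

```cpp
#include <cassert>
#include <cstdint>

// Simplified stand-ins for the masks declared in opcode_info.h.
constexpr uint16_t kIsStmt = 1U << 1;
constexpr uint16_t kIsCall = 1U << 8;
constexpr uint16_t kIsCallAssigned = 1U << 9;

enum Op { kOpCall, kOpCallAssigned, kOpAdd, kOpLast };

struct Desc {
  uint8_t size;   // instruction size in bytes, last column of a .def entry
  uint16_t flag;  // OR of the property masks, third column of a .def entry
};

// One descriptor per .def entry; the flag word is taken verbatim from
// the entry's flags column, which is what an OpcodeTable-style
// constructor would do when expanding the .def file.
struct Table {
  Desc table[kOpLast];
  Table() {
    table[kOpCall] = {8, kIsStmt | kIsCall};
    table[kOpCallAssigned] = {0, kIsStmt | kIsCall | kIsCallAssigned};
    table[kOpAdd] = {0, 0};
  }
  bool IsCall(Op o) const { return (table[o].flag & kIsCall) != 0; }
};

int main() {
  Table t;
  assert(t.IsCall(kOpCall) && t.IsCall(kOpCallAssigned) && !t.IsCall(kOpAdd));
  return 0;
}
```

Storing properties as bits in one word keeps every opcode predicate branch-free and lets compound checks (for example "is a call with an implicit assignment") combine masks in a single test.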
OPCODE(ireadfpoff, IreadFPoffNode, 0, 8) + OPCODE(regread, RegreadNode, OPCODEHASSSAUSE, 8) + // leaf opcodes + OPCODE(addroffunc, AddroffuncNode, 0, 8) + OPCODE(addroflabel, AddroflabelNode, 0, 8) + OPCODE(constval, ConstvalNode, 0, 8) + OPCODE(conststr, ConststrNode, OPCODENOTMMPL, 8) + OPCODE(conststr16, Conststr16Node, OPCODENOTMMPL, 8) + // type conversion expression opcodes + OPCODE(ceil, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(cvt, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(floor, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(retype, RetypeNode, OPCODEISTYPECVT, 8) + OPCODE(round, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(trunc, TypeCvtNode, OPCODEISTYPECVT, 8) + // unary expression opcodes + OPCODE(abs, UnaryNode, 0, 0) + OPCODE(bnot, UnaryNode, 0, 0) + OPCODE(lnot, UnaryNode, 0, 0) + OPCODE(neg, UnaryNode, 0, 0) + OPCODE(recip, UnaryNode, 0, 0) + OPCODE(sqrt, UnaryNode, 0, 0) + OPCODE(sext, ExtractbitsNode, 0, 8) + OPCODE(zext, ExtractbitsNode, 0, 8) + OPCODE(alloca, UnaryNode, OPCODENOTPURE, 0) + OPCODE(malloc, UnaryNode, OPCODENOTPURE, 0) + OPCODE(gcmalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(gcpermalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(stackmalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(gcmallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(gcpermallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(stackmallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(resolveinterfacefunc, ResolveFuncNode, 0, 8) + OPCODE(resolvevirtualfunc, ResolveFuncNode, 0, 8) + // binary expression opcodes + OPCODE(add, BinaryNode, 0, 0) + OPCODE(sub, BinaryNode, 0, 0) + OPCODE(mul, BinaryNode, 0, 0) + OPCODE(div, BinaryNode, OPCODEMAYTHROWEXCEPTION, 0) + OPCODE(rem, BinaryNode, OPCODEMAYTHROWEXCEPTION, 0) + OPCODE(ashr, BinaryNode, 0, 0) + OPCODE(lshr, BinaryNode, 0, 0) + OPCODE(shl, BinaryNode, 0, 0) + OPCODE(ror, BinaryNode, 0, 0) + OPCODE(max, BinaryNode, 0, 0) + OPCODE(min, BinaryNode, 0, 0) + OPCODE(band, BinaryNode, 0, 0) + OPCODE(bior, BinaryNode, 0, 0) + OPCODE(bxor, BinaryNode, 0, 0) + OPCODE(CG_array_elem_add, BinaryNode, 0, 0) + OPCODE(eq, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(ge, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(gt, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(le, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(lt, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(ne, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmp, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmpl, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmpg, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(land, BinaryNode, 0, 0) + OPCODE(lior, BinaryNode, 0, 0) + OPCODE(cand, BinaryNode, OPCODENOTMMPL, 0) + OPCODE(cior, BinaryNode, OPCODENOTMMPL, 0) + // ternary expression opcodes + OPCODE(select, TernaryNode, 0, 0) + // N-ary expression opcodes + OPCODE(intrinsicop, IntrinsicopNode, OPCODEISVARSIZE, 8) + OPCODE(intrinsicopwithtype, IntrinsicopNode, OPCODEISVARSIZE, 12) + // Other expression opcodes + OPCODE(extractbits, ExtractbitsNode, 0, 8) + OPCODE(depositbits, DepositbitsNode, 0, 8) + // storage access + OPCODE(iassignpcoff, IassignPCoffNode, OPCODEISSTMT, 0) + OPCODE(ireadpcoff, IreadPCoffNode, 0, 0) + // barrier + OPCODE(checkpoint, StmtNode, OPCODEISSTMT, 0) + // leaf node + OPCODE(addroffpc, AddroffPCNode, 0, 0) + OPCODE(igoto, UnaryStmtNode, OPCODEISSTMT, 0) + OPCODE(asm, AsmNode, OPCODEISSTMT | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALLASSIGNED, 0) + OPCODE(dreadoff, dreadoffNode, OPCODEHASSSAUSE, 12) + OPCODE(addrofoff, addrofoffNode, 0, 12) + OPCODE(dassignoff, DassignoffNode, (OPCODEISSTMT | 
OPCODEHASSSADEF), 8) + OPCODE(iassignspoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(blkassignoff, BlkassignoffNode, OPCODEISSTMT, 8) + OPCODE(icallproto, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(icallprotoassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 8) diff --git a/ecmascript/mapleall/maple_ir/include/opcodes.h b/ecmascript/mapleall/maple_ir/include/opcodes.h new file mode 100644 index 0000000000000000000000000000000000000000..b9f5e0f8c5b03cc60b53f30c88963fce5540992a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/opcodes.h @@ -0,0 +1,226 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_OPCODES_H +#define MAPLE_IR_INCLUDE_OPCODES_H +#include "types_def.h" +#include "mpl_logging.h" + +namespace maple { +enum Opcode : uint8 { + OP_undef, +#define OPCODE(STR, YY, ZZ, SS) OP_##STR, +#include "opcodes.def" +#undef OPCODE + OP_last, +}; + +#define CASE_OP_ASSERT_NONNULL \ + case OP_assertnonnull: \ + case OP_assignassertnonnull: \ + case OP_callassertnonnull: \ + case OP_returnassertnonnull: + +#define CASE_OP_ASSERT_BOUNDARY \ + case OP_assertge: \ + case OP_assertlt: \ + case OP_calcassertge: \ + case OP_calcassertlt: \ + case OP_callassertle: \ + case OP_returnassertle: \ + case OP_assignassertle: + +inline constexpr bool IsDAssign(Opcode code) { + return (code == OP_dassign || code == OP_maydassign); +} + +inline constexpr bool IsCallAssigned(Opcode code) { + return (code == OP_callassigned || code == OP_virtualcallassigned || + code == OP_virtualicallassigned || code == OP_superclasscallassigned || + code == OP_interfacecallassigned || code == OP_interfaceicallassigned || + code == OP_customcallassigned || code == OP_polymorphiccallassigned || + code == OP_icallassigned || code == OP_icallprotoassigned || code == OP_intrinsiccallassigned || + code == OP_xintrinsiccallassigned || code == OP_intrinsiccallwithtypeassigned); +} + +inline constexpr bool IsBranch(Opcode opcode) { + return (opcode == OP_goto || opcode == OP_brtrue || opcode == OP_brfalse || opcode == OP_switch || + opcode == OP_igoto); +} + +inline constexpr bool IsLogicalShift(Opcode opcode) { + return (opcode == OP_lshr || opcode == OP_shl); +} + +constexpr bool IsCommutative(Opcode opcode) { + switch (opcode) { + case OP_add: + case OP_mul: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_eq: + case OP_ne: + case OP_land: + case OP_lior: + return true; + default: + return false; + } +} + +constexpr bool IsStmtMustRequire(Opcode opcode) { + switch (opcode) { + case OP_jstry: + case OP_throw: + case OP_try: + case OP_catch: + case OP_jscatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_gosub: + case OP_retsub: + case OP_return: + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case 
OP_interfaceicall:
+    case OP_customcall:
+    case OP_polymorphiccall:
+    case OP_callassigned:
+    case OP_virtualcallassigned:
+    case OP_virtualicallassigned:
+    case OP_superclasscallassigned:
+    case OP_interfacecallassigned:
+    case OP_interfaceicallassigned:
+    case OP_customcallassigned:
+    case OP_polymorphiccallassigned:
+    case OP_icall:
+    case OP_icallassigned:
+    case OP_icallproto:
+    case OP_icallprotoassigned:
+    case OP_intrinsiccall:
+    case OP_xintrinsiccall:
+    case OP_intrinsiccallassigned:
+    case OP_xintrinsiccallassigned:
+    case OP_intrinsiccallwithtype:
+    case OP_intrinsiccallwithtypeassigned:
+    case OP_asm:
+    case OP_syncenter:
+    case OP_syncexit:
+    case OP_membaracquire:
+    case OP_membarrelease:
+    case OP_membarstoreload:
+    case OP_membarstorestore:
+    CASE_OP_ASSERT_NONNULL
+    CASE_OP_ASSERT_BOUNDARY
+    case OP_free:
+    case OP_incref:
+    case OP_decref:
+    case OP_decrefreset: {
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+
+// the result of these ops is actually u1 (it may be set as another type, but its return value can only be zero or one)
+// different from kOpcodeInfo.IsCompare(op): cmp/cmpg/cmpl have no reverse op and may return -1/0/1
+constexpr bool IsCompareHasReverseOp(Opcode op) {
+  if (op == OP_eq || op == OP_ne || op == OP_ge || op == OP_gt || op == OP_le || op == OP_lt) {
+    return true;
+  }
+  return false;
+}
+
+constexpr Opcode GetSwapCmpOp(Opcode op) {
+  switch (op) {
+    case OP_eq:
+      return OP_eq;
+    case OP_ne:
+      return OP_ne;
+    case OP_ge:
+      return OP_le;
+    case OP_gt:
+      return OP_lt;
+    case OP_le:
+      return OP_ge;
+    case OP_lt:
+      return OP_gt;
+    default:
+      CHECK_FATAL(false, "can't swap op");
+      return op;
+  }
+}
+
+constexpr Opcode GetReverseCmpOp(Opcode op) {
+  switch (op) {
+    case OP_eq:
+      return OP_ne;
+    case OP_ne:
+      return OP_eq;
+    case OP_ge:
+      return OP_lt;
+    case OP_gt:
+      return OP_le;
+    case OP_le:
+      return OP_gt;
+    case OP_lt:
+      return OP_ge;
+    default:
+      CHECK_FATAL(false, "opcode has no reverse op");
+      return op;
+  }
+}
+
+constexpr bool IsSupportedOpForCopyInPhasesLoopUnrollAndVRP(Opcode op) {
+  switch (op) {
+    case OP_igoto:
+    case OP_switch:
+    case OP_comment:
+    case OP_goto:
+    case OP_dassign:
+    case OP_regassign:
+    case OP_membarrelease:
+    case OP_brfalse:
+    case OP_brtrue:
+    case OP_maydassign:
+    case OP_iassign:
+    CASE_OP_ASSERT_NONNULL
+    CASE_OP_ASSERT_BOUNDARY
+    case OP_membaracquire:
+    case OP_call:
+    case OP_callassigned:
+    case OP_virtualcallassigned:
+    case OP_virtualicallassigned:
+    case OP_interfaceicallassigned:
+    case OP_intrinsiccall:
+    case OP_intrinsiccallassigned:
+    case OP_intrinsiccallwithtype:
+    case OP_membarstorestore:
+    case OP_membarstoreload: {
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_OPCODES_H
diff --git a/ecmascript/mapleall/maple_ir/include/option.h b/ecmascript/mapleall/maple_ir/include/option.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc85bb8288abc8f3676744bc45f6c93d09c58277
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/option.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_OPTION_H
+#define MAPLE_IR_INCLUDE_OPTION_H
+#include <string>
+#include <vector>
+
+#include "mempool.h"
+#include "mempool_allocator.h"
+#include "parser_opt.h"
+#include "types_def.h"
+
+namespace maple {
+class Options {
+ public:
+  static Options &GetInstance();
+
+  bool ParseOptions(int argc, char **argv, std::string &fileName) const;
+
+  bool SolveOptions(bool isDebug) const;
+  ~Options() = default;
+
+  void DumpOptions() const;
+  const std::vector<std::string> &GetSequence() const {
+    return phaseSeq;
+  }
+
+  std::string LastPhaseName() const {
+    return phaseSeq.empty() ? "noopt" : phaseSeq[phaseSeq.size() - 1];
+  }
+
+  enum Level {
+    kMpl2MplLevelZero = 0,
+    kMpl2MplLevelOne = 1,
+    kMpl2MplLevelTwo = 2
+  };
+  enum DecoupleLevel {
+    kNoDecouple = 0,
+    kConservativeDecouple = 1,
+    kAggressiveDecouple = 2,
+    kDecoupleAndLazy = 3
+  };
+
+  static bool DumpPhase(const std::string &phase) {
+    if (phase == "") {
+      return false;
+    }
+    return dumpPhase == "*" || dumpPhase == phase;
+  }
+
+  static bool IsSkipPhase(const std::string &phaseName) {
+    return skipPhase == phaseName;
+  }
+
+  static bool DumpFunc() {
+    return dumpFunc != "*" && dumpFunc != "";
+  }
+  static bool IsBigEndian() {
+    return bigEndian;
+  }
+
+  static bool dumpBefore;
+  static bool dumpAfter;
+  static std::string dumpPhase;
+  static std::string skipPhase;
+  static std::string skipFrom;
+  static std::string skipAfter;
+  static std::string dumpFunc;
+  static bool quiet;
+  static bool regNativeFunc;
+  static bool regNativeDynamicOnly;
+  static bool nativeWrapper;
+  static bool inlineWithProfile;
+  static bool useInline;
+  static bool enableIPAClone;
+  static std::string noInlineFuncList;
+  static std::string importFileList;
+  static bool useCrossModuleInline;
+  static uint32 numOfCloneVersions;
+  static uint32 numOfImpExprLowBound;
+  static uint32 numOfImpExprHighBound;
+  static uint32 numOfCallSiteLowBound;
+  static uint32 numOfCallSiteUpBound;
+  static uint32 numOfConstpropValue;
+  static uint32 inlineSmallFunctionThreshold;
+  static uint32 inlineHotFunctionThreshold;
+  static uint32 inlineRecursiveFunctionThreshold;
+  static uint32 inlineDepth;
+  static uint32 inlineModuleGrowth;
+  static uint32 inlineColdFunctionThreshold;
+  static uint32 profileHotCount;
+  static uint32 profileColdCount;
+  static bool profileHotCountSeted;
+  static bool profileColdCountSeted;
+  static uint32 profileHotRate;
+  static uint32 profileColdRate;
+  static std::string staticBindingList;
+  static bool usePreg;
+  static bool mapleLinker;
+  static bool dumpMuidFile;
+  static bool emitVtableImpl;
+#if MIR_JAVA
+  static bool skipVirtualMethod;
+#endif
+  // Ready to be deleted.
+  static bool noRC;
+  static bool analyzeCtor;
+  static bool strictNaiveRC;
+  static bool gcOnly;
+  static bool bigEndian;
+  static bool rcOpt1;
+  static std::string classMetaProFile;
+  static std::string methodMetaProfile;
+  static std::string fieldMetaProFile;
+  static std::string reflectStringProFile;
+  static bool nativeOpt;
+  static bool optForSize;
+  static bool O2;
+  static bool noDot;
+  static bool decoupleStatic;
+  static std::string criticalNativeFile;
+  static std::string fastNativeFile;
+  static bool barrier;
+  static std::string nativeFuncPropertyFile;
+  static bool mapleLinkerTransformLocal;
+  static uint32 buildApp;
+  static bool partialAot;
+  static uint32 decoupleInit;
+  static std::string sourceMuid;
+  static bool decoupleSuper;
+  static bool deferredVisit;
+  static bool deferredVisit2;
+  static bool genVtabAndItabForDecouple;
+  static bool profileFunc;
+  static uint32 parserOpt;
+  static std::string dumpDevirtualList;
+  static std::string readDevirtualList;
+  static bool usePreloadedClass;
+  static std::string profile;
+  static bool profileGen;
+  static bool profileUse;
+  static std::string appPackageName;
+  static std::string proFileData;
+  static std::string proFileFuncData;
+  static std::string proFileClassData;
+  static bool profileStaticFields;
+  static bool genIRProfile;
+  static bool profileTest;
+  static std::string classLoaderInvocationList;
+  static bool dumpClassLoaderInvocation;
+  static unsigned int warningLevel;
+  static bool lazyBinding;
+  static bool hotFix;
+  static bool compactMeta;
+  static bool genPGOReport;
+  static bool verify;
+  static uint32 inlineCache;
+  static bool checkArrayStore;
+  static bool noComment;
+  static bool rmNoUseFunc;
+  static bool sideEffect;
+  static bool dumpIPA;
+  static bool wpaa;
+  static bool genLMBC;
+
+ private:
+  void DecideMpl2MplRealLevel() const;
+  std::vector<std::string> phaseSeq;
+};
+} // namespace maple
+#ifndef TRACE_PHASE
+#define TRACE_PHASE (Options::dumpPhase.compare(PhaseName()) == 0)
+#endif
+
+#ifndef TRACE_MAPLE_PHASE
+#define TRACE_MAPLE_PHASE (Options::dumpPhase.compare(PhaseName()) == 0)
+#endif
+#endif // MAPLE_IR_INCLUDE_OPTION_H
diff --git a/ecmascript/mapleall/maple_ir/include/parser_opt.h b/ecmascript/mapleall/maple_ir/include/parser_opt.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa470b82d0a8932d93e9ab57b8cf2122e64fb1d6
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/parser_opt.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLE_IR_INCLUDE_PARSER_OPT_H +#define MAPLE_IR_INCLUDE_PARSER_OPT_H +#include "types_def.h" + +namespace maple { +// option bits passed into ParseMIR +enum ParserOptions : uint8 { + kInvalidOption = 0x0, + kWithDbgInfo = 0x1, // collect dbginfo + kKeepFirst = 0x2, // ignore second type def, not emit error + kWithProfileInfo = 0x4, + kParseOptFunc = 0x08, // parse optimized function mpl file + kParseInlineFuncBody = 0x10 // parse to-be-inlined function bodies +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_PARSER_OPT_H diff --git a/ecmascript/mapleall/maple_ir/include/prim_types.def b/ecmascript/mapleall/maple_ir/include/prim_types.def new file mode 100644 index 0000000000000000000000000000000000000000..09f95fa2fbfe5cb50284f6ffdabd72513426b607 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/prim_types.def @@ -0,0 +1,490 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifdef LOAD_ALGO_PRIMARY_TYPE +#undef LOAD_ALGO_PRIMARY_TYPE +// NOTE: this ordering needs to be in sync with ptypesizetable[] in maplevm/src/vmfunc.cpp + PRIMTYPE(void) + PRIMTYPE(i8) + PRIMTYPE(i16) + PRIMTYPE(i32) + PRIMTYPE(i64) + PRIMTYPE(i128) + PRIMTYPE(u8) + PRIMTYPE(u16) + PRIMTYPE(u32) + PRIMTYPE(u64) + PRIMTYPE(u128) + PRIMTYPE(u1) + PRIMTYPE(ptr) + PRIMTYPE(ref) + PRIMTYPE(a32) + PRIMTYPE(a64) + PRIMTYPE(f32) + PRIMTYPE(f64) + PRIMTYPE(f128) + PRIMTYPE(c64) + PRIMTYPE(c128) +#ifdef DYNAMICLANG + PRIMTYPE(simplestr) + PRIMTYPE(simpleobj) + PRIMTYPE(dynany) + PRIMTYPE(dynundef) + PRIMTYPE(dynnull) + PRIMTYPE(dynbool) + PRIMTYPE(dyni32) + PRIMTYPE(dynstr) + PRIMTYPE(dynobj) + PRIMTYPE(dynf64) + PRIMTYPE(dynf32) + PRIMTYPE(dynnone) +#endif + PRIMTYPE(constStr) + PRIMTYPE(gen) + PRIMTYPE(agg) + PRIMTYPE(v2i64) + PRIMTYPE(v4i32) + PRIMTYPE(v8i16) + PRIMTYPE(v16i8) + PRIMTYPE(v2u64) + PRIMTYPE(v4u32) + PRIMTYPE(v8u16) + PRIMTYPE(v16u8) + PRIMTYPE(v2f64) + PRIMTYPE(v4f32) + PRIMTYPE(v2i32) + PRIMTYPE(v4i16) + PRIMTYPE(v8i8) + PRIMTYPE(v2u32) + PRIMTYPE(v4u16) + PRIMTYPE(v8u8) + PRIMTYPE(v2f32) + PRIMTYPE(reservedpty1) + PRIMTYPE(reservedpty2) + PRIMTYPE(reservedpty3) + PRIMTYPE(reservedpty4) + PRIMTYPE(reservedpty5) + PRIMTYPE(reservedpty6) + PRIMTYPE(reservedpty7) + PRIMTYPE(reservedpty8) + PRIMTYPE(reservedpty9) + PRIMTYPE(reservedpty10) + PRIMTYPE(unknown) +#endif // ~LOAD_ALGO_PRIMARY_TYPE + + +#ifdef LOAD_PRIMARY_TYPE_PROPERTY +#undef LOAD_PRIMARY_TYPE_PROPERTY + +static const PrimitiveTypeProperty PTProperty_begin = { + /*type=*/PTY_begin, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_void = { + /*type=*/PTY_void, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static 
const PrimitiveTypeProperty PTProperty_i8 = {
+  /*type=*/PTY_i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i16 = {
+  /*type=*/PTY_i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i32 = {
+  /*type=*/PTY_i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i64 = {
+  /*type=*/PTY_i64, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i128 = {
+  /*type=*/PTY_i128, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u8 = {
+  /*type=*/PTY_u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u16 = {
+  /*type=*/PTY_u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+/* isAddress and isPointer are overloaded in getter method for PTProperty_u32 */
+static const PrimitiveTypeProperty PTProperty_u32 = {
+  /*type=*/PTY_u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+/* isAddress and isPointer are overloaded in getter method for PTProperty_u64 */
+static const PrimitiveTypeProperty PTProperty_u64 = {
+  /*type=*/PTY_u64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false,
+  /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u128 = {
+  /*type=*/PTY_u128, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u1 = {
+  /*type=*/PTY_u1, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+  /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+  /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_ptr = {
+  /*type=*/PTY_ptr, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true,
/*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_ref = { + /*type=*/PTY_ref, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_a32 = { + /*type=*/PTY_a32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_a64 = { + /*type=*/PTY_a64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f32 = { + /*type=*/PTY_f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f64 = { + /*type=*/PTY_f64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f128 = { + /*type=*/PTY_f128, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_c64 = { + /*type=*/PTY_c64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_c128 = { + /*type=*/PTY_c128, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +#ifdef DYNAMICLANG +static const PrimitiveTypeProperty PTProperty_simplestr = { + /*type=*/PTY_simplestr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/true, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_simpleobj = { + /*type=*/PTY_simpleobj, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/true, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynany = { + /*type=*/PTY_dynany, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/true, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynundef = { + /*type=*/PTY_dynundef, 
/*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynnull = { + /*type=*/PTY_dynnull, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynbool = { + /*type=*/PTY_dynbool, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dyni32 = { + /*type=*/PTY_dyni32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynstr = { + /*type=*/PTY_dynstr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynobj = { + /*type=*/PTY_dynobj, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynf64 = { + /*type=*/PTY_dynf64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynf32 = { + /*type=*/PTY_dynf32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynnone = { + /*type=*/PTY_dynnone, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/true, + /*isVector*/false +}; +#endif // ~DYNAMICLANG + +static const PrimitiveTypeProperty PTProperty_constStr = { + /*type=*/PTY_constStr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_gen = { + /*type=*/PTY_gen, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_agg = { + /*type=*/PTY_agg, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + 
/*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_v2i64 = { + /*type=*/PTY_v2i64, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4i32 = { + /*type=*/PTY_v4i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8i16 = { + /*type=*/PTY_v8i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v16i8 = { + /*type=*/PTY_v16i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2u64 = { + /*type=*/PTY_v2u64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4u32 = { + /*type=*/PTY_v4u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8u16 = { + /*type=*/PTY_v8u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v16u8 = { + /*type=*/PTY_v16u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2f64 = { + /*type=*/PTY_v2f64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4f32 = { + /*type=*/PTY_v4f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2i32 = { + /*type=*/PTY_v2i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4i16 = { + /*type=*/PTY_v4i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, 
/*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8i8 = { + /*type=*/PTY_v8i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2u32 = { + /*type=*/PTY_v2u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4u16 = { + /*type=*/PTY_v4u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8u8 = { + /*type=*/PTY_v8u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2f32 = { + /*type=*/PTY_v2f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty1 = { + /*type=*/PTY_reservedpty1, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty2 = { + /*type=*/PTY_reservedpty2, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty3 = { + /*type=*/PTY_reservedpty3, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty4 = { + /*type=*/PTY_reservedpty4, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty5 = { + /*type=*/PTY_reservedpty5, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty6 = { + /*type=*/PTY_reservedpty6, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty7 = { + /*type=*/PTY_reservedpty7, 
/*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty8 = { + /*type=*/PTY_reservedpty8, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty9 = { + /*type=*/PTY_reservedpty9, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty10 = { + /*type=*/PTY_reservedpty10, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_unknown = { + /*type=*/PTY_unknown, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_end = { + /*type=*/PTY_end, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +#endif // ~LOAD_PRIMARY_TYPE_PROPERTY diff --git a/ecmascript/mapleall/maple_ir/include/prim_types.h b/ecmascript/mapleall/maple_ir/include/prim_types.h new file mode 100644 index 0000000000000000000000000000000000000000..51e84517a643542847fa059a0937f480392a234a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/prim_types.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_PRIM_TYPES_H +#define MAPLE_IR_INCLUDE_PRIM_TYPES_H +#include "types_def.h" +#include "cfg_primitive_types.h" + +namespace maple { +class PrimitiveType { + public: + // we need implicit conversion from PrimType to PrimitiveType, so there is no explicit keyword here. 
+  PrimitiveType(PrimType type) : property(GetPrimitiveTypeProperty(type)) {}
+  ~PrimitiveType() = default;
+
+  PrimType GetType() const {
+    return property.type;
+  }
+
+  bool IsInteger() const {
+    return property.IsInteger();
+  }
+  bool IsUnsigned() const {
+    return property.IsUnsigned();
+  }
+  bool IsAddress() const {
+    return property.IsAddress();
+  }
+  bool IsFloat() const {
+    return property.IsFloat();
+  }
+  bool IsPointer() const {
+    return property.IsPointer();
+  }
+  bool IsDynamic() const {
+    return property.IsDynamic();
+  }
+  bool IsSimple() const {
+    return property.IsSimple();
+  }
+  bool IsDynamicAny() const {
+    return property.IsDynamicAny();
+  }
+  bool IsDynamicNone() const {
+    return property.IsDynamicNone();
+  }
+  bool IsVector() const {
+    return property.IsVector();
+  }
+
+ private:
+  const PrimitiveTypeProperty &property;
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_PRIM_TYPES_H
diff --git a/ecmascript/mapleall/maple_ir/include/printing.h b/ecmascript/mapleall/maple_ir/include/printing.h
new file mode 100644
index 0000000000000000000000000000000000000000..305932b74732e6c3ace2a2df665d169b65d216e5
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/printing.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_PRINTING_H
+#define MAPLE_IR_INCLUDE_PRINTING_H
+#include <string>
+#include "types_def.h"
+
+namespace maple {
+void PrintIndentation(int32 indent);
+void PrintString(const std::string &str);
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_PRINTING_H
diff --git a/ecmascript/mapleall/maple_ir/include/simplifyintrinsics.def b/ecmascript/mapleall/maple_ir/include/simplifyintrinsics.def
new file mode 100644
index 0000000000000000000000000000000000000000..30c04a7c4576da4d1562a9e3f4ff346b3b3ad804
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/simplifyintrinsics.def
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */ + +/* INTRINSIC(STR, NAME) */ +DEF_MIR_INTRINSIC(GET_AND_ADDI, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddInt_7C_28Ljava_2Flang_2FObject_3BJI_29I",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32) +DEF_MIR_INTRINSIC(GET_AND_ADDL, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI64, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(GET_AND_SETI, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetInt_7C_28Ljava_2Flang_2FObject_3BJI_29I",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32) +DEF_MIR_INTRINSIC(GET_AND_SETL, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI64, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(COMP_AND_SWAPI, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapInt_7C_28Ljava_2Flang_2FObject_3BJII_29Z",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(COMP_AND_SWAPL, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapLong_7C_28Ljava_2Flang_2FObject_3BJJJ_29Z",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(STR_INDEXOF, "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef) + diff --git a/ecmascript/mapleall/maple_ir/include/src_position.h b/ecmascript/mapleall/maple_ir/include/src_position.h new file mode 100644 index 0000000000000000000000000000000000000000..ed0f5e5784fcf95499f9223a2a4a397535f09b29 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/src_position.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_SRC_POSITION_H +#define MAPLE_IR_INCLUDE_SRC_POSITION_H +#include "mpl_logging.h" + +namespace maple { +// to store source position information +class SrcPosition { + public: + SrcPosition() : lineNum(0), mplLineNum(0) { + u.word0 = 0; + } + + virtual ~SrcPosition() = default; + + uint32 RawData() const { + return u.word0; + } + + uint32 FileNum() const { + return u.fileColumn.fileNum; + } + + uint32 Column() const { + return u.fileColumn.column; + } + + uint32 LineNum() const { + return lineNum; + } + + uint32 MplLineNum() const { + return mplLineNum; + } + + void SetFileNum(uint16 n) { + u.fileColumn.fileNum = n; + } + + void SetColumn(uint16 n) { + u.fileColumn.column = n; + } + + void SetLineNum(uint32 n) { + lineNum = n; + } + + void SetRawData(uint32 n) { + u.word0 = n; + } + + void SetMplLineNum(uint32 n) { + mplLineNum = n; + } + + void CondSetLineNum(uint32 n) { + lineNum = lineNum ? lineNum : n; + } + + void CondSetFileNum(uint16 n) { + uint16 i = u.fileColumn.fileNum; + u.fileColumn.fileNum = i ? 
i : n; + } + + // as you read: this->IsBfOrEq(pos) + bool IsBfOrEq(SrcPosition pos) const { + return (pos.FileNum() == FileNum() && + ((LineNum() < pos.LineNum()) || + ((LineNum() == pos.LineNum()) && (Column() <= pos.Column())))); + } + + bool IsSrcPostionEq(SrcPosition pos) const { + return FileNum() == pos.FileNum() && LineNum() == pos.LineNum() && Column() == pos.Column(); + } + + void DumpLoc(uint32 &lastLineNum, uint16 &lastColumnNum) const { + if (FileNum() != 0 && LineNum() != 0) { + if (Column() != 0 && (LineNum() != lastLineNum || Column() != lastColumnNum)) { + DumpLocWithCol(); + lastLineNum = LineNum(); + lastColumnNum = Column(); + } else if (LineNum() != lastLineNum) { + DumpLocWithLine(); + lastLineNum = LineNum(); + } + } + } + + void DumpLocWithLine() const { + LogInfo::MapleLogger() << "LOC " << FileNum() << " " << LineNum() << '\n'; + } + + void DumpLocWithCol() const { + LogInfo::MapleLogger() << "LOC " << FileNum() << " " << LineNum() << " " << Column() << '\n'; + } + + std::string DumpLocWithColToString() const { + std::stringstream ss; + ss << "LOC " << FileNum() << " " << LineNum() << " " << Column(); + return ss.str(); + } + + private: + union { + struct { + uint16 fileNum; + uint16 column : 12; + uint16 stmtBegin : 1; + uint16 bbBegin : 1; + uint16 unused : 2; + } fileColumn; + uint32 word0; + } u; + uint32 lineNum; // line number of original src file, like foo.java + uint32 mplLineNum; // line number of mpl file +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_SRC_POSITION_H diff --git a/ecmascript/mapleall/maple_ir/include/tokens.h b/ecmascript/mapleall/maple_ir/include/tokens.h new file mode 100644 index 0000000000000000000000000000000000000000..f78767e13b7191c58815a085a0e9d38f4d4ab251 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/include/tokens.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_TOKENS_H +#define MAPLE_IR_INCLUDE_TOKENS_H + +namespace maple { +enum TokenKind { + TK_invalid, +// keywords from this file +#define KEYWORD(STR) TK_##STR, +#include "keywords.def" +#undef KEYWORD + // non-keywords starting here + // constants + TK_intconst, + TK_floatconst, + TK_doubleconst, + // local name + TK_lname, + // global name + TK_gname, + // function name + TK_fname, + // pseudo register + TK_preg, + // special register + TK_specialreg, + // parent field + TK_prntfield, + // type parameter name + TK_typeparam, + // misc. + TK_newline, + TK_lparen, // ( + TK_rparen, // ) + TK_lbrace, // { + TK_rbrace, // } + TK_lbrack, // [ + TK_rbrack, // ] + TK_langle, // < + TK_rangle, // > + TK_eqsign, // = + TK_coma, // , + TK_dotdotdot, // ... 
+  TK_colon, // :
+  TK_asterisk, // *
+  TK_string, // a literal string enclosed between "
+  TK_eof
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_TOKENS_H
diff --git a/ecmascript/mapleall/maple_ir/include/types_def.h b/ecmascript/mapleall/maple_ir/include/types_def.h
new file mode 100644
index 0000000000000000000000000000000000000000..df71899b2e766b82f0e91d81b9596c7fadaba070
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/types_def.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLE_IR_INCLUDE_TYPES_DEF_H
+#define MAPLE_IR_INCLUDE_TYPES_DEF_H
+
+// NOTE: Since we already committed to -std=c++0x, we should eventually use the
+// standard definitions in the <cstdint> and <limits> headers rather than
+// reinventing our own primitive types.
+#include <cstdint>
+#include <cstddef>
+#include <functional>
+#include "mpl_number.h"
+
+namespace maple {
+// Let's keep the following definitions so that existing code will continue to work.
+using int8 = std::int8_t;
+using int16 = std::int16_t;
+using int32 = std::int32_t;
+using int64 = std::int64_t;
+using uint8 = std::uint8_t;
+using uint16 = std::uint16_t;
+using uint32 = std::uint32_t;
+using uint64 = std::uint64_t;
+class StIdx { // scope nesting level + symbol table index
+ public:
+  union un {
+    struct {
+      uint32 idx : 24;
+      uint8 scope; // scope level, with the global scope at level 1
+    } scopeIdx;
+
+    uint32 fullIdx;
+  };
+
+  StIdx() {
+    u.fullIdx = 0;
+  }
+
+  StIdx(uint32 level, uint32 i) {
+    u.scopeIdx.scope = level;
+    u.scopeIdx.idx = i;
+  }
+
+  StIdx(uint32 fidx) {
+    u.fullIdx = fidx;
+  }
+
+  ~StIdx() = default;
+
+  uint32 Idx() const {
+    return u.scopeIdx.idx;
+  }
+
+  void SetIdx(uint32 idx) {
+    u.scopeIdx.idx = idx;
+  }
+
+  uint32 Scope() const {
+    return u.scopeIdx.scope;
+  }
+
+  void SetScope(uint32 scpe) {
+    u.scopeIdx.scope = static_cast<uint8>(scpe);
+  }
+
+  uint32 FullIdx() const {
+    return u.fullIdx;
+  }
+
+  void SetFullIdx(uint32 idx) {
+    u.fullIdx = idx;
+  }
+
+  bool Islocal() const {
+    return u.scopeIdx.scope > 1;
+  }
+
+  bool IsGlobal() const {
+    return u.scopeIdx.scope == 1;
+  }
+
+  bool operator==(const StIdx &x) const {
+    return u.fullIdx == x.u.fullIdx;
+  }
+
+  bool operator!=(const StIdx &x) const {
+    return !(*this == x);
+  }
+
+  bool operator<(const StIdx &x) const {
+    return u.fullIdx < x.u.fullIdx;
+  }
+
+ private:
+  un u;
+};
+
+using LabelIdx = uint32;
+using phyRegIdx = uint64;
+using OfstRegIdx = uint64;
+using LabelIDOrder = uint32;
+using PUIdx = uint32;
+using PregIdx = int32;
+using ExprIdx = int32;
+using FieldID = int32;
+
+class TypeTag;
+using TyIdx = utils::Index<TypeTag, uint32>; // global type table index
+
+class GStrTag;
+using GStrIdx = utils::Index<GStrTag, uint32>; // global string table index
+
+class UStrTag;
+using UStrIdx = utils::Index<UStrTag, uint32>; // user string table index (from the conststr opcode)
+
+class U16StrTag;
+using U16StrIdx = utils::Index<U16StrTag, uint32>; // user string table index (from the conststr opcode)
+
+const TyIdx kInitTyIdx = TyIdx(0);
+const TyIdx kNoneTyIdx = TyIdx(UINT32_MAX);
+
+enum SSALevel : uint8 {
+  kSSAInvalid = 0x00,
+  kSSATopLevel = 0x01, // ssa only for local top-level is valid
+  kSSAAddrTaken = 0x02, // ssa only for addr-taken is valid
+  kSSAMemory = kSSATopLevel | kSSAAddrTaken, // ssa for both top-level and addr-taken is valid
+  kSSAHSSA = 0x04, // hssa is valid
+};
+
+constexpr uint8 kOperandNumUnary = 1;
+constexpr uint8 kOperandNumBinary = 2;
+constexpr uint8 kOperandNumTernary = 3;
+} // namespace maple
+namespace std {
+template<> // function-template-specialization
+class hash<maple::StIdx> {
+ public:
+  size_t operator()(const maple::StIdx &x) const {
+    std::size_t seed = 0;
+    hash_combine(seed, x.Scope());
+    hash_combine(seed, x.Idx());
+    return seed;
+  }
+};
+}
+#endif // MAPLE_IR_INCLUDE_TYPES_DEF_H
diff --git a/ecmascript/mapleall/maple_ir/include/unary_op.def b/ecmascript/mapleall/maple_ir/include/unary_op.def
new file mode 100644
index 0000000000000000000000000000000000000000..7c7b072e1c4d86b6fa8e5a48320f2fb7592d09bf
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/unary_op.def
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+UNARYOP(abs)
+UNARYOP(bnot)
+UNARYOP(lnot)
+UNARYOP(neg)
+UNARYOP(recip)
+UNARYOP(sqrt)
+UNARYOP(sext)
+UNARYOP(zext)
+UNARYOP(extractbits)
+UNARYOP(alloca)
+UNARYOP(malloc)
+UNARYOP(gcmallocjarray)
+UNARYOP(gcpermallocjarray)
diff --git a/ecmascript/mapleall/maple_ir/include/verification.h b/ecmascript/mapleall/maple_ir/include/verification.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c0c3eff813f5c1d566bbe389455942e8f623535
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/verification.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEIR_VERIFICATION_PHASE_H +#define MAPLEIR_VERIFICATION_PHASE_H +#include "class_hierarchy.h" +#include "verify_pragma_info.h" + +namespace maple { +using ClassVerifyPragmas = MapleUnorderedMap>; + +class VerifyResult { + public: + VerifyResult(const MIRModule &module, const KlassHierarchy &klassHierarchy, MemPool &memPool) + : module(module), + klassHierarchy(klassHierarchy), + allocator(&memPool), + classesCorrectness(allocator.Adapter()), + classesPragma(allocator.Adapter()) {} + + ~VerifyResult() = default; + + const KlassHierarchy &GetKlassHierarchy() const { + return klassHierarchy; + } + + const MIRModule &GetMIRModule() const { + return module; + } + + const MIRFunction *GetCurrentFunction() const { + return module.GetFunctionList().front(); + } + + const std::string &GetCurrentClassName() const { + return GetCurrentFunction()->GetClassType()->GetName(); + } + + const ClassVerifyPragmas &GetDeferredClassesPragma() const { + return classesPragma; + } + + void AddPragmaVerifyError(const std::string &className, std::string errMsg); + void AddPragmaAssignableCheck(const std::string &className, std::string fromType, std::string toType); + void AddPragmaExtendFinalCheck(const std::string &className); + void AddPragmaOverrideFinalCheck(const std::string &className); + + const MapleUnorderedMap &GetResultMap() const { + return classesCorrectness; + } + void SetClassCorrectness(const std::string &className, bool result) { + classesCorrectness[className] = result; + } + + bool HasErrorNotDeferred() const { + for (auto &classResult : classesCorrectness) { + if (!classResult.second) { + if (classesPragma.find(classResult.first) == classesPragma.end()) { + // Verify result is not OK, but has no deferred check or verify error in runtime + return true; + } + } + } + return false; + } + + private: + bool HasVerifyError(const std::vector &pragmaInfoPtrVec) const; + bool HasSamePragmaInfo(const std::vector &pragmaInfoPtrVec, + const VerifyPragmaInfo &verifyPragmaInfo) const; + + const MIRModule &module; + const KlassHierarchy &klassHierarchy; + MapleAllocator allocator; + // classesCorrectness, correctness is true only if the class is verified OK + MapleUnorderedMap classesCorrectness; + // classesPragma + ClassVerifyPragmas classesPragma; +}; + +class VerificationPhaseResult : public AnalysisResult { + public: + VerificationPhaseResult(MemPool &mp, const VerifyResult &verifyResult) + : AnalysisResult(&mp), verifyResult(verifyResult) {} + ~VerificationPhaseResult() = default; + + const ClassVerifyPragmas &GetDeferredClassesPragma() const { + return verifyResult.GetDeferredClassesPragma(); + } + + private: + const VerifyResult &verifyResult; +}; + +#ifdef NOT_USED +class DoVerification : public ModulePhase { + public: + explicit DoVerification(ModulePhaseID id) : ModulePhase(id) {} + + AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override; + std::string PhaseName() const override { + return "verification"; + } + + ~DoVerification() = default; + + private: + void VerifyModule(MIRModule &module, VerifyResult &result) const; + void DeferredCheckFinalClassAndMethod(VerifyResult &result) const; + bool IsLazyBindingOrDecouple(const KlassHierarchy &klassHierarchy) const; + bool NeedRuntimeFinalCheck(const KlassHierarchy &klassHierarchy, const std::string &className) const; + void CheckExtendFinalClass(VerifyResult &result) const; +}; +#endif +} // namespace maple +#endif // MAPLEIR_VERIFICATION_PHASE_H diff --git a/ecmascript/mapleall/maple_ir/include/verify_annotation.h 
diff --git a/ecmascript/mapleall/maple_ir/include/verify_annotation.h b/ecmascript/mapleall/maple_ir/include/verify_annotation.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac19180d4801351b8255db63885113742cecda2a
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/verify_annotation.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEIR_VERIFY_ANNOTATION_H
+#define MAPLEIR_VERIFY_ANNOTATION_H
+#include "mir_module.h"
+#include "mir_type.h"
+#include "verify_pragma_info.h"
+
+namespace maple {
+void AddVerfAnnoThrowVerifyError(MIRModule &md, const ThrowVerifyErrorPragma &info, MIRStructType &clsType);
+void AddVerfAnnoAssignableCheck(MIRModule &md,
+                                std::vector<const AssignableCheckPragma*> &info,
+                                MIRStructType &clsType);
+void AddVerfAnnoExtendFinalCheck(MIRModule &md, MIRStructType &clsType);
+void AddVerfAnnoOverrideFinalCheck(MIRModule &md, MIRStructType &clsType);
+}  // namespace maple
+#endif  // MAPLEIR_VERIFY_ANNOTATION_H
\ No newline at end of file
diff --git a/ecmascript/mapleall/maple_ir/include/verify_mark.h b/ecmascript/mapleall/maple_ir/include/verify_mark.h
new file mode 100644
index 0000000000000000000000000000000000000000..6fb72a498e3f0cb0b53c52faf056f53e526d8c72
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/verify_mark.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEALL_VERIFY_MARK_H
+#define MAPLEALL_VERIFY_MARK_H
+#include "class_hierarchy.h"
+#include "verify_pragma_info.h"
+
+namespace maple {
+#ifdef NOT_USED
+class DoVerifyMark : public ModulePhase {
+ public:
+  explicit DoVerifyMark(ModulePhaseID id) : ModulePhase(id) {}
+
+  AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override;
+
+  std::string PhaseName() const override {
+    return "verifymark";
+  }
+
+  ~DoVerifyMark() override = default;
+
+ private:
+  void AddAnnotations(MIRModule &module, const Klass &klass,
+                      const std::vector<const VerifyPragmaInfo*> &pragmaInfoVec);
+};
+#endif
+}  // namespace maple
+#endif  // MAPLEALL_VERIFY_MARK_H
\ No newline at end of file
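These emitters consume the pragma records declared in verify_pragma_info.h, which follows next. A trimmed re-model of that hierarchy, just enough to show the GetPragmaType() dispatch; the real classes live in the header below:

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

enum PragmaInfoType { kThrowVerifyError, kAssignableCheck, kExtendFinalCheck, kOverrideFinalCheck };

class VerifyPragmaInfo {
 public:
  virtual ~VerifyPragmaInfo() = default;
  virtual PragmaInfoType GetPragmaType() const = 0;
  bool IsAssignableCheck() const { return GetPragmaType() == kAssignableCheck; }
};

class AssignableCheckPragma : public VerifyPragmaInfo {
 public:
  AssignableCheckPragma(std::string from, std::string to)
      : fromType(std::move(from)), toType(std::move(to)) {}
  PragmaInfoType GetPragmaType() const override { return kAssignableCheck; }
  const std::string &GetFromType() const { return fromType; }
  const std::string &GetToType() const { return toType; }

 private:
  std::string fromType;
  std::string toType;
};

int main() {
  std::vector<std::unique_ptr<VerifyPragmaInfo>> pragmas;
  pragmas.push_back(std::make_unique<AssignableCheckPragma>("LChild;", "LParent;"));
  for (const auto &p : pragmas) {
    if (p->IsAssignableCheck()) {  // dispatch purely on the pragma kind
      const auto *check = static_cast<const AssignableCheckPragma*>(p.get());
      std::cout << "defer assignable check: " << check->GetFromType()
                << " -> " << check->GetToType() << '\n';
    }
  }
  return 0;
}
```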
diff --git a/ecmascript/mapleall/maple_ir/include/verify_pragma_info.h b/ecmascript/mapleall/maple_ir/include/verify_pragma_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fe43caf005d35f8f0457cc1e62b944ab2f7f9c8
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/include/verify_pragma_info.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEIR_VERIFY_PRAGMA_INFO_H
+#define MAPLEIR_VERIFY_PRAGMA_INFO_H
+#include <string>
+#include <utility>
+
+namespace maple {
+enum PragmaInfoType {
+  kThrowVerifyError,
+  kAssignableCheck,
+  kExtendFinalCheck,
+  kOverrideFinalCheck
+};
+
+class VerifyPragmaInfo {
+ public:
+  VerifyPragmaInfo() = default;
+  virtual ~VerifyPragmaInfo() = default;
+
+  virtual PragmaInfoType GetPragmaType() const = 0;
+  bool IsEqualTo(const VerifyPragmaInfo &pragmaInfo) const {
+    return GetPragmaType() == pragmaInfo.GetPragmaType();
+  }
+  bool IsVerifyError() const {
+    return GetPragmaType() == kThrowVerifyError;
+  }
+  bool IsAssignableCheck() const {
+    return GetPragmaType() == kAssignableCheck;
+  }
+  bool IsExtendFinalCheck() const {
+    return GetPragmaType() == kExtendFinalCheck;
+  }
+  bool IsOverrideFinalCheck() const {
+    return GetPragmaType() == kOverrideFinalCheck;
+  }
+};
+
+class ThrowVerifyErrorPragma : public VerifyPragmaInfo {
+ public:
+  explicit ThrowVerifyErrorPragma(std::string errorMessage)
+      : VerifyPragmaInfo(),
+        errorMessage(std::move(errorMessage)) {}
+  ~ThrowVerifyErrorPragma() = default;
+
+  PragmaInfoType GetPragmaType() const override {
+    return kThrowVerifyError;
+  }
+
+  const std::string &GetMessage() const {
+    return errorMessage;
+  }
+
+ private:
+  std::string errorMessage;
+};
+
+class AssignableCheckPragma : public VerifyPragmaInfo {
+ public:
+  AssignableCheckPragma(std::string fromType, std::string toType)
+      : VerifyPragmaInfo(),
+        fromType(std::move(fromType)),
+        toType(std::move(toType)) {}
+  ~AssignableCheckPragma() = default;
+
+  PragmaInfoType GetPragmaType() const override {
+    return kAssignableCheck;
+  }
+
+  bool IsEqualTo(const AssignableCheckPragma &pragma) const {
+    return fromType == pragma.GetFromType() && toType == pragma.GetToType();
+  }
+
+  const std::string &GetFromType() const {
+    return fromType;
+  }
+
+  const std::string &GetToType() const {
+    return toType;
+  }
+
+ private:
+  std::string fromType;
+  std::string toType;
+};
+
+class ExtendFinalCheckPragma : public VerifyPragmaInfo {
+ public:
+  ExtendFinalCheckPragma() : VerifyPragmaInfo() {}
+  ~ExtendFinalCheckPragma() = default;
+
+  PragmaInfoType GetPragmaType() const override {
+    return kExtendFinalCheck;
+  }
+};
+
+class OverrideFinalCheckPragma : public VerifyPragmaInfo {
+ public:
+  OverrideFinalCheckPragma() : VerifyPragmaInfo() {}
+  ~OverrideFinalCheckPragma() = default;
+
+  PragmaInfoType GetPragmaType() const override {
+    return kOverrideFinalCheck;
+  }
+};
+}  // namespace maple
+#endif  // MAPLEIR_VERIFY_PRAGMA_INFO_H
diff --git a/ecmascript/mapleall/maple_ir/src/bin_func_export.cpp b/ecmascript/mapleall/maple_ir/src/bin_func_export.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2015da5d32f34c251e4d802e3be384e49a22ec35
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/src/bin_func_export.cpp
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan Permissive Software License v2.
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "mir_function.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" +#include "bin_mplt.h" +#include +#include + +using namespace std; +namespace maple { +void BinaryMplExport::OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString) { + if (!mod.IsWithDbgInfo()) { + Write(0); + return; + } + WriteNum(infoVector.size()); + for (uint32 i = 0; i < infoVector.size(); i++) { + OutputStr(infoVector[i].first); + WriteNum(infoVectorIsString[i] ? 1 : 0); + if (!infoVectorIsString[i]) { + WriteNum(infoVector[i].second); + } else { + OutputStr(GStrIdx(infoVector[i].second)); + } + } +} + +void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) { + WriteNum(kBinFuncIdInfoStart); + WriteNum(func->GetPuidxOrigin()); // the funcid + OutputInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(func->GetFrameSize()); + } +} + +void BinaryMplExport::OutputBaseNode(const BaseNode *b) { + Write(static_cast(b->GetOpCode())); + Write(static_cast(b->GetPrimType())); +} + +void BinaryMplExport::OutputLocalSymbol(MIRSymbol *sym) { + std::unordered_map::iterator it = localSymMark.find(sym); + if (it != localSymMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinSymbol); + OutputStr(sym->GetNameStrIdx()); + WriteNum(sym->GetSKind()); + WriteNum(sym->GetStorageClass()); + size_t mark = localSymMark.size(); + localSymMark[sym] = mark; + OutputTypeAttrs(sym->GetAttrs()); + WriteNum(static_cast(sym->GetIsTmp())); + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + OutputSrcPos(sym->GetSrcPosition()); + } + OutputType(sym->GetTyIdx()); + if (sym->GetSKind() == kStPreg) { + OutputPreg(sym->GetPreg()); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + OutputConst(sym->GetKonst()); + } else if (sym->GetSKind() == kStFunc) { + OutputFuncViaSym(sym->GetFunction()->GetPuidx()); + } else { + CHECK_FATAL(false, "should not used"); + } +} + +void BinaryMplExport::OutputPreg(MIRPreg *preg) { + if (preg->GetPregNo() < 0) { + WriteNum(kBinSpecialReg); + Write(static_cast(-preg->GetPregNo())); + return; + } + std::unordered_map::iterator it = localPregMark.find(preg); + if (it != localPregMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinPreg); + Write(static_cast(preg->GetPrimType())); + size_t mark = localPregMark.size(); + localPregMark[preg] = mark; +} + +void BinaryMplExport::OutputLabel(LabelIdx lidx) { + std::unordered_map::iterator it = labelMark.find(lidx); + if (it != labelMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinLabel); + size_t mark = labelMark.size(); + labelMark[lidx] = mark; +} + +void BinaryMplExport::OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab) { + WriteNum(kBinTypenameStart); + WriteNum(static_cast(typeNameTab->Size())); + for (std::pair it : typeNameTab->GetGStrIdxToTyIdxMap()) { + OutputStr(it.first); + OutputType(it.second); + } +} + +void BinaryMplExport::OutputFormalsStIdx(MIRFunction *func) { + 
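+  // Each formal is written with OutputLocalSymbol, so formals share the
+  // mark/back-reference table used for all locals: a symbol's first occurrence
+  // emits a full kBinSymbol record, later occurrences emit only the negative mark.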
WriteNum(kBinFormalStart); + WriteNum(func->GetFormalDefVec().size()); + for (FormalDef formalDef : func->GetFormalDefVec()) { + OutputLocalSymbol(formalDef.formalSym); + } +} + +void BinaryMplExport::OutputAliasMap(MapleMap &aliasVarMap) { + WriteNum(kBinAliasMapStart); + WriteInt(static_cast(aliasVarMap.size())); + for (std::pair it : aliasVarMap) { + OutputStr(it.first); + OutputStr(it.second.mplStrIdx); + OutputType(it.second.tyIdx); + OutputStr(it.second.sigStrIdx); + } +} + +void BinaryMplExport::OutputFuncViaSym(PUIdx puIdx) { + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + OutputSymbol(funcSt); +} + +void BinaryMplExport::OutputExpression(BaseNode *e) { + OutputBaseNode(e); + switch (e->GetOpCode()) { + // leaf + case OP_constval: { + MIRConst *constVal = static_cast(e)->GetConstVal(); + OutputConst(constVal); + return; + } + case OP_conststr: { + UStrIdx strIdx = static_cast(e)->GetStrIdx(); + OutputUsrStr(strIdx); + return; + } + case OP_addroflabel: { + AddroflabelNode *lNode = static_cast(e); + OutputLabel(lNode->GetOffset()); + return; + } + case OP_addroffunc: { + AddroffuncNode *addrNode = static_cast(e); + OutputFuncViaSym(addrNode->GetPUIdx()); + return; + } + case OP_sizeoftype: { + SizeoftypeNode *sot = static_cast(e); + OutputType(sot->GetTyIdx()); + return; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: + case OP_dreadoff: { + StIdx stIdx; + if (e->GetOpCode() == OP_addrof || e->GetOpCode() == OP_dread) { + AddrofNode *drNode = static_cast(e); + WriteNum(drNode->GetFieldID()); + stIdx = drNode->GetStIdx(); + } else { + DreadoffNode *droff = static_cast(e); + WriteNum(droff->offset); + stIdx = droff->stIdx; + } + WriteNum(stIdx.Scope()); + if (stIdx.Islocal()) { + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } else { + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } + return; + } + case OP_regread: { + RegreadNode *regreadNode = static_cast(e); + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(regreadNode->GetRegIdx()); + OutputPreg(preg); + return; + } + case OP_gcmalloc: + case OP_gcpermalloc: + case OP_stackmalloc: { + GCMallocNode *gcNode = static_cast(e); + OutputType(gcNode->GetTyIdx()); + return; + } + // unary + case OP_ceil: + case OP_cvt: + case OP_floor: + case OP_trunc: { + TypeCvtNode *typecvtNode = static_cast(e); + Write(static_cast(typecvtNode->FromType())); + break; + } + case OP_retype: { + RetypeNode *retypeNode = static_cast(e); + OutputType(retypeNode->GetTyIdx()); + break; + } + case OP_iread: + case OP_iaddrof: { + IreadNode *irNode = static_cast(e); + OutputType(irNode->GetTyIdx()); + WriteNum(irNode->GetFieldID()); + break; + } + case OP_ireadoff: { + IreadoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_ireadfpoff: { + IreadFPoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_sext: + case OP_zext: + case OP_extractbits: { + ExtractbitsNode *extNode = static_cast(e); + Write(extNode->GetBitsOffset()); + Write(extNode->GetBitsSize()); + break; + } + case OP_depositbits: { + DepositbitsNode *dbNode = static_cast(e); + Write(dbNode->GetBitsOffset()); + Write(dbNode->GetBitsSize()); + break; + } + case OP_gcmallocjarray: + case OP_gcpermallocjarray: { + JarrayMallocNode *gcNode = static_cast(e); + OutputType(gcNode->GetTyIdx()); + break; + } + // binary + case OP_sub: + case OP_mul: + case 
OP_div: + case OP_rem: + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_cior: + case OP_land: + case OP_lior: + case OP_add: { + break; + } + case OP_eq: + case OP_ne: + case OP_lt: + case OP_gt: + case OP_le: + case OP_ge: + case OP_cmpg: + case OP_cmpl: + case OP_cmp: { + CompareNode *cmpNode = static_cast(e); + Write(static_cast(cmpNode->GetOpndType())); + break; + } + case OP_resolveinterfacefunc: + case OP_resolvevirtualfunc: { + ResolveFuncNode *rsNode = static_cast(e); + OutputFuncViaSym(rsNode->GetPuIdx()); + break; + } + // ternary + case OP_select: { + break; + } + // nary + case OP_array: { + ArrayNode *arrNode = static_cast(e); + OutputType(arrNode->GetTyIdx()); + Write(static_cast(arrNode->GetBoundsCheck())); + WriteNum(static_cast(arrNode->NumOpnds())); + break; + } + case OP_intrinsicop: { + IntrinsicopNode *intrnNode = static_cast(e); + WriteNum(intrnNode->GetIntrinsic()); + WriteNum(static_cast(intrnNode->NumOpnds())); + break; + } + case OP_intrinsicopwithtype: { + IntrinsicopNode *intrnNode = static_cast(e); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + WriteNum(static_cast(intrnNode->NumOpnds())); + break; + } + default: + break; + } + for (uint32 i = 0; i < e->NumOpnds(); ++i) { + OutputExpression(e->Opnd(i)); + } +} + +static SrcPosition lastOutputSrcPosition; + +void BinaryMplExport::OutputSrcPos(const SrcPosition &pos) { + if (!mod.IsWithDbgInfo()) { + return; + } + if (pos.FileNum() == 0 || pos.LineNum() == 0) { // error case, so output 0 + WriteNum(lastOutputSrcPosition.RawData()); + WriteNum(lastOutputSrcPosition.LineNum()); + return; + } + WriteNum(pos.RawData()); + WriteNum(pos.LineNum()); + lastOutputSrcPosition = pos; +} + +void BinaryMplExport::OutputReturnValues(const CallReturnVector *retv) { + WriteNum(kBinReturnvals); + WriteNum(static_cast(retv->size())); + for (uint32 i = 0; i < retv->size(); i++) { + RegFieldPair rfp = (*retv)[i].second; + if (rfp.IsReg()) { + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(rfp.GetPregIdx()); + OutputPreg(preg); + } else { + WriteNum(0); + WriteNum((rfp.GetFieldID())); + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol((*retv)[i].first)); + } + } +} + +void BinaryMplExport::OutputBlockNode(BlockNode *block) { + WriteNum(kBinNodeBlock); + if (!block->GetStmtNodes().empty()) { + OutputSrcPos(block->GetSrcPos()); + } else { + OutputSrcPos(SrcPosition()); // output 0 + } + int32 num = 0; + uint64 idx = buf.size(); + ExpandFourBuffSize(); // place holder, Fixup later + for (StmtNode *s = block->GetFirst(); s; s = s->GetNext()) { + bool doneWithOpnds = false; + OutputSrcPos(s->GetSrcPos()); + WriteNum(s->GetOpCode()); + switch (s->GetOpCode()) { + case OP_dassign: + case OP_dassignoff: { + StIdx stIdx; + if (s->GetOpCode() == OP_dassign) { + DassignNode *dass = static_cast(s); + WriteNum(dass->GetFieldID()); + stIdx = dass->GetStIdx(); + } else { + DassignoffNode *dassoff = static_cast(s); + WriteNum(dassoff->GetPrimType()); + WriteNum(dassoff->offset); + stIdx = dassoff->stIdx; + } + WriteNum(stIdx.Scope()); + if (stIdx.Islocal()) { + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } else { + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } + break; + } + case OP_regassign: { + RegassignNode *rass = static_cast(s); + Write(static_cast(rass->GetPrimType())); + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(rass->GetRegIdx()); + OutputPreg(preg); + break; 
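+      // Every statement record in this block has the same shape: source
+      // position, opcode, opcode-specific payload (this switch), then the
+      // operand expressions, unless the case set doneWithOpnds itself.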
+ } + case OP_iassign: { + IassignNode *iass = static_cast(s); + OutputType(iass->GetTyIdx()); + WriteNum(iass->GetFieldID()); + break; + } + case OP_iassignoff: { + IassignoffNode *iassoff = static_cast(s); + Write(static_cast(iassoff->GetPrimType())); + WriteNum(iassoff->GetOffset()); + break; + } + case OP_iassignspoff: + case OP_iassignfpoff: { + IassignFPoffNode *iassfpoff = static_cast(s); + Write(static_cast(iassfpoff->GetPrimType())); + WriteNum(iassfpoff->GetOffset()); + break; + } + case OP_blkassignoff: { + BlkassignoffNode *bass = static_cast(s); + int32 offsetAlign = (bass->offset << 4) | bass->alignLog2; + WriteNum(offsetAlign); + WriteNum(bass->blockSize); + break; + } + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + if (s->GetOpCode() == OP_polymorphiccall) { + OutputType(static_cast(callnode)->GetTyIdx()); + } + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputReturnValues(&callnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_polymorphiccallassigned: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputType(callnode->GetTyIdx()); + OutputReturnValues(&callnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_icallproto: + case OP_icall: { + IcallNode *icallnode = static_cast(s); + OutputType(icallnode->GetRetTyIdx()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_icallprotoassigned: + case OP_icallassigned: { + IcallNode *icallnode = static_cast(s); + OutputType(icallnode->GetRetTyIdx()); + OutputReturnValues(&icallnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccall: + case OP_xintrinsiccall: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputReturnValues(&intrnNode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + OutputReturnValues(&intrnNode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_syncenter: + case OP_syncexit: + case OP_return: { + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_jscatch: + case OP_cppcatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_retsub: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstorestore: + case OP_membarstoreload: { + break; + } + case OP_eval: + case OP_throw: + case OP_free: + case OP_decref: + case 
OP_incref: + case OP_decrefreset: + CASE_OP_ASSERT_NONNULL + case OP_igoto: { + break; + } + case OP_label: { + LabelNode *lNode = static_cast(s); + OutputLabel(lNode->GetLabelIdx()); + break; + } + case OP_goto: + case OP_gosub: { + GotoNode *gtoNode = static_cast(s); + OutputLabel(gtoNode->GetOffset()); + break; + } + case OP_brfalse: + case OP_brtrue: { + CondGotoNode *cgotoNode = static_cast(s); + OutputLabel(cgotoNode->GetOffset()); + break; + } + case OP_switch: { + SwitchNode *swNode = static_cast(s); + OutputLabel(swNode->GetDefaultLabel()); + WriteNum(static_cast(swNode->GetSwitchTable().size())); + for (CasePair cpair : swNode->GetSwitchTable()) { + WriteNum(cpair.first); + OutputLabel(cpair.second); + } + break; + } + case OP_rangegoto: { + RangeGotoNode *rgoto = static_cast(s); + WriteNum(rgoto->GetTagOffset()); + WriteNum(static_cast(rgoto->GetRangeGotoTable().size())); + for (SmallCasePair cpair : rgoto->GetRangeGotoTable()) { + WriteNum(cpair.first); + OutputLabel(cpair.second); + } + break; + } + case OP_jstry: { + JsTryNode *tryNode = static_cast(s); + OutputLabel(tryNode->GetCatchOffset()); + OutputLabel(tryNode->GetFinallyOffset()); + break; + } + case OP_cpptry: + case OP_try: { + TryNode *tryNode = static_cast(s); + WriteNum(static_cast(tryNode->GetOffsetsCount())); + for (LabelIdx lidx : tryNode->GetOffsets()) { + OutputLabel(lidx); + } + break; + } + case OP_catch: { + CatchNode *catchNode = static_cast(s); + WriteNum(static_cast(catchNode->GetExceptionTyIdxVec().size())); + for (TyIdx tidx : catchNode->GetExceptionTyIdxVec()) { + OutputType(tidx); + } + break; + } + case OP_comment: { + string str(static_cast(s)->GetComment().c_str()); + WriteAsciiStr(str); + break; + } + case OP_dowhile: + case OP_while: { + WhileStmtNode *whileNode = static_cast(s); + OutputBlockNode(whileNode->GetBody()); + OutputExpression(whileNode->Opnd()); + doneWithOpnds = true; + break; + } + case OP_if: { + IfStmtNode *ifNode = static_cast(s); + bool hasElsePart = ifNode->GetElsePart() != nullptr; + WriteNum(static_cast(hasElsePart)); + OutputBlockNode(ifNode->GetThenPart()); + if (hasElsePart) { + OutputBlockNode(ifNode->GetElsePart()); + } + OutputExpression(ifNode->Opnd()); + doneWithOpnds = true; + break; + } + case OP_block: { + BlockNode *blockNode = static_cast(s); + OutputBlockNode(blockNode); + doneWithOpnds = true; + break; + } + case OP_asm: { + AsmNode *asmNode = static_cast(s); + WriteNum(asmNode->qualifiers); + string str(asmNode->asmString.c_str()); + WriteAsciiStr(str); + // the outputs + size_t count = asmNode->asmOutputs.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputUsrStr(asmNode->outputConstraints[i]); + } + OutputReturnValues(&asmNode->asmOutputs); + // the clobber list + count = asmNode->clobberList.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputUsrStr(asmNode->clobberList[i]); + } + // the labels + count = asmNode->gotoLabels.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputLabel(asmNode->gotoLabels[i]); + } + // the inputs + WriteNum(asmNode->NumOpnds()); + for (uint8 i = 0; i < asmNode->numOpnds; ++i) { + OutputUsrStr(asmNode->inputConstraints[i]); + } + break; + } + default: + CHECK_FATAL(false, "Unhandled opcode %d", s->GetOpCode()); + break; + } + num++; + if (!doneWithOpnds) { + for (uint32 i = 0; i < s->NumOpnds(); ++i) { + OutputExpression(s->Opnd(i)); + } + } + } + Fixup(idx, num); +} + +void BinaryMplExport::WriteFunctionBodyField(uint64 
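+    // Like the statement count in OutputBlockNode above, this function reserves
+    // four-byte placeholders (ExpandFourBuffSize) and back-patches them with
+    // Fixup once the total size and the function count are finally known.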
contentIdx, std::unordered_set *dumpFuncSet) { + Fixup(contentIdx, static_cast(buf.size())); + // LogInfo::MapleLogger() << "Write FunctionBody Field " << std::endl; + WriteNum(kBinFunctionBodyStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); /// total size of this field to ~BIN_FUNCTIONBODY_START + uint64 outFunctionBodySizeIdx = buf.size(); + ExpandFourBuffSize(); /// size of outFunctionBody + int32 size = 0; + + if (not2mplt) { + for (MIRFunction *func : GetMIRModule().GetFunctionList()) { + curFunc = func; + if (func->GetAttr(FUNCATTR_optimized)) { + continue; + } + if (func->GetCodeMemPool() == nullptr || func->GetBody() == nullptr) { + continue; + } + if (dumpFuncSet != nullptr && !dumpFuncSet->empty()) { + // output only if this func matches any name in *dumpFuncSet + const std::string &name = func->GetName(); + bool matched = false; + for (std::string elem : *dumpFuncSet) { + if (name.find(elem.c_str()) != string::npos) { + matched = true; + break; + } + } + if (!matched) { + continue; + } + } + localSymMark.clear(); + localSymMark[nullptr] = 0; + localPregMark.clear(); + localPregMark[nullptr] = 0; + labelMark.clear(); + labelMark[0] = 0; + OutputFunction(func->GetPuidx()); + CHECK_FATAL(func->GetBody() != nullptr, "WriteFunctionBodyField: no function body"); + OutputFuncIdInfo(func); + OutputLocalTypeNameTab(func->GetTypeNameTab()); + OutputFormalsStIdx(func); + if (mod.GetFlavor() < kMmpl) { + OutputAliasMap(func->GetAliasVarMap()); + } + lastOutputSrcPosition = SrcPosition(); + OutputBlockNode(func->GetBody()); + size++; + } + } + + Fixup(totalSizeIdx, static_cast(buf.size() - totalSizeIdx)); + Fixup(outFunctionBodySizeIdx, size); + return; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/bin_func_import.cpp b/ecmascript/mapleall/maple_ir/src/bin_func_import.cpp new file mode 100644 index 0000000000000000000000000000000000000000..27b86b1e7a362d363c8eb020bfc2dcc851572aaa --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/bin_func_import.cpp @@ -0,0 +1,937 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +#include "bin_mpl_export.h" +#include "bin_mpl_import.h" +#include "mir_function.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" +using namespace std; + +namespace maple { +constexpr uint32 kOffset4bit = 4; +void BinaryMplImport::ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString) { + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + GStrIdx gStrIdx = ImportStr(); + bool isstring = (ReadNum() != 0); + infoVectorIsString.push_back(isstring); + if (isstring) { + GStrIdx fieldval = ImportStr(); + infoVector.emplace_back(MIRInfoPair(gStrIdx, fieldval.GetIdx())); + } else { + auto fieldval = static_cast(ReadNum()); + infoVector.emplace_back(MIRInfoPair(gStrIdx, fieldval)); + } + } +} + +void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinFuncIdInfoStart, "kBinFuncIdInfoStart expected"); + func->SetPuidxOrigin(static_cast(ReadNum())); + ImportInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + func->SetFrameSize(static_cast(ReadNum())); + } +} + +void BinaryMplImport::ImportBaseNode(Opcode &o, PrimType &typ) { + o = static_cast(Read()); + typ = static_cast(Read()); +} + +MIRSymbol *BinaryMplImport::ImportLocalSymbol(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localSymTab.size(), "index out of bounds"); + return localSymTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol in ImportLocalSymbol()"); + MIRSymbol *sym = func->GetSymTab()->CreateSymbol(kScopeLocal); + localSymTab.push_back(sym); + sym->SetNameStrIdx(ImportStr()); + (void)func->GetSymTab()->AddToStringSymbolMap(*sym); + sym->SetSKind(static_cast(ReadNum())); + sym->SetStorageClass(static_cast(ReadNum())); + sym->SetAttrs(ImportTypeAttrs()); + sym->SetIsTmp(ReadNum() != 0); + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + ImportSrcPos(sym->GetSrcPosition()); + } + sym->SetTyIdx(ImportType()); + if (sym->GetSKind() == kStPreg) { + PregIdx pregidx = ImportPreg(func); + MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(pregidx); + sym->SetPreg(preg); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + sym->SetKonst(ImportConst(func)); + } else if (sym->GetSKind() == kStFunc) { + PUIdx puIdx = ImportFuncViaSym(func); + sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)); + } + return sym; +} + +PregIdx BinaryMplImport::ImportPreg(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return 0; + } + if (tag == kBinSpecialReg) { + return -Read(); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localPregTab.size(), "index out of bounds"); + return localPregTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinPreg, "expecting kBinPreg in ImportPreg()"); + + PrimType primType = static_cast(Read()); + PregIdx pidx = func->GetPregTab()->CreatePreg(primType); + localPregTab.push_back(pidx); + return pidx; +} + +LabelIdx BinaryMplImport::ImportLabel(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return 0; + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localLabelTab.size(), "index out of bounds"); + return localLabelTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinLabel, "kBinLabel expected in ImportLabel()"); + + LabelIdx lidx = func->GetLabelTab()->CreateLabel(); + localLabelTab.push_back(lidx); + return lidx; +} + +void 
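+// ImportLocalSymbol/ImportPreg/ImportLabel above mirror the exporter's mark
+// scheme: a negative tag indexes localSymTab/localPregTab/localLabelTab, while
+// a positive kBin* tag creates and appends a new entry, keeping the importer's
+// numbering in lockstep with the exporter's.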
BinaryMplImport::ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinTypenameStart, "kBinTypenameStart expected in ImportLocalTypeNameTable()"); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + GStrIdx strIdx = ImportStr(); + TyIdx tyIdx = ImportType(); + typeNameTab->SetGStrIdxToTyIdx(strIdx, tyIdx); + } +} + +void BinaryMplImport::ImportFormalsStIdx(MIRFunction *func) { + auto tag = ReadNum(); + CHECK_FATAL(tag == kBinFormalStart, "kBinFormalStart expected in ImportFormalsStIdx()"); + auto size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + func->GetFormalDefVec()[static_cast(i)].formalSym = ImportLocalSymbol(func); + } +} + +void BinaryMplImport::ImportAliasMap(MIRFunction *func) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinAliasMapStart, "kBinAliasMapStart expected in ImportAliasMap()"); + int32 size = ReadInt(); + for (int32 i = 0; i < size; ++i) { + MIRAliasVars aliasvars; + GStrIdx strIdx = ImportStr(); + aliasvars.mplStrIdx = ImportStr(); + aliasvars.tyIdx = ImportType(); + (void)ImportStr(); // not assigning to mimic parser + func->GetAliasVarMap()[strIdx] = aliasvars; + } +} + +PUIdx BinaryMplImport::ImportFuncViaSym(MIRFunction *func) { + MIRSymbol *sym = InSymbol(func); + MIRFunction *f = sym->GetFunction(); + return f->GetPuidx(); +} + +BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { + Opcode op; + PrimType typ; + ImportBaseNode(op, typ); + switch (op) { + // leaf + case OP_constval: { + MIRConst *constv = ImportConst(func); + ConstvalNode *constNode = mod.CurFuncCodeMemPool()->New(constv); + constNode->SetPrimType(typ); + return constNode; + } + case OP_conststr: { + UStrIdx strIdx = ImportUsrStr(); + ConststrNode *constNode = mod.CurFuncCodeMemPool()->New(typ, strIdx); + constNode->SetPrimType(typ); + return constNode; + } + case OP_addroflabel: { + AddroflabelNode *alabNode = mod.CurFuncCodeMemPool()->New(); + alabNode->SetOffset(ImportLabel(func)); + alabNode->SetPrimType(typ); + (void)func->GetLabelTab()->addrTakenLabels.insert(alabNode->GetOffset()); + return alabNode; + } + case OP_addroffunc: { + PUIdx puIdx = ImportFuncViaSym(func); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx]; + f->GetFuncSymbol()->SetAppearsInCode(true); + AddroffuncNode *addrNode = mod.CurFuncCodeMemPool()->New(typ, puIdx); + return addrNode; + } + case OP_sizeoftype: { + TyIdx tidx = ImportType(); + SizeoftypeNode *sot = mod.CurFuncCodeMemPool()->New(tidx); + return sot; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: + case OP_dreadoff: { + int32 num = static_cast(ReadNum()); + StIdx stIdx; + stIdx.SetScope(static_cast(ReadNum())); + MIRSymbol *sym = nullptr; + if (stIdx.Islocal()) { + sym = ImportLocalSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); + } else { + sym = InSymbol(nullptr); + CHECK_FATAL(sym != nullptr, "null ptr check"); + if (op == OP_addrof) { + sym->SetHasPotentialAssignment(); + } + } + stIdx.SetIdx(sym->GetStIdx().Idx()); + if (op == OP_addrof || op == OP_dread) { + AddrofNode *drNode = mod.CurFuncCodeMemPool()->New(op); + drNode->SetPrimType(typ); + drNode->SetStIdx(stIdx); + drNode->SetFieldID(num); + return drNode; + } else { + DreadoffNode *dreadoff = mod.CurFuncCodeMemPool()->New(op); + dreadoff->SetPrimType(typ); + dreadoff->stIdx = stIdx; + dreadoff->offset = num; + return dreadoff; + } + } + case OP_regread: { + RegreadNode *regreadNode = mod.CurFuncCodeMemPool()->New(); + regreadNode->SetRegIdx(ImportPreg(func)); + 
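+      // Each ImportExpression case consumes exactly what its OutputExpression
+      // counterpart wrote, in the same pre-order: header, payload, then operands.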
regreadNode->SetPrimType(typ); + return regreadNode; + } + case OP_gcmalloc: + case OP_gcpermalloc: + case OP_stackmalloc: { + TyIdx tyIdx = ImportType(); + GCMallocNode *gcNode = mod.CurFuncCodeMemPool()->New(op, typ, tyIdx); + return gcNode; + } + // unary + case OP_abs: + case OP_bnot: + case OP_lnot: + case OP_neg: + case OP_recip: + case OP_sqrt: + case OP_alloca: + case OP_malloc: { + UnaryNode *unNode = mod.CurFuncCodeMemPool()->New(op, typ); + unNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return unNode; + } + case OP_ceil: + case OP_cvt: + case OP_floor: + case OP_trunc: { + TypeCvtNode *typecvtNode = mod.CurFuncCodeMemPool()->New(op, typ); + typecvtNode->SetFromType(static_cast(Read())); + typecvtNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return typecvtNode; + } + case OP_retype: { + RetypeNode *retypeNode = mod.CurFuncCodeMemPool()->New(typ); + retypeNode->SetTyIdx(ImportType()); + retypeNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return retypeNode; + } + case OP_iread: + case OP_iaddrof: { + IreadNode *irNode = mod.CurFuncCodeMemPool()->New(op, typ); + irNode->SetTyIdx(ImportType()); + irNode->SetFieldID(static_cast(ReadNum())); + irNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return irNode; + } + case OP_ireadoff: { + int32 ofst = static_cast(ReadNum()); + IreadoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + irNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return irNode; + } + case OP_ireadfpoff: { + int32 ofst = static_cast(ReadNum()); + IreadFPoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + return irNode; + } + case OP_sext: + case OP_zext: + case OP_extractbits: { + ExtractbitsNode *extNode = mod.CurFuncCodeMemPool()->New(op, typ); + extNode->SetBitsOffset(Read()); + extNode->SetBitsSize(Read()); + extNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return extNode; + } + case OP_depositbits: { + DepositbitsNode *dbNode = mod.CurFuncCodeMemPool()->New(op, typ); + dbNode->SetBitsOffset(static_cast(ReadNum())); + dbNode->SetBitsSize(static_cast(ReadNum())); + dbNode->SetOpnd(ImportExpression(func), kFirstOpnd); + dbNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return dbNode; + } + case OP_gcmallocjarray: + case OP_gcpermallocjarray: { + JarrayMallocNode *gcNode = mod.CurFuncCodeMemPool()->New(op, typ); + gcNode->SetTyIdx(ImportType()); + gcNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return gcNode; + } + // binary + case OP_sub: + case OP_mul: + case OP_div: + case OP_rem: + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_cior: + case OP_land: + case OP_lior: + case OP_add: { + BinaryNode *binNode = mod.CurFuncCodeMemPool()->New(op, typ); + binNode->SetOpnd(ImportExpression(func), kFirstOpnd); + binNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return binNode; + } + case OP_eq: + case OP_ne: + case OP_lt: + case OP_gt: + case OP_le: + case OP_ge: + case OP_cmpg: + case OP_cmpl: + case OP_cmp: { + CompareNode *cmpNode = mod.CurFuncCodeMemPool()->New(op, typ); + cmpNode->SetOpndType(static_cast(Read())); + cmpNode->SetOpnd(ImportExpression(func), kFirstOpnd); + cmpNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return cmpNode; + } + case OP_resolveinterfacefunc: + case OP_resolvevirtualfunc: { + ResolveFuncNode *rsNode = mod.CurFuncCodeMemPool()->New(op, typ); + rsNode->SetPUIdx(ImportFuncViaSym(func)); + rsNode->SetOpnd(ImportExpression(func), kFirstOpnd); + 
rsNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return rsNode; + } + // ternary + case OP_select: { + TernaryNode *tNode = mod.CurFuncCodeMemPool()->New(op, typ); + tNode->SetOpnd(ImportExpression(func), kFirstOpnd); + tNode->SetOpnd(ImportExpression(func), kSecondOpnd); + tNode->SetOpnd(ImportExpression(func), kThirdOpnd); + return tNode; + } + // nary + case OP_array: { + TyIdx tidx = ImportType(); + bool boundsCheck = static_cast(Read()); + ArrayNode *arrNode = + mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), typ, tidx, boundsCheck); + auto n = static_cast(ReadNum()); + for (uint32 i = 0; i < n; ++i) { + arrNode->GetNopnd().push_back(ImportExpression(func)); + } + arrNode->SetNumOpnds(static_cast(arrNode->GetNopnd().size())); + return arrNode; + } + case OP_intrinsicop: { + IntrinsicopNode *intrnNode = mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), op, typ); + intrnNode->SetIntrinsic(static_cast(ReadNum())); + auto n = static_cast(ReadNum()); + for (uint32 i = 0; i < n; ++i) { + intrnNode->GetNopnd().push_back(ImportExpression(func)); + } + intrnNode->SetNumOpnds(static_cast(intrnNode->GetNopnd().size())); + return intrnNode; + } + case OP_intrinsicopwithtype: { + IntrinsicopNode *intrnNode = + mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), OP_intrinsicopwithtype, typ); + intrnNode->SetIntrinsic((MIRIntrinsicID)ReadNum()); + intrnNode->SetTyIdx(ImportType()); + auto n = static_cast(ReadNum()); + for (uint32 i = 0; i < n; ++i) { + intrnNode->GetNopnd().push_back(ImportExpression(func)); + } + intrnNode->SetNumOpnds(static_cast(intrnNode->GetNopnd().size())); + return intrnNode; + } + default: + CHECK_FATAL(false, "Unhandled op %d", op); + break; + } +} + +void BinaryMplImport::ImportSrcPos(SrcPosition &pos) { + if (!mod.IsWithDbgInfo()) { + return; + } + pos.SetRawData(static_cast(ReadNum())); + pos.SetLineNum(static_cast(ReadNum())); +} + +void BinaryMplImport::ImportReturnValues(MIRFunction *func, CallReturnVector *retv) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinReturnvals, "expecting return values"); + auto size = static_cast(ReadNum()); + for (uint32 i = 0; i < size; ++i) { + RegFieldPair rfp; + rfp.SetPregIdx(ImportPreg(func)); + if (rfp.IsReg()) { + retv->push_back(std::make_pair(StIdx(), rfp)); + continue; + } + rfp.SetFieldID(static_cast(ReadNum())); + MIRSymbol *lsym = ImportLocalSymbol(func); + CHECK_FATAL(lsym != nullptr, "null ptr check"); + retv->push_back(std::make_pair(lsym->GetStIdx(), rfp)); + if (lsym->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lsym->GetTyIdx()); + CHECK_FATAL(ty->GetKind() == kTypePointer, "Pointer type expected for L_STR prefix"); + MIRPtrType tempType(static_cast(ty)->GetPointedTyIdx(), PTY_ptr); + TyIdx newTyidx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&tempType); + lsym->SetTyIdx(newTyidx); + } + } +} + +BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { + int64 tag = ReadNum(); + DEBUG_ASSERT(tag == kBinNodeBlock, "expecting a BlockNode"); + + BlockNode *block = func->GetCodeMemPool()->New(); + Opcode op; + uint8 numOpr; + ImportSrcPos(block->GetSrcPos()); + int32 size = ReadInt(); + for (int32 k = 0; k < size; ++k) { + SrcPosition thesrcPosition; + ImportSrcPos(thesrcPosition); + op = static_cast(ReadNum()); + StmtNode *stmt = nullptr; + switch (op) { + case OP_dassign: + case OP_dassignoff: { + PrimType primType = PTY_void; + if (op == OP_dassignoff) { + primType = static_cast(ReadNum()); + } + int32 num = 
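+        // dassign and dassignoff share one record layout: fieldID (or offset)
+        // first, then scope, then a local or global symbol reference; num holds
+        // whichever of fieldID/offset was encoded.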
static_cast(ReadNum()); + StIdx stIdx; + stIdx.SetScope(static_cast(ReadNum())); + MIRSymbol *sym = nullptr; + if (stIdx.Islocal()) { + sym = ImportLocalSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); + } else { + sym = InSymbol(nullptr); + CHECK_FATAL(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + stIdx.SetIdx(sym->GetStIdx().Idx()); + if (op == OP_dassign) { + DassignNode *s = func->GetCodeMemPool()->New(); + s->SetStIdx(stIdx); + s->SetFieldID(num); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + } else { + DassignoffNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType(primType); + s->stIdx = stIdx; + s->offset = num; + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + } + break; + } + case OP_regassign: { + RegassignNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType(static_cast(Read())); + s->SetRegIdx(ImportPreg(func)); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_iassign: { + IassignNode *s = func->GetCodeMemPool()->New(); + s->SetTyIdx(ImportType()); + s->SetFieldID(static_cast(ReadNum())); + s->SetAddrExpr(ImportExpression(func)); + s->SetRHS(ImportExpression(func)); + stmt = s; + break; + } + case OP_iassignoff: { + IassignoffNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType((PrimType)Read()); + s->SetOffset(static_cast(ReadNum())); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + s->SetOpnd(ImportExpression(func), kSecondOpnd); + stmt = s; + break; + } + case OP_iassignspoff: + case OP_iassignfpoff: { + IassignFPoffNode *s = func->GetCodeMemPool()->New(op); + s->SetPrimType(static_cast(Read())); + s->SetOffset(static_cast(ReadNum())); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_blkassignoff: { + BlkassignoffNode *s = func->GetCodeMemPool()->New(); + int32 offsetAlign = static_cast(ReadNum()); + s->offset = offsetAlign >> kOffset4bit; + s->alignLog2 = offsetAlign & 0xf; + s->blockSize = static_cast(ReadNum()); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + s->SetOpnd(ImportExpression(func), kSecondOpnd); + stmt = s; + break; + } + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + const auto &calleeName = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx())->GetName(); + if (calleeName == "setjmp") { + func->SetHasSetjmp(); + } + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = 
s; + break; + } + case OP_polymorphiccall: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); + s->SetTyIdx(ImportType()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_polymorphiccallassigned: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); + s->SetTyIdx(ImportType()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_icallproto: + case OP_icall: { + IcallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetRetTyIdx(ImportType()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_icallprotoassigned: + case OP_icallassigned: { + IcallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetRetTyIdx(ImportType()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_intrinsiccall: + case OP_xintrinsiccall: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic(static_cast(ReadNum())); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + if (s->GetReturnVec().size() == 1 && s->GetReturnVec()[0].first.Idx() != 0) { + MIRSymbol *retsymbol = func->GetSymTab()->GetSymbolFromStIdx(s->GetReturnVec()[0].first.Idx()); + MIRType *rettype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retsymbol->GetTyIdx()); + CHECK_FATAL(rettype != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned"); + s->SetPrimType(rettype->GetPrimType()); + } + stmt = s; + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetTyIdx(ImportType()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetTyIdx(ImportType()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + 
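+          // Operands are rebuilt in export order; afterwards a single non-preg
+          // return symbol also supplies the call's primType (see below).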
s->GetNopnd().push_back(ImportExpression(func)); + } + if (s->GetReturnVec().size() == 1 && s->GetReturnVec()[0].first.Idx() != 0) { + MIRSymbol *retsymbol = func->GetSymTab()->GetSymbolFromStIdx(s->GetReturnVec()[0].first.Idx()); + MIRType *rettype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retsymbol->GetTyIdx()); + CHECK_FATAL(rettype != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned"); + s->SetPrimType(rettype->GetPrimType()); + } + stmt = s; + break; + } + case OP_syncenter: + case OP_syncexit: + case OP_return: { + NaryStmtNode *s = func->GetCodeMemPool()->New(mod, op); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_jscatch: + case OP_cppcatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_retsub: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstorestore: + case OP_membarstoreload: { + stmt = mod.CurFuncCodeMemPool()->New(op); + break; + } + case OP_eval: + case OP_throw: + case OP_free: + case OP_decref: + case OP_incref: + case OP_decrefreset: + CASE_OP_ASSERT_NONNULL + case OP_igoto: { + UnaryStmtNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_label: { + LabelNode *s = mod.CurFuncCodeMemPool()->New(); + s->SetLabelIdx(ImportLabel(func)); + stmt = s; + break; + } + case OP_goto: + case OP_gosub: { + GotoNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetOffset(ImportLabel(func)); + stmt = s; + break; + } + case OP_brfalse: + case OP_brtrue: { + CondGotoNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetOffset(ImportLabel(func)); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_switch: { + SwitchNode *s = mod.CurFuncCodeMemPool()->New(mod); + s->SetDefaultLabel(ImportLabel(func)); + auto tagSize = static_cast(ReadNum()); + for (uint32 i = 0; i < tagSize; ++i) { + int64 casetag = ReadNum(); + LabelIdx lidx = ImportLabel(func); + CasePair cpair = std::make_pair(casetag, lidx); + s->GetSwitchTable().push_back(cpair); + } + s->SetSwitchOpnd(ImportExpression(func)); + stmt = s; + break; + } + case OP_rangegoto: { + RangeGotoNode *s = mod.CurFuncCodeMemPool()->New(mod); + s->SetTagOffset(static_cast(ReadNum())); + uint32 tagSize = static_cast(ReadNum()); + for (uint32 i = 0; i < tagSize; ++i) { + uint16 casetag = static_cast(ReadNum()); + LabelIdx lidx = ImportLabel(func); + s->AddRangeGoto(casetag, lidx); + } + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_jstry: { + JsTryNode *s = mod.CurFuncCodeMemPool()->New(); + s->SetCatchOffset(ImportLabel(func)); + s->SetFinallyOffset(ImportLabel(func)); + stmt = s; + break; + } + case OP_cpptry: + case OP_try: { + TryNode *s = mod.CurFuncCodeMemPool()->New(mod); + auto numLabels = static_cast(ReadNum()); + for (uint32 i = 0; i < numLabels; ++i) { + s->GetOffsets().push_back(ImportLabel(func)); + } + stmt = s; + break; + } + case OP_catch: { + CatchNode *s = mod.CurFuncCodeMemPool()->New(mod); + auto numTys = static_cast(ReadNum()); + for (uint32 i = 0; i < numTys; ++i) { + s->PushBack(ImportType()); + } + stmt = s; + break; + } + case OP_comment: { + CommentNode *s = mod.CurFuncCodeMemPool()->New(mod); + string str; + ReadAsciiStr(str); + s->SetComment(str); + stmt = s; + break; + } + case OP_dowhile: + case OP_while: { + WhileStmtNode *s = mod.CurFuncCodeMemPool()->New(op); + 
s->SetBody(ImportBlockNode(func)); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_if: { + IfStmtNode *s = mod.CurFuncCodeMemPool()->New(); + bool hasElsePart = (static_cast(ReadNum()) != kFirstOpnd); + s->SetThenPart(ImportBlockNode(func)); + if (hasElsePart) { + s->SetElsePart(ImportBlockNode(func)); + s->SetNumOpnds(kOperandNumTernary); + } + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_block: { + stmt = ImportBlockNode(func); + break; + } + case OP_asm: { + AsmNode *s = mod.CurFuncCodeMemPool()->New(&mod.GetCurFuncCodeMPAllocator()); + mod.CurFunction()->SetHasAsm(); + s->qualifiers = static_cast(ReadNum()); + string str; + ReadAsciiStr(str); + s->asmString = str; + // the outputs + auto count = static_cast(ReadNum()); + UStrIdx strIdx; + for (size_t i = 0; i < count; ++i) { + strIdx = ImportUsrStr(); + s->outputConstraints.push_back(strIdx); + } + ImportReturnValues(func, &s->asmOutputs); + // the clobber list + count = static_cast(ReadNum()); + for (size_t i = 0; i < count; ++i) { + strIdx = ImportUsrStr(); + s->clobberList.push_back(strIdx); + } + // the labels + count = static_cast(ReadNum()); + for (size_t i = 0; i < count; ++i) { + LabelIdx lidx = ImportLabel(func); + s->gotoLabels.push_back(lidx); + } + // the inputs + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + strIdx = ImportUsrStr(); + s->inputConstraints.push_back(strIdx); + const std::string &inStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strIdx); + if (inStr[0] == '+') { + s->SetHasWriteInputs(); + } + } + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + default: + CHECK_FATAL(false, "Unhandled opcode tag %d", tag); + break; + } + stmt->SetSrcPos(thesrcPosition); + block->AddStatement(stmt); + } + if (func != nullptr) { + func->SetBody(block); + } + return block; +} + +void BinaryMplImport::ReadFunctionBodyField() { + (void)ReadInt(); /// skip total size + int32 size = ReadInt(); + for (int64 i = 0; i < size; ++i) { + PUIdx puIdx = ImportFunction(); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + mod.SetCurFunction(fn); + fn->GetFuncSymbol()->SetAppearsInCode(true); + localSymTab.clear(); + localSymTab.push_back(nullptr); + localPregTab.clear(); + localPregTab.push_back(0); + localLabelTab.clear(); + localLabelTab.push_back(0); + + fn->AllocSymTab(); + fn->AllocPregTab(); + fn->AllocTypeNameTab(); + fn->AllocLabelTab(); + + ImportFuncIdInfo(fn); + ImportLocalTypeNameTable(fn->GetTypeNameTab()); + ImportFormalsStIdx(fn); + if (mod.GetFlavor() < kMmpl) { + ImportAliasMap(fn); + } + (void)ImportBlockNode(fn); + mod.AddFunction(fn); + } + return; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/bin_mpl_export.cpp b/ecmascript/mapleall/maple_ir/src/bin_mpl_export.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f303ef294767afb961c49ce064922ad6f7135d61 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/bin_mpl_export.cpp @@ -0,0 +1,1332 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "bin_mpl_export.h" +#include +#include +#include "mir_function.h" +#include "namemangler.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "bin_mplt.h" +#include "factory.h" + +namespace { +using namespace maple; +/* Storage location of field */ +constexpr uint32 kFirstField = 0; +constexpr uint32 kSecondField = 1; +constexpr uint32 kThirdField = 2; +constexpr uint32 kFourthField = 3; +constexpr int32 kFourthFieldInt = 3; +constexpr uint32 kFifthField = 4; +constexpr int32 kSixthFieldInt = 5; + +using OutputConstFactory = FunctionFactory; +using OutputTypeFactory = FunctionFactory; + +void OutputConstInt(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstInt); + mplExport.OutputConstBase(constVal); + mplExport.WriteNum(static_cast(constVal).GetExtValue()); +} + +void OutputConstAddrof(const MIRConst &constVal, BinaryMplExport &mplExport) { + const MIRAddrofConst &addrof = static_cast(constVal); + if (addrof.GetSymbolIndex().IsGlobal()) { + mplExport.WriteNum(kBinKindConstAddrof); + } else { + mplExport.WriteNum(kBinKindConstAddrofLocal); + } + mplExport.OutputConstBase(constVal); + if (addrof.GetSymbolIndex().IsGlobal()) { + mplExport.OutputSymbol(mplExport.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(addrof.GetSymbolIndex())); + } else { + mplExport.OutputLocalSymbol(mplExport.curFunc->GetLocalOrGlobalSymbol(addrof.GetSymbolIndex())); + } + mplExport.WriteNum(addrof.GetFieldID()); + mplExport.WriteNum(addrof.GetOffset()); +} + +void OutputConstAddrofFunc(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstAddrofFunc); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.OutputFunction(newConst.GetValue()); +} + +void OutputConstLbl(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstAddrofLabel); + mplExport.OutputConstBase(constVal); + const MIRLblConst &lblConst = static_cast(constVal); + mplExport.OutputLabel(lblConst.GetValue()); +} + +void OutputConstStr(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstStr); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.OutputUsrStr(newConst.GetValue()); +} + +void OutputConstStr16(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstStr16); + mplExport.OutputConstBase(constVal); + const auto &mirStr16 = static_cast(constVal); + std::u16string str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16.GetValue()); + std::string str; + (void)namemangler::UTF16ToUTF8(str, str16); + mplExport.WriteNum(str.length()); + for (char c : str) { + mplExport.Write(static_cast(c)); + } +} + +void OutputConstFloat(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstFloat); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.WriteNum(newConst.GetIntValue()); +} + +void OutputConstDouble(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstDouble); + 
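+  // Float and double payloads travel as their integer bit patterns
+  // (GetIntValue), so every value, including NaNs, round-trips exactly.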
mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.WriteNum(newConst.GetIntValue()); +} + +void OutputConstAgg(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstAgg); + mplExport.OutputConstBase(constVal); + const auto &aggConst = static_cast(constVal); + size_t size = aggConst.GetConstVec().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.WriteNum(aggConst.GetFieldIdItem(i)); + mplExport.OutputConst(aggConst.GetConstVecItem(i)); + } +} + +void OutputConstSt(MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstSt); + mplExport.OutputConstBase(constVal); + auto &stConst = static_cast(constVal); + size_t size = stConst.GetStVec().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.OutputSymbol(stConst.GetStVecItem(i)); + } + size = stConst.GetStOffsetVec().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.WriteNum(stConst.GetStOffsetVecItem(i)); + } +} + +static bool InitOutputConstFactory() { + RegisterFactoryFunction(kConstInt, OutputConstInt); + RegisterFactoryFunction(kConstAddrof, OutputConstAddrof); + RegisterFactoryFunction(kConstAddrofFunc, OutputConstAddrofFunc); + RegisterFactoryFunction(kConstLblConst, OutputConstLbl); + RegisterFactoryFunction(kConstStrConst, OutputConstStr); + RegisterFactoryFunction(kConstStr16Const, OutputConstStr16); + RegisterFactoryFunction(kConstFloatConst, OutputConstFloat); + RegisterFactoryFunction(kConstDoubleConst, OutputConstDouble); + RegisterFactoryFunction(kConstAggConst, OutputConstAgg); + RegisterFactoryFunction(kConstStConst, OutputConstSt); + return true; +} + +void OutputTypeScalar(const MIRType &ty, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindTypeScalar); + mplExport.OutputTypeBase(ty); +} + +void OutputTypePointer(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypePointer); + mplExport.OutputTypeBase(type); + mplExport.OutputTypeAttrs(type.GetTypeAttrs()); + mplExport.OutputType(type.GetPointedTyIdx()); +} + +void OutputTypeByName(const MIRType &ty, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindTypeByName); + mplExport.OutputTypeBase(ty); +} + +void OutputTypeFArray(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeFArray); + mplExport.OutputTypeBase(type); + mplExport.OutputType(type.GetElemTyIdx()); +} + +void OutputTypeJArray(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeJarray); + mplExport.OutputTypeBase(type); + mplExport.OutputType(type.GetElemTyIdx()); +} + +void OutputTypeArray(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeArray); + mplExport.OutputTypeBase(type); + mplExport.WriteNum(type.GetDim()); + for (uint16 i = 0; i < type.GetDim(); ++i) { + mplExport.WriteNum(type.GetSizeArrayItem(i)); + } + mplExport.OutputType(type.GetElemTyIdx()); + mplExport.OutputTypeAttrs(type.GetTypeAttrs()); +} + +void OutputTypeFunction(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeFunction); + mplExport.OutputTypeBase(type); + mplExport.OutputType(type.GetRetTyIdx()); + mplExport.WriteNum(type.funcAttrs.GetAttrFlag()); + size_t size = 
type.GetParamTypeList().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.OutputType(type.GetNthParamType(i)); + } + size = type.GetParamAttrsList().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.OutputTypeAttrs(type.GetNthParamAttrs(i)); + } +} + +void OutputTypeParam(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeParam); + mplExport.OutputTypeBase(type); +} + +void OutputTypeInstantVector(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeInstantVector); + mplExport.OutputTypeBase(type); + mplExport.WriteNum(ty.GetKind()); + mplExport.OutputTypePairs(type); +} + +void OutputTypeGenericInstant(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeGenericInstant); + mplExport.OutputTypeBase(type); + mplExport.OutputTypePairs(type); + mplExport.OutputType(type.GetGenericTyIdx()); +} + +void OutputTypeBitField(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeBitField); + mplExport.OutputTypeBase(type); + mplExport.WriteNum(type.GetFieldSize()); +} + +// for Struct/StructIncomplete/Union +void OutputTypeStruct(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeStruct); + mplExport.OutputTypeBase(type); + MIRTypeKind kind = ty.GetKind(); + if (type.IsImported()) { + CHECK_FATAL(ty.GetKind() != kTypeUnion, "Must be."); + kind = kTypeStructIncomplete; + } + mplExport.WriteNum(kind); + mplExport.OutputTypeAttrs(type.GetTypeAttrs()); + if (kind != kTypeStructIncomplete) { + mplExport.OutputStructTypeData(type); + } +} + +void OutputTypeClass(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeClass); + mplExport.OutputTypeBase(type); + MIRTypeKind kind = ty.GetKind(); + if (type.IsImported()) { + kind = kTypeClassIncomplete; + } + mplExport.WriteNum(kind); + if (kind != kTypeClassIncomplete) { + mplExport.OutputStructTypeData(type); + mplExport.OutputClassTypeData(type); + } +} + +void OutputTypeInterface(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeInterface); + mplExport.OutputTypeBase(type); + MIRTypeKind kind = ty.GetKind(); + if (type.IsImported()) { + kind = kTypeInterfaceIncomplete; + } + mplExport.WriteNum(kind); + if (kind != kTypeInterfaceIncomplete) { + mplExport.OutputStructTypeData(type); + mplExport.OutputInterfaceTypeData(type); + } +} + +void OutputTypeConstString(const MIRType &ty, BinaryMplExport&) { + DEBUG_ASSERT(false, "Type's kind not yet implemented: %d", ty.GetKind()); + (void)ty; +} + +static bool InitOutputTypeFactory() { + RegisterFactoryFunction(kTypeScalar, OutputTypeScalar); + RegisterFactoryFunction(kTypePointer, OutputTypePointer); + RegisterFactoryFunction(kTypeByName, OutputTypeByName); + RegisterFactoryFunction(kTypeFArray, OutputTypeFArray); + RegisterFactoryFunction(kTypeJArray, OutputTypeJArray); + RegisterFactoryFunction(kTypeArray, OutputTypeArray); + RegisterFactoryFunction(kTypeFunction, OutputTypeFunction); + RegisterFactoryFunction(kTypeParam, OutputTypeParam); + RegisterFactoryFunction(kTypeInstantVector, OutputTypeInstantVector); + RegisterFactoryFunction(kTypeGenericInstant, 
OutputTypeGenericInstant);
+  RegisterFactoryFunction(kTypeBitField, OutputTypeBitField);
+  RegisterFactoryFunction(kTypeStruct, OutputTypeStruct);
+  RegisterFactoryFunction(kTypeStructIncomplete, OutputTypeStruct);
+  RegisterFactoryFunction(kTypeUnion, OutputTypeStruct);
+  RegisterFactoryFunction(kTypeClass, OutputTypeClass);
+  RegisterFactoryFunction(kTypeClassIncomplete, OutputTypeClass);
+  RegisterFactoryFunction(kTypeInterface, OutputTypeInterface);
+  RegisterFactoryFunction(kTypeInterfaceIncomplete, OutputTypeInterface);
+  RegisterFactoryFunction(kTypeConstString, OutputTypeConstString);
+  return true;
+}
+}; // namespace
+
+namespace maple {
+int BinaryMplExport::typeMarkOffset = 0;
+
+BinaryMplExport::BinaryMplExport(MIRModule &md) : mod(md) {
+  bufI = 0;
+  Init();
+  (void)InitOutputConstFactory();
+  (void)InitOutputTypeFactory();
+  not2mplt = false;
+}
+
+uint8 BinaryMplExport::Read() {
+  CHECK_FATAL(bufI < buf.size(), "Index out of bound in BinaryMplExport::Read()");
+  return buf[bufI++];
+}
+
+// Little endian
+int32 BinaryMplExport::ReadInt() {
+  uint32 x0 = static_cast<uint32>(Read());
+  uint32 x1 = static_cast<uint32>(Read());
+  uint32 x2 = static_cast<uint32>(Read());
+  uint32 x3 = static_cast<uint32>(Read());
+  int32 x = static_cast<int32>((((((x3 << 8) + x2) << 8) + x1) << 8) + x0);
+  return x;
+}
+
+void BinaryMplExport::Write(uint8 b) {
+  buf.push_back(b);
+}
+
+// Little endian
+void BinaryMplExport::WriteInt(int32 x) {
+  Write(static_cast<uint8>(static_cast<uint32>(x) & 0xFF));
+  Write(static_cast<uint8>((static_cast<uint32>(x) >> 8) & 0xFF));
+  Write(static_cast<uint8>((static_cast<uint32>(x) >> 16) & 0xFF));
+  Write(static_cast<uint8>((static_cast<uint32>(x) >> 24) & 0xFF));
+}
+
+void BinaryMplExport::ExpandFourBuffSize() {
+  WriteInt(0);
+}
+
+void BinaryMplExport::Fixup(size_t i, int32 x) {
+  constexpr int fixupCount = 4;
+  CHECK(i <= buf.size() - fixupCount, "Index out of bound in BinaryMplExport::Fixup()");
+  buf[i] = static_cast<uint8>(static_cast<uint32>(x) & 0xFF);
+  buf[i + 1] = static_cast<uint8>((static_cast<uint32>(x) >> 8) & 0xFF);
+  buf[i + 2] = static_cast<uint8>((static_cast<uint32>(x) >> 16) & 0xFF);
+  buf[i + 3] = static_cast<uint8>((static_cast<uint32>(x) >> 24) & 0xFF);
+}
+
+void BinaryMplExport::WriteInt64(int64 x) {
+  WriteInt(static_cast<int32>(static_cast<uint64>(x) & 0xFFFFFFFF));
+  WriteInt(static_cast<int32>((static_cast<uint64>(x) >> 32) & 0xFFFFFFFF));
+}
+
+// LEB128
+void BinaryMplExport::WriteNum(int64 x) {
+  while (x < -0x40 || x >= 0x40) {
+    Write(static_cast<uint8>((static_cast<uint64>(x) & 0x7F) + 0x80));
+    x = x >> 7;  // This is a compression algorithm; do not cast int64 to uint64 before shifting. If we did,
+                 // a small negative number like -3 would take many bytes and we would lose the compression benefit.
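+    // Illustrative example (editor's addition, not from the original source): WriteNum(300)
+    // iterates once, emitting (300 & 0x7F) + 0x80 = 0xAC and leaving x = 300 >> 7 = 2; since 2 is
+    // inside [-0x40, 0x40), the loop exits and the final Write below emits 0x02, so the encoding
+    // is "AC 02". WriteNum(-3) never enters the loop and emits the single byte (-3) & 0x7F = 0x7D.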
+ } + Write(static_cast(static_cast(x) & 0x7F)); +} + +void BinaryMplExport::WriteAsciiStr(const std::string &str) { + WriteNum(static_cast(str.size())); + for (size_t i = 0; i < str.size(); ++i) { + Write(static_cast(str[i])); + } +} + +void BinaryMplExport::DumpBuf(const std::string &name) { + FILE *f = fopen(name.c_str(), "wb"); + if (f == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Error while creating the binary file: " << name << '\n'; + FATAL(kLncFatal, "Error while creating the binary file: %s\n", name.c_str()); + } + size_t size = buf.size(); + size_t k = fwrite(&buf[0], sizeof(uint8), size, f); + fclose(f); + if (k != size) { + LogInfo::MapleLogger(kLlErr) << "Error while writing the binary file: " << name << '\n'; + } +} + +void BinaryMplExport::OutputConstBase(const MIRConst &constVal) { + WriteNum(constVal.GetKind()); + OutputType(constVal.GetType().GetTypeIndex()); +} + +void BinaryMplExport::OutputConst(MIRConst *constVal) { + if (constVal == nullptr) { + WriteNum(0); + } else { + auto func = CreateProductFunction(constVal->GetKind()); + if (func != nullptr) { + func(*constVal, *this); + } + } +} + +void BinaryMplExport::OutputStr(const GStrIdx &gstr) { + if (gstr == 0u) { + WriteNum(0); + return; + } + + auto it = gStrMark.find(gstr); + if (it != gStrMark.end()) { + WriteNum(-(it->second)); + return; + } + + size_t mark = gStrMark.size(); + gStrMark[gstr] = mark; + WriteNum(kBinString); + DEBUG_ASSERT(GlobalTables::GetStrTable().StringTableSize() != 0, "Container check"); + WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(gstr)); +} + +void BinaryMplExport::OutputUsrStr(UStrIdx ustr) { + if (ustr == 0u) { + WriteNum(0); + return; + } + + auto it = uStrMark.find(ustr); + if (it != uStrMark.end()) { + WriteNum(-(it->second)); + return; + } + + size_t mark = uStrMark.size(); + uStrMark[ustr] = mark; + WriteNum(kBinUsrString); + WriteAsciiStr(GlobalTables::GetUStrTable().GetStringFromStrIdx(ustr)); +} + +void BinaryMplExport::OutputPragmaElement(const MIRPragmaElement &e) { + OutputStr(e.GetNameStrIdx()); + OutputStr(e.GetTypeStrIdx()); + WriteNum(e.GetType()); + + if (e.GetType() == kValueString || e.GetType() == kValueType || e.GetType() == kValueField || + e.GetType() == kValueMethod || e.GetType() == kValueEnum) { + OutputStr(GStrIdx(e.GetI32Val())); + } else { + WriteInt64(e.GetU64Val()); + } + size_t size = e.GetSubElemVec().size(); + WriteNum(size); + for (size_t i = 0; i < size; ++i) { + OutputPragmaElement(*(e.GetSubElement(i))); + } +} + +void BinaryMplExport::OutputPragma(const MIRPragma &p) { + WriteNum(p.GetKind()); + WriteNum(p.GetVisibility()); + OutputStr(p.GetStrIdx()); + OutputType(p.GetTyIdx()); + OutputType(p.GetTyIdxEx()); + WriteNum(p.GetParamNum()); + size_t size = p.GetElementVector().size(); + WriteNum(size); + for (size_t i = 0; i < size; ++i) { + OutputPragmaElement(*(p.GetNthElement(i))); + } +} + +void BinaryMplExport::OutputTypeBase(const MIRType &type) { + WriteNum(type.GetPrimType()); + OutputStr(type.GetNameStrIdx()); + WriteNum(type.IsNameIsLocal()); +} + +void BinaryMplExport::OutputFieldPair(const FieldPair &fp) { + OutputStr(fp.first); // GStrIdx + OutputType(fp.second.first); // TyIdx + FieldAttrs fa = fp.second.second; + WriteNum(fa.GetAttrFlag()); + WriteNum(fa.GetAlignValue()); + if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) && + (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) { + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fp.first); + MIRSymbol *fieldVar = 
mod.GetMIRBuilder()->GetGlobalDecl(fieldName); + if ((fieldVar != nullptr) && (fieldVar->GetKonst() != nullptr) && + (fieldVar->GetKonst()->GetKind() == kConstStr16Const)) { + WriteNum(kBinInitConst); + OutputConst(fieldVar->GetKonst()); + } else { + WriteNum(0); + } + } +} + +void BinaryMplExport::OutputMethodPair(const MethodPair &memPool) { + // use GStrIdx instead, StIdx will be created by ImportMethodPair + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(memPool.first.Idx()); + CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, can't get symbol! Check it!"); + WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(funcSt->GetNameStrIdx())); + OutputType(memPool.second.first); // TyIdx + WriteNum(memPool.second.second.GetAttrFlag()); // FuncAttrs +} + +void BinaryMplExport::OutputFieldsOfStruct(const FieldVector &fields) { + WriteNum(fields.size()); + for (const FieldPair &fp : fields) { + OutputFieldPair(fp); + } +} + +void BinaryMplExport::OutputMethodsOfStruct(const MethodVector &methods) { + WriteNum(methods.size()); + for (const MethodPair &memPool : methods) { + OutputMethodPair(memPool); + } +} + +void BinaryMplExport::OutputStructTypeData(const MIRStructType &type) { + OutputFieldsOfStruct(type.GetFields()); + OutputFieldsOfStruct(type.GetStaticFields()); + OutputFieldsOfStruct(type.GetParentFields()); + OutputMethodsOfStruct(type.GetMethods()); +} + +void BinaryMplExport::OutputImplementedInterfaces(const std::vector &interfaces) { + WriteNum(interfaces.size()); + for (const TyIdx &tyIdx : interfaces) { + OutputType(tyIdx); + } +} + +void BinaryMplExport::OutputInfoIsString(const std::vector &infoIsString) { + WriteNum(infoIsString.size()); + for (bool isString : infoIsString) { + WriteNum(static_cast(isString)); + } +} + +void BinaryMplExport::OutputInfo(const std::vector &info, const std::vector &infoIsString) { + size_t size = info.size(); + WriteNum(size); + for (size_t i = 0; i < size; ++i) { + OutputStr(info[i].first); // GStrIdx + if (infoIsString[i]) { + OutputStr(GStrIdx(info[i].second)); + } else { + WriteNum(info[i].second); + } + } +} + +void BinaryMplExport::OutputPragmaVec(const std::vector &pragmaVec) { + WriteNum(pragmaVec.size()); + for (MIRPragma *pragma : pragmaVec) { + OutputPragma(*pragma); + } +} + +void BinaryMplExport::OutputClassTypeData(const MIRClassType &type) { + OutputType(type.GetParentTyIdx()); + OutputImplementedInterfaces(type.GetInterfaceImplemented()); + OutputInfoIsString(type.GetInfoIsString()); + if (!inIPA) { + OutputInfo(type.GetInfo(), type.GetInfoIsString()); + OutputPragmaVec(type.GetPragmaVec()); + } +} + +void BinaryMplExport::OutputInterfaceTypeData(const MIRInterfaceType &type) { + OutputImplementedInterfaces(type.GetParentsTyIdx()); + OutputInfoIsString(type.GetInfoIsString()); + if (!inIPA) { + OutputInfo(type.GetInfo(), type.GetInfoIsString()); + OutputPragmaVec(type.GetPragmaVec()); + } +} + +void BinaryMplExport::Init() { + BinaryMplExport::typeMarkOffset = 0; + gStrMark.clear(); + uStrMark.clear(); + symMark.clear(); + funcMark.clear(); + typMark.clear(); + gStrMark[GStrIdx(0)] = 0; + uStrMark[UStrIdx(0)] = 0; + symMark[nullptr] = 0; + funcMark[nullptr] = 0; + eaNodeMark[nullptr] = 0; + curFunc = nullptr; + for (uint32 pti = static_cast(PTY_begin); pti < static_cast(PTY_end); ++pti) { + typMark[GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(pti))] = pti; + } +} + +void BinaryMplExport::OutputSymbol(MIRSymbol *sym) { + if (sym == nullptr) { + WriteNum(0); + return; + } + + 
std::unordered_map::iterator it = symMark.find(sym); + if (it != symMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinSymbol); + WriteNum(sym->GetScopeIdx()); + OutputStr(sym->GetNameStrIdx()); + OutputUsrStr(sym->sectionAttr); + OutputUsrStr(sym->GetAsmAttr()); + WriteNum(sym->GetSKind()); + WriteNum(sym->GetStorageClass()); + size_t mark = symMark.size(); + symMark[sym] = mark; + OutputTypeAttrs(sym->GetAttrs()); + WriteNum(sym->GetIsTmp() ? 1 : 0); + if (sym->GetSKind() == kStPreg) { + WriteNum(sym->GetPreg()->GetPregNo()); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + if (sym->GetKonst() != nullptr) { + sym->GetKonst()->SetType(*sym->GetType()); + } + OutputConst(sym->GetKonst()); + } else if (sym->GetSKind() == kStFunc) { + OutputFunction(sym->GetFunction()->GetPuidx()); + } else if (sym->GetSKind() == kStJavaClass || sym->GetSKind() == kStJavaInterface) { + } else { + CHECK_FATAL(false, "should not used"); + } + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + OutputSrcPos(sym->GetSrcPosition()); + } + OutputType(sym->GetTyIdx()); +} + +void BinaryMplExport::OutputFunction(PUIdx puIdx) { + if (puIdx == 0) { + WriteNum(0); + mod.SetCurFunction(nullptr); + return; + } + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + CHECK_FATAL(func != nullptr, "Cannot get MIRFunction."); + auto it = funcMark.find(func); + if (it != funcMark.end()) { + WriteNum(-it->second); + mod.SetCurFunction(func); + return; + } + size_t mark = funcMark.size(); + funcMark[func] = mark; + MIRFunction *savedFunc = mod.CurFunction(); + mod.SetCurFunction(func); + + WriteNum(kBinFunction); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, cannot get symbol! Check it!"); + OutputSymbol(funcSt); + OutputType(func->GetMIRFuncType()->GetTypeIndex()); + WriteNum(func->GetFuncAttrs().GetAttrFlag()); + + auto &attributes = func->GetFuncAttrs(); + if (attributes.GetAttr(FUNCATTR_constructor_priority)) { + WriteNum(attributes.GetConstructorPriority()); + } + + if (attributes.GetAttr(FUNCATTR_destructor_priority)) { + WriteNum(attributes.GetDestructorPriority()); + } + + WriteNum(func->GetFlag()); + OutputType(func->GetClassTyIdx()); + // output formal parameter information + WriteNum(static_cast(func->GetFormalDefVec().size())); + for (FormalDef formalDef : func->GetFormalDefVec()) { + OutputStr(formalDef.formalStrIdx); + OutputType(formalDef.formalTyIdx); + WriteNum(static_cast(formalDef.formalAttrs.GetAttrFlag())); + } + // store Side Effect for each func + if (func2SEMap) { + uint32 isSee = func->IsIpaSeen() == true ? 1 : 0; + uint32 isPure = func->IsPure() == true ? 1 : 0; + uint32 noDefArg = func->IsNoDefArgEffect() == true ? 1 : 0; + uint32 noDef = func->IsNoDefEffect() == true ? 1 : 0; + uint32 noRetGlobal = func->IsNoRetGlobal() == true ? 1 : 0; + uint32 noThr = func->IsNoThrowException() == true ? 1 : 0; + uint32 noRetArg = func->IsNoRetArg() == true ? 1 : 0; + uint32 noPriDef = func->IsNoPrivateDefEffect() == true ? 
1 : 0; + uint32 i = 0; + uint8 se = noThr << i++; + se |= noRetGlobal << i++; + se |= noDef << i++; + se |= noDefArg << i++; + se |= isPure << i++; + se |= isSee << i++; + se |= noRetArg << i++; + se |= noPriDef << i; + if ((*func2SEMap).find(func->GetNameStrIdx()) == (*func2SEMap).end()) { + (*func2SEMap)[func->GetNameStrIdx()] = se; + } else if ((*func2SEMap)[func->GetNameStrIdx()] != se) { + FATAL(kLncFatal, "It is a bug."); + } + } + mod.SetCurFunction(savedFunc); +} + +void BinaryMplExport::WriteStrField(uint64 contentIdx) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinStrStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_STR_START + size_t outStrSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutputStr + + int32 size = 0; + for (const auto &entity : GlobalTables::GetConstPool().GetConstU16StringPool()) { + MIRSymbol *sym = entity.second; + if (sym->IsLiteral()) { + OutputStr(sym->GetNameStrIdx()); + ++size; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outStrSizeIdx, size); + WriteNum(~kBinStrStart); +} + +void BinaryMplExport::WriteHeaderField(uint64 contentIdx) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinHeaderStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_IMPORT_START + WriteNum(mod.GetFlavor()); + WriteNum(mod.GetSrcLang()); + WriteNum(mod.GetID()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(mod.GetGlobalMemSize()); + WriteNum(mod.IsWithDbgInfo()); + } + WriteNum(mod.GetNumFuncs()); + WriteAsciiStr(mod.GetEntryFuncName()); + OutputInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); + + if (mod.IsWithDbgInfo()) { + WriteNum(static_cast(mod.GetSrcFileInfo().size())); + for (uint32 i = 0; i < mod.GetSrcFileInfo().size(); i++) { + OutputStr(mod.GetSrcFileInfo()[i].first); + WriteNum(mod.GetSrcFileInfo()[i].second); + } + } else { + Write(0); + } + + WriteNum(static_cast(mod.GetImportFiles().size())); + for (GStrIdx strIdx : mod.GetImportFiles()) { + OutputStr(strIdx); + } + + WriteNum(static_cast(mod.GetAsmDecls().size())); + for (MapleString mapleStr : mod.GetAsmDecls()) { + std::string str(mapleStr.c_str()); + WriteAsciiStr(str); + } + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinHeaderStart); + return; +} + +void BinaryMplExport::WriteTypeField(uint64 contentIdx, bool useClassList) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinTypeStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_TYPE_START + size_t outTypeSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutputType + int32 size = 0; + if (useClassList) { + for (uint32 tyIdx : mod.GetClassList()) { + TyIdx curTyidx(tyIdx); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curTyidx); + CHECK_FATAL(type != nullptr, "Pointer type is nullptr, cannot get type, check it!"); + if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + auto *structType = static_cast(type); + // skip imported class/interface and incomplete types + if (!structType->IsImported() && !structType->IsIncomplete()) { + OutputType(curTyidx); + ++size; + } + } + } + } else { + uint32 idx = GlobalTables::GetTypeTable().lastDefaultTyIdx.GetIdx(); + for (idx = idx + 1; idx < GlobalTables::GetTypeTable().GetTypeTableSize(); idx++) { + OutputType(TyIdx(idx)); + size++; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outTypeSizeIdx, size); + WriteNum(~kBinTypeStart); +} + +void 
BinaryMplExport::OutputCallInfo(CallInfo &callInfo) { + auto it = callInfoMark.find(callInfo.GetID()); + if (it != callInfoMark.end()) { + WriteNum(-(it->second)); + return; + } + WriteNum(kBinCallinfo); + size_t mark = callInfoMark.size(); + callInfoMark[callInfo.GetID()] = mark; + WriteNum(callInfo.GetCallType()); // call type + WriteInt(callInfo.GetLoopDepth()); + WriteInt(callInfo.GetID()); + callInfo.AreAllArgsLocal() ? Write(1) : Write(0); // All args are local variables or not. + OutputSymbol(callInfo.GetFunc()->GetFuncSymbol()); +} + +void BinaryMplExport::WriteCgField(uint64 contentIdx, const CallGraph *cg) { + if (contentIdx != 0) { + Fixup(contentIdx, buf.size()); + } + WriteNum(kBinCgStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_CG_START + size_t outcgSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutCG + int32 size = 0; + if (cg != nullptr) { + for (auto entry : cg->GetNodesMap()) { + MIRSymbol *methodSym = entry.first->GetFuncSymbol(); + WriteNum(kStartMethod); + OutputSymbol(methodSym); + size_t targetTyIdx = buf.size(); + ExpandFourBuffSize(); + int32 targSize = 0; + callInfoMark.clear(); + callInfoMark[0xffffffff] = 0; + for (const auto &callSite : entry.second->GetCallee()) { + OutputCallInfo(*(callSite.first)); + ++targSize; + } + Fixup(targetTyIdx, targSize); + WriteNum(~kStartMethod); + ++size; + } + } + + DEBUG_ASSERT((buf.size() - totalSizeIdx) <= 0xffffffff, "Integer overflow."); + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outcgSizeIdx, size); + WriteNum(~kBinCgStart); +} + +void BinaryMplExport::WriteSeField() { + DEBUG_ASSERT(func2SEMap != nullptr, "Expecting a func2SE map"); + WriteNum(kBinSeStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + size_t outseSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutSym + int32 size = 0; + + for (const auto &func2SE : *func2SEMap) { + uint8 se = func2SE.second; + if (static_cast(se)) { + OutputStr(func2SE.first); + Write(se); + if ((se & kPureFunc) == kPureFunc) { + const std::string &funcStr = GlobalTables::GetStrTable().GetStringFromStrIdx(func2SE.first); + auto *funcSymbol = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(funcStr)); + MIRFunction *func = (funcSymbol != nullptr) ? 
GetMIRModule().GetMIRBuilder()->GetFunctionFromSymbol(*funcSymbol) + : nullptr; + OutputType(func->GetReturnTyIdx()); + } + ++size; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outseSizeIdx, size); + WriteNum(~kBinSeStart); +} + +void BinaryMplExport::OutEaCgBaseNode(const EACGBaseNode &node, bool firstPart) { + if (firstPart) { + WriteNum(node.eaStatus); + WriteInt(static_cast(node.id)); + } else { + // in and out set in base node is not necessary to be outed + // start to out point-to set + size_t outP2SizeIdx = buf.size(); + WriteInt(0); + uint32 size = 0; + for (EACGBaseNode *outNode : node.GetPointsToSet()) { + OutEaCgNode(*outNode); + ++size; + } + Fixup(outP2SizeIdx, size); + // start to out in set + outP2SizeIdx = buf.size(); + WriteInt(0); + size = 0; + for (EACGBaseNode *outNode : node.GetInSet()) { + OutEaCgNode(*outNode); + ++size; + } + Fixup(outP2SizeIdx, size); + // start to out out set + outP2SizeIdx = buf.size(); + WriteInt(0); + size = 0; + for (EACGBaseNode *outNode : node.GetOutSet()) { + OutEaCgNode(*outNode); + ++size; + } + Fixup(outP2SizeIdx, size); + } +} + +void BinaryMplExport::OutEaCgObjNode(EACGObjectNode &obj) { + Write(uint8(obj.isPhantom)); + size_t outFieldSizeIdx = buf.size(); + WriteInt(0); + uint32 size = 0; + for (const auto &fieldNodePair : obj.fieldNodes) { + EACGBaseNode *fieldNode = fieldNodePair.second; + DEBUG_ASSERT(fieldNodePair.first == static_cast(fieldNode)->GetFieldID(), "Must be."); + OutEaCgNode(*fieldNode); + ++size; + } + Fixup(outFieldSizeIdx, size); + // start to out point by + outFieldSizeIdx = buf.size(); + WriteInt(0); + size = 0; + for (EACGBaseNode *node : obj.pointsBy) { + OutEaCgNode(*node); + ++size; + } + Fixup(outFieldSizeIdx, size); +} + +void BinaryMplExport::OutEaCgRefNode(const EACGRefNode &ref) { + Write(uint8(ref.isStaticField)); +} + +void BinaryMplExport::OutEaCgFieldNode(EACGFieldNode &field) { + WriteInt(field.GetFieldID()); + int32 size = 0; + size_t outFieldSizeIdx = buf.size(); + WriteInt(0); + for (EACGBaseNode *obj : field.belongsTo) { + OutEaCgNode(*obj); + ++size; + } + Fixup(outFieldSizeIdx, size); + Write(uint8(field.isPhantom)); +} + +void BinaryMplExport::OutEaCgActNode(const EACGActualNode &act) { + Write(uint8(act.isPhantom)); + Write(uint8(act.isReturn)); + Write(act.argIdx); + WriteInt(act.callSiteInfo); +} + +void BinaryMplExport::OutEaCgNode(EACGBaseNode &node) { + auto it = eaNodeMark.find(&node); + if (it != eaNodeMark.end()) { + WriteNum(-it->second); + return; + } + size_t mark = eaNodeMark.size(); + eaNodeMark[&node] = mark; + WriteNum(kBinEaCgNode); + WriteNum(node.kind); + OutEaCgBaseNode(node, true); + if (node.IsActualNode()) { + WriteNum(kBinEaCgActNode); + OutEaCgActNode(static_cast(node)); + } else if (node.IsFieldNode()) { + WriteNum(kBinEaCgFieldNode); + OutEaCgFieldNode(static_cast(node)); + } else if (node.IsObjectNode()) { + WriteNum(kBinEaCgObjNode); + OutEaCgObjNode(static_cast(node)); + } else if (node.IsReferenceNode()) { + WriteNum(kBinEaCgRefNode); + OutEaCgRefNode(static_cast(node)); + } else { + DEBUG_ASSERT(false, "Must be."); + } + OutEaCgBaseNode(node, false); + WriteNum(~kBinEaCgNode); +} + +void BinaryMplExport::WriteEaField(const CallGraph &cg) { + WriteNum(kBinEaStart); + uint64 totalSizeIdx = buf.size(); + WriteInt(0); + uint64 outeaSizeIdx = buf.size(); + WriteInt(0); + int32 size = 0; + for (auto cgNodePair : cg.GetNodesMap()) { + MIRFunction *func = cgNodePair.first; + if (func->GetEACG() == nullptr) { + continue; + } + EAConnectionGraph *eacg 
= func->GetEACG(); + DEBUG_ASSERT(eacg != nullptr, "Must be."); + OutputStr(eacg->GetFuncNameStrIdx()); + WriteInt(eacg->GetNodes().size()); + OutEaCgNode(*eacg->GetGlobalObject()); + uint64 outFunceaIdx = buf.size(); + WriteInt(0); + size_t funceaSize = 0; + for (EACGBaseNode *node : eacg->GetFuncArgNodes()) { + OutEaCgNode(*node); + ++funceaSize; + } + Fixup(outFunceaIdx, funceaSize); + ++size; + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outeaSizeIdx, size); + WriteNum(~kBinEaStart); +} + +void BinaryMplExport::WriteEaCgField(EAConnectionGraph *eaCg) { + if (eaCg == nullptr) { + WriteNum(~kBinEaCgStart); + return; + } + WriteNum(kBinEaCgStart); + size_t totalSizeIdx = buf.size(); + WriteInt(0); + // out this function's arg list + OutputStr(eaCg->GetFuncNameStrIdx()); + WriteInt(eaCg->GetNodes().size()); + OutEaCgNode(*eaCg->GetGlobalObject()); + size_t outNodeSizeIdx = buf.size(); + WriteInt(0); + size_t argNodeSize = 0; + for (EACGBaseNode *node : eaCg->GetFuncArgNodes()) { + OutEaCgNode(*node); + ++argNodeSize; + } + Fixup(outNodeSizeIdx, argNodeSize); + // out this function's call site's arg list + outNodeSizeIdx = buf.size(); + WriteInt(0); + size_t callSiteSize = 0; + for (auto nodePair : eaCg->GetCallSite2Nodes()) { + uint32 id = nodePair.first; + MapleVector *calleeArgNode = nodePair.second; + WriteInt(id); + size_t outCalleeArgSizeIdx = buf.size(); + WriteInt(0); + size_t calleeArgSize = 0; + for (EACGBaseNode *node : *calleeArgNode) { + OutEaCgNode(*node); + ++calleeArgSize; + } + Fixup(outCalleeArgSizeIdx, calleeArgSize); + ++callSiteSize; + } + Fixup(outNodeSizeIdx, callSiteSize); + + Fixup(totalSizeIdx, buf.size()-totalSizeIdx); + WriteNum(~kBinEaCgStart); +} + +void BinaryMplExport::WriteSymField(uint64 contentIdx) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinSymStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + uint64 outsymSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutSym + int32 size = 0; + + if (not2mplt) { + for (auto sit = GetMIRModule().GetSymbolDefOrder().begin(); + sit != GetMIRModule().GetSymbolDefOrder().end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(sit->Idx()); + DEBUG_ASSERT(s != nullptr, "null ptr check"); + // Verify: all wpofake variables should have been deleted from globaltable + DEBUG_ASSERT(!(s->IsWpoFakeParm() || s->IsWpoFakeRet()) || s->IsDeleted(), "wpofake var not deleted"); + MIRStorageClass storageClass = s->GetStorageClass(); + MIRSymKind sKind = s->GetSKind(); + if (s->IsDeleted() || storageClass == kScUnused || + (s->GetIsImported() && !s->GetAppearsInCode()) || + (sKind == kStFunc && (storageClass == kScExtern || !s->GetAppearsInCode()))) { + continue; + } + OutputSymbol(s); + size++; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outsymSizeIdx, size); + WriteNum(~kBinSymStart); + return; +} + +void BinaryMplExport::WriteContentField4mplt(int fieldNum, uint64 *fieldStartP) { + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinStrStart); + fieldStartP[0] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinTypeStart); + fieldStartP[1] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinCgStart); + fieldStartP[2] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void 
BinaryMplExport::WriteContentField4nonmplt(int fieldNum, uint64 *fieldStartP) { + CHECK_FATAL(fieldStartP != nullptr, "fieldStartP is null."); + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinHeaderStart); + fieldStartP[kFirstField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinSymStart); + fieldStartP[kSecondField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinFunctionBodyStart); + fieldStartP[kThirdField] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::WriteContentField4nonJava(int fieldNum, uint64 *fieldStartP) { + CHECK_FATAL(fieldStartP != nullptr, "fieldStartP is null."); + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinHeaderStart); + fieldStartP[kFirstField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinStrStart); + fieldStartP[kSecondField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinTypeStart); + fieldStartP[kThirdField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinSymStart); + fieldStartP[kFourthField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinFunctionBodyStart); + fieldStartP[kFifthField] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::Export(const std::string &fname, std::unordered_set *dumpFuncSet) { + uint64 fieldStartPoint[5]; + if (!not2mplt) { + WriteInt(kMpltMagicNumber); + WriteContentField4mplt(kFourthFieldInt, fieldStartPoint); + WriteStrField(fieldStartPoint[kFirstField]); + WriteTypeField(fieldStartPoint[kSecondField]); + WriteCgField(fieldStartPoint[kThirdField], nullptr); + importFileName = fname; + } else { + WriteInt(kMpltMagicNumber + 0x10); + if (mod.IsJavaModule()) { + WriteContentField4nonmplt(kFourthFieldInt, fieldStartPoint); + WriteHeaderField(fieldStartPoint[kFirstField]); + WriteSymField(fieldStartPoint[kSecondField]); + WriteFunctionBodyField(fieldStartPoint[kThirdField], dumpFuncSet); + } else { + WriteContentField4nonJava(kSixthFieldInt, fieldStartPoint); + WriteHeaderField(fieldStartPoint[kFirstField]); + WriteSymField(fieldStartPoint[kFourthField]); + WriteFunctionBodyField(fieldStartPoint[kFifthField], dumpFuncSet); + } + } + WriteNum(kBinFinish); + DumpBuf(fname); +} + +void BinaryMplExport::AppendAt(const std::string &name, int32 offset) { + FILE *f = fopen(name.c_str(), "r+b"); + if (f == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Error while opening the binary file: " << name << '\n'; + FATAL(kLncFatal, "Error while creating the binary file: %s\n", name.c_str()); + } + int seekRet = fseek(f, static_cast(offset), SEEK_SET); + CHECK_FATAL(seekRet == 0, "Call fseek failed."); + size_t size = buf.size(); + size_t k = fwrite(&buf[0], sizeof(uint8), size, f); + fclose(f); + if (k != size) { + LogInfo::MapleLogger(kLlErr) << "Error while writing the binary file: " << name << '\n'; + } +} + +void BinaryMplExport::OutputTypePairs(const MIRInstantVectorType &type) { + size_t size = type.GetInstantVec().size(); + WriteNum(size); + for (const TypePair &typePair : type.GetInstantVec()) { + OutputType(typePair.first); + OutputType(typePair.second); + } +} + +void 
BinaryMplExport::OutputTypeAttrs(const TypeAttrs &ta) { + WriteNum(ta.GetAttrFlag()); + WriteNum(ta.GetAlignValue()); + WriteNum(ta.GetPack()); +} + +void BinaryMplExport::OutputType(TyIdx tyIdx) { + if (tyIdx == 0u) { + WriteNum(0); + return; + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(ty != nullptr, "If gets nulltype, should have been returned!"); + auto it = typMark.find(ty); + if (it != typMark.end()) { + if (ty->GetKind() != kTypeFunction) { + WriteNum(-(it->second)); + return; + } + ++BinaryMplExport::typeMarkOffset; + } else { + size_t mark = typMark.size() + BinaryMplExport::typeMarkOffset; + typMark[ty] = mark; + } + + auto func = CreateProductFunction(ty->GetKind()); + if (func != nullptr) { + func(*ty, *this); + } else { + DEBUG_ASSERT(false, "Type's kind not yet implemented: %d", ty->GetKind()); + } +} + +void UpdateMplt::UpdateCgField(BinaryMplt &binMplt, const CallGraph &cg) { + BinaryMplImport &binImport = binMplt.GetBinImport(); + BinaryMplExport &binExport = binMplt.GetBinExport(); + binImport.SetBufI(0); + if (binImport.IsBufEmpty() || binImport.ReadInt() != kMpltMagicNumber) { + INFO(kLncInfo, " This Module depends on nothing"); + return; + } + int64 cgStart = binImport.GetContent(kBinCgStart); + DEBUG_ASSERT(cgStart != 0, "Should be updated in import processing."); + binImport.SetBufI(cgStart); + int64 checkReadNum = binImport.ReadNum(); + DEBUG_ASSERT(checkReadNum == kBinCgStart, "Should be cg start point."); + int32 totalSize = binImport.ReadInt(); + constexpr int32 headLen = 4; + binImport.SetBufI(binImport.GetBufI() + totalSize - headLen); + checkReadNum = binImport.ReadNum(); + DEBUG_ASSERT(checkReadNum == ~kBinCgStart, "Should be end of cg."); + binExport.Init(); + std::map tmp; + binExport.func2SEMap = &tmp; + binExport.inIPA = true; + binExport.WriteCgField(0, &cg); + binExport.Init(); + binExport.WriteSeField(); + binExport.eaNodeMark.clear(); + binExport.eaNodeMark[nullptr] = 0; + binExport.gStrMark.clear(); + binExport.gStrMark[GStrIdx(0)] = 0; + binExport.WriteEaField(cg); + binExport.WriteNum(kBinFinish); + std::string filename(binMplt.GetImportFileName()); + binExport.AppendAt(filename, cgStart); +} + +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/bin_mpl_import.cpp b/ecmascript/mapleall/maple_ir/src/bin_mpl_import.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ddd38f60144cbd3232f2ecd4d94e5a9a1454a2bf --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/bin_mpl_import.cpp @@ -0,0 +1,1669 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bin_mpl_import.h" +#include +#include +#include +#include +#include "bin_mplt.h" +#include "mir_function.h" +#include "namemangler.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" + +namespace maple { +uint8 BinaryMplImport::Read() { + CHECK_FATAL(bufI < buf.size(), "Index out of bound in BinaryMplImport::Read()"); + return buf[bufI++]; +} + +// Little endian +int32 BinaryMplImport::ReadInt() { + uint32 x0 = static_cast(Read()); + uint32 x1 = static_cast(Read()); + uint32 x2 = static_cast(Read()); + uint32 x3 = static_cast(Read()); + return (((((x3 << 8u) + x2) << 8u) + x1) << 8u) + x0; +} + +int64 BinaryMplImport::ReadInt64() { + // casts to avoid sign extension + uint32 x0 = static_cast(ReadInt()); + uint64 x1 = static_cast(ReadInt()); + return static_cast((x1 << 32) + x0); +} + +// LEB128 +int64 BinaryMplImport::ReadNum() { + uint64 n = 0; + int64 y = 0; + uint64 b = static_cast(Read()); + while (b >= 0x80) { + y += ((b - 0x80) << n); + n += 7; + b = static_cast(Read()); + } + b = (b & 0x3F) - (b & 0x40); + return y + (b << n); +} + +void BinaryMplImport::ReadAsciiStr(std::string &str) { + int64 n = ReadNum(); + for (int64 i = 0; i < n; i++) { + uint8 ch = Read(); + str.push_back(static_cast(ch)); + } +} + +void BinaryMplImport::ReadFileAt(const std::string &name, int32 offset) { + FILE *f = fopen(name.c_str(), "rb"); + CHECK_FATAL(f != nullptr, "Error while reading the binary file: %s", name.c_str()); + + int seekRet = fseek(f, 0, SEEK_END); + CHECK_FATAL(seekRet == 0, "call fseek failed"); + + long size = ftell(f); + size -= offset; + + CHECK_FATAL(size >= 0, "should not be negative"); + + seekRet = fseek(f, offset, SEEK_SET); + CHECK_FATAL(seekRet == 0, "call fseek failed"); + buf.resize(size); + + size_t result = fread(&buf[0], sizeof(uint8), static_cast(size), f); + fclose(f); + CHECK_FATAL(result == static_cast(size), "Error while reading the binary file: %s", name.c_str()); +} + +void BinaryMplImport::ImportConstBase(MIRConstKind &kind, MIRTypePtr &type) { + kind = static_cast(ReadNum()); + TyIdx tyidx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); +} + +MIRConst *BinaryMplImport::ImportConst(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } + + MIRConstKind kind; + MIRType *type = nullptr; + MemPool *memPool = mod.GetMemPool(); + + ImportConstBase(kind, type); + switch (tag) { + case kBinKindConstInt: + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(ReadNum(), *type); + case kBinKindConstAddrof: { + MIRSymbol *sym = InSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); + FieldID fi = ReadNum(); + int32 ofst = static_cast(ReadNum()); + // do not use "type"; instead, get exprTy from sym + TyIdx ptyIdx = sym->GetTyIdx(); + MIRPtrType ptrType(ptyIdx, (mod.IsJavaModule() ? 
PTY_ref : GetExactPtrPrimType())); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + return memPool->New(sym->GetStIdx(), fi, *exprTy, ofst); + } + case kBinKindConstAddrofLocal: { + MIRSymbol *sym = ImportLocalSymbol(func); + FieldID fi = static_cast(ReadNum()); + int32 ofst = static_cast(ReadNum()); + return memPool->New(sym->GetStIdx(), fi, *type, ofst); + } + case kBinKindConstAddrofFunc: { + PUIdx puIdx = ImportFunction(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx]; + f->GetFuncSymbol()->SetAppearsInCode(true); + mod.SetCurFunction(func); + return memPool->New(puIdx, *type); + } + case kBinKindConstAddrofLabel: { + LabelIdx lidx = ImportLabel(func); + PUIdx puIdx = func->GetPuidx(); + MIRLblConst *lblConst = memPool->New(lidx, puIdx, *type); + (void)func->GetLabelTab()->addrTakenLabels.insert(lidx); + return lblConst; + } + case kBinKindConstStr: { + UStrIdx ustr = ImportUsrStr(); + return memPool->New(ustr, *type); + } + case kBinKindConstStr16: { + Conststr16Node *cs; + cs = memPool->New(); + cs->SetPrimType(type->GetPrimType()); + int64 len = ReadNum(); + std::ostringstream ostr; + for (int64 i = 0; i < len; ++i) { + ostr << Read(); + } + std::u16string str16; + (void)namemangler::UTF8ToUTF16(str16, ostr.str()); + cs->SetStrIdx(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str16)); + return memPool->New(cs->GetStrIdx(), *type); + } + case kBinKindConstFloat: { + union { + float fvalue; + int32 ivalue; + } value; + + value.ivalue = ReadNum(); + return GlobalTables::GetFpConstTable().GetOrCreateFloatConst(value.fvalue); + } + case kBinKindConstDouble: { + union { + double dvalue; + int64 ivalue; + } value; + + value.ivalue = ReadNum(); + return GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(value.dvalue); + } + case kBinKindConstAgg: { + MIRAggConst *aggConst = mod.GetMemPool()->New(mod, *type); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + auto fieldId = static_cast(ReadNum()); + auto fieldConst = ImportConst(func); + aggConst->AddItem(fieldConst, fieldId); + } + return aggConst; + } + case kBinKindConstSt: { + MIRStConst *stConst = mod.GetMemPool()->New(mod, *type); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + stConst->PushbackSymbolToSt(InSymbol(func)); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + stConst->PushbackOffsetToSt(ReadNum()); + } + return stConst; + } + default: + CHECK_FATAL(false, "Unhandled const type"); + } +} + +GStrIdx BinaryMplImport::ImportStr() { + int64 tag = ReadNum(); + if (tag == 0) { + return GStrIdx(0); + } + if (tag < 0) { + CHECK_FATAL(-tag < static_cast(gStrTab.size()), "index out of range in BinaryMplt::ImportStr"); + return gStrTab[-tag]; + } + CHECK_FATAL(tag == kBinString, "expecting kBinString"); + std::string str; + ReadAsciiStr(str); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + gStrTab.push_back(strIdx); + return strIdx; +} + +UStrIdx BinaryMplImport::ImportUsrStr() { + int64 tag = ReadNum(); + if (tag == 0) { + return UStrIdx(0); + } + if (tag < 0) { + CHECK_FATAL(-tag < static_cast(uStrTab.size()), "index out of range in BinaryMplt::InUsrStr"); + return uStrTab[-tag]; + } + CHECK_FATAL(tag == kBinUsrString, "expecting kBinUsrString"); + std::string str; + ReadAsciiStr(str); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + uStrTab.push_back(strIdx); + return strIdx; +} + 
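+// Editor's note (added explanation): the Import* readers above share one tag convention:
+// tag == 0 denotes a null index; tag < 0 is a back-reference whose magnitude indexes the
+// table of already-imported entries (gStrTab/uStrTab); and tag == kBinString/kBinUsrString
+// introduces a fresh string that is appended to the table. A string repeated in the stream
+// is therefore materialized once and afterwards costs only one small negative number.
+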
+MIRPragmaElement *BinaryMplImport::ImportPragmaElement() { + MIRPragmaElement *element = mod.GetPragmaMemPool()->New(mod); + element->SetNameStrIdx(ImportStr()); + element->SetTypeStrIdx(ImportStr()); + element->SetType(static_cast(ReadNum())); + if (element->GetType() == kValueString || element->GetType() == kValueType || element->GetType() == kValueField || + element->GetType() == kValueMethod || element->GetType() == kValueEnum) { + element->SetI32Val(static_cast(ImportStr())); + } else { + element->SetU64Val(static_cast(ReadInt64())); + } + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + element->SubElemVecPushBack(ImportPragmaElement()); + } + return element; +} + +MIRPragma *BinaryMplImport::ImportPragma() { + MIRPragma *p = mod.GetPragmaMemPool()->New(mod); + p->SetKind(static_cast(ReadNum())); + p->SetVisibility(ReadNum()); + p->SetStrIdx(ImportStr()); + if (mod.IsJavaModule()) { + p->SetTyIdx(ImportType()); + p->SetTyIdxEx(ImportType()); + } else { + p->SetTyIdx(ImportTypeNonJava()); + p->SetTyIdxEx(ImportTypeNonJava()); + } + p->SetParamNum(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + p->PushElementVector(ImportPragmaElement()); + } + return p; +} + +void BinaryMplImport::ImportFieldPair(FieldPair &fp) { + fp.first = ImportStr(); + fp.second.first = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + fp.second.second.SetAttrFlag(ReadNum()); + fp.second.second.SetAlignValue(ReadNum()); + FieldAttrs fa = fp.second.second; + if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) && + (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) { + int64 tag = ReadNum(); + if (tag == kBinInitConst) { + GlobalTables::GetConstPool().InsertConstPool(fp.first, ImportConst(nullptr)); + } + } +} + +void BinaryMplImport::ImportMethodPair(MethodPair &memPool) { + std::string funcName; + ReadAsciiStr(funcName); + TyIdx funcTyIdx = ImportType(); + int64 x = ReadNum(); + CHECK_FATAL(x >= 0, "ReadNum error, x: %d", x); + auto attrFlag = static_cast(x); + + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRSymbol *prevFuncSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + MIRSymbol *funcSt = nullptr; + MIRFunction *fn = nullptr; + + if (prevFuncSt != nullptr && (prevFuncSt->GetStorageClass() == kScText && prevFuncSt->GetSKind() == kStFunc)) { + funcSt = prevFuncSt; + fn = funcSt->GetFunction(); + } else { + funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + funcSt->SetTyIdx(funcTyIdx); + funcSt->SetIsImported(imported); + funcSt->SetIsImportedDecl(imported); + methodSymbols.push_back(funcSt); + + fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + auto *funcType = static_cast(funcSt->GetType()); + fn->SetMIRFuncType(funcType); + fn->SetFileIndex(0); + fn->SetBaseClassFuncNames(funcSt->GetNameStrIdx()); + fn->SetFuncAttrs(attrFlag); + } + memPool.first.SetFullIdx(funcSt->GetStIdx().FullIdx()); + memPool.second.first.reset(funcTyIdx); + memPool.second.second.SetAttrFlag(attrFlag); +} + +void BinaryMplImport::UpdateMethodSymbols() { + for (auto sym : methodSymbols) { + MIRFunction *fn = sym->GetFunction(); + CHECK_FATAL(fn != nullptr, "fn is null"); 
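+    // Editor's note: the code below re-derives signature information from the function's
+    // MIRFuncType; formals are synthesized from the parameter lists only when
+    // ImportFunction() has not already populated FormalDefVec (hence the early continue).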
+    auto *funcType = static_cast<MIRFuncType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()));
+    fn->SetMIRFuncType(funcType);
+    fn->SetReturnStruct(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()));
+    if (fn->GetFormalDefVec().size() != 0) {
+      continue;  // already updated in ImportFunction()
+    }
+    for (size_t i = 0; i < funcType->GetParamTypeList().size(); ++i) {
+      FormalDef formalDef(nullptr, funcType->GetParamTypeList()[i], funcType->GetParamAttrsList()[i]);
+      fn->GetFormalDefVec().push_back(formalDef);
+    }
+  }
+}
+
+void BinaryMplImport::ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize) {
+  int64 size = ReadNum();
+  int64 initSize = fields.size() + methodSize;
+  for (int64 i = 0; i < size; ++i) {
+    FieldPair fp;
+    ImportFieldPair(fp);
+    if (initSize == 0) {
+      fields.push_back(fp);
+    }
+  }
+}
+
+void BinaryMplImport::ImportMethodsOfStructType(MethodVector &methods) {
+  int64 size = ReadNum();
+  bool isEmpty = methods.empty();
+  for (int64 i = 0; i < size; ++i) {
+    MethodPair memPool;
+    ImportMethodPair(memPool);
+    if (isEmpty) {
+      methods.push_back(memPool);
+    }
+  }
+}
+
+void BinaryMplImport::ImportStructTypeData(MIRStructType &type) {
+  uint32 methodSize = type.GetMethods().size();
+  ImportFieldsOfStructType(type.GetFields(), methodSize);
+  ImportFieldsOfStructType(type.GetStaticFields(), methodSize);
+  ImportFieldsOfStructType(type.GetParentFields(), methodSize);
+  ImportMethodsOfStructType(type.GetMethods());
+  type.SetIsImported(imported);
+}
+
+void BinaryMplImport::ImportInterfacesOfClassType(std::vector<TyIdx> &interfaces) {
+  int64 size = ReadNum();
+  bool isEmpty = interfaces.empty();
+  for (int64 i = 0; i < size; ++i) {
+    TyIdx idx = ImportType();
+    if (isEmpty) {
+      interfaces.push_back(idx);
+    }
+  }
+}
+
+void BinaryMplImport::ImportInfoIsStringOfStructType(MIRStructType &type) {
+  int64 size = ReadNum();
+  bool isEmpty = type.GetInfoIsString().empty();
+
+  for (int64 i = 0; i < size; ++i) {
+    auto isString = static_cast<bool>(ReadNum());
+
+    if (isEmpty) {
+      type.PushbackIsString(isString);
+    }
+  }
+}
+
+void BinaryMplImport::ImportInfoOfStructType(MIRStructType &type) {
+  uint64 size = static_cast<uint64>(ReadNum());
+  bool isEmpty = type.GetInfo().empty();
+  for (size_t i = 0; i < size; ++i) {
+    GStrIdx idx = ImportStr();
+    int64 x = (type.GetInfoIsStringElemt(i)) ? static_cast<int64>(ImportStr()) : ReadNum();
+    CHECK_FATAL(x >= 0, "ReadNum negative, x: %d", x);
+    CHECK_FATAL(x <= std::numeric_limits<uint32>::max(), "ReadNum too large, x: %d", x);
+    if (isEmpty) {
+      type.PushbackMIRInfo(MIRInfoPair(idx, static_cast<uint32>(x)));
+    }
+  }
+}
+
+void BinaryMplImport::ImportPragmaOfStructType(MIRStructType &type) {
+  int64 size = ReadNum();
+  bool isEmpty = type.GetPragmaVec().empty();
+  for (int64 i = 0; i < size; ++i) {
+    MIRPragma *pragma = ImportPragma();
+    if (isEmpty) {
+      type.PushbackPragma(pragma);
+    }
+  }
+}
+
+void BinaryMplImport::SetClassTyidxOfMethods(MIRStructType &type) {
+  if (type.GetTypeIndex() != 0u) {
+    // set up classTyIdx for methods
+    for (size_t i = 0; i < type.GetMethods().size(); ++i) {
+      StIdx stidx = type.GetMethodsElement(i).first;
+      MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+      CHECK_FATAL(st != nullptr, "st is null");
+      CHECK_FATAL(st->GetSKind() == kStFunc, "unexpected st->sKind");
+      st->GetFunction()->SetClassTyIdx(type.GetTypeIndex());
+    }
+  }
+}
+
+void BinaryMplImport::ImportClassTypeData(MIRClassType &type) {
+  TyIdx tempType = ImportType();
+  // Keep the parent_tyidx we first met.
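+  // Editor's note: the same class can be imported more than once (e.g. first via an
+  // incomplete forward reference), so the guard below keeps the parent recorded first.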
+ if (type.GetParentTyIdx() == 0u) { + type.SetParentTyIdx(tempType); + } + ImportInterfacesOfClassType(type.GetInterfaceImplemented()); + ImportInfoIsStringOfStructType(type); + if (!inIPA) { + ImportInfoOfStructType(type); + ImportPragmaOfStructType(type); + } + SetClassTyidxOfMethods(type); +} + +void BinaryMplImport::ImportInterfaceTypeData(MIRInterfaceType &type) { + ImportInterfacesOfClassType(type.GetParentsTyIdx()); + ImportInfoIsStringOfStructType(type); + if (!inIPA) { + ImportInfoOfStructType(type); + ImportPragmaOfStructType(type); + } + SetClassTyidxOfMethods(type); +} + +void BinaryMplImport::Reset() { + buf.clear(); + bufI = 0; + gStrTab.clear(); + uStrTab.clear(); + typTab.clear(); + funcTab.clear(); + symTab.clear(); + methodSymbols.clear(); + definedLabels.clear(); + gStrTab.push_back(GStrIdx(0)); // Dummy + uStrTab.push_back(UStrIdx(0)); // Dummy + symTab.push_back(nullptr); // Dummy + funcTab.push_back(nullptr); // Dummy + eaCgTab.push_back(nullptr); + for (int32 pti = static_cast(PTY_begin); pti < static_cast(PTY_end); ++pti) { + typTab.push_back(TyIdx(pti)); + } +} + +TypeAttrs BinaryMplImport::ImportTypeAttrs() { + TypeAttrs ta; + ta.SetAttrFlag(static_cast(ReadNum())); + ta.SetAlignValue(static_cast(ReadNum())); + ta.SetPack(static_cast(ReadNum())); + return ta; +} + +void BinaryMplImport::ImportTypePairs(std::vector &insVecType) { + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + TyIdx t0 = ImportType(); + TyIdx t1 = ImportType(); + TypePair tp(t0, t1); + insVecType.push_back(tp); + } +} + +void BinaryMplImport::CompleteAggInfo(TyIdx tyIdx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type != nullptr, "MIRType is null"); + if (type->GetKind() == kTypeInterface) { + auto *interfaceType = static_cast(type); + ImportStructTypeData(*interfaceType); + ImportInterfaceTypeData(*interfaceType); + } else if (type->GetKind() == kTypeClass) { + auto *classType = static_cast(type); + ImportStructTypeData(*classType); + ImportClassTypeData(*classType); + } else if (type->GetKind() == kTypeStruct || type->GetKind() == kTypeUnion) { + auto *structType = static_cast(type); + ImportStructTypeData(*structType); + } else { + ERR(kLncErr, "in BinaryMplImport::CompleteAggInfo, MIRType error"); + } +} + +inline static bool IsIncomplete(const MIRType &type) { + return (type.GetKind() == kTypeInterfaceIncomplete || type.GetKind() == kTypeClassIncomplete || + type.GetKind() == kTypeStructIncomplete); +} + +TyIdx BinaryMplImport::ImportType(bool forPointedType) { + int64 tag = ReadNum(); + static MIRType *typeNeedsComplete = nullptr; + static int ptrLev = 0; + if (tag == 0) { + return TyIdx(0); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); + return typTab.at(static_cast(-tag)); + } + PrimType primType = static_cast(0); + GStrIdx strIdx(0); + bool nameIsLocal = false; + ImportTypeBase(primType, strIdx, nameIsLocal); + + switch (tag) { + case kBinKindTypeScalar: + return TyIdx(primType); + case kBinKindTypePointer: { + MIRPtrType type(primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetTypeAttrs(ImportTypeAttrs()); + ++ptrLev; + type.SetPointedTyIdx(ImportType(true)); + --ptrLev; + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + if (typeNeedsComplete != nullptr && ptrLev == 0) { + TyIdx tyIdxNeedsComplete = typeNeedsComplete->GetTypeIndex(); + typeNeedsComplete = nullptr; + 
CompleteAggInfo(tyIdxNeedsComplete); + } + return origType->GetTypeIndex(); + } + case kBinKindTypeFArray: { + MIRFarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemtTyIdx(ImportType(forPointedType)); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeJarray: { + MIRJarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemtTyIdx(ImportType(forPointedType)); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeArray: { + MIRArrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetDim(ReadNum()); + CHECK_FATAL(type.GetDim() < kMaxArrayDim, "array index out of range"); + for (uint16 i = 0; i < type.GetDim(); ++i) { + type.SetSizeArrayItem(i, ReadNum()); + } + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemTyIdx(ImportType(forPointedType)); + type.SetTypeAttrs(ImportTypeAttrs()); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeFunction: { + MIRFuncType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetRetTyIdx(ImportType()); + type.funcAttrs.SetAttrFlag(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamTypeList().push_back(ImportType()); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamAttrsList().push_back(ImportTypeAttrs()); + } + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeParam: { + MIRTypeParam type(strIdx); + type.SetNameIsLocal(nameIsLocal); + MIRType *origType = &InsertInTypeTables(type); + typTab.push_back(origType->GetTypeIndex()); + return origType->GetTypeIndex(); + } + case kBinKindTypeInstantVector: { + auto kind = static_cast(ReadNum()); + MIRInstantVectorType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto *origType = static_cast(&InsertInTypeTables(type)); + typTab.push_back(origType->GetTypeIndex()); + ImportTypePairs(origType->GetInstantVec()); + return origType->GetTypeIndex(); + } + case kBinKindTypeGenericInstant: { + MIRGenericInstantType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + auto *origType = static_cast(&InsertInTypeTables(type)); + typTab.push_back(origType->GetTypeIndex()); + ImportTypePairs(origType->GetInstantVec()); + origType->SetGenericTyIdx(ImportType()); + return origType->GetTypeIndex(); + } + case kBinKindTypeBitField: { + uint8 fieldSize = ReadNum(); + MIRBitFieldType type(fieldSize, primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + MIRType *origType = &InsertInTypeTables(type); + typTab.push_back(origType->GetTypeIndex()); + return origType->GetTypeIndex(); + } + case kBinKindTypeStruct: { + auto kind = static_cast(ReadNum()); + MIRStructType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + MIRStructType &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeStructIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + } + } + return 
origType.GetTypeIndex(); + } + case kBinKindTypeClass: { + auto kind = static_cast(ReadNum()); + MIRClassType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeClassIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + ImportClassTypeData(origType); + } + } + return origType.GetTypeIndex(); + } + case kBinKindTypeInterface: { + auto kind = static_cast(ReadNum()); + MIRInterfaceType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeInterfaceIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + ImportInterfaceTypeData(origType); + } + } + return origType.GetTypeIndex(); + } + default: + CHECK_FATAL(false, "Unexpected binary kind"); + } +} + +TyIdx BinaryMplImport::ImportTypeNonJava() { + int64 tag = ReadNum(); + if (tag == 0) { + return TyIdx(0); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); + return typTab[static_cast(-tag)]; + } + PrimType primType = static_cast(0); + GStrIdx strIdx(0); + bool nameIsLocal = false; + ImportTypeBase(primType, strIdx, nameIsLocal); + TyIdx tyIdxUsed(GlobalTables::GetTypeTable().GetTypeTableSize()); + if (tag != kBinKindTypeScalar) { + GlobalTables::GetTypeTable().PushNull(); + typTab.push_back(tyIdxUsed); + } + + switch (tag) { + case kBinKindTypeScalar: + return TyIdx(primType); + case kBinKindTypePointer: { + MIRPtrType type(primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + type.SetPointedTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeFArray: { + MIRFarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetElemtTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeJarray: { + MIRJarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetElemtTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeArray: { + MIRArrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetDim(ReadNum()); + CHECK_FATAL(type.GetDim() < kMaxArrayDim, "array index out of range"); + for (uint16 i = 0; i < type.GetDim(); ++i) { + type.SetSizeArrayItem(i, ReadNum()); + } + type.SetElemTyIdx(ImportTypeNonJava()); + type.SetTypeAttrs(ImportTypeAttrs()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeFunction: { + MIRFuncType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetRetTyIdx(ImportTypeNonJava()); + type.funcAttrs.SetAttrFlag(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamTypeList().push_back(ImportTypeNonJava()); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamAttrsList().push_back(ImportTypeAttrs()); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeParam: { + MIRTypeParam type(strIdx); + 
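Both ImportType and ImportTypeNonJava above lean on the same back-reference encoding: a negative tag is an index into typTab, the table of types already imported from this file, and for aggregate kinds a placeholder slot is pushed before the element type is imported, so that self-referential types resolve to their own entry. A minimal sketch of that pattern, with illustrative names rather than the real maple API:

```
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the negative-tag back-reference scheme. Illustrative only.
struct Reader {
    const std::vector<int64_t> *stream;
    size_t pos = 0;
    int64_t ReadNum() { return (*stream)[pos++]; }
};

static std::vector<int32_t> typTab{0};  // slot 0 unused, as in the importer

int32_t ImportTypeSketch(Reader &r) {
    int64_t tag = r.ReadNum();
    if (tag < 0) {
        // back reference: -tag indexes a previously imported type
        assert(static_cast<size_t>(-tag) < typTab.size());
        return typTab[static_cast<size_t>(-tag)];
    }
    // reserve a slot *before* recursing so a self-referential
    // element type can point back at this entry
    size_t slot = typTab.size();
    typTab.push_back(0);                 // placeholder, like TyIdx(0)
    int32_t elem = ImportTypeSketch(r);  // e.g. the array element type
    int32_t newIdx = elem + 100;         // stand-in for the global-table insert
    typTab[slot] = newIdx;               // patch the placeholder
    return newIdx;
}

int main() {
    std::vector<int64_t> s{1, -1};  // a new type whose element refers back to itself
    Reader r{&s};
    return ImportTypeSketch(r) == 100 ? 0 : 1;
}
```

With the stream {1, -1}, the inner back reference lands on the placeholder slot reserved by the outer call, which is exactly how a recursive type closes its own cycle.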
type.SetNameIsLocal(nameIsLocal); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeInstantVector: { + auto kind = static_cast(ReadNum()); + MIRInstantVectorType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + ImportTypePairs(type.GetInstantVec()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeGenericInstant: { + MIRGenericInstantType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + ImportTypePairs(type.GetInstantVec()); + type.SetGenericTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeBitField: { + uint8 fieldSize = ReadNum(); + MIRBitFieldType type(fieldSize, primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeStruct: { + auto kind = static_cast(ReadNum()); + MIRStructType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + if (kind != kTypeStructIncomplete) { + ImportStructTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, IsIncomplete(type)); + return tyIdxUsed; + } + case kBinKindTypeClass: { + auto kind = static_cast(ReadNum()); + MIRClassType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + if (kind != kTypeClassIncomplete) { + ImportStructTypeData(type); + ImportClassTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, true, IsIncomplete(type)); + return tyIdxUsed; + } + case kBinKindTypeInterface: { + auto kind = static_cast(ReadNum()); + MIRInterfaceType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + if (kind != kTypeInterfaceIncomplete) { + ImportStructTypeData(type); + ImportInterfaceTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, true, IsIncomplete(type)); + return tyIdxUsed; + } + default: + CHECK_FATAL(false, "Unexpected binary kind"); + } +} + +void BinaryMplImport::ImportTypeBase(PrimType &primType, GStrIdx &strIdx, bool &nameIsLocal) { + primType = static_cast(ReadNum()); + strIdx = ImportStr(); + nameIsLocal = ReadNum(); +} + +inline static bool IsObject(const MIRType &type) { + return (type.GetKind() == kTypeClass || type.GetKind() == kTypeClassIncomplete || + type.GetKind() == kTypeInterface || type.GetKind() == kTypeInterfaceIncomplete); +} + +MIRType &BinaryMplImport::InsertInTypeTables(MIRType &type) { + MIRType *resultTypePtr = &type; + TyIdx prevTyIdx = mod.GetTypeNameTab()->GetTyIdxFromGStrIdx(type.GetNameStrIdx()); + if (prevTyIdx != 0u && !type.IsNameIsLocal()) { + MIRType *prevType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); + if (!prevType->IsMIRTypeByName() && + ((IsIncomplete(*prevType) && IsIncomplete(type)) || + (!IsIncomplete(*prevType) && !IsIncomplete(type)) || + (!IsIncomplete(*prevType) && IsIncomplete(type)))) { + resultTypePtr = prevType->CopyMIRTypeNode(); + if (resultTypePtr->GetKind() == kTypeStruct || resultTypePtr->GetKind() == kTypeUnion || + resultTypePtr->GetKind() == kTypeStructIncomplete) { + tmpStruct.push_back(static_cast(resultTypePtr)); + } else if (resultTypePtr->GetKind() == kTypeClass || resultTypePtr->GetKind() == kTypeClassIncomplete) { + tmpClass.push_back(static_cast(resultTypePtr)); + } else 
if (resultTypePtr->GetKind() == kTypeInterface || resultTypePtr->GetKind() == kTypeInterfaceIncomplete) { + tmpInterface.push_back(static_cast(resultTypePtr)); + } + } else { + // New definition wins + type.SetTypeIndex(prevTyIdx); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().empty() == false, "container check"); + GlobalTables::GetTypeTable().SetTypeWithTyIdx(prevTyIdx, *type.CopyMIRTypeNode()); + resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); + if (!IsIncomplete(*resultTypePtr)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(resultTypePtr->GetNameStrIdx(), + resultTypePtr->GetTypeIndex()); + } + } + } else { + // New type, no previous definition or anonymous type + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&type); + resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (tyIdx + 1 == GlobalTables::GetTypeTable().GetTypeTable().size() && !resultTypePtr->IsNameIsLocal()) { + GStrIdx stridx = resultTypePtr->GetNameStrIdx(); + if (stridx != 0) { + mod.GetTypeNameTab()->SetGStrIdxToTyIdx(stridx, tyIdx); + mod.PushbackTypeDefOrder(stridx); + if (IsObject(*resultTypePtr)) { + mod.AddClass(tyIdx); + if (!IsIncomplete(*resultTypePtr)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(stridx, tyIdx); + } + } + } + } + } + return *resultTypePtr; +} + +void BinaryMplImport::SetupEHRootType() { + // setup eh root type with most recent Ljava_2Flang_2FObject_3B + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kJavaLangObjectStr); + if (gStrIdx == 0u) { + return; + } + + TyIdx tyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(gStrIdx); + if (tyIdx != 0u) { + mod.SetThrowableTyIdx(tyIdx); + } +} + +MIRSymbol *BinaryMplImport::GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mclass, + MIRStorageClass sclass, MIRFunction *func, uint8 scpID) { + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (st != nullptr && st->GetStorageClass() == sclass && st->GetSKind() == mclass && scpID == kScopeGlobal) { + return st; + } + return mirBuilder.CreateSymbol(tyIdx, strIdx, mclass, sclass, func, scpID); +} + +MIRSymbol *BinaryMplImport::InSymbol(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } else if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < symTab.size(), "index out of bounds"); + return symTab.at(-tag); + } else { + CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol"); + int64 scope = ReadNum(); + GStrIdx stridx = ImportStr(); + UStrIdx secAttr = ImportUsrStr(); + UStrIdx asmAttr = ImportUsrStr(); + auto skind = static_cast(ReadNum()); + auto sclass = static_cast(ReadNum()); + TyIdx tyTmp(0); + MIRSymbol *sym = GetOrCreateSymbol(tyTmp, stridx, skind, sclass, func, scope); + if (secAttr != 0) { + sym->sectionAttr = secAttr; + } + if (asmAttr != 0) { + sym->SetAsmAttr(asmAttr); + } + symTab.push_back(sym); + sym->SetAttrs(ImportTypeAttrs()); + sym->SetIsTmp(ReadNum() != 0); + sym->SetIsImported(imported); + uint32 thepregno = 0; + if (skind == kStPreg) { + CHECK_FATAL(scope == kScopeLocal && func != nullptr, "Expecting kScopeLocal"); + thepregno = static_cast(ReadNum()); + } else if (skind == kStConst || skind == kStVar) { + sym->SetKonst(ImportConst(func)); + } else if (skind == kStFunc) { + PUIdx puidx = ImportFunction(); + mod.SetCurFunction(func); + if (puidx != 0) { + sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puidx)); + } + } + if (skind == kStVar || skind == kStFunc) { + 
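InsertInTypeTables resolves a name clash by completeness: the previously registered type is kept unless it is incomplete and the imported definition is complete, the one case where the path labeled "New definition wins" overwrites the old entry in place. A compact restatement of the rule, with simplified types rather than the maple structures:

```
#include <iostream>

// Sketch of the completeness rule in InsertInTypeTables: the previous
// definition survives unless it is incomplete while the imported one is
// complete, in which case the import replaces it in place.
struct Type {
    bool incomplete;
    int payload;
};

bool NewDefinitionWins(const Type &prev, const Type &imported) {
    return prev.incomplete && !imported.incomplete;
}

int main() {
    Type prev{true, 1}, imported{false, 2};
    Type merged = NewDefinitionWins(prev, imported) ? imported : prev;
    std::cout << "kept payload " << merged.payload << "\n";  // 2: the complete definition wins
}
```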
ImportSrcPos(sym->GetSrcPosition()); + } + TyIdx tyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + sym->SetTyIdx(tyIdx); + if (skind == kStPreg) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + PregIdx pregidx = func->GetPregTab()->EnterPregNo(thepregno, mirType->GetPrimType(), mirType); + MIRPregTable *pregTab = func->GetPregTab(); + MIRPreg *preg = pregTab->PregFromPregIdx(pregidx); + preg->SetPrimType(mirType->GetPrimType()); + sym->SetPreg(preg); + } + return sym; + } +} + +PUIdx BinaryMplImport::ImportFunction() { + int64 tag = ReadNum(); + if (tag == 0) { + mod.SetCurFunction(nullptr); + return 0; + } else if (tag < 0) { + CHECK_FATAL(static_cast(-tag) <= funcTab.size(), "index out of bounds"); + if (static_cast(-tag) == funcTab.size()) { // function was exported before its symbol + return static_cast(0); + } + PUIdx puIdx = funcTab[static_cast(-tag)]->GetPuidx(); + mod.SetCurFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)); + return puIdx; + } + CHECK_FATAL(tag == kBinFunction, "expecting kBinFunction"); + MIRSymbol *funcSt = InSymbol(nullptr); + CHECK_FATAL(funcSt != nullptr, "null ptr check"); + MIRFunction *func = nullptr; + if (funcSt->GetFunction() == nullptr) { + maple::MIRBuilder builder(&mod); + func = builder.CreateFunction(funcSt->GetStIdx()); + funcTab.push_back(func); + } else { + func = funcSt->GetFunction(); + funcTab.push_back(func); + } + funcSt->SetFunction(func); + methodSymbols.push_back(funcSt); + if (mod.IsJavaModule()) { + func->SetBaseClassFuncNames(funcSt->GetNameStrIdx()); + } + TyIdx funcTyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + func->SetMIRFuncType(static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTyIdx))); + + func->SetStIdx(funcSt->GetStIdx()); + if (!inCG) { + func->SetFuncAttrs(ReadNum()); // merge side effect + } else { + if (!func->IsDirty()) { + func->SetDirty(true); + func->SetFuncAttrs(ReadNum()); // merge side effect + } else { + FuncAttrs tmp; + tmp.SetAttrFlag(ReadNum()); + if (func->IsNoDefArgEffect() != tmp.GetAttr(FUNCATTR_nodefargeffect)) { + tmp.SetAttr(FUNCATTR_nodefargeffect, true); + } + if (func->IsNoDefEffect() != tmp.GetAttr(FUNCATTR_nodefeffect)) { + tmp.SetAttr(FUNCATTR_nodefeffect, true); + } + if (func->IsNoRetGlobal() != tmp.GetAttr(FUNCATTR_noretglobal)) { + tmp.SetAttr(FUNCATTR_noretglobal, true); + } + if (func->IsNoThrowException() != tmp.GetAttr(FUNCATTR_nothrow_exception)) { + tmp.SetAttr(FUNCATTR_nothrow_exception, true); + } + if (func->IsIpaSeen() != tmp.GetAttr(FUNCATTR_ipaseen)) { + tmp.SetAttr(FUNCATTR_ipaseen); + } + if (func->IsPure() != tmp.GetAttr(FUNCATTR_pure)) { + tmp.SetAttr(FUNCATTR_pure, true); + } + if (func->IsNoRetArg() != tmp.GetAttr(FUNCATTR_noretarg)) { + tmp.SetAttr(FUNCATTR_noretarg, true); + } + if (func->IsNoPrivateDefEffect() != tmp.GetAttr(FUNCATTR_noprivate_defeffect)) { + tmp.SetAttr(FUNCATTR_noprivate_defeffect, true); + } + func->SetFuncAttrs(tmp); + } + } + + auto &attributes = func->GetFuncAttrs(); + if (attributes.GetAttr(FUNCATTR_constructor_priority)) { + attributes.SetConstructorPriority(static_cast(ReadNum())); + } + if (attributes.GetAttr(FUNCATTR_destructor_priority)) { + attributes.SetDestructorPriority(static_cast(ReadNum())); + } + + func->SetFlag(ReadNum()); + if (mod.IsJavaModule()) { + (void)ImportType(); // not set the field to mimic parser + } else { + (void)ImportTypeNonJava(); // not set the field to mimic parser + } + size_t size = static_cast(ReadNum()); + 
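The inCG branch of ImportFunction reconciles the cached function attributes with the freshly read ones a flag at a time: wherever the two disagree, the flag is pinned in the merged set, which is what the chain of if-mismatch-then-SetAttr calls amounts to. A bit-level sketch of that merge, using a plain bitset for illustration:

```
#include <bitset>
#include <iostream>

// Sketch of the per-flag reconciliation in ImportFunction: starting from
// the freshly read flags, every bit on which the cached and fresh sets
// disagree is forced on, mirroring the if-mismatch-then-SetAttr chain.
using Flags = std::bitset<8>;

Flags Merge(Flags cached, Flags fresh) {
    Flags merged = fresh;
    merged |= (cached ^ fresh);  // any disputed bit becomes 1
    return merged;
}

int main() {
    Flags cached("00101100"), fresh("00100110");
    std::cout << Merge(cached, fresh) << "\n";  // 00101110: disputed bits forced on
}
```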
if (func->GetFormalDefVec().size() == 0) { + for (size_t i = 0; i < size; i++) { + GStrIdx strIdx = ImportStr(); + TyIdx tyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + FormalDef formalDef(strIdx, nullptr, tyIdx, TypeAttrs()); + formalDef.formalAttrs.SetAttrFlag(static_cast(ReadNum())); + func->GetFormalDefVec().push_back(formalDef); + } + } else { + CHECK_FATAL(func->GetFormalDefVec().size() >= size, "ImportFunction: inconsistent number of formals"); + for (size_t i = 0; i < size; i++) { + func->GetFormalDefVec()[i].formalStrIdx = ImportStr(); + func->GetFormalDefVec()[i].formalTyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + func->GetFormalDefVec()[i].formalAttrs.SetAttrFlag(static_cast(ReadNum())); + } + } + + mod.SetCurFunction(func); + return func->GetPuidx(); +} + +inline void BinaryMplImport::SkipTotalSize() { + ReadInt(); +} + +void BinaryMplImport::ReadStrField() { + SkipTotalSize(); + + int32 size = ReadInt(); + for (int64 i = 0; i < size; ++i) { + GStrIdx stridx = ImportStr(); + GlobalTables::GetConstPool().PutLiteralNameAsImported(stridx); + } + int64 tag = 0; + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinStrStart, "pattern mismatch in Read STR"); +} + +void BinaryMplImport::ReadHeaderField() { + SkipTotalSize(); + mod.SetFlavor(static_cast(ReadNum())); + mod.SetSrcLang(static_cast(ReadNum())); + mod.SetID(static_cast(ReadNum())); + if (mod.GetFlavor() == kFlavorLmbc) { + mod.SetGlobalMemSize(static_cast(ReadNum())); + mod.SetWithDbgInfo(static_cast(ReadNum())); + } + mod.SetNumFuncs(static_cast(ReadNum())); + std::string inStr; + ReadAsciiStr(inStr); + mod.SetEntryFuncName(inStr); + ImportInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); + + int32 size = static_cast(ReadNum()); + MIRInfoPair infopair; + for (int32 i = 0; i < size; i++) { + infopair.first = ImportStr(); + infopair.second = static_cast(ReadNum()); + mod.PushbackFileInfo(infopair); + } + + size = static_cast(ReadNum()); + for (int32 i = 0; i < size; i++) { + GStrIdx gStrIdx = ImportStr(); + mod.GetImportFiles().push_back(gStrIdx); + std::string importfilename = GlobalTables::GetStrTable().GetStringFromStrIdx(gStrIdx); + // record the imported file for later reading summary info, if exists + mod.PushbackImportedMplt(importfilename); + BinaryMplt *binMplt = new BinaryMplt(mod); + binMplt->GetBinImport().imported = true; + + INFO(kLncInfo, "importing %s", importfilename.c_str()); + if (!binMplt->GetBinImport().Import(importfilename, false)) { // not a binary mplt + FATAL(kLncFatal, "cannot open binary MPLT file: %s\n", importfilename.c_str()); + } else { + INFO(kLncInfo, "finished import of %s", importfilename.c_str()); + } + if (i == 0) { + binMplt->SetImportFileName(importfilename); + mod.SetBinMplt(binMplt); + } else { + delete binMplt; + } + } + + size = static_cast(ReadNum()); + for (int32 i = 0; i < size; i++) { + std::string str; + ReadAsciiStr(str); + mod.GetAsmDecls().emplace_back(MapleString(str, mod.GetMemPool())); + } + + int32 tag = static_cast(ReadNum()); + CHECK_FATAL(tag == ~kBinHeaderStart, "pattern mismatch in Read Import"); + return; +} + +void BinaryMplImport::ReadTypeField() { + SkipTotalSize(); + + int32 size = ReadInt(); + if (mod.IsJavaModule()) { + for (int64 i = 0; i < size; ++i) { + ImportType(); + } + } else { + for (int64 i = 0; i < size; ++i) { + (void)ImportTypeNonJava(); + } + } + int64 tag = 0; + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinTypeStart, "pattern mismatch in Read TYPE"); +} + +CallInfo *BinaryMplImport::ImportCallInfo() { + 
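Every top-level field read in this importer follows one frame layout: a total size that can be skipped, a payload, and the bitwise complement of the start tag as an end marker, which each ReadXxxField verifies with CHECK_FATAL. A minimal sketch of the framing, assuming a simplified stream of integers rather than the real byte encoding:

```
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the field framing: [startTag][totalSize][payload...][~startTag].
// The layout here is illustrative, not the exact mpl binary encoding.
constexpr int64_t kBinStrStart = 4;

void ReadField(const std::vector<int64_t> &buf) {
    size_t i = 0;
    int64_t tag = buf[i++];
    assert(tag == kBinStrStart);
    int64_t totalSize = buf[i++];  // SkipTotalSize(): read and ignore
    (void)totalSize;
    while (buf[i] != ~kBinStrStart) {
        ++i;  // consume payload entries
    }
    assert(buf[i] == ~kBinStrStart && "pattern mismatch in Read STR");
}

int main() {
    std::vector<int64_t> buf{kBinStrStart, 3, 7, 8, 9, ~kBinStrStart};
    ReadField(buf);
}
```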
int64 tag = ReadNum(); + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < callInfoTab.size(), "index out of bounds"); + return callInfoTab.at(-tag); + } + CHECK_FATAL(tag == kBinCallinfo, "expecting kBinCallinfo"); + CallType ctype = static_cast(ReadNum()); // call type + uint32 loopDepth = static_cast(ReadInt()); + uint32 id = static_cast(ReadInt()); + bool argLocal = Read() == 1; + MIRSymbol *funcSym = InSymbol(nullptr); + CHECK_FATAL(funcSym != nullptr, "func_sym is null in BinaryMplImport::InCallInfo"); + CallInfo *ret = mod.GetMemPool()->New(ctype, *funcSym->GetFunction(), + static_cast(nullptr), loopDepth, id, argLocal); + callInfoTab.push_back(ret); + return ret; +} + +void BinaryMplImport::MergeDuplicated(PUIdx methodPuidx, std::vector &targetSet, + std::vector &newSet) { + if (targetSet.empty()) { + (void)targetSet.insert(targetSet.begin(), newSet.begin(), newSet.end()); + std::unordered_set tmp; + mod.AddValueToMethod2TargetHash(methodPuidx, tmp); + for (size_t i = 0; i < newSet.size(); ++i) { + mod.InsertTargetHash(methodPuidx, newSet[i]->GetID()); + } + } else { + for (size_t i = 0; i < newSet.size(); ++i) { + CallInfo *newItem = newSet[i]; + if (!mod.HasTargetHash(methodPuidx, newItem->GetID())) { + targetSet.push_back(newItem); + mod.InsertTargetHash(methodPuidx, newItem->GetID()); + } + } + } +} + +void BinaryMplImport::ReadCgField() { + SkipTotalSize(); + + int32 size = ReadInt(); + int64 tag = 0; + + for (int i = 0; i < size; ++i) { + tag = ReadNum(); + CHECK_FATAL(tag == kStartMethod, " should be start point of method"); + MIRSymbol *tmpInSymbol = InSymbol(nullptr); + CHECK_FATAL(tmpInSymbol != nullptr, "null ptr check"); + PUIdx methodPuidx = tmpInSymbol->GetFunction()->GetPuidx(); + CHECK_FATAL(methodPuidx, "should not be 0"); + if (mod.GetMethod2TargetMap().find(methodPuidx) == mod.GetMethod2TargetMap().end()) { + std::vector targetSetTmp; + mod.AddMemToMethod2TargetMap(methodPuidx, targetSetTmp); + } + int32 targSize = ReadInt(); + std::vector targetSet; + callInfoTab.clear(); + callInfoTab.push_back(nullptr); + for (int32 j = 0; j < targSize; ++j) { + CallInfo *callInfo = ImportCallInfo(); + targetSet.push_back(callInfo); + } + MergeDuplicated(methodPuidx, mod.GetMemFromMethod2TargetMap(methodPuidx), targetSet); + tag = ReadNum(); + CHECK_FATAL(tag == ~kStartMethod, " should be start point of method"); + } + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinCgStart, "pattern mismatch in Read CG"); +} + +void BinaryMplImport::ReadEaField() { + ReadInt(); + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + GStrIdx funcName = ImportStr(); + int nodesSize = ReadInt(); + EAConnectionGraph *newEaCg = mod.GetMemPool()->New(&mod, &mod.GetMPAllocator(), funcName, true); + newEaCg->ResizeNodes(nodesSize, nullptr); + InEaCgNode(*newEaCg); + int eaSize = ReadInt(); + for (int j = 0; j < eaSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->funcArgNodes.push_back(node); + } + mod.SetEAConnectionGraph(funcName, newEaCg); + } + CHECK_FATAL(ReadNum() == ~kBinEaStart, "pattern mismatch in Read EA"); +} + +void BinaryMplImport::ReadSeField() { + SkipTotalSize(); + + int32 size = ReadInt(); +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "SE SIZE : " << size << '\n'; +#endif + for (int32 i = 0; i < size; ++i) { + GStrIdx funcName = ImportStr(); + uint8 specialEffect = Read(); + TyIdx tyIdx = kInitTyIdx; + if ((specialEffect & kPureFunc) == kPureFunc) { + tyIdx = ImportType(); + } + const std::string &funcStr = 
+            GlobalTables::GetStrTable().GetStringFromStrIdx(funcName);
+        if (funcStr == "Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V") {
+            specialEffect = 0;
+        }
+        auto *funcSymbol =
+            GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(funcStr));
+        MIRFunction *func = funcSymbol != nullptr ? mirBuilder.GetFunctionFromSymbol(*funcSymbol) : nullptr;
+        if (func != nullptr) {
+            func->SetAttrsFromSe(specialEffect);
+        } else if ((specialEffect & kPureFunc) == kPureFunc) {
+            func = mirBuilder.GetOrCreateFunction(funcStr, tyIdx);
+            func->SetAttrsFromSe(specialEffect);
+        }
+    }
+    int64 tag = ReadNum();
+    CHECK_FATAL(tag == ~kBinSeStart, "pattern mismatch in Read SE");
+}
+
+void BinaryMplImport::InEaCgBaseNode(EACGBaseNode &base, EAConnectionGraph &newEaCg, bool firstPart) {
+    if (firstPart) {
+        base.SetEAStatus(static_cast<EAStatus>(ReadNum()));
+        base.SetID(ReadInt());
+    } else {
+        // read the points-to set
+        int size = ReadInt();
+        for (int i = 0; i < size; ++i) {
+            EACGBaseNode *point2Node = &InEaCgNode(newEaCg);
+            CHECK_FATAL(point2Node->IsObjectNode(), "must be");
+            (void)base.pointsTo.insert(static_cast<EACGObjectNode*>(point2Node));
+        }
+        // read the in set
+        size = ReadInt();
+        for (int i = 0; i < size; ++i) {
+            EACGBaseNode *point2Node = &InEaCgNode(newEaCg);
+            base.InsertInSet(point2Node);
+        }
+        // read the out set
+        size = ReadInt();
+        for (int i = 0; i < size; ++i) {
+            EACGBaseNode *point2Node = &InEaCgNode(newEaCg);
+            base.InsertOutSet(point2Node);
+        }
+    }
+}
+
+void BinaryMplImport::InEaCgActNode(EACGActualNode &actual) {
+    actual.isPhantom = Read() == 1;
+    actual.isReturn = Read() == 1;
+    actual.argIdx = Read();
+    actual.callSiteInfo = static_cast<uint32>(ReadInt());
+}
+
+void BinaryMplImport::InEaCgFieldNode(EACGFieldNode &field, EAConnectionGraph &newEaCg) {
+    field.SetFieldID(ReadInt());
+    int size = ReadInt();
+    for (int i = 0; i < size; ++i) {
+        EACGBaseNode *node = &InEaCgNode(newEaCg);
+        CHECK_FATAL(node->IsObjectNode(), "must be");
+        (void)field.belongsTo.insert(static_cast<EACGObjectNode*>(node));
+    }
+    field.isPhantom = Read() == 1;
+}
+
+void BinaryMplImport::InEaCgObjNode(EACGObjectNode &obj, EAConnectionGraph &newEaCg) {
+    Read();  // the serialized isPhantom flag is consumed but ignored
+    obj.isPhantom = true;  // imported object nodes are always treated as phantom
+    int size = ReadInt();
+    for (int i = 0; i < size; ++i) {
+        EACGBaseNode *node = &InEaCgNode(newEaCg);
+        CHECK_FATAL(node->IsFieldNode(), "must be");
+        auto *field = static_cast<EACGFieldNode*>(node);
+        obj.fieldNodes[field->GetFieldID()] = field;
+    }
+    // read the pointed-by set
+    size = ReadInt();
+    for (int i = 0; i < size; ++i) {
+        EACGBaseNode *point2Node = &InEaCgNode(newEaCg);
+        (void)obj.pointsBy.insert(point2Node);
+    }
+}
+
+void BinaryMplImport::InEaCgRefNode(EACGRefNode &ref) {
+    ref.isStaticField = Read() == 1 ?
true : false; +} + +EACGBaseNode &BinaryMplImport::InEaCgNode(EAConnectionGraph &newEaCg) { + int64 tag = ReadNum(); + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < eaCgTab.size(), "index out of bounds"); + return *eaCgTab[-tag]; + } + CHECK_FATAL(tag == kBinEaCgNode, "must be"); + NodeKind kind = static_cast(ReadNum()); + EACGBaseNode *node = nullptr; + switch (kind) { + case kObejectNode: + node = new EACGObjectNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + case kReferenceNode: + node = new EACGRefNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + case kFieldNode: + node = new EACGFieldNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + case kActualNode: + node = new EACGActualNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + default: + CHECK_FATAL(false, "impossible"); + } + node->SetEACG(&newEaCg); + eaCgTab.push_back(node); + InEaCgBaseNode(*node, newEaCg, true); + newEaCg.SetNodeAt(node->id - 1, node); + if (node->IsActualNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgActNode, "must be"); + InEaCgActNode(static_cast(*node)); + } else if (node->IsFieldNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgFieldNode, "must be"); + InEaCgFieldNode(static_cast(*node), newEaCg); + } else if (node->IsObjectNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgObjNode, "must be"); + InEaCgObjNode(static_cast(*node), newEaCg); + } else if (node->IsReferenceNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgRefNode, "must be"); + InEaCgRefNode(static_cast(*node)); + } + InEaCgBaseNode(*node, newEaCg, false); + CHECK_FATAL(ReadNum() == ~kBinEaCgNode, "must be"); + return *node; +} + +EAConnectionGraph* BinaryMplImport::ReadEaCgField() { + if (ReadNum() == ~kBinEaCgStart) { + return nullptr; + } + ReadInt(); + GStrIdx funcStr = ImportStr(); + int nodesSize = ReadInt(); + EAConnectionGraph *newEaCg = mod.GetMemPool()->New(&mod, &mod.GetMPAllocator(), funcStr, true); + newEaCg->ResizeNodes(nodesSize, nullptr); + InEaCgNode(*newEaCg); + CHECK_FATAL(newEaCg->GetNode(0)->IsObjectNode(), "must be"); + CHECK_FATAL(newEaCg->GetNode(1)->IsReferenceNode(), "must be"); + CHECK_FATAL(newEaCg->GetNode(2)->IsFieldNode(), "must be"); + newEaCg->globalField = static_cast(newEaCg->GetNode(2)); + newEaCg->globalObj = static_cast(newEaCg->GetNode(0)); + newEaCg->globalRef = static_cast(newEaCg->GetNode(1)); + CHECK_FATAL(newEaCg->globalField && newEaCg->globalObj && newEaCg->globalRef, "must be"); + int32 nodeSize = ReadInt(); + for (int j = 0; j < nodeSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->funcArgNodes.push_back(node); + } + + int32 callSitesize = ReadInt(); + for (int i = 0; i < callSitesize; ++i) { + uint32 id = static_cast(ReadInt()); + newEaCg->callSite2Nodes[id] = mod.GetMemPool()->New>(mod.GetMPAllocator().Adapter()); + int32 calleeArgSize = ReadInt(); + for (int j = 0; j < calleeArgSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->callSite2Nodes[id]->push_back(node); + } + } + +#ifdef DEBUG + for (EACGBaseNode *node : newEaCg->GetNodes()) { + if (node == nullptr) { + continue; + } + node->CheckAllConnectionInNodes(); + } +#endif + CHECK_FATAL(ReadNum() == ~kBinEaCgStart, "pattern mismatch in Read EACG"); + return newEaCg; +} + +void BinaryMplImport::ReadSymField() { + SkipTotalSize(); + int32 size = ReadInt(); + for (int64 i = 0; i < size; i++) { + (void)InSymbol(nullptr); + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinSymStart, "pattern mismatch in Read SYM"); + return; +} + +void BinaryMplImport::ReadSymTabField() { + SkipTotalSize(); + int32 
size = ReadInt(); + for (int64 i = 0; i < size; i++) { + std::string str; + ReadAsciiStr(str); + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinSymTabStart, "pattern mismatch in Read TYPE"); + return; +} + +void BinaryMplImport::ReadContentField() { + SkipTotalSize(); + + int32 size = ReadInt(); + int64 item; + int32 offset; + for (int32 i = 0; i < size; ++i) { + item = ReadNum(); + offset = ReadInt(); + content[item] = offset; + } + CHECK_FATAL(ReadNum() == ~kBinContentStart, "pattern mismatch in Read CONTENT"); +} + +void BinaryMplImport::Jump2NextField() { + uint32 totalSize = static_cast(ReadInt()); + bufI += (totalSize - sizeof(uint32)); + ReadNum(); // skip end tag for this field +} + +bool BinaryMplImport::ImportForSrcLang(const std::string &fname, MIRSrcLang &srcLang) { + Reset(); + ReadFileAt(fname, 0); + int32 magic = ReadInt(); + if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) { + buf.clear(); + return false; + } + importingFromMplt = kMpltMagicNumber == magic; + int64 fieldID = ReadNum(); + while (fieldID != kBinFinish) { + switch (fieldID) { + case kBinHeaderStart: { + SkipTotalSize(); + (void)ReadNum(); // skip flavor + srcLang = static_cast(ReadNum()); + return true; + } + default: { + Jump2NextField(); + break; + } + } + fieldID = ReadNum(); + } + return false; +} + +bool BinaryMplImport::Import(const std::string &fname, bool readSymbols, bool readSe) { + Reset(); + ReadFileAt(fname, 0); + int32 magic = ReadInt(); + if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) { + buf.clear(); + return false; + } + importingFromMplt = kMpltMagicNumber == magic; + int64 fieldID = ReadNum(); + if (readSe) { + while (fieldID != kBinFinish) { + if (fieldID == kBinSeStart) { +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "read SE of : " << fname << '\n'; +#endif + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadSeField(); + Jump2NextField(); + } else if (fieldID == kBinEaStart) { + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadEaField(); + Jump2NextField(); + } else { + Jump2NextField(); + } + fieldID = ReadNum(); + } + return true; + } + while (fieldID != kBinFinish) { + switch (fieldID) { + case kBinContentStart: { + ReadContentField(); + break; + } + case kBinStrStart: { + ReadStrField(); + break; + } + case kBinHeaderStart: { + ReadHeaderField(); + break; + } + case kBinTypeStart: { + ReadTypeField(); + break; + } + case kBinSymStart: { + if (readSymbols) { + ReadSymField(); + } else { + Jump2NextField(); + } + break; + } + case kBinSymTabStart: { + ReadSymTabField(); + break; + } + case kBinCgStart: { + if (readSymbols) { +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "read CG of : " << fname << '\n'; +#endif + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.inIPA = true; + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadCgField(); + tmp.UpdateMethodSymbols(); + Jump2NextField(); + } else { + Jump2NextField(); + } + break; + } + case kBinSeStart: { + Jump2NextField(); + break; + } + case kBinEaStart: { + if (readSymbols) { +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "read EA of : " << fname << '\n'; +#endif + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadEaField(); + Jump2NextField(); + } else { + Jump2NextField(); + } + break; + } + case kBinFunctionBodyStart: { + ReadFunctionBodyField(); + break; + } + 
+            default:
+                CHECK_FATAL(false, "should not run here");
+        }
+        fieldID = ReadNum();
+    }
+    UpdateMethodSymbols();
+    SetupEHRootType();
+    return true;
+}
+}  // namespace maple
diff --git a/ecmascript/mapleall/maple_ir/src/debug_info.cpp b/ecmascript/mapleall/maple_ir/src/debug_info.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..963323bdc690d50f7daac3b0f95fd2c02345fa9a
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/src/debug_info.cpp
@@ -0,0 +1,1455 @@
+/*
+ * Copyright (C) [2021] Futurewei Technologies, Inc. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan Permissive Software License v2.
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0.
+ * You may obtain a copy of MulanPSL - 2.0 at:
+ *
+ *     https://opensource.org/licenses/MulanPSL-2.0
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the MulanPSL - 2.0 for more details.
+ */
+#include "debug_info.h"
+#include "mir_builder.h"
+#include "printing.h"
+#include "maple_string.h"
+#include "global_tables.h"
+#include "mir_type.h"
+#include <cstring>
+#include "securec.h"
+#include "mpl_logging.h"
+#include "version.h"
+
+namespace maple {
+extern const char *GetDwTagName(unsigned n);
+extern const char *GetDwFormName(unsigned n);
+extern const char *GetDwAtName(unsigned n);
+extern const char *GetDwOpName(unsigned n);
+extern const char *GetDwAteName(unsigned n);
+extern const char *GetDwCfaName(unsigned n);
+extern DwAte GetAteFromPTY(PrimType pty);
+
+constexpr uint32 kIndx2 = 2;
+constexpr uint32 kStructDBGSize = 8888;
+
+// DBGDie methods
+DBGDie::DBGDie(MIRModule *m, DwTag tag)
+    : module(m),
+      tag(tag),
+      id(m->GetDbgInfo()->GetMaxId()),
+      withChildren(false),
+      sibling(nullptr),
+      firstChild(nullptr),
+      abbrevId(0),
+      tyIdx(0),
+      offset(0),
+      size(0),
+      attrVec(m->GetMPAllocator().Adapter()),
+      subDieVec(m->GetMPAllocator().Adapter()) {
+    if (module->GetDbgInfo()->GetParentDieSize()) {
+        parent = module->GetDbgInfo()->GetParentDie();
+    } else {
+        parent = nullptr;
+    }
+    m->GetDbgInfo()->SetIdDieMap(m->GetDbgInfo()->GetIncMaxId(), this);
+    attrVec.clear();
+    subDieVec.clear();
+}
+
+void DBGDie::ResetParentDie() {
+    module->GetDbgInfo()->ResetParentDie();
+}
+
+DBGDieAttr *DBGDie::AddAttr(DwAt at, DwForm form, uint64 val) {
+    // collect strps which need label
+    if (form == DW_FORM_strp) {
+        module->GetDbgInfo()->AddStrps(static_cast<uint32>(val));
+    }
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, val);
+    AddAttr(attr);
+    return attr;
+}
+
+DBGDieAttr *DBGDie::AddSimpLocAttr(DwAt at, DwForm form, uint64 val) {
+    DBGExprLoc *p = module->GetMemPool()->New<DBGExprLoc>(module, DW_OP_fbreg);
+    if (val != kDbgDefaultVal) {
+        p->AddSimpLocOpnd(val);
+    }
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast<uint64>(p));
+    AddAttr(attr);
+    return attr;
+}
+
+DBGDieAttr *DBGDie::AddGlobalLocAttr(DwAt at, DwForm form, uint64 val) {
+    DBGExprLoc *p = module->GetMemPool()->New<DBGExprLoc>(module, DW_OP_addr);
+    p->SetGvarStridx(static_cast<uint32>(val));
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast<uint64>(p));
+    AddAttr(attr);
+    return attr;
+}
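The attribute helpers defined next share one discipline: AddAttr silently drops a second attribute with the same DW_AT key, so the first writer wins, while the SetAttr overloads only mutate an attribute that already exists and report whether they found one. A small sketch of that contract, with an illustrative container rather than the DBGDieAttr type:

```
#include <cstdint>
#include <utility>
#include <vector>

// Sketch of the DBGDie attribute-list discipline: duplicate DW_AT keys are
// ignored on insert, and updates succeed only for keys already present.
struct Die {
    std::vector<std::pair<int, uint64_t>> attrs;  // (DW_AT key, value)

    void AddAttr(int at, uint64_t val) {
        for (auto &a : attrs) {
            if (a.first == at) return;  // duplicate key: keep the first value
        }
        attrs.emplace_back(at, val);
    }
    bool SetAttr(int at, uint64_t val) {
        for (auto &a : attrs) {
            if (a.first == at) { a.second = val; return true; }
        }
        return false;  // nothing updated; the caller may AddAttr instead
    }
};

int main() {
    Die d;
    d.AddAttr(3, 10);
    d.AddAttr(3, 99);  // ignored: the key already exists
    return d.SetAttr(3, 42) && d.attrs[0].second == 42 ? 0 : 1;
}
```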
+DBGDieAttr *DBGDie::AddFrmBaseAttr(DwAt at, DwForm form) {
+    DBGExprLoc *p = module->GetMemPool()->New<DBGExprLoc>(module, DW_OP_call_frame_cfa);
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast<uint64>(p));
+    AddAttr(attr);
+    return attr;
+}
+
+DBGExprLoc *DBGDie::GetExprLoc() {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == DW_AT_location) {
+            return it->GetPtr();
+        }
+    }
+    return nullptr;
+}
+
+bool DBGDie::SetAttr(DwAt attr, uint64 val) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetU(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, int val) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetI(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, uint32 val) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetId(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, int64 val) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetJ(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, float val) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetF(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, double val) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetD(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, DBGExprLoc *ptr) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetPtr(ptr);
+            return true;
+        }
+    }
+    return false;
+}
+
+void DBGDie::AddAttr(DBGDieAttr *attr) {
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr->GetDwAt()) {
+            return;  // duplicate DW_AT: the first attribute wins
+        }
+    }
+    attrVec.push_back(attr);
+}
+
+void DBGDie::AddSubVec(DBGDie *die) {
+    if (die == nullptr) {
+        return;
+    }
+    for (auto it : subDieVec) {
+        if (it->GetId() == die->GetId()) {
+            return;
+        }
+    }
+    subDieVec.push_back(die);
+    die->parent = this;
+}
+
+// DBGAbbrevEntry methods
+DBGAbbrevEntry::DBGAbbrevEntry(MIRModule *m, DBGDie *die) : attrPairs(m->GetMPAllocator().Adapter()) {
+    tag = die->GetTag();
+    abbrevId = 0;
+    withChildren = die->GetWithChildren();
+    for (auto it : die->GetAttrVec()) {
+        attrPairs.push_back(it->GetDwAt());
+        attrPairs.push_back(it->GetDwForm());
+    }
+}
+
+bool DBGAbbrevEntry::Equalto(DBGAbbrevEntry *entry) {
+    if (attrPairs.size() != entry->attrPairs.size()) {
+        return false;
+    }
+    if (withChildren != entry->GetWithChildren()) {
+        return false;
+    }
+    for (uint32 i = 0; i < attrPairs.size(); i++) {
+        if (attrPairs[i] != entry->attrPairs[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// DebugInfo methods
+void DebugInfo::Init() {
+    mplSrcIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(module->GetFileName());
+    compUnit = module->GetMemPool()->New<DBGDie>(module, DW_TAG_compile_unit);
+    module->SetWithDbgInfo(true);
+    ResetParentDie();
+    if (module->GetSrcLang() == kSrcLangC) {
+        varPtrPrefix = "";
+    }
+}
+
+void DebugInfo::SetupCU() {
+    compUnit->SetWithChildren(true);
+    /* Add the Producer (Compiler) Information */
+    const char *producer = strdup((std::string("Maple Version ") + Version::GetVersionStr()).c_str());
+    GStrIdx strIdx = module->GetMIRBuilder()->GetOrCreateStringIndex(producer);
+    free(const_cast<char *>(producer));  // strdup allocates with malloc, so release with free, not delete
+    producer = nullptr;
+    compUnit->AddAttr(DW_AT_producer, DW_FORM_strp, strIdx.GetIdx());
+
+    /* Source Language */
+    compUnit->AddAttr(DW_AT_language, DW_FORM_data4, DW_LANG_C99);
+
+    /* Add the compiled source file information */
+    compUnit->AddAttr(DW_AT_name, DW_FORM_strp, mplSrcIdx.GetIdx());
+    strIdx = module->GetMIRBuilder()->GetOrCreateStringIndex("/to/be/done/current/path");
+    compUnit->AddAttr(DW_AT_comp_dir, DW_FORM_strp, strIdx.GetIdx());
+
+    compUnit->AddAttr(DW_AT_low_pc, DW_FORM_addr,
kDbgDefaultVal); + compUnit->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal); + + compUnit->AddAttr(DW_AT_stmt_list, DW_FORM_sec_offset, kDbgDefaultVal); +} + +void DebugInfo::AddScopeDie(MIRScope *scope) { + if (!scope->NeedEmitAliasInfo()) { + return; + } + + if (scope->GetLevel() != 0) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_lexical_block); + die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal); + die->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal); + + // add die to parent + GetParentDie()->AddSubVec(die); + + PushParentDie(die); + } + + // process aliasVarMap + AddAliasDies(scope->GetAliasVarMap()); + + if (scope->GetSubScopes().size() > 0) { + // process subScopes + for (auto it : scope->GetSubScopes()) { + AddScopeDie(it); + } + } + + if (scope->GetLevel() != 0) { + PopParentDie(); + } +} + +void DebugInfo::AddAliasDies(MapleMap &aliasMap) { + MIRFunction *func = GetCurFunction(); + for (auto &i : aliasMap) { + // maple var + MIRSymbol *var = nullptr; + if (i.second.isLocal) { + var = func->GetSymTab()->GetSymbolFromStrIdx(i.second.mplStrIdx); + } else { + var = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(i.second.mplStrIdx); + } + DEBUG_ASSERT(var, "can not find symbol"); + + // create alias die using maple var except name + DBGDie *vdie = CreateVarDie(var, i.first); + + GetParentDie()->AddSubVec(vdie); + + // add alias var name to debug_str section + strps.insert(i.first.GetIdx()); + } +} + +void DebugInfo::Finish() { + SetupCU(); + FillTypeAttrWithDieId(); + // build tree from root DIE compUnit + BuildDieTree(); + BuildAbbrev(); + ComputeSizeAndOffsets(); +} + +void DebugInfo::BuildDebugInfo() { + DEBUG_ASSERT(module->GetDbgInfo(), "null dbgInfo"); + + Init(); + + // containner types + for (auto it : module->GetTypeNameTab()->GetGStrIdxToTyIdxMap()) { + GStrIdx strIdx = it.first; + TyIdx tyIdx = it.second; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx()); + + switch (type->GetKind()) { + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeUnion: + { + (void) GetOrCreateStructTypeDie(type); + break; + } + default: + LogInfo::MapleLogger() << "named type " << GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx).c_str() + << "\n"; + break; + } + } + + for (size_t i = 0; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast(i)); + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused || + mirSymbol->GetStorageClass() == kScExtern) { + continue; + } + if (module->IsCModule() && mirSymbol->IsGlobal() && mirSymbol->IsVar()) { + DBGDie *vdie = CreateVarDie(mirSymbol); + compUnit->AddSubVec(vdie); + } + } + + // setup debug info for functions + for (auto func : GlobalTables::GetFunctionTable().GetFuncTable()) { + // the first one in funcTable is nullptr + if (!func) { + continue; + } + SetCurFunction(func); + // function decl + if (stridxDieIdMap.find(func->GetNameStrIdx().GetIdx()) == stridxDieIdMap.end()) { + DBGDie *fdie = GetOrCreateFuncDeclDie(func); + if (!func->GetClassTyIdx().GetIdx() && func->GetBody()) { + compUnit->AddSubVec(fdie); + } + } + // function def + if (funcDefStrIdxDieIdMap.find(func->GetNameStrIdx().GetIdx()) == funcDefStrIdxDieIdMap.end()) { + DBGDie *fdie = GetOrCreateFuncDefDie(func, 0); + if (!func->GetClassTyIdx().GetIdx() && func->GetBody()) { 
+ compUnit->AddSubVec(fdie); + } + } + } + + // finalize debug info + Finish(); +} + +DBGDieAttr *DebugInfo::CreateAttr(DwAt at, DwForm form, uint64 val) { + DBGDieAttr *attr = module->GetMemPool()->New(kDwAt); + attr->SetDwAt(at); + attr->SetDwForm(form); + attr->SetU(val); + return attr; +} + +void DebugInfo::SetLocalDie(MIRFunction *func, GStrIdx strIdx, const DBGDie *die) { + (funcLstrIdxDieIdMap[func])[strIdx.GetIdx()] = die->GetId(); +} + +DBGDie *DebugInfo::GetLocalDie(MIRFunction *func, GStrIdx strIdx) { + uint32 id = (funcLstrIdxDieIdMap[func])[strIdx.GetIdx()]; + return idDieMap[id]; +} + +void DebugInfo::SetLocalDie(GStrIdx strIdx, const DBGDie *die) { + (funcLstrIdxDieIdMap[GetCurFunction()])[strIdx.GetIdx()] = die->GetId(); +} + +DBGDie *DebugInfo::GetLocalDie(GStrIdx strIdx) { + uint32 id = (funcLstrIdxDieIdMap[GetCurFunction()])[strIdx.GetIdx()]; + return idDieMap[id]; +} + +void DebugInfo::SetLabelIdx(MIRFunction *func, GStrIdx strIdx, LabelIdx labidx) { + (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()] = labidx; +} + +LabelIdx DebugInfo::GetLabelIdx(MIRFunction *func, GStrIdx strIdx) { + LabelIdx labidx = (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()]; + return labidx; +} + +void DebugInfo::SetLabelIdx(GStrIdx strIdx, LabelIdx labidx) { + (funcLstrIdxLabIdxMap[GetCurFunction()])[strIdx.GetIdx()] = labidx; +} + +LabelIdx DebugInfo::GetLabelIdx(GStrIdx strIdx) { + LabelIdx labidx = (funcLstrIdxLabIdxMap[GetCurFunction()])[strIdx.GetIdx()]; + return labidx; +} + +DBGDie *DebugInfo::CreateFormalParaDie(MIRFunction *func, MIRType *type, MIRSymbol *sym) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_formal_parameter); + + (void)GetOrCreateTypeDie(type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx()); + + /* var Name */ + if (sym) { + die->AddAttr(DW_AT_name, DW_FORM_strp, sym->GetNameStrIdx().GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column()); + die->AddSimpLocAttr(DW_AT_location, DW_FORM_exprloc, kDbgDefaultVal); + SetLocalDie(func, sym->GetNameStrIdx(), die); + } + return die; +} + +DBGDie *DebugInfo::GetOrCreateLabelDie(LabelIdx labid) { + MIRFunction *func = GetCurFunction(); + CHECK(labid < func->GetLabelTab()->GetLabelTableSize(), "index out of range in DebugInfo::GetOrCreateLabelDie"); + GStrIdx strid = func->GetLabelTab()->GetSymbolFromStIdx(labid); + if ((funcLstrIdxDieIdMap[func]).size() && + (funcLstrIdxDieIdMap[func]).find(strid.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) { + return GetLocalDie(strid); + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_label); + die->AddAttr(DW_AT_name, DW_FORM_strp, strid.GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lexer->GetLineNum()); + die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal); + GetParentDie()->AddSubVec(die); + SetLocalDie(strid, die); + SetLabelIdx(strid, labid); + return die; +} + +DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym) { + // filter vtab + if (sym->GetName().find(VTAB_PREFIX_STR) == 0) { + return nullptr; + } + + if (sym->GetName().find(GCTIB_PREFIX_STR) == 0) { + return nullptr; + } + + if (sym->GetStorageClass() == kScFormal) { + return nullptr; + } + + bool isLocal = sym->IsLocal(); + GStrIdx strIdx = sym->GetNameStrIdx(); + + if (isLocal) { + MIRFunction *func = 
GetCurFunction(); + if ((funcLstrIdxDieIdMap[func]).size() && + (funcLstrIdxDieIdMap[func]).find(strIdx.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) { + return GetLocalDie(strIdx); + } + } else { + if (stridxDieIdMap.find(strIdx.GetIdx()) != stridxDieIdMap.end()) { + uint32 id = stridxDieIdMap[strIdx.GetIdx()]; + return idDieMap[id]; + } + } + + DBGDie *die = CreateVarDie(sym, strIdx); + + GetParentDie()->AddSubVec(die); + if (isLocal) { + SetLocalDie(strIdx, die); + } else { + stridxDieIdMap[strIdx.GetIdx()] = die->GetId(); + } + + return die; +} + +DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym, GStrIdx strIdx) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_variable); + + /* var Name */ + die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column()); + + bool isLocal = sym->IsLocal(); + if (isLocal) { + die->AddSimpLocAttr(DW_AT_location, DW_FORM_exprloc, kDbgDefaultVal); + } else { + // global var just use its name as address in .s + uint64 idx = strIdx.GetIdx(); + if ((sym->IsReflectionClassInfo() && !sym->IsReflectionArrayClassInfo()) || sym->IsStatic()) { + std::string ptrName = varPtrPrefix + sym->GetName(); + idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(ptrName).GetIdx(); + } + die->AddGlobalLocAttr(DW_AT_location, DW_FORM_exprloc, idx); + } + + MIRType *type = sym->GetType(); + (void)GetOrCreateTypeDie(type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx()); + + return die; +} + +DBGDie *DebugInfo::GetOrCreateFuncDeclDie(MIRFunction *func) { + uint32 funcnameidx = func->GetNameStrIdx().GetIdx(); + if (stridxDieIdMap.find(funcnameidx) != stridxDieIdMap.end()) { + uint32 id = stridxDieIdMap[funcnameidx]; + return idDieMap[id]; + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_subprogram); + stridxDieIdMap[funcnameidx] = die->GetId(); + + die->AddAttr(DW_AT_external, DW_FORM_flag_present, 1); + + // Function Name + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + + die->AddAttr(DW_AT_name, DW_FORM_strp, funcnameidx); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column()); + + // Attributes for DW_AT_accessibility + uint32 access = 0; + if (func->IsPublic()) { + access = DW_ACCESS_public; + } else if (func->IsPrivate()) { + access = DW_ACCESS_private; + } else if (func->IsProtected()) { + access = DW_ACCESS_protected; + } + if (access) { + die->AddAttr(DW_AT_accessibility, DW_FORM_data4, access); + } + + die->AddAttr(DW_AT_GNU_all_tail_call_sites, DW_FORM_flag_present, kDbgDefaultVal); + + PushParentDie(die); + + // formal parameter + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFormalDefAt(i).formalTyIdx); + DBGDie *param = CreateFormalParaDie(func, type, nullptr); + die->AddSubVec(param); + } + + PopParentDie(); + + return die; +} + +bool LIsCompilerGenerated(const MIRFunction *func) { + return ((func->GetName().c_str())[0] != 'L'); +} + +DBGDie *DebugInfo::GetOrCreateFuncDefDie(MIRFunction *func, uint32 lnum) { + uint32 funcnameidx = func->GetNameStrIdx().GetIdx(); + if 
(funcDefStrIdxDieIdMap.find(funcnameidx) != funcDefStrIdxDieIdMap.end()) { + uint32 id = funcDefStrIdxDieIdMap[funcnameidx]; + return idDieMap[id]; + } + + DBGDie *funcdecldie = GetOrCreateFuncDeclDie(func); + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_subprogram); + // update funcDefStrIdxDieIdMap and leave stridxDieIdMap for the func decl + funcDefStrIdxDieIdMap[funcnameidx] = die->GetId(); + + die->AddAttr(DW_AT_specification, DW_FORM_ref4, funcdecldie->GetId()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lnum); + + if (!func->IsReturnVoid()) { + auto returnType = func->GetReturnType(); + (void)GetOrCreateTypeDie(returnType); + die->AddAttr(DW_AT_type, DW_FORM_ref4, returnType->GetTypeIndex().GetIdx()); + } + + die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal); + die->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal); + die->AddFrmBaseAttr(DW_AT_frame_base, DW_FORM_exprloc); + if (!func->IsStatic() && !LIsCompilerGenerated(func)) { + die->AddAttr(DW_AT_object_pointer, DW_FORM_ref4, kDbgDefaultVal); + } + die->AddAttr(DW_AT_GNU_all_tail_call_sites, DW_FORM_flag_present, kDbgDefaultVal); + + PushParentDie(die); + + // formal parameter + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFormalDefAt(i).formalTyIdx); + DBGDie *pdie = CreateFormalParaDie(func, type, func->GetFormalDefAt(i).formalSym); + die->AddSubVec(pdie); + } + + if (func->GetSymTab()) { + // local variables, start from 1 + for (uint32 i = 1; i < func->GetSymTab()->GetSymbolTableSize(); i++) { + MIRSymbol *var = func->GetSymTab()->GetSymbolFromStIdx(i); + DBGDie *vdie = CreateVarDie(var); + die->AddSubVec(vdie); + } + } + + // add scope die + AddScopeDie(func->GetScope()); + + PopParentDie(); + + return die; +} + +DBGDie *DebugInfo::GetOrCreatePrimTypeDie(MIRType *ty) { + PrimType pty = ty->GetPrimType(); + uint32 tid = static_cast(pty); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_base_type); + die->SetTyIdx(static_cast(pty)); + + if (ty->GetNameStrIdx().GetIdx() == 0) { + const char *name = GetPrimTypeName(ty->GetPrimType()); + std::string pname = std::string(name); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(pname); + ty->SetNameStrIdx(strIdx); + } + + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(pty)); + die->AddAttr(DW_AT_encoding, DW_FORM_data4, GetAteFromPTY(pty)); + die->AddAttr(DW_AT_name, DW_FORM_strp, ty->GetNameStrIdx().GetIdx()); + + compUnit->AddSubVec(die); + tyIdxDieIdMap[static_cast(pty)] = die->GetId(); + return die; +} + +DBGDie *DebugInfo::CreatePointedFuncTypeDie(MIRFuncType *ftype) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_subroutine_type); + + die->AddAttr(DW_AT_prototyped, DW_FORM_data4, static_cast(ftype->GetParamTypeList().size() > 0)); + MIRType *rtype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftype->GetRetTyIdx()); + (void)GetOrCreateTypeDie(rtype); + die->AddAttr(DW_AT_type, DW_FORM_ref4, ftype->GetRetTyIdx().GetIdx()); + + compUnit->AddSubVec(die); + + for (uint32 i = 0; i < ftype->GetParamTypeList().size(); i++) { + DBGDie *paramdie = module->GetMemPool()->New(module, DW_TAG_formal_parameter); + MIRType *ptype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftype->GetNthParamType(i)); + (void)GetOrCreateTypeDie(ptype); + paramdie->AddAttr(DW_AT_type, DW_FORM_ref4, 
ftype->GetNthParamType(i).GetIdx()); + die->AddSubVec(paramdie); + } + + tyIdxDieIdMap[ftype->GetTypeIndex().GetIdx()] = die->GetId(); + return die; +} + +DBGDie *DebugInfo::GetOrCreateTypeDie(MIRType *type) { + if (type == nullptr) { + return nullptr; + } + + uint32 tid = type->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + uint32 sid = type->GetNameStrIdx().GetIdx(); + if (sid) + if (stridxDieIdMap.find(sid) != stridxDieIdMap.end()) { + uint32 id = stridxDieIdMap[sid]; + return idDieMap[id]; + } + + if (type->GetTypeIndex() == static_cast(type->GetPrimType())) { + return GetOrCreatePrimTypeDie(type); + } + + DBGDie *die = nullptr; + switch (type->GetKind()) { + case kTypePointer: { + MIRPtrType *ptype = static_cast(type); + die = GetOrCreatePointTypeDie(ptype); + break; + } + case kTypeFunction: { + MIRFuncType *ftype = static_cast(type); + die = CreatePointedFuncTypeDie(ftype); + break; + } + case kTypeArray: + case kTypeFArray: + case kTypeJArray: { + MIRArrayType *atype = static_cast(type); + die = GetOrCreateArrayTypeDie(atype); + break; + } + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: { + die = GetOrCreateStructTypeDie(type); + break; + } + case kTypeBitField: + break; + default: + CHECK_FATAL(false, "TODO: support type"); + break; + } + + return die; +} + +DBGDie *DebugInfo::GetOrCreatePointTypeDie(const MIRPtrType *ptrtype) { + uint32 tid = ptrtype->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + MIRType *type = ptrtype->GetPointedType(); + // for <* void> + if ((type != nullptr) && + (type->GetPrimType() == PTY_void || type->GetKind() == kTypeFunction)) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_pointer_type); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize); + if (type->GetKind() == kTypeFunction) { + DBGDie *pdie = GetOrCreateTypeDie(type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx()); + tyIdxDieIdMap[type->GetTypeIndex().GetIdx()] = pdie->GetId(); + } + tyIdxDieIdMap[ptrtype->GetTypeIndex().GetIdx()] = die->GetId(); + compUnit->AddSubVec(die); + return die; + } + + (void)GetOrCreateTypeDie(type); + if (typeDefTyIdxMap.find(type->GetTypeIndex().GetIdx()) != typeDefTyIdxMap.end()) { + uint32 tyIdx = typeDefTyIdxMap[type->GetTypeIndex().GetIdx()]; + if (pointedPointerMap.find(tyIdx) != pointedPointerMap.end()) { + uint32 tyid = pointedPointerMap[tyIdx]; + if (tyIdxDieIdMap.find(tyid) != tyIdxDieIdMap.end()) { + uint32 dieid = tyIdxDieIdMap[tyid]; + DBGDie *die = idDieMap[dieid]; + return die; + } + } + } + + // update incomplete type from stridxDieIdMap to tyIdxDieIdMap + MIRStructType *stype = static_cast(type); + if ((stype != nullptr) && stype->IsIncomplete()) { + uint32 sid = stype->GetNameStrIdx().GetIdx(); + if (stridxDieIdMap.find(sid) != stridxDieIdMap.end()) { + uint32 dieid = stridxDieIdMap[sid]; + if (dieid) { + tyIdxDieIdMap[stype->GetTypeIndex().GetIdx()] = dieid; + } + } + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_pointer_type); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize); + // fill with type idx instead of typedie->id to avoid nullptr typedie of + // forward reference of class types + die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx()); + 
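GetOrCreateTypeDie and GetOrCreatePointTypeDie funnel through tyIdxDieIdMap so each type index maps to exactly one die, and the entry is registered before recursing into pointee or element types, which is how forward and circular references avoid duplicating dies. A memoization sketch with illustrative names:

```
#include <iostream>
#include <map>

// Sketch of the tyIdxDieIdMap discipline: look the type index up first,
// and register the new die id before any recursion could re-enter.
struct Die { int id; };

std::map<int, int> tyIdxDieIdMap;  // type index -> die id
std::map<int, Die> idDieMap;       // die id -> die
int nextDieId = 1;

Die &GetOrCreateTypeDie(int tyIdx) {
    auto it = tyIdxDieIdMap.find(tyIdx);
    if (it != tyIdxDieIdMap.end()) {
        return idDieMap[it->second];  // cache hit: one die per type
    }
    int id = nextDieId++;
    tyIdxDieIdMap[tyIdx] = id;        // register before recursing
    idDieMap[id] = Die{id};
    return idDieMap[id];
}

int main() {
    Die &a = GetOrCreateTypeDie(7);
    Die &b = GetOrCreateTypeDie(7);
    std::cout << (&a == &b) << "\n";  // 1: memoized
}
```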
tyIdxDieIdMap[ptrtype->GetTypeIndex().GetIdx()] = die->GetId(); + + compUnit->AddSubVec(die); + + return die; +} + +DBGDie *DebugInfo::GetOrCreateArrayTypeDie(const MIRArrayType *arraytype) { + uint32 tid = arraytype->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + MIRType *type = arraytype->GetElemType(); + (void)GetOrCreateTypeDie(type); + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_array_type); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize); + // fill with type idx instead of typedie->id to avoid nullptr typedie of + // forward reference of class types + die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx()); + tyIdxDieIdMap[arraytype->GetTypeIndex().GetIdx()] = die->GetId(); + + compUnit->AddSubVec(die); + + // maple uses array of 1D array to represent 2D array + // so only one DW_TAG_subrange_type entry is needed + DBGDie *rangedie = module->GetMemPool()->New(module, DW_TAG_subrange_type); + (void)GetOrCreatePrimTypeDie(GlobalTables::GetTypeTable().GetUInt32()); + rangedie->AddAttr(DW_AT_type, DW_FORM_ref4, PTY_u32); + rangedie->AddAttr(DW_AT_upper_bound, DW_FORM_data4, arraytype->GetSizeArrayItem(0)); + + die->AddSubVec(rangedie); + + return die; +} + +DBGDie *DebugInfo::CreateFieldDie(maple::FieldPair pair, uint32 lnum) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_member); + die->AddAttr(DW_AT_name, DW_FORM_strp, pair.first.GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lnum); + + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pair.second.first); + (void)GetOrCreateTypeDie(type); + // fill with type idx instead of typedie->id to avoid nullptr typedie of + // forward reference of class types + die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx()); + + die->AddAttr(DW_AT_data_member_location, DW_FORM_data4, kDbgDefaultVal); + + return die; +} + +DBGDie *DebugInfo::CreateBitfieldDie(const MIRBitFieldType *type, GStrIdx sidx, uint32 prevBits) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_member); + + die->AddAttr(DW_AT_name, DW_FORM_strp, sidx.GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, 0); + + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(type->GetPrimType()); + (void)GetOrCreateTypeDie(ty); + die->AddAttr(DW_AT_type, DW_FORM_ref4, ty->GetTypeIndex().GetIdx()); + + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(type->GetPrimType())); + die->AddAttr(DW_AT_bit_size, DW_FORM_data4, type->GetFieldSize()); + die->AddAttr(DW_AT_bit_offset, DW_FORM_data4, + GetPrimTypeSize(type->GetPrimType()) * k8BitSize - type->GetFieldSize() - prevBits); + die->AddAttr(DW_AT_data_member_location, DW_FORM_data4, 0); + + return die; +} + +DBGDie *DebugInfo::GetOrCreateStructTypeDie(const MIRType *type) { + DEBUG_ASSERT(type, "null struture type"); + GStrIdx strIdx = type->GetNameStrIdx(); + DEBUG_ASSERT(strIdx.GetIdx(), "struture type missing name"); + + if (tyIdxDieIdMap.find(type->GetTypeIndex().GetIdx()) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[type->GetTypeIndex().GetIdx()]; + return idDieMap[id]; + } + + DBGDie *die = nullptr; + switch (type->GetKind()) { + case kTypeClass: + case kTypeClassIncomplete: + { + const MIRClassType *classtype = static_cast(type); + die = CreateClassTypeDie(strIdx, classtype); + break; 
+ } + case kTypeInterface: + case kTypeInterfaceIncomplete: + { + const MIRInterfaceType *interfacetype = static_cast(type); + die = CreateInterfaceTypeDie(strIdx, interfacetype); + break; + } + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeUnion: + { + const MIRStructType *stype = static_cast(type); + die = CreateStructTypeDie(strIdx, stype, false); + break; + } + default: + LogInfo::MapleLogger() << "named type " << GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx).c_str() + << "\n"; + break; + } + + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(strIdx, type->GetTypeIndex()); + return die; +} + +// shared between struct and union +DBGDie *DebugInfo::CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *structtype, bool update) { + DBGDie *die = nullptr; + + if (update) { + uint32 id = tyIdxDieIdMap[structtype->GetTypeIndex().GetIdx()]; + die = idDieMap[id]; + DEBUG_ASSERT(die, "update type die not exist"); + } else { + DwTag tag = structtype->GetKind() == kTypeStruct ? DW_TAG_structure_type : DW_TAG_union_type; + die = module->GetMemPool()->New(module, tag); + tyIdxDieIdMap[structtype->GetTypeIndex().GetIdx()] = die->GetId(); + } + + if (strIdx.GetIdx()) { + stridxDieIdMap[strIdx.GetIdx()] = die->GetId(); + } + + compUnit->AddSubVec(die); + + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, kStructDBGSize); + die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx()); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, kDbgDefaultVal); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx()); + + PushParentDie(die); + + // fields + uint32 prevBits = 0; + for (size_t i = 0; i < structtype->GetFieldsSize(); i++) { + MIRType *ety = structtype->GetElemType(static_cast(i)); + FieldPair fp = structtype->GetFieldsElemt(i); + if (ety->IsMIRBitFieldType()) { + MIRBitFieldType *bfty = static_cast(ety); + DBGDie *bfdie = CreateBitfieldDie(bfty, fp.first, prevBits); + prevBits += bfty->GetFieldSize(); + die->AddSubVec(bfdie); + } else { + prevBits = 0; + DBGDie *fdie = CreateFieldDie(fp, 0); + die->AddSubVec(fdie); + } + } + + // parentFields + for (size_t i = 0; i < structtype->GetParentFieldsSize(); i++) { + FieldPair fp = structtype->GetParentFieldsElemt(i); + DBGDie *fdie = CreateFieldDie(fp, 0); + die->AddSubVec(fdie); + } + + // member functions decl + for (auto fp : structtype->GetMethods()) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(fp.first.Idx()); + DEBUG_ASSERT((symbol != nullptr) && symbol->GetSKind() == kStFunc, "member function symbol not exist"); + MIRFunction *func = symbol->GetValue().mirFunc; + DEBUG_ASSERT(func, "member function not exist"); + DBGDie *fdie = GetOrCreateFuncDeclDie(func); + die->AddSubVec(fdie); + } + + PopParentDie(); + + // member functions defination, these die are global + for (auto fp : structtype->GetMethods()) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(fp.first.Idx()); + DEBUG_ASSERT(symbol && symbol->GetSKind() == kStFunc, "member function symbol not exist"); + MIRFunction *func = symbol->GetValue().mirFunc; + if (!func->GetBody()) { + continue; + } + DEBUG_ASSERT(func, "member function not exist"); + DBGDie *fdie = GetOrCreateFuncDefDie(func, 0); + compUnit->AddSubVec(fdie); + } + + return die; +} + +DBGDie *DebugInfo::CreateClassTypeDie(GStrIdx strIdx, const MIRClassType *classtype) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_class_type); + + PushParentDie(die); + + // parent + uint32 ptid = classtype->GetParentTyIdx().GetIdx(); + if (ptid) { + MIRType 
*parenttype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classtype->GetParentTyIdx()); + DBGDie *parentdie = GetOrCreateTypeDie(parenttype); + if (parentdie) { + parentdie = module->GetMemPool()->New(module, DW_TAG_inheritance); + parentdie->AddAttr(DW_AT_name, DW_FORM_strp, parenttype->GetNameStrIdx().GetIdx()); + parentdie->AddAttr(DW_AT_type, DW_FORM_ref4, ptid); + + // set to DW_ACCESS_public for now + parentdie->AddAttr(DW_AT_accessibility, DW_FORM_data4, DW_ACCESS_public); + die->AddSubVec(parentdie); + } + } + + PopParentDie(); + + // update common fields + tyIdxDieIdMap[classtype->GetTypeIndex().GetIdx()] = die->GetId(); + DBGDie *die1 = CreateStructTypeDie(strIdx, classtype, true); + DEBUG_ASSERT(die == die1, "ClassTypeDie update wrong die"); + + return die1; +} + +DBGDie *DebugInfo::CreateInterfaceTypeDie(GStrIdx strIdx, const MIRInterfaceType *interfacetype) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_interface_type); + + PushParentDie(die); + + // parents + for (auto it : interfacetype->GetParentsTyIdx()) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it); + DBGDie *parentdie = GetOrCreateTypeDie(type); + if (parentdie) { + continue; + } + parentdie = module->GetMemPool()->New(module, DW_TAG_inheritance); + parentdie->AddAttr(DW_AT_name, DW_FORM_strp, type->GetNameStrIdx().GetIdx()); + parentdie->AddAttr(DW_AT_type, DW_FORM_ref4, it.GetIdx()); + parentdie->AddAttr(DW_AT_data_member_location, DW_FORM_data4, kDbgDefaultVal); + + // set to DW_ACCESS_public for now + parentdie->AddAttr(DW_AT_accessibility, DW_FORM_data4, DW_ACCESS_public); + die->AddSubVec(parentdie); + } + + PopParentDie(); + + // update common fields + tyIdxDieIdMap[interfacetype->GetTypeIndex().GetIdx()] = die->GetId(); + DBGDie *die1 = CreateStructTypeDie(strIdx, interfacetype, true); + DEBUG_ASSERT(die == die1, "InterfaceTypeDie update wrong die"); + + return die1; +} + +uint32 DebugInfo::GetAbbrevId(DBGAbbrevEntryVec *vec, DBGAbbrevEntry *entry) { + for (auto it : vec->GetEntryvec()) { + if (it->Equalto(entry)) { + return it->GetAbbrevId();; + } + } + return 0; +} + +void DebugInfo::BuildAbbrev() { + uint32 abbrevid = 1; + for (uint32 i = 1; i < maxId; i++) { + DBGDie *die = idDieMap[i]; + DBGAbbrevEntry *entry = module->GetMemPool()->New(module, die); + + if (!tagAbbrevMap[die->GetTag()]) { + tagAbbrevMap[die->GetTag()] = module->GetMemPool()->New(module, die->GetTag()); + } + + uint32 id = GetAbbrevId(tagAbbrevMap[die->GetTag()], entry); + if (id) { + // using existing abbrev id + die->SetAbbrevId(id); + } else { + // add entry to vector + entry->SetAbbrevId(abbrevid++); + tagAbbrevMap[die->GetTag()]->GetEntryvec().push_back(entry); + abbrevVec.push_back(entry); + // update abbrevid in die + die->SetAbbrevId(entry->GetAbbrevId()); + } + } + for (uint32 i = 1; i < maxId; i++) { + DBGDie *die = idDieMap[i]; + if (die->GetAbbrevId() == 0) { + LogInfo::MapleLogger() << "0 abbrevId i = " << i << " die->id = " << die->GetId() << std::endl; + } + } +} + +void DebugInfo::BuildDieTree() { + for (auto it : idDieMap) { + if (!it.first) { + continue; + } + DBGDie *die = it.second; + uint32 size = die->GetSubDieVecSize(); + die->SetWithChildren(size > 0); + if (size) { + die->SetFirstChild(die->GetSubDieVecAt(0)); + for (uint32 i = 0; i < size - 1; i++) { + DBGDie *it0 = die->GetSubDieVecAt(i); + DBGDie *it1 = die->GetSubDieVecAt(i + 1); + if (it0->GetSubDieVecSize()) { + it0->SetSibling(it1); + (void)it0->AddAttr(DW_AT_sibling, DW_FORM_ref4, it1->GetId()); + } + } + } + } +} + +void 
DebugInfo::FillTypeAttrWithDieId() { + for (auto it : idDieMap) { + DBGDie *die = it.second; + for (auto at : die->GetAttrVec()) { + if (at->GetDwAt() == DW_AT_type) { + uint32 tid = at->GetId(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(tid)); + if (type) { + uint32 dieid = tyIdxDieIdMap[tid]; + if (dieid) { + at->SetId(dieid); + } else { + LogInfo::MapleLogger() << "dieid not found, typeKind = " << type->GetKind() << " primType = " + << type->GetPrimType() << " nameStrIdx = " << type->GetNameStrIdx().GetIdx() << std::endl; + } + } else { + LogInfo::MapleLogger() << "type not found, tid = " << tid << std::endl; + } + break; + } + } + } +} + +DBGDie *DebugInfo::GetDie(const MIRFunction *func) { + uint32 id = stridxDieIdMap[func->GetNameStrIdx().GetIdx()]; + if (id) { + return idDieMap[id]; + } + return nullptr; +} + +// Methods for calculating Offset and Size of DW_AT_xxx +size_t DBGDieAttr::SizeOf(DBGDieAttr *attr) { + DwForm form = attr->dwForm; + switch (form) { + // case DW_FORM_implicitconst: + case DW_FORM_flag_present: + return 0; // Not handled yet. + case DW_FORM_flag: + case DW_FORM_ref1: + case DW_FORM_data1: + return sizeof(int8); + case DW_FORM_ref2: + case DW_FORM_data2: + return sizeof(int16); + case DW_FORM_ref4: + case DW_FORM_data4: + return sizeof(int32); + case DW_FORM_ref8: + case DW_FORM_ref_sig8: + case DW_FORM_data8: + return sizeof(int64); + case DW_FORM_addr: + return sizeof(int64); + case DW_FORM_sec_offset: + case DW_FORM_ref_addr: + case DW_FORM_strp: + case DW_FORM_GNU_ref_alt: + // case DW_FORM_codeLinestrp: + // case DW_FORM_strp_sup: + // case DW_FORM_ref_sup: + return k4BitSize; // DWARF32, 8 if DWARF64 + + case DW_FORM_string: { + GStrIdx stridx(attr->value.id); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(stridx); + return str.length() + 1 /* terminal null byte */; + } + case DW_FORM_exprloc: { + DBGExprLoc *ptr = attr->value.ptr; + CHECK_FATAL(ptr != (DBGExprLoc*)(0xdeadbeef), "wrong ptr"); + switch (ptr->GetOp()) { + case DW_OP_call_frame_cfa: + return k2BitSize; // size 1 byte + DW_OP_call_frame_cfa 1 byte + case DW_OP_fbreg: { + // DW_OP_fbreg 1 byte + size_t size = 1 + namemangler::GetSleb128Size(ptr->GetFboffset()); + return size + namemangler::GetUleb128Size(size); + } + case DW_OP_addr: { + return namemangler::GetUleb128Size(k9BitSize) + k9BitSize; + } + default: + return k4BitSize; + } + } + default: + CHECK_FATAL(maple::GetDwFormName(form) != nullptr, + "GetDwFormName return null in DebugInfo::FillTypeAttrWithDieId"); + LogInfo::MapleLogger() << "unhandled SizeOf: " << maple::GetDwFormName(form) << std::endl; + return 0; + } +} + +void DebugInfo::ComputeSizeAndOffsets() { + // CU-relative offset is reset to 0 here. + uint32 cuOffset = sizeof(int32_t) // Length of Unit Info + + sizeof(int16) // DWARF version number : 0x0004 + + sizeof(int32) // Offset into Abbrev. Section : 0x0000 + + sizeof(int8); // Pointer Size (in bytes) : 0x08 + + // After returning from this function, the length value is the size + // of the .debug_info section + ComputeSizeAndOffset(compUnit, cuOffset); + debugInfoLength = cuOffset - sizeof(int32_t); +} + +// Compute the size and offset of a DIE. The Offset is relative to start of the CU. +// It returns the offset after laying out the DIE. 
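Both the abbreviation code and several attribute operands above are LEB128-encoded, which is why `DBGDieAttr::SizeOf` and the layout pass defer to `namemangler::GetUleb128Size`. A minimal sketch of that size rule, assuming the standard ULEB128 encoding; `Uleb128Size` here is a hypothetical stand-in, not the real `namemangler` helper:

```
#include <cstddef>
#include <cstdint>

// ULEB128 emits a value in little-endian 7-bit groups, one byte per
// group, so the encoded size is the number of 7-bit chunks required.
static size_t Uleb128Size(uint64_t value) {
    size_t size = 0;
    do {
        value >>= 7;  // one encoded byte consumes 7 payload bits
        ++size;
    } while (value != 0);
    return size;
}
// Uleb128Size(0x7f) == 1, Uleb128Size(0x80) == 2, Uleb128Size(0) == 1
```

This variable-length size is what forces the recursive layout below: a DIE's offset depends on the encoded sizes of everything before it.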
+void DebugInfo::ComputeSizeAndOffset(DBGDie *die, uint32 &cuOffset) { + uint32 cuOffsetOrg = cuOffset; + die->SetOffset(cuOffset); + + // Add the byte size of the abbreviation code + cuOffset += static_cast(namemangler::GetUleb128Size(uint64_t(die->GetAbbrevId()))); + + // Add the byte size of all the DIE attributes. + for (const auto &attr : die->GetAttrVec()) { + cuOffset += static_cast(attr->SizeOf(attr)); + } + + die->SetSize(cuOffset - cuOffsetOrg); + + // Let the children compute their offsets. + if (die->GetWithChildren()) { + uint32 size = die->GetSubDieVecSize(); + + for (uint32 i = 0; i < size; i++) { + DBGDie *childDie = die->GetSubDieVecAt(i); + ComputeSizeAndOffset(childDie, cuOffset); + } + + // Each child chain is terminated with a zero byte, adjust the offset. + cuOffset += sizeof(int8); + } +} + +/* /////////////// + * Dumps + * /////////////// + */ +void DebugInfo::Dump(int indent) { + LogInfo::MapleLogger() << "\n" << std::endl; + LogInfo::MapleLogger() << "maple_debug_information {" + << " Length: " << HEX(debugInfoLength) << std::endl; + compUnit->Dump(indent + 1); + LogInfo::MapleLogger() << "}\n" << std::endl; + LogInfo::MapleLogger() << "maple_debug_abbrev {" << std::endl; + for (uint32 i = 1; i < abbrevVec.size(); i++) { + abbrevVec[i]->Dump(indent + 1); + } + LogInfo::MapleLogger() << "}" << std::endl; + return; +} + +void DBGExprLoc::Dump() { + LogInfo::MapleLogger() << " " << HEX(GetOp()); + for (auto it : simpLoc->GetOpnd()) { + LogInfo::MapleLogger() << " " << HEX(it); + } +} + +void DBGDieAttr::Dump(int indent) { + PrintIndentation(indent); + CHECK_FATAL(GetDwFormName(dwForm) && GetDwAtName(dwAttr), "null ptr check"); + LogInfo::MapleLogger() << GetDwAtName(dwAttr) << " " << GetDwFormName(dwForm); + if (dwForm == DW_FORM_string || dwForm == DW_FORM_strp) { + GStrIdx idx(value.id); + LogInfo::MapleLogger() << " 0x" << std::hex << value.u << std::dec; + LogInfo::MapleLogger() << " \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(idx).c_str() << "\""; + } else if (dwForm == DW_FORM_ref4) { + LogInfo::MapleLogger() << " <" << HEX(value.id) << ">"; + } else if (dwAttr == DW_AT_encoding) { + CHECK_FATAL(GetDwAteName(static_cast(value.u)), "null ptr check"); + LogInfo::MapleLogger() << " " << GetDwAteName(static_cast(value.u)); + } else if (dwAttr == DW_AT_location) { + value.ptr->Dump(); + } else { + LogInfo::MapleLogger() << " 0x" << std::hex << value.u << std::dec; + } + LogInfo::MapleLogger() << std::endl; +} + +void DBGDie::Dump(int indent) { + PrintIndentation(indent); + LogInfo::MapleLogger() << "<" << HEX(id) << "><" << HEX(offset); + LogInfo::MapleLogger() << "><" << HEX(size) << "><" + << "> abbrev id: " << HEX(abbrevId); + CHECK_FATAL(GetDwTagName(tag), "null ptr check"); + LogInfo::MapleLogger() << " (" << GetDwTagName(tag) << ") "; + if (parent) { + LogInfo::MapleLogger() << "parent <" << HEX(parent->GetId()); + } + LogInfo::MapleLogger() << "> {"; + if (tyIdx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(tyIdx)); + if (type->GetKind() == kTypeStruct || type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + MIRStructType *stype = static_cast(type); + LogInfo::MapleLogger() << " # " << stype->GetName(); + } else { + LogInfo::MapleLogger() << " # " << GetPrimTypeName(type->GetPrimType()); + } + } + LogInfo::MapleLogger() << std::endl; + ; + for (auto it : attrVec) { + it->Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "} "; + if (subDieVec.size()) { + LogInfo::MapleLogger() 
<< " {" << std::endl; + for (auto it : subDieVec) { + it->Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}"; + } + LogInfo::MapleLogger() << std::endl; + return; +} + +void DBGAbbrevEntry::Dump(int indent) { + PrintIndentation(indent); + CHECK_FATAL(GetDwTagName(tag), "null ptr check "); + LogInfo::MapleLogger() << "<" << HEX(abbrevId) << "> " << GetDwTagName(tag); + if (GetWithChildren()) { + LogInfo::MapleLogger() << " [with children] {" << std::endl; + } else { + LogInfo::MapleLogger() << " [no children] {" << std::endl; + } + for (uint32 i = 0; i < attrPairs.size(); i += k2BitSize) { + PrintIndentation(indent + 1); + CHECK_FATAL(GetDwAtName(attrPairs[i]) && GetDwFormName(attrPairs[i + 1]), "NULLPTR CHECK"); + + LogInfo::MapleLogger() << " " << GetDwAtName(attrPairs[i]) << " " << GetDwFormName(attrPairs[i + 1]) + << " " << std::endl; + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}" << std::endl; + return; +} + +void DBGAbbrevEntryVec::Dump(int indent) { + for (auto it : entryVec) { + PrintIndentation(indent); + it->Dump(indent); + } + return; +} + +// DBGCompileMsgInfo methods +void DBGCompileMsgInfo::ClearLine(uint32 n) { + errno_t eNum = memset_s(codeLine[n], MAXLINELEN, 0, MAXLINELEN); + if (eNum) { + FATAL(kLncFatal, "memset_s failed"); + } +} + +DBGCompileMsgInfo::DBGCompileMsgInfo() : startLine(0), errPos(0) { + lineNum[0] = 0; + lineNum[1] = 0; + lineNum[kIndx2] = 0; + ClearLine(0); + ClearLine(1); + ClearLine(kIndx2); + errLNum = 0; + errCNum = 0; +} + +void DBGCompileMsgInfo::SetErrPos(uint32 lnum, uint32 cnum) { + errLNum = lnum; + errCNum = cnum; +} + +void DBGCompileMsgInfo::UpdateMsg(uint32 lnum, const char *line) { + size_t size = strlen(line); + if (size > MAXLINELEN - 1) { + size = MAXLINELEN - 1; + } + startLine = (startLine + k2BitSize) % k3BitSize; + ClearLine(startLine); + errno_t eNum = memcpy_s(codeLine[startLine], MAXLINELEN, line, size); + if (eNum) { + FATAL(kLncFatal, "memcpy_s failed"); + } + codeLine[startLine][size] = '\0'; + lineNum[startLine] = lnum; +} + +void DBGCompileMsgInfo::EmitMsg() { + char str[MAXLINELEN + 1]; + + errPos = errCNum; + errPos = (errPos < k2BitSize) ? k2BitSize : errPos; + errPos = (errPos > MAXLINELEN) ? 
MAXLINELEN : errPos;
+    for (uint32 i = 0; i < errPos - 1; i++) {
+        str[i] = ' ';
+    }
+    str[errPos - 1] = '^';
+    str[errPos] = '\0';
+
+    fprintf(stderr, "\n===================================================================\n");
+    fprintf(stderr, "==================");
+    fprintf(stderr, BOLD YEL " Compilation Error Diagnosis " RESET);
+    fprintf(stderr, "==================\n");
+    fprintf(stderr, "===================================================================\n");
+    fprintf(stderr, "line %4u %s\n", lineNum[(startLine + k2BitSize) % k3BitSize],
+            reinterpret_cast<char *>(codeLine[(startLine + k2BitSize) % k3BitSize]));
+    fprintf(stderr, "line %4u %s\n", lineNum[(startLine + 1) % k3BitSize],
+            reinterpret_cast<char *>(codeLine[(startLine + 1) % k3BitSize]));
+    fprintf(stderr, "line %4u %s\n", lineNum[(startLine) % k3BitSize],
+            reinterpret_cast<char *>(codeLine[(startLine) % k3BitSize]));
+    fprintf(stderr, BOLD RED " %s\n" RESET, str);
+    fprintf(stderr, "===================================================================\n");
+}
+}  // namespace maple
diff --git a/ecmascript/mapleall/maple_ir/src/debug_info_util.cpp b/ecmascript/mapleall/maple_ir/src/debug_info_util.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..225479cea9cbba8acedcabf0405f221af0276d01
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/src/debug_info_util.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) [2021] Futurewei Technologies, Inc. All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan Permissive Software License v2.
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0.
+ * You may obtain a copy of MulanPSL - 2.0 at:
+ *
+ *   https://opensource.org/licenses/MulanPSL-2.0
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the MulanPSL - 2.0 for more details.
+ */
+
+#include "mir_builder.h"
+#include "debug_info.h"
+#include "global_tables.h"
+#include "mir_type.h"
+
+namespace maple {
+#define TOSTR(s) #s
+// utility functions to get the string from tag value etc.
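The name-lookup helpers that follow are generated with the classic X-macro pattern: each inclusion of `dwarf.def` is expected to expand one `DW_TAG(ID, NAME)` (or `DW_FORM`, `DW_AT`, ...) entry per definition, stamping out one `case` per value. A reduced, self-contained sketch of the idea; `COLOR_LIST` and friends are illustrative stand-ins, not the real `dwarf.def` contents:

```
#include <cstdio>

#define TOSTR(s) #s
// Stand-in for dwarf.def: one X(ID, NAME) entry per value.
#define COLOR_LIST(X) X(1, red) X(2, green) X(3, blue)

enum Color {
#define COLOR_ENUM(ID, NAME) kColor_##NAME = (ID),
    COLOR_LIST(COLOR_ENUM)
#undef COLOR_ENUM
};

// Same list expanded a second time to build the reverse mapping,
// exactly as GetDwTagName does with dwarf.def below.
const char *GetColorName(unsigned n) {
    switch (n) {
#define COLOR_CASE(ID, NAME) case kColor_##NAME: return TOSTR(kColor_##NAME);
        COLOR_LIST(COLOR_CASE)
#undef COLOR_CASE
        default: return nullptr;
    }
}

int main() {
    std::printf("%s\n", GetColorName(2));  // prints kColor_green
    return 0;
}
```

The payoff is that enum values and their printable names can never drift apart, since both are expanded from the same definition list.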
+// GetDwTagName(unsigned n) +const char *GetDwTagName(unsigned n) { + switch (n) { +#define DW_TAG(ID, NAME) case DW_TAG_##NAME: return TOSTR(DW_TAG_##NAME); +#include "dwarf.def" + case DW_TAG_lo_user: return "DW_TAG_lo_user"; + case DW_TAG_hi_user: return "DW_TAG_hi_user"; + case DW_TAG_user_base: return "DW_TAG_user_base"; + default: return nullptr; + } +} + +// GetDwFormName(unsigned n) +const char *GetDwFormName(unsigned n) { + switch (n) { +#define DW_FORM(ID, NAME) case DW_FORM_##NAME: return TOSTR(DW_FORM_##NAME); +#include "dwarf.def" + case DW_FORM_lo_user: return "DW_FORM_lo_user"; + default: return nullptr; + } +} + +// GetDwAtName(unsigned n) +const char *GetDwAtName(unsigned n) { + switch (n) { +#define DW_AT(ID, NAME) case DW_AT_##NAME: return TOSTR(DW_AT_##NAME); +#include "dwarf.def" + case DW_AT_lo_user: return "DW_AT_lo_user"; + default: return nullptr; + } +} + +// GetDwOpName(unsigned n) +const char *GetDwOpName(unsigned n) { + switch (n) { +#define DW_OP(ID, NAME) case DW_OP_##NAME: return TOSTR(DW_OP_##NAME); +#include "dwarf.def" + case DW_OP_hi_user: return "DW_OP_hi_user"; + default: return nullptr; + } +} + +const unsigned kDwAteVoid = 0x20; +// GetDwAteName(unsigned n) +const char *GetDwAteName(unsigned n) { + switch (n) { +#define DW_ATE(ID, NAME) case DW_ATE_##NAME: return TOSTR(DW_ATE_##NAME); +#include "dwarf.def" + case DW_ATE_lo_user: return "DW_ATE_lo_user"; + case DW_ATE_hi_user: return "DW_ATE_hi_user"; + case kDwAteVoid: return "kDwAteVoid"; + default: return nullptr; + } +} + +DwAte GetAteFromPTY(PrimType pty) { + switch (pty) { + case PTY_u1: + return DW_ATE_boolean; + case PTY_u8: + return DW_ATE_unsigned_char; + case PTY_u16: + case PTY_u32: + case PTY_u64: + return DW_ATE_unsigned; + case PTY_i8: + return DW_ATE_signed_char; + case PTY_i16: + case PTY_i32: + case PTY_i64: + return DW_ATE_signed; + case PTY_f32: + case PTY_f64: + case PTY_f128: + return DW_ATE_float; + case PTY_agg: + case PTY_ref: + case PTY_ptr: + case PTY_a32: + case PTY_a64: + return DW_ATE_address; + case PTY_c64: + case PTY_c128: + return DW_ATE_complex_float; + case PTY_void: + return kDwAteVoid; + default: + return kDwAteVoid; + } +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/driver.cpp b/ecmascript/mapleall/maple_ir/src/driver.cpp new file mode 100644 index 0000000000000000000000000000000000000000..af56fb75743c445101ad3f841b71025d9c1c4c3a --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/driver.cpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include "mir_parser.h" +#include "bin_mplt.h" +#include "opcode_info.h" +#include "mir_function.h" +#include "constantfold.h" +#include "mir_type.h" + +using namespace maple; + +std::unordered_set dumpFuncSet = {}; + +#if MIR_FEATURE_FULL + +int main(int argc, char **argv) { + constexpr int judgeNumber = 2; + constexpr uint32 k2Argv = 2; + constexpr uint32 k10Argv = 10; + constexpr uint32 kNlSize = 5; + if (argc < judgeNumber) { + (void)MIR_PRINTF( + "usage: ./irbuild [-b] [-dumpfunc=] [-srclang=] \n" + " By default, the files are converted to corresponding ascii format.\n" + " If -b is specified, output is binary format instead.\n" + " If -dumpfunc= is specified, only functions with name containing the string is output.\n" + " -dumpfunc= can be specified multiple times to give multiple strings.\n" + " -srclang specifies the source language that produces the mpl file. \n" + " Each output file has .irb added after its file stem.\n"); + exit(1); + } + + std::vector themodule(argc, nullptr); + bool useBinary = false; + bool doConstantFold = false; + MIRSrcLang srcLang = kSrcLangUnknown; + // process the options which must come first + maple::uint32 i = 1; + while (argv[i][0] == '-') { + if (argv[i][1] == 'b' && argv[i][k2Argv] == '\0') { + useBinary = true; + } else if (strcmp(argv[i], "-fold") == 0) { + doConstantFold = true; + } else if (strncmp(argv[i], "-dumpfunc=", k10Argv) == 0 && strlen(argv[i]) > k10Argv) { + std::string funcName(&argv[i][k10Argv]); + dumpFuncSet.insert(funcName); + } else if (strcmp(argv[i], "-srclang=java") == 0) { + srcLang = kSrcLangJava; + } else if (strcmp(argv[i], "-srclang=c") == 0) { + srcLang = kSrcLangC; + } else if (strcmp(argv[i], "-srclang=c++") == 0) { + srcLang = kSrcLangCPlusPlus; + } else { + ERR(kLncErr, "irbuild: unrecognized command line option"); + return 1; + } + ++i; + } + // process the input files + while (i < static_cast(argc)) { + themodule[i] = new maple::MIRModule(argv[i]); + themodule[i]->SetSrcLang(srcLang); + std::string::size_type lastdot = themodule[i]->GetFileName().find_last_of("."); + bool ismplt = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mplt") == 0; + bool istmpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".tmpl") == 0; + bool ismpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mpl\0") == 0; + bool isbpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".bpl\0") == 0; + bool ismbc = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mbc\0") == 0; + bool islmbc = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".lmbc\0") == 0; + if (!ismplt && !istmpl && !ismpl && !isbpl && !ismbc && !islmbc) { + ERR(kLncErr, "irbuild: input must be .mplt or .mpl or .bpl or .mbc or .lmbc or .tmpl file"); + return 1; + } + // input the file + if (ismpl || istmpl) { + maple::MIRParser theparser(*themodule[i]); + if (!theparser.ParseMIR()) { + theparser.EmitError(themodule[i]->GetFileName().c_str()); + return 1; + } + } else { + BinaryMplImport binMplt(*themodule[i]); + binMplt.SetImported(false); + std::string modid = themodule[i]->GetFileName(); + if (!binMplt.Import(modid, true)) { + ERR(kLncErr, "irbuild: cannot open .mplt or .bpl or .mbc or .lmbc file: %s", modid.c_str()); + return 1; + } + } + + // output the file + if (!useBinary) { + themodule[i]->OutputAsciiMpl( + ".irb", (ismpl || isbpl || ismbc || islmbc) ? 
".mpl" : ".tmpl", &dumpFuncSet, true, false); + } else { + BinaryMplt binMplt(*themodule[i]); + std::string modid = themodule[i]->GetFileName(); + binMplt.GetBinExport().not2mplt = ismpl || isbpl || ismbc || islmbc; + std::string filestem = modid.substr(0, lastdot); + binMplt.Export(filestem + ((ismpl || isbpl || ismbc || islmbc) ? ".irb.bpl" : ".irb.mplt"), &dumpFuncSet); + } + ++i; + } + return 0; +} +#else +#warning "this module is compiled without MIR_FEATURE_FULL=1 defined" +#endif // MIR_FEATURE_FULL diff --git a/ecmascript/mapleall/maple_ir/src/global_tables.cpp b/ecmascript/mapleall/maple_ir/src/global_tables.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7da2f9b82f0000bf3a34e5736c40c1ac0eb0132b --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/global_tables.cpp @@ -0,0 +1,496 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "global_tables.h" +#include "mir_type.h" +#include "mir_symbol.h" + +#if MIR_FEATURE_FULL +namespace maple { +MIRType *TypeTable::CreateMirType(uint32 primTypeIdx) const { + MIRTypeKind defaultKind = (primTypeIdx == PTY_constStr ? kTypeConstString : kTypeScalar); + auto primType = static_cast(primTypeIdx); + auto *mirType = new MIRType(defaultKind, primType); + return mirType; +} + +TypeTable::TypeTable() { + // enter the primitve types in type_table_ + typeTable.push_back(static_cast(nullptr)); + DEBUG_ASSERT(typeTable.size() == static_cast(PTY_void), "use PTY_void as the first index to type table"); + uint32 primTypeIdx; + for (primTypeIdx = static_cast(PTY_begin) + 1; primTypeIdx <= static_cast(PTY_end); ++primTypeIdx) { + MIRType *type = CreateMirType(primTypeIdx); + type->SetTypeIndex(TyIdx{ primTypeIdx }); + typeTable.push_back(type); + PutToHashTable(type); + } + if (voidPtrType == nullptr) { + voidPtrType = GetOrCreatePointerType(*GetVoid(), PTY_ptr); + } + lastDefaultTyIdx.SetIdx(primTypeIdx); +} + +void TypeTable::SetTypeWithTyIdx(const TyIdx &tyIdx, MIRType &type) { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + MIRType *oldType = typeTable.at(tyIdx); + typeTable.at(tyIdx) = &type; + if (oldType != nullptr && oldType != &type) { + (void)typeHashTable.erase(oldType); + (void)typeHashTable.insert(&type); + delete oldType; + } +} + +TypeTable::~TypeTable() { + for (auto index = static_cast(PTY_void); index < typeTable.size(); ++index) { + delete typeTable[index]; + typeTable[index] = nullptr; + } +} + +void TypeTable::PutToHashTable(MIRType *mirType) { + (void)typeHashTable.insert(mirType); +} + +void TypeTable::UpdateMIRType(const MIRType &pType, const TyIdx tyIdx) { + MIRType *nType = pType.CopyMIRTypeNode(); + nType->SetTypeIndex(tyIdx); + SetTypeWithTyIdx(tyIdx, *nType); +} + +// used only by bin_mpl_import +void TypeTable::CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, + bool isObject, bool isIncomplete) { + MIRType *nType = pType.CopyMIRTypeNode(); + nType->SetTypeIndex(tyIdxUsed); + typeTable[tyIdxUsed] = nType; + + if 
(pType.IsMIRPtrType()) { + auto &pty = static_cast(pType); + if (pty.GetTypeAttrs() == TypeAttrs()) { + if (pty.GetPrimType() != PTY_ref) { + ptrTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } else { + refTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } + } else { + (void)typeHashTable.insert(nType); + } + } else { + (void)typeHashTable.insert(nType); + } + + GStrIdx stridx = pType.GetNameStrIdx(); + if (stridx != 0) { + module->GetTypeNameTab()->SetGStrIdxToTyIdx(stridx, tyIdxUsed); + module->PushbackTypeDefOrder(stridx); + if (isObject) { + module->AddClass(tyIdxUsed); + if (!isIncomplete) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(stridx, tyIdxUsed); + } + } + } +} + +MIRType *TypeTable::CreateAndUpdateMirTypeNode(MIRType &pType) { + MIRType *nType = pType.CopyMIRTypeNode(); + nType->SetTypeIndex(TyIdx(typeTable.size())); + typeTable.push_back(nType); + + if (pType.IsMIRPtrType()) { + auto &pty = static_cast(pType); + if (pty.GetTypeAttrs() == TypeAttrs()) { + if (pty.GetPrimType() != PTY_ref) { + ptrTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } else { + refTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } + } else { + (void)typeHashTable.insert(nType); + } + } else { + (void)typeHashTable.insert(nType); + } + return nType; +} + +MIRType* TypeTable::GetOrCreateMIRTypeNode(MIRType &pType) { + if (pType.IsMIRPtrType()) { + auto &type = static_cast(pType); + if (type.GetTypeAttrs() == TypeAttrs()) { + auto *pMap = (type.GetPrimType() != PTY_ref ? &ptrTypeMap : &refTypeMap); + auto *otherPMap = (type.GetPrimType() == PTY_ref ? &ptrTypeMap : &refTypeMap); + { + std::shared_lock lock(mtx); + const auto it = pMap->find(type.GetPointedTyIdx()); + if (it != pMap->end()) { + return GetTypeFromTyIdx(it->second); + } + } + std::unique_lock lock(mtx); + CHECK_FATAL(!(type.GetPointedTyIdx().GetIdx() >= kPtyDerived && type.GetPrimType() == PTY_ref && + otherPMap->find(type.GetPointedTyIdx()) != otherPMap->end()), + "GetOrCreateMIRType: ref pointed-to type %d has previous ptr occurrence", + type.GetPointedTyIdx().GetIdx()); + return CreateAndUpdateMirTypeNode(pType); + } + } + { + std::shared_lock lock(mtx); + const auto it = typeHashTable.find(&pType); + if (it != typeHashTable.end()) { + return *it; + } + } + std::unique_lock lock(mtx); + return CreateAndUpdateMirTypeNode(pType); +} + +MIRType *TypeTable::voidPtrType = nullptr; +// get or create a type that pointing to pointedTyIdx +MIRType *TypeTable::GetOrCreatePointerType(const TyIdx &pointedTyIdx, PrimType primType, const TypeAttrs &attrs) { + MIRPtrType type(pointedTyIdx, primType); + type.SetTypeAttrs(attrs); + TyIdx tyIdx = GetOrCreateMIRType(&type); + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreatePointerType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreatePointerType(const MIRType &pointTo, PrimType primType, const TypeAttrs &attrs) { + if (pointTo.GetPrimType() == PTY_constStr) { + primType = PTY_ptr; + } + return GetOrCreatePointerType(pointTo.GetTypeIndex(), primType, attrs); +} + +const MIRType *TypeTable::GetPointedTypeIfApplicable(MIRType &type) const { + if (type.GetKind() != kTypePointer) { + return &type; + } + auto &ptrType = static_cast(type); + return GetTypeFromTyIdx(ptrType.GetPointedTyIdx()); +} +MIRType *TypeTable::GetPointedTypeIfApplicable(MIRType &type) { + return const_cast(const_cast(this)->GetPointedTypeIfApplicable(type)); +} + +MIRArrayType *TypeTable::GetOrCreateArrayType(const MIRType &elem, uint8 
dim, const uint32 *sizeArray, + const TypeAttrs &attrs) { + std::vector sizeVector; + for (size_t i = 0; i < dim; ++i) { + sizeVector.push_back(sizeArray != nullptr ? sizeArray[i] : 0); + } + MIRArrayType arrayType(elem.GetTypeIndex(), sizeVector); + arrayType.SetTypeAttrs(attrs); + TyIdx tyIdx = GetOrCreateMIRType(&arrayType); + return static_cast(typeTable[tyIdx]); +} + +// For one dimension array +MIRArrayType *TypeTable::GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs) { + return GetOrCreateArrayType(elem, 1, &size, attrs); +} + +MIRType *TypeTable::GetOrCreateFarrayType(const MIRType &elem) { + MIRFarrayType type; + type.SetElemtTyIdx(elem.GetTypeIndex()); + TyIdx tyIdx = GetOrCreateMIRType(&type); + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFarrayType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreateJarrayType(const MIRType &elem) { + MIRJarrayType type; + type.SetElemtTyIdx(elem.GetTypeIndex()); + TyIdx tyIdx = GetOrCreateMIRType(&type); + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateJarrayType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector &vecType, + const std::vector &vecAttrs, bool isVarg, + const TypeAttrs &retAttrs) { + MIRFuncType funcType(retTyIdx, vecType, vecAttrs, retAttrs); + if (isVarg) { + funcType.SetVarArgs(); + } + TyIdx tyIdx = GetOrCreateMIRType(&funcType); + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFunctionType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, + const FieldVector &parentFields, MIRModule &module, bool forStruct, + const TypeAttrs &attrs) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + MIRStructType type(forStruct ? kTypeStruct : kTypeUnion, strIdx); + type.SetFields(fields); + type.SetParentFields(parentFields); + type.SetTypeAttrs(attrs); + + TyIdx tyIdx = GetOrCreateMIRType(&type); + // Global? 
+ module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + module.PushbackTypeDefOrder(strIdx); + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateStructOrUnion"); + return typeTable.at(tyIdx); +} + +void TypeTable::PushIntoFieldVector(FieldVector &fields, const std::string &name, const MIRType &type) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + fields.push_back(FieldPair(strIdx, TyIdxFieldAttrPair(type.GetTypeIndex(), FieldAttrs()))); +} + +MIRType *TypeTable::GetOrCreateClassOrInterface(const std::string &name, MIRModule &module, bool forClass) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + TyIdx tyIdx = module.GetTypeNameTab()->GetTyIdxFromGStrIdx(strIdx); + if (!tyIdx) { + if (forClass) { + MIRClassType type(kTypeClassIncomplete, strIdx); // for class type + tyIdx = GetOrCreateMIRType(&type); + } else { + MIRInterfaceType type(kTypeInterfaceIncomplete, strIdx); // for interface type + tyIdx = GetOrCreateMIRType(&type); + } + module.PushbackTypeDefOrder(strIdx); + module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + if (typeTable[tyIdx]->GetNameStrIdx() == 0u) { + typeTable[tyIdx]->SetNameStrIdx(strIdx); + } + } + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateClassOrInterface"); + return typeTable.at(tyIdx); +} + +void TypeTable::AddFieldToStructType(MIRStructType &structType, const std::string &fieldName, + const MIRType &fieldType) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fieldName); + FieldAttrs fieldAttrs; + fieldAttrs.SetAttr(FLDATTR_final); // Mark compiler-generated struct fields as final to improve AliasAnalysis + structType.GetFields().push_back(FieldPair(strIdx, TyIdxFieldAttrPair(fieldType.GetTypeIndex(), fieldAttrs))); +} + +void FPConstTable::PostInit() { + MIRType &typeFloat = *GlobalTables::GetTypeTable().GetPrimType(PTY_f32); + nanFloatConst = new MIRFloatConst(NAN, typeFloat); + infFloatConst = new MIRFloatConst(INFINITY, typeFloat); + minusInfFloatConst = new MIRFloatConst(-INFINITY, typeFloat); + minusZeroFloatConst = new MIRFloatConst(-0.0, typeFloat); + MIRType &typeDouble = *GlobalTables::GetTypeTable().GetPrimType(PTY_f64); + nanDoubleConst = new MIRDoubleConst(NAN, typeDouble); + infDoubleConst = new MIRDoubleConst(INFINITY, typeDouble); + minusInfDoubleConst = new MIRDoubleConst(-INFINITY, typeDouble); + minusZeroDoubleConst = new MIRDoubleConst(-0.0, typeDouble); +} + +MIRIntConst *IntConstTable::GetOrCreateIntConst(const IntVal &val, MIRType &type) { + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateIntConstTreadSafe(val.GetExtValue(), type); + } + return DoGetOrCreateIntConst(val.GetExtValue(), type); +} + +MIRIntConst *IntConstTable::GetOrCreateIntConst(uint64 val, MIRType &type) { + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateIntConstTreadSafe(val, type); + } + return DoGetOrCreateIntConst(val, type); +} + +MIRIntConst *IntConstTable::DoGetOrCreateIntConst(uint64 val, MIRType &type) { + IntConstKey key(val, type.GetTypeIndex()); + if (intConstTable.find(key) != intConstTable.end()) { + return intConstTable[key]; + } + intConstTable[key] = new MIRIntConst(val, type); + return intConstTable[key]; +} + +MIRIntConst *IntConstTable::DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type) { + IntConstKey key(val, type.GetTypeIndex()); + { + std::shared_lock lock(mtx); + if (intConstTable.find(key) != intConstTable.end()) { + return intConstTable[key]; 
+ } + } + std::unique_lock lock(mtx); + intConstTable[key] = new MIRIntConst(val, type); + return intConstTable[key]; +} + +IntConstTable::~IntConstTable() { + for (auto pair : intConstTable) { + delete pair.second; + } +} + +MIRFloatConst *FPConstTable::GetOrCreateFloatConst(float floatVal) { + if (std::isnan(floatVal)) { + return nanFloatConst; + } + if (std::isinf(floatVal)) { + return (floatVal < 0) ? minusInfFloatConst : infFloatConst; + } + if (floatVal == 0.0 && std::signbit(floatVal)) { + return minusZeroFloatConst; + } + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateFloatConstThreadSafe(floatVal); + } + return DoGetOrCreateFloatConst(floatVal); +} + +MIRFloatConst *FPConstTable::DoGetOrCreateFloatConst(float floatVal) { + const auto it = floatConstTable.find(floatVal); + if (it != floatConstTable.cend()) { + return it->second; + } + // create a new one + auto *floatConst = + new MIRFloatConst(floatVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx{ PTY_f32 })); + floatConstTable[floatVal] = floatConst; + return floatConst; +} + +MIRFloatConst *FPConstTable::DoGetOrCreateFloatConstThreadSafe(float floatVal) { + { + std::shared_lock lock(floatMtx); + const auto it = floatConstTable.find(floatVal); + if (it != floatConstTable.cend()) { + return it->second; + } + } + // create a new one + std::unique_lock lock(floatMtx); + auto *floatConst = + new MIRFloatConst(floatVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx{ PTY_f32 })); + floatConstTable[floatVal] = floatConst; + return floatConst; +} + +MIRDoubleConst *FPConstTable::GetOrCreateDoubleConst(double doubleVal) { + if (std::isnan(doubleVal)) { + return nanDoubleConst; + } + if (std::isinf(doubleVal)) { + return (doubleVal < 0) ? minusInfDoubleConst : infDoubleConst; + } + if (doubleVal == 0.0 && std::signbit(doubleVal)) { + return minusZeroDoubleConst; + } + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateDoubleConstThreadSafe(doubleVal); + } + return DoGetOrCreateDoubleConst(doubleVal); +} + +MIRDoubleConst *FPConstTable::DoGetOrCreateDoubleConst(double doubleVal) { + const auto it = doubleConstTable.find(doubleVal); + if (it != doubleConstTable.cend()) { + return it->second; + } + // create a new one + auto *doubleConst = new MIRDoubleConst( + doubleVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64))); + doubleConstTable[doubleVal] = doubleConst; + return doubleConst; +} + +MIRDoubleConst *FPConstTable::DoGetOrCreateDoubleConstThreadSafe(double doubleVal) { + { + std::shared_lock lock(doubleMtx); + const auto it = doubleConstTable.find(doubleVal); + if (it != doubleConstTable.cend()) { + return it->second; + } + } + // create a new one + std::unique_lock lock(doubleMtx); + auto *doubleConst = new MIRDoubleConst( + doubleVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64))); + doubleConstTable[doubleVal] = doubleConst; + return doubleConst; +} + +FPConstTable::~FPConstTable() { + delete nanFloatConst; + delete infFloatConst; + delete minusInfFloatConst; + delete minusZeroFloatConst; + delete nanDoubleConst; + delete infDoubleConst; + delete minusInfDoubleConst; + delete minusZeroDoubleConst; + for (const auto &floatConst : floatConstTable) { + delete floatConst.second; + } + for (const auto &doubleConst : doubleConstTable) { + delete doubleConst.second; + } +} + +GSymbolTable::GSymbolTable() { + symbolTable.push_back(static_cast(nullptr)); +} + +GSymbolTable::~GSymbolTable() { + for (MIRSymbol *symbol : symbolTable) { + delete symbol; + } +} + +MIRSymbol 
*GSymbolTable::CreateSymbol(uint8 scopeID) { + auto *st = new MIRSymbol(symbolTable.size(), scopeID); + CHECK_FATAL(st != nullptr, "CreateSymbol failure"); + symbolTable.push_back(st); + module->AddSymbol(st); + return st; +} + +bool GSymbolTable::AddToStringSymbolMap(const MIRSymbol &st) { + GStrIdx strIdx = st.GetNameStrIdx(); + if (strIdxToStIdxMap[strIdx].FullIdx() != 0) { + return false; + } + strIdxToStIdxMap[strIdx] = st.GetStIdx(); + return true; +} + +bool GSymbolTable::RemoveFromStringSymbolMap(const MIRSymbol &st) { + const auto it = strIdxToStIdxMap.find(st.GetNameStrIdx()); + if (it != strIdxToStIdxMap.cend()) { + strIdxToStIdxMap.erase(it); + return true; + } + return false; +} + +void GSymbolTable::Dump(bool isLocal, int32 indent) const { + for (size_t i = 1; i < symbolTable.size(); ++i) { + const MIRSymbol *symbol = symbolTable[i]; + if (symbol != nullptr) { + symbol->Dump(isLocal, indent); + } + } +} + +GlobalTables GlobalTables::globalTables; +GlobalTables &GlobalTables::GetGlobalTables() { + return globalTables; +} +} // namespace maple +#endif // MIR_FEATURE_FULL diff --git a/ecmascript/mapleall/maple_ir/src/intrinsics.cpp b/ecmascript/mapleall/maple_ir/src/intrinsics.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aea00da2ccf705456eea64e327a90ba5d8afff98 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/intrinsics.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "intrinsics.h" +#include "mir_module.h" +#include "mir_type.h" +#include "mir_builder.h" + +namespace maple { +MIRType *IntrinDesc::jsValueType = nullptr; +MIRModule *IntrinDesc::mirModule = nullptr; +IntrinDesc IntrinDesc::intrinTable[INTRN_LAST + 1] = { +#define DEF_MIR_INTRINSIC(X, NAME, INTRN_CLASS, RETURN_TYPE, ...) 
\ + { (NAME), (INTRN_CLASS), { (RETURN_TYPE), ##__VA_ARGS__ } }, +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC +}; +MIRType *IntrinDesc::GetOrCreateJSValueType() { + if (jsValueType != nullptr) { + return jsValueType; + } + MIRBuilder *jsBuilder = mirModule->GetMIRBuilder(); + FieldVector payloadFields; + GStrIdx i32 = jsBuilder->GetOrCreateStringIndex("i32"); + GStrIdx u32 = jsBuilder->GetOrCreateStringIndex("u32"); + GStrIdx boo = jsBuilder->GetOrCreateStringIndex("boo"); + GStrIdx ptr = jsBuilder->GetOrCreateStringIndex("ptr"); + payloadFields.push_back( + FieldPair(i32, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetInt32()->GetTypeIndex(), FieldAttrs()))); + payloadFields.push_back( + FieldPair(u32, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs()))); + payloadFields.push_back( + FieldPair(boo, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs()))); + payloadFields.push_back( + FieldPair(ptr, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs()))); + FieldVector parentFields; + MIRType *payloadType = GlobalTables::GetTypeTable().GetOrCreateUnionType("payload_type", payloadFields, + parentFields, *mirModule); + FieldVector sFields; + GStrIdx payload = jsBuilder->GetOrCreateStringIndex("payload"); + GStrIdx tag = jsBuilder->GetOrCreateStringIndex("tag"); + sFields.push_back(FieldPair(payload, TyIdxFieldAttrPair(payloadType->GetTypeIndex(), FieldAttrs()))); + sFields.push_back( + FieldPair(tag, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs()))); + MIRType *sType = GlobalTables::GetTypeTable().GetOrCreateStructType("s_type", sFields, parentFields, *mirModule); + CHECK_FATAL(sType != nullptr, "can't get struct type, check it!"); + FieldVector jsValLayoutFields; + GStrIdx asBits = jsBuilder->GetOrCreateStringIndex("asBits"); + GStrIdx s = jsBuilder->GetOrCreateStringIndex("s"); + GStrIdx asDouble = jsBuilder->GetOrCreateStringIndex("asDouble"); + GStrIdx asPtr = jsBuilder->GetOrCreateStringIndex("asPtr"); + jsValLayoutFields.push_back( + FieldPair(asBits, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt64()->GetTypeIndex(), FieldAttrs()))); + jsValLayoutFields.push_back(FieldPair(s, TyIdxFieldAttrPair(sType->GetTypeIndex(), FieldAttrs()))); + jsValLayoutFields.push_back( + FieldPair(asDouble, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetDouble()->GetTypeIndex(), FieldAttrs()))); + jsValLayoutFields.push_back( + FieldPair(asPtr, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs()))); + MIRType *jsValLayoutType = GlobalTables::GetTypeTable().GetOrCreateUnionType("jsval_layout_type", + jsValLayoutFields, + parentFields, *mirModule); + return jsValLayoutType; +} + +void IntrinDesc::InitMIRModule(MIRModule *mod) { + mirModule = mod; +} + +MIRType *IntrinDesc::GetTypeFromArgTy(IntrinArgType argType) const { + switch (argType) { + case kArgTyVoid: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_void)); + case kArgTyI8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i8)); + case kArgTyI16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i16)); + case kArgTyI32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i32)); + case kArgTyI64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i64)); + case kArgTyU8: + return 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u8)); + case kArgTyU16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u16)); + case kArgTyU32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u32)); + case kArgTyU64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u64)); + case kArgTyU1: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u1)); + case kArgTyPtr: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_ptr)); + case kArgTyRef: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_ref)); + case kArgTyA32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)); + case kArgTyA64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + case kArgTyF32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f32)); + case kArgTyF64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64)); + case kArgTyF128: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f128)); + case kArgTyC64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_c64)); + case kArgTyC128: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_c128)); + case kArgTyAgg: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_agg)); + case kArgTyV2I64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2i64)); + case kArgTyV4I32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4i32)); + case kArgTyV8I16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8i16)); + case kArgTyV16I8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v16i8)); + case kArgTyV2U64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2u64)); + case kArgTyV4U32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4u32)); + case kArgTyV8U16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8u16)); + case kArgTyV16U8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v16u8)); + case kArgTyV2F64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2f64)); + case kArgTyV4F32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4f32)); + case kArgTyV1I64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i64)); + case kArgTyV2I32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2i32)); + case kArgTyV4I16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4i16)); + case kArgTyV8I8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8i8)); + case kArgTyV1U64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u64)); + case kArgTyV2U32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2u32)); + case kArgTyV4U16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4u16)); + case kArgTyV8U8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8u8)); + case kArgTyV1F64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64)); + case kArgTyV2F32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2f32)); +#ifdef DYNAMICLANG + case kArgTySimplestr: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_simplestr)); + case 
kArgTySimpleobj: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_simpleobj)); + case kArgTyDynany: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_dynany)); + case kArgTyDyni32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_dyni32)); +#endif + default: + return nullptr; + } +} + +MIRType *IntrinDesc::GetArgType(uint32 index) const { + // 0 is the arg of return type + CHECK_FATAL(index < kMaxArgsNum, "index out of range"); + return GetTypeFromArgTy(argTypes[index + 1]); +} + +MIRType *IntrinDesc::GetReturnType() const { + return GetTypeFromArgTy(argTypes[0]); +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/lexer.cpp b/ecmascript/mapleall/maple_ir/src/lexer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..890037502e037a71fe05f0cb111433cce2e8bc16 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/lexer.cpp @@ -0,0 +1,759 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "lexer.h" +#include +#include +#include +#include "mpl_logging.h" +#include "debug_info.h" +#include "mir_module.h" +#include "securec.h" +#include "utils.h" + +namespace maple { +int32 HexCharToDigit(char c) { + int32 ret = utils::ToDigit<16, int32>(c); + return (ret != INT32_MAX ? ret : 0); +} + +static uint8 Char2num(char c) { + uint8 ret = utils::ToDigit<16>(c); + DEBUG_ASSERT(ret != UINT8_MAX, "not a hex value"); + return ret; +} + +// Read (next) line from the MIR (text) file, and return the read +// number of chars. +// if the line is empty (nothing but a newline), returns 0. +// if EOF, return -1. +// The trailing new-line character has been removed. 
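Given the contract the preceding comment specifies (line length on success, `0` for an empty line, `-1` at EOF, trailing newline stripped), a caller presumably drains input with a loop of the following shape. This is a hypothetical sketch, not code from the patch:

```
#include <functional>

// Hypothetical driver loop over a MIRLexer-style reader whose
// ReadALine() returns the line length on success, 0 for a blank
// line, and -1 at end of input.
template <typename Lexer>
void DrainLines(Lexer &lexer, const std::function<void()> &tokenizeLine) {
    int len = lexer.ReadALine();
    while (len >= 0) {
        if (len > 0) {
            tokenizeLine();  // work on the lexer's current line buffer
        }
        len = lexer.ReadALine();
    }
    // len == -1 here: end of input reached
}
```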
+int MIRLexer::ReadALine() {
+  if (airFile == nullptr) {
+    line = "";
+    return -1;
+  }
+
+  curIdx = 0;
+  if (!std::getline(*airFile, line)) {  // EOF
+    line = "";
+    airFile = nullptr;
+    currentLineSize = 0;
+    return -1;
+  }
+
+  RemoveReturnInline(line);
+  currentLineSize = line.length();
+  return currentLineSize;
+}
+
+int MIRLexer::ReadALineByMirQueue() {
+  if (mirQueue.empty()) {
+    line = "";
+    return -1;
+  }
+  curIdx = 0;
+  line = mirQueue.front();
+  RemoveReturnInline(line);
+  currentLineSize = line.length();
+  mirQueue.pop();
+  return currentLineSize;
+}
+
+MIRLexer::MIRLexer(MIRModule &mod)
+    : module(mod),
+      seenComments(mod.GetMPAllocator().Adapter()),
+      keywordMap(mod.GetMPAllocator().Adapter()) {
+  // initialize keywordMap
+  keywordMap.clear();
+#define KEYWORD(STR)            \
+  {                             \
+    std::string str;            \
+    str = #STR;                 \
+    keywordMap[str] = TK_##STR; \
+  }
+#include "keywords.def"
+#undef KEYWORD
+}
+
+void MIRLexer::PrepareForFile(const std::string &filename) {
+  // open MIR file
+  airFileInternal.open(filename);
+  CHECK_FATAL(airFileInternal.is_open(), "cannot open MIR file %s\n", filename.c_str());
+
+  airFile = &airFileInternal;
+  // try to read the first line
+  if (ReadALine() < 0) {
+    lineNum = 0;
+  } else {
+    lineNum = 1;
+  }
+  module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str());
+  kind = TK_invalid;
+}
+
+void MIRLexer::PrepareForString(const std::string &src) {
+  SetMirQueue(src);
+  if (ReadALineByMirQueue() < 0) {
+    lineNum = 0;
+  } else {
+    lineNum = 1;
+  }
+  module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str());
+  kind = TK_invalid;
+}
+
+void MIRLexer::GenName() {
+  uint32 startIdx = curIdx;
+  char c = GetNextCurrentCharWithUpperCheck();
+  char cp = GetCharAt(curIdx - 1);
+  if (c == '@' && (cp == 'h' || cp == 'f')) {
+    // special pattern for exception handling labels: catch or finally
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  while (utils::IsAlnum(c) || c < 0 || c == '_' || c == '$' || c == ';' ||
+         c == '/' || c == '|' || c == '.' || c == '?' ||
+         c == '@') {
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  name = line.substr(startIdx, curIdx - startIdx);
+}
+
+// get the constant value
+TokenKind MIRLexer::GetConstVal() {
+  bool negative = false;
+  uint32 valStart = curIdx;
+  char c = GetCharAtWithUpperCheck(curIdx);
+  if (c == '-') {
+    c = GetNextCurrentCharWithUpperCheck();
+    TokenKind tk = GetSpecialFloatConst();
+    if (tk != TK_invalid) {
+      return tk;
+    }
+    negative = true;
+  }
+  const uint32 lenHexPrefix = 2;
+  if (line.compare(curIdx, lenHexPrefix, "0x") == 0) {
+    curIdx += lenHexPrefix;
+    return GetHexConst(valStart, negative);
+  }
+  uint32 startIdx = curIdx;
+  while (isdigit(c)) {
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  char cs = GetCharAtWithUpperCheck(startIdx);
+  if (!isdigit(cs) && c != '.') {
+    return TK_invalid;
+  }
+  if (c != '.' && c != 'f' && c != 'F' && c != 'e' && c != 'E') {
+    curIdx = startIdx;
+    return GetIntConst(valStart, negative);
+  }
+  return GetFloatConst(valStart, startIdx, negative);
+}
+
+TokenKind MIRLexer::GetSpecialFloatConst() {
+  constexpr uint32 lenSpecFloat = 4;
+  constexpr uint32 lenSpecDouble = 3;
+  if (line.compare(curIdx, lenSpecFloat, "inff") == 0 &&
+      !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecFloat))) {
+    curIdx += lenSpecFloat;
+    theFloatVal = -INFINITY;
+    return TK_floatconst;
+  }
+  if (line.compare(curIdx, lenSpecDouble, "inf") == 0 &&
+      !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecDouble))) {
+    curIdx += lenSpecDouble;
+    theDoubleVal = -INFINITY;
+    return TK_doubleconst;
+  }
+  if (line.compare(curIdx, lenSpecFloat, "nanf") == 0 &&
+      !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecFloat))) {
+    curIdx += lenSpecFloat;
+    theFloatVal = -NAN;
+    return TK_floatconst;
+  }
+  if (line.compare(curIdx, lenSpecDouble, "nan") == 0 &&
+      !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecDouble))) {
+    curIdx += lenSpecDouble;
+    theDoubleVal = -NAN;
+    return TK_doubleconst;
+  }
+  return TK_invalid;
+}
+
+TokenKind MIRLexer::GetHexConst(uint32 valStart, bool negative) {
+  char c = GetCharAtWithUpperCheck(curIdx);
+  if (!isxdigit(c)) {
+    name = line.substr(valStart, curIdx - valStart);
+    return TK_invalid;
+  }
+  uint64 tmp = static_cast<uint64>(HexCharToDigit(c));
+  c = GetNextCurrentCharWithUpperCheck();
+  while (isxdigit(c)) {
+    tmp = (tmp << 4) + static_cast<uint64>(HexCharToDigit(c));
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  theIntVal = static_cast<int64>(static_cast<uint64>(tmp));
+  if (negative) {
+    theIntVal = -theIntVal;
+  }
+  theFloatVal = static_cast<float>(theIntVal);
+  theDoubleVal = static_cast<double>(theIntVal);
+  if (negative && theIntVal == 0) {
+    theFloatVal = -theFloatVal;
+    theDoubleVal = -theDoubleVal;
+  }
+  name = line.substr(valStart, curIdx - valStart);
+  return TK_intconst;
+}
+
+TokenKind MIRLexer::GetIntConst(uint32 valStart, bool negative) {
+  auto negOrSelf = [negative](uint64 val) { return negative ? ~val + 1 : val; };
+
+  theIntVal = HexCharToDigit(GetCharAtWithUpperCheck(curIdx));
+
+  uint64 radix = theIntVal == 0 ? 8 : 10;
+
+  char c = GetNextCurrentCharWithUpperCheck();
+
+  for (theIntVal = negOrSelf(theIntVal); isdigit(c); c = GetNextCurrentCharWithUpperCheck()) {
+    theIntVal = (theIntVal * radix) + negOrSelf(HexCharToDigit(c));
+  }
+
+  if (c == 'u' || c == 'U') {  // skip 'u' or 'U'
+    c = GetNextCurrentCharWithUpperCheck();
+    if (c == 'l' || c == 'L') {
+      c = GetNextCurrentCharWithUpperCheck();
+    }
+  }
+
+  if (c == 'l' || c == 'L') {
+    c = GetNextCurrentCharWithUpperCheck();
+    if (c == 'l' || c == 'L' || c == 'u' || c == 'U') {
+      ++curIdx;
+    }
+  }
+
+  name = line.substr(valStart, curIdx - valStart);
+
+  if (negative) {
+    theFloatVal = static_cast<float>(static_cast<int64>(theIntVal));
+    theDoubleVal = static_cast<double>(static_cast<int64>(theIntVal));
+    if (theIntVal == 0) {
+      theFloatVal = -theFloatVal;
+      theDoubleVal = -theDoubleVal;
+    }
+  } else {
+    theFloatVal = static_cast<float>(theIntVal);
+    theDoubleVal = static_cast<double>(theIntVal);
+  }
+
+  return TK_intconst;
+}
+
+TokenKind MIRLexer::GetFloatConst(uint32 valStart, uint32 startIdx, bool negative) {
+  char c = GetCharAtWithUpperCheck(curIdx);
+  if (c == '.') {
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  while (isdigit(c)) {
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  bool doublePrec = true;
+  if (c == 'e' || c == 'E') {
+    c = GetNextCurrentCharWithUpperCheck();
+    if (!isdigit(c) && c != '-' && c != '+') {
+      name = line.substr(valStart, curIdx - valStart);
+      return TK_invalid;
+    }
+    if (c == '-' || c == '+') {
+      c = GetNextCurrentCharWithUpperCheck();
+    }
+    while (isdigit(c)) {
+      c = GetNextCurrentCharWithUpperCheck();
+    }
+  }
+  if (c == 'f' || c == 'F') {
+    doublePrec = false;
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  if (c == 'l' || c == 'L') {
+    MIR_ERROR("warning: long double is not yet supported\n");
+    ++curIdx;
+  }
+
+  std::string floatStr = line.substr(startIdx, curIdx - startIdx);
+  // get the float constant value
+  if (!doublePrec) {
+    int eNum = sscanf_s(floatStr.c_str(), "%e", &theFloatVal);
+    CHECK_FATAL(eNum == 1, "sscanf_s failed");
+
+    if (negative) {
+      theFloatVal = -theFloatVal;
+    }
+    theIntVal = static_cast<int64>(theFloatVal);
+    theDoubleVal = static_cast<double>(theFloatVal);
+    if (negative && fabs(theFloatVal) <= 1e-6) {
+      theDoubleVal = -theDoubleVal;
+    }
+    name = line.substr(valStart, curIdx - valStart);
+    return TK_floatconst;
+  } else {
+    int eNum = sscanf_s(floatStr.c_str(), "%le", &theDoubleVal);
+    CHECK_FATAL(eNum == 1, "sscanf_s failed");
+
+    if (negative) {
+      theDoubleVal = -theDoubleVal;
+    }
+    theIntVal = static_cast<int64>(theDoubleVal);
+    theFloatVal = static_cast<float>(theDoubleVal);
+    if (negative && fabs(theDoubleVal) <= 1e-15) {
+      theFloatVal = -theFloatVal;
+    }
+    name = line.substr(valStart, curIdx - valStart);
+    return TK_doubleconst;
+  }
+}
+
+TokenKind MIRLexer::GetTokenWithPrefixDollar() {
+  // token with prefix '$'
+  char c = GetCharAtWithUpperCheck(curIdx);
+  if (utils::IsAlpha(c) || c == '_' || c == '$') {
+    GenName();
+    return TK_gname;
+  } else {
+    // for error reporting.
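+    // `name` captures the '$' prefix plus the offending character
+    // (curIdx - 1 points back at the '$' itself), so the parser can
+    // echo exactly the two characters the lexer choked on.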
+ const uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; + } +} + +TokenKind MIRLexer::GetTokenWithPrefixPercent() { + // token with prefix '%' + char c = GetCharAtWithUpperCheck(curIdx); + if (isdigit(c)) { + int valStart = curIdx - 1; + theIntVal = HexCharToDigit(c); + c = GetNextCurrentCharWithUpperCheck(); + while (isdigit(c)) { + theIntVal = (theIntVal * 10) + HexCharToDigit(c); + DEBUG_ASSERT(theIntVal >= 0, "int value overflow"); + c = GetNextCurrentCharWithUpperCheck(); + } + name = line.substr(valStart, curIdx - valStart); + return TK_preg; + } + if (utils::IsAlpha(c) || c == '_' || c == '$') { + GenName(); + return TK_lname; + } + if (c == '%' && utils::IsAlpha(GetCharAtWithUpperCheck(curIdx + 1))) { + ++curIdx; + GenName(); + return TK_specialreg; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixAmpersand() { + // token with prefix '&' + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlpha(c) || c == '_') { + GenName(); + return TK_fname; + } + // for error reporting. + constexpr uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixAtOrCircumflex(char prefix) { + // token with prefix '@' or `^` + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlnum(c) || c < 0 || c == '_' || c == '@' || c == '$' || c == '|') { + GenName(); + if (prefix == '@') { + return TK_label; + } + return TK_prntfield; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixExclamation() { + // token with prefix '!' + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlpha(c)) { + GenName(); + return TK_typeparam; + } + // for error reporting. + const uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixQuotation() { + if (GetCharAtWithUpperCheck(curIdx + 1) == '\'') { + theIntVal = GetCharAtWithUpperCheck(curIdx); + curIdx += 2; + return TK_intconst; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixDoubleQuotation() { + uint32 startIdx = curIdx; + uint32 shift = 0; + // for \", skip the \ to leave " only internally + // and also for the pair of chars \ and n become '\n' etc. 
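+  // Worked example of the in-place compaction below: for the raw input
+  //   a\tb"
+  // the pair '\' 't' is rewritten as the single byte '\t' at curIdx - shift;
+  // `shift` counts how far the tail of the string has slid left, and the
+  // final substr at the end of this function drops that slack.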
+  char c = GetCurrentCharWithUpperCheck();
+  while ((c != 0) && (c != '\"' || GetCharAtWithLowerCheck(curIdx - 1) == '\\')) {
+    if (GetCharAtWithLowerCheck(curIdx - 1) == '\\') {
+      shift++;
+      switch (c) {
+        case '"':
+          line[curIdx - shift] = c;
+          break;
+        case '\\':
+          line[curIdx - shift] = c;
+          // avoid the 3rd \ in \\\ being treated as an escaped one
+          line[curIdx] = 0;
+          break;
+        case 'a':
+          line[curIdx - shift] = '\a';
+          break;
+        case 'b':
+          line[curIdx - shift] = '\b';
+          break;
+        case 't':
+          line[curIdx - shift] = '\t';
+          break;
+        case 'n':
+          line[curIdx - shift] = '\n';
+          break;
+        case 'v':
+          line[curIdx - shift] = '\v';
+          break;
+        case 'f':
+          line[curIdx - shift] = '\f';
+          break;
+        case 'r':
+          line[curIdx - shift] = '\r';
+          break;
+        // support hex value \xNN
+        case 'x': {
+          const uint32 hexShift = 4;
+          const uint32 hexLength = 2;
+          uint8 c1 = Char2num(GetCharAtWithLowerCheck(curIdx + 1));
+          uint8 c2 = Char2num(GetCharAtWithLowerCheck(curIdx + 2));
+          uint32 cNew = (c1 << hexShift) + c2;
+          line[curIdx - shift] = static_cast<char>(cNew);
+          curIdx += hexLength;
+          shift += hexLength;
+          break;
+        }
+        // support oct value \NNN
+        case '0':
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9': {
+          const uint32 octShift1 = 3;
+          const uint32 octShift2 = 6;
+          const uint32 octLength = 3;
+          DEBUG_ASSERT(curIdx + octLength < line.size(), "index out of range");
+          uint32 cNew = (static_cast<uint32>(GetCharAtWithLowerCheck(curIdx + 1) - '0') << octShift2) +
+                        (static_cast<uint32>(GetCharAtWithLowerCheck(curIdx + 2) - '0') << octShift1) +
+                        static_cast<uint32>(GetCharAtWithLowerCheck(curIdx + 3) - '0');
+          line[curIdx - shift] = static_cast<char>(cNew);
+          curIdx += octLength;
+          shift += octLength;
+          break;
+        }
+        default:
+          line[curIdx - shift] = '\\';
+          --shift;
+          line[curIdx - shift] = c;
+          break;
+      }
+    } else if (shift) {
+      line[curIdx - shift] = c;
+    }
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  if (c != '\"') {
+    return TK_invalid;
+  }
+  // for empty string
+  if (startIdx == curIdx) {
+    name = "";
+  } else {
+    name = line.substr(startIdx, curIdx - startIdx - shift);
+  }
+  ++curIdx;
+  return TK_string;
+}
+
+TokenKind MIRLexer::GetTokenSpecial() {
+  --curIdx;
+  char c = GetCharAtWithLowerCheck(curIdx);
+  if (utils::IsAlpha(c) || c < 0 || c == '_') {
+    GenName();
+    TokenKind tk = keywordMap[name];
+    switch (tk) {
+      case TK_nanf:
+        theFloatVal = NAN;
+        return TK_floatconst;
+      case TK_nan:
+        theDoubleVal = NAN;
+        return TK_doubleconst;
+      case TK_inff:
+        theFloatVal = INFINITY;
+        return TK_floatconst;
+      case TK_inf:
+        theDoubleVal = INFINITY;
+        return TK_doubleconst;
+      default:
+        return tk;
+    }
+  }
+  MIR_ERROR("error in input file\n");
+  return TK_eof;
+}
+
+TokenKind MIRLexer::LexToken() {
+  // skip spaces
+  char c = GetCurrentCharWithUpperCheck();
+  while (c == ' ' || c == '\t') {
+    c = GetNextCurrentCharWithUpperCheck();
+  }
+  // check end of line
+  while (c == 0 || c == '#') {
+    if (c == '#') {  // process comment contents
+      seenComments.push_back(line.substr(curIdx + 1, currentLineSize - curIdx - 1));
+    }
+    if (needFile) {
+      if (ReadALine() < 0) {
+        return TK_eof;
+      }
+    } else {
+      if (ReadALineByMirQueue() < 0) {
+        return TK_eof;
+      }
+    }
+    ++lineNum;  // a new line read.
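+    // Both input modes converge here: with needFile set, `line` is refilled
+    // from the MIR file via ReadALine(), otherwise from the mirQueue filled
+    // by PrepareForString(); either way curIdx has been reset to 0.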
+ module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str()); + // skip spaces + c = GetCurrentCharWithUpperCheck(); + while (c == ' ' || c == '\t') { + c = GetNextCurrentCharWithUpperCheck(); + } + } + char curChar = c; + ++curIdx; + switch (curChar) { + case '\n': + return TK_newline; + case '(': + return TK_lparen; + case ')': + return TK_rparen; + case '{': + return TK_lbrace; + case '}': + return TK_rbrace; + case '[': + return TK_lbrack; + case ']': + return TK_rbrack; + case '<': + return TK_langle; + case '>': + return TK_rangle; + case '=': + return TK_eqsign; + case ',': + return TK_coma; + case ':': + return TK_colon; + case '*': + return TK_asterisk; + case '.': + if (GetCharAtWithUpperCheck(curIdx) == '.') { + const uint32 lenDotdot = 2; + curIdx += lenDotdot; + return TK_dotdotdot; + } + // fall thru for .9100 == 0.9100 + [[clang::fallthrough]]; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '-': + --curIdx; + return GetConstVal(); + case '$': + return GetTokenWithPrefixDollar(); + case '%': + return GetTokenWithPrefixPercent(); + case '&': + return GetTokenWithPrefixAmpersand(); + case '@': + case '^': + return GetTokenWithPrefixAtOrCircumflex(curChar); + case '!': + return GetTokenWithPrefixExclamation(); + case '\'': + return GetTokenWithPrefixQuotation(); + case '\"': + return GetTokenWithPrefixDoubleQuotation(); + default: + return GetTokenSpecial(); + } +} + +TokenKind MIRLexer::NextToken() { + kind = LexToken(); + return kind; +} + +std::string MIRLexer::GetTokenString() const { + std::string temp; + switch (kind) { + case TK_gname: { + temp = "$"; + temp.append(name); + return temp; + } + case TK_lname: + case TK_preg: { + temp = "%"; + temp.append(name); + return temp; + } + case TK_specialreg: { + temp = "%%"; + temp.append(name); + return temp; + } + case TK_label: { + temp = "@"; + temp.append(name); + return temp; + } + case TK_prntfield: { + temp = "^"; + temp.append(name); + return temp; + } + case TK_intconst: { + temp = std::to_string(theIntVal); + return temp; + } + case TK_floatconst: { + temp = std::to_string(theFloatVal); + return temp; + } + case TK_doubleconst: { + temp = std::to_string(theDoubleVal); + return temp; + } + // misc. 
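+    // the punctuation tokens below simply echo their literal spelling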
+ case TK_newline: { + temp = "\\n"; + return temp; + } + case TK_lparen: { + temp = "("; + return temp; + } + case TK_rparen: { + temp = ")"; + return temp; + } + case TK_lbrace: { + temp = "{"; + return temp; + } + case TK_rbrace: { + temp = "}"; + return temp; + } + case TK_lbrack: { + temp = "["; + return temp; + } + case TK_rbrack: { + temp = "]"; + return temp; + } + case TK_langle: { + temp = "<"; + return temp; + } + case TK_rangle: { + temp = ">"; + return temp; + } + case TK_eqsign: { + temp = "="; + return temp; + } + case TK_coma: { + temp = ","; + return temp; + } + case TK_dotdotdot: { + temp = "..."; + return temp; + } + case TK_colon: { + temp = ":"; + return temp; + } + case TK_asterisk: { + temp = "*"; + return temp; + } + case TK_string: { + temp = "\""; + temp.append(name); + temp.append("\""); + return temp; + } + default: + temp = "invalid token"; + return temp; + } +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/mir_builder.cpp b/ecmascript/mapleall/maple_ir/src/mir_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..c13f391e84a16106e2ef35416e78d92020cf35c5 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/mir_builder.cpp @@ -0,0 +1,1179 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mir_builder.h" +#include "mir_symbol_builder.h" + +namespace maple { +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, int64 constValue) { + auto *fieldConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + constValue, *sType.GetElemType(fieldID - 1)); + newConst.AddItem(fieldConst, fieldID); +} + +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddAddrofFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &fieldSymbol) { + AddrofNode *fieldExpr = CreateExprAddrof(0, fieldSymbol, mirModule->GetMemPool()); + auto *fieldConst = mirModule->GetMemPool()->New(fieldExpr->GetStIdx(), fieldExpr->GetFieldID(), + *structType.GetElemType(fieldID - 1)); + newConst.AddItem(fieldConst, fieldID); +} + +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddAddroffuncFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &funcSymbol) { + MIRConst *fieldConst = nullptr; + MIRFunction *vMethod = funcSymbol.GetFunction(); + if (vMethod->IsAbstract()) { + fieldConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *structType.GetElemType(fieldID - 1)); + } else { + AddroffuncNode *addrofFuncExpr = + CreateExprAddroffunc(funcSymbol.GetFunction()->GetPuidx(), mirModule->GetMemPool()); + fieldConst = mirModule->GetMemPool()->New(addrofFuncExpr->GetPUIdx(), + *structType.GetElemType(fieldID - 1)); + } + newConst.AddItem(fieldConst, fieldID); +} + +// fieldID is continuously being updated during traversal; +// when the field is found, its field id is returned via fieldID +bool MIRBuilder::TraverseToNamedField(MIRStructType &structType, GStrIdx nameIdx, uint32 &fieldID) { + TyIdx tid(0); + return TraverseToNamedFieldWithTypeAndMatchStyle(structType, nameIdx, tid, fieldID, kMatchAnyField); +} + +// traverse parent first but match self first. 
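+// Field IDs number the fields of a struct, of its parent class and of any
+// embedded structs in one preorder walk, so fieldID keeps incrementing across
+// the recursive calls below; `idx` records the ID of the matching field.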
+void MIRBuilder::TraverseToNamedFieldWithType(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, + uint32 &fieldID, uint32 &idx) { + if (structType.IsIncomplete()) { + (void)incompleteTypeRefedSet.insert(structType.GetTypeIndex()); + } + // process parent + if (structType.GetKind() == kTypeClass || structType.GetKind() == kTypeClassIncomplete) { + auto &classType = static_cast(structType); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx()); + auto *parentType = static_cast(type); + if (parentType != nullptr) { + ++fieldID; + TraverseToNamedFieldWithType(*parentType, nameIdx, typeIdx, fieldID, idx); + } + } + for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) { + ++fieldID; + TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + if (structType.GetFieldsElemt(fieldIdx).first == nameIdx) { + if (typeIdx == 0u || fieldTyIdx == typeIdx) { + idx = fieldID; + continue; + } + // for pointer type, check their pointed type + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx); + if (type->IsOfSameType(*fieldType)) { + idx = fieldID; + } + } + + if (fieldType->IsStructType()) { + auto *subStructType = static_cast(fieldType); + TraverseToNamedFieldWithType(*subStructType, nameIdx, typeIdx, fieldID, idx); + } + } +} + +// fieldID is continuously being updated during traversal; +// when the field is found, its field id is returned via fieldID +// +// typeidx: TyIdx(0) means do not check types. +// matchstyle: 0: do not match but traverse to update fieldID +// 1: match top level field only +// 2: match any field +// 4: traverse parent first +// 0xc: do not match but traverse to update fieldID, traverse parent first, found in child +bool MIRBuilder::TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, + uint32 &fieldID, unsigned int matchStyle) { + if (structType.IsIncomplete()) { + (void)incompleteTypeRefedSet.insert(structType.GetTypeIndex()); + } + if (matchStyle & kParentFirst) { + // process parent + if ((structType.GetKind() != kTypeClass) && (structType.GetKind() != kTypeClassIncomplete)) { + return false; + } + + auto &classType = static_cast(structType); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx()); + auto *parentType = static_cast(type); + if (parentType != nullptr) { + ++fieldID; + if (matchStyle == (kFoundInChild | kParentFirst | kUpdateFieldID)) { + matchStyle = kParentFirst; + uint32 idxBackup = nameIdx; + nameIdx.reset(); + // do not match but traverse to update fieldID, traverse parent first + TraverseToNamedFieldWithTypeAndMatchStyle(*parentType, nameIdx, typeIdx, fieldID, matchStyle); + nameIdx.reset(idxBackup); + } else if (TraverseToNamedFieldWithTypeAndMatchStyle(*parentType, nameIdx, typeIdx, fieldID, matchStyle)) { + return true; + } + } + } + for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) { + ++fieldID; + TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + DEBUG_ASSERT(fieldType != nullptr, "fieldType is null"); + if (matchStyle && structType.GetFieldsElemt(fieldIdx).first == nameIdx) { + if (typeIdx == 0u || fieldTyIdx == typeIdx || + fieldType->IsOfSameType(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx))) { + return true; + } + } + unsigned int style = 
matchStyle & kMatchAnyField; + if (fieldType->IsStructType()) { + auto *subStructType = static_cast(fieldType); + if (TraverseToNamedFieldWithTypeAndMatchStyle(*subStructType, nameIdx, typeIdx, fieldID, style)) { + return true; + } + } + } + return false; +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx, + unsigned int matchStyle) { + auto &structType = static_cast(type); + uint32 fieldID = 0; + GStrIdx strIdx = GetStringIndex(name); + if (TraverseToNamedFieldWithTypeAndMatchStyle(structType, strIdx, idx, fieldID, matchStyle)) { + return fieldID; + } + return 0; +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx) { + return GetStructFieldIDFromNameAndType(type, name, idx, kMatchAnyField); +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndTypeParentFirst(MIRType &type, const std::string &name, TyIdx idx) { + return GetStructFieldIDFromNameAndType(type, name, idx, kParentFirst); +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndTypeParentFirstFoundInChild(MIRType &type, const std::string &name, + TyIdx idx) { + // do not match but traverse to update fieldID, traverse parent first, found in child + return GetStructFieldIDFromNameAndType(type, name, idx, kFoundInChild | kParentFirst | kUpdateFieldID); +} + +FieldID MIRBuilder::GetStructFieldIDFromFieldName(MIRType &type, const std::string &name) { + return GetStructFieldIDFromNameAndType(type, name, TyIdx(0), kMatchAnyField); +} + +FieldID MIRBuilder::GetStructFieldIDFromFieldNameParentFirst(MIRType *type, const std::string &name) { + if (type == nullptr) { + return 0; + } + return GetStructFieldIDFromNameAndType(*type, name, TyIdx(0), kParentFirst); +} + +void MIRBuilder::SetStructFieldIDFromFieldName(MIRStructType &structType, const std::string &name, GStrIdx newStrIdx, + const MIRType &newFieldType) { + uint32 fieldID = 0; + GStrIdx strIdx = GetStringIndex(name); + while (true) { + if (structType.GetElemStrIdx(fieldID) == strIdx) { + if (newStrIdx != 0u) { + structType.SetElemStrIdx(fieldID, newStrIdx); + } + structType.SetElemtTyIdx(fieldID, newFieldType.GetTypeIndex()); + return; + } + ++fieldID; + } +} + +// create a function named str +MIRFunction *MIRBuilder::GetOrCreateFunction(const std::string &str, TyIdx retTyIdx) { + GStrIdx strIdx = GetStringIndex(str); + MIRSymbol *funcSt = nullptr; + if (strIdx != 0u) { + funcSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (funcSt == nullptr) { + funcSt = CreateSymbol(TyIdx(0), strIdx, kStFunc, kScText, nullptr, kScopeGlobal); + } else { + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "runtime check error"); + return funcSt->GetFunction(); + } + } else { + strIdx = GetOrCreateStringIndex(str); + funcSt = CreateSymbol(TyIdx(0), strIdx, kStFunc, kScText, nullptr, kScopeGlobal); + } + auto *fn = mirModule->GetMemPool()->New(mirModule, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + MIRFuncType funcType; + funcType.SetRetTyIdx(retTyIdx); + auto funcTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&funcType); + auto *funcTypeInTypeTable = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTyIdx)); + fn->SetMIRFuncType(funcTypeInTypeTable); + fn->SetReturnTyIdx(retTyIdx); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + funcSt->SetTyIdx(funcTyIdx); + return fn; +} + +MIRFunction *MIRBuilder::GetFunctionFromSymbol(const MIRSymbol &funcSymbol) { + 
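+  // Only symbols created as kStFunc (e.g. via GetOrCreateFunction above)
+  // may be passed here; anything else trips the assert below.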
DEBUG_ASSERT(funcSymbol.GetSKind() == kStFunc, "Symbol %s is not a function symbol", funcSymbol.GetName().c_str()); + return funcSymbol.GetFunction(); +} + +MIRFunction *MIRBuilder::GetFunctionFromName(const std::string &str) { + auto *funcSymbol = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(str)); + return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr; +} + +MIRFunction *MIRBuilder::GetFunctionFromStidx(StIdx stIdx) { + auto *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr; +} + +MIRFunction *MIRBuilder::CreateFunction(const std::string &name, const MIRType &returnType, const ArgVector &arguments, + bool isVarg, bool createBody) const { + MIRSymbol *funcSymbol = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + GStrIdx strIdx = GetOrCreateStringIndex(name); + funcSymbol->SetNameStrIdx(strIdx); + if (!GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSymbol)) { + return nullptr; + } + funcSymbol->SetStorageClass(kScText); + funcSymbol->SetSKind(kStFunc); + auto *fn = mirModule->GetMemPool()->New(mirModule, funcSymbol->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + std::vector funcVecType; + std::vector funcVecAttrs; + for (size_t i = 0; i < arguments.size(); ++i) { + MIRType *ty = arguments[i].second; + FormalDef formalDef(GetOrCreateStringIndex(arguments[i].first.c_str()), nullptr, ty->GetTypeIndex(), TypeAttrs()); + fn->GetFormalDefVec().push_back(formalDef); + funcVecType.push_back(ty->GetTypeIndex()); + funcVecAttrs.push_back(TypeAttrs()); + if (fn->GetSymTab() != nullptr && formalDef.formalSym != nullptr) { + (void)fn->GetSymTab()->AddToStringSymbolMap(*formalDef.formalSym); + } + } + funcSymbol->SetTyIdx(GlobalTables::GetTypeTable().GetOrCreateFunctionType( + returnType.GetTypeIndex(), funcVecType, funcVecAttrs, isVarg)->GetTypeIndex()); + auto *funcType = static_cast(funcSymbol->GetType()); + fn->SetMIRFuncType(funcType); + funcSymbol->SetFunction(fn); + if (createBody) { + fn->NewBody(); + } + return fn; +} + +MIRFunction *MIRBuilder::CreateFunction(StIdx stIdx, bool addToTable) const { + auto *fn = mirModule->GetMemPool()->New(mirModule, stIdx); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + if (addToTable) { + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + } + + auto *funcType = mirModule->GetMemPool()->New(); + fn->SetMIRFuncType(funcType); + return fn; +} + +MIRSymbol *MIRBuilder::GetOrCreateGlobalDecl(const std::string &str, TyIdx tyIdx, bool &created) const { + GStrIdx strIdx = GetStringIndex(str); + if (strIdx != 0u) { + StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(strIdx); + if (stIdx.Idx() != 0) { + created = false; + return GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } + } + created = true; + strIdx = GetOrCreateStringIndex(str); + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(strIdx); + st->SetTyIdx(tyIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*st); + return st; +} + +MIRSymbol *MIRBuilder::GetOrCreateLocalDecl(const std::string &str, TyIdx tyIdx, MIRSymbolTable &symbolTable, + bool &created) const { + GStrIdx strIdx = GetStringIndex(str); + if (strIdx != 0u) { + StIdx stIdx = symbolTable.GetStIdxFromStrIdx(strIdx); + if 
(stIdx.Idx() != 0) { + created = false; + return symbolTable.GetSymbolFromStIdx(stIdx.Idx()); + } + } + created = true; + strIdx = GetOrCreateStringIndex(str); + MIRSymbol *st = symbolTable.CreateSymbol(kScopeLocal); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + st->SetNameStrIdx(strIdx); + st->SetTyIdx(tyIdx); + (void)symbolTable.AddToStringSymbolMap(*st); + return st; +} + +MIRSymbol *MIRBuilder::GetOrCreateDeclInFunc(const std::string &str, const MIRType &type, MIRFunction &func) { + MIRSymbolTable *symbolTable = func.GetSymTab(); + DEBUG_ASSERT(symbolTable != nullptr, "symbol_table is null"); + bool isCreated = false; + MIRSymbol *st = GetOrCreateLocalDecl(str, type.GetTypeIndex(), *symbolTable, isCreated); + if (isCreated) { + st->SetStorageClass(kScAuto); + st->SetSKind(kStVar); + } + return st; +} + +MIRSymbol *MIRBuilder::GetOrCreateLocalDecl(const std::string &str, const MIRType &type) { + MIRFunction *currentFunc = GetCurrentFunction(); + CHECK_FATAL(currentFunc != nullptr, "null ptr check"); + return GetOrCreateDeclInFunc(str, type, *currentFunc); +} + +MIRSymbol *MIRBuilder::CreateLocalDecl(const std::string &str, const MIRType &type) { + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + return MIRSymbolBuilder::Instance().CreateLocalDecl(*currentFunctionInner->GetSymTab(), + GetOrCreateStringIndex(str), type); +} + +MIRSymbol *MIRBuilder::GetGlobalDecl(const std::string &str) { + return MIRSymbolBuilder::Instance().GetGlobalDecl(GetStringIndex(str)); +} + +MIRSymbol *MIRBuilder::GetLocalDecl(const std::string &str) { + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + return MIRSymbolBuilder::Instance().GetLocalDecl(*currentFunctionInner->GetSymTab(), GetStringIndex(str)); +} + +// search the scope hierarchy +MIRSymbol *MIRBuilder::GetDecl(const std::string &str) { + GStrIdx strIdx = GetStringIndex(str); + MIRSymbol *sym = nullptr; + if (strIdx != 0u) { + // try to find the decl in local scope first + MIRFunction *currentFunctionInner = GetCurrentFunction(); + if (currentFunctionInner != nullptr) { + sym = currentFunctionInner->GetSymTab()->GetSymbolFromStrIdx(strIdx); + } + if (sym == nullptr) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + } + } + return sym; +} + +MIRSymbol *MIRBuilder::CreateGlobalDecl(const std::string &str, const MIRType &type, MIRStorageClass sc) { + return MIRSymbolBuilder::Instance().CreateGlobalDecl(GetOrCreateStringIndex(str), type, sc); +} + +MIRSymbol *MIRBuilder::GetOrCreateGlobalDecl(const std::string &str, const MIRType &type) { + bool isCreated = false; + MIRSymbol *st = GetOrCreateGlobalDecl(str, type.GetTypeIndex(), isCreated); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + if (isCreated) { + st->SetStorageClass(kScGlobal); + st->SetSKind(kStVar); + } else { + // Existing symbol may come from anther module. We need to register it + // in the current module so that per-module mpl file is self-sustained. 
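+    // A previously recorded initializer for this name, if any, is
+    // re-attached from the global const pool right after this branch.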
+ mirModule->AddSymbol(st); + } + MIRConst *cst = GlobalTables::GetConstPool().GetConstFromPool(st->GetNameStrIdx()); + if (cst != nullptr) { + st->SetKonst(cst); + } + return st; +} + +MIRSymbol *MIRBuilder::GetSymbolFromEnclosingScope(StIdx stIdx) const { + if (stIdx.FullIdx() == 0) { + return nullptr; + } + if (stIdx.Islocal()) { + MIRFunction *fun = GetCurrentFunctionNotNull(); + MIRSymbol *st = fun->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (st != nullptr) { + return st; + } + } + return GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); +} + +MIRSymbol *MIRBuilder::GetSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + uint8 scpID, bool sameType = false) const { + return GetSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, scpID, sameType); +} + +// when sametype is true, it means match everything the of the symbol +MIRSymbol *MIRBuilder::GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + uint8 scpID, bool sameType = false) const { + if (scpID != kScopeGlobal) { + ERR(kLncErr, "not yet implemented"); + return nullptr; + } + return MIRSymbolBuilder::Instance().GetSymbol(tyIdx, strIdx, mClass, sClass, sameType); +} + +MIRSymbol *MIRBuilder::GetOrCreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, + MIRStorageClass sClass, MIRFunction *func, uint8 scpID, + bool sametype = false) const { + return GetOrCreateSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, func, scpID, sametype); +} + +MIRSymbol *MIRBuilder::GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID, bool sameType = false) const { + if (MIRSymbol *st = GetSymbol(tyIdx, strIdx, mClass, sClass, scpID, sameType)) { + return st; + } + return CreateSymbol(tyIdx, strIdx, mClass, sClass, func, scpID); +} + +// when func is null, create global symbol, otherwise create local symbol +MIRSymbol *MIRBuilder::CreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const { + return CreateSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, func, scpID); +} + +// when func is null, create global symbol, otherwise create local symbol +MIRSymbol *MIRBuilder::CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const { + return MIRSymbolBuilder::Instance().CreateSymbol(tyIdx, strIdx, mClass, sClass, func, scpID); +} + +MIRSymbol *MIRBuilder::CreateConstStringSymbol(const std::string &symbolName, const std::string &content) { + auto elemPrimType = PTY_u8; + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(elemPrimType); + uint32 sizeIn = static_cast(content.length()); + MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType( + *GlobalTables::GetTypeTable().GetPrimType(elemPrimType), 1, &sizeIn); + + if (GetLocalDecl(symbolName)) { + return GetLocalDecl(symbolName); + } + MIRSymbol *arrayVar = GetOrCreateGlobalDecl(symbolName, *arrayTypeWithSize); + arrayVar->SetAttr(ATTR_readonly); + arrayVar->SetStorageClass(kScFstatic); + MIRAggConst *val = mirModule->GetMemPool()->New(*mirModule, *arrayTypeWithSize); + for (uint32 i = 0; i < sizeIn; ++i) { + MIRConst *cst = mirModule->GetMemPool()->New(content[i], *type); + val->PushBack(cst); + } + // This interface is only for string literal, 0 is added to the end of the string. 
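+  // Illustrative (hypothetical) call: CreateConstStringSymbol("myStr", "abc")
+  // yields a readonly u8 array holding 'a' 'b' 'c' plus the trailing 0
+  // appended below.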
+ MIRConst *cst0 = mirModule->GetMemPool()->New(0, *type); + val->PushBack(cst0); + arrayVar->SetKonst(val); + return arrayVar; +} + +MIRSymbol *MIRBuilder::CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const { + return MIRSymbolBuilder::Instance().CreatePregFormalSymbol(tyIdx, pRegIdx, func); +} + +ConstvalNode *MIRBuilder::CreateConstval(MIRConst *mirConst) { + return GetCurrentFuncCodeMp()->New(mirConst->GetType().GetPrimType(), mirConst); +} + +ConstvalNode *MIRBuilder::CreateIntConst(uint64 val, PrimType pty) { + auto *mirConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetPrimType(pty)); + return GetCurrentFuncCodeMp()->New(pty, mirConst); +} + +ConstvalNode *MIRBuilder::CreateFloatConst(float val) { + auto *mirConst = GetCurrentFuncDataMp()->New( + val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f32)); + return GetCurrentFuncCodeMp()->New(PTY_f32, mirConst); +} + +ConstvalNode *MIRBuilder::CreateDoubleConst(double val) { + auto *mirConst = GetCurrentFuncDataMp()->New( + val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + return GetCurrentFuncCodeMp()->New(PTY_f64, mirConst); +} + +ConstvalNode *MIRBuilder::CreateFloat128Const(const uint64 *val) { + auto *mirConst = GetCurrentFuncDataMp()->New( + *val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + return GetCurrentFuncCodeMp()->New(PTY_f128, mirConst); +} + +ConstvalNode *MIRBuilder::GetConstInt(MemPool &memPool, int val) { + auto *mirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetInt64()); + return memPool.New(PTY_i32, mirConst); +} + +ConstvalNode *MIRBuilder::CreateAddrofConst(BaseNode &node) { + DEBUG_ASSERT(node.GetOpCode() == OP_addrof, "illegal op for addrof const"); + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + + // determine the type of 'node' and create a pointer type, accordingly + auto &aNode = static_cast(node); + const MIRSymbol *var = currentFunctionInner->GetLocalOrGlobalSymbol(aNode.GetStIdx()); + TyIdx ptyIdx = var->GetTyIdx(); + MIRPtrType ptrType(ptyIdx); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType &exprType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + auto *temp = mirModule->GetMemPool()->New(aNode.GetStIdx(), aNode.GetFieldID(), exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, temp); +} + +ConstvalNode *MIRBuilder::CreateAddroffuncConst(const BaseNode &node) { + DEBUG_ASSERT(node.GetOpCode() == OP_addroffunc, "illegal op for addroffunc const"); + + const auto &aNode = static_cast(node); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(aNode.GetPUIdx()); + TyIdx ptyIdx = f->GetFuncSymbol()->GetTyIdx(); + MIRPtrType ptrType(ptyIdx); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + auto *mirConst = mirModule->GetMemPool()->New(aNode.GetPUIdx(), *exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, mirConst); +} + +ConstvalNode *MIRBuilder::CreateStrConst(const BaseNode &node) { + DEBUG_ASSERT(node.GetOpCode() == OP_conststr, "illegal op for conststr const"); + UStrIdx strIdx = static_cast(node).GetStrIdx(); + CHECK_FATAL(PTY_u8 < GlobalTables::GetTypeTable().GetTypeTable().size(), + "index is out of range in MIRBuilder::CreateStrConst"); + TyIdx tyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u8))->GetTypeIndex(); + MIRPtrType ptrType(tyIdx); + 
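+  // wrap the u8 element type in a pointer type: a conststr expression
+  // evaluates to a PTY_ptr pointing at the literal's bytes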
tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + auto *mirConst = mirModule->GetMemPool()->New(strIdx, *exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, mirConst); +} + +ConstvalNode *MIRBuilder::CreateStr16Const(const BaseNode &node) { + DEBUG_ASSERT(node.GetOpCode() == OP_conststr16, "illegal op for conststr16 const"); + U16StrIdx strIdx = static_cast(node).GetStrIdx(); + CHECK_FATAL(PTY_u16 < GlobalTables::GetTypeTable().GetTypeTable().size(), + "index out of range in MIRBuilder::CreateStr16Const"); + TyIdx ptyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u16))->GetTypeIndex(); + MIRPtrType ptrType(ptyIdx); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + auto *mirConst = mirModule->GetMemPool()->New(strIdx, *exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, mirConst); +} + +SizeoftypeNode *MIRBuilder::CreateExprSizeoftype(const MIRType &type) { + return GetCurrentFuncCodeMp()->New(PTY_u32, type.GetTypeIndex()); +} + +FieldsDistNode *MIRBuilder::CreateExprFieldsDist(const MIRType &type, FieldID field1, FieldID field2) { + return GetCurrentFuncCodeMp()->New(PTY_i32, type.GetTypeIndex(), field1, field2); +} + +AddrofNode *MIRBuilder::CreateExprAddrof(FieldID fieldID, const MIRSymbol &symbol, MemPool *memPool) { + return CreateExprAddrof(fieldID, symbol.GetStIdx(), memPool); +} + +AddrofNode *MIRBuilder::CreateExprAddrof(FieldID fieldID, StIdx symbolStIdx, MemPool *memPool) { + if (memPool == nullptr) { + memPool = GetCurrentFuncCodeMp(); + } + return memPool->New(OP_addrof, PTY_ptr, symbolStIdx, fieldID); +} + +AddroffuncNode *MIRBuilder::CreateExprAddroffunc(PUIdx puIdx, MemPool *memPool) { + if (memPool == nullptr) { + memPool = GetCurrentFuncCodeMp(); + } + return memPool->New(PTY_ptr, puIdx); +} + +AddrofNode *MIRBuilder::CreateExprDread(const MIRType &type, FieldID fieldID, const MIRSymbol &symbol) { + return CreateExprDread(type.GetPrimType(), fieldID, symbol); +} + +AddrofNode *MIRBuilder::CreateExprDread(PrimType ptyp, FieldID fieldID, const MIRSymbol &symbol) { + auto *node = GetCurrentFuncCodeMp()->New(OP_dread, kPtyInvalid, symbol.GetStIdx(), fieldID); + node->SetPrimType(GetRegPrimType(ptyp)); + return node; +} + +RegreadNode *MIRBuilder::CreateExprRegread(PrimType pty, PregIdx regIdx) { + return GetCurrentFuncCodeMp()->New(pty, regIdx); +} + +AddrofNode *MIRBuilder::CreateExprDread(MIRType &type, MIRSymbol &symbol) { + return CreateExprDread(type, 0, symbol); +} + +AddrofNode *MIRBuilder::CreateExprDread(MIRSymbol &symbol, uint16 fieldID) { + if (fieldID == 0) { + return CreateExprDread(symbol); + } + DEBUG_ASSERT(false, "NYI"); + return nullptr; +} + +AddrofNode *MIRBuilder::CreateExprDread(MIRSymbol &symbol) { + return CreateExprDread(*symbol.GetType(), 0, symbol); +} + +AddrofNode *MIRBuilder::CreateExprDread(PregIdx pregID, PrimType pty) { + auto *dread = GetCurrentFuncCodeMp()->New(OP_dread, pty); + dread->SetStFullIdx(pregID); + return dread; +} + +DreadoffNode *MIRBuilder::CreateExprDreadoff(Opcode op, PrimType pty, const MIRSymbol &symbol, int32 offset) { + DreadoffNode *node = GetCurrentFuncCodeMp()->New(op, pty); + node->stIdx = symbol.GetStIdx(); + node->offset = offset; + return node; +} + +IreadNode *MIRBuilder::CreateExprIread(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, + BaseNode *addr) { + TyIdx returnTypeIdx = 
returnType.GetTypeIndex(); + CHECK(returnTypeIdx < GlobalTables::GetTypeTable().GetTypeTable().size(), + "index out of range in MIRBuilder::CreateExprIread"); + DEBUG_ASSERT(fieldID != 0 || ptrType.GetPrimType() != PTY_agg, + "Error: Fieldid should not be 0 when trying to iread a field from type "); + PrimType type = GetRegPrimType(returnType.GetPrimType()); + return GetCurrentFuncCodeMp()->New(OP_iread, type, ptrType.GetTypeIndex(), fieldID, addr); +} + +IreadoffNode *MIRBuilder::CreateExprIreadoff(PrimType pty, int32 offset, BaseNode *opnd0) { + return GetCurrentFuncCodeMp()->New(pty, opnd0, offset); +} + +IreadFPoffNode *MIRBuilder::CreateExprIreadFPoff(PrimType pty, int32 offset) { + return GetCurrentFuncCodeMp()->New(pty, offset); +} + +IaddrofNode *MIRBuilder::CreateExprIaddrof(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, + BaseNode *addr) { + IaddrofNode *iAddrOfNode = CreateExprIread(returnType, ptrType, fieldID, addr); + iAddrOfNode->SetOpCode(OP_iaddrof); + return iAddrOfNode; +} + +IaddrofNode *MIRBuilder::CreateExprIaddrof(PrimType returnTypePty, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr) { + return GetCurrentFuncCodeMp()->New(OP_iaddrof, returnTypePty, ptrTypeIdx, fieldID, addr); +} + +UnaryNode *MIRBuilder::CreateExprUnary(Opcode opcode, const MIRType &type, BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opnd); +} + +GCMallocNode *MIRBuilder::CreateExprGCMalloc(Opcode opcode, const MIRType &pType, const MIRType &type) { + return GetCurrentFuncCodeMp()->New(opcode, pType.GetPrimType(), type.GetTypeIndex()); +} + +JarrayMallocNode *MIRBuilder::CreateExprJarrayMalloc(Opcode opcode, const MIRType &pType, const MIRType &type, + BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(opcode, pType.GetPrimType(), type.GetTypeIndex(), opnd); +} + +TypeCvtNode *MIRBuilder::CreateExprTypeCvt(Opcode o, PrimType toPrimType, PrimType fromPrimType, BaseNode &opnd) { + return GetCurrentFuncCodeMp()->New(o, toPrimType, fromPrimType, &opnd); +} + +TypeCvtNode *MIRBuilder::CreateExprTypeCvt(Opcode o, const MIRType &type, const MIRType &fromType, BaseNode *opnd) { + return CreateExprTypeCvt(o, type.GetPrimType(), fromType.GetPrimType(), *opnd); +} + +ExtractbitsNode *MIRBuilder::CreateExprExtractbits(Opcode o, const MIRType &type, uint32 bOffset, uint32 bSize, + BaseNode *opnd) { + return CreateExprExtractbits(o, type.GetPrimType(), bOffset, bSize, opnd); +} + +ExtractbitsNode *MIRBuilder::CreateExprExtractbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, + BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(o, type, bOffset, bSize, opnd); +} + +DepositbitsNode *MIRBuilder::CreateExprDepositbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, + BaseNode *leftOpnd, BaseNode* rightOpnd) { + return GetCurrentFuncCodeMp()->New(o, type, bOffset, bSize, leftOpnd, rightOpnd); +} + +RetypeNode *MIRBuilder::CreateExprRetype(const MIRType &type, const MIRType &fromType, BaseNode *opnd) { + return CreateExprRetype(type, fromType.GetPrimType(), opnd); +} + +RetypeNode *MIRBuilder::CreateExprRetype(const MIRType &type, PrimType fromType, BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(type.GetPrimType(), fromType, type.GetTypeIndex(), opnd); +} + +BinaryNode *MIRBuilder::CreateExprBinary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opnd0, opnd1); +} + +TernaryNode *MIRBuilder::CreateExprTernary(Opcode opcode, const MIRType 
&type, BaseNode *opnd0, BaseNode *opnd1, + BaseNode *opnd2) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opnd0, opnd1, opnd2); +} + +CompareNode *MIRBuilder::CreateExprCompare(Opcode opcode, const MIRType &type, const MIRType &opndType, BaseNode *opnd0, + BaseNode *opnd1) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opndType.GetPrimType(), opnd0, opnd1); +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType) { + MIRType *addrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(arrayType); + DEBUG_ASSERT(addrType != nullptr, "addrType is null"); + auto *arrayNode = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), + addrType->GetPrimType(), addrType->GetTypeIndex()); + arrayNode->SetNumOpnds(0); + return arrayNode; +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, BaseNode *op) { + ArrayNode *arrayNode = CreateExprArray(arrayType); + arrayNode->GetNopnd().push_back(op); + arrayNode->SetNumOpnds(1); + return arrayNode; +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, BaseNode *op1, BaseNode *op2) { + ArrayNode *arrayNode = CreateExprArray(arrayType, op1); + arrayNode->GetNopnd().push_back(op2); + arrayNode->SetNumOpnds(2); + return arrayNode; +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, std::vector ops) { + MIRType *addrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(arrayType); + DEBUG_ASSERT(addrType != nullptr, "addrType is null"); + auto *arrayNode = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), + addrType->GetPrimType(), addrType->GetTypeIndex()); + arrayNode->GetNopnd().insert(arrayNode->GetNopnd().begin(), ops.begin(), ops.end()); + arrayNode->SetNumOpnds(static_cast(ops.size())); + return arrayNode; +} + +IntrinsicopNode *MIRBuilder::CreateExprIntrinsicop(MIRIntrinsicID id, Opcode op, PrimType primType, TyIdx tyIdx, + const MapleVector &ops) { + auto *expr = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, primType); + expr->SetIntrinsic(id); + expr->SetNOpnd(ops); + expr->SetNumOpnds(ops.size()); + if (op == OP_intrinsicopwithtype) { + expr->SetTyIdx(tyIdx); + } + return expr; +} + +IntrinsicopNode *MIRBuilder::CreateExprIntrinsicop(MIRIntrinsicID idx, Opcode opCode, const MIRType &type, + const MapleVector &ops) { + return CreateExprIntrinsicop(idx, opCode, type.GetPrimType(), type.GetTypeIndex(), ops); +} + +DassignNode *MIRBuilder::CreateStmtDassign(const MIRSymbol &symbol, FieldID fieldID, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(src, symbol.GetStIdx(), fieldID); +} + +RegassignNode *MIRBuilder::CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(pty, regIdx, src); +} + +DassignNode *MIRBuilder::CreateStmtDassign(StIdx sIdx, FieldID fieldID, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(src, sIdx, fieldID); +} + +IassignNode *MIRBuilder::CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(type.GetTypeIndex(), fieldID, addr, src); +} + +IassignoffNode *MIRBuilder::CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *addr, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(pty, offset, addr, src); +} + +IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(Opcode op, PrimType pty, + int32 offset, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(op, pty, offset, src); +} + +CallNode *MIRBuilder::CreateStmtCall(PUIdx puIdx, const 
MapleVector &args, Opcode opCode) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opCode, puIdx, TyIdx()); + stmt->SetNOpnd(args); + stmt->SetNumOpnds(args.size()); + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCall(const std::string &callee, const MapleVector &args) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(callee); + StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(strIdx); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + DEBUG_ASSERT(st != nullptr, "MIRSymbol st is null"); + MIRFunction *func = st->GetFunction(); + return CreateStmtCall(func->GetPuidx(), args, OP_call); +} + +IcallNode *MIRBuilder::CreateStmtIcall(const MapleVector &args) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icall); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + return stmt; +} + +IcallNode *MIRBuilder::CreateStmtIcallproto(const MapleVector &args) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallproto); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + return stmt; +} + +IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector &args, const MIRSymbol &ret) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallassigned); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal || + ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal), + "unknown classtype! check it!"); + nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0))); + stmt->SetNumOpnds(args.size()); + stmt->GetNopnd().resize(stmt->GetNumOpnds()); + stmt->SetReturnVec(nrets); + for (size_t i = 0; i < stmt->GetNopndSize(); ++i) { + stmt->SetNOpndAt(i, args.at(i)); + } + stmt->SetRetTyIdx(ret.GetTyIdx()); + return stmt; +} + +IcallNode *MIRBuilder::CreateStmtIcallprotoAssigned(const MapleVector &args, const MIRSymbol &ret) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallprotoassigned); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal || + ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal), + "unknown classtype! check it!"); + nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0))); + stmt->SetNumOpnds(args.size()); + stmt->GetNopnd().resize(stmt->GetNumOpnds()); + stmt->SetReturnVec(nrets); + for (size_t i = 0; i < stmt->GetNopndSize(); ++i) { + stmt->SetNOpndAt(i, args.at(i)); + } + stmt->SetRetTyIdx(ret.GetTyIdx()); + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments, + TyIdx tyIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New( + *GetCurrentFuncCodeMpAllocator(), tyIdx == 0u ? 
OP_intrinsiccall : OP_intrinsiccallwithtype, idx); + stmt->SetTyIdx(tyIdx); + stmt->SetOpnds(arguments); + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtXintrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments) { + auto *stmt = + GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_xintrinsiccall, idx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(arguments); + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallAssigned(PUIdx puIdx, const MIRSymbol *ret, Opcode op) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, puIdx); + if (ret) { + DEBUG_ASSERT(ret->IsLocal(), "Not Excepted ret"); + stmt->GetReturnVec().push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallAssigned(PUIdx puIdx, const MapleVector &args, const MIRSymbol *ret, + Opcode opcode, TyIdx tyIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx, tyIdx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + if (ret != nullptr) { + DEBUG_ASSERT(ret->IsLocal(), "Not Excepted ret"); + stmt->GetReturnVec().push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallRegassigned(PUIdx puIdx, PregIdx pRegIdx, Opcode opcode, BaseNode *arg) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx); + stmt->GetNopnd().push_back(arg); + stmt->SetNumOpnds(stmt->GetNopndSize()); + if (pRegIdx > 0) { + stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, pRegIdx))); + } + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallRegassigned(PUIdx puIdx, const MapleVector &args, PregIdx pRegIdx, + Opcode opcode) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + if (pRegIdx > 0) { + stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, pRegIdx))); + } + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + PregIdx retPregIdx) { + auto *stmt = + GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_intrinsiccallassigned, idx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + if (retPregIdx > 0) { + stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, retPregIdx))); + } + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret, TyIdx tyIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New( + *GetCurrentFuncCodeMpAllocator(), tyIdx == 0u ? 
OP_intrinsiccallassigned : OP_intrinsiccallwithtypeassigned, idx); + stmt->SetTyIdx(tyIdx); + stmt->SetOpnds(args); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + if (ret != nullptr) { + DEBUG_ASSERT(ret->IsLocal(), "Not Excepted ret"); + nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + stmt->SetReturnVec(nrets); + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtXintrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret) { + auto *stmt = + GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_xintrinsiccallassigned, idx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + if (ret != nullptr) { + DEBUG_ASSERT(ret->IsLocal(), "Not Excepted ret"); + nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + stmt->SetReturnVec(nrets); + return stmt; +} + +NaryStmtNode *MIRBuilder::CreateStmtReturn(BaseNode *rVal) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_return); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->PushOpnd(rVal); + return stmt; +} + +NaryStmtNode *MIRBuilder::CreateStmtNary(Opcode op, const MapleVector &rVals) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(rVals); + return stmt; +} + +CallAssertBoundaryStmtNode *MIRBuilder::CreateStmtCallAssertBoundary(Opcode op, const MapleVector &rVals, + GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, + funcNameIdx, paramIndex, stmtFuncNameIdx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(rVals); + return stmt; +} + +NaryStmtNode *MIRBuilder::CreateStmtNary(Opcode op, BaseNode *rVal) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->PushOpnd(rVal); + return stmt; +} + +AssertNonnullStmtNode *MIRBuilder::CreateStmtAssertNonnull(Opcode op, BaseNode* rVal, GStrIdx funcNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(op, funcNameIdx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetRHS(rVal); + return stmt; +} + +AssertBoundaryStmtNode *MIRBuilder::CreateStmtAssertBoundary(Opcode op, const MapleVector &rVals, + GStrIdx funcNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, funcNameIdx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(rVals); + return stmt; +} + +CallAssertNonnullStmtNode *MIRBuilder::CreateStmtCallAssertNonnull(Opcode op, BaseNode* rVal, GStrIdx callFuncNameIdx, + size_t paramIndex, GStrIdx stmtFuncNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(op, callFuncNameIdx, paramIndex, stmtFuncNameIdx); + DEBUG_ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetRHS(rVal); + return stmt; +} + +UnaryStmtNode *MIRBuilder::CreateStmtUnary(Opcode op, BaseNode *rVal) { + return GetCurrentFuncCodeMp()->New(op, kPtyInvalid, rVal); +} + +UnaryStmtNode *MIRBuilder::CreateStmtThrow(BaseNode *rVal) { + return CreateStmtUnary(OP_throw, rVal); +} + +IfStmtNode *MIRBuilder::CreateStmtIf(BaseNode *cond) { + auto *ifStmt = GetCurrentFuncCodeMp()->New(); + ifStmt->SetOpnd(cond, 0); + BlockNode *thenBlock = GetCurrentFuncCodeMp()->New(); + ifStmt->SetThenPart(thenBlock); + return ifStmt; +} + 
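+// Minimal usage sketch (hypothetical caller code, assuming the usual
+// GetThenPart() accessor; not part of the builder itself):
+//   IfStmtNode *ifStmt = mirBuilder->CreateStmtIf(cond);  // then-block only
+//   ifStmt->GetThenPart()->AddStatement(thenStmt);
+//   mirBuilder->AddStmtInCurrentFunctionBody(*ifStmt);
+// CreateStmtIfThenElse below additionally allocates the else block and sets
+// numOpnds to 3 (condition, then-part, else-part).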
+IfStmtNode *MIRBuilder::CreateStmtIfThenElse(BaseNode *cond) { + auto *ifStmt = GetCurrentFuncCodeMp()->New(); + ifStmt->SetOpnd(cond, 0); + auto *thenBlock = GetCurrentFuncCodeMp()->New(); + ifStmt->SetThenPart(thenBlock); + auto *elseBlock = GetCurrentFuncCodeMp()->New(); + ifStmt->SetElsePart(elseBlock); + ifStmt->SetNumOpnds(3); + return ifStmt; +} + +DoloopNode *MIRBuilder::CreateStmtDoloop(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp, + BaseNode *incrExp) { + return GetCurrentFuncCodeMp()->New(doVarStIdx, isPReg, startExp, contExp, incrExp, + GetCurrentFuncCodeMp()->New()); +} + +SwitchNode *MIRBuilder::CreateStmtSwitch(BaseNode *opnd, LabelIdx defaultLabel, const CaseVector &switchTable) { + auto *switchNode = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), + defaultLabel, opnd); + switchNode->SetSwitchTable(switchTable); + return switchNode; +} + +GotoNode *MIRBuilder::CreateStmtGoto(Opcode o, LabelIdx labIdx) { + return GetCurrentFuncCodeMp()->New(o, labIdx); +} + +JsTryNode *MIRBuilder::CreateStmtJsTry(Opcode, LabelIdx cLabIdx, LabelIdx fLabIdx) { + return GetCurrentFuncCodeMp()->New(static_cast(cLabIdx), static_cast(fLabIdx)); +} + +TryNode *MIRBuilder::CreateStmtTry(const MapleVector &cLabIdxs) { + return GetCurrentFuncCodeMp()->New(cLabIdxs); +} + +CatchNode *MIRBuilder::CreateStmtCatch(const MapleVector &tyIdxVec) { + return GetCurrentFuncCodeMp()->New(tyIdxVec); +} + +LabelNode *MIRBuilder::CreateStmtLabel(LabelIdx labIdx) { + return GetCurrentFuncCodeMp()->New(labIdx); +} + +StmtNode *MIRBuilder::CreateStmtComment(const std::string &cmnt) { + return GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), cmnt); +} + +AddrofNode *MIRBuilder::CreateAddrof(const MIRSymbol &st, PrimType pty) { + return GetCurrentFuncCodeMp()->New(OP_addrof, pty, st.GetStIdx(), 0); +} + +AddrofNode *MIRBuilder::CreateDread(const MIRSymbol &st, PrimType pty) { + return GetCurrentFuncCodeMp()->New(OP_dread, pty, st.GetStIdx(), 0); +} + +CondGotoNode *MIRBuilder::CreateStmtCondGoto(BaseNode *cond, Opcode op, LabelIdx labIdx) { + return GetCurrentFuncCodeMp()->New(op, labIdx, cond); +} + +LabelIdx MIRBuilder::GetOrCreateMIRLabel(const std::string &name) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + LabelIdx lableIdx = currentFunctionInner->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (lableIdx == 0) { + lableIdx = currentFunctionInner->GetLabelTab()->CreateLabel(); + currentFunctionInner->GetLabelTab()->SetSymbolFromStIdx(lableIdx, strIdx); + currentFunctionInner->GetLabelTab()->AddToStringLabelMap(lableIdx); + } + return lableIdx; +} + +LabelIdx MIRBuilder::CreateLabIdx(MIRFunction &mirFunc) { + LabelIdx lableIdx = mirFunc.GetLabelTab()->CreateLabel(); + mirFunc.GetLabelTab()->AddToStringLabelMap(lableIdx); + return lableIdx; +} + +void MIRBuilder::AddStmtInCurrentFunctionBody(StmtNode &stmt) { + MIRFunction *fun = GetCurrentFunctionNotNull(); + stmt.GetSrcPos().CondSetLineNum(lineNum); + fun->GetBody()->AddStatement(&stmt); +} + +MemPool *MIRBuilder::GetCurrentFuncCodeMp() { + if (MIRFunction *curFunction = GetCurrentFunction()) { + return curFunction->GetCodeMemPool(); + } + return mirModule->GetMemPool(); +} + +MapleAllocator *MIRBuilder::GetCurrentFuncCodeMpAllocator() { + if (MIRFunction *curFunction = GetCurrentFunction()) { + return &curFunction->GetCodeMPAllocator(); + } + return &mirModule->GetMPAllocator(); +} + +MemPool 
*MIRBuilder::GetCurrentFuncDataMp() { + if (MIRFunction *curFunction = GetCurrentFunction()) { + return curFunction->GetDataMemPool(); + } + return mirModule->GetMemPool(); +} + +MIRBuilderExt::MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex) : MIRBuilder(module), mutex(mutex) {} + +MemPool *MIRBuilderExt::GetCurrentFuncCodeMp() { + DEBUG_ASSERT(curFunction, "curFunction is null"); + return curFunction->GetCodeMemPool(); +} + +MapleAllocator *MIRBuilderExt::GetCurrentFuncCodeMpAllocator() { + DEBUG_ASSERT(curFunction, "curFunction is null"); + return &curFunction->GetCodeMemPoolAllocator(); +} + +void MIRBuilderExt::GlobalLock() { + if (mutex) { + DEBUG_ASSERT(pthread_mutex_lock(mutex) == 0, "lock failed"); + } +} + +void MIRBuilderExt::GlobalUnlock() { + if (mutex) { + DEBUG_ASSERT(pthread_mutex_unlock(mutex) == 0, "unlock failed"); + } +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/mir_const.cpp b/ecmascript/mapleall/maple_ir/src/mir_const.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e44a0a742fd9046e29bc8f594b307b8b94df0fc1 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/mir_const.cpp @@ -0,0 +1,277 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_const.h" +#include "mir_function.h" +#include "global_tables.h" +#include "printing.h" +#if MIR_FEATURE_FULL + +namespace maple { +void MIRIntConst::Dump(const MIRSymbolTable*) const { + LogInfo::MapleLogger() << value; +} + +bool MIRIntConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &intConst = static_cast(rhs); + return ((&intConst.GetType() == &GetType()) && (intConst.value == value)); +} + +uint8 MIRIntConst::GetActualBitWidth() const { + if (value == 0) { + return 1; + } + + int64 val = GetExtValue(); + uint64 tmp = val < 0 ? -(val + 1) : val; + + uint8 width = 0; + while (tmp != 0) { + ++width; + tmp = tmp >> 1u; + } + + return width; +} + +void MIRAddrofConst::Dump(const MIRSymbolTable *localSymTab) const { + LogInfo::MapleLogger() << "addrof " << GetPrimTypeName(PTY_ptr); + const MIRSymbol *sym = stIdx.IsGlobal() ? GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) + : localSymTab->GetSymbolFromStIdx(stIdx.Idx()); + DEBUG_ASSERT(stIdx.IsGlobal() || sym->GetStorageClass() == kScPstatic || sym->GetStorageClass() == kScFstatic, + "MIRAddrofConst can only point to a global symbol"); + LogInfo::MapleLogger() << (stIdx.IsGlobal() ? 
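// global symbols are printed with a '$' prefix, locals with '%'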
" $" : " %") << sym->GetName(); + if (fldID > 0) { + LogInfo::MapleLogger() << " " << fldID; + } + if (offset != 0) { + LogInfo::MapleLogger() << " (" << offset << ")"; + } +} + +bool MIRAddrofConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsA = static_cast(rhs); + if (&GetType() != &rhs.GetType()) { + return false; + } + return (stIdx == rhsA.stIdx) && (fldID == rhsA.fldID); +} + +void MIRAddroffuncConst::Dump(const MIRSymbolTable*) const { + LogInfo::MapleLogger() << "addroffunc " << GetPrimTypeName(PTY_ptr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" << GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx())->GetName(); +} + +bool MIRAddroffuncConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsAf = static_cast(rhs); + return (&GetType() == &rhs.GetType()) && (puIdx == rhsAf.puIdx); +} + +void MIRLblConst::Dump(const MIRSymbolTable*) const { + LogInfo::MapleLogger() << "addroflabel " << GetPrimTypeName(PTY_ptr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " @" << func->GetLabelName(value); +} + +bool MIRLblConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &lblConst = static_cast(rhs); + return (lblConst.value == value); +} + +bool MIRFloatConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if (std::isnan(floatConst.value.floatValue)) { + return std::isnan(value.floatValue); + } + if (std::isnan(value.floatValue)) { + return std::isnan(floatConst.value.floatValue); + } + if (floatConst.value.floatValue == 0.0 && value.floatValue == 0.0) { + return floatConst.IsNeg() == IsNeg(); + } + // Use bitwise comparison instead of approximate comparison for FP to avoid treating 0.0 and FLT_MIN as equal + return (floatConst.value.intValue == value.intValue); +} + +bool MIRDoubleConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if (std::isnan(floatConst.value.dValue)) { + return std::isnan(value.dValue); + } + if (std::isnan(value.dValue)) { + return std::isnan(floatConst.value.dValue); + } + if (floatConst.value.dValue == 0.0 && value.dValue == 0.0) { + return floatConst.IsNeg() == IsNeg(); + } + // Use bitwise comparison instead of approximate comparison for FP to avoid treating 0.0 and DBL_MIN as equal + return (floatConst.value.intValue == value.intValue); +} + +bool MIRFloat128Const::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if ((value[0] == floatConst.value[0]) && (value[1] == floatConst.value[1])) { + return true; + } + return false; +} + +bool MIRAggConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &aggregateConst = static_cast(rhs); + if (aggregateConst.constVec.size() != constVec.size()) { + 
    return false;
+  }
+  for (size_t i = 0; i < constVec.size(); ++i) {
+    if (!(*aggregateConst.constVec[i] == *constVec[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void MIRFloatConst::Dump(const MIRSymbolTable*) const {
+  LogInfo::MapleLogger() << std::setprecision(std::numeric_limits<float>::max_digits10) << value.floatValue << "f";
+}
+
+void MIRDoubleConst::Dump(const MIRSymbolTable*) const {
+  LogInfo::MapleLogger() << std::setprecision(std::numeric_limits<double>::max_digits10) << value.dValue;
+}
+
+void MIRFloat128Const::Dump(const MIRSymbolTable*) const {
+  constexpr int fieldWidth = 16;
+  std::ios::fmtflags f(LogInfo::MapleLogger().flags());
+  LogInfo::MapleLogger().setf(std::ios::uppercase);
+  LogInfo::MapleLogger() << "0xL" << std::hex << std::setfill('0') << std::setw(fieldWidth) << value[0]
+                         << std::setfill('0') << std::setw(fieldWidth) << value[1];
+  LogInfo::MapleLogger().flags(f);
+}
+
+void MIRAggConst::Dump(const MIRSymbolTable *localSymTab) const {
+  LogInfo::MapleLogger() << "[";
+  size_t size = constVec.size();
+  for (size_t i = 0; i < size; ++i) {
+    if (fieldIdVec[i] != 0) {
+      LogInfo::MapleLogger() << fieldIdVec[i] << "= ";
+    }
+    constVec[i]->Dump(localSymTab);
+    if (i != size - 1) {
+      LogInfo::MapleLogger() << ", ";
+    }
+  }
+  LogInfo::MapleLogger() << "]";
+}
+
+MIRStrConst::MIRStrConst(const std::string &str, MIRType &type)
+    : MIRConst(type, kConstStrConst), value(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str)) {}
+
+void MIRStrConst::Dump(const MIRSymbolTable*) const {
+  LogInfo::MapleLogger() << "conststr " << GetPrimTypeName(GetType().GetPrimType());
+  const std::string &dumpStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(value);
+  PrintString(dumpStr);
+}
+
+bool MIRStrConst::operator==(const MIRConst &rhs) const {
+  if (&rhs == this) {
+    return true;
+  }
+  if (GetKind() != rhs.GetKind()) {
+    return false;
+  }
+  const auto &rhsCs = static_cast<const MIRStrConst&>(rhs);
+  return (&rhs.GetType() == &GetType()) && (value == rhsCs.value);
+}
+
+MIRStr16Const::MIRStr16Const(const std::u16string &str, MIRType &type)
+    : MIRConst(type, kConstStr16Const),
+      value(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str)) {}
+
+void MIRStr16Const::Dump(const MIRSymbolTable*) const {
+  LogInfo::MapleLogger() << "conststr16 " << GetPrimTypeName(GetType().GetPrimType());
+  std::u16string str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(value);
+  // UTF-16 strings are dumped as UTF-8 strings in mpl to keep the printable chars in ascii form
+  std::string str;
+  (void)namemangler::UTF16ToUTF8(str, str16);
+  PrintString(str);
+}
+
+bool MIRStr16Const::operator==(const MIRConst &rhs) const {
+  if (&rhs == this) {
+    return true;
+  }
+  if (GetKind() != rhs.GetKind()) {
+    return false;
+  }
+  const auto &rhsCs = static_cast<const MIRStr16Const&>(rhs);
+  return (&GetType() == &rhs.GetType()) && (value == rhsCs.value);
+}
+
+bool IsDivSafe(const MIRIntConst &dividend, const MIRIntConst &divisor, PrimType pType) {
+  if (IsUnsignedInteger(pType)) {
+    return divisor.GetValue() != 0;
+  }
+
+  return divisor.GetValue() != 0 && (!dividend.GetValue().IsMinValue() || !divisor.GetValue().AreAllBitsOne());
+}
+
+} // namespace maple
+#endif // MIR_FEATURE_FULL
diff --git a/ecmascript/mapleall/maple_ir/src/mir_function.cpp b/ecmascript/mapleall/maple_ir/src/mir_function.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cd067791f2d2fd0c8bd33b4b7eb52aca0e11e388
--- /dev/null
+++ b/ecmascript/mapleall/maple_ir/src/mir_function.cpp
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) [2019-2021] Huawei
Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_function.h" +#include +#include +#include "mir_nodes.h" +#include "printing.h" +#include "string_utils.h" +#include "ipa_side_effect.h" + +namespace { +using namespace maple; +enum FuncProp : uint32_t { + kFuncPropHasCall = 1U, // the function has call + kFuncPropRetStruct = 1U << 1, // the function returns struct + kFuncPropUserFunc = 1U << 2, // the function is a user func + kFuncPropInfoPrinted = 1U << 3, // to avoid printing frameSize/moduleid/funcSize info more + // than once per function since they + // can only be printed at the beginning of a block + kFuncPropNeverReturn = 1U << 4, // the function when called never returns + kFuncPropHasSetjmp = 1U << 5, // the function contains call to setjmp + kFuncPropHasAsm = 1U << 6, // the function has use of inline asm + kFuncPropStructReturnedInRegs = 1U << 7, // the function returns struct in registers +}; +} // namespace + +namespace maple { +const MIRSymbol *MIRFunction::GetFuncSymbol() const { + return GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); +} +MIRSymbol *MIRFunction::GetFuncSymbol() { + const MIRFunction *mirFunc = const_cast(this); + DEBUG_ASSERT(mirFunc != nullptr, "null ptr check"); + return const_cast(mirFunc->GetFuncSymbol()); +} + +const std::string &MIRFunction::GetName() const { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + DEBUG_ASSERT(mirSymbol != nullptr, "null ptr check"); + return mirSymbol->GetName(); +} + +GStrIdx MIRFunction::GetNameStrIdx() const { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + DEBUG_ASSERT(mirSymbol != nullptr, "null ptr check"); + return mirSymbol->GetNameStrIdx(); +} + +const std::string &MIRFunction::GetBaseClassName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseClassStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncNameWithType() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncWithTypeStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncSig() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncSigStrIdx); +} + +const std::string &MIRFunction::GetSignature() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(signatureStrIdx); +} + +const MIRType *MIRFunction::GetReturnType() const { + CHECK_FATAL(funcType != nullptr, "funcType should not be nullptr"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); +} +MIRType *MIRFunction::GetReturnType() { + return const_cast(const_cast(this)->GetReturnType()); +} +const MIRType *MIRFunction::GetClassType() const { + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(classTyIdx); +} +const MIRType *MIRFunction::GetNthParamType(size_t i) const { + CHECK_FATAL(funcType != nullptr, "funcType should not be nullptr"); + 
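// parameter types live on the function's MIRFuncType, so the lookup goes
// through the global type table rather than the symbol table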
DEBUG_ASSERT(i < funcType->GetParamTypeList().size(), "array index out of range"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetParamTypeList()[i]); +} +MIRType *MIRFunction::GetNthParamType(size_t i) { + return const_cast(const_cast(this)->GetNthParamType(i)); +} + +// reconstruct formals, and return a new MIRFuncType +MIRFuncType *MIRFunction::ReconstructFormals(const std::vector &symbols, bool clearOldArgs) { + auto *newFuncType = static_cast(funcType->CopyMIRTypeNode()); + if (clearOldArgs) { + formalDefVec.clear(); + newFuncType->GetParamTypeList().clear(); + newFuncType->GetParamAttrsList().clear(); + } + for (auto *symbol : symbols) { + FormalDef formalDef(symbol->GetNameStrIdx(), symbol, symbol->GetTyIdx(), symbol->GetAttrs()); + formalDefVec.push_back(formalDef); + newFuncType->GetParamTypeList().push_back(symbol->GetTyIdx()); + newFuncType->GetParamAttrsList().push_back(symbol->GetAttrs()); + } + return newFuncType; +} + +void MIRFunction::UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs) { + auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); + auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); + funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); + delete newFuncType; +} + +void MIRFunction::UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, + bool clearOldArgs) { + auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); + newFuncType->SetRetTyIdx(retTyIdx); + auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); + funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); + delete newFuncType; +} + +LabelIdx MIRFunction::GetOrCreateLableIdxFromName(const std::string &name) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + LabelIdx labelIdx = GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labelIdx == 0) { + labelIdx = GetLabelTab()->CreateLabel(); + GetLabelTab()->SetSymbolFromStIdx(labelIdx, strIdx); + GetLabelTab()->AddToStringLabelMap(labelIdx); + } + return labelIdx; +} + +bool MIRFunction::HasCall() const { + return flag & kFuncPropHasCall; +} +void MIRFunction::SetHasCall() { + flag |= kFuncPropHasCall; +} + +bool MIRFunction::IsReturnStruct() const { + return flag & kFuncPropRetStruct; +} +void MIRFunction::SetReturnStruct() { + flag |= kFuncPropRetStruct; +} +void MIRFunction::SetReturnStruct(const MIRType &retType) { + if (retType.IsStructType()) { + flag |= kFuncPropRetStruct; + } +} +void MIRFunction::SetReturnStruct(const MIRType *retType) { + switch (retType->GetKind()) { + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: + flag |= kFuncPropRetStruct; + break; + default:; + } +} + +bool MIRFunction::IsUserFunc() const { + return flag & kFuncPropUserFunc; +} +void MIRFunction::SetUserFunc() { + flag |= kFuncPropUserFunc; +} + +bool MIRFunction::IsInfoPrinted() const { + return flag & kFuncPropInfoPrinted; +} +void MIRFunction::SetInfoPrinted() { + flag |= kFuncPropInfoPrinted; +} +void MIRFunction::ResetInfoPrinted() { + flag &= ~kFuncPropInfoPrinted; +} + +void MIRFunction::SetNoReturn() { + flag |= kFuncPropNeverReturn; +} +bool MIRFunction::NeverReturns() const { + return flag & kFuncPropNeverReturn; +} + +void MIRFunction::SetHasSetjmp() { + flag |= kFuncPropHasSetjmp; +} + +bool 
MIRFunction::HasSetjmp() const { + return ((flag & kFuncPropHasSetjmp) != kTypeflagZero); +} + +void MIRFunction::SetHasAsm() { + flag |= kFuncPropHasAsm; +} + +bool MIRFunction::HasAsm() const { + return ((flag & kFuncPropHasAsm) != kTypeflagZero); +} + +void MIRFunction::SetStructReturnedInRegs() { + flag |= kFuncPropStructReturnedInRegs; +} + +bool MIRFunction::StructReturnedInRegs() const { + return ((flag & kFuncPropStructReturnedInRegs) != kTypeflagZero); +} + +void MIRFunction::SetAttrsFromSe(uint8 specialEffect) { + // NoPrivateDefEffect + if ((specialEffect & kDefEffect) == kDefEffect) { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect); + } + // NoPrivateUseEffect + if ((specialEffect & kUseEffect) == kUseEffect) { + funcAttrs.SetAttr(FUNCATTR_noretarg); + } + // IpaSeen + if ((specialEffect & kIpaSeen) == kIpaSeen) { + funcAttrs.SetAttr(FUNCATTR_ipaseen); + } + // Pure + if ((specialEffect & kPureFunc) == kPureFunc) { + funcAttrs.SetAttr(FUNCATTR_pure); + } + // NoDefArgEffect + if ((specialEffect & kNoDefArgEffect) == kNoDefArgEffect) { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect); + } + // NoDefEffect + if ((specialEffect & kNoDefEffect) == kNoDefEffect) { + funcAttrs.SetAttr(FUNCATTR_nodefeffect); + } + // NoRetNewlyAllocObj + if ((specialEffect & kNoRetNewlyAllocObj) == kNoRetNewlyAllocObj) { + funcAttrs.SetAttr(FUNCATTR_noretglobal); + } + // NoThrowException + if ((specialEffect & kNoThrowException) == kNoThrowException) { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception); + } +} + +void FuncAttrs::DumpAttributes() const { +// parse no content of attr +#define STRING(s) #s +#define FUNC_ATTR +#define NOCONTENT_ATTR +#define ATTR(AT) \ + if (GetAttr(FUNCATTR_##AT)) { \ + LogInfo::MapleLogger() << " " << STRING(AT); \ + } +#include "all_attributes.def" +#undef ATTR +#undef NOCONTENT_ATTR +#undef FUNC_ATTR +// parse content of attr + if (GetAttr(FUNCATTR_alias) && !GetAliasFuncName().empty()) { + LogInfo::MapleLogger() << " alias ( \"" << GetAliasFuncName() << "\" )"; + } + if (GetAttr(FUNCATTR_constructor_priority) && GetConstructorPriority() != -1) { + LogInfo::MapleLogger() << " constructor_priority ( " << GetConstructorPriority() << " )"; + } + if (GetAttr(FUNCATTR_destructor_priority) && GetDestructorPriority() != -1) { + LogInfo::MapleLogger() << " destructor_priority ( " << GetDestructorPriority() << " )"; + } + if (GetAttr(FUNCATTR_frame_pointer) && !framePointer.empty()) { + LogInfo::MapleLogger() << " frame-pointer ( " << framePointer << " )"; + } + if (GetAttr(FUNCATTR_frame_reserved_slots) && GetFrameResverdSlot() != 0) { + LogInfo::MapleLogger() << " frame-reserved-slots ( " << GetFrameResverdSlot() << " )"; + } +} + +void MIRFunction::DumpFlavorLoweredThanMmpl() const { + LogInfo::MapleLogger() << " ("; + + // Dump arguments + bool hasPrintedFormal = false; + for (uint32 i = 0; i < formalDefVec.size(); i++) { + MIRSymbol *symbol = formalDefVec[i].formalSym; + if (symbol == nullptr && + (formalDefVec[i].formalStrIdx.GetIdx() == 0 || + GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx).empty())) { + break; + } + hasPrintedFormal = true; + if (symbol == nullptr) { + LogInfo::MapleLogger() << "var %" + << GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx) + << " "; + } else { + if (symbol->GetSKind() != kStPreg) { + LogInfo::MapleLogger() << "var %" << symbol->GetName() << " "; + } else { + LogInfo::MapleLogger() << "reg %" << symbol->GetPreg()->GetPregNo() << " "; + } + } + MIRType *ty = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDefVec[i].formalTyIdx); + constexpr uint8 indent = 2; + ty->Dump(indent); + if (symbol != nullptr) { + symbol->GetAttrs().DumpAttributes(); + } else { + formalDefVec[i].formalAttrs.DumpAttributes(); + } + if (i != (formalDefVec.size() - 1)) { + LogInfo::MapleLogger() << ", "; + } + } + if (IsVarargs()) { + if (!hasPrintedFormal) { + LogInfo::MapleLogger() << "..."; + } else { + LogInfo::MapleLogger() << ", ..."; + } + } + + LogInfo::MapleLogger() << ") "; + GetReturnType()->Dump(1); +} + +void MIRFunction::Dump(bool withoutBody) { + // skip the functions that are added during process methods in + // class and interface decls. these has nothing in formals + // they do have paramtypelist_. this can not skip ones without args + // but for them at least the func decls are valid + if ((module->IsJavaModule() && GetParamSize() != formalDefVec.size()) || + GetAttr(FUNCATTR_optimized)) { + return; + } + + // save the module's curFunction and set it to the one currently Dump()ing + MIRFunction *savedFunc = module->CurFunction(); + module->SetCurFunction(this); + + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + DEBUG_ASSERT(symbol != nullptr, "symbol MIRSymbol is null"); + if (!withoutBody) { + symbol->GetSrcPosition().DumpLoc(MIRSymbol::LastPrintedLineNumRef(), MIRSymbol::LastPrintedColumnNumRef()); + } + LogInfo::MapleLogger() << "func " << "&" << symbol->GetName(); + theMIRModule = module; + funcAttrs.DumpAttributes(); + + if (symbol->GetWeakrefAttr().first) { + LogInfo::MapleLogger() << " weakref"; + if (symbol->GetWeakrefAttr().second != UStrIdx(0)) { + LogInfo::MapleLogger() << " ("; + PrintString(GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetWeakrefAttr().second)); + LogInfo::MapleLogger() << " )"; + } + } + + if (symbol->sectionAttr != UStrIdx(0)) { + LogInfo::MapleLogger() << " section ("; + PrintString(GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->sectionAttr)); + LogInfo::MapleLogger() << " )"; + } + + if (module->GetFlavor() != kMmpl) { + DumpFlavorLoweredThanMmpl(); + } + + // codeMemPool is nullptr, means maple_ir has been released for memory's sake + if (codeMemPool == nullptr) { + LogInfo::MapleLogger() << '\n'; + } else if (GetBody() != nullptr && !withoutBody && symbol->GetStorageClass() != kScExtern) { + ResetInfoPrinted(); // this ensures funcinfo will be printed + GetBody()->Dump(0, module->GetFlavor() == kMmpl ? nullptr : GetSymTab(), + module->GetFlavor() < kMmpl ? 
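// only flavors below kMmpl still carry a pseudo-register table to dump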
GetPregTab() : nullptr, false, + true, module->GetFlavor()); // Dump body + } else { + LogInfo::MapleLogger() << '\n'; + } + + // restore the curFunction + module->SetCurFunction(savedFunc); +} + +void MIRFunction::DumpUpFormal(int32 indent) const { + PrintIndentation(indent + 1); + + LogInfo::MapleLogger() << "upformalsize " << GetUpFormalSize() << '\n'; + if (localWordsTypeTagged != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "formalWordsTypeTagged = [ "; + const auto *p = reinterpret_cast(localWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsTypeTagged + BlockSize2BitVectorSize(GetUpFormalSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + + if (formalWordsRefCounted != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "formalWordsRefCounted = [ "; + const uint32 *p = reinterpret_cast(formalWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(formalWordsRefCounted + BlockSize2BitVectorSize(GetUpFormalSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } +} + +void MIRFunction::DumpFrame(int32 indent) const { + PrintIndentation(indent + 1); + + LogInfo::MapleLogger() << "framesize " << static_cast(GetFrameSize()) << '\n'; + if (localWordsTypeTagged != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "localWordsTypeTagged = [ "; + const uint32 *p = reinterpret_cast(localWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsTypeTagged + BlockSize2BitVectorSize(GetFrameSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + + if (localWordsRefCounted != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "localWordsRefCounted = [ "; + const uint32 *p = reinterpret_cast(localWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsRefCounted + BlockSize2BitVectorSize(GetFrameSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } +} + +void MIRFunction::DumpScope() { + scope->Dump(0); +} + +void MIRFunction::DumpFuncBody(int32 indent) { + LogInfo::MapleLogger() << " funcid " << GetPuidxOrigin() << '\n'; + + if (IsInfoPrinted()) { + return; + } + + SetInfoPrinted(); + + if (GetUpFormalSize() > 0) { + DumpUpFormal(indent); + } + + if (GetFrameSize() > 0) { + DumpFrame(indent); + } + + if (GetOutParmSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "outparmsize " << GetOutParmSize() << '\n'; + } + + if (GetModuleId() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "moduleID " << static_cast(GetModuleId()) << '\n'; + } + + if (GetFuncSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "funcSize " << GetFuncSize() << '\n'; + } + + if (GetInfoVector().empty()) { + return; + } + + const MIRInfoVector &funcInfo = GetInfoVector(); + const MapleVector &funcInfoIsString = InfoIsString(); + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "funcinfo {\n"; + size_t size = funcInfo.size(); + constexpr int kIndentOffset = 2; + for (size_t i = 0; i < size; ++i) { + PrintIndentation(indent + kIndentOffset); + LogInfo::MapleLogger() << "@" << 
GlobalTables::GetStrTable().GetStringFromStrIdx(funcInfo[i].first) << " "; + if (!funcInfoIsString[i]) { + LogInfo::MapleLogger() << funcInfo[i].second; + } else { + LogInfo::MapleLogger() << "\"" + << GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(funcInfo[i].second)) + << "\""; + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << '\n'; +} + +bool MIRFunction::IsEmpty() const { + return (body == nullptr || body->IsEmpty()); +} + +bool MIRFunction::IsClinit() const { + const std::string clinitPostfix = "_7C_3Cclinit_3E_7C_28_29V"; + const std::string &funcName = this->GetName(); + // this does not work for smali files like art/test/511-clinit-interface/smali/BogusInterface.smali, + // which is decorated without "constructor". + return StringUtils::EndsWith(funcName, clinitPostfix); +} + +uint32 MIRFunction::GetInfo(GStrIdx strIdx) const { + for (const auto &item : info) { + if (item.first == strIdx) { + return item.second; + } + } + DEBUG_ASSERT(false, "get info error"); + return 0; +} + +uint32 MIRFunction::GetInfo(const std::string &string) const { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(string); + return GetInfo(strIdx); +} + +void MIRFunction::OverrideBaseClassFuncNames(GStrIdx strIdx) { + baseClassStrIdx.reset(); + baseFuncStrIdx.reset(); + SetBaseClassFuncNames(strIdx); +} + +// there are two ways to represent the delimiter: '|' or "_7C" +// where 7C is the ascii value of char '|' in hex +void MIRFunction::SetBaseClassFuncNames(GStrIdx strIdx) { + if (baseClassStrIdx != 0u || baseFuncStrIdx != 0u) { + return; + } + const std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + std::string delimiter = "|"; + uint32 width = 1; // delimiter width + size_t pos = name.find(delimiter); + if (pos == std::string::npos) { + delimiter = namemangler::kNameSplitterStr; + width = 3; // delimiter width + pos = name.find(delimiter); + // make sure it is not __7C, but ___7C ok + while (pos != std::string::npos && (name[pos - 1] == '_' && name[pos - 2] != '_')) { + pos = name.find(delimiter, pos + width); + } + } + if (pos != std::string::npos && pos > 0) { + const std::string className = name.substr(0, pos); + baseClassStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className); + std::string funcNameWithType = name.substr(pos + width, name.length() - pos - width); + baseFuncWithTypeStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); + size_t index = name.find(namemangler::kRightBracketStr); + if (index != std::string::npos) { + size_t posEnd = index + (std::string(namemangler::kRightBracketStr)).length(); + funcNameWithType = name.substr(pos + width, posEnd - pos - width); + } + baseFuncSigStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); + size_t newPos = name.find(delimiter, pos + width); + while (newPos != std::string::npos && (name[newPos - 1] == '_' && name[newPos - 2] != '_')) { + newPos = name.find(delimiter, newPos + width); + } + if (newPos != 0) { + std::string funcName = name.substr(pos + width, newPos - pos - width); + baseFuncStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + std::string signature = name.substr(newPos + width, name.length() - newPos - width); + signatureStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(signature); + } + return; + } + baseFuncStrIdx = strIdx; +} + +const MIRSymbol 
*MIRFunction::GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst) const { + return idx.Islocal() ? GetSymbolTabItem(idx.Idx(), checkFirst) + : GlobalTables::GetGsymTable().GetSymbolFromStidx(idx.Idx(), checkFirst); +} +MIRSymbol *MIRFunction::GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst) { + return const_cast(const_cast(this)->GetLocalOrGlobalSymbol(idx, checkFirst)); +} + +const MIRType *MIRFunction::GetNodeType(const BaseNode &node) const { + if (node.GetOpCode() == OP_dread) { + const MIRSymbol *sym = GetLocalOrGlobalSymbol(static_cast(node).GetStIdx()); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + } + if (node.GetOpCode() == OP_regread) { + const auto &nodeReg = static_cast(node); + const MIRPreg *pReg = GetPregTab()->PregFromPregIdx(nodeReg.GetRegIdx()); + if (pReg->GetPrimType() == PTY_ref) { + return pReg->GetMIRType(); + } + } + return nullptr; +} + +void MIRFunction::EnterFormals() { + for (auto &formalDef : formalDefVec) { + formalDef.formalSym = symTab->CreateSymbol(kScopeLocal); + formalDef.formalSym->SetStorageClass(kScFormal); + formalDef.formalSym->SetNameStrIdx(formalDef.formalStrIdx); + formalDef.formalSym->SetTyIdx(formalDef.formalTyIdx); + formalDef.formalSym->SetAttrs(formalDef.formalAttrs); + const std::string &formalName = GlobalTables::GetStrTable().GetStringFromStrIdx(formalDef.formalStrIdx); + if (!isdigit(formalName.front())) { + formalDef.formalSym->SetSKind(kStVar); + (void)symTab->AddToStringSymbolMap(*formalDef.formalSym); + } else { + formalDef.formalSym->SetSKind(kStPreg); + uint32 thepregno = static_cast(std::stoi(formalName)); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDef.formalTyIdx); + PrimType pType = mirType->GetPrimType(); + // if mirType info is not needed, set mirType to nullptr + if (pType != PTY_ref && pType != PTY_ptr) { + mirType = nullptr; + } else if (pType == PTY_ptr && mirType->IsMIRPtrType()) { + MIRType *pointedType = static_cast(mirType)->GetPointedType(); + if (pointedType == nullptr || pointedType->GetKind() != kTypeFunction) { + mirType = nullptr; + } + } + PregIdx pregIdx = pregTab->EnterPregNo(thepregno, pType, mirType); + MIRPreg *preg = pregTab->PregFromPregIdx(pregIdx); + formalDef.formalSym->SetPreg(preg); + } + } +} + +void MIRFunction::NewBody() { + codeMemPool = GetCodeMemPool(); + SetBody(codeMemPool->New()); + // If mir_function.has been seen as a declaration, its symtab has to be moved + // from module mempool to function mempool. 
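+  // Fresh tables are allocated below; formals are re-entered when there was no
+  // old symbol table, otherwise the existing entries are copied across.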
+ MIRSymbolTable *oldSymTable = GetSymTab(); + MIRPregTable *oldPregTable = GetPregTab(); + MIRTypeNameTable *oldTypeNameTable = typeNameTab; + MIRLabelTable *oldLabelTable = GetLabelTab(); + symTab = module->GetMemPool()->New(module->GetMPAllocator()); + pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); + typeNameTab = module->GetMemPool()->New(module->GetMPAllocator()); + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + + if (oldSymTable == nullptr) { + // formals not yet entered into symTab; enter them now + EnterFormals(); + } else { + for (size_t i = 1; i < oldSymTable->GetSymbolTableSize(); ++i) { + (void)GetSymTab()->AddStOutside(oldSymTable->GetSymbolFromStIdx(i)); + } + } + if (oldPregTable != nullptr) { + for (size_t i = 1; i < oldPregTable->Size(); ++i) { + (void)GetPregTab()->AddPreg(*oldPregTable->PregFromPregIdx(static_cast(i))); + } + } + if (oldTypeNameTable != nullptr) { + DEBUG_ASSERT(oldTypeNameTable->Size() == typeNameTab->Size(), + "Does not expect to process typeNameTab in MIRFunction::NewBody"); + } + if (oldLabelTable != nullptr) { + DEBUG_ASSERT(oldLabelTable->Size() == GetLabelTab()->Size(), + "Does not expect to process labelTab in MIRFunction::NewBody"); + } +} + +#ifdef DEBUGME +void MIRFunction::SetUpGDBEnv() { + if (codeMemPool != nullptr) { + delete codeMemPool; + } + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "tmp debug"); + codeMemPoolAllocator.SetMemPool(codeMemPool); +} + +void MIRFunction::ResetGDBEnv() { + delete codeMemPool; + codeMemPool = nullptr; +} +#endif +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/mir_lower.cpp b/ecmascript/mapleall/maple_ir/src/mir_lower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..50620764feae120745fd4b59c80d388a2798ca88 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/mir_lower.cpp @@ -0,0 +1,1117 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mir_lower.h" +#include "constantfold.h" +#include "ext_constantfold.h" + +#define DO_LT_0_CHECK 1 + +namespace maple { + +static constexpr uint64 RoundUpConst(uint64 offset, uint32 align) { + return (-align) & (offset + align - 1); +} + +static inline uint64 RoundUp(uint64 offset, uint32 align) { + if (align == 0) { + return offset; + } + return RoundUpConst(offset, align); +} + +// Remove intrinsicop __builtin_expect and record likely info to brStmt +// Target condExpr example: +// ne u1 i64 ( +// intrinsicop i64 C___builtin_expect ( +// cvt i64 i32 (dread i32 %levVar_9354), cvt i64 i32 (constval i32 0)), +// constval i64 0) +void LowerCondGotoStmtWithBuiltinExpect(CondGotoNode &brStmt) { + BaseNode *condExpr = brStmt.Opnd(0); + // Poke ne for dread shortCircuit + // Example: + // dassign %shortCircuit 0 (ne u1 i64 ( + // intrinsicop i64 C___builtin_expect ( + // cvt i64 i32 (dread i32 %levVar_32349), + // cvt i64 i32 (constval i32 0)), + // constval i64 0)) + // dassign %shortCircuit 0 (ne u1 u32 (dread u32 %shortCircuit, constval u1 0)) + if (condExpr->GetOpCode() == OP_ne && condExpr->Opnd(0)->GetOpCode() == OP_dread && + condExpr->Opnd(1)->GetOpCode() == OP_constval) { + auto *constVal = static_cast(condExpr->Opnd(1))->GetConstVal(); + if (constVal->GetKind() == kConstInt && static_cast(constVal)->GetValue() == 0) { + condExpr = condExpr->Opnd(0); + } + } + if (condExpr->GetOpCode() == OP_dread) { + // Example: + // dassign %shortCircuit 0 (ne u1 i64 ( + // intrinsicop i64 C___builtin_expect ( + // cvt i64 i32 (dread i32 %levVar_9488), + // cvt i64 i32 (constval i32 1)), + // constval i64 0)) + // brfalse @shortCircuit_label_13351 (dread u32 %shortCircuit) + StIdx stIdx = static_cast(condExpr)->GetStIdx(); + FieldID fieldId = static_cast(condExpr)->GetFieldID(); + if (fieldId != 0) { + return; + } + if (brStmt.GetPrev() == nullptr || brStmt.GetPrev()->GetOpCode() != OP_dassign) { + return; // prev stmt may be a label, we skip it too + } + auto *dassign = static_cast(brStmt.GetPrev()); + if (stIdx != dassign->GetStIdx() || dassign->GetFieldID() != 0) { + return; + } + condExpr = dassign->GetRHS(); + } + if (condExpr->GetOpCode() == OP_ne) { + // opnd1 must be int const 0 + BaseNode *opnd1 = condExpr->Opnd(1); + if (opnd1->GetOpCode() != OP_constval) { + return; + } + auto *constVal = static_cast(opnd1)->GetConstVal(); + if (constVal->GetKind() != kConstInt || static_cast(constVal)->GetValue() != 0) { + return; + } + // opnd0 must be intrinsicop C___builtin_expect + BaseNode *opnd0 = condExpr->Opnd(0); + if (opnd0->GetOpCode() != OP_intrinsicop || + static_cast(opnd0)->GetIntrinsic() != INTRN_C___builtin_expect) { + return; + } + // We trust constant fold + auto *expectedConstExpr = opnd0->Opnd(1); + if (expectedConstExpr->GetOpCode() == OP_cvt) { + expectedConstExpr = expectedConstExpr->Opnd(0); + } + if (expectedConstExpr->GetOpCode() != OP_constval) { + return; + } + auto *expectedConstNode = static_cast(expectedConstExpr)->GetConstVal(); + CHECK_FATAL(expectedConstNode->GetKind() == kConstInt, "must be"); + auto expectedVal = static_cast(expectedConstNode)->GetValue(); + if (expectedVal != 0 && expectedVal != 1) { + return; + } + bool likelyTrue = (expectedVal == 1); // The condition is likely to be true + bool likelyBranch = (brStmt.GetOpCode() == OP_brtrue ? 
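// brtrue jumps when the condition holds, so the branch is "likely"
// exactly when the condition itself is expected to be true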
likelyTrue : !likelyTrue); // High probability jump + if (likelyBranch) { + brStmt.SetBranchProb(kProbLikely); + } else { + brStmt.SetBranchProb(kProbUnlikely); + } + // Remove __builtin_expect + condExpr->SetOpnd(opnd0->Opnd(0), 0); + } +} + +void MIRLower::LowerBuiltinExpect(BlockNode &block) { + auto *stmt = block.GetFirst(); + auto *last = block.GetLast(); + while (stmt != last) { + if (stmt->GetOpCode() == OP_brtrue || stmt->GetOpCode() == OP_brfalse) { + LowerCondGotoStmtWithBuiltinExpect(*static_cast(stmt)); + } + stmt = stmt->GetNext(); + } +} + +LabelIdx MIRLower::CreateCondGotoStmt(Opcode op, BlockNode &blk, const IfStmtNode &ifStmt) { + auto *brStmt = mirModule.CurFuncCodeMemPool()->New(op); + brStmt->SetOpnd(ifStmt.Opnd(), 0); + brStmt->SetSrcPos(ifStmt.GetSrcPos()); + LabelIdx lableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lableIdx); + brStmt->SetOffset(lableIdx); + blk.AddStatement(brStmt); + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(brStmt->GetStmtID(), ifStmt.GetStmtID()); + } + bool thenEmpty = (ifStmt.GetThenPart() == nullptr) || (ifStmt.GetThenPart()->GetFirst() == nullptr); + if (thenEmpty) { + blk.AppendStatementsFromBlock(*ifStmt.GetElsePart()); + } else { + blk.AppendStatementsFromBlock(*ifStmt.GetThenPart()); + } + return lableIdx; +} + +void MIRLower::CreateBrFalseStmt(BlockNode &blk, const IfStmtNode &ifStmt) { + LabelIdx labelIdx = CreateCondGotoStmt(OP_brfalse, blk, ifStmt); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + // set stmtfreqs + if (GetFuncProfData()) { + DEBUG_ASSERT(GetFuncProfData()->GetStmtFreq(ifStmt.GetThenPart()->GetStmtID()) >= 0, "sanity check"); + int64_t freq = GetFuncProfData()->GetStmtFreq(ifStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(ifStmt.GetThenPart()->GetStmtID()); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), freq); + } +} + +void MIRLower::CreateBrTrueStmt(BlockNode &blk, const IfStmtNode &ifStmt) { + LabelIdx labelIdx = CreateCondGotoStmt(OP_brtrue, blk, ifStmt); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + // set stmtfreqs + if (GetFuncProfData()) { + DEBUG_ASSERT(GetFuncProfData()->GetStmtFreq(ifStmt.GetElsePart()->GetStmtID()) >= 0, "sanity check"); + int64_t freq = GetFuncProfData()->GetStmtFreq(ifStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(ifStmt.GetElsePart()->GetStmtID()); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), freq); + } +} + + +void MIRLower::CreateBrFalseAndGotoStmt(BlockNode &blk, const IfStmtNode &ifStmt) { + LabelIdx labelIdx = CreateCondGotoStmt(OP_brfalse, blk, ifStmt); + bool fallThroughFromThen = !IfStmtNoFallThrough(ifStmt); + LabelIdx gotoLableIdx = 0; + if (fallThroughFromThen) { + auto *gotoStmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); + gotoLableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(gotoLableIdx); + gotoStmt->SetOffset(gotoLableIdx); + blk.AddStatement(gotoStmt); + // set stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(gotoStmt->GetStmtID(), ifStmt.GetThenPart()->GetStmtID()); + } + } + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + blk.AppendStatementsFromBlock(*ifStmt.GetElsePart()); + // set stmtfreqs + if 
(GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), ifStmt.GetElsePart()->GetStmtID()); + } + if (fallThroughFromThen) { + lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(gotoLableIdx); + blk.AddStatement(lableStmt); + // set endlabel stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), ifStmt.GetStmtID()); + } + } +} + +BlockNode *MIRLower::LowerIfStmt(IfStmtNode &ifStmt, bool recursive) { + bool thenEmpty = (ifStmt.GetThenPart() == nullptr) || (ifStmt.GetThenPart()->GetFirst() == nullptr); + bool elseEmpty = (ifStmt.GetElsePart() == nullptr) || (ifStmt.GetElsePart()->GetFirst() == nullptr); + if (recursive) { + if (!thenEmpty) { + ifStmt.SetThenPart(LowerBlock(*ifStmt.GetThenPart())); + } + if (!elseEmpty) { + ifStmt.SetElsePart(LowerBlock(*ifStmt.GetElsePart())); + } + } + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + if (thenEmpty && elseEmpty) { + // generate EVAL statement + auto *evalStmt = mirModule.CurFuncCodeMemPool()->New(OP_eval); + evalStmt->SetOpnd(ifStmt.Opnd(), 0); + evalStmt->SetSrcPos(ifStmt.GetSrcPos()); + blk->AddStatement(evalStmt); + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(evalStmt->GetStmtID(), ifStmt.GetStmtID()); + } + } else if (elseEmpty) { + // brfalse + // + // label + CreateBrFalseStmt(*blk, ifStmt); + } else if (thenEmpty) { + // brtrue + // + // label + CreateBrTrueStmt(*blk, ifStmt); + } else { + // brfalse + // + // goto + // label + // + // label + CreateBrFalseAndGotoStmt(*blk, ifStmt); + } + return blk; +} + +static bool ConsecutiveCaseValsAndSameTarget(const CaseVector *switchTable) { + size_t caseNum = switchTable->size(); + int lastVal = static_cast((*switchTable)[0].first); + LabelIdx lblIdx = (*switchTable)[0].second; + for (size_t id = 1; id < caseNum; id++) { + lastVal++; + if (lastVal != (*switchTable)[id].first) { + return false; + } + if (lblIdx != (*switchTable)[id].second) { + return false; + } + } + return true; +} + +// if there is only 1 case branch, replace with conditional branch(es) and +// return the optimized multiple statements; otherwise, return nullptr +BlockNode *MIRLower::LowerSwitchStmt(SwitchNode *switchNode) { + CaseVector *switchTable = &switchNode->GetSwitchTable(); + if (switchTable->empty()) { // goto @defaultLabel + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx defaultLabel = switchNode->GetDefaultLabel(); + MIRBuilder *builder = mirModule.GetMIRBuilder(); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, defaultLabel); + blk->AddStatement(gotoStmt); + return blk; + } + if (!ConsecutiveCaseValsAndSameTarget(switchTable)) { + return nullptr; + } + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx caseGotoLabel = switchTable->front().second; + LabelIdx defaultLabel = switchNode->GetDefaultLabel(); + int64 minCaseVal = switchTable->front().first; + int64 maxCaseVal = switchTable->back().first; + BaseNode *switchOpnd = switchNode->Opnd(0); + MIRBuilder *builder = mirModule.GetMIRBuilder(); + ConstvalNode *minCaseNode = builder->CreateIntConst(minCaseVal, switchOpnd->GetPrimType()); + ConstvalNode *maxCaseNode = builder->CreateIntConst(maxCaseVal, switchOpnd->GetPrimType()); + if (minCaseVal == maxCaseVal) { + // brtrue (x == minCaseVal) @case_goto_label + // goto @default_label + CompareNode *eqNode = builder->CreateExprCompare(OP_eq, *GlobalTables::GetTypeTable().GetInt32(), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), 
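// the comparison yields an i32 truth value; its operand type mirrors the switch operand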
        switchOpnd, minCaseNode);
+    CondGotoNode *condGoto = builder->CreateStmtCondGoto(eqNode, OP_brtrue, caseGotoLabel);
+    blk->AddStatement(condGoto);
+    GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, defaultLabel);
+    blk->AddStatement(gotoStmt);
+  } else {
+    // brtrue (x < minCaseVal) @default_label
+    // brtrue (x > maxCaseVal) @default_label
+    // goto @case_goto_label
+    CompareNode *ltNode = builder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetInt32(),
+        *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, minCaseNode);
+    CondGotoNode *condGoto = builder->CreateStmtCondGoto(ltNode, OP_brtrue, defaultLabel);
+    blk->AddStatement(condGoto);
+    CompareNode *gtNode = builder->CreateExprCompare(OP_gt, *GlobalTables::GetTypeTable().GetInt32(),
+        *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, maxCaseNode);
+    condGoto = builder->CreateStmtCondGoto(gtNode, OP_brtrue, defaultLabel);
+    blk->AddStatement(condGoto);
+    GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, caseGotoLabel);
+    blk->AddStatement(gotoStmt);
+  }
+  return blk;
+}
+
+// while <cond> <body>
+// is lowered to:
+// brfalse <cond> @endlabel
+// label @bodylabel
+// <body>
+// brtrue <cond> @bodylabel
+// label @endlabel
+BlockNode *MIRLower::LowerWhileStmt(WhileStmtNode &whileStmt) {
+  DEBUG_ASSERT(whileStmt.GetBody() != nullptr, "nullptr check");
+  whileStmt.SetBody(LowerBlock(*whileStmt.GetBody()));
+  auto *blk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  auto *brFalseStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(OP_brfalse);
+  brFalseStmt->SetOpnd(whileStmt.Opnd(0), 0);
+  brFalseStmt->SetSrcPos(whileStmt.GetSrcPos());
+  LabelIdx labelIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+  mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(labelIdx);
+  brFalseStmt->SetOffset(labelIdx);
+  blk->AddStatement(brFalseStmt);
+  blk->AppendStatementsFromBlock(*whileStmt.GetBody());
+  if (MeOption::optForSize) {
+    // still keep while-do format to avoid copying too many condition-related stmts
+    LabelIdx whileLabelIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+    mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(whileLabelIdx);
+    auto *labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+    labelStmt->SetLabelIdx(whileLabelIdx);
+    blk->InsertBefore(brFalseStmt, labelStmt);
+    auto *whileGotoNode = mirModule.CurFuncCodeMemPool()->New<GotoNode>(OP_goto, whileLabelIdx);
+    if (GetFuncProfData() && blk->GetLast()) {
+      GetFuncProfData()->CopyStmtFreq(whileGotoNode->GetStmtID(), blk->GetLast()->GetStmtID());
+    }
+    blk->AddStatement(whileGotoNode);
+  } else {
+    LabelIdx bodyLabelIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+    mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(bodyLabelIdx);
+    auto *labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+    labelStmt->SetLabelIdx(bodyLabelIdx);
+    blk->InsertAfter(brFalseStmt, labelStmt);
+    // update frequency
+    if (GetFuncProfData()) {
+      GetFuncProfData()->CopyStmtFreq(labelStmt->GetStmtID(), whileStmt.GetStmtID());
+      GetFuncProfData()->CopyStmtFreq(brFalseStmt->GetStmtID(), whileStmt.GetStmtID());
+    }
+    auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(OP_brtrue);
+    brTrueStmt->SetOpnd(whileStmt.Opnd(0)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0);
+    brTrueStmt->SetOffset(bodyLabelIdx);
+    if (GetFuncProfData() && blk->GetLast()) {
+      GetFuncProfData()->CopyStmtFreq(brTrueStmt->GetStmtID(), whileStmt.GetBody()->GetStmtID());
+    }
+    blk->AddStatement(brTrueStmt);
+  }
+  auto *labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
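+  // loop-exit label: target of the initial brfalse and fall-through point of the loop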
+  labelStmt->SetLabelIdx(labelIdx);
+  blk->AddStatement(labelStmt);
+  if (GetFuncProfData()) {
+    int64_t freq = GetFuncProfData()->GetStmtFreq(whileStmt.GetStmtID()) -
+                   GetFuncProfData()->GetStmtFreq(blk->GetLast()->GetStmtID());
+    DEBUG_ASSERT(freq >= 0, "sanity check");
+    GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), freq);
+  }
+  return blk;
+}
+
+// doloop <do-var> (<start-expr>, <cond-expr>, <incr-expr>) {<body-stmts>}
+// is lowered to:
+// dassign <do-var> (<start-expr>)
+// brfalse <cond-expr> @endlabel
+// label @bodylabel
+// <body-stmts>
+// dassign <do-var> (<do-var> + <incr-expr>)
+// brtrue <cond-expr> @bodylabel
+// label @endlabel
+BlockNode *MIRLower::LowerDoloopStmt(DoloopNode &doloop) {
+  DEBUG_ASSERT(doloop.GetDoBody() != nullptr, "nullptr check");
+  doloop.SetDoBody(LowerBlock(*doloop.GetDoBody()));
+  int64_t doloopnodeFreq = 0, bodynodeFreq = 0;
+  if (GetFuncProfData()) {
+    doloopnodeFreq = GetFuncProfData()->GetStmtFreq(doloop.GetStmtID());
+    bodynodeFreq = GetFuncProfData()->GetStmtFreq(doloop.GetDoBody()->GetStmtID());
+  }
+  auto *blk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  if (doloop.IsPreg()) {
+    PregIdx regIdx = static_cast<PregIdx>(doloop.GetDoVarStIdx().FullIdx());
+    MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx);
+    PrimType primType = mirPreg->GetPrimType();
+    DEBUG_ASSERT(primType != kPtyInvalid, "runtime check error");
+    auto *startRegassign = mirModule.CurFuncCodeMemPool()->New<RegassignNode>();
+    startRegassign->SetRegIdx(regIdx);
+    startRegassign->SetPrimType(primType);
+    startRegassign->SetOpnd(doloop.GetStartExpr(), 0);
+    startRegassign->SetSrcPos(doloop.GetSrcPos());
+    blk->AddStatement(startRegassign);
+  } else {
+    auto *startDassign = mirModule.CurFuncCodeMemPool()->New<DassignNode>();
+    startDassign->SetStIdx(doloop.GetDoVarStIdx());
+    startDassign->SetRHS(doloop.GetStartExpr());
+    startDassign->SetSrcPos(doloop.GetSrcPos());
+    blk->AddStatement(startDassign);
+  }
+  if (GetFuncProfData()) {
+    GetFuncProfData()->SetStmtFreq(blk->GetLast()->GetStmtID(), doloopnodeFreq - bodynodeFreq);
+  }
+  auto *brFalseStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(OP_brfalse);
+  brFalseStmt->SetOpnd(doloop.GetCondExpr(), 0);
+  LabelIdx lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+  mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx);
+  brFalseStmt->SetOffset(lIdx);
+  blk->AddStatement(brFalseStmt);
+  // update stmtFreq
+  if (GetFuncProfData()) {
+    GetFuncProfData()->SetStmtFreq(brFalseStmt->GetStmtID(), (doloopnodeFreq - bodynodeFreq));
+  }
+  LabelIdx bodyLabelIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+  mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(bodyLabelIdx);
+  auto *labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+  labelStmt->SetLabelIdx(bodyLabelIdx);
+  blk->AddStatement(labelStmt);
+  // update stmtFreq
+  if (GetFuncProfData()) {
+    GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), bodynodeFreq);
+  }
+  blk->AppendStatementsFromBlock(*doloop.GetDoBody());
+  if (doloop.IsPreg()) {
+    PregIdx regIdx = static_cast<PregIdx>(doloop.GetDoVarStIdx().FullIdx());
+    MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx);
+    PrimType doVarPType = mirPreg->GetPrimType();
+    DEBUG_ASSERT(doVarPType != kPtyInvalid, "runtime check error");
+    auto *readDoVar = mirModule.CurFuncCodeMemPool()->New<RegreadNode>();
+    readDoVar->SetRegIdx(regIdx);
+    readDoVar->SetPrimType(doVarPType);
+    auto *add =
+        mirModule.CurFuncCodeMemPool()->New<BinaryNode>(OP_add, doVarPType, doloop.GetIncrExpr(), readDoVar);
+    auto *endRegassign = mirModule.CurFuncCodeMemPool()->New<RegassignNode>();
+    endRegassign->SetRegIdx(regIdx);
+    endRegassign->SetPrimType(doVarPType);
+    endRegassign->SetOpnd(add, 0);
+    blk->AddStatement(endRegassign);
+  } else {
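+    // named do-variable: the increment is built with dread/dassign instead of
+    // the regread/regassign pair used in the preg branch above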
+    const MIRSymbol *doVarSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(doloop.GetDoVarStIdx());
+    PrimType doVarPType = doVarSym->GetType()->GetPrimType();
+    auto *readDovar =
+        mirModule.CurFuncCodeMemPool()->New<AddrofNode>(OP_dread, doVarPType, doloop.GetDoVarStIdx(), 0);
+    auto *add =
+        mirModule.CurFuncCodeMemPool()->New<BinaryNode>(OP_add, doVarPType, readDovar, doloop.GetIncrExpr());
+    auto *endDassign = mirModule.CurFuncCodeMemPool()->New<DassignNode>();
+    endDassign->SetStIdx(doloop.GetDoVarStIdx());
+    endDassign->SetRHS(add);
+    blk->AddStatement(endDassign);
+  }
+  auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(OP_brtrue);
+  brTrueStmt->SetOpnd(doloop.GetCondExpr()->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0);
+  brTrueStmt->SetOffset(bodyLabelIdx);
+  blk->AddStatement(brTrueStmt);
+  // update stmtFreq
+  if (GetFuncProfData()) {
+    GetFuncProfData()->SetStmtFreq(brTrueStmt->GetStmtID(), bodynodeFreq);
+  }
+  labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+  labelStmt->SetLabelIdx(lIdx);
+  blk->AddStatement(labelStmt);
+  // update stmtFreq
+  if (GetFuncProfData()) {
+    GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), (doloopnodeFreq - bodynodeFreq));
+  }
+  return blk;
+}
+
+// dowhile <body> <cond>
+// is lowered to:
+// label @bodylabel
+// <body>
+// brtrue <cond> @bodylabel
+BlockNode *MIRLower::LowerDowhileStmt(WhileStmtNode &doWhileStmt) {
+  DEBUG_ASSERT(doWhileStmt.GetBody() != nullptr, "nullptr check");
+  doWhileStmt.SetBody(LowerBlock(*doWhileStmt.GetBody()));
+  auto *blk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  LabelIdx lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+  mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx);
+  auto *labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+  labelStmt->SetLabelIdx(lIdx);
+  blk->AddStatement(labelStmt);
+  blk->AppendStatementsFromBlock(*doWhileStmt.GetBody());
+  auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(OP_brtrue);
+  brTrueStmt->SetOpnd(doWhileStmt.Opnd(0), 0);
+  brTrueStmt->SetOffset(lIdx);
+  blk->AddStatement(brTrueStmt);
+  return blk;
+}
+
+BlockNode *MIRLower::LowerBlock(BlockNode &block) {
+  auto *newBlock = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+  BlockNode *tmp = nullptr;
+  if (block.GetFirst() == nullptr) {
+    newBlock->SetStmtID(block.GetStmtID());  // keep original block stmtid
+    return newBlock;
+  }
+  StmtNode *nextStmt = block.GetFirst();
+  DEBUG_ASSERT(nextStmt != nullptr, "nullptr check");
+  do {
+    StmtNode *stmt = nextStmt;
+    nextStmt = stmt->GetNext();
+    switch (stmt->GetOpCode()) {
+      case OP_if:
+        tmp = LowerIfStmt(static_cast<IfStmtNode&>(*stmt), true);
+        newBlock->AppendStatementsFromBlock(*tmp);
+        break;
+      case OP_switch:
+        tmp = LowerSwitchStmt(static_cast<SwitchNode*>(stmt));
+        if (tmp != nullptr) {
+          newBlock->AppendStatementsFromBlock(*tmp);
+        } else {
+          newBlock->AddStatement(stmt);
+        }
+        break;
+      case OP_while:
+        newBlock->AppendStatementsFromBlock(*LowerWhileStmt(static_cast<WhileStmtNode&>(*stmt)));
+        break;
+      case OP_dowhile:
+        newBlock->AppendStatementsFromBlock(*LowerDowhileStmt(static_cast<WhileStmtNode&>(*stmt)));
+        break;
+      case OP_doloop:
+        newBlock->AppendStatementsFromBlock(*LowerDoloopStmt(static_cast<DoloopNode&>(*stmt)));
+        break;
+      case OP_icallassigned:
+      case OP_icall: {
+        if (mirModule.IsCModule()) {
+          // convert to icallproto/icallprotoassigned
+          IcallNode *ic = static_cast<IcallNode*>(stmt);
+          ic->SetOpCode(stmt->GetOpCode() == OP_icall ?
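              // icallproto/icallprotoassigned carry the callee prototype, which C
              // needs because the function-pointer type is the only type info here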
              OP_icallproto : OP_icallprotoassigned);
+          MIRFuncType *funcType = FuncTypeFromFuncPtrExpr(stmt->Opnd(0));
+          CHECK_FATAL(funcType != nullptr, "MIRLower::LowerBlock: cannot find prototype for icall");
+          ic->SetRetTyIdx(funcType->GetTypeIndex());
+          MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx());
+          if (retType->GetPrimType() == PTY_agg && retType->GetSize() > k16BitSize) {
+            funcType->funcAttrs.SetAttr(FUNCATTR_firstarg_return);
+          }
+        }
+        newBlock->AddStatement(stmt);
+        break;
+      }
+      case OP_block:
+        tmp = LowerBlock(static_cast<BlockNode&>(*stmt));
+        newBlock->AppendStatementsFromBlock(*tmp);
+        break;
+      default:
+        newBlock->AddStatement(stmt);
+        break;
+    }
+  } while (nextStmt != nullptr);
+  newBlock->SetStmtID(block.GetStmtID());  // keep original block stmtid
+  return newBlock;
+}
+
+// for lowering OP_cand and OP_cior embedded in the expression x which belongs
+// to curstmt
+BaseNode* MIRLower::LowerEmbeddedCandCior(BaseNode *x, StmtNode *curstmt, BlockNode *blk) {
+  if (x->GetOpCode() == OP_cand || x->GetOpCode() == OP_cior) {
+    MIRBuilder *builder = mirModule.GetMIRBuilder();
+    BinaryNode *bnode = static_cast<BinaryNode*>(x);
+    bnode->SetOpnd(LowerEmbeddedCandCior(bnode->Opnd(0), curstmt, blk), 0);
+    PregIdx pregIdx = mirFunc->GetPregTab()->CreatePreg(x->GetPrimType());
+    RegassignNode *regass = builder->CreateStmtRegassign(x->GetPrimType(), pregIdx, bnode->Opnd(0));
+    blk->InsertBefore(curstmt, regass);
+    LabelIdx labIdx = mirFunc->GetLabelTab()->CreateLabel();
+    mirFunc->GetLabelTab()->AddToStringLabelMap(labIdx);
+    // short-circuit: for cior skip the second operand once the first is true,
+    // for cand once it is false
+    BaseNode *cond = builder->CreateExprRegread(x->GetPrimType(), pregIdx);
+    CondGotoNode *cgoto = mirFunc->GetCodeMempool()->New<CondGotoNode>(
+        x->GetOpCode() == OP_cior ? OP_brtrue : OP_brfalse);
+    cgoto->SetOpnd(cond, 0);
+    cgoto->SetOffset(labIdx);
+    blk->InsertBefore(curstmt, cgoto);
+
+    bnode->SetOpnd(LowerEmbeddedCandCior(bnode->Opnd(1), curstmt, blk), 1);
+    regass = builder->CreateStmtRegassign(x->GetPrimType(), pregIdx, bnode->Opnd(1));
+    blk->InsertBefore(curstmt, regass);
+    LabelNode *lbl = mirFunc->GetCodeMempool()->New<LabelNode>();
+    lbl->SetLabelIdx(labIdx);
+    blk->InsertBefore(curstmt, lbl);
+    return builder->CreateExprRegread(x->GetPrimType(), pregIdx);
+  } else {
+    for (size_t i = 0; i < x->GetNumOpnds(); i++) {
+      x->SetOpnd(LowerEmbeddedCandCior(x->Opnd(i), curstmt, blk), i);
+    }
+    return x;
+  }
+}
+
+// for lowering all appearances of OP_cand and OP_cior associated with conditional
+// branches in the block
+void MIRLower::LowerCandCior(BlockNode &block) {
+  if (block.GetFirst() == nullptr) {
+    return;
+  }
+  StmtNode *nextStmt = block.GetFirst();
+  do {
+    StmtNode *stmt = nextStmt;
+    nextStmt = stmt->GetNext();
+    if (stmt->IsCondBr() &&
+        (stmt->Opnd(0)->GetOpCode() == OP_cand || stmt->Opnd(0)->GetOpCode() == OP_cior)) {
+      CondGotoNode *condGoto = static_cast<CondGotoNode*>(stmt);
+      BinaryNode *cond = static_cast<BinaryNode*>(condGoto->Opnd(0));
+      if ((stmt->GetOpCode() == OP_brfalse && cond->GetOpCode() == OP_cand) ||
+          (stmt->GetOpCode() == OP_brtrue && cond->GetOpCode() == OP_cior)) {
+        // short-circuit target label is same as original condGoto stmt
+        condGoto->SetOpnd(cond->GetBOpnd(0), 0);
+        auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(Opcode(stmt->GetOpCode()));
+        newCondGoto->SetOpnd(cond->GetBOpnd(1), 0);
+        newCondGoto->SetOffset(condGoto->GetOffset());
+        block.InsertAfter(condGoto, newCondGoto);
+        nextStmt = stmt;  // so it will be re-processed if another cand/cior
+      } else {  // short-circuit target is next statement
+        LabelIdx lIdx;
+        LabelNode *labelStmt = nullptr;
+        if
+// for lowering all appearances of OP_cand and OP_cior associated with conditional
+// branches in the block
+void MIRLower::LowerCandCior(BlockNode &block) {
+    if (block.GetFirst() == nullptr) {
+        return;
+    }
+    StmtNode *nextStmt = block.GetFirst();
+    do {
+        StmtNode *stmt = nextStmt;
+        nextStmt = stmt->GetNext();
+        if (stmt->IsCondBr() &&
+            (stmt->Opnd(0)->GetOpCode() == OP_cand || stmt->Opnd(0)->GetOpCode() == OP_cior)) {
+            CondGotoNode *condGoto = static_cast<CondGotoNode*>(stmt);
+            BinaryNode *cond = static_cast<BinaryNode*>(condGoto->Opnd(0));
+            if ((stmt->GetOpCode() == OP_brfalse && cond->GetOpCode() == OP_cand) ||
+                (stmt->GetOpCode() == OP_brtrue && cond->GetOpCode() == OP_cior)) {
+                // short-circuit target label is same as original condGoto stmt
+                condGoto->SetOpnd(cond->GetBOpnd(0), 0);
+                auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(Opcode(stmt->GetOpCode()));
+                newCondGoto->SetOpnd(cond->GetBOpnd(1), 0);
+                newCondGoto->SetOffset(condGoto->GetOffset());
+                block.InsertAfter(condGoto, newCondGoto);
+                nextStmt = stmt; // so it will be re-processed if another cand/cior
+            } else { // short-circuit target is next statement
+                LabelIdx lIdx;
+                LabelNode *labelStmt = nullptr;
+                if (nextStmt->GetOpCode() == OP_label) {
+                    labelStmt = static_cast<LabelNode*>(nextStmt);
+                    lIdx = labelStmt->GetLabelIdx();
+                } else {
+                    lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+                    mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx);
+                    labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+                    labelStmt->SetLabelIdx(lIdx);
+                    block.InsertAfter(condGoto, labelStmt);
+                }
+                auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(
+                    stmt->GetOpCode() == OP_brfalse ? OP_brtrue : OP_brfalse);
+                newCondGoto->SetOpnd(cond->GetBOpnd(0), 0);
+                newCondGoto->SetOffset(lIdx);
+                block.InsertBefore(condGoto, newCondGoto);
+                condGoto->SetOpnd(cond->GetBOpnd(1), 0);
+                nextStmt = newCondGoto; // so it will be re-processed if another cand/cior
+            }
+        } else { // call LowerEmbeddedCandCior() for all the expression operands
+            for (size_t i = 0; i < stmt->GetNumOpnds(); i++) {
+                stmt->SetOpnd(LowerEmbeddedCandCior(stmt->Opnd(i), stmt, &block), i);
+            }
+        }
+    } while (nextStmt != nullptr);
+}
+
+void MIRLower::LowerFunc(MIRFunction &func) {
+    if (GetOptLevel() > 0) {
+        ExtConstantFold ecf(func.GetModule());
+        (void)ecf.ExtSimplify(func.GetBody());
+    }
+
+    mirModule.SetCurFunction(&func);
+    if (IsLowerExpandArray()) {
+        ExpandArrayMrt(func);
+    }
+    BlockNode *origBody = func.GetBody();
+    DEBUG_ASSERT(origBody != nullptr, "nullptr check");
+    BlockNode *newBody = LowerBlock(*origBody);
+    DEBUG_ASSERT(newBody != nullptr, "nullptr check");
+    LowerBuiltinExpect(*newBody);
+    if (!InLFO()) {
+        LowerCandCior(*newBody);
+    }
+    func.SetBody(newBody);
+}
+
+BaseNode *MIRLower::LowerFarray(ArrayNode *array) {
+    auto *farrayType = static_cast<MIRFarrayType*>(array->GetArrayType(GlobalTables::GetTypeTable()));
+    size_t eSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx())->GetSize();
+    MIRType &arrayType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()));
+    /* how about multi-dimension array? */
+    if (array->GetIndex(0)->GetOpCode() == OP_constval) {
+        const ConstvalNode *constvalNode = static_cast<const ConstvalNode*>(array->GetIndex(0));
+        if (constvalNode->GetConstVal()->GetKind() == kConstInt) {
+            const MIRIntConst *pIntConst = static_cast<const MIRIntConst*>(constvalNode->GetConstVal());
+            CHECK_FATAL(mirModule.IsJavaModule() || !pIntConst->IsNegative(), "Array index should >= 0.");
+            int64 eleOffset = pIntConst->GetExtValue() * eSize;
+
+            BaseNode *baseNode = array->GetBase();
+            if (eleOffset == 0) {
+                return baseNode;
+            }
+
+            MIRIntConst *eleConst =
+                GlobalTables::GetIntConstTable().GetOrCreateIntConst(eleOffset, arrayType);
+            BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New<ConstvalNode>(eleConst);
+            offsetNode->SetPrimType(array->GetPrimType());
+
+            BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New<BinaryNode>(OP_add);
+            rAdd->SetPrimType(array->GetPrimType());
+            rAdd->SetOpnd(baseNode, 0);
+            rAdd->SetOpnd(offsetNode, 1);
+            return rAdd;
+        }
+    }
+
+    BaseNode *rMul = nullptr;
+
+    BaseNode *baseNode = array->GetBase();
+
+    BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New<BinaryNode>(OP_add);
+    rAdd->SetPrimType(array->GetPrimType());
+    rAdd->SetOpnd(baseNode, 0);
+    rAdd->SetOpnd(rMul, 1);
+    auto *newAdd = ConstantFold(mirModule).Fold(rAdd);
+    rAdd = (newAdd != nullptr ?
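+    // Illustrative: for a flexible array of 4-byte elements indexed by constval 3,
+    // the fast path above already returned `add(base, constval 12)`; this Fold call
+    // only tidies the generic `add(base, rMul)` form when its operands are constant.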
newAdd : rAdd); + return rAdd; +} + +BaseNode *MIRLower::LowerCArray(ArrayNode *array) { + MIRType *aType = array->GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeJArray) { + return array; + } + if (aType->GetKind() == kTypeFArray) { + return LowerFarray(array); + } + + MIRArrayType *arrayType = static_cast(aType); + /* There are two cases where dimension > 1. + * 1) arrayType->dim > 1. Process the current arrayType. (nestedArray = false) + * 2) arrayType->dim == 1, but arraytype->eTyIdx is another array. (nestedArray = true) + * Assume at this time 1) and 2) cannot mix. + * Along with the array dimension, there is the array indexing. + * It is allowed to index arrays less than the dimension. + * This is dictated by the number of indexes. + */ + bool nestedArray = false; + uint64 dim = arrayType->GetDim(); + MIRType *innerType = nullptr; + MIRArrayType *innerArrayType = nullptr; + uint64 elemSize = 0; + if (dim == 1) { + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + if (innerType->GetKind() == kTypeArray) { + nestedArray = true; + do { + innerArrayType = static_cast(innerType); + elemSize = RoundUp(innerArrayType->GetElemType()->GetSize(), + arrayType->GetElemType()->GetAlign()); + dim++; + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } while (innerType->GetKind() == kTypeArray); + } + } + + size_t numIndex = array->NumOpnds() - 1; + MIRArrayType *curArrayType = arrayType; + BaseNode *resNode = array->GetIndex(0); + if (dim > 1) { + BaseNode *prevNode = nullptr; + for (size_t i = 0; (i < dim) && (i < numIndex); ++i) { + uint32 mpyDim = 1; + if (nestedArray) { + CHECK_FATAL(arrayType->GetSizeArrayItem(0) > 0, "Zero size array dimension"); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curArrayType->GetElemTyIdx()); + curArrayType = static_cast(innerType); + while (innerType->GetKind() == kTypeArray) { + innerArrayType = static_cast(innerType); + mpyDim *= innerArrayType->GetSizeArrayItem(0); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } + } else { + CHECK_FATAL(arrayType->GetSizeArrayItem(static_cast(i)) > 0, "Zero size array dimension"); + for (size_t j = i + 1; j < dim; ++j) { + mpyDim *= arrayType->GetSizeArrayItem(static_cast(j)); + } + } + + BaseNode *index = static_cast(array->GetIndex(i)); + bool isConst = false; + uint64 indexVal = 0; + if (index->op == OP_constval) { + ConstvalNode *constNode = static_cast(index); + indexVal = (static_cast(constNode->GetConstVal()))->GetExtValue(); + isConst = true; + MIRIntConst *newConstNode = mirModule.GetMemPool()->New( + indexVal * mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *newValNode = mirModule.CurFuncCodeMemPool()->New(newConstNode); + newValNode->SetPrimType(array->GetPrimType()); + if (i == 0) { + prevNode = newValNode; + continue; + } else { + resNode = newValNode; + } + } + if (i > 0 && isConst == false) { + resNode = array->GetIndex(i); + } + + BaseNode *mpyNode; + if (isConst) { + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + static_cast(mpyDim) * indexVal, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array->GetPrimType()); + mpyNode = mulSize; + } else if (mpyDim == 1 && prevNode) { + mpyNode = prevNode; + prevNode = resNode; + } else { + mpyNode = 
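+                // Illustrative: for `a[2][3][4]` indexed as a[i][j][k], each index is
+                // scaled by the product of the remaining dimensions (mpyDim above:
+                // 12 for i, 4 for j, 1 for k), so the flattened element index is
+                // i*12 + j*4 + k before the final multiply by esize.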
mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNode->SetPrimType(array->GetPrimType()); + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array->GetPrimType()); + mpyNode->SetOpnd(mulSize, 1); + PrimType signedInt4AddressCompute = GetSignedPrimType(array->GetPrimType()); + if (!IsPrimitiveInteger(resNode->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, signedInt4AddressCompute, + resNode->GetPrimType(), resNode); + } else if (GetPrimTypeSize(resNode->GetPrimType()) != GetPrimTypeSize(array->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, array->GetPrimType(), + GetRegPrimType(resNode->GetPrimType()), resNode); + } + mpyNode->SetOpnd(resNode, 0); + } + if (i == 0) { + prevNode = mpyNode; + continue; + } + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array->GetPrimType()); + newResNode->SetOpnd(mpyNode, 0); + if (NeedCvtOrRetype(prevNode->GetPrimType(), array->GetPrimType())) { + prevNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, array->GetPrimType(), + GetRegPrimType(prevNode->GetPrimType()), prevNode); + } + newResNode->SetOpnd(prevNode, 1); + prevNode = newResNode; + } + resNode = prevNode; + } + + BaseNode *rMul = nullptr; + // esize is the size of the array element (eg. int = 4 long = 8) + uint64 esize; + if (nestedArray) { + esize = elemSize; + } else { + esize = arrayType->GetElemType()->GetSize(); + } + Opcode opadd = OP_add; + MIRIntConst *econst = mirModule.GetMemPool()->New(esize, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *eSize = mirModule.CurFuncCodeMemPool()->New(econst); + eSize->SetPrimType(array->GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + PrimType signedInt4AddressCompute = GetSignedPrimType(array->GetPrimType()); + if (!IsPrimitiveInteger(resNode->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, signedInt4AddressCompute, + resNode->GetPrimType(), resNode); + } else if (GetPrimTypeSize(resNode->GetPrimType()) != GetPrimTypeSize(array->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, array->GetPrimType(), + GetRegPrimType(resNode->GetPrimType()), resNode); + } + rMul->SetPrimType(resNode->GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSize, 1); + BaseNode *baseNode = array->GetBase(); + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opadd); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + auto *newAdd = ConstantFold(mirModule).Fold(rAdd); + rAdd = (newAdd != nullptr ? 
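+    // Illustrative: the final address is `base + flatIndex * esize`; e.g. for
+    // `int a[10]` (esize 4) and a constant index 2 this builds add(base, mul(2, 4)),
+    // which the ConstantFold call just above reduces to add(base, 8).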
newAdd : rAdd); + return rAdd; +} + +IfStmtNode *MIRLower::ExpandArrayMrtIfBlock(IfStmtNode &node) { + if (node.GetThenPart() != nullptr) { + node.SetThenPart(ExpandArrayMrtBlock(*node.GetThenPart())); + } + if (node.GetElsePart() != nullptr) { + node.SetElsePart(ExpandArrayMrtBlock(*node.GetElsePart())); + } + return &node; +} + +WhileStmtNode *MIRLower::ExpandArrayMrtWhileBlock(WhileStmtNode &node) { + if (node.GetBody() != nullptr) { + node.SetBody(ExpandArrayMrtBlock(*node.GetBody())); + } + return &node; +} + +DoloopNode *MIRLower::ExpandArrayMrtDoloopBlock(DoloopNode &node) { + if (node.GetDoBody() != nullptr) { + node.SetDoBody(ExpandArrayMrtBlock(*node.GetDoBody())); + } + return &node; +} + +ForeachelemNode *MIRLower::ExpandArrayMrtForeachelemBlock(ForeachelemNode &node) { + if (node.GetLoopBody() != nullptr) { + node.SetLoopBody(ExpandArrayMrtBlock(*node.GetLoopBody())); + } + return &node; +} + +void MIRLower::AddArrayMrtMpl(BaseNode &exp, BlockNode &newBlock) { + MIRModule &mod = mirModule; + MIRBuilder *builder = mod.GetMIRBuilder(); + for (size_t i = 0; i < exp.NumOpnds(); ++i) { + DEBUG_ASSERT(exp.Opnd(i) != nullptr, "nullptr check"); + AddArrayMrtMpl(*exp.Opnd(i), newBlock); + } + if (exp.GetOpCode() == OP_array) { + auto &arrayNode = static_cast(exp); + if (arrayNode.GetBoundsCheck()) { + BaseNode *arrAddr = arrayNode.Opnd(0); + BaseNode *index = arrayNode.Opnd(1); + DEBUG_ASSERT(index != nullptr, "null ptr check"); + MIRType *indexType = GlobalTables::GetTypeTable().GetPrimType(index->GetPrimType()); + UnaryStmtNode *nullCheck = builder->CreateStmtUnary(OP_assertnonnull, arrAddr); + newBlock.AddStatement(nullCheck); +#if DO_LT_0_CHECK + ConstvalNode *indexZero = builder->GetConstUInt32(0); + CompareNode *lessZero = builder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), index, indexZero); +#endif + MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32(); + MapleVector arguments(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); + arguments.push_back(arrAddr); + BaseNode *arrLen = builder->CreateExprIntrinsicop(INTRN_JAVA_ARRAY_LENGTH, OP_intrinsicop, + *infoLenType, arguments); + BaseNode *cpmIndex = index; + if (arrLen->GetPrimType() != index->GetPrimType()) { + cpmIndex = builder->CreateExprTypeCvt(OP_cvt, *infoLenType, *indexType, index); + } + CompareNode *largeLen = builder->CreateExprCompare(OP_ge, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), cpmIndex, arrLen); + // maybe should use cior +#if DO_LT_0_CHECK + BinaryNode *indexCon = + builder->CreateExprBinary(OP_lior, *GlobalTables::GetTypeTable().GetUInt1(), lessZero, largeLen); +#endif + MapleVector args(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); +#if DO_LT_0_CHECK + args.push_back(indexCon); + IntrinsiccallNode *boundaryTrinsicCall = builder->CreateStmtIntrinsicCall(INTRN_MPL_BOUNDARY_CHECK, args); +#else + args.push_back(largeLen); + IntrinsiccallNode *boundaryTrinsicCall = builder->CreateStmtIntrinsicCall(INTRN_MPL_BOUNDARY_CHECK, args); +#endif + newBlock.AddStatement(boundaryTrinsicCall); + } + } +} + +BlockNode *MIRLower::ExpandArrayMrtBlock(BlockNode &block) { + auto *newBlock = mirModule.CurFuncCodeMemPool()->New(); + if (block.GetFirst() == nullptr) { + return newBlock; + } + StmtNode *nextStmt = block.GetFirst(); + do { + StmtNode *stmt = nextStmt; + DEBUG_ASSERT(stmt != nullptr, "nullptr check"); + nextStmt = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_if: + 
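+                // (Bounds-check note, illustrative: AddArrayMrtMpl above rewrites a
+                //  checked OP_array use by first emitting `assertnonnull <arrAddr>`,
+                //  then comparing the index against JAVA_ARRAY_LENGTH and feeding the
+                //  result to the MPL_BOUNDARY_CHECK intrinsic call it inserts ahead
+                //  of the statement.)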
newBlock->AddStatement(ExpandArrayMrtIfBlock(static_cast(*stmt))); + break; + case OP_while: + newBlock->AddStatement(ExpandArrayMrtWhileBlock(static_cast(*stmt))); + break; + case OP_dowhile: + newBlock->AddStatement(ExpandArrayMrtWhileBlock(static_cast(*stmt))); + break; + case OP_doloop: + newBlock->AddStatement(ExpandArrayMrtDoloopBlock(static_cast(*stmt))); + break; + case OP_foreachelem: + newBlock->AddStatement(ExpandArrayMrtForeachelemBlock(static_cast(*stmt))); + break; + case OP_block: + newBlock->AddStatement(ExpandArrayMrtBlock(static_cast(*stmt))); + break; + default: + AddArrayMrtMpl(*stmt, *newBlock); + newBlock->AddStatement(stmt); + break; + } + } while (nextStmt != nullptr); + return newBlock; +} + +void MIRLower::ExpandArrayMrt(MIRFunction &func) { + if (ShouldOptArrayMrt(func)) { + BlockNode *origBody = func.GetBody(); + DEBUG_ASSERT(origBody != nullptr, "nullptr check"); + BlockNode *newBody = ExpandArrayMrtBlock(*origBody); + func.SetBody(newBody); + } +} + +MIRFuncType *MIRLower::FuncTypeFromFuncPtrExpr(BaseNode *x) { + MIRFuncType *res = nullptr; + MIRFunction *func = mirModule.CurFunction(); + switch (x->GetOpCode()) { + case OP_regread: { + RegreadNode *regread = static_cast(x); + MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(regread->GetRegIdx()); + // see if it is promoted from a symbol + if (preg->GetOp() == OP_dread) { + const MIRSymbol *symbol = preg->rematInfo.sym; + MIRType *mirType = symbol->GetType(); + if (preg->fieldID != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(preg->fieldID); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if (res != nullptr) { + break; + } + } + // check if a formal promoted to preg + for (FormalDef &formalDef : func->GetFormalDefVec()) { + if (!formalDef.formalSym->IsPreg()) { + continue; + } + if (formalDef.formalSym->GetPreg() == preg) { + MIRType *mirType = formalDef.formalSym->GetType(); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + } + break; + } + case OP_dread: { + DreadNode *dread = static_cast(x); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = symbol->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(dread->GetFieldID()); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_iread: { + IreadNode *iread = static_cast(x); + MIRPtrType *ptrType = static_cast(iread->GetType()); + MIRType *mirType = ptrType->GetPointedType(); + if (mirType->GetKind() == kTypeFunction) { + res = static_cast(mirType); + } else if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_addroffunc: { + AddroffuncNode *addrofFunc = static_cast(x); + PUIdx puIdx = addrofFunc->GetPUIdx(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + res = f->GetMIRFuncType(); + break; + } + case OP_retype: { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + static_cast(x)->GetTyIdx()); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if 
(res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeFirstOpnd)); + } + break; + } + case OP_select: { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeSecondOpnd)); + if (res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeThirdOpnd)); + } + break; + } + default: CHECK_FATAL(false, "LMBCLowerer::FuncTypeFromFuncPtrExpr: NYI"); + } + return res; +} + +const std::set MIRLower::kSetArrayHotFunc = {}; + +bool MIRLower::ShouldOptArrayMrt(const MIRFunction &func) { + return (MIRLower::kSetArrayHotFunc.find(func.GetName()) != MIRLower::kSetArrayHotFunc.end()); +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/mir_module.cpp b/ecmascript/mapleall/maple_ir/src/mir_module.cpp new file mode 100644 index 0000000000000000000000000000000000000000..839da0250becc523e540179a90478c3f7db7b429 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/mir_module.cpp @@ -0,0 +1,791 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_module.h" +#include "mir_const.h" +#include "mir_preg.h" +#include "mir_function.h" +#include "mir_builder.h" +#include "debug_info.h" +#include "intrinsics.h" +#include "bin_mplt.h" + +namespace maple { +#if MIR_FEATURE_FULL // to avoid compilation error when MIR_FEATURE_FULL=0 +MIRModule::MIRModule(const std::string &fn) + : memPool(new ThreadShareMemPool(memPoolCtrler, "maple_ir mempool")), + pragmaMemPool(memPoolCtrler.NewMemPool("pragma mempool", false /* isLcalPool */)), + memPoolAllocator(memPool), + pragmaMemPoolAllocator(pragmaMemPool), + functionList(memPoolAllocator.Adapter()), + importedMplt(memPoolAllocator.Adapter()), + typeDefOrder(memPoolAllocator.Adapter()), + externStructTypeSet(std::less(), memPoolAllocator.Adapter()), + symbolSet(std::less(), memPoolAllocator.Adapter()), + symbolDefOrder(memPoolAllocator.Adapter()), + out(LogInfo::MapleLogger()), + fileName(fn), + fileInfo(memPoolAllocator.Adapter()), + fileInfoIsString(memPoolAllocator.Adapter()), + fileData(memPoolAllocator.Adapter()), + srcFileInfo(memPoolAllocator.Adapter()), + importFiles(memPoolAllocator.Adapter()), + importPaths(memPoolAllocator.Adapter()), + asmDecls(memPoolAllocator.Adapter()), + classList(memPoolAllocator.Adapter()), + optimizedFuncs(memPoolAllocator.Adapter()), + optimizedFuncsType(memPoolAllocator.Adapter()), + puIdxFieldInitializedMap(std::less(), memPoolAllocator.Adapter()), + inliningGlobals(memPoolAllocator.Adapter()), + partO2FuncList(memPoolAllocator.Adapter()), + safetyWarningMap(memPoolAllocator.Adapter()) { + GlobalTables::GetGsymTable().SetModule(this); + typeNameTab = memPool->New(memPoolAllocator); + mirBuilder = memPool->New(this); + dbgInfo = memPool->New(this); + IntrinDesc::InitMIRModule(this); +} + +MIRModule::~MIRModule() { + for (MIRFunction *mirFunc : functionList) { + mirFunc->ReleaseCodeMemory(); + } + ReleasePragmaMemPool(); + delete memPool; + delete binMplt; +} + +MemPool *MIRModule::CurFuncCodeMemPool() const { + if (useFuncCodeMemPoolTmp) { + return 
CurFunction()->GetCodeMemPoolTmp(); + } + return CurFunction()->GetCodeMemPool(); +} + +MapleAllocator *MIRModule::CurFuncCodeMemPoolAllocator() const { + MIRFunction *curFunc = CurFunction(); + CHECK_FATAL(curFunc != nullptr, "curFunction is null"); + return &curFunc->GetCodeMempoolAllocator(); +} + +MapleAllocator &MIRModule::GetCurFuncCodeMPAllocator() const { + MIRFunction *curFunc = CurFunction(); + CHECK_FATAL(curFunc != nullptr, "curFunction is null"); + return curFunc->GetCodeMPAllocator(); +} + +void MIRModule::AddExternStructType(TyIdx tyIdx) { + (void)externStructTypeSet.insert(tyIdx); +} + +void MIRModule::AddExternStructType(const MIRType *t) { + DEBUG_ASSERT(t != nullptr, "MIRType is null"); + (void)externStructTypeSet.insert(t->GetTypeIndex()); +} + +void MIRModule::AddSymbol(StIdx stIdx) { + auto it = symbolSet.find(stIdx); + if (it == symbolSet.end()) { + symbolDefOrder.push_back(stIdx); + } + (void)symbolSet.insert(stIdx); +} + +void MIRModule::AddSymbol(const MIRSymbol *s) { + DEBUG_ASSERT(s != nullptr, "s is null"); + AddSymbol(s->GetStIdx()); +} + +void MIRModule::DumpGlobals(bool emitStructureType) const { + if (flavor != kFlavorUnknown) { + LogInfo::MapleLogger() << "flavor " << flavor << '\n'; + } + if (srcLang != kSrcLangUnknown) { + LogInfo::MapleLogger() << "srclang " << srcLang << '\n'; + } + LogInfo::MapleLogger() << "id " << id << '\n'; + if (globalMemSize != 0) { + LogInfo::MapleLogger() << "globalmemsize " << globalMemSize << '\n'; + } + if (globalBlkMap != nullptr) { + LogInfo::MapleLogger() << "globalmemmap = [ "; + auto *p = reinterpret_cast(globalBlkMap); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalBlkMap + globalMemSize)) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + p++; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + if (globalWordsTypeTagged != nullptr) { + LogInfo::MapleLogger() << "globalwordstypetagged = [ "; + auto *p = reinterpret_cast(globalWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalWordsTypeTagged + BlockSize2BitVectorSize(globalMemSize))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + if (globalWordsRefCounted != nullptr) { + LogInfo::MapleLogger() << "globalwordsrefcounted = [ "; + auto *p = reinterpret_cast(globalWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalWordsRefCounted + BlockSize2BitVectorSize(globalMemSize))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + LogInfo::MapleLogger() << "numfuncs " << numFuncs << '\n'; + if (!importFiles.empty()) { + // Output current module's mplt on top, imported ones at below + for (auto it = importFiles.rbegin(); it != importFiles.rend(); ++it) { + LogInfo::MapleLogger() << "import \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(*it) << "\"\n"; + } + } + if (!importPaths.empty()) { + size_t size = importPaths.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << "importpath \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(importPaths[i]) + << "\"\n"; + } + } + if (!asmDecls.empty()) { + size_t size = asmDecls.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << "asmdecl "; + EmitStr(asmDecls[i]); + } + } + if (entryFuncName.length()) { + LogInfo::MapleLogger() << "entryfunc &" << entryFuncName << '\n'; + } + if (!fileInfo.empty()) { + 
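+        // Illustrative shape of the section emitted below, in textual MIR:
+        //   fileinfo {
+        //     @INFO_filename 0x...,   (or a quoted string when fileInfoIsString[i])
+        //     ...}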
LogInfo::MapleLogger() << "fileinfo {\n"; + size_t size = fileInfo.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " @" << GlobalTables::GetStrTable().GetStringFromStrIdx(fileInfo[i].first) << " "; + if (!fileInfoIsString[i]) { + LogInfo::MapleLogger() << "0x" << std::hex << fileInfo[i].second; + } else { + LogInfo::MapleLogger() << "\"" << GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(fileInfo[i].second)) + << "\""; + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << std::dec; + } + if (!srcFileInfo.empty()) { + LogInfo::MapleLogger() << "srcfileinfo {\n"; + size_t size = srcFileInfo.size(); + size_t i = 0; + for (auto infoElem : srcFileInfo) { + LogInfo::MapleLogger() << " " << infoElem.second; + LogInfo::MapleLogger() << " \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(infoElem.first) << "\""; + if (i++ < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + } + if (!fileData.empty()) { + LogInfo::MapleLogger() << "filedata {\n"; + size_t size = fileData.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " @" << GlobalTables::GetStrTable().GetStringFromStrIdx(fileData[i].first) << " "; + size_t dataSize = fileData[i].second.size(); + for (size_t j = 0; j < dataSize; ++j) { + uint8 data = fileData[i].second[j]; + LogInfo::MapleLogger() << "0x" << std::hex << static_cast(data); + if (j < dataSize - 1) { + LogInfo::MapleLogger() << ' '; + } + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << std::dec; + } + if (flavor < kMmpl || flavor == kFlavorLmbc) { + for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + DEBUG_ASSERT(type != nullptr, "type should not be nullptr here"); + bool isStructType = type->IsStructType(); + if (isStructType) { + auto *structType = static_cast(type); + // still emit what in extern_structtype_set_ + if (!emitStructureType && externStructTypeSet.find(structType->GetTypeIndex()) == externStructTypeSet.end()) { + continue; + } + if (structType->IsImported()) { + continue; + } + } + + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetKind() == kTypeByName) { + LogInfo::MapleLogger() << "void"; + } else if (type->GetNameStrIdx() == *it) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + } + if (someSymbolNeedForwDecl) { + // an extra pass thru the global symbol table to print forward decl + for (auto sit = symbolSet.begin(); sit != symbolSet.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + if (s->IsNeedForwDecl()) { + s->Dump(false, 0, true); + } + } + } + // dump javaclass and javainterface first + for (auto sit = symbolDefOrder.begin(); sit != symbolDefOrder.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + DEBUG_ASSERT(s != nullptr, "null ptr check"); + if (!s->IsJavaClassInterface()) { + continue; + } + // Verify: all wpofake variables should have been deleted from globaltable + if (!s->IsDeleted()) { + s->Dump(false, 0); + } + } + for (auto sit = symbolDefOrder.begin(); sit != 
symbolDefOrder.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + CHECK_FATAL(s != nullptr, "nullptr check"); + if (s->IsJavaClassInterface()) { + continue; + } + if (!s->IsDeleted() && !s->GetIsImported() && !s->GetIsImportedDecl()) { + s->Dump(false, 0); + } + } + } +} + +void MIRModule::Dump(bool emitStructureType, const std::unordered_set *dumpFuncSet) const { + DumpGlobals(emitStructureType); + DumpFunctionList(dumpFuncSet); +} + +void MIRModule::DumpGlobalArraySymbol() const { + for (StIdx stIdx : symbolSet) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + DEBUG_ASSERT(symbol != nullptr, "null ptr check"); + MIRType *symbolType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(symbol->GetTyIdx()); + DEBUG_ASSERT(symbolType != nullptr, "null ptr check"); + if (symbolType == nullptr || symbolType->GetKind() != kTypeArray) { + continue; + } + symbol->Dump(false, 0); + } +} + +void MIRModule::Emit(const std::string &outFileName) const { + std::ofstream file; + // Change cout's buffer to file. + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(file.rdbuf()); + file.open(outFileName, std::ios::trunc); + DumpGlobals(); + for (MIRFunction *mirFunc : functionList) { + mirFunc->Dump(); + } + // Restore cout's buffer. + LogInfo::MapleLogger().rdbuf(backup); + file.close(); +} + +void MIRModule::DumpFunctionList(const std::unordered_set *dumpFuncSet) const { + for (MIRFunction *mirFunc : functionList) { + if (dumpFuncSet == nullptr || dumpFuncSet->empty()) { + mirFunc->Dump(); + } else { // dump only if this func matches any name in *dumpFuncSet + const std::string &name = mirFunc->GetName(); + bool matched = false; + for (std::string elem : *dumpFuncSet) { + if (name.find(elem.c_str()) != std::string::npos) { + matched = true; + break; + } + } + if (matched) { + mirFunc->Dump(); + } + } + } +} + +void MIRModule::OutputFunctionListAsciiMpl(const std::string &phaseName) { + std::string fileStem; + std::string::size_type lastDot = fileName.find_last_of('.'); + if (lastDot == std::string::npos) { + fileStem = fileName.append(phaseName); + } else { + fileStem = fileName.substr(0, lastDot).append(phaseName); + } + std::string outfileName; + if (flavor >= kMmpl) { + outfileName = fileStem.append(".mmpl"); + } else { + outfileName = fileStem.append(".mpl"); + } + std::ofstream mplFile; + mplFile.open(outfileName, std::ios::app); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); // change cout's buffer to that of file + DumpGlobalArraySymbol(); + DumpFunctionList(nullptr); + LogInfo::MapleLogger().rdbuf(backup); // restore cout's buffer + mplFile.close(); +} + +void MIRModule::DumpToFile(const std::string &fileNameStr, bool emitStructureType) const { + std::ofstream file; + file.open(fileNameStr, std::ios::trunc); + if (!file.is_open()) { + ERR(kLncErr, "Cannot open %s", fileNameStr.c_str()); + return; + } + // Change cout's buffer to file. + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(file.rdbuf()); + Dump(emitStructureType); + // Restore cout's buffer. 
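+    // (All Dump* helpers write to LogInfo::MapleLogger(); this save/swap/restore of
+    //  rdbuf() is the idiom used throughout this file to retarget that stream to a
+    //  file and back.)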
+ LogInfo::MapleLogger().rdbuf(backup); + file.close(); +} + +void MIRModule::DumpDefType() { + for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + DEBUG_ASSERT(type != nullptr, "type should not be nullptr here"); + bool isStructType = type->IsStructType(); + if (isStructType) { + auto *structType = static_cast(type); + if (structType->IsImported()) { + continue; + } + } + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetKind() == kTypeByName) { + LogInfo::MapleLogger() << "void"; + } else if (type->GetNameStrIdx() == *it) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + } +} + +void MIRModule::DumpInlineCandidateToFile(const std::string &fileNameStr) { + if (optimizedFuncs.empty()) { + return; + } + std::ofstream file; + // Change cout's buffer to file. + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(file.rdbuf()); + file.open(fileNameStr, std::ios::trunc); + if (IsCModule()) { + DumpDefType(); + } + // dump global variables needed for inlining file + for (auto symbolIdx : inliningGlobals) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx); + DEBUG_ASSERT(s != nullptr, "null ptr check"); + if (s->GetStorageClass() == kScFstatic) { + if (s->IsNeedForwDecl()) { + // const string, including initialization + s->Dump(false, 0, false); + } + } + } + for (auto symbolIdx : inliningGlobals) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx); + DEBUG_ASSERT(s != nullptr, "null ptr check"); + MIRStorageClass sc = s->GetStorageClass(); + if (s->GetStorageClass() == kScFstatic) { + if (!s->IsNeedForwDecl()) { + // const string, including initialization + s->Dump(false, 0, false); + } + } else if (s->GetSKind() == kStFunc) { + s->GetFunction()->Dump(true); + } else { + // static fields as extern + s->SetStorageClass(kScExtern); + s->Dump(false, 0, true); + } + s->SetStorageClass(sc); + } + for (auto *func : optimizedFuncs) { + func->SetWithLocInfo(false); + func->Dump(); + } + // Restore cout's buffer. + LogInfo::MapleLogger().rdbuf(backup); + file.close(); +} + +// This is not efficient. Only used in debug mode for now. +const std::string &MIRModule::GetFileNameFromFileNum(uint32 fileNum) const { + GStrIdx nameIdx(0); + for (auto &info : srcFileInfo) { + if (info.second == fileNum) { + nameIdx = info.first; + break; + } + } + return GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); +} + +void MIRModule::DumpToHeaderFile(bool binaryMplt, const std::string &outputName) { + std::string outfileName; + std::string fileNameLocal = !outputName.empty() ? 
outputName : fileName; + std::string::size_type lastDot = fileNameLocal.find_last_of('.'); + if (lastDot == std::string::npos) { + outfileName = fileNameLocal.append(".mplt"); + } else { + outfileName = fileNameLocal.substr(0, lastDot).append(".mplt"); + } + if (binaryMplt) { + BinaryMplt binaryMpltTmp(*this); + binaryMpltTmp.Export(outfileName); + } else { + std::ofstream mpltFile; + mpltFile.open(outfileName, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mpltFile.rdbuf()); // change cout's buffer to that of file + for (std::pair entity : GlobalTables::GetConstPool().GetConstU16StringPool()) { + LogInfo::MapleLogger() << "var $"; + entity.second->DumpAsLiteralVar(); + LogInfo::MapleLogger() << '\n'; + } + for (auto it = classList.begin(); it != classList.end(); ++it) { + TyIdx curTyIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curTyIdx); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(type->GetNameStrIdx()); + if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + auto *structType = static_cast(type); + // skip imported class/interface and incomplete types + if (!structType->IsImported() && !structType->IsIncomplete()) { + LogInfo::MapleLogger() << "type $" << name << " "; + type->Dump(1, true); + LogInfo::MapleLogger() << '\n'; + } + } + } + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + mpltFile.close(); + } +} + +/* + We use MIRStructType (kTypeStruct) to represent C/C++ structs + as well as C++ classes. + + We use MIRClassType (kTypeClass) to represent Java classes, specifically. + MIRClassType has parents which encode Java class's parent (exploiting + the fact Java classes have at most one parent class. 
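+  For example, a Java `class B extends A` is represented as a MIRClassType whose
+  parent type index refers to A, while a C struct or a C++ class body maps to a
+  MIRStructType with no parent link.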
+ */ +void MIRModule::DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set &dumpedClasses) const { + if (dumpedClasses.find(&ty) != dumpedClasses.end()) { + return; + } + // first, insert ty to the dumped_classes to prevent infinite recursion + (void)dumpedClasses.insert(&ty); + DEBUG_ASSERT(ty.GetKind() == kTypeClass || ty.GetKind() == kTypeStruct || ty.GetKind() == kTypeUnion || + ty.GetKind() == kTypeInterface, + "Unexpected MIRType."); + /* No need to emit interfaces; because "interface variables are + final and static by default and methods are public and abstract" + */ + if (ty.GetKind() == kTypeInterface) { + return; + } + // dump all of its parents + if (IsJavaModule()) { + DEBUG_ASSERT(ty.GetKind() != kTypeStruct, "type is not supposed to be struct"); + DEBUG_ASSERT(ty.GetKind() != kTypeUnion, "type is not supposed to be union"); + DEBUG_ASSERT(ty.GetKind() != kTypeInterface, "type is not supposed to be interface"); + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + DEBUG_ASSERT((ty.GetKind() == kTypeStruct || ty.GetKind() == kTypeUnion), "type should be either struct or union"); + } else { + DEBUG_ASSERT(false, "source languages other than DEX/C/C++ are not supported yet"); + } + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(ty.GetNameStrIdx()); + if (IsJavaModule()) { + // Java class has at most one parent + auto &classType = static_cast(ty); + MIRClassType *parentType = nullptr; + // find parent and generate its type as well as those of its ancestors + if (classType.GetParentTyIdx() != 0u /* invalid type idx */) { + parentType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx())); + CHECK_FATAL(parentType != nullptr, "nullptr check"); + DumpTypeTreeToCxxHeaderFile(*parentType, dumpedClasses); + } + LogInfo::MapleLogger() << "struct " << name << " "; + if (parentType != nullptr) { + LogInfo::MapleLogger() << ": " << parentType->GetName() << " "; + } + if (!classType.IsIncomplete()) { + /* dump class type; it will dump as '{ ... }' */ + classType.DumpAsCxx(1); + LogInfo::MapleLogger() << ";\n"; + } else { + LogInfo::MapleLogger() << " /* incomplete type */\n"; + } + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + // how to access parent fields???? 
+ DEBUG_ASSERT(false, "not yet implemented"); + } +} + +void MIRModule::DumpToCxxHeaderFile(std::set &leafClasses, const std::string &pathToOutf) const { + std::ofstream mpltFile; + mpltFile.open(pathToOutf, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mpltFile.rdbuf()); // change cout's buffer to that of file + char *headerGuard = strdup(pathToOutf.c_str()); + CHECK_FATAL(headerGuard != nullptr, "strdup failed"); + for (char *p = headerGuard; *p; ++p) { + if (!isalnum(*p)) { + *p = '_'; + } else if (isalpha(*p) && islower(*p)) { + *p = toupper(*p); + } + } + // define a hash table + std::unordered_set dumpedClasses; + const char *prefix = "__SRCLANG_UNKNOWN_"; + if (IsJavaModule()) { + prefix = "__SRCLANG_JAVA_"; + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + prefix = "__SRCLANG_CXX_"; + } + LogInfo::MapleLogger() << "#ifndef " << prefix << headerGuard << "__\n"; + LogInfo::MapleLogger() << "#define " << prefix << headerGuard << "__\n"; + LogInfo::MapleLogger() << "/* this file is compiler-generated; do not edit */\n\n"; + LogInfo::MapleLogger() << "#include \n"; + LogInfo::MapleLogger() << "#include \n"; + for (auto &s : leafClasses) { + CHECK_FATAL(!s.empty(), "string is null"); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(s); + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(strIdx); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (ty == nullptr) { + continue; + } + DEBUG_ASSERT(ty->GetKind() == kTypeClass || ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion || + ty->GetKind() == kTypeInterface, + ""); + DumpTypeTreeToCxxHeaderFile(*ty, dumpedClasses); + } + LogInfo::MapleLogger() << "#endif /* " << prefix << headerGuard << "__ */\n"; + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + free(headerGuard); + headerGuard = nullptr; + mpltFile.close(); +} + +void MIRModule::DumpClassToFile(const std::string &path) const { + std::string strPath(path); + strPath.append("/"); + for (auto it : typeNameTab->GetGStrIdxToTyIdxMap()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second); + std::string outClassFile(name); + /* replace class name / with - */ + std::replace(outClassFile.begin(), outClassFile.end(), '/', '-'); + (void)outClassFile.insert(0, strPath); + outClassFile.append(".mpl"); + std::ofstream mplFile; + mplFile.open(outClassFile, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); + /* dump class type */ + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetNameStrIdx() == it.first && type->GetKind() != kTypeByName) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + mplFile.close();; + } +} + +MIRFunction *MIRModule::FindEntryFunction() { + for (MIRFunction *currFunc : functionList) { + if (currFunc->GetName() == entryFuncName) { + entryFunc = currFunc; + return currFunc; + } + } + return nullptr; +} + +// given the phase name (including '.' 
at beginning), output the program in the +// module to the file with given file suffix, and file stem from +// this->fileName appended with phaseName +void MIRModule::OutputAsciiMpl(const char *phaseName, const char *suffix, + const std::unordered_set *dumpFuncSet, + bool emitStructureType, bool binaryform) { + DEBUG_ASSERT(!(emitStructureType && binaryform), "Cannot emit type info in .bpl"); + std::string fileStem; + std::string::size_type lastDot = fileName.find_last_of('.'); + if (lastDot == std::string::npos) { + fileStem = fileName.append(phaseName); + } else { + fileStem = fileName.substr(0, lastDot).append(phaseName); + } + std::string outfileName; + outfileName = fileStem + suffix; + if (!binaryform) { + std::ofstream mplFile; + mplFile.open(outfileName, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); // change LogInfo::MapleLogger()'s buffer to that of file + Dump(emitStructureType, dumpFuncSet); + LogInfo::MapleLogger().rdbuf(backup); // restore LogInfo::MapleLogger()'s buffer + mplFile.close(); + } else { + BinaryMplt binaryMplt(*this); + binaryMplt.GetBinExport().not2mplt = true; + binaryMplt.Export(outfileName); + } + std::ofstream mplFile; + mplFile.open(outfileName, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); // change cout's buffer to that of file + Dump(emitStructureType); + if (withDbgInfo) { + dbgInfo->Dump(0); + } + LogInfo::MapleLogger().rdbuf(backup); // restore cout's buffer + mplFile.close(); +} + +uint32 MIRModule::GetFileinfo(GStrIdx strIdx) const { + for (auto &infoElem : fileInfo) { + if (infoElem.first == strIdx) { + return infoElem.second; + } + } + DEBUG_ASSERT(false, "should not be here"); + return 0; +} + +std::string MIRModule::GetFileNameAsPostfix() const { + std::string fileNameStr = namemangler::kFileNameSplitterStr; + if (!fileInfo.empty()) { + // option 1: file name in INFO + uint32 fileNameIdx = GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + fileNameStr += GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(fileNameIdx)); + } else { + // option 2: src file name removing ext name. 
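+        // e.g. a first srcFileInfo entry of "hello-world.js" contributes
+        // "hello-world" here, which the sanitization loop below turns into
+        // "hello_world".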
+ if (GetSrcFileInfo().size() != 0) { + GStrIdx idx = GetSrcFileInfo()[0].first; + const std::string kStr = GlobalTables::GetStrTable().GetStringFromStrIdx(idx); + DEBUG_ASSERT(kStr.find_last_of('.') != kStr.npos, "not found ."); + fileNameStr += kStr.substr(0, kStr.find_last_of('.')); + } else { + DEBUG_ASSERT(0, "No fileinfo and no srcfileinfo in mpl file"); + } + } + for (char &c : fileNameStr) { + if (!isalpha(c) && !isdigit(c) && c != '_' && c != '$') { + c = '_'; + } + } + return fileNameStr; +} + +void MIRModule::AddClass(TyIdx tyIdx) { + (void)classList.insert(tyIdx); +} + +void MIRModule::RemoveClass(TyIdx tyIdx) { + (void)classList.erase(tyIdx); +} + +#endif // MIR_FEATURE_FULL +void MIRModule::ReleaseCurFuncMemPoolTmp() { + CurFunction()->ReleaseMemory(); +} + +void MIRModule::SetFuncInfoPrinted() const { + CurFunction()->SetInfoPrinted(); +} + +void MIRModule::InitPartO2List(const std::string &list) { + if (list.empty()) { + return; + } + SetHasPartO2List(true); + std::ifstream infile(list); + if (!infile.is_open()) { + LogInfo::MapleLogger(kLlErr) << "Cannot open partO2 function list file " << list << '\n'; + return; + } + std::string str; + + while (getline(infile, str)) { + if (str.empty()) { + continue; + } + GStrIdx funcStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + partO2FuncList.insert(funcStrIdx); + } + infile.close(); +} + +bool MIRModule::HasNotWarned(uint32 position, uint32 stmtOriginalID) { + auto warnedOp = safetyWarningMap.find(position); + if (warnedOp == safetyWarningMap.end()) { + MapleSet opSet(memPoolAllocator.Adapter()); + opSet.emplace(stmtOriginalID); + safetyWarningMap.emplace(std::pair>(position, std::move(opSet))); + return true; + } + if (warnedOp->second.find(stmtOriginalID) == warnedOp->second.end()) { + warnedOp->second.emplace(stmtOriginalID); + return true; + } + return false; +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/mir_nodes.cpp b/ecmascript/mapleall/maple_ir/src/mir_nodes.cpp new file mode 100755 index 0000000000000000000000000000000000000000..f9a0592b597ab78c5e1b4a15355e63fdb8b60ebd --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/mir_nodes.cpp @@ -0,0 +1,2616 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_nodes.h" + +#include +#include + +#include "maple_string.h" +#include "mir_function.h" +#include "namemangler.h" +#include "opcode_info.h" +#include "printing.h" +#include "utils.h" +#include "verification.h" + +namespace maple { +MIRModule *theMIRModule = nullptr; +std::atomic StmtNode::stmtIDNext(1); // 0 is reserved +uint32 StmtNode::lastPrintedLineNum = 0; +uint16 StmtNode::lastPrintedColumnNum = 0; +const int32 CondGotoNode::probAll = 10000; + +const char *GetIntrinsicName(MIRIntrinsicID intrn) { + switch (intrn) { + default: +#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) 
\ + case INTRN_##STR: \ + return #STR; +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC + } +} + +const char *BaseNode::GetOpName() const { + return kOpcodeInfo.GetTableItemAt(GetOpCode()).name.c_str(); +} + +bool BaseNode::MayThrowException() { + if (kOpcodeInfo.MayThrowException(GetOpCode())) { + if (GetOpCode() != OP_array) { + return true; + } + auto *arry = static_cast(this); + if (arry->GetBoundsCheck()) { + return true; + } + } else if (GetOpCode() == OP_intrinsicop) { + auto *inNode = static_cast(this); + if (inNode->GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) { + return true; + } + } + for (size_t i = 0; i < NumOpnds(); ++i) { + if (Opnd(i)->MayThrowException()) { + return true; + } + } + return false; +} + +bool AddrofNode::CheckNode(const MIRModule &mod) const { + const MIRSymbol *st = mod.CurFunction()->GetLocalOrGlobalSymbol(GetStIdx()); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + MIRType *ty = st->GetType(); + switch (ty->GetKind()) { + case kTypeScalar: { +#ifdef DYNAMICLANG + if (GetPrimType() == PTY_dynany) { + return true; + } + return IsPrimitiveScalar(GetPrimType()); +#else + return IsPrimitiveScalar(GetPrimType()); +#endif + } + case kTypeArray: { + return GetPrimType() == PTY_agg; + } + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: { + if (GetFieldID() == 0) { + return GetPrimType() == PTY_agg; + } + auto *structType = static_cast(ty); + TyIdx fTyIdx = structType->GetFieldTyIdx(fieldID); + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fTyIdx); + MIRTypeKind subKind = subType->GetKind(); + return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) || + (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) || + (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) || + (subKind == kTypeStruct && GetPrimType() == PTY_agg) || (fTyIdx != 0u && GetPrimType() == PTY_agg); + } + case kTypeClass: + case kTypeClassIncomplete: { + if (fieldID == 0) { + return GetPrimType() == PTY_agg; + } + auto *classType = static_cast(ty); + MIRType *subType = classType->GetFieldType(fieldID); + MIRTypeKind subKind = subType->GetKind(); + return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) || + (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) || + (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) || + (subKind == kTypeStruct && GetPrimType() == PTY_agg); + } + case kTypeInterface: + case kTypeInterfaceIncomplete: { + if (fieldID == 0) { + return GetPrimType() == PTY_agg; + } + auto *interfaceType = static_cast(ty); + MIRType *subType = interfaceType->GetFieldType(fieldID); + MIRTypeKind subKind = subType->GetKind(); + return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) || + (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) || + (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) || + (subKind == kTypeStruct && GetPrimType() == PTY_agg); + } + case kTypePointer: + return IsPrimitivePoint(GetPrimType()); + case kTypeParam: + case kTypeGenericInstant: + return true; + default: + return false; + } +} + +MIRType *IreadNode::GetType() const { + MIRPtrType *ptrtype = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)); + if (fieldID == 0) { + return ptrtype->GetPointedType(); + } + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrtype->GetPointedTyIdxWithFieldID(fieldID)); +} + +bool IreadNode::IsVolatile() const { + MIRType *type = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    DEBUG_ASSERT(type != nullptr, "null ptr check");
+    DEBUG_ASSERT(type->IsMIRPtrType(), "type of iread should be pointer type");
+    return static_cast<MIRPtrType*>(type)->IsPointedTypeVolatile(fieldID);
+}
+
+bool AddrofNode::IsVolatile(const MIRModule &mod) const {
+    auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(symbol != nullptr, "null ptr check on symbol");
+    return symbol->IsVolatile();
+}
+
+bool DreadoffNode::IsVolatile(const MIRModule &mod) const {
+    auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(symbol != nullptr, "null ptr check on symbol");
+    return symbol->IsVolatile();
+}
+
+bool DassignNode::AssigningVolatile(const MIRModule &mod) const {
+    auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(symbol != nullptr, "null ptr check on symbol");
+    return symbol->IsVolatile();
+}
+
+bool IassignNode::AssigningVolatile() const {
+    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    DEBUG_ASSERT(type != nullptr, "null ptr check");
+    DEBUG_ASSERT(type->IsMIRPtrType(), "type of iassign should be pointer type");
+    return static_cast<MIRPtrType*>(type)->IsPointedTypeVolatile(fieldID);
+}
+
+void BlockNode::AddStatement(StmtNode *stmt) {
+    DEBUG_ASSERT(stmt != nullptr, "null ptr check");
+    stmtNodeList.push_back(stmt);
+}
+
+void BlockNode::AppendStatementsFromBlock(BlockNode &blk) {
+    if (blk.GetStmtNodes().empty()) {
+        return;
+    }
+    stmtNodeList.splice(stmtNodeList.end(), blk.GetStmtNodes());
+}
+
+/// Insert stmt as the first
+void BlockNode::InsertFirst(StmtNode *stmt) {
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmtNodeList.push_front(stmt);
+}
+
+/// Insert stmt as the last
+void BlockNode::InsertLast(StmtNode *stmt) {
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmtNodeList.push_back(stmt);
+}
+
+void BlockNode::ReplaceStmtWithBlock(StmtNode &stmtNode, BlockNode &blk) {
+    stmtNodeList.splice(&stmtNode, blk.GetStmtNodes());
+    stmtNodeList.erase(&stmtNode);
+    stmtNode.SetNext(blk.GetLast()->GetNext());
+}
+
+void BlockNode::ReplaceStmt1WithStmt2(const StmtNode *stmtNode1, StmtNode *stmtNode2) {
+    if (stmtNode2 == stmtNode1) {
+        // do nothing
+    } else if (stmtNode2 == nullptr) {
+        // delete stmtNode1
+        stmtNodeList.erase(stmtNode1);
+    } else {
+        // replace stmtNode1 with stmtNode2
+        stmtNodeList.insert(stmtNode1, stmtNode2);
+        (void)stmtNodeList.erase(stmtNode1);
+    }
+}
+
+// remove stmtNode1 from block
+void BlockNode::RemoveStmt(const StmtNode *stmtNode1) {
+    DEBUG_ASSERT(stmtNode1 != nullptr, "delete a null statement");
+    (void)stmtNodeList.erase(stmtNode1);
+}
+
+/// Insert stmtNode2 before stmtNode1 in current block.
+void BlockNode::InsertBefore(const StmtNode *stmtNode1, StmtNode *stmtNode2) {
+    stmtNodeList.insert(stmtNode1, stmtNode2);
+}
+
+/// Insert stmtNode2 after stmtNode1 in current block.
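+/// e.g. after blk->InsertAfter(stmt, lbl), lbl immediately follows stmt in blk.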
+void BlockNode::InsertAfter(const StmtNode *stmtNode1, StmtNode *stmtNode2) {
+    stmtNodeList.insertAfter(stmtNode1, stmtNode2);
+}
+
+// insert all the stmts in inblock to the current block after stmt1
+void BlockNode::InsertBlockAfter(BlockNode &inblock, const StmtNode *stmt1) {
+    DEBUG_ASSERT(stmt1 != nullptr, "null ptr check");
+    DEBUG_ASSERT(!inblock.IsEmpty(), "NYI");
+    stmtNodeList.splice(stmt1, inblock.GetStmtNodes());
+}
+
+BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator,
+                                         std::unordered_map<uint32_t, uint64_t> &toFreqs,
+                                         std::unordered_map<uint32_t, uint64_t> &fromFreqs,
+                                         uint64_t numer, uint64_t denom, uint32_t updateOp) {
+    auto *nnode = allocator.GetMemPool()->New<BlockNode>();
+    nnode->SetStmtID(stmtIDNext++);
+    if (fromFreqs.count(GetStmtID()) > 0) {
+        uint64_t oldFreq = fromFreqs[GetStmtID()];
+        uint64_t newFreq;
+        if (updateOp & kUpdateUnrollRemainderFreq) {
+            newFreq = denom > 0 ? (oldFreq * numer % denom) : oldFreq;
+        } else {
+            newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq);
+        }
+        toFreqs[nnode->GetStmtID()] = (newFreq > 0 || (numer == 0)) ? newFreq : 1;
+        if (updateOp & kUpdateOrigFreq) { // updateOp & 1 : update from
+            int64_t left = ((oldFreq - newFreq) > 0 || (oldFreq == 0)) ? (oldFreq - newFreq) : 1;
+            fromFreqs[GetStmtID()] = static_cast<uint64_t>(left);
+        }
+    }
+    for (auto &stmt : stmtNodeList) {
+        StmtNode *newStmt;
+        if (stmt.GetOpCode() == OP_block) {
+            newStmt = static_cast<StmtNode*>((static_cast<BlockNode*>(&stmt))->CloneTreeWithFreqs(
+                allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else if (stmt.GetOpCode() == OP_if) {
+            newStmt = static_cast<StmtNode*>((static_cast<IfStmtNode*>(&stmt))->CloneTreeWithFreqs(
+                allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else if (stmt.GetOpCode() == OP_while) {
+            newStmt = static_cast<StmtNode*>((static_cast<WhileStmtNode*>(&stmt))->CloneTreeWithFreqs(
+                allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else if (stmt.GetOpCode() == OP_doloop) {
+            newStmt = static_cast<StmtNode*>((static_cast<DoloopNode*>(&stmt))->CloneTreeWithFreqs(
+                allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else {
+            newStmt = static_cast<StmtNode*>(stmt.CloneTree(allocator));
+            if (fromFreqs.count(stmt.GetStmtID()) > 0) {
+                uint64_t oldFreq = fromFreqs[stmt.GetStmtID()];
+                uint64_t newFreq;
+                if (updateOp & kUpdateUnrollRemainderFreq) {
+                    newFreq = denom > 0 ? (oldFreq * numer % denom) : oldFreq;
+                } else {
+                    newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq);
+                }
+                toFreqs[newStmt->GetStmtID()] = (newFreq > 0 || oldFreq == 0 || numer == 0) ?
+                    static_cast<uint64_t>(newFreq) : 1;
+                if (updateOp & kUpdateOrigFreq) {
+                    int64_t left = ((oldFreq - newFreq) > 0 || oldFreq == 0) ?
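+                    // Illustrative: with numer/denom = 1/4, an oldFreq of 100 gives
+                    // newFreq 25 (a 25%-scaled clone); kUpdateOrigFreq then leaves
+                    // 100 - 25 = 75 on the original stmt, while the unroll-remainder
+                    // mode would compute 100 * 1 % 4 == 0 instead.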
(oldFreq - newFreq) : 1; + fromFreqs[stmt.GetStmtID()] = static_cast(left); + } + } + } + DEBUG_ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetSrcPos(stmt.GetSrcPos()); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + nnode->AddStatement(newStmt); + } + return nnode; +} + +void BaseNode::DumpBase(int32 indent) const { + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); +} + +void CatchNode::Dump(int32 indent) const { + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " {"; + size_t size = exceptionTyIdxVec.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(exceptionTyIdxVec[i])->Dump(indent + 1); + } + LogInfo::MapleLogger() << " }\n"; +} + +void CppCatchNode::Dump(int32 indent) const { + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetName(op); + if (exceptionTyIdx.GetIdx() != 0) { + LogInfo::MapleLogger() << " { "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(exceptionTyIdx)->Dump(indent + 1); + LogInfo::MapleLogger() << " }"; + } + LogInfo::MapleLogger() << std::endl; +} + +void UnaryNode::DumpOpnd(const MIRModule&, int32 indent) const { + DumpOpnd(indent); +} + +void UnaryNode::DumpOpnd(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (uOpnd != nullptr) { + uOpnd->Dump(indent); + } + LogInfo::MapleLogger() << ")"; +} + +void UnaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + DumpOpnd(*theMIRModule, indent); +} + +void TypeCvtNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " "; + LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()) << " " << GetPrimTypeName(FromType()); + DumpOpnd(*theMIRModule, indent); +} + +void RetypeNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " "; + LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()) << " "; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (ty->GetKind() == kTypeScalar) { + LogInfo::MapleLogger() << "<"; + ty->Dump(indent + 1); + LogInfo::MapleLogger() << ">"; + } else { + ty->Dump(indent + 1); + } + DumpOpnd(*theMIRModule, indent); +} + +void ExtractbitsNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + if (GetOpCode() == OP_extractbits) { + LogInfo::MapleLogger() << " " << static_cast(bitsOffset) << " " << static_cast(bitsSize); + } else { + LogInfo::MapleLogger() << " " << static_cast(bitsSize); + } + DumpOpnd(*theMIRModule, indent); +} + +void IreadNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + LogInfo::MapleLogger() << " " << fieldID; + DumpOpnd(*theMIRModule, indent); +} + +void IreadoffNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " " << offset; + DumpOpnd(*theMIRModule, indent); +} + +void IreadFPoffNode::Dump(int32) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " " << offset; +} + +void BinaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + 
BinaryOpnds::Dump(indent); +} + +void BinaryOpnds::Dump(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (bOpnd[0]->IsLeaf() && bOpnd[1]->IsLeaf()) { + bOpnd[0]->Dump(0); + LogInfo::MapleLogger() << ", "; + bOpnd[1]->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + bOpnd[0]->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + bOpnd[1]->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")"; +} + +void ResolveFuncNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" << func->GetName(); + BinaryOpnds::Dump(indent); +} + +void CompareNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " " << GetPrimTypeName(opndType); + BinaryOpnds::Dump(indent); +} + +void DepositbitsNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " " << static_cast(bitsOffset) << " " << static_cast(bitsSize) << " ("; + if (GetBOpnd(0)->IsLeaf() && GetBOpnd(1)->IsLeaf()) { + GetBOpnd(0)->Dump(0); + LogInfo::MapleLogger() << ", "; + GetBOpnd(1)->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + GetBOpnd(0)->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + GetBOpnd(1)->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")"; +} + +void TernaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " ("; + if (topnd[kFirstOpnd]->IsLeaf() && topnd[kSecondOpnd]->IsLeaf() && topnd[kThirdOpnd]->IsLeaf()) { + topnd[kFirstOpnd]->Dump(0); + LogInfo::MapleLogger() << ", "; + topnd[kSecondOpnd]->Dump(0); + LogInfo::MapleLogger() << ", "; + topnd[kThirdOpnd]->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + topnd[kFirstOpnd]->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + topnd[kSecondOpnd]->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + topnd[kThirdOpnd]->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")"; +} + +void NaryOpnds::Dump(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (GetNopndSize() == 0) { + LogInfo::MapleLogger() << ")"; + return; + } + if (GetNopndSize() == 1) { + GetNopndAt(0)->Dump(indent); + } else { + bool allisLeaf = true; + for (size_t i = 0; i < GetNopndSize(); ++i) + if (!GetNopndAt(i)->IsLeaf()) { + allisLeaf = false; + break; + } + if (allisLeaf) { + GetNopndAt(0)->Dump(0); + for (size_t i = 1; i < GetNopndSize(); ++i) { + LogInfo::MapleLogger() << ", "; + GetNopndAt(i)->Dump(0); + } + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + GetNopndAt(0)->Dump(indent + 1); + for (size_t i = 1; i < GetNopndSize(); ++i) { + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + GetNopndAt(i)->Dump(indent + 1); + } + } + } + LogInfo::MapleLogger() << ")"; +} + +void DeoptBundleInfo::Dump(int32 indent) const { + size_t deoptBundleSize = deoptBundleInfo.size(); + if (deoptBundleSize == 0) { + return; + } + LogInfo::MapleLogger() << " deopt: ("; + bool isFirstItem = true; + for (const auto &elem : deoptBundleInfo) { + if (!isFirstItem) { + LogInfo::MapleLogger() << ", "; + } else { + isFirstItem = false; + } + LogInfo::MapleLogger() << elem.first << ": " << "%" << elem.second << " "; + } + 
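+  // close the "deopt: (...)" operand list opened above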
LogInfo::MapleLogger() << ")"; +} + +bool NaryOpnds::VerifyOpnds() const { + bool nOpndsVerify = true; + for (size_t i = 0; i < GetNopndSize(); ++i) { + if (!GetNopndAt(i)->Verify()) { + nOpndsVerify = false; + break; + } + } + return nOpndsVerify; +} + +void NaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + NaryOpnds::Dump(indent); +} + +const MIRType *ArrayNode::GetArrayType(const TypeTable &tt) const { + const MIRType *type = tt.GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type->GetKind() == kTypePointer, "expect array type pointer"); + const auto *pointType = static_cast(type); + return tt.GetTypeFromTyIdx(pointType->GetPointedTyIdx()); +} +MIRType *ArrayNode::GetArrayType(const TypeTable &tt) { + return const_cast(const_cast(this)->GetArrayType(tt)); +} + +const BaseNode *ArrayNode::GetDim(const MIRModule &mod, TypeTable &tt, int i) const { + const auto *arrayType = static_cast(GetArrayType(tt)); + auto *mirConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(i, *tt.GetTypeFromTyIdx(arrayType->GetElemTyIdx())); + return mod.CurFuncCodeMemPool()->New(mirConst); +} +BaseNode *ArrayNode::GetDim(const MIRModule &mod, TypeTable &tt, int i) { + return const_cast(const_cast(this)->GetDim(mod, tt, i)); +} + +void ArrayNode::Dump(int32 indent) const { + PrintIndentation(0); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " "; + if (boundsCheck) { + LogInfo::MapleLogger() << "1 "; + } else { + LogInfo::MapleLogger() << "0 "; + } + LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + NaryOpnds::Dump(indent); +} + +bool ArrayNode::IsSameBase(ArrayNode *arry) { + DEBUG_ASSERT(arry != nullptr, "null ptr check"); + if (arry == this) { + return true; + } + BaseNode *curBase = this->GetBase(); + BaseNode *otherBase = arry->GetBase(); + if (curBase->GetOpCode() != OP_addrof || otherBase->GetOpCode() != OP_addrof) { + return false; + } + return static_cast(curBase)->GetStIdx() == static_cast(otherBase)->GetStIdx(); +} + +void IntrinsicopNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + if (GetOpCode() == OP_intrinsicopwithtype) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + LogInfo::MapleLogger() << " " << GetIntrinsicName(GetIntrinsic()); + NaryOpnds::Dump(indent); +} + +void ConstvalNode::Dump(int32) const { + if (GetConstVal()->GetType().GetKind() != kTypePointer) { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + } + GetConstVal()->Dump(); +} + +void ConststrNode::Dump(int32) const { + BaseNode::DumpBase(0); + const std::string kStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(UStrIdx(strIdx)); + PrintString(kStr); +} + +void Conststr16Node::Dump(int32) const { + BaseNode::DumpBase(0); + const std::u16string kStr16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(U16StrIdx(strIdx)); + // UTF-16 string are dumped as UTF-8 string in mpl to keep the printable chars in ascii form + std::string str; + (void)namemangler::UTF16ToUTF8(str, kStr16); + PrintString(str); +} + +void SizeoftypeNode::Dump(int32) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); +} + +void FieldsDistNode::Dump(int32) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + LogInfo::MapleLogger() << " " << fieldID1 << " " << fieldID2; +} + +void AddrofNode::Dump(int32) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(GetStIdx()); + LogInfo::MapleLogger() << (GetStIdx().Islocal() ? " %" : " $"); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << st->GetName(); + if (fieldID != 0) { + LogInfo::MapleLogger() << " " << fieldID; + } +} + +void DreadoffNode::Dump(int32) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + LogInfo::MapleLogger() << (stIdx.Islocal() ? " %" : " $"); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << st->GetName(); + LogInfo::MapleLogger() << " " << offset; +} + +void RegreadNode::Dump(int32) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + if (regIdx >= 0) { + LogInfo::MapleLogger() << " %" << theMIRModule->CurFunction()->GetPregTab()->PregFromPregIdx(regIdx)->GetPregNo(); + return; + } + LogInfo::MapleLogger() << " %%"; + switch (regIdx) { + case -kSregSp: + LogInfo::MapleLogger() << "SP"; + break; + case -kSregFp: + LogInfo::MapleLogger() << "FP"; + break; + case -kSregGp: + LogInfo::MapleLogger() << "GP"; + break; + case -kSregThrownval: + LogInfo::MapleLogger() << "thrownval"; + break; + case -kSregMethodhdl: + LogInfo::MapleLogger() << "methodhdl"; + break; + default: + int32 retValIdx = (-regIdx) - kSregRetval0; + LogInfo::MapleLogger() << "retval" << retValIdx; + break; + } +} + +void AddroffuncNode::Dump(int32) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" << GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx())->GetName(); +} + +void AddroflabelNode::Dump(int32) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(offset)); +} + +void StmtNode::DumpBase(int32 indent) const { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData() && + theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) >= 0) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " << + theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) << "\n"; + } + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name; +} + +void StmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << '\n'; +} + +// Get the next stmt skip the comment stmt. 
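+// Returns nullptr when nothing but comment stmts follow.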
+StmtNode *StmtNode::GetRealNext() const { + StmtNode *stmt = this->GetNext(); + while (stmt != nullptr) { + if (stmt->GetOpCode() != OP_comment) { + break; + } + stmt = stmt->GetNext(); + } + return stmt; +} + +// insert this before pos +void StmtNode::InsertAfterThis(StmtNode &pos) { + this->SetNext(&pos); + if (pos.GetPrev()) { + this->SetPrev(pos.GetPrev()); + pos.GetPrev()->SetNext(this); + } + pos.SetPrev(this); +} + +// insert stmtnode after pos +void StmtNode::InsertBeforeThis(StmtNode &pos) { + this->SetPrev(&pos); + if (pos.GetNext()) { + this->SetNext(pos.GetNext()); + pos.GetNext()->SetPrev(this); + } + pos.SetNext(this); +} + +void DassignNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (st->IsLocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID; + LogInfo::MapleLogger() << " ("; + if (GetRHS() != nullptr) { + GetRHS()->Dump(indent + 1); + } else { + LogInfo::MapleLogger() << "/*empty-rhs*/"; + } + LogInfo::MapleLogger() << ")\n"; +} + +void DassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (st->IsLocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << offset; + LogInfo::MapleLogger() << " ("; + if (GetRHS() != nullptr) { + GetRHS()->Dump(indent + 1); + } else { + LogInfo::MapleLogger() << "/*empty-rhs*/"; + } + LogInfo::MapleLogger() << ")\n"; +} + +void RegassignNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()); + if (regIdx >= 0) { + LogInfo::MapleLogger() << " %" << theMIRModule->CurFunction()->GetPregTab()->PregFromPregIdx(regIdx)->GetPregNo(); + } else { + LogInfo::MapleLogger() << " %%"; + switch (regIdx) { + case -kSregSp: + LogInfo::MapleLogger() << "SP"; + break; + case -kSregFp: + LogInfo::MapleLogger() << "FP"; + break; + case -kSregGp: + LogInfo::MapleLogger() << "GP"; + break; + case -kSregThrownval: + LogInfo::MapleLogger() << "thrownval"; + break; + case -kSregMethodhdl: + LogInfo::MapleLogger() << "methodhdl"; + break; + case -kSregRetval0: + LogInfo::MapleLogger() << "retval0"; + break; + // no default + default: + break; + } + } + LogInfo::MapleLogger() << " ("; + UnaryStmtNode::Opnd(0)->Dump(indent + 1); + LogInfo::MapleLogger() << ")\n"; +} + +void IassignNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + LogInfo::MapleLogger() << " " << fieldID; + LogInfo::MapleLogger() << " ("; + if (addrExpr->IsLeaf() && rhs->IsLeaf()) { + addrExpr->Dump(0); + LogInfo::MapleLogger() << ", "; + rhs->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + addrExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ", \n"; + PrintIndentation(indent + 1); + rhs->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")\n"; +} + +void IassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()) << " " << offset; + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void IassignFPoffNode::Dump(int32 indent) 
const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()) << " " << offset; + DumpOpnd(*theMIRModule, indent); + LogInfo::MapleLogger() << '\n'; +} + +void BlkassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << offset << " " << GetAlign() << " " << blockSize; + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void GotoNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (offset == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName( + static_cast(offset)) << '\n'; + } +} + +void JsTryNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (catchOffset == 0) { + LogInfo::MapleLogger() << " 0"; + } else { + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(catchOffset)); + } + if (finallyOffset == 0) { + LogInfo::MapleLogger() << " 0\n"; + } else { + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName( + static_cast(finallyOffset)) << '\n'; + } +} + +void TryNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " {"; + for (size_t i = 0; i < offsets.size(); ++i) { + uint32 offset = offsets[i]; + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName((LabelIdx)offset); + } + LogInfo::MapleLogger() << " }\n"; +} + +void CondGotoNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(offset)); + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")\n"; +} + +void SwitchNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + switchOpnd->Dump(indent); + if (defaultLabel == 0) { + LogInfo::MapleLogger() << ") 0 {"; + } else { + LogInfo::MapleLogger() << ") @" << theMIRModule->CurFunction()->GetLabelName(defaultLabel) << " {"; + } + for (auto it = switchTable.begin(); it != switchTable.end(); it++) { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << std::hex << "0x" << it->first << std::dec; + LogInfo::MapleLogger() << ": goto @" << theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void RangeGotoNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ") " << tagOffset << " {"; + for (auto it = rangegotoTable.begin(); it != rangegotoTable.end(); it++) { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << std::hex << "0x" << it->first << std::dec; + LogInfo::MapleLogger() << ": goto @" << theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void MultiwayNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + multiWayOpnd->Dump(indent); + if (defaultLabel == 0) { + LogInfo::MapleLogger() << ") 0 {"; + } else { + LogInfo::MapleLogger() << ") @" << theMIRModule->CurFunction()->GetLabelName(defaultLabel) << " {"; + } + for (auto it = multiWayTable.begin(); it != multiWayTable.end(); it++) { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent); + LogInfo::MapleLogger() << " ("; + it->first->Dump(indent + 1); + LogInfo::MapleLogger() << "): goto @" << 
theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void UnaryStmtNode::DumpOpnd(const MIRModule&, int32 indent) const { + DumpOpnd(indent); +} + +void UnaryStmtNode::DumpOpnd(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (uOpnd != nullptr) { + uOpnd->Dump(indent); + } + LogInfo::MapleLogger() << ")\n"; +} + +void UnaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + DumpOpnd(indent); +} + +void GCMallocNode::Dump(int32) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); +} + +void JarrayMallocNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0, false); + DumpOpnd(*theMIRModule, indent); +} + +void IfStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + Opnd()->Dump(indent); + LogInfo::MapleLogger() << ")"; + thenPart->Dump(indent); + if (elsePart) { + PrintIndentation(indent); + LogInfo::MapleLogger() << "else {\n"; + for (auto &stmt : elsePart->GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}\n"; + } +} + +void WhileStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (GetOpCode() == OP_while) { + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")"; + body->Dump(indent); + } else { // OP_dowhile + LogInfo::MapleLogger() << " {\n"; + for (auto &stmt : body->GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "} ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")\n"; + } +} + +void DoloopNode::DumpDoVar(const MIRModule &mod) const { + if (isPreg) { + LogInfo::MapleLogger() << " %" + << mod.CurFunction()->GetPregTab()->PregFromPregIdx(doVarStIdx.FullIdx())->GetPregNo() + << " (\n"; + } else { + const MIRSymbol *st = mod.CurFunction()->GetLocalOrGlobalSymbol(doVarStIdx); + LogInfo::MapleLogger() << " %" << st->GetName() << " (\n"; + } +} + +void DoloopNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + DumpDoVar(*theMIRModule); + PrintIndentation(indent + 1); + startExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + condExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + incrExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ")"; + doBody->Dump(indent + 1); +} + +void ForeachelemNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(elemStIdx); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << " %" << st->GetName(); + st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(arrayStIdx); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (arrayStIdx.Islocal() ? 
" %" : " $"); + LogInfo::MapleLogger() << st->GetName(); + loopBody->Dump(indent + 1); +} + +void BinaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void NaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void CallAssertNonnullStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + SafetyCallCheckStmtNode::Dump(); + UnaryStmtNode::DumpOpnd(indent); +} + +void AssertNonnullStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (theMIRModule->IsCModule()) { + SafetyCheckStmtNode::Dump(); + } + UnaryStmtNode::DumpOpnd(indent); +} + +void AssertBoundaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + SafetyCheckStmtNode::Dump(); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void CallAssertBoundaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + SafetyCallCheckStmtNode::Dump(); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void DumpCallReturns(const MIRModule &mod, CallReturnVector nrets, int32 indent) { + const MIRFunction *mirFunc = mod.CurFunction(); + if (nrets.empty()) { + LogInfo::MapleLogger() << " {}\n"; + return; + } else if (nrets.size() == 1) { + StIdx stIdx = nrets.begin()->first; + RegFieldPair regFieldPair = nrets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "st is null"); + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << " { dassign "; + LogInfo::MapleLogger() << (stIdx.Islocal() ? "%" : "$"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID << " }\n"; + return; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + DEBUG_ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << " { regassign"; + LogInfo::MapleLogger() << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo() << "}\n"; + return; + } + } + LogInfo::MapleLogger() << " {\n"; + for (auto it = nrets.begin(); it != nrets.end(); it++) { + PrintIndentation(indent + 2); + StIdx stIdx = (it)->first; + RegFieldPair regFieldPair = it->second; + if (!regFieldPair.IsReg()) { + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << "dassign"; + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "st is null"); + LogInfo::MapleLogger() << (stIdx.Islocal() ? 
" %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID << '\n'; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + DEBUG_ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << "regassign" + << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo() << '\n'; + } + } + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "}\n"; +} + +// iread expr has sideeffect, may cause derefference error +bool HasIreadExpr(const BaseNode *expr) { + if (expr->GetOpCode() == OP_iread) { + return true; + } + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + if (HasIreadExpr(expr->Opnd(i))) { + return true; + } + } + return false; +} + +// layer to leaf node +size_t MaxDepth(const BaseNode *expr) { + if (expr->IsLeaf()) { + return 1; + } + size_t maxSubDepth = 0; + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + size_t depth = MaxDepth(expr->Opnd(i)); + maxSubDepth = (depth > maxSubDepth) ? depth : maxSubDepth; + } + return maxSubDepth + 1; // expr itself +} + +MIRType *CallNode::GetCallReturnType() { + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + DEBUG_ASSERT(GlobalTables::GetFunctionTable().GetFuncTable().empty() == false, "container check"); + MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + return mirFunc->GetReturnType(); +} + +const MIRSymbol *CallNode::GetCallReturnSymbol(const MIRModule &mod) const { + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + const CallReturnVector &nRets = this->GetReturnVec(); + if (nRets.size() == 1) { + StIdx stIdx = nRets.begin()->first; + RegFieldPair regFieldPair = nRets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRFunction *mirFunc = mod.CurFunction(); + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + return st; + } + } + return nullptr; +} + +void CallNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + if (tyIdx != 0u) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + CHECK(puIdx < GlobalTables::GetFunctionTable().GetFuncTable().size(), + "index out of range in CallNode::Dump"); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" << func->GetName(); + NaryOpnds::Dump(indent); + DeoptBundleInfo::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +MIRType *IcallNode::GetCallReturnType() { + if (op == OP_icall || op == OP_icallassigned) { + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx); + } + // icallproto or icallprotoassigned + MIRFuncType *funcType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); +} + +const MIRSymbol *IcallNode::GetCallReturnSymbol(const MIRModule &mod) const { + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + const CallReturnVector &nRets = this->GetReturnVec(); + if (nRets.size() == 1) { + StIdx stIdx = nRets.begin()->first; + RegFieldPair regFieldPair = nRets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRFunction *mirFunc = mod.CurFunction(); + const MIRSymbol *st = 
mirFunc->GetLocalOrGlobalSymbol(stIdx); + return st; + } + } + return nullptr; +} + +void IcallNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + if (op == OP_icallproto || op == OP_icallprotoassigned) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)->Dump(indent + 1); + } + NaryOpnds::Dump(indent); + DeoptBundleInfo::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->returnValues, indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +MIRType *IntrinsiccallNode::GetCallReturnType() { + CHECK_FATAL(intrinsic < INTRN_LAST, "Index out of bound in IntrinsiccallNode::GetCallReturnType"); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrinsic]; + return intrinDesc->GetReturnType(); +} + +void IntrinsiccallNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + if (tyIdx != 0u) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + if (GetOpCode() == OP_intrinsiccall || GetOpCode() == OP_intrinsiccallassigned || + GetOpCode() == OP_intrinsiccallwithtype || GetOpCode() == OP_intrinsiccallwithtypeassigned) { + LogInfo::MapleLogger() << " " << GetIntrinsicName(intrinsic); + } else { + LogInfo::MapleLogger() << " " << intrinsic; + } + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +void CallinstantNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(GetPUIdx()); + LogInfo::MapleLogger() << " &" << func->GetName(); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(instVecTyIdx); + LogInfo::MapleLogger() << "<"; + auto *instVecType = static_cast(ty); + instVecType->Dump(indent); + LogInfo::MapleLogger() << ">"; + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +void BlockNode::Dump(int32 indent, const MIRSymbolTable *theSymTab, MIRPregTable *thePregTab, + bool withInfo, bool isFuncbody, MIRFlavor flavor) const { + if (!withInfo) { + LogInfo::MapleLogger() << " {\n"; + } + // output puid for debugging purpose + if (isFuncbody) { + theMIRModule->CurFunction()->DumpFuncBody(indent); + if (theSymTab != nullptr || thePregTab != nullptr) { + // print the locally declared type names + if (theMIRModule->CurFunction()->HaveTypeNameTab()) { + for (auto it : theMIRModule->CurFunction()->GetGStrIdxToTyIdxMap()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second); + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "type %" << name << " "; + if (type->GetKind() != kTypeByName) { + type->Dump(indent + 2, true); + } else { + type->Dump(indent + 2); + } + LogInfo::MapleLogger() << '\n'; + } + } + // print the locally declared variables + theSymTab->Dump(true, indent + 1, false, flavor); /* first:isLocal, third:printDeleted */ + if (thePregTab != nullptr) { + thePregTab->DumpPregsWithTypes(indent + 1); + } + } + LogInfo::MapleLogger() << '\n'; + if (theMIRModule->CurFunction()->NeedEmitAliasInfo()) { + theMIRModule->CurFunction()->DumpScope(); + } + } 
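+  // dump source position, optional stmt frequency, then the block body itself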
+ srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " << + theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) << "\n"; + } + for (auto &stmt : GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}\n"; +} + +void LabelNode::Dump(int32) const { + if (theMIRModule->CurFunction()->WithLocInfo()) { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + } + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " << + theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) << "\n"; + } + LogInfo::MapleLogger() << "@" << theMIRModule->CurFunction()->GetLabelName(labelIdx) << "\n"; +} + +void CommentNode::Dump(int32 indent) const { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + PrintIndentation(indent); + LogInfo::MapleLogger() << "#" << comment << '\n'; +} + +void EmitStr(const MapleString &mplStr) { + const char *str = mplStr.c_str(); + size_t len = mplStr.length(); + LogInfo::MapleLogger() << "\""; + + // don't expand special character; convert all \s to \\s in string + for (size_t i = 0; i < len; ++i) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + LogInfo::MapleLogger() << buf; + } else if (*str == '\b') { + LogInfo::MapleLogger() << "\\b"; + } else if (*str == '\n') { + LogInfo::MapleLogger() << "\\n"; + } else if (*str == '\r') { + LogInfo::MapleLogger() << "\\r"; + } else if (*str == '\t') { + LogInfo::MapleLogger() << "\\t"; + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + LogInfo::MapleLogger() << buf; + } else { + /* all others, print as number */ + int ret = snprintf_s(buf, sizeof(buf), kBufSize - 1, "\\%03o", (*str) & 0xFF); + if (ret < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[kLastChar] = '\0'; + LogInfo::MapleLogger() << buf; + } + str++; + } + + LogInfo::MapleLogger() << "\"\n"; +} + +AsmNode *AsmNode::CloneTree(MapleAllocator &allocator) const { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < inputConstraints.size(); ++i) { + node->inputConstraints.push_back(inputConstraints[i]); + } + for (size_t i = 0; i < asmOutputs.size(); ++i) { + node->asmOutputs.push_back(asmOutputs[i]); + } + for (size_t i = 0; i < outputConstraints.size(); ++i) { + node->outputConstraints.push_back(outputConstraints[i]); + } + for (size_t i = 0; i < clobberList.size(); ++i) { + node->clobberList.push_back(clobberList[i]); + } + for (size_t i = 0; i < gotoLabels.size(); ++i) { + node->gotoLabels.push_back(gotoLabels[i]); + } + node->SetNumOpnds(static_cast(GetNopndSize())); + return node; +} + +void AsmNode::DumpOutputs(int32 indent, std::string &uStr) const { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " 
:"; + size_t numOutputs = asmOutputs.size(); + + const MIRFunction *mirFunc = theMIRModule->CurFunction(); + if (numOutputs == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + for (size_t i = 0; i < numOutputs; i++) { + if (i != 0) { + PrintIndentation(indent + 2); // Increase the indent by 2 bytes. + } + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(outputConstraints[i]); + PrintString(uStr); + LogInfo::MapleLogger() << " "; + StIdx stIdx = asmOutputs[i].first; + RegFieldPair regFieldPair = asmOutputs[i].second; + if (!regFieldPair.IsReg()) { + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << "dassign"; + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "st is null"); + LogInfo::MapleLogger() << (stIdx.Islocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + DEBUG_ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << "regassign" + << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo(); + } + if (i != numOutputs - 1) { + LogInfo::MapleLogger() << ','; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +void AsmNode::DumpInputOperands(int32 indent, std::string &uStr) const { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + if (numOpnds == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + for (size_t i = 0; i < numOpnds; i++) { + if (i != 0) { + PrintIndentation(indent + 2); // Increase the indent by 2 bytes. + } + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(inputConstraints[i]); + PrintString(uStr); + LogInfo::MapleLogger() << " ("; + GetNopndAt(i)->Dump(indent + 4); // Increase the indent by 4 bytes. 
+ LogInfo::MapleLogger() << ")"; + if (i != static_cast(static_cast(numOpnds - 1))) { + LogInfo::MapleLogger() << ','; + } + LogInfo::MapleLogger() << "\n"; + } + } +} + +void AsmNode::Dump(int32 indent) const { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetName(op); + if (GetQualifier(kASMvolatile)) { LogInfo::MapleLogger() << " volatile"; } + if (GetQualifier(kASMinline)) { LogInfo::MapleLogger() << " inline"; } + if (GetQualifier(kASMgoto)) { LogInfo::MapleLogger() << " goto"; } + LogInfo::MapleLogger() << " { "; + EmitStr(asmString); + // print outputs + std::string uStr; + DumpOutputs(indent, uStr); + // print input operands + DumpInputOperands(indent, uStr); + // print clobber list + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + for (size_t i = 0; i < clobberList.size(); i++) { + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(clobberList[i]); + PrintString(uStr); + if (i != clobberList.size() - 1) { + LogInfo::MapleLogger() << ','; + } + } + LogInfo::MapleLogger() << '\n'; + // print labels + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + for (size_t i = 0; i < gotoLabels.size(); i++) { + LabelIdx offset = gotoLabels[i]; + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(offset); + if (i != gotoLabels.size() - 1) { + LogInfo::MapleLogger() << ','; + } + } + LogInfo::MapleLogger() << " }\n"; +} + +inline bool IntTypeVerify(PrimType pTyp) { + return pTyp == PTY_i32 || pTyp == PTY_u32 || pTyp == PTY_i64 || pTyp == PTY_u64; +} + +inline bool UnaryTypeVerify0(PrimType pTyp) { + bool verifyResult = IntTypeVerify(pTyp); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of bnot,extractbits,sext,zext must be in [i32,u32,i64,u64]\n"; + } + return verifyResult; +} + +bool ArithResTypeVerify(PrimType pTyp) { + switch (pTyp) { + case PTY_i32: + case PTY_u32: + case PTY_i64: + case PTY_u64: + case PTY_f32: + case PTY_f64: + return true; + case PTY_a32: + case PTY_a64: + case PTY_ptr: + return theMIRModule->IsCModule(); + default: + break; + } + + // Arithmetic operations on all vector types are allowed + PrimitiveType pt(pTyp); + if (pt.IsVector()) return true; + + return false; +} + +inline bool UnaryTypeVerify1(PrimType pType) { + bool verifyResult = ArithResTypeVerify(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of abs,neg must be in [i32,u32,i64,u64,f32,f64]\n"; + } + return verifyResult; +} + +inline bool UnaryTypeVerify2(PrimType pType) { + bool verifyResult = IsPrimitiveFloat(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result-type of recip,sqrt must be in [f32,f64]\n"; + } + return verifyResult; +} + +inline bool BinaryTypeVerify(PrimType pType) { + return ArithResTypeVerify(pType) || IsPrimitiveDynType(pType); +} + +inline bool BinaryGenericVerify(const BaseNode &bOpnd0, const BaseNode &bOpnd1) { + return bOpnd0.Verify() && bOpnd1.Verify(); +} + +inline bool CompareTypeVerify(PrimType pType) { + bool verifyResult = IsPrimitiveInteger(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of eq,ge,gt,le,lt,ne must be primitive integer\n"; + } + return verifyResult; +} + +enum PTYGroup { + kPTYGi32u32a32, + kPTYGi32u32a32PtrRef, + kPTYGi64u64a64, + kPTYGPtrRef, + kPTYGDynall, + kPTYGu1, + kPTYGSimpleObj, + kPTYGSimpleStr, + kPTYGOthers +}; + +uint8 GetPTYGroup(PrimType primType) { + switch (primType) { + case PTY_i32: + 
case PTY_u32:
+    case PTY_a32:
+      return kPTYGi32u32a32;
+    case PTY_i64:
+    case PTY_u64:
+    case PTY_a64:
+      return kPTYGi64u64a64;
+    case PTY_ref:
+    case PTY_ptr:
+      return kPTYGPtrRef;
+    case PTY_dynany:
+    case PTY_dyni32:
+    case PTY_dynf64:
+    case PTY_dynstr:
+    case PTY_dynobj:
+    case PTY_dynundef:
+    case PTY_dynbool:
+    case PTY_dynf32:
+    case PTY_dynnone:
+    case PTY_dynnull:
+      return kPTYGDynall;
+    case PTY_u1:
+      return kPTYGu1;
+    case PTY_simpleobj:
+      return kPTYGSimpleObj;
+    case PTY_simplestr:
+      return kPTYGSimpleStr;
+    default:
+      return kPTYGOthers;
+  }
+}
+
+uint8 GetCompGroupID(const BaseNode &opnd) {
+  return GetPTYGroup(opnd.GetPrimType());
+}
+
+/*
+  Refer to C11 Language Specification.
+  $ 6.3.1.8 Usual arithmetic conversions
+ */
+bool CompatibleTypeVerify(const BaseNode &opnd1, const BaseNode &opnd2) {
+  uint8 groupID1 = GetCompGroupID(opnd1);
+  uint8 groupID2 = GetCompGroupID(opnd2);
+  Opcode opCode2 = opnd2.GetOpCode();
+  bool verifyResult = (groupID1 == groupID2);
+  if (opCode2 == OP_gcmallocjarray || opCode2 == OP_gcpermallocjarray) {
+    verifyResult = (groupID1 == kPTYGi32u32a32);
+  }
+  if (!verifyResult) {
+    LogInfo::MapleLogger() << "\n#Error: incompatible operand types:\n";
+    opnd1.Dump();
+    opnd2.Dump();
+  }
+  return verifyResult;
+}
+
+bool FloatIntCvtTypeVerify(PrimType resPType, PrimType opndPType) {
+  bool resTypeVerf = resPType == PTY_i32 || resPType == PTY_u32 || resPType == PTY_i64 || resPType == PTY_u64;
+  if (!resTypeVerf) {
+    LogInfo::MapleLogger() << "\n#Error:result-type of ceil,floor,round,trunc must be in [i32,u32,i64,u64]\n";
+  }
+  bool opndTypeVerf = opndPType == PTY_f32 || opndPType == PTY_f64;
+  if (!opndTypeVerf) {
+    LogInfo::MapleLogger() << "\n#Error:operand-type of ceil,floor,round,trunc must be in [f32,f64]\n";
+  }
+  return resTypeVerf && opndTypeVerf;
+}
+
+inline MIRTypeKind GetTypeKind(StIdx stIdx) {
+  const MIRSymbol *var = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+  DEBUG_ASSERT(var != nullptr, "null ptr check");
+  MIRType *type = var->GetType();
+  DEBUG_ASSERT(type != nullptr, "null ptr check");
+  return type->GetKind();
+}
+
+inline MIRTypeKind GetTypeKind(TyIdx tyIdx) {
+  MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  DEBUG_ASSERT(type != nullptr, "null ptr check");
+  return type->GetKind();
+}
+
+inline MIRType *GetPointedMIRType(TyIdx tyIdx) {
+  MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  CHECK_FATAL(type->GetKind() == kTypePointer, "TyIdx: %d is not pointer type", static_cast<uint32>(tyIdx));
+  auto *ptrType = static_cast<MIRPtrType*>(type);
+  return ptrType->GetPointedType();
+}
+
+inline MIRTypeKind GetPointedTypeKind(TyIdx tyIdx) {
+  MIRType *pointedType = GetPointedMIRType(tyIdx);
+  DEBUG_ASSERT(pointedType != nullptr, "null ptr check");
+  return pointedType->GetKind();
+}
+
+MIRTypeKind GetFieldTypeKind(MIRStructType *structType, FieldID fieldId) {
+  TyIdx fieldTyIdx;
+  if (fieldId > 0) {
+    MIRType *mirType = structType->GetFieldType(fieldId);
+    fieldTyIdx = mirType->GetTypeIndex();
+  } else {
+    DEBUG_ASSERT(static_cast<size_t>(-fieldId) < structType->GetParentFieldsSize() + 1, "array index out of range");
+    fieldTyIdx = structType->GetParentFieldsElemt(-fieldId - 1).second.first;
+  }
+  return GetTypeKind(fieldTyIdx);
+}
+
+inline bool IsStructureTypeKind(MIRTypeKind kind) {
+  return kind == kTypeStruct || kind == kTypeStructIncomplete || kind == kTypeUnion || kind == kTypeClass ||
+         kind == kTypeClassIncomplete || kind == kTypeInterface || kind == kTypeInterfaceIncomplete;
+}
+
+inline bool
IsStructureVerify(FieldID fieldID, StIdx stIdx) { + if ((fieldID != 0) && (!IsStructureTypeKind(GetTypeKind(stIdx)))) { + LogInfo::MapleLogger() << "\n#Error:if fieldID is not 0, the variable must be a structure\n"; + return false; + } + return true; +} + +inline bool IsStructureVerify(FieldID fieldID, TyIdx tyIdx) { + if ((fieldID != 0) && (!IsStructureTypeKind(GetTypeKind(tyIdx)))) { + LogInfo::MapleLogger() << "\n#Error:if fieldID is not 0, the variable must be a structure\n"; + return false; + } + return true; +} + +bool IsSignedType(const BaseNode *opnd) { + switch (opnd->GetPrimType()) { + case PTY_i32: + case PTY_i64: + case PTY_f32: + case PTY_f64: + case PTY_dyni32: + case PTY_dynf32: + case PTY_dynf64: + return true; + default: + break; + } + return false; +} + +inline bool BinaryStrictSignVerify0(const BaseNode *bOpnd0, const BaseNode *bOpnd1) { + DEBUG_ASSERT(bOpnd0 != nullptr, "bOpnd0 is null"); + DEBUG_ASSERT(bOpnd1 != nullptr, "bOpnd1 is null"); + bool isDynany = (bOpnd0->GetPrimType() == PTY_dynany || bOpnd1->GetPrimType() == PTY_dynany); + return isDynany || (IsSignedType(bOpnd0) && IsSignedType(bOpnd1)) || + (!IsSignedType(bOpnd0) && !IsSignedType(bOpnd1)); +} + +bool BinaryStrictSignVerify1(const BaseNode *bOpnd0, const BaseNode *bOpnd1, const BaseNode *res) { + if (GetCompGroupID(*res) == kPTYGDynall) { + return BinaryStrictSignVerify0(bOpnd0, res) && BinaryStrictSignVerify0(bOpnd1, res) && + BinaryStrictSignVerify0(bOpnd0, bOpnd1); + } + return (IsSignedType(bOpnd0) && IsSignedType(bOpnd1) && IsSignedType(res)) || + (!IsSignedType(bOpnd0) && !IsSignedType(bOpnd1) && !IsSignedType(res)); +} + +bool UnaryNode::Verify() const { + bool resTypeVerf = true; + if (GetOpCode() == OP_bnot) { + resTypeVerf = UnaryTypeVerify0(GetPrimType()); + } else if (GetOpCode() == OP_lnot) { + if (!IsPrimitiveInteger(GetPrimType())) { + resTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:result-type of lnot must be primitive integer\n"; + } + } else if (GetOpCode() == OP_abs || GetOpCode() == OP_neg) { + resTypeVerf = UnaryTypeVerify1(GetPrimType()); + } else if (GetOpCode() == OP_recip || GetOpCode() == OP_sqrt) { + resTypeVerf = UnaryTypeVerify2(GetPrimType()); + } + + // When an opcode only specifies one type, check for compatibility + // between the operands and the result-type. 
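+  // (OP_alloca is exempted below: its result is an address while its operand is a size)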
+ bool compVerf = true; + // op_alloca : return type is not compatible with operand, skip + if (GetOpCode() != OP_alloca) { + compVerf = CompatibleTypeVerify(*uOpnd, *this); + } + bool opndExprVerf = uOpnd->Verify(); + return resTypeVerf && compVerf && opndExprVerf; +} + +bool TypeCvtNode::Verify() const { + bool opndTypeVerf = true; + bool opndSizeVerf = true; + if (GetOpCode() == OP_ceil || GetOpCode() == OP_floor || GetOpCode() == OP_round || GetOpCode() == OP_trunc) { + opndTypeVerf = FloatIntCvtTypeVerify(GetPrimType(), Opnd(0)->GetPrimType()); + } else if (GetOpCode() == OP_retype) { + if (GetPrimTypeSize(GetPrimType()) != GetPrimTypeSize(Opnd(0)->GetPrimType())) { + opndSizeVerf = false; + LogInfo::MapleLogger() << "\n#Error:The size of opnd0 and prim-type must be the same\n"; + } + } + bool opndExprVerf = Opnd(0)->Verify(); + return opndTypeVerf && opndSizeVerf && opndExprVerf; +} + +void AddRuntimeVerifyError(std::string errMsg, VerifyResult &verifyResult) { + LogInfo::MapleLogger() << "\n#Error: " << errMsg << '\n'; + // Throw Verify Error + verifyResult.AddPragmaVerifyError(verifyResult.GetCurrentClassName(), std::move(errMsg)); +} + +bool RetypeNode::VerifyPrimTypesAndOpnd() const { + PrimType toPrimType = GetPrimType(); + PrimType fromPrimType = Opnd(0)->GetPrimType(); + if (GetPrimTypeSize(toPrimType) != GetPrimTypeSize(fromPrimType)) { + LogInfo::MapleLogger() << "\n#Error: The size of opnd0 and prim-type must be the same\n"; + return false; + } + + if (!IsPrimitivePoint(toPrimType) || !IsPrimitivePoint(fromPrimType)) { + LogInfo::MapleLogger() << "\n#Error: Wrong prim-type in retype node, should be ref or ptr\n"; + return false; + } + return Opnd(0)->Verify(); +} + +bool RetypeNode::CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const { + // Array types are subtypes of Object. + // The intent is also that array types are subtypes of Cloneable and java.io.Serializable. + if (IsInterfaceOrClass(to)) { + Klass &toKlass = utils::ToRef(verifyResult.GetKlassHierarchy().GetKlassFromStrIdx(to.GetNameStrIdx())); + const std::string &toKlassName = toKlass.GetKlassName(); + const std::string &javaLangObject = namemangler::kJavaLangObjectStr; + const std::string javaLangCloneable = "Ljava_2Flang_2FCloneable_3B"; + const std::string javaIoSerializable = "Ljava_2Fio_2FSerializable_3B"; + if (toKlassName == javaLangObject || toKlassName == javaIoSerializable || toKlassName == javaLangCloneable) { + return true; + } + } + + AddRuntimeVerifyError("Java array " + from.GetName() + " is not assignable to " + to.GetName(), verifyResult); + return false; +} + +bool RetypeNode::IsJavaAssignable(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const { + // isJavaAssignable(arrayOf(X), arrayOf(Y)) :- compound(X), compound(Y), isJavaAssignable(X, Y). + // arrayOf(X), arrayOf(Y) should already be X, Y here + if (from.IsMIRJarrayType()) { + return CheckFromJarray(from, to, verifyResult); + } + // isJavaAssignable(arrayOf(X), arrayOf(Y)) :- atom(X), atom(Y), X = Y. 
+ // This rule is not applicable to Maple IR + if (from.IsScalarType() && to.IsScalarType()) { + return true; + } + + if (IsInterfaceOrClass(from) && IsInterfaceOrClass(to)) { + const KlassHierarchy &klassHierarchy = verifyResult.GetKlassHierarchy(); + const std::string javaLangObject = namemangler::kJavaLangObjectStr; + Klass &fromKlass = utils::ToRef(klassHierarchy.GetKlassFromStrIdx(from.GetNameStrIdx())); + Klass &toKlass = utils::ToRef(klassHierarchy.GetKlassFromStrIdx(to.GetNameStrIdx())); + // We can cast everything to java.lang.Object, but interface isn't subclass of that, so we need this branch + if (toKlass.GetKlassName() == javaLangObject) { + return true; + } + // isJavaAssignable(class(_, _), class(To, L)) :- loadedClass(To, L, ToClass), classIsInterface(ToClass). + // isJavaAssignable(From, To) :- isJavaSubclassOf(From, To). + bool isAssignableKlass = klassHierarchy.IsSuperKlass(&toKlass, &fromKlass) || + klassHierarchy.IsSuperKlassForInterface(&toKlass, &fromKlass) || + klassHierarchy.IsInterfaceImplemented(&toKlass, &fromKlass); + if (isAssignableKlass) { + return true; + } + AddRuntimeVerifyError("Java type " + fromKlass.GetKlassName() + " is NOT assignable to " + toKlass.GetKlassName(), + verifyResult); + return false; + } + AddRuntimeVerifyError(from.GetName() + " is NOT assignable to " + to.GetName(), verifyResult); + return false; +} + +bool RetypeNode::VerifyCompleteMIRType(const MIRType &from, const MIRType &to, bool isJavaRefType, + VerifyResult &verifyResult) const { + if (from.IsScalarType() && to.IsScalarType() && !isJavaRefType) { + if (GetPTYGroup(from.GetPrimType()) == GetPTYGroup(to.GetPrimType())) { + return true; + } + LogInfo::MapleLogger() << "\n#Error: retype scalar type failed\n"; + return false; + } + if (!verifyResult.GetMIRModule().IsJavaModule()) { + return true; + } + isJavaRefType |= IsJavaRefType(from) && IsJavaRefType(to); + if (isJavaRefType) { + return IsJavaAssignable(from, to, verifyResult); + } + + if (from.GetKind() != to.GetKind()) { + if (from.GetPrimType() == PTY_void || to.GetPrimType() == PTY_void) { + return true; + } + LogInfo::MapleLogger() << "\n#Error: Retype different kind: from " << from.GetKind() << " to " << to.GetKind() + << "\n"; + return false; + } + return true; +} + +bool RetypeNode::VerifyJarrayDimention(const MIRJarrayType &from, const MIRJarrayType &to, + VerifyResult &verifyResult) const { + int fromDim = const_cast(from).GetDim(); + int toDim = const_cast(to).GetDim(); + if (fromDim == toDim) { + return true; + } else if (fromDim > toDim) { + const MIRType *toElemType = to.GetElemType(); + while (toElemType != nullptr && (toElemType->IsMIRJarrayType() || toElemType->IsMIRPtrType())) { + toElemType = toElemType->IsMIRJarrayType() ? 
static_cast<const MIRJarrayType*>(toElemType)->GetElemType()
+          : static_cast<const MIRPtrType*>(toElemType)->GetPointedType();
+    }
+    if (toElemType != nullptr && CheckFromJarray(from, *toElemType, verifyResult)) {
+      return true;
+    }
+  }
+  Dump(0);
+  std::string errorMsg =
+      "Arrays have different dimensions: from " + std::to_string(fromDim) + " to " + std::to_string(toDim);
+  AddRuntimeVerifyError(std::move(errorMsg), verifyResult);
+  return false;
+}
+
+bool RetypeNode::Verify(VerifyResult &verifyResult) const {
+  // If RetypeNode::Verify returns false, dump this node to show the wrong IR
+  if (!VerifyPrimTypesAndOpnd()) {
+    Dump(0);
+    LogInfo::MapleLogger() << "\n#Error: Verify PrimTypes and Opnd failed in retype node\n";
+    return false;
+  }
+  bool isJavaRefType = false;
+  const MIRType *fromMIRType = verifyResult.GetCurrentFunction()->GetNodeType(*Opnd(0));
+  const MIRType *toMIRType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  while (fromMIRType != nullptr && toMIRType != nullptr && BothPointerOrJarray(*fromMIRType, *toMIRType)) {
+    if (fromMIRType->IsMIRJarrayType()) {
+      isJavaRefType = true;
+      if (!VerifyJarrayDimention(static_cast<const MIRJarrayType&>(*fromMIRType),
+                                 static_cast<const MIRJarrayType&>(*toMIRType), verifyResult)) {
+        return false;
+      }
+      fromMIRType = static_cast<const MIRJarrayType*>(fromMIRType)->GetElemType();
+      toMIRType = static_cast<const MIRJarrayType*>(toMIRType)->GetElemType();
+    } else {
+      fromMIRType = static_cast<const MIRPtrType*>(fromMIRType)->GetPointedType();
+      toMIRType = static_cast<const MIRPtrType*>(toMIRType)->GetPointedType();
+    }
+  }
+  if (fromMIRType == nullptr || toMIRType == nullptr) {
+    Dump(0);
+    LogInfo::MapleLogger() << "\n#Error: MIRType is nullptr in retype node\n";
+    return false;
+  }
+
+  if (fromMIRType->IsIncomplete() || toMIRType->IsIncomplete()) {
+    // Add Deferred Check
+    const std::string &currentClassName = verifyResult.GetCurrentClassName();
+    LogInfo::MapleLogger(kLlDbg) << "Add AssignableCheck from " << fromMIRType->GetName() << " to "
+                                 << toMIRType->GetName() << " in class " << currentClassName << '\n';
+    verifyResult.AddPragmaAssignableCheck(currentClassName, fromMIRType->GetName(), toMIRType->GetName());
+    // Deferred Assignable Check returns true because we should collect all the deferred checks for runtime
+    return true;
+  }
+
+  if (VerifyCompleteMIRType(*fromMIRType, *toMIRType, isJavaRefType, verifyResult)) {
+    return true;
+  }
+  Dump(0);
+  LogInfo::MapleLogger() << "\n#Error: Verify Complete MIRType failed in retype node\n";
+  return false;
+}
+
+bool UnaryStmtNode::VerifyThrowable(VerifyResult &verifyResult) const {
+  const BaseNode *rhs = GetRHS();
+  if (rhs == nullptr) {
+    return true;
+  }
+
+  const MIRType *mirType = verifyResult.GetCurrentFunction()->GetNodeType(*rhs);
+  if (mirType != nullptr && mirType->IsMIRPtrType()) {
+    mirType = static_cast<const MIRPtrType*>(mirType)->GetPointedType();
+  }
+  if (mirType != nullptr) {
+    if (mirType->GetPrimType() == PTY_void) {
+      return true;
+    }
+    if (mirType->IsIncomplete()) {
+      // Add Deferred Check
+      const std::string &currentClassName = verifyResult.GetCurrentClassName();
+      std::string throwableName = "Ljava_2Flang_2FThrowable_3B";
+      LogInfo::MapleLogger(kLlDbg) << "Add AssignableCheck from " << mirType->GetName() << " to " << throwableName
+                                   << " in class " << currentClassName << '\n';
+      verifyResult.AddPragmaAssignableCheck(currentClassName, mirType->GetName(), std::move(throwableName));
+      // Deferred Assignable Check returns true because we should collect all the deferred checks for runtime
+      return true;
+    }
+    if (mirType->IsMIRClassType() && static_cast<const MIRClassType*>(mirType)->IsExceptionType()) {
+      return true;
+    }
+  }
+  Dump(0);
+  std::string errMsg =
(mirType == nullptr ? "nullptr" : mirType->GetName()); + errMsg += " is NOT throwable."; + AddRuntimeVerifyError(std::move(errMsg), verifyResult); + return false; +} + +bool IntrinsicopNode::Verify(VerifyResult &verifyResult) const { + if (GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH && !VerifyJArrayLength(verifyResult)) { + return false; + } + return VerifyOpnds(); +} + +bool IntrinsicopNode::VerifyJArrayLength(VerifyResult &verifyResult) const { + BaseNode &val = utils::ToRef(Opnd(0)); + const MIRType *valType = verifyResult.GetCurrentFunction()->GetNodeType(val); + if (valType != nullptr && valType->IsMIRPtrType()) { + valType = static_cast(valType)->GetPointedType(); + if (valType != nullptr && !valType->IsMIRJarrayType()) { + Dump(0); + AddRuntimeVerifyError("Operand of array length is not array", verifyResult); + return false; + } + } + return true; +} + +bool IreadNode::Verify() const { + bool addrExprVerf = Opnd(0)->Verify(); + bool pTypeVerf = true; + bool structVerf = true; + if (GetTypeKind(tyIdx) != kTypePointer) { + LogInfo::MapleLogger() << "\n#Error: must be a pointer type\n"; + return false; + } + if (GetOpCode() == OP_iaddrof) { + pTypeVerf = IsAddress(GetPrimType()); + if (!pTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:prim-type must be either ptr, ref, a32 or a64\n"; + } + } else { + if (fieldID == 0 && IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() + << "\n#Error:If the content dereferenced is a structure, then should specify agg\n"; + } + } + } + if (fieldID != 0) { + if (!IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + structVerf = false; + LogInfo::MapleLogger() << "\n#Error:If field-id is not 0, then type must specify pointer to a structure\n"; + } else { + MIRType *type = GetPointedMIRType(tyIdx); + auto *stTy = static_cast(type); + if (GetOpCode() == OP_iread && stTy->GetFieldsSize() != 0) { + if (IsStructureTypeKind(GetFieldTypeKind(stTy, fieldID))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:If the field itself is a structure, prim-type should specify agg\n"; + } + } + } + } + } + return addrExprVerf && pTypeVerf && structVerf; +} + +bool RegreadNode::Verify() const { + return true; +} + +bool IreadoffNode::Verify() const { + return true; +} + +bool IreadFPoffNode::Verify() const { + return true; +} + +bool ExtractbitsNode::Verify() const { + bool opndExprVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*Opnd(0), *this); + bool resTypeVerf = UnaryTypeVerify0(GetPrimType()); + constexpr int numBitsInByte = 8; + bool opnd0SizeVerf = (numBitsInByte * GetPrimTypeSize(Opnd(0)->GetPrimType()) >= bitsSize); + if (!opnd0SizeVerf) { + LogInfo::MapleLogger() + << "\n#Error: The operand of extractbits must be large enough to contain the specified bitfield\n"; + } + return opndExprVerf && compVerf && resTypeVerf && opnd0SizeVerf; +} + +bool BinaryNode::Verify() const { + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = BinaryTypeVerify(GetPrimType()); + if (!resTypeVerf && theMIRModule->IsCModule()) { + if ((IsAddress(GetBOpnd(0)->GetPrimType()) && !IsAddress(GetBOpnd(1)->GetPrimType())) || + (!IsAddress(GetBOpnd(0)->GetPrimType()) && IsAddress(GetBOpnd(1)->GetPrimType()))) { + resTypeVerf = true; // don't print the same kind of error message twice + if (GetOpCode() != OP_add && GetOpCode() != OP_sub && GetOpCode() != OP_CG_array_elem_add) { + LogInfo::MapleLogger() << "\n#Error: 
+        LogInfo::MapleLogger() << "\n#Error: Only add and sub are allowed for pointer arithmetic\n";
+        this->Dump();
+      } else if (!IsAddress(GetPrimType())) {
+        LogInfo::MapleLogger()
+            << "\n#Error: Adding an offset to a pointer or subtracting one from a pointer should result in a pointer "
+               "value\n";
+        this->Dump();
+      }
+    }
+  }
+  if (!resTypeVerf) {
+    LogInfo::MapleLogger()
+        << "\n#Error:result type of [add,div,sub,mul,max,min] and [ashr,band,bior,bxor,land,lior,lshr,shl,rem] must "
+           "be in [i32,u32,i64,u64,f32,f64,dynamic-type]\n";
+    this->Dump();
+  }
+  bool comp0Verf = CompatibleTypeVerify(*GetBOpnd(0), *this);
+  bool comp1Verf = true;
+  // Shift operations do not require same-type operands
+  if (GetOpCode() < OP_ashr || GetOpCode() > OP_shl) {
+    comp1Verf = CompatibleTypeVerify(*GetBOpnd(1), *this);
+  }
+  bool signVerf = true;
+  bool typeVerf = resTypeVerf && comp0Verf && comp1Verf;
+  if (typeVerf) {
+    if (GetOpCode() == OP_div || GetOpCode() == OP_mul || GetOpCode() == OP_rem || GetOpCode() == OP_max ||
+        GetOpCode() == OP_min) {
+      signVerf = BinaryStrictSignVerify1(GetBOpnd(0), GetBOpnd(1), this);
+      if (!signVerf) {
+        LogInfo::MapleLogger()
+            << "\n#Error:the result and operands of [div,mul,rem,max,min] must be of the same sign\n";
+      }
+    }
+  }
+  return opndsVerf && typeVerf && signVerf;
+}
+
+bool CompareNode::Verify() const {
+  bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1));
+  bool compVerf = CompatibleTypeVerify(*GetBOpnd(0), *GetBOpnd(1));
+  bool resTypeVerf = CompareTypeVerify(GetPrimType());
+  if (!resTypeVerf) {
+    this->Dump();
+  }
+  bool signVerf = true;
+  bool typeVerf = compVerf && resTypeVerf;
+  if (typeVerf && GetOpCode() != OP_eq && GetOpCode() != OP_ne) {
+    signVerf = BinaryStrictSignVerify0(GetBOpnd(0), GetBOpnd(1));
+    if (!signVerf) {
+      LogInfo::MapleLogger() << "\n#Error:the operands of [ge,gt,le,lt] must be of the same sign\n";
+    }
+  }
+  return opndsVerf && typeVerf && signVerf;
+}
+
+bool DepositbitsNode::Verify() const {
+  bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1));
+  bool resTypeVerf = IntTypeVerify(GetPrimType());
+  constexpr int numBitsInByte = 8;
+  bool opnd0SizeVerf = (numBitsInByte * GetPrimTypeSize(GetBOpnd(0)->GetPrimType()) >= bitsSize);
+  if (!opnd0SizeVerf) {
+    LogInfo::MapleLogger() << "\n#Error:opnd0 of depositbits must be large enough to contain the specified bitfield\n";
+  }
+  return opndsVerf && resTypeVerf && opnd0SizeVerf;
+}
+
+bool IntrinsicopNode::Verify() const {
+  return VerifyOpnds();
+}
+
+bool TernaryNode::Verify() const {
+  bool comp1Verf = CompatibleTypeVerify(*topnd[kSecondOpnd], *this);
+  bool comp2Verf = CompatibleTypeVerify(*topnd[kThirdOpnd], *this);
+  bool opnd0TypeVerf = IsPrimitiveInteger(topnd[kFirstOpnd]->GetPrimType());
+  if (!opnd0TypeVerf) {
+    LogInfo::MapleLogger() << "\n#Error:select-opnd0 must be of integer type\n";
+  }
+  return comp1Verf && comp2Verf && opnd0TypeVerf;
+}
+
+bool SizeoftypeNode::Verify() const {
+  return true;
+}
+
+bool ArrayNode::Verify() const {
+  bool opndsVerf = VerifyOpnds();
+  bool resTypeVerf = IsAddress(GetPrimType());
+  bool opndsTypeVerf = true;
+  if (!resTypeVerf) {
+    LogInfo::MapleLogger() << "\n#Error:result-type of array must be in [ptr,ref,a32,a64]\n";
+  }
+  bool opnd0TypeVerf = IsAddress(GetNopndAt(0)->GetPrimType());
+  if (!opnd0TypeVerf) {
+    LogInfo::MapleLogger() << "\n#Error:result-type of array-opnd0 must be in [ptr,ref,a32,a64]\n";
+  }
+  for (size_t i = 1; i < NumOpnds(); ++i) {
+    if (!IntTypeVerify(GetNopndAt(i)->GetPrimType())) {
+      opndsTypeVerf = false;
+      LogInfo::MapleLogger()
+          << "\n#Error:result of the array index operands must be in [i32,u32,i64,u64]\n";
+    }
+  }
+  return opndsVerf && resTypeVerf && opnd0TypeVerf && opndsTypeVerf;
+}
+
+bool DassignNode::Verify() const {
+  bool structVerf = IsStructureVerify(fieldID, stIdx);
+  bool rhsVerf = GetRHS()->Verify();
+  return structVerf && rhsVerf;
+}
+
+bool AddrofNode::Verify() const {
+  bool pTypeVerf = true;
+  bool structVerf = IsStructureVerify(fieldID, GetStIdx());
+  if (GetOpCode() == OP_dread) {
+    if (fieldID == 0 && IsStructureTypeKind(GetTypeKind(GetStIdx()))) {
+      if (GetPrimType() != PTY_agg) {
+        pTypeVerf = false;
+        LogInfo::MapleLogger() << "\n#Error:if variable is a structure, prim-type should specify agg\n";
+      }
+    }
+    if (fieldID != 0 && structVerf) {
+      const MIRSymbol *var = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(GetStIdx());
+      DEBUG_ASSERT(var != nullptr, "null ptr check");
+      MIRType *type = var->GetType();
+      auto *stTy = static_cast<MIRStructType*>(type);
+      if (IsStructureTypeKind(GetFieldTypeKind(stTy, fieldID))) {
+        if (GetPrimType() != PTY_agg) {
+          pTypeVerf = false;
+          LogInfo::MapleLogger() << "\n#Error:if the field itself is a structure, prim-type should specify agg\n";
+        }
+      }
+    }
+  } else {
+    pTypeVerf = IsAddress(GetPrimType());
+    if (!pTypeVerf) {
+      LogInfo::MapleLogger()
+          << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n";
+    }
+  }
+  return pTypeVerf && structVerf;
+}
+
+bool AddroffuncNode::Verify() const {
+  bool addrTypeVerf = IsAddress(GetPrimType());
+  if (!addrTypeVerf) {
+    LogInfo::MapleLogger()
+        << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n";
+  }
+  return addrTypeVerf;
+}
+
+bool AddroflabelNode::Verify() const {
+  bool addrTypeVerf = IsAddress(GetPrimType());
+  if (!addrTypeVerf) {
+    LogInfo::MapleLogger()
+        << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n";
+  }
+  return addrTypeVerf;
+}
+
+bool IassignNode::Verify() const {
+  bool addrExpVerf = addrExpr->Verify();
+  bool rhsVerf = rhs->Verify();
+  bool structVerf = true;
+  if (GetTypeKind(tyIdx) != kTypePointer) {
+    LogInfo::MapleLogger() << "\n#Error: must be a pointer type\n";
+    return false;
+  }
+  if (fieldID != 0) {
+    if (!IsStructureTypeKind(GetPointedTypeKind(tyIdx))) {
+      structVerf = false;
+      LogInfo::MapleLogger() << "\n#Error:If field-id is not 0, the computed address must correspond to a structure\n";
+    }
+  }
+  return addrExpVerf && rhsVerf && structVerf;
+}
+
+bool IassignoffNode::Verify() const {
+  bool addrVerf = GetBOpnd(0)->Verify();
+  bool rhsVerf = GetBOpnd(1)->Verify();
+  bool compVerf = CompatibleTypeVerify(*this, *GetBOpnd(1));
+  return addrVerf && rhsVerf && compVerf;
+}
+
+bool IassignFPoffNode::Verify() const {
+  bool rhsVerf = Opnd(0)->Verify();
+  bool compVerf = CompatibleTypeVerify(*this, *Opnd(0));
+  return rhsVerf && compVerf;
+}
+
+bool RegassignNode::Verify() const {
+  bool rhsVerf = Opnd(0)->Verify();
+  bool compVerf = CompatibleTypeVerify(*this, *Opnd(0));
+  return rhsVerf && compVerf;
+}
+
+bool CondGotoNode::Verify() const {
+  bool opndExprVerf = UnaryStmtNode::Opnd(0)->Verify();
+  bool opndTypeVerf = true;
+  if (!IsPrimitiveInteger(UnaryStmtNode::Opnd(0)->GetPrimType())) {
+    opndTypeVerf = false;
+    LogInfo::MapleLogger() << "\n#Error:the operand of brfalse and brtrue must be primitive integer\n";
+  }
+  return opndExprVerf && opndTypeVerf;
+}
+
+bool SwitchNode::Verify() const {
+  bool opndExprVerf = switchOpnd->Verify();
+  bool opndTypeVerf =
IntTypeVerify(switchOpnd->GetPrimType()); + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error: the operand of switch must be in [i32,u32,i64,u64]\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool BinaryStmtNode::Verify() const { + return GetBOpnd(0)->Verify() && GetBOpnd(1)->Verify() && CompatibleTypeVerify(*GetBOpnd(0), *GetBOpnd(1)) && + BinaryStrictSignVerify0(GetBOpnd(0), GetBOpnd(1)); +} + +bool RangeGotoNode::Verify() const { + bool opndExprVerf = Opnd(0)->Verify(); + bool opndTypeVerf = IntTypeVerify(Opnd(0)->GetPrimType()); + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error: the operand of rangegoto must be in [i32,u32,i64,u64]\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool BlockNode::Verify() const { + for (auto &stmt : GetStmtNodes()) { + if (!stmt.Verify()) { + return false; + } + } + return true; +} + +bool BlockNode::Verify(VerifyResult &verifyResult) const { + auto &nodes = GetStmtNodes(); + return !std::any_of(nodes.begin(), nodes.end(), [&verifyResult](auto &stmt) { return !stmt.Verify(verifyResult); }); +} + +bool DoloopNode::Verify() const { + bool startVerf = startExpr->Verify(); + bool contVerf = condExpr->Verify(); + bool incrVerf = incrExpr->Verify(); + bool doBodyVerf = true; + if (doBody) { + doBodyVerf = doBody->Verify(); + } + return startVerf && contVerf && incrVerf && doBodyVerf; +} + +bool IfStmtNode::Verify() const { + bool condVerf = Opnd()->Verify(); + bool thenVerf = true; + bool elseVerf = true; + if (thenPart != nullptr) { + thenVerf = thenPart->Verify(); + } + if (elsePart != nullptr) { + elseVerf = elsePart->Verify(); + } + return condVerf && thenVerf && elseVerf; +} + +bool WhileStmtNode::Verify() const { + bool condVerf = Opnd(0)->Verify(); + bool bodyVerf = true; + if (body != nullptr) { + bodyVerf = body->Verify(); + } + return condVerf && bodyVerf; +} + +bool NaryStmtNode::Verify() const { + return VerifyOpnds(); +} + +bool CallNode::Verify() const { + return VerifyOpnds(); +} + +bool IcallNode::Verify() const { + bool nOpndsVerf = true; + for (size_t i = 0; i < NumOpnds(); ++i) { + if (!GetNopndAt(i)->Verify()) { + nOpndsVerf = false; + break; + } + } + return nOpndsVerf; +} + +bool IntrinsiccallNode::Verify() const { + return VerifyOpnds(); +} + +std::string SafetyCallCheckStmtNode::GetFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(callFuncNameIdx); +} + +std::string SafetyCallCheckStmtNode::GetStmtFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(stmtFuncNameIdx); +} + +std::string SafetyCheckStmtNode::GetFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcNameIdx); +} + +bool UnaryNode::IsSameContent(const BaseNode *node) const { + auto *unaryNode = dynamic_cast(node); + if ((this == unaryNode) || + (unaryNode != nullptr && (GetOpCode() == unaryNode->GetOpCode()) && (GetPrimType() == unaryNode->GetPrimType()) && + (uOpnd && unaryNode->Opnd(0) && uOpnd->IsSameContent(unaryNode->Opnd(0))))) { + return true; + } else { + return false; + } +} + +bool TypeCvtNode::IsSameContent(const BaseNode *node) const { + auto *tyCvtNode = dynamic_cast(node); + if ((this == tyCvtNode) || + (tyCvtNode != nullptr && (fromPrimType == tyCvtNode->FromType()) && UnaryNode::IsSameContent(tyCvtNode))) { + return true; + } else { + return false; + } +} + +bool IreadNode::IsSameContent(const BaseNode *node) const { + auto *ireadNode = dynamic_cast(node); + if ((this == ireadNode) || (ireadNode != nullptr && (tyIdx == ireadNode->GetTyIdx()) && + (fieldID 
== ireadNode->GetFieldID()) && UnaryNode::IsSameContent(ireadNode))) { + return true; + } else { + return false; + } +} + +bool IreadoffNode::IsSameContent(const BaseNode *node) const { + auto *ireadoffNode = dynamic_cast(node); + if ((this == ireadoffNode) || (ireadoffNode != nullptr && (GetOffset() == ireadoffNode->GetOffset()) && + UnaryNode::IsSameContent(ireadoffNode))) { + return true; + } else { + return false; + } +} + +bool IreadFPoffNode::IsSameContent(const BaseNode *node) const { + auto *ireadFPoffNode = dynamic_cast(node); + if ((this == ireadFPoffNode) || + (ireadFPoffNode != nullptr && (GetOpCode() == ireadFPoffNode->GetOpCode()) && + (GetPrimType() == ireadFPoffNode->GetPrimType()) && (GetOffset() == ireadFPoffNode->GetOffset()))) { + return true; + } else { + return false; + } +} + +bool BinaryOpnds::IsSameContent(const BaseNode *node) const { + auto *binaryOpnds = dynamic_cast(node); + if ((this == binaryOpnds) || (binaryOpnds != nullptr && GetBOpnd(0)->IsSameContent(binaryOpnds->GetBOpnd(0)) && + GetBOpnd(1)->IsSameContent(binaryOpnds->GetBOpnd(1)))) { + return true; + } else { + return false; + } +} + +bool BinaryNode::IsSameContent(const BaseNode *node) const { + auto *binaryNode = dynamic_cast(node); + if ((this == binaryNode) || + (binaryNode != nullptr && (GetOpCode() == binaryNode->GetOpCode()) && + (GetPrimType() == binaryNode->GetPrimType()) && BinaryOpnds::IsSameContent(binaryNode))) { + return true; + } else { + return false; + } +} + +bool ConstvalNode::IsSameContent(const BaseNode *node) const { + auto *constvalNode = dynamic_cast(node); + if (this == constvalNode) { + return true; + } + if (constvalNode == nullptr) { + return false; + } + const MIRConst *mirConst = constvalNode->GetConstVal(); + if (constVal == mirConst) { + return true; + } + if (constVal->GetKind() != mirConst->GetKind()) { + return false; + } + if (constVal->GetKind() == kConstInt) { + // integer may differ in primtype, and they may be different MIRIntConst Node + return static_cast(constVal)->GetValue() == static_cast(mirConst)->GetValue(); + } else { + return false; + } +} + +bool ConststrNode::IsSameContent(const BaseNode *node) const { + if (node->GetOpCode() != OP_conststr) { + return false; + } + auto *cstrNode = static_cast(node); + return strIdx == cstrNode->strIdx; +} + +bool Conststr16Node::IsSameContent(const BaseNode *node) const { + if (node->GetOpCode() != OP_conststr16) { + return false; + } + auto *cstr16Node = static_cast(node); + return strIdx == cstr16Node->strIdx; +} + +bool AddrofNode::IsSameContent(const BaseNode *node) const { + auto *addrofNode = dynamic_cast(node); + if ((this == addrofNode) || + (addrofNode != nullptr && (GetOpCode() == addrofNode->GetOpCode()) && + (GetPrimType() == addrofNode->GetPrimType()) && (GetNumOpnds() == addrofNode->GetNumOpnds()) && + (stIdx.FullIdx() == addrofNode->GetStIdx().FullIdx()) && (fieldID == addrofNode->GetFieldID()))) { + return true; + } else { + return false; + } +} + +bool DreadoffNode::IsSameContent(const BaseNode *node) const { + auto *dreaddoffNode = dynamic_cast(node); + if ((this == dreaddoffNode) || (dreaddoffNode != nullptr && (GetOpCode() == dreaddoffNode->GetOpCode()) && + (GetPrimType() == dreaddoffNode->GetPrimType()) && (stIdx == dreaddoffNode->stIdx) && + (offset == dreaddoffNode->offset))) { + return true; + } else { + return false; + } +} + +bool RegreadNode::IsSameContent(const BaseNode *node) const { + auto *regreadNode = dynamic_cast(node); + if ((this == regreadNode) || + (regreadNode != nullptr && 
(GetOpCode() == regreadNode->GetOpCode()) && + (GetPrimType() == regreadNode->GetPrimType()) && (regIdx == regreadNode->GetRegIdx()))) { + return true; + } else { + return false; + } +} + +bool AddroffuncNode::IsSameContent(const BaseNode *node) const { + auto *addroffuncNode = dynamic_cast(node); + if ((this == addroffuncNode) || + (addroffuncNode != nullptr && (GetOpCode() == addroffuncNode->GetOpCode()) && + (GetPrimType() == addroffuncNode->GetPrimType()) && (puIdx == addroffuncNode->GetPUIdx()))) { + return true; + } else { + return false; + } +} + +bool AddroflabelNode::IsSameContent(const BaseNode *node) const { + auto *addroflabelNode = dynamic_cast(node); + if ((this == addroflabelNode) || + (addroflabelNode != nullptr && (GetOpCode() == addroflabelNode->GetOpCode()) && + (GetPrimType() == addroflabelNode->GetPrimType()) && (offset == addroflabelNode->GetOffset()))) { + return true; + } else { + return false; + } +} +} // namespace maple diff --git a/ecmascript/mapleall/maple_ir/src/mir_parser.cpp b/ecmascript/mapleall/maple_ir/src/mir_parser.cpp new file mode 100755 index 0000000000000000000000000000000000000000..ced8517d5050276b659aad8b4cf6bcc214faaf31 --- /dev/null +++ b/ecmascript/mapleall/maple_ir/src/mir_parser.cpp @@ -0,0 +1,3495 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mir_parser.h" +#include "mir_function.h" +#include "opcode_info.h" + +namespace maple { +std::map MIRParser::funcPtrMapForParseExpr = + MIRParser::InitFuncPtrMapForParseExpr(); +std::map MIRParser::funcPtrMapForParseStmt = + MIRParser::InitFuncPtrMapForParseStmt(); +std::map MIRParser::funcPtrMapForParseStmtBlock = + MIRParser::InitFuncPtrMapForParseStmtBlock(); + +bool MIRParser::ParseStmtDassign(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_dassign) { + Error("expect dassign but get "); + return false; + } + // parse %i + lexer.NextToken(); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDassign"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + DEBUG_ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + auto *assignStmt = mod.CurFuncCodeMemPool()->New(); + assignStmt->SetStIdx(stidx); + TokenKind nextToken = lexer.NextToken(); + // parse field id + if (nextToken == TK_intconst) { // may be a field id + assignStmt->SetFieldID(lexer.GetTheIntVal()); + (void)lexer.NextToken(); + } + // parse expression like (constval i32 0) + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + assignStmt->SetRHS(expr); + stmt = assignStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtDassignoff(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_dassignoff) { + Error("expect dassignoff but get "); + return false; + } + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect primitive type but get "); + return false; + } + PrimType primType = GetPrimitiveType(lexer.GetTokenKind()); + // parse %i + lexer.NextToken(); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDassign"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + DEBUG_ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + DassignoffNode *assignStmt = mod.CurFuncCodeMemPool()->New(); + assignStmt->SetPrimType(primType); + assignStmt->stIdx = stidx; + TokenKind nextToken = lexer.NextToken(); + // parse offset + if (nextToken == TK_intconst) { + assignStmt->offset = static_cast(lexer.GetTheIntVal()); + (void)lexer.NextToken(); + } else { + Error("expect integer offset but get "); + return false; + } + // parse expression like (constval i32 0) + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + assignStmt->SetRHS(expr); + stmt = assignStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtRegassign(StmtNodePtr &stmt) { + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + auto *regAssign = mod.CurFuncCodeMemPool()->New(); + regAssign->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + lexer.NextToken(); + if (lexer.GetTokenKind() == TK_specialreg) { + PregIdx tempPregIdx = regAssign->GetRegIdx(); + bool isSuccess = ParseSpecialReg(tempPregIdx); + regAssign->SetRegIdx(tempPregIdx); + if (!isSuccess) { + return false; + } + } else if (lexer.GetTokenKind() == TK_preg) { + PregIdx tempPregIdx = regAssign->GetRegIdx(); + bool isSuccess = ParsePseudoReg(regAssign->GetPrimType(), tempPregIdx); + regAssign->SetRegIdx(tempPregIdx); + if (!isSuccess) { + 
return false;
+    }
+  } else {
+    Error("expect special or pseudo register but get ");
+    return false;
+  }
+  BaseNode *expr = nullptr;
+  if (!ParseExprOneOperand(expr)) {
+    return false;
+  }
+  regAssign->SetOpnd(expr, 0);
+  if (regAssign->GetRegIdx() > 0) {  // check type consistency for the preg
+    MIRPreg *preg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(regAssign->GetRegIdx());
+    if (preg->GetPrimType() == kPtyInvalid) {
+      preg->SetPrimType(expr->GetPrimType());
+    } else if (preg->GetPrimType() == PTY_dynany) {
+      if (!IsPrimitiveDynType(expr->GetPrimType())) {
+        Error("inconsistent preg primitive dynamic type at ");
+        return false;
+      }
+    }
+  }
+  stmt = regAssign;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtIassign(StmtNodePtr &stmt) {
+  // iAssign <* [10] int> ()
+  if (lexer.GetTokenKind() != TK_iassign) {
+    Error("expect iassign but get ");
+    return false;
+  }
+  // expect <> derived type
+  lexer.NextToken();
+  TyIdx tyIdx(0);
+  if (!ParseDerivedType(tyIdx)) {
+    Error("ParseStmtIassign failed when parsing derived type");
+    return false;
+  }
+  auto *iAssign = mod.CurFuncCodeMemPool()->New<IassignNode>();
+  iAssign->SetTyIdx(tyIdx);
+  if (lexer.GetTokenKind() == TK_intconst) {
+    iAssign->SetFieldID(lexer.theIntVal);
+    lexer.NextToken();
+  }
+  BaseNode *addr = nullptr;
+  BaseNode *rhs = nullptr;
+  // parse 2 operands then, #1 is address, the other would be value
+  if (!ParseExprTwoOperand(addr, rhs)) {
+    return false;
+  }
+  iAssign->SetOpnd(addr, 0);
+  iAssign->SetRHS(rhs);
+  lexer.NextToken();
+  stmt = iAssign;
+  return true;
+}
+
+bool MIRParser::ParseStmtIassignoff(StmtNodePtr &stmt) {
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect type parsing binary operator but get ");
+    return false;
+  }
+  // iassignoff <prim-type> <offset> (<addr-expr>, <rhs-expr>)
+  auto *iAssignOff = mod.CurFuncCodeMemPool()->New<IassignoffNode>();
+  iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind()));
+  if (lexer.NextToken() != TK_intconst) {
+    Error("expect offset but get ");
+    return false;
+  }
+  iAssignOff->SetOffset(lexer.GetTheIntVal());
+  lexer.NextToken();
+  BaseNode *addr = nullptr;
+  BaseNode *rhs = nullptr;
+  if (!ParseExprTwoOperand(addr, rhs)) {
+    return false;
+  }
+  iAssignOff->SetBOpnd(addr, 0);
+  iAssignOff->SetBOpnd(rhs, 1);
+  lexer.NextToken();
+  stmt = iAssignOff;
+  return true;
+}
+
+bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) {
+  Opcode op = lexer.GetTokenKind() == TK_iassignfpoff ?
OP_iassignfpoff : OP_iassignspoff; + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + // iassignfpoff ( ) + auto *iAssignOff = mod.CurFuncCodeMemPool()->New(op); + iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iAssignOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + iAssignOff->SetOpnd(expr, 0); + lexer.NextToken(); + stmt = iAssignOff; + return true; +} + +bool MIRParser::ParseStmtBlkassignoff(StmtNodePtr &stmt) { + // blkassignoff (, ) + BlkassignoffNode *bassignoff = mod.CurFuncCodeMemPool()->New(); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + bassignoff->offset = static_cast(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_intconst) { + Error("expect align but get "); + return false; + } + bassignoff->SetAlign(static_cast(lexer.GetTheIntVal())); + if (lexer.NextToken() != TK_intconst) { + Error("expect size but get "); + return false; + } + bassignoff->blockSize = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *destAddr = nullptr; + BaseNode *srcAddr = nullptr; + // parse 2 operands, the dest address followed by src address + if (!ParseExprTwoOperand(destAddr, srcAddr)) { + return false; + } + bassignoff->SetOpnd(destAddr, 0); + bassignoff->SetOpnd(srcAddr, 1); + lexer.NextToken(); + stmt = bassignoff; + return true; +} + +bool MIRParser::ParseStmtDoloop(StmtNodePtr &stmt) { + // syntax: doloop (, , ) { + // } + auto *doLoopNode = mod.CurFuncCodeMemPool()->New(); + stmt = doLoopNode; + lexer.NextToken(); + if (lexer.GetTokenKind() == TK_preg) { + uint32 pregNo = static_cast(lexer.GetTheIntVal()); + MIRFunction *mirFunc = mod.CurFunction(); + PregIdx pregIdx = mirFunc->GetPregTab()->EnterPregNo(pregNo, kPtyInvalid); + doLoopNode->SetIsPreg(true); + doLoopNode->SetDoVarStFullIdx(pregIdx); + // let other appearances handle the preg primitive type + } else { + StIdx stIdx; + if (!ParseDeclaredSt(stIdx)) { + return false; + } + if (stIdx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDoloop"); + return false; + } + if (stIdx.IsGlobal()) { + Error("expect local variable for doloop var but get "); + return false; + } + doLoopNode->SetDoVarStIdx(stIdx); + } + // parse ( + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + // parse start expression + lexer.NextToken(); + BaseNode *start = nullptr; + if (!ParseExpression(start)) { + Error("ParseStmtDoloop when parsing start expression"); + return false; + } + if (doLoopNode->IsPreg()) { + auto regIdx = static_cast(doLoopNode->GetDoVarStIdx().FullIdx()); + MIRPreg *mpReg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx); + if (mpReg->GetPrimType() == kPtyInvalid) { + CHECK_FATAL(start != nullptr, "null ptr check"); + mpReg->SetPrimType(start->GetPrimType()); + } + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , after start expression but get "); + return false; + } + doLoopNode->SetStartExpr(start); + // parse end expression + lexer.NextToken(); + BaseNode *end = nullptr; + if (!ParseExpression(end)) { // here should be a compare expression + Error("ParseStmtDoloop when parsing end expression"); + return false; + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , after condition expression but get "); + 
return false; + } + doLoopNode->SetContExpr(end); + // parse renew induction expression + lexer.NextToken(); + BaseNode *induction = nullptr; + if (!ParseExpression(induction)) { + Error("ParseStmtDoloop when parsing induction"); + return false; + } + // parse ) + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing doloop but get "); + return false; + } + doLoopNode->SetIncrExpr(induction); + // parse body of the loop + lexer.NextToken(); + BlockNode *bodyStmt = nullptr; + if (!ParseStmtBlock(bodyStmt)) { + Error("ParseStmtDoloop when parsing body of the loop"); + return false; + } + doLoopNode->SetDoBody(bodyStmt); + return true; +} + +bool MIRParser::ParseStmtForeachelem(StmtNodePtr &stmt) { + // syntax: foreachelem { + // } + auto *forNode = mod.CurFuncCodeMemPool()->New(); + stmt = forNode; + lexer.NextToken(); // skip foreachelem token + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("error parsing element variable of foreachelem in "); + return false; + } + if (stidx.IsGlobal()) { + Error("illegal global scope for element variable for foreachelem in "); + return false; + } + forNode->SetElemStIdx(stidx); + lexer.NextToken(); + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("error parsing array/collection variable of foreachelem in "); + return false; + } + forNode->SetArrayStIdx(stidx); + lexer.NextToken(); + // parse body of the loop + BlockNode *bodyStmt = nullptr; + if (!ParseStmtBlock(bodyStmt)) { + Error("error when parsing body of foreachelem loop in "); + return false; + } + forNode->SetLoopBody(bodyStmt); + return true; +} + +bool MIRParser::ParseStmtIf(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_if) { + Error("expect if but get "); + return false; + } + auto *ifStmt = mod.CurFuncCodeMemPool()->New(); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + ifStmt->SetOpnd(expr, 0); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *thenBlock = nullptr; + if (!ParseStmtBlock(thenBlock)) { + Error("ParseStmtIf failed when parsing then block"); + return false; + } + ifStmt->SetThenPart(thenBlock); + BlockNode *elseBlock = nullptr; + if (lexer.GetTokenKind() == TK_else) { + // has else part + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + if (!ParseStmtBlock(elseBlock)) { + Error("ParseStmtIf failed when parsing else block"); + return false; + } + ifStmt->SetElsePart(elseBlock); + } + stmt = ifStmt; + return true; +} + +bool MIRParser::ParseStmtWhile(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_while) { + Error("expect while but get "); + return false; + } + auto *whileStmt = mod.CurFuncCodeMemPool()->New(OP_while); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + whileStmt->SetOpnd(expr, 0); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *whileBody = nullptr; + if (!ParseStmtBlock(whileBody)) { + Error("ParseStmtWhile failed when parse while body"); + return false; + } + whileStmt->SetBody(whileBody); + stmt = whileStmt; + return true; +} + +bool MIRParser::ParseStmtDowhile(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_dowhile) { + Error("expect while but get "); + return false; + } + auto *whileStmt = mod.CurFuncCodeMemPool()->New(OP_dowhile); 
+ if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *doWhileBody = nullptr; + if (!ParseStmtBlock(doWhileBody)) { + Error("ParseStmtDowhile failed when trying to parsing do while body"); + return false; + } + whileStmt->SetBody(doWhileBody); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + whileStmt->SetOpnd(expr, 0); + lexer.NextToken(); + stmt = whileStmt; + return true; +} + +bool MIRParser::ParseStmtLabel(StmtNodePtr &stmt) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } else { + if (definedLabels.size() > labIdx && definedLabels[labIdx]) { + Error("label multiply declared "); + return false; + } + } + if (definedLabels.size() <= labIdx) { + definedLabels.resize(labIdx + 1); + } + definedLabels[labIdx] = true; + auto *labNode = mod.CurFuncCodeMemPool()->New(); + labNode->SetLabelIdx(labIdx); + stmt = labNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtGoto(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_goto) { + Error("expect goto but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in goto but get "); + return false; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + auto *gotoNode = mod.CurFuncCodeMemPool()->New(OP_goto); + gotoNode->SetOffset(labIdx); + stmt = gotoNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBr(StmtNodePtr &stmt) { + TokenKind tk = lexer.GetTokenKind(); + if (tk != TK_brtrue && tk != TK_brfalse) { + Error("expect brtrue/brfalse but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in goto but get "); + return false; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + auto *condGoto = mod.CurFuncCodeMemPool()->New(tk == TK_brtrue ? 
OP_brtrue : OP_brfalse); + condGoto->SetOffset(labIdx); + lexer.NextToken(); + // parse () + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + condGoto->SetOpnd(expr, 0); + stmt = condGoto; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseSwitchCase(int64 &constVal, LabelIdx &lblIdx) { + // syntax : goto + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect intconst in switch but get "); + return false; + } + constVal = lexer.GetTheIntVal(); + if (lexer.NextToken() != TK_colon) { + Error("expect : in switch but get "); + return false; + } + if (lexer.NextToken() != TK_goto) { + Error("expect goto in switch case but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in switch but get "); + return false; + } + lblIdx = mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtSwitch(StmtNodePtr &stmt) { + auto *switchNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = switchNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + switchNode->SetSwitchOpnd(expr); + if (!IsPrimitiveInteger(expr->GetPrimType())) { + Error("expect expression return integer but get "); + return false; + } + if (lexer.NextToken() == TK_label) { + switchNode->SetDefaultLabel(mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName())); + } else { + Error("expect label in switch but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // : goto + // : goto + // ... + // : goto + TokenKind tk = lexer.NextToken(); + std::set casesSet; + while (tk != TK_rbrace) { + int64 constVal = 0; + LabelIdx lbl = 0; + if (!ParseSwitchCase(constVal, lbl)) { + Error("parse switch case failed "); + return false; + } + if (casesSet.find(constVal) != casesSet.end()) { + Error("duplicated switch case "); + return false; + } + switchNode->InsertCasePair(CasePair(constVal, lbl)); + (void)casesSet.insert(constVal); + tk = lexer.GetTokenKind(); + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtRangegoto(StmtNodePtr &stmt) { + auto *rangeGotoNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = rangeGotoNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + if (!IsPrimitiveInteger(expr->GetPrimType())) { + Error("expect expression return integer but get "); + return false; + } + rangeGotoNode->SetOpnd(expr, 0); + if (lexer.NextToken() == TK_intconst) { + rangeGotoNode->SetTagOffset(static_cast(lexer.GetTheIntVal())); + } else { + Error("expect tag offset in rangegoto but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // : goto + // : goto + // ... 
+ // : goto + TokenKind tk = lexer.NextToken(); + std::set casesSet; + int32 minIdx = UINT16_MAX; + int32 maxIdx = 0; + while (tk != TK_rbrace) { + int64 constVal = 0; + LabelIdx lbl = 0; + if (!ParseSwitchCase(constVal, lbl)) { + Error("parse switch case failed "); + return false; + } + if (constVal > UINT16_MAX || constVal < 0) { + Error("rangegoto case tag not within unsigned 16 bits range "); + return false; + } + if (casesSet.find(constVal) != casesSet.end()) { + Error("duplicated switch case "); + return false; + } + if (constVal < minIdx) { + minIdx = static_cast(constVal); + } + if (constVal > maxIdx) { + maxIdx = static_cast(constVal); + } + rangeGotoNode->AddRangeGoto(static_cast(constVal), static_cast(lbl)); + (void)casesSet.insert(constVal); + tk = lexer.GetTokenKind(); + } + DEBUG_ASSERT(rangeGotoNode->GetNumOpnds() == 1, "Rangegoto is a UnaryOpnd; numOpnds must be 1"); + // check there is no gap + if (static_cast(static_cast(maxIdx - minIdx) + 1) != casesSet.size()) { + Error("gap not allowed in rangegoto case tags "); + return false; + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtMultiway(StmtNodePtr &stmt) { + auto *multiwayNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = multiwayNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + multiwayNode->SetMultiWayOpnd(expr); + if (lexer.NextToken() == TK_label) { + multiwayNode->SetDefaultlabel(mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName())); + } else { + Error("expect label in multiway but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // (): goto + // (): goto + // ... + // (): goto + TokenKind tk = lexer.NextToken(); + while (tk != TK_rbrace) { + BaseNode *x = nullptr; + if (!ParseExprOneOperand(x)) { + return false; + } + if (lexer.NextToken() != TK_colon) { + Error("expect : parsing multiway case tag specification but get "); + return false; + } + if (lexer.NextToken() != TK_goto) { + Error("expect goto in multiway case expression but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect goto label after multiway case expression but get "); + return false; + } + LabelIdx lblIdx = mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName()); + lexer.NextToken(); + multiwayNode->AppendElemToMultiWayTable(MCasePair(static_cast(x), lblIdx)); + tk = lexer.GetTokenKind(); + } + const MapleVector &multiWayTable = multiwayNode->GetMultiWayTable(); + multiwayNode->SetNumOpnds(multiWayTable.size()); + lexer.NextToken(); + return true; +} + +// used only when parsing mmpl +PUIdx MIRParser::EnterUndeclaredFunction(bool isMcount) { + std::string funcName; + if (isMcount) { + funcName = "_mcount"; + } else { + funcName = lexer.GetName(); + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + auto *fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + auto *funcType = mod.GetMemPool()->New(); + fn->SetMIRFuncType(funcType); + if (isMcount) { + MIRType *retType = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_void)); + fn->SetReturnTyIdx(retType->GetTypeIndex()); + } + return fn->GetPuidx(); +} + +bool MIRParser::ParseStmtCallMcount(StmtNodePtr &stmt) { + // syntax: call (, ..., ) + Opcode o = OP_call; + PUIdx pIdx = EnterUndeclaredFunction(true); + auto *callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + callStmt->SetPUIdx(pIdx); + MapleVector opndsvec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + callStmt->SetNOpnd(opndsvec); + callStmt->SetNumOpnds(opndsvec.size()); + stmt = callStmt; + return true; +} + +bool MIRParser::ParseStmtCall(StmtNodePtr &stmt) { + // syntax: call (, ..., ) + TokenKind tk = lexer.GetTokenKind(); + Opcode o = GetOpFromToken(tk); + DEBUG_ASSERT(kOpcodeInfo.IsCall(o), "ParseStmtCall: not a call opcode"); + bool hasAssigned = kOpcodeInfo.IsCallAssigned(o); + bool hasInstant = false; + bool withType = false; + switch (tk) { + case TK_polymorphiccall: + case TK_polymorphiccallassigned: + withType = true; + break; + case TK_callinstant: + case TK_virtualcallinstant: + case TK_superclasscallinstant: + case TK_interfacecallinstant: + case TK_callinstantassigned: + case TK_virtualcallinstantassigned: + case TK_superclasscallinstantassigned: + case TK_interfacecallinstantassigned: + hasInstant = true; + break; + default: + break; + } + TyIdx polymophicTyidx(0); + if (o == OP_polymorphiccallassigned || o == OP_polymorphiccall) { + TokenKind nextTk = lexer.NextToken(); + if (nextTk == TK_langle) { + nextTk = lexer.NextToken(); + if (nextTk == TK_func) { + lexer.NextToken(); + if (!ParseFuncType(polymophicTyidx)) { + Error("error parsing functype in ParseStmtCall for polymorphiccallassigned at "); + return false; + } + } else { + Error("expect func in functype but get "); + return false; + } + } else { + Error("expect < in functype but get "); + return false; + } + } + TokenKind funcTk = lexer.NextToken(); + if (funcTk != TK_fname) { + Error("expect func name in call but get "); + return false; + } + PUIdx pIdx; + if (!ParseDeclaredFunc(pIdx)) { + if (mod.GetFlavor() < kMmpl) { + Error("expect .mmpl"); + return false; + } + pIdx = EnterUndeclaredFunction(); + } + lexer.NextToken(); + CallNode *callStmt = nullptr; + CallinstantNode *callInstantStmt = nullptr; + if (withType) { + callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + callStmt->SetTyIdx(polymophicTyidx); + } else if (hasInstant) { + TokenKind langleTk = lexer.GetTokenKind(); + if (langleTk != TK_langle) { + Error("missing < in generic method instantiation at "); + return false; + } + TokenKind lbraceTk = lexer.NextToken(); + if (lbraceTk != TK_lbrace) { + Error("missing { in generic method instantiation at "); + return false; + } + MIRInstantVectorType instVecTy; + if (!ParseGenericInstantVector(instVecTy)) { + Error("error parsing generic method instantiation at "); + return false; + } + TokenKind rangleTk = lexer.GetTokenKind(); + if (rangleTk != TK_rangle) { + Error("missing > in generic method instantiation at "); + return false; + } + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&instVecTy); + callInstantStmt = mod.CurFuncCodeMemPool()->New(mod, o, tyIdx); + callStmt = callInstantStmt; + lexer.NextToken(); // skip the > + } else { + callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + } + callStmt->SetPUIdx(pIdx); + + MIRFunction *callee = GlobalTables::GetFunctionTable().GetFuncTable()[pIdx]; + callee->GetFuncSymbol()->SetAppearsInCode(true); + if (callee->GetName() == "setjmp") { + mod.CurFunction()->SetHasSetjmp(); + } + + MapleVector 
opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+  if (!ParseExprNaryOperand(opndsVec)) {
+    return false;
+  }
+  callStmt->SetNOpnd(opndsVec);
+  callStmt->SetNumOpnds(opndsVec.size());
+  if (hasAssigned) {
+    CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+    if (!ParseCallReturns(retsVec)) {
+      return false;
+    }
+    if (!hasInstant) {
+      DEBUG_ASSERT(callStmt != nullptr, "callstmt is null in MIRParser::ParseStmtCall");
+      callStmt->SetReturnVec(retsVec);
+    } else {
+      DEBUG_ASSERT(callInstantStmt != nullptr, "callinstantstmt is null in MIRParser::ParseStmtCall");
+      callInstantStmt->SetReturnVec(retsVec);
+    }
+  }
+  lexer.NextToken();
+  stmt = callStmt;
+  return true;
+}
+
+bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt, Opcode op) {
+  // syntax: icall (<PU-ptr>, <opnd0>, ..., <opndn>)
+  //         icallassigned <PU-ptr> (<opnd0>, ..., <opndn>) {
+  //           dassign <var-name0> <field-id0>
+  //           dassign <var-name1> <field-id1>
+  //            . . .
+  //           dassign <var-namen> <field-idn> }
+  //         icallproto <prototype> (<PU-ptr>, <opnd0>, ..., <opndn>)
+  //         icallprotoassigned <prototype> (<PU-ptr>, <opnd0>, ..., <opndn>) {
+  //           dassign <var-name0> <field-id0>
+  //           dassign <var-name1> <field-id1>
+  //            . . .
+  //           dassign <var-namen> <field-idn> }
+  IcallNode *iCallStmt = mod.CurFuncCodeMemPool()->New<IcallNode>(mod, op);
+  lexer.NextToken();
+  if (op == OP_icallproto || op == OP_icallprotoassigned) {
+    TyIdx tyIdx(0);
+    if (!ParseDerivedType(tyIdx)) {
+      Error("error parsing type in ParseStmtIcall for icallproto at ");
+      return false;
+    }
+    iCallStmt->SetRetTyIdx(tyIdx);
+  }
+  MapleVector<BaseNode*> opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+  if (!ParseExprNaryOperand(opndsVec)) {
+    return false;
+  }
+  iCallStmt->SetNOpnd(opndsVec);
+  iCallStmt->SetNumOpnds(opndsVec.size());
+  if (op == OP_icallassigned || op == OP_icallprotoassigned) {
+    CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+    if (!ParseCallReturns(retsVec)) {
+      return false;
+    }
+    iCallStmt->SetReturnVec(retsVec);
+  }
+  lexer.NextToken();
+  stmt = iCallStmt;
+  return true;
+}
+
+bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt) {
+  return ParseStmtIcall(stmt, OP_icall);
+}
+
+bool MIRParser::ParseStmtIcallassigned(StmtNodePtr &stmt) {
+  return ParseStmtIcall(stmt, OP_icallassigned);
+}
+
+bool MIRParser::ParseStmtIcallproto(StmtNodePtr &stmt) {
+  return ParseStmtIcall(stmt, OP_icallproto);
+}
+
+bool MIRParser::ParseStmtIcallprotoassigned(StmtNodePtr &stmt) {
+  return ParseStmtIcall(stmt, OP_icallprotoassigned);
+}
+
+bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned) {
+  Opcode o = !isAssigned ? (lexer.GetTokenKind() == TK_intrinsiccall ? OP_intrinsiccall : OP_xintrinsiccall)
+                         : (lexer.GetTokenKind() == TK_intrinsiccallassigned ? OP_intrinsiccallassigned
+                                                                             : OP_xintrinsiccallassigned);
+  auto *intrnCallNode = mod.CurFuncCodeMemPool()->New<IntrinsiccallNode>(mod, o);
+  lexer.NextToken();
+  if (o == (!isAssigned ? OP_intrinsiccall : OP_intrinsiccallassigned)) {
+    intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind()));
+  } else {
+    intrnCallNode->SetIntrinsic(static_cast<MIRIntrinsicID>(lexer.GetTheIntVal()));
+  }
+  lexer.NextToken();
+  MapleVector<BaseNode*> opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+  if (!ParseExprNaryOperand(opndsVec)) {
+    return false;
+  }
+  intrnCallNode->SetNOpnd(opndsVec);
+  intrnCallNode->SetNumOpnds(opndsVec.size());
+  if (isAssigned) {
+    CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+    if (!ParseCallReturns(retsVec)) {
+      return false;
+    }
+    // store return type of IntrinsiccallNode
+    if (retsVec.size() == 1 && retsVec[0].first.Idx() != 0) {
+      MIRSymbol *retSymbol = curFunc->GetSymTab()->GetSymbolFromStIdx(retsVec[0].first.Idx());
+      MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retSymbol->GetTyIdx());
+      CHECK_FATAL(retType != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned");
+      intrnCallNode->SetPrimType(retType->GetPrimType());
+    }
+    intrnCallNode->SetReturnVec(retsVec);
+  }
+  stmt = intrnCallNode;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt) {
+  return ParseStmtIntrinsiccall(stmt, false);
+}
+
+bool MIRParser::ParseStmtIntrinsiccallassigned(StmtNodePtr &stmt) {
+  return ParseStmtIntrinsiccall(stmt, true);
+}
+
+bool MIRParser::ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt, bool isAssigned) {
+  Opcode o = (!isAssigned) ? OP_intrinsiccallwithtype : OP_intrinsiccallwithtypeassigned;
+  IntrinsiccallNode *intrnCallNode = mod.CurFuncCodeMemPool()->New<IntrinsiccallNode>(mod, o);
+  TokenKind tk = lexer.NextToken();
+  TyIdx tyIdx(0);
+  if (IsPrimitiveType(tk)) {
+    if (!ParsePrimType(tyIdx)) {
+      Error("expect primitive type in ParseStmtIntrinsiccallwithtype but get ");
+      return false;
+    }
+  } else if (!ParseDerivedType(tyIdx)) {
+    Error("error parsing type in ParseStmtIntrinsiccallwithtype at ");
+    return false;
+  }
+  intrnCallNode->SetTyIdx(tyIdx);
+  intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind()));
+  lexer.NextToken();
+  MapleVector<BaseNode*> opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+  if (!ParseExprNaryOperand(opndsVec)) {
+    return false;
+  }
+  intrnCallNode->SetNOpnd(opndsVec);
+  intrnCallNode->SetNumOpnds(opndsVec.size());
+  if (isAssigned) {
+    CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter());
+    if (!ParseCallReturns(retsVec)) {
+      return false;
+    }
+    // store return type of IntrinsiccallNode
+    if (retsVec.size() == 1 && retsVec[0].first.Idx() != 0) {
+      MIRSymbol *retSymbol = curFunc->GetSymTab()->GetSymbolFromStIdx(retsVec[0].first.Idx());
+      MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retSymbol->GetTyIdx());
+      CHECK_FATAL(retType != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallwithtypeAssigned");
+      intrnCallNode->SetPrimType(retType->GetPrimType());
+    }
+    intrnCallNode->SetReturnVec(retsVec);
+  }
+  stmt = intrnCallNode;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt) {
+  return ParseStmtIntrinsiccallwithtype(stmt, false);
+}
+
+bool MIRParser::ParseStmtIntrinsiccallwithtypeassigned(StmtNodePtr &stmt) {
+  return ParseStmtIntrinsiccallwithtype(stmt, true);
+}
+
+bool MIRParser::ParseCallReturnPair(CallReturnPair &retpair) {
+  bool isst = (lexer.GetTokenKind() == TK_dassign);
+  if (isst) {
+    // parse %i
+    lexer.NextToken();
+    StIdx stidx;
+    // How to use islocal??
+ if (!ParseDeclaredSt(stidx)) { + return false; + } + if (lexer.GetTokenKind() == TK_lname) { + MIRSymbolTable *lSymTab = mod.CurFunction()->GetSymTab(); + MIRSymbol *lSym = lSymTab->GetSymbolFromStIdx(stidx.Idx(), 0); + DEBUG_ASSERT(lSym != nullptr, "lsym MIRSymbol is null"); + if (lSym->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lSym->GetTyIdx()); + auto *ptrTy = static_cast(ty->CopyMIRTypeNode()); + DEBUG_ASSERT(ptrTy != nullptr, "null ptr check"); + ptrTy->SetPrimType(GetExactPtrPrimType()); + TyIdx newTyidx = GlobalTables::GetTypeTable().GetOrCreateMIRType(ptrTy); + delete ptrTy; + lSym->SetTyIdx(newTyidx); + } + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing call return assignment but get"); + return false; + } + uint16 fieldId = 0; + TokenKind nextToken = lexer.NextToken(); + // parse field id + if (nextToken == TK_intconst) { + fieldId = lexer.GetTheIntVal(); + lexer.NextToken(); + } + RegFieldPair regFieldPair; + regFieldPair.SetFieldID(fieldId); + retpair = CallReturnPair(stidx, regFieldPair); + } else { + // parse type + lexer.NextToken(); + TyIdx tyidx(0); + // RegreadNode regreadexpr; + bool ret = ParsePrimType(tyidx); + if (ret != true) { + Error("call ParsePrimType failed in ParseCallReturns"); + return false; + } + if (tyidx == 0u) { + Error("expect primitive type but get "); + return false; + } + PrimType ptype = GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx); + PregIdx pregIdx; + if (lexer.GetTokenKind() == TK_specialreg) { + if (!ParseSpecialReg(pregIdx)) { + Error("expect specialreg parsing callassign CallReturnVector"); + return false; + } + } else if (lexer.GetTokenKind() == TK_preg) { + if (!ParsePseudoReg(ptype, pregIdx)) { + Error("expect pseudoreg parsing callassign CallReturnVector"); + return false; + } + } else { + Error("expect special or pseudo register but get "); + return false; + } + DEBUG_ASSERT(pregIdx > 0, "register number is zero"); + DEBUG_ASSERT(pregIdx <= 0xffff, "register number is over 16 bits"); + RegFieldPair regFieldPair; + regFieldPair.SetPregIdx(pregIdx); + retpair = CallReturnPair(StIdx(), regFieldPair); + } + return true; +} + +bool MIRParser::ParseCallReturns(CallReturnVector &retsvec) { + // { + // dassign + // dassign + // . . . + // dassign } + // OR + // { + // regassign + // regassign + // regassign + // } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { parsing call return values. "); + return false; + } + TokenKind tk = lexer.NextToken(); + CallReturnPair retpair; + while (tk != TK_rbrace) { + if (lexer.GetTokenKind() != TK_dassign && lexer.GetTokenKind() != TK_regassign) { + Error("expect dassign/regassign but get "); + return false; + } + if (!ParseCallReturnPair(retpair)) { + Error("error parsing call returns. 
"); + return false; + } + retsvec.push_back(retpair); + tk = lexer.GetTokenKind(); + } + return true; +} + +bool MIRParser::ParseStmtAsm(StmtNodePtr &stmt) { + AsmNode *asmNode = mod.CurFuncCodeMemPool()->New(&mod.GetCurFuncCodeMPAllocator()); + mod.CurFunction()->SetHasAsm(); + lexer.NextToken(); + // parse qualifiers + while (lexer.GetTokenKind() == TK_volatile || + lexer.GetTokenKind() == TK_inline || + lexer.GetTokenKind() == TK_goto) { + AsmQualifierKind qual; + switch (lexer.GetTokenKind()) { + case TK_volatile: { + qual = kASMvolatile; + break; + } + case TK_inline: { + qual = kASMinline; + break; + } + case TK_goto: + default: { + qual = kASMgoto; + break; + } + } + asmNode->SetQualifier(qual); + lexer.NextToken(); + } + // parse open brace + if (lexer.GetTokenKind() != TK_lbrace) { + Error("Open brace not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse asm string + if (lexer.GetTokenKind() != TK_string) { + Error("asm string not found parsing asm statement."); + return false; + } + asmNode->asmString = lexer.GetName(); + lexer.NextToken(); + // parse first colon + if (lexer.GetTokenKind() != TK_colon) { + Error("first colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse outputs + UStrIdx uStrIdx; + CallReturnPair retpair; + while (lexer.GetTokenKind() == TK_string) { + // parse an output constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + lexer.NextToken(); + if (!ParseCallReturnPair(retpair)) { + Error("error parsing call returns. "); + return false; + } + asmNode->outputConstraints.push_back(uStrIdx); + asmNode->asmOutputs.push_back(retpair); + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + } + } + // parse second colon + if (lexer.GetTokenKind() != TK_colon) { + Error("second colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse inputs + while (lexer.GetTokenKind() == TK_string) { + // parse an input constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + if (lexer.GetName()[0] == '+') { + asmNode->SetHasWriteInputs(); + } + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseExpression failed"); + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) but get "); + return false; + } + asmNode->inputConstraints.push_back(uStrIdx); + asmNode->GetNopnd().push_back(expr); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + asmNode->SetNumOpnds(static_cast(asmNode->GetNopndSize())); + // parse third colon + if (lexer.GetTokenKind() != TK_colon) { + Error("third colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse clobber list + while (lexer.GetTokenKind() == TK_string) { + // parse an input constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + asmNode->clobberList.push_back(uStrIdx); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + // parse fourth colon + if (lexer.GetTokenKind() != TK_colon) { + Error("fourth colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse labels + while (lexer.GetTokenKind() == TK_label) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = 
mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + asmNode->gotoLabels.push_back(labIdx); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + // parse closing brace + if (lexer.GetTokenKind() != TK_rbrace) { + Error("Closing brace not found parsing asm statement."); + return false; + } + stmt = asmNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtSafeRegion(StmtNodePtr &stmt) { + switch (lexer.GetTokenKind()) { + case TK_safe: + safeRegionFlag.push(true); + break; + case TK_unsafe: + safeRegionFlag.push(false); + break; + case TK_endsafe: + case TK_endunsafe: + safeRegionFlag.pop(); + break; + default: + Error("Only support safe/unsafe/endsafe/endunsafe."); + return false; + } + (void)stmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtJsTry(StmtNodePtr &stmt) { + auto *tryNode = mod.CurFuncCodeMemPool()->New(); + lexer.NextToken(); + // parse handler label + if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + tryNode->SetCatchOffset(0); + } else { + if (lexer.GetTokenKind() != TK_label) { + Error("expect handler label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->SetCatchOffset(labidx); + } + lexer.NextToken(); + // parse finally label + if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + tryNode->SetFinallyOffset(0); + } else { + if (lexer.GetTokenKind() != TK_label) { + Error("expect finally label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->SetFinallyOffset(labidx); + } + stmt = tryNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtTry(StmtNodePtr &stmt) { + auto *tryNode = mod.CurFuncCodeMemPool()->New(mod); + lexer.NextToken(); + DEBUG_ASSERT(lexer.GetTokenKind() == TK_lbrace, "expect left brace in try but get "); + lexer.NextToken(); + // parse handler label + while (lexer.GetTokenKind() != TK_rbrace) { + if (lexer.GetTokenKind() != TK_label) { + Error("expect handler label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->AddOffset(labidx); + lexer.NextToken(); + } + stmt = tryNode; + lexer.NextToken(); + return true; +} + +bool 
+bool MIRParser::ParseStmtCatch(StmtNodePtr &stmt) {
+  auto *catchNode = mod.CurFuncCodeMemPool()->New<CatchNode>(mod);
+  lexer.NextToken();
+  DEBUG_ASSERT(lexer.GetTokenKind() == TK_lbrace, "expect left brace in catch but get ");
+  lexer.NextToken();
+  while (lexer.GetTokenKind() != TK_rbrace) {
+    TyIdx tyidx(0);
+    if (!ParseType(tyidx)) {
+      Error("expect type parsing java catch statement");
+      return false;
+    }
+    catchNode->PushBack(tyidx);
+  }
+  catchNode->SetNumOpnds(0);
+  stmt = catchNode;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseUnaryStmt(Opcode op, StmtNodePtr &stmt) {
+  lexer.NextToken();
+  auto *throwStmt = mod.CurFuncCodeMemPool()->New<UnaryStmtNode>(op);
+  stmt = throwStmt;
+  BaseNode *expr = nullptr;
+  if (!ParseExprOneOperand(expr)) {
+    return false;
+  }
+  throwStmt->SetOpnd(expr, 0);
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseUnaryStmtThrow(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_throw, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtDecRef(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_decref, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtIncRef(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_incref, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtDecRefReset(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_decrefreset, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtIGoto(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_igoto, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtEval(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_eval, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtFree(StmtNodePtr &stmt) {
+  return ParseUnaryStmt(OP_free, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtCallAssertNonNull(StmtNodePtr &stmt) {
+  std::string funcName;
+  std::string stmtFuncName;
+  int index = 0;
+  if (!ParseCallAssertInfo(funcName, &index, stmtFuncName)) {
+    Error("ParseCallAssertInfo failed");
+    return false;
+  }
+  lexer.NextToken();
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName);
+  GStrIdx stmtstridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(stmtFuncName);
+  stmt = mod.CurFuncCodeMemPool()->New<CallAssertNonnullStmtNode>(OP_callassertnonnull, stridx, index, stmtstridx);
+  BaseNode *expr = nullptr;
+  if (!ParseExprOneOperand(expr)) {
+    return false;
+  }
+  stmt->SetOpnd(expr, 0);
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseAssertInfo(std::string &funcName) {
+  if (lexer.NextToken() != TK_langle) {
+    Error("expect < parsing safety assert check ");
+    return false;
+  }
+  if (lexer.NextToken() != TK_fname) {
+    Error("expect &funcname parsing safety assert check ");
+    return false;
+  }
+  funcName = lexer.GetName();
+  if (lexer.NextToken() != TK_rangle) {
+    Error("expect > parsing safety assert check ");
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseUnaryStmtAssertNonNullCheck(Opcode op, StmtNodePtr &stmt) {
+  std::string funcName;
+  if (!ParseAssertInfo(funcName)) {
+    Error("ParseAssertInfo failed");
+    return false;
+  }
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName);
+  lexer.NextToken();
+  stmt = mod.CurFuncCodeMemPool()->New<AssertNonnullStmtNode>(op, stridx);
+  BaseNode *expr = nullptr;
+  if (!ParseExprOneOperand(expr)) {
+    return false;
+  }
+  stmt->SetOpnd(expr, 0);
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseUnaryStmtAssertNonNull(StmtNodePtr &stmt) {
+  if (mod.IsCModule()) {
+    return ParseUnaryStmtAssertNonNullCheck(OP_assertnonnull, stmt);
+  } else {
+    return ParseUnaryStmt(OP_assertnonnull, stmt);
+  }
+}
+
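+// In C mode the nonnull checks carry a <&funcName> annotation naming the
+// function the check was generated for, followed by one operand, e.g.
+// (illustrative): assignassertnonnull <&foo> (dread ptr %p)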
+bool MIRParser::ParseUnaryStmtAssignAssertNonNull(StmtNodePtr &stmt) {
+  return ParseUnaryStmtAssertNonNullCheck(OP_assignassertnonnull, stmt);
+}
+
+bool MIRParser::ParseUnaryStmtReturnAssertNonNull(StmtNodePtr &stmt) {
+  return ParseUnaryStmtAssertNonNullCheck(OP_returnassertnonnull, stmt);
+}
+
+bool MIRParser::ParseStmtMarker(StmtNodePtr &stmt) {
+  Opcode op;
+  switch (paramTokenKindForStmt) {
+    case TK_jscatch:
+      op = OP_jscatch;
+      break;
+    case TK_finally:
+      op = OP_finally;
+      break;
+    case TK_cleanuptry:
+      op = OP_cleanuptry;
+      break;
+    case TK_endtry:
+      op = OP_endtry;
+      break;
+    case TK_retsub:
+      op = OP_retsub;
+      break;
+    case TK_membaracquire:
+      op = OP_membaracquire;
+      break;
+    case TK_membarrelease:
+      op = OP_membarrelease;
+      break;
+    case TK_membarstoreload:
+      op = OP_membarstoreload;
+      break;
+    case TK_membarstorestore:
+      op = OP_membarstorestore;
+      break;
+    default:
+      return false;
+  }
+  auto *stmtNode = mod.CurFuncCodeMemPool()->New<StmtNode>(op);
+  stmt = stmtNode;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtGosub(StmtNodePtr &stmt) {
+  if (lexer.NextToken() != TK_label) {
+    Error("expect finally label in gosub but get ");
+    return false;
+  }
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName());
+  LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx);
+  if (labidx == 0) {
+    labidx = mod.CurFunction()->GetLabelTab()->CreateLabel();
+    mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx);
+    mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx);
+  }
+  auto *goSubNode = mod.CurFuncCodeMemPool()->New<GotoNode>(OP_gosub);
+  goSubNode->SetOffset(labidx);
+  stmt = goSubNode;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseBinaryStmt(StmtNodePtr &stmt, Opcode op) {
+  auto *assStmt = mod.CurFuncCodeMemPool()->New<BinaryStmtNode>(op);
+  lexer.NextToken();
+  BaseNode *opnd0 = nullptr;
+  BaseNode *opnd1 = nullptr;
+  if (!ParseExprTwoOperand(opnd0, opnd1)) {
+    return false;
+  }
+  assStmt->SetBOpnd(opnd0, 0);
+  assStmt->SetBOpnd(opnd1, 1);
+  stmt = assStmt;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseNaryStmtAssert(StmtNodePtr &stmt, Opcode op) {
+  std::string funcName;
+  if (!ParseAssertInfo(funcName)) {
+    Error("ParseAssertInfo failed");
+    return false;
+  }
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName);
+  auto *assStmt = mod.CurFuncCodeMemPool()->New<AssertBoundaryStmtNode>(mod, op, stridx);
+  if (!ParseNaryExpr(*assStmt)) {
+    Error("ParseNaryStmtAssert failed");
+    return false;
+  }
+  assStmt->SetNumOpnds(static_cast<uint8>(assStmt->GetNopndSize()));
+  stmt = assStmt;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseNaryStmtAssertGE(StmtNodePtr &stmt) {
+  return ParseNaryStmtAssert(stmt, OP_assertge);
+}
+
+bool MIRParser::ParseNaryStmtAssertLT(StmtNodePtr &stmt) {
+  return ParseNaryStmtAssert(stmt, OP_assertlt);
+}
+
+bool MIRParser::ParseNaryStmtReturnAssertLE(StmtNodePtr &stmt) {
+  return ParseNaryStmtAssert(stmt, OP_returnassertle);
+}
+
+bool MIRParser::ParseNaryStmtAssignAssertLE(StmtNodePtr &stmt) {
+  return ParseNaryStmtAssert(stmt, OP_assignassertle);
+}
+
+bool MIRParser::ParseNaryStmtCalcassertGE(StmtNodePtr &stmt) {
+  return ParseNaryStmtAssert(stmt, OP_calcassertge);
+}
+
+bool MIRParser::ParseNaryStmtCalcassertLT(StmtNodePtr &stmt) {
+  return ParseNaryStmtAssert(stmt, OP_calcassertlt);
+}
+
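+// ParseCallAssertInfo below consumes the three-part annotation
+// <&calleeName, paramIndex, &callerName> attached to callassert-style
+// checks, e.g. (illustrative): callassertle <&callee, 0, &caller> (...)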
+bool MIRParser::ParseCallAssertInfo(std::string &funcName, int *paramIndex, std::string &stmtFuncName) {
+  if (lexer.NextToken() != TK_langle) {
+    Error("expect < parsing safety call check ");
+    return false;
+  }
+  if (lexer.NextToken() != TK_fname) {
+    Error("expect &funcname parsing safety call check ");
+    return false;
+  }
+  funcName = lexer.GetName();
+  if (lexer.NextToken() != TK_coma) {
+    Error("expect , parsing safety call check ");
+    return false;
+  }
+  if (lexer.NextToken() != TK_intconst) {
+    Error("expect intconst parsing safety call check ");
+    return false;
+  }
+  *paramIndex = static_cast<int>(lexer.GetTheIntVal());
+  if (lexer.NextToken() != TK_coma) {
+    Error("expect , parsing safety call check ");
+    return false;
+  }
+  if (lexer.NextToken() != TK_fname) {
+    Error("expect &stmtfuncname parsing safety call check ");
+    return false;
+  }
+  stmtFuncName = lexer.GetName();
+  if (lexer.NextToken() != TK_rangle) {
+    Error("expect > parsing safety call check ");
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseNaryStmtCallAssertLE(StmtNodePtr &stmt) {
+  std::string funcName;
+  std::string stmtFuncName;
+  int index = 0;
+  if (!ParseCallAssertInfo(funcName, &index, stmtFuncName)) {
+    Error("ParseCallAssertInfo failed");
+    return false;
+  }
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName);
+  GStrIdx stmtstridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(stmtFuncName);
+  auto *assStmt = mod.CurFuncCodeMemPool()->New<CallAssertBoundaryStmtNode>(mod, OP_callassertle, stridx, index,
+                                                                            stmtstridx);
+  if (!ParseNaryExpr(*assStmt)) {
+    Error("ParseNaryExpr failed");
+    return false;
+  }
+  assStmt->SetNumOpnds(static_cast<uint8>(assStmt->GetNopndSize()));
+  stmt = assStmt;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseNaryExpr(NaryStmtNode &stmtNode) {
+  if (lexer.NextToken() != TK_lparen) {
+    Error("expect ( parsing NaryExpr ");
+    return false;
+  }
+  (void)lexer.NextToken();  // skip TK_lparen
+  while (lexer.GetTokenKind() != TK_rparen) {
+    BaseNode *expr = nullptr;
+    if (!ParseExpression(expr)) {
+      Error("ParseExpression failed");
+      return false;
+    }
+    stmtNode.GetNopnd().push_back(expr);
+    if (lexer.GetTokenKind() != TK_coma && lexer.GetTokenKind() != TK_rparen) {
+      Error("expect , or ) parsing NaryStmt");
+      return false;
+    }
+    if (lexer.GetTokenKind() == TK_coma) {
+      lexer.NextToken();
+    }
+  }
+  return true;
+}
+
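+// ParseNaryStmt below covers the general n-ary statements, e.g.
+// (illustrative): return (dread i32 %retval)
+// syncenter takes one or two operands; when the second is omitted the
+// parser itself appends the constant 2, the __sync_enter_fast() kind.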
+bool MIRParser::ParseNaryStmt(StmtNodePtr &stmt, Opcode op) {
+  auto *stmtReturn = mod.CurFuncCodeMemPool()->New<NaryStmtNode>(mod, op);
+  if (op == OP_syncenter) {  // old code, reconstruct later
+    if (lexer.NextToken() != TK_lparen) {
+      Error("expect syncenter with ( but get ");
+      return false;
+    }
+    lexer.NextToken();
+    BaseNode *expr = nullptr;
+    if (!ParseExpression(expr)) {
+      Error("ParseExpression failed");
+      return false;
+    }
+    stmtReturn->GetNopnd().push_back(expr);
+    if (lexer.GetTokenKind() == TK_coma) {
+      lexer.NextToken();
+      BaseNode *exprSync = nullptr;
+      if (!ParseExpression(exprSync)) {
+        Error("ParseExpression failed");
+        return false;
+      }
+      stmtReturn->GetNopnd().push_back(exprSync);
+    } else {
+      MIRType *intType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i32));
+      // default 2 for __sync_enter_fast()
+      MIRIntConst *intConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(2, *intType);
+      ConstvalNode *exprConst = mod.GetMemPool()->New<ConstvalNode>();
+      exprConst->SetPrimType(PTY_i32);
+      exprConst->SetConstVal(intConst);
+      stmtReturn->GetNopnd().push_back(exprConst);
+      stmtReturn->SetNumOpnds(stmtReturn->GetNopndSize());
+    }
+    if (lexer.GetTokenKind() != TK_rparen) {
+      Error("expect ) parsing NaryStmt");
+      return false;
+    }
+  } else if (!ParseNaryExpr(*stmtReturn)) {
+    Error("ParseNaryExpr failed");
+    return false;
+  }
+  stmtReturn->SetNumOpnds(stmtReturn->GetNopndSize());
+  stmt = stmtReturn;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseNaryStmtReturn(StmtNodePtr &stmt) {
+  return ParseNaryStmt(stmt, OP_return);
+}
+
+bool MIRParser::ParseNaryStmtSyncEnter(StmtNodePtr &stmt) {
+  return ParseNaryStmt(stmt, OP_syncenter);
+}
+
+bool MIRParser::ParseNaryStmtSyncExit(StmtNodePtr &stmt) {
+  return ParseNaryStmt(stmt, OP_syncexit);
+}
+
+bool MIRParser::ParseLoc() {
+  if (lexer.NextToken() != TK_intconst) {
+    Error("expect intconst in LOC but get ");
+    return false;
+  }
+  lastFileNum = lexer.GetTheIntVal();
+  if (lexer.NextToken() != TK_intconst) {
+    Error("expect intconst in LOC but get ");
+    return false;
+  }
+  lastLineNum = lexer.GetTheIntVal();
+  if (firstLineNum == 0) {
+    firstLineNum = lastLineNum;
+  }
+  if (lexer.NextToken() == TK_intconst) {  // optional column number
+    lastColumnNum = static_cast<uint16>(lexer.GetTheIntVal());
+    lexer.NextToken();
+  }
+  return true;
+}
+
+bool MIRParser::ParseLocStmt(StmtNodePtr&) {
+  return ParseLoc();
+}
+
+bool MIRParser::ParseStatement(StmtNodePtr &stmt) {
+  paramTokenKindForStmt = lexer.GetTokenKind();
+  uint32 mplNum = lexer.GetLineNum();
+  uint32 lnum = lastLineNum;
+  uint32 fnum = lastFileNum;
+  uint16 cnum = lastColumnNum;
+  std::map<TokenKind, FuncPtrParseStmt>::iterator itFuncPtr = funcPtrMapForParseStmt.find(paramTokenKindForStmt);
+  if (itFuncPtr != funcPtrMapForParseStmt.end()) {
+    if (!(this->*(itFuncPtr->second))(stmt)) {
+      return false;
+    }
+  } else {
+    return false;
+  }
+  if (stmt && stmt->GetSrcPos().MplLineNum() == 0) {
+    stmt->GetSrcPos().SetFileNum(fnum);
+    stmt->GetSrcPos().SetLineNum(lnum);
+    stmt->GetSrcPos().SetColumn(cnum);
+    stmt->GetSrcPos().SetMplLineNum(mplNum);
+    if (safeRegionFlag.top()) {
+      stmt->SetInSafeRegion();
+    }
+  }
+  return true;
+}
+
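+// A function body is a brace-enclosed mix of declarations and statements.
+// LOC directives parsed above update the source position stamped onto the
+// statements that follow, e.g. (illustrative):
+//   LOC 1 42 7    // file 1, line 42, optional column 7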
+/* parse the statements enclosed by { and }
+ */
+bool MIRParser::ParseStmtBlock(BlockNodePtr &blk) {
+  if (lexer.GetTokenKind() != TK_lbrace) {
+    Error("expect { for func body but get ");
+    return false;
+  }
+  blk = mod.CurFuncCodeMemPool()->New<BlockNode>();
+  MIRFunction *fn = mod.CurFunction();
+  paramCurrFuncForParseStmtBlock = fn;
+  lexer.NextToken();
+  // Insert _mcount for PI.
+  if (mod.GetWithProfileInfo()) {
+    StmtNode *stmtt = nullptr;
+    if (!ParseStmtCallMcount(stmtt)) {
+      return false;
+    }
+    blk->AddStatement(stmtt);
+  }
+  while (true) {
+    TokenKind stmtTk = lexer.GetTokenKind();
+    // calculate the mpl file line number mplNum here to get accurate result
+    uint32 mplNum = lexer.GetLineNum();
+    if (IsStatement(stmtTk)) {
+      ParseStmtBlockForSeenComment(blk, mplNum);
+      StmtNode *stmt = nullptr;
+      if (!ParseStatement(stmt)) {
+        Error("ParseStmtBlock failed when parsing a statement");
+        return false;
+      }
+      if (stmt != nullptr) {  // stmt is nullptr if it is a LOC
+        blk->AddStatement(stmt);
+      }
+    } else {
+      std::map<TokenKind, FuncPtrParseStmtBlock>::iterator itFuncPtr = funcPtrMapForParseStmtBlock.find(stmtTk);
+      if (itFuncPtr == funcPtrMapForParseStmtBlock.end()) {
+        if (stmtTk == TK_rbrace) {
+          ParseStmtBlockForSeenComment(blk, mplNum);
+          lexer.NextToken();
+          return true;
+        } else {
+          Error("expect } or var or statement for func body but get ");
+          return false;
+        }
+      } else {
+        if (!(this->*(itFuncPtr->second))()) {
+          return false;
+        }
+      }
+    }
+  }
+}
+
+void MIRParser::ParseStmtBlockForSeenComment(BlockNodePtr blk, uint32 mplNum) {
+  if (Options::noComment) {
+    lexer.seenComments.clear();
+    return;
+  }
+  // collect accumulated comments into comment statement nodes
+  if (!lexer.seenComments.empty()) {
+    for (size_t i = 0; i < lexer.seenComments.size(); ++i) {
+      auto *cmnt = mod.CurFuncCodeMemPool()->New<CommentNode>(mod);
+      cmnt->SetComment(lexer.seenComments[i]);
+      SetSrcPos(cmnt->GetSrcPos(), mplNum);
+      blk->AddStatement(cmnt);
+    }
+    lexer.seenComments.clear();
+  }
+}
+
+bool MIRParser::ParseStmtBlockForVar(TokenKind stmtTK) {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  MIRSymbol *st = fn->GetSymTab()->CreateSymbol(kScopeLocal);
+  st->SetStorageClass(kScAuto);
+  st->SetSKind(kStVar);
+  SetSrcPos(st->GetSrcPosition(), lexer.GetLineNum());
+  if (stmtTK == TK_tempvar) {
+    st->SetIsTmp(true);
+  }
+  if (!ParseDeclareVar(*st)) {
+    return false;
+  }
+  if (!fn->GetSymTab()->AddToStringSymbolMap(*st)) {
+    Error("duplicate declare symbol parse function ");
+    return false;
+  }
+  if (!ParseDeclareVarInitValue(*st)) {
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForVar() {
+  return ParseStmtBlockForVar(TK_var);
+}
+
+bool MIRParser::ParseStmtBlockForTempVar() {
+  return ParseStmtBlockForVar(TK_tempvar);
+}
+
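+// Illustrative local pseudo-register declaration consumed below; the
+// trailing 0/1 constant drives SetNeedRC (whether the register needs
+// reference counting):
+//   reg %1 ptr 1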
+bool MIRParser::ParseStmtBlockForReg() {
+  lexer.NextToken();
+  if (lexer.GetTokenKind() != TK_preg) {
+    Error("expect %%preg after reg");
+    return false;
+  }
+  PregIdx pregIdx;
+  if (!ParsePseudoReg(PTY_ref, pregIdx)) {
+    return false;
+  }
+  MIRPreg *preg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(pregIdx);
+  TyIdx tyidx(0);
+  if (!ParseType(tyidx)) {
+    Error("ParseDeclareVar failed when parsing the type");
+    return false;
+  }
+  DEBUG_ASSERT(tyidx > 0, "parse declare var failed ");
+  MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx);
+  preg->SetMIRType(mirType);
+  if (lexer.GetTokenKind() == TK_intconst) {
+    int64 theIntVal = lexer.GetTheIntVal();
+    if (theIntVal != 0 && theIntVal != 1) {
+      Error("parseDeclareReg failed");
+      return false;
+    }
+    preg->SetNeedRC(theIntVal != 0);
+  } else {
+    Error("parseDeclareReg failed");
+    return false;
+  }
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForType() {
+  paramParseLocalType = true;
+  if (!ParseTypedef()) {
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForFrameSize() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  lexer.NextToken();
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect integer after frameSize but get ");
+    return false;
+  }
+  fn->SetFrameSize(lexer.GetTheIntVal());
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForUpformalSize() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  lexer.NextToken();
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect integer after upFormalSize but get ");
+    return false;
+  }
+  fn->SetUpFormalSize(lexer.GetTheIntVal());
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForModuleID() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  lexer.NextToken();
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect integer after moduleid but get ");
+    return false;
+  }
+  fn->SetModuleID(lexer.GetTheIntVal());
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForFuncSize() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  lexer.NextToken();
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect integer after funcSize but get ");
+    return false;
+  }
+  fn->SetFuncSize(lexer.GetTheIntVal());
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForFuncID() {
+  // funcid is for debugging purpose
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  lexer.NextToken();
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect integer after funcid but get ");
+    return false;
+  }
+  fn->SetPuidxOrigin(lexer.GetTheIntVal());
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForFormalWordsTypeTagged() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  uint8 *addr = ParseWordsInfo(fn->GetUpFormalSize());
+  if (addr == nullptr) {
+    Error("parser error for formalwordstypetagged");
+    return false;
+  }
+  fn->SetFormalWordsTypeTagged(addr);
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForLocalWordsTypeTagged() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  uint8 *addr = ParseWordsInfo(fn->GetFrameSize());
+  if (addr == nullptr) {
+    Error("parser error for localWordsTypeTagged");
+    return false;
+  }
+  fn->SetLocalWordsTypeTagged(addr);
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForFormalWordsRefCounted() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  uint8 *addr = ParseWordsInfo(fn->GetUpFormalSize());
+  if (addr == nullptr) {
+    Error("parser error for formalwordsrefcounted");
+    return false;
+  }
+  fn->SetFormalWordsRefCounted(addr);
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForLocalWordsRefCounted() {
+  MIRFunction *fn = paramCurrFuncForParseStmtBlock;
+  uint8 *addr = ParseWordsInfo(fn->GetFrameSize());
+  if (addr == nullptr) {
+    Error("parser error for localwordsrefcounted");
+    return false;
+  }
+  fn->SetLocalWordsRefCounted(addr);
+  return true;
+}
+
+bool MIRParser::ParseStmtBlockForFuncInfo() {
+  lexer.NextToken();
+  if (!ParseFuncInfo()) {
+    return false;
+  }
+  return true;
+}
+
+/* exprparser */
+static Opcode GetUnaryOp(TokenKind tk) {
+  switch (tk) {
+#define UNARYOP(P) \
+  case TK_##P:     \
+    return OP_##P;
+#include "unary_op.def"
+#undef UNARYOP
+    default:
+      return OP_undef;
+  }
+}
+
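+// GetUnaryOp above and GetBinaryOp below rely on the X-macro pattern: every
+// opcode listed in unary_op.def / binary_op.def expands into one switch arm,
+// keeping the token-to-opcode mapping in a single place. For example,
+// UNARYOP(abs) expands to:
+//   case TK_abs:
+//     return OP_abs;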
+static Opcode GetBinaryOp(TokenKind tk) {
+  switch (tk) {
+#define BINARYOP(P) \
+  case TK_##P:      \
+    return OP_##P;
+#include "binary_op.def"
+#undef BINARYOP
+    default:
+      return OP_undef;
+  }
+}
+
+static Opcode GetConvertOp(TokenKind tk) {
+  switch (tk) {
+    case TK_ceil:
+      return OP_ceil;
+    case TK_cvt:
+      return OP_cvt;
+    case TK_floor:
+      return OP_floor;
+    case TK_round:
+      return OP_round;
+    case TK_trunc:
+      return OP_trunc;
+    default:
+      return OP_undef;
+  }
+}
+
+bool MIRParser::ParseExprOneOperand(BaseNodePtr &expr) {
+  if (lexer.GetTokenKind() != TK_lparen) {
+    Error("expect ( parsing operand parsing unary ");
+    return false;
+  }
+  lexer.NextToken();
+  if (!ParseExpression(expr)) {
+    Error("expect expression as operand of unary expression ");
+    return false;
+  }
+  if (lexer.GetTokenKind() != TK_rparen) {
+    Error("expect ) parsing operand parsing unary ");
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1) {
+  if (lexer.GetTokenKind() != TK_lparen) {
+    Error("expect ( parsing operand parsing unary ");
+    return false;
+  }
+  lexer.NextToken();
+  if (!ParseExpression(opnd0)) {
+    return false;
+  }
+  if (lexer.GetTokenKind() != TK_coma) {
+    Error("expect , between two operands but get ");
+    return false;
+  }
+  lexer.NextToken();
+  if (!ParseExpression(opnd1)) {
+    return false;
+  }
+  if (lexer.GetTokenKind() != TK_rparen) {
+    Error("expect ) parsing operand parsing unary ");
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseExprNaryOperand(MapleVector<BaseNode*> &opndVec) {
+  if (lexer.GetTokenKind() != TK_lparen) {
+    Error("expect ( parsing operand parsing nary operands ");
+    return false;
+  }
+  TokenKind tk = lexer.NextToken();
+  while (tk != TK_rparen) {
+    BaseNode *opnd = nullptr;
+    if (!ParseExpression(opnd)) {
+      Error("expect expression parsing nary operands ");
+      return false;
+    }
+    opndVec.push_back(opnd);
+    tk = lexer.GetTokenKind();
+    if (tk == TK_coma) {
+      tk = lexer.NextToken();
+    }
+  }
+  return true;
+}
+
+bool MIRParser::ParseDeclaredSt(StIdx &stidx) {
+  TokenKind varTk = lexer.GetTokenKind();
+  stidx.SetFullIdx(0);
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName());
+  if (varTk == TK_gname) {
+    stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx);
+    if (stidx.FullIdx() == 0) {
+      MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+      st->SetNameStrIdx(stridx);
+      st->SetSKind(kStVar);
+      (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*st);
+      stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx);
+      return true;
+    }
+  } else if (varTk == TK_lname) {
+    stidx = mod.CurFunction()->GetSymTab()->GetStIdxFromStrIdx(stridx);
+    if (stidx.FullIdx() == 0) {
+      Error("local symbol not declared ");
+      return false;
+    }
+  } else {
+    Error("expect global/local name but get ");
+    return false;
+  }
+  return true;
+}
+
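+// Symbol references are resolved by sigil in ParseDeclaredSt above: a $name
+// (TK_gname) is looked up or created in the global symbol table, while a
+// %name (TK_lname) must already be declared in the function. The helper
+// below registers a forward-declared function symbol for a &name that is
+// referenced before its definition.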
+void MIRParser::CreateFuncMIRSymbol(PUIdx &puidx, GStrIdx strIdx) {
+  MIRSymbol *funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+  funcSt->SetNameStrIdx(strIdx);
+  (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt);
+  funcSt->SetStorageClass(kScText);
+  funcSt->SetSKind(kStFunc);
+  funcSt->SetNeedForwDecl();
+  auto *fn = mod.GetMemPool()->New<MIRFunction>(&mod, funcSt->GetStIdx());
+  puidx = static_cast<PUIdx>(GlobalTables::GetFunctionTable().GetFuncTable().size());
+  fn->SetPuidx(puidx);
+  GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn);
+  funcSt->SetFunction(fn);
+  if (options & kParseInlineFuncBody) {
+    funcSt->SetIsTmpUnused(true);
+  }
+}
+
+bool MIRParser::ParseDeclaredFunc(PUIdx &puidx) {
+  GStrIdx stridx = GlobalTables::GetStrTable().GetStrIdxFromName(lexer.GetName());
+  if (stridx == 0u) {
+    stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName());
+  }
+  StIdx stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx);
+  if (stidx.FullIdx() == 0) {
+    CreateFuncMIRSymbol(puidx, stridx);
+    return true;
+  }
+  MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+  DEBUG_ASSERT(st != nullptr, "null ptr check");
+  if (st->GetSKind() != kStFunc) {
+    Error("function name not declared as function");
+    return false;
+  }
+  MIRFunction *func = st->GetFunction();
+  puidx = func->GetPuidx();
+  st->SetAppearsInCode(true);
+  return true;
+}
+
+bool MIRParser::ParseExprDread(BaseNodePtr &expr) {
+  if (lexer.GetTokenKind() != TK_dread) {
+    Error("expect dread but get ");
+    return false;
+  }
+  AddrofNode *dexpr = mod.CurFuncCodeMemPool()->New<AddrofNode>(OP_dread);
+  expr = dexpr;
+  lexer.NextToken();
+  TyIdx tyidx(0);
+  bool parseRet = ParsePrimType(tyidx);
+  if (tyidx == 0u || !parseRet) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  StIdx stidx;
+  if (!ParseDeclaredSt(stidx)) {
+    return false;
+  }
+  if (stidx.FullIdx() == 0) {
+    Error("expect a symbol ParseExprDread failed");
+    return false;
+  }
+  dexpr->SetStIdx(stidx);
+  TokenKind endtk = lexer.NextToken();
+  if (endtk == TK_intconst) {
+    dexpr->SetFieldID(lexer.GetTheIntVal());
+    lexer.NextToken();
+  } else if (!IsDelimitationTK(endtk)) {
+    Error("expect , or ) delimitation token but get ");
+    return false;
+  } else {
+    dexpr->SetFieldID(0);
+  }
+  if (!dexpr->CheckNode(mod)) {
+    Error("dread is not legal");
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseExprDreadoff(BaseNodePtr &expr) {
+  if (lexer.GetTokenKind() != TK_dreadoff) {
+    Error("expect dreadoff but get ");
+    return false;
+  }
+  DreadoffNode *dexpr = mod.CurFuncCodeMemPool()->New<DreadoffNode>(OP_dreadoff);
+  expr = dexpr;
+  lexer.NextToken();
+  TyIdx tyidx(0);
+  bool parseRet = ParsePrimType(tyidx);
+  if (tyidx == 0u || !parseRet) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  StIdx stidx;
+  if (!ParseDeclaredSt(stidx)) {
+    return false;
+  }
+  if (stidx.FullIdx() == 0) {
+    Error("expect a symbol ParseExprDreadoff failed");
+    return false;
+  }
+  dexpr->stIdx = stidx;
+  TokenKind endtk = lexer.NextToken();
+  if (endtk == TK_intconst) {
+    dexpr->offset = static_cast<int32>(lexer.GetTheIntVal());
+    lexer.NextToken();
+  } else {
+    Error("expect integer offset but get ");
+    return false;
+  }
+  return true;
+}
+
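+// Illustrative regread forms accepted below: either a special register,
+// e.g. regread ptr %%SP, or a numbered pseudo-register, e.g. regread i32 %2.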
+bool MIRParser::ParseExprRegread(BaseNodePtr &expr) {
+  auto *regRead = mod.CurFuncCodeMemPool()->New<RegreadNode>();
+  expr = regRead;
+  lexer.NextToken();
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    return false;
+  }
+  if (tyidx == 0u) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  if (lexer.GetTokenKind() == TK_specialreg) {
+    PregIdx tempPregIdx = regRead->GetRegIdx();
+    bool isSuccess = ParseSpecialReg(tempPregIdx);
+    regRead->SetRegIdx(tempPregIdx);
+    return isSuccess;
+  }
+  if (lexer.GetTokenKind() == TK_preg) {
+    PregIdx tempPregIdx = regRead->GetRegIdx();
+    bool isSuccess = ParsePseudoReg(regRead->GetPrimType(), tempPregIdx);
+    regRead->SetRegIdx(tempPregIdx);
+    return isSuccess;
+  }
+  Error("expect special or pseudo register but get ");
+  return false;
+}
+
+bool MIRParser::ParseExprConstval(BaseNodePtr &expr) {
+  auto *exprConst = mod.CurFuncCodeMemPool()->New<ConstvalNode>();
+  TokenKind typeTk = lexer.NextToken();
+  if (!IsPrimitiveType(typeTk)) {
+    Error("expect type for GetConstVal but get ");
+    return false;
+  }
+  exprConst->SetPrimType(GetPrimitiveType(typeTk));
+  lexer.NextToken();
+  MIRConst *constVal = nullptr;
+  if (!ParseScalarValue(constVal, *GlobalTables::GetTypeTable().GetPrimType(exprConst->GetPrimType()))) {
+    Error("expect scalar type but get ");
+    return false;
+  }
+  exprConst->SetConstVal(constVal);
+  expr = exprConst;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprConststr(BaseNodePtr &expr) {
+  auto *strConst = mod.CurFuncCodeMemPool()->New<ConststrNode>();
+  TokenKind tk = lexer.NextToken();
+  if (!IsPrimitiveType(tk)) {
+    Error("expect primitive type for conststr but get ");
+    return false;
+  }
+  strConst->SetPrimType(GetPrimitiveType(tk));
+  if (!IsAddress(strConst->GetPrimType())) {
+    Error("expect address type for conststr but get ");
+    return false;
+  }
+  tk = lexer.NextToken();
+  if (tk != TK_string) {
+    Error("expect string literal for conststr but get ");
+    return false;
+  }
+  strConst->SetStrIdx(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()));
+  expr = strConst;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprConststr16(BaseNodePtr &expr) {
+  auto *str16Const = mod.CurFuncCodeMemPool()->New<Conststr16Node>();
+  TokenKind tk = lexer.NextToken();
+  if (!IsPrimitiveType(tk)) {
+    Error("expect primitive type for conststr16 but get ");
+    return false;
+  }
+  str16Const->SetPrimType(GetPrimitiveType(tk));
+  if (!IsAddress(str16Const->GetPrimType())) {
+    Error("expect address type for conststr16 but get ");
+    return false;
+  }
+  tk = lexer.NextToken();
+  if (tk != TK_string) {
+    Error("expect string literal for conststr16 but get ");
+    return false;
+  }
+  // UTF-16 strings in mpl files are presented as UTF-8 strings
+  // to keep the printable chars in ascii form
+  // so we need to do a UTF8ToUTF16 conversion
+  std::string str = lexer.GetName();
+  std::u16string str16;
+  (void)namemangler::UTF8ToUTF16(str16, str);
+  str16Const->SetStrIdx(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str16));
+  expr = str16Const;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprSizeoftype(BaseNodePtr &expr) {
+  auto *exprSizeOfType = mod.CurFuncCodeMemPool()->New<SizeoftypeNode>();
+  TokenKind typeTk = lexer.NextToken();
+  if (!IsPrimitiveType(typeTk)) {
+    Error("expect type for GetConstVal but get ");
+    return false;
+  }
+  exprSizeOfType->SetPrimType(GetPrimitiveType(typeTk));
+  lexer.NextToken();
+  TyIdx tyidx(0);
+  if (!ParseType(tyidx)) {
+    Error("expect type parsing array but get ");
+    return false;
+  }
+  exprSizeOfType->SetTyIdx(tyidx);
+  expr = exprSizeOfType;
+  return true;
+}
+
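+// Illustrative leaf expressions handled above, as they appear in .mpl text:
+//   constval i32 7
+//   conststr ptr "hello"
+//   sizeoftype u32 <$MyStruct>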
+bool MIRParser::ParseExprFieldsDist(BaseNodePtr &expr) {
+  TokenKind typeTk = lexer.NextToken();
+  if (!IsPrimitiveType(typeTk)) {
+    Error("expect type for GetConstVal but get ");
+    return false;
+  }
+  auto *node = mod.CurFuncCodeMemPool()->New<FieldsDistNode>();
+  node->SetPrimType(GetPrimitiveType(typeTk));
+  lexer.NextToken();
+  TyIdx tyIdx(0);
+  if (!ParseType(tyIdx)) {
+    Error("expect type parsing array but get ");
+    return false;
+  }
+  node->SetTyIdx(tyIdx);
+  TokenKind tk = lexer.GetTokenKind();
+  if (tk != TK_intconst) {
+    Error("expect type int but get");
+    return false;
+  }
+  node->SetFiledID1(lexer.GetTheIntVal());
+  tk = lexer.NextToken();
+  if (tk != TK_intconst) {
+    Error("expect type int but get");
+    return false;
+  }
+  node->SetFiledID2(lexer.GetTheIntVal());
+  lexer.NextToken();
+  expr = node;
+  return true;
+}
+
+bool MIRParser::ParseExprBinary(BaseNodePtr &expr) {
+  Opcode opcode = GetBinaryOp(lexer.GetTokenKind());
+  if (opcode == OP_undef) {
+    Error("expect binary operator but get ");
+    return false;
+  }
+  auto *addExpr = mod.CurFuncCodeMemPool()->New<BinaryNode>(opcode);
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect type parsing binary operator but get ");
+    return false;
+  }
+  addExpr->SetPrimType(GetPrimitiveType(lexer.GetTokenKind()));
+  lexer.NextToken();
+  BaseNode *opnd0 = nullptr;
+  BaseNode *opnd1 = nullptr;
+  if (!ParseExprTwoOperand(opnd0, opnd1)) {
+    return false;
+  }
+  addExpr->SetBOpnd(opnd0, 0);
+  addExpr->SetBOpnd(opnd1, 1);
+  expr = addExpr;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprCompare(BaseNodePtr &expr) {
+  Opcode opcode = GetBinaryOp(lexer.GetTokenKind());
+  auto *addExpr = mod.CurFuncCodeMemPool()->New<CompareNode>(opcode);
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect type parsing compare operator but get ");
+    return false;
+  }
+  addExpr->SetPrimType(GetPrimitiveType(lexer.GetTokenKind()));
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect operand type parsing compare operator but get ");
+    return false;
+  }
+  addExpr->SetOpndType(GetPrimitiveType(lexer.GetTokenKind()));
+  lexer.NextToken();
+  BaseNode *opnd0 = nullptr;
+  BaseNode *opnd1 = nullptr;
+  if (!ParseExprTwoOperand(opnd0, opnd1)) {
+    return false;
+  }
+  addExpr->SetBOpnd(opnd0, 0);
+  addExpr->SetBOpnd(opnd1, 1);
+  expr = addExpr;
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprDepositbits(BaseNodePtr &expr) {
+  // syntax: depositbits <int-type> <bOffset> <bSize> (<opnd0>, <opnd1>)
+  if (lexer.GetTokenKind() != TK_depositbits) {
+    Error("expect depositbits but get ");
+    return false;
+  }
+  auto *dpsbNode = mod.CurFuncCodeMemPool()->New<DepositbitsNode>();
+  expr = dpsbNode;
+  PrimType ptyp = GetPrimitiveType(lexer.NextToken());
+  if (!IsPrimitiveInteger(ptyp)) {
+    Error("expect <int-type> but get ");
+    return false;
+  }
+  dpsbNode->SetPrimType(ptyp);
+  if (lexer.NextToken() != TK_intconst) {
+    Error("expect bOffset but get ");
+    return false;
+  }
+  dpsbNode->SetBitsOffset(lexer.GetTheIntVal());
+  if (lexer.NextToken() != TK_intconst) {
+    Error("expect bSize but get ");
+    return false;
+  }
+  dpsbNode->SetBitsSize(lexer.GetTheIntVal());
+  lexer.NextToken();
+  BaseNode *opnd0 = nullptr;
+  BaseNode *opnd1 = nullptr;
+  if (!ParseExprTwoOperand(opnd0, opnd1)) {
+    Error("ParseExprDepositbits when parsing two operand");
+    return false;
+  }
+  dpsbNode->SetBOpnd(opnd0, 0);
+  dpsbNode->SetBOpnd(opnd1, 1);
+  lexer.NextToken();
+  return true;
+}
+
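+// iread dereferences a pointer expression and iaddrof folds a field's
+// offset into an address; both share the operand shape parsed below, e.g.
+// (illustrative): iread i32 <* <$Point>> 2 (dread ptr %p)  // field 2 via %p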
+bool MIRParser::ParseExprIreadIaddrof(IreadNode &expr) {
+  // syntax : iread/iaddrof <prim-type> <type> <field-id> (<addr-expr>)
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    return false;
+  }
+  expr.SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  tyidx = TyIdx(0);
+  if (!ParseDerivedType(tyidx)) {
+    Error("ParseExprIreadIaddrof failed when parsing derived type");
+    return false;
+  }
+  expr.SetTyIdx(tyidx);
+  if (lexer.GetTokenKind() == TK_intconst) {
+    expr.SetFieldID(lexer.theIntVal);
+    lexer.NextToken();
+  }
+  BaseNode *opnd0 = nullptr;
+  if (!ParseExprOneOperand(opnd0)) {
+    return false;
+  }
+  expr.SetOpnd(opnd0, 0);
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprIread(BaseNodePtr &expr) {
+  // syntax : iread <prim-type> <type> <field-id> (<addr-expr>)
+  auto *iExpr = mod.CurFuncCodeMemPool()->New<IreadNode>(OP_iread);
+  if (!ParseExprIreadIaddrof(*iExpr)) {
+    Error("ParseExprIread failed when trying to parse addof");
+    return false;
+  }
+  expr = iExpr;
+  return true;
+}
+
+bool MIRParser::ParseExprIaddrof(BaseNodePtr &expr) {
+  // syntax : iaddrof <prim-type> <type> <field-id> (<addr-expr>)
+  auto *iExpr = mod.CurFuncCodeMemPool()->New<IreadNode>(OP_iaddrof);
+  if (!ParseExprIreadIaddrof(*iExpr)) {
+    Error("ParseExprIaddrof failed when trying to parse addof");
+    return false;
+  }
+  expr = iExpr;
+  return true;
+}
+
+bool MIRParser::ParseExprIreadoff(BaseNodePtr &expr) {
+  // syntax : ireadoff <prim-type> <offset> (<addr-expr>)
+  auto *iReadOff = mod.CurFuncCodeMemPool()->New<IreadoffNode>();
+  expr = iReadOff;
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    return false;
+  }
+  iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  if (!IsPrimitiveScalar(iReadOff->GetPrimType())) {
+    Error("only scalar types allowed for ireadoff");
+    return false;
+  }
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect offset but get ");
+    return false;
+  }
+  iReadOff->SetOffset(lexer.GetTheIntVal());
+  lexer.NextToken();
+  BaseNode *opnd = nullptr;
+  if (!ParseExprOneOperand(opnd)) {
+    Error("ParseExprIreadoff when parsing one operand");
+    return false;
+  }
+  iReadOff->SetOpnd(opnd, 0);
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprIreadFPoff(BaseNodePtr &expr) {
+  // syntax : ireadfpoff <prim-type> <offset>
+  auto *iReadOff = mod.CurFuncCodeMemPool()->New<IreadFPoffNode>();
+  expr = iReadOff;
+  if (!IsPrimitiveType(lexer.NextToken())) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    return false;
+  }
+  iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  if (lexer.GetTokenKind() != TK_intconst) {
+    Error("expect offset but get ");
+    return false;
+  }
+  iReadOff->SetOffset(lexer.GetTheIntVal());
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprAddrof(BaseNodePtr &expr) {
+  // syntax: addrof <prim-type> <st-name> <field-id>
+  auto *addrofNode = mod.CurFuncCodeMemPool()->New<AddrofNode>(OP_addrof);
+  expr = addrofNode;
+  if (lexer.GetTokenKind() != TK_addrof) {
+    Error("expect addrof but get ");
+    return false;
+  }
+  lexer.NextToken();
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  addrofNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  StIdx stidx;
+  if (!ParseDeclaredSt(stidx)) {
+    return false;
+  }
+  if (stidx.FullIdx() == 0) {
+    Error("expect symbol ParseExprAddrof");
+    return false;
+  }
+  if (stidx.IsGlobal()) {
+    MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+    DEBUG_ASSERT(sym != nullptr, "null ptr check");
+    sym->SetHasPotentialAssignment();
+  }
+  addrofNode->SetStIdx(stidx);
+  TokenKind tk = lexer.NextToken();
+  if (IsDelimitationTK(tk)) {
+    addrofNode->SetFieldID(0);
+  } else if (tk == TK_intconst) {
+    addrofNode->SetFieldID(lexer.GetTheIntVal());
+    lexer.NextToken();
+  } else {
+    addrofNode->SetFieldID(0);
+  }
+  return true;
+}
+
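+// addrofoff below is the offset-based variant of addrof above: it takes a
+// byte offset in place of a field id, e.g. (illustrative):
+//   addrofoff a64 $g 16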
+bool MIRParser::ParseExprAddrofoff(BaseNodePtr &expr) {
+  // syntax: addrofoff <prim-type> <st-name> <offset>
+  AddrofoffNode *addrofoffNode = mod.CurFuncCodeMemPool()->New<AddrofoffNode>(OP_addrofoff);
+  expr = addrofoffNode;
+  if (lexer.GetTokenKind() != TK_addrofoff) {
+    Error("expect addrofoff but get ");
+    return false;
+  }
+  lexer.NextToken();
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    Error("expect primitive type but get ");
+    return false;
+  }
+  addrofoffNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  StIdx stidx;
+  if (!ParseDeclaredSt(stidx)) {
+    return false;
+  }
+  if (stidx.FullIdx() == 0) {
+    Error("expect symbol ParseExprAddrofoff");
+    return false;
+  }
+  if (stidx.IsGlobal()) {
+    MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+    DEBUG_ASSERT(sym != nullptr, "null ptr check");
+    sym->SetHasPotentialAssignment();
+  }
+  addrofoffNode->stIdx = stidx;
+  TokenKind tk = lexer.NextToken();
+  if (tk == TK_intconst) {
+    addrofoffNode->offset = static_cast<int32>(lexer.GetTheIntVal());
+    lexer.NextToken();
+  } else {
+    Error("expect integer offset but get ");
+    return false;
+  }
+  return true;
+}
+
+bool MIRParser::ParseExprAddroffunc(BaseNodePtr &expr) {
+  auto *addrOfFuncNode = mod.CurFuncCodeMemPool()->New<AddroffuncNode>();
+  expr = addrOfFuncNode;
+  TokenKind tk = lexer.NextToken();
+  if (tk != TK_a32 && tk != TK_a64 && tk != TK_ptr) {
+    Error("expect address primitive type but get ");
+    return false;
+  }
+  TyIdx tyidx(0);
+  if (!ParsePrimType(tyidx)) {
+    Error("ParseExprAddroffunc failed when parsing primitive type");
+    return false;
+  }
+  addrOfFuncNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+  if (lexer.GetTokenKind() != TK_fname) {
+    Error("expect function name but get ");
+    return false;
+  }
+  PUIdx pidx;
+  if (!ParseDeclaredFunc(pidx)) {
+    if (mod.GetFlavor() < kMmpl) {
+      Error("expect .mmpl file");
+      return false;
+    }
+    pidx = EnterUndeclaredFunction();
+  }
+  addrOfFuncNode->SetPUIdx(pidx);
+  lexer.NextToken();
+  return true;
+}
+
+bool MIRParser::ParseExprAddroflabel(BaseNodePtr &expr) {
+  // syntax: addroflabel